code stringlengths 17 6.64M |
|---|
def ResNeXt29_8x64d():
    """Build a ResNeXt-29 (8x64d): three stages of 3 blocks, cardinality 8, width 64."""
    return ResNeXt(num_blocks=[3] * 3, cardinality=8, bottleneck_width=64)
|
def ResNeXt29_32x4d():
    """Build a ResNeXt-29 (32x4d): three stages of 3 blocks, cardinality 32, width 4."""
    return ResNeXt(num_blocks=[3] * 3, cardinality=32, bottleneck_width=4)
|
def test_resnext():
    """Smoke test: push one CIFAR-sized batch through ResNeXt29_2x64d and print the output size."""
    model = ResNeXt29_2x64d()
    batch = torch.randn(1, 3, 32, 32)
    logits = model(batch)
    print(logits.size())
|
class BasicBlock(nn.Module):
    """SE-ResNet basic block: two 3x3 convs with batch norm, a
    squeeze-and-excitation channel gate, and an identity/projection shortcut.

    Args:
        in_planes: Input channel count.
        planes: Output channel count.
        stride: Stride of the first conv (2 downsamples).
    """

    def __init__(self, in_planes, planes, stride=1):
        super(BasicBlock, self).__init__()
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        # Projection shortcut when the spatial size or channel count changes.
        self.shortcut = nn.Sequential()
        if ((stride != 1) or (in_planes != planes)):
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, planes, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes))
        # SE layers: squeeze channels to planes//16, then excite back to planes.
        self.fc1 = nn.Conv2d(planes, (planes // 16), kernel_size=1)
        self.fc2 = nn.Conv2d((planes // 16), planes, kernel_size=1)

    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        # Squeeze: global average pool to 1x1 (assumes square feature maps).
        w = F.avg_pool2d(out, out.size(2))
        w = F.relu(self.fc1(w))
        # torch.sigmoid replaces F.sigmoid, which is deprecated and removed
        # in current PyTorch releases.
        w = torch.sigmoid(self.fc2(w))
        out = (out * w)
        out += self.shortcut(x)
        out = F.relu(out)
        return out
|
class PreActBlock(nn.Module):
    """Pre-activation SE block: BN-ReLU precedes each conv, with a
    squeeze-and-excitation gate applied before the residual addition.

    Args:
        in_planes: Input channel count.
        planes: Output channel count.
        stride: Stride of the first conv (2 downsamples).
    """

    def __init__(self, in_planes, planes, stride=1):
        super(PreActBlock, self).__init__()
        self.bn1 = nn.BatchNorm2d(in_planes)
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
        # Projection shortcut only when the shape changes; the identity case
        # is handled by the hasattr check in forward().
        if ((stride != 1) or (in_planes != planes)):
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, planes, kernel_size=1, stride=stride, bias=False))
        # SE layers.
        self.fc1 = nn.Conv2d(planes, (planes // 16), kernel_size=1)
        self.fc2 = nn.Conv2d((planes // 16), planes, kernel_size=1)

    def forward(self, x):
        out = F.relu(self.bn1(x))
        # The projection shortcut reads the pre-activated input.
        shortcut = (self.shortcut(out) if hasattr(self, 'shortcut') else x)
        out = self.conv1(out)
        out = self.conv2(F.relu(self.bn2(out)))
        # Squeeze-and-excitation (assumes square feature maps).
        w = F.avg_pool2d(out, out.size(2))
        w = F.relu(self.fc1(w))
        # torch.sigmoid replaces the deprecated/removed F.sigmoid.
        w = torch.sigmoid(self.fc2(w))
        out = (out * w)
        out += shortcut
        return out
|
class SENet(nn.Module):
    """SENet backbone for 32x32 inputs: a conv stem followed by four stages
    of SE blocks and a linear classifier head."""

    def __init__(self, block, num_blocks, num_classes=10):
        super(SENet, self).__init__()
        self.in_planes = 64
        self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
        self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
        self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
        self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
        self.linear = nn.Linear(512, num_classes)

    def _make_layer(self, block, planes, num_blocks, stride):
        # Only the first block of a stage downsamples.
        layers = []
        for s in [stride] + [1] * (num_blocks - 1):
            layers.append(block(self.in_planes, planes, s))
            self.in_planes = planes
        return nn.Sequential(*layers)

    def forward(self, x):
        h = F.relu(self.bn1(self.conv1(x)))
        for stage in (self.layer1, self.layer2, self.layer3, self.layer4):
            h = stage(h)
        h = F.avg_pool2d(h, 4)
        h = h.view(h.size(0), -1)
        return self.linear(h)
|
def SENet18():
    """SENet-18: pre-activation SE blocks, two per stage."""
    return SENet(PreActBlock, [2] * 4)
|
def test():
    """Smoke test: run a single random image through SENet18 and print the output size."""
    model = SENet18()
    out = model(torch.randn(1, 3, 32, 32))
    print(out.size())
|
class ShuffleBlock(nn.Module):
    """Channel-shuffle layer for ShuffleNet-style grouped convolutions."""

    def __init__(self, groups):
        """

        Args:
            groups: Number of channel groups to interleave.
        """
        super(ShuffleBlock, self).__init__()
        self.groups = groups

    def forward(self, x):
        'Channel shuffle: [N,C,H,W] -> [N,g,C/g,H,W] -> [N,C/g,g,H,w] -> [N,C,H,W]'
        (N, C, H, W) = x.size()
        g = self.groups
        # Integer division: `view` requires int sizes (the original `C / g`
        # produced a float on Python 3 and raised a TypeError).
        return x.view(N, g, (C // g), H, W).permute(0, 2, 1, 3, 4).contiguous().view(N, C, H, W)
|
class Bottleneck(nn.Module):
    """ShuffleNet bottleneck: grouped 1x1 conv -> channel shuffle -> 3x3
    depthwise conv (optionally strided) -> grouped 1x1 conv. At stride 2 the
    average-pooled input is concatenated instead of added.

    Args:
        in_planes: Input channel count.
        out_planes: Output channels of the residual branch.
        stride: 1 (add shortcut) or 2 (downsample, concat shortcut).
        groups: Group count for the grouped 1x1 convolutions.
    """

    def __init__(self, in_planes, out_planes, stride, groups):
        super(Bottleneck, self).__init__()
        self.stride = stride
        # Integer division: nn.Conv2d requires int channel counts (the
        # original float `/` broke on Python 3 / current PyTorch).
        mid_planes = (out_planes // 4)
        # The stem stage (24-channel input) uses no grouping on the first 1x1.
        g = (1 if (in_planes == 24) else groups)
        self.conv1 = nn.Conv2d(in_planes, mid_planes, kernel_size=1, groups=g, bias=False)
        self.bn1 = nn.BatchNorm2d(mid_planes)
        self.shuffle1 = ShuffleBlock(groups=g)
        # Depthwise 3x3 (groups == channels).
        self.conv2 = nn.Conv2d(mid_planes, mid_planes, kernel_size=3, stride=stride, padding=1, groups=mid_planes, bias=False)
        self.bn2 = nn.BatchNorm2d(mid_planes)
        self.conv3 = nn.Conv2d(mid_planes, out_planes, kernel_size=1, groups=groups, bias=False)
        self.bn3 = nn.BatchNorm2d(out_planes)
        self.shortcut = nn.Sequential()
        if (stride == 2):
            # Downsampling path: average-pool the input for concatenation.
            self.shortcut = nn.Sequential(nn.AvgPool2d(3, stride=2, padding=1))

    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.shuffle1(out)
        out = F.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        res = self.shortcut(x)
        out = (F.relu(torch.cat([out, res], 1)) if (self.stride == 2) else F.relu((out + res)))
        return out
|
class ShuffleNet(nn.Module):
    """ShuffleNet for 32x32 inputs, configured by a dict with keys
    'out_planes', 'num_blocks', and 'groups'."""

    def __init__(self, cfg):
        super(ShuffleNet, self).__init__()
        out_planes = cfg['out_planes']
        num_blocks = cfg['num_blocks']
        groups = cfg['groups']
        self.conv1 = nn.Conv2d(3, 24, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(24)
        self.in_planes = 24
        self.layer1 = self._make_layer(out_planes[0], num_blocks[0], groups)
        self.layer2 = self._make_layer(out_planes[1], num_blocks[1], groups)
        self.layer3 = self._make_layer(out_planes[2], num_blocks[2], groups)
        self.linear = nn.Linear(out_planes[2], 10)

    def _make_layer(self, out_planes, num_blocks, groups):
        blocks = []
        for idx in range(num_blocks):
            first = (idx == 0)
            # The first block of a stage downsamples; its branch output is
            # narrowed because the shortcut channels are concatenated.
            blocks.append(Bottleneck(
                self.in_planes,
                out_planes - (self.in_planes if first else 0),
                stride=2 if first else 1,
                groups=groups))
            self.in_planes = out_planes
        return nn.Sequential(*blocks)

    def forward(self, x):
        h = F.relu(self.bn1(self.conv1(x)))
        h = self.layer3(self.layer2(self.layer1(h)))
        h = F.avg_pool2d(h, 4)
        h = h.view(h.size(0), -1)
        return self.linear(h)
|
def ShuffleNetG2():
    """ShuffleNet with 2 groups."""
    config = {'out_planes': [200, 400, 800], 'num_blocks': [4, 8, 4], 'groups': 2}
    return ShuffleNet(config)
|
def ShuffleNetG3():
    """ShuffleNet with 3 groups."""
    config = {'out_planes': [240, 480, 960], 'num_blocks': [4, 8, 4], 'groups': 3}
    return ShuffleNet(config)
|
def test():
    """Smoke test: run one random image through ShuffleNetG2 and print the logits."""
    model = ShuffleNetG2()
    batch = torch.randn(1, 3, 32, 32)
    out = model(batch)
    print(out)
|
def VGG19():
    """Build a VGG-19 from the module-level `cfg` table."""
    return VGG('VGG19')
|
class VGG(nn.Module):
    """VGG for CIFAR-10, built from the module-level `cfg` table keyed by
    configuration name (e.g. 'VGG11', 'VGG19')."""

    def __init__(self, vgg_name):
        super(VGG, self).__init__()
        self.features = self._make_layers(cfg[vgg_name])
        self.classifier = nn.Linear(512, 10)

    def forward(self, x):
        feats = self.features(x)
        flat = feats.view(feats.size(0), -1)
        return self.classifier(flat)

    def _make_layers(self, cfg):
        # cfg entries are either 'M' (max pool) or an int channel count.
        layers = []
        in_channels = 3
        for spec in cfg:
            if spec == 'M':
                layers.append(nn.MaxPool2d(kernel_size=2, stride=2))
            else:
                layers.extend([
                    nn.Conv2d(in_channels, spec, kernel_size=3, padding=1),
                    nn.BatchNorm2d(spec),
                    nn.ReLU(inplace=True),
                ])
                in_channels = spec
        layers.append(nn.AvgPool2d(kernel_size=1, stride=1))
        return nn.Sequential(*layers)
|
def test():
    """Smoke test: run a 2-image batch through VGG11 and print the output size."""
    model = VGG('VGG11')
    batch = torch.randn(2, 3, 32, 32)
    out = model(batch)
    print(out.size())
|
def fmad(ys, xs, dxs):
    """Forward-mode AD (Jacobian-vector product) via double backward.

    Args:
        ys: Output tensors.
        xs: Input tensors ys depend on.
        dxs: Tangent vectors, one per input in xs.

    Returns:
        tuple: d(ys)/d(xs) applied to dxs, one tensor per element of ys.
    """
    # Dummy cotangents; requires_grad so the first grad call stays
    # differentiable with respect to them.
    # Fix: the original referenced an undefined name `t` (NameError);
    # torch.zeros_like is the intended call.
    v = [torch.zeros_like(y, requires_grad=True) for y in ys]
    g = grad(ys, xs, grad_outputs=v, create_graph=True)
    return grad(g, v, grad_outputs=dxs)
|
def arg_parse():
    """Build the GcnInformax CLI parser and parse sys.argv.

    Returns:
        argparse.Namespace: Parsed arguments.
    """
    parser = argparse.ArgumentParser(description='GcnInformax Arguments.')
    add = parser.add_argument
    add('--DS', dest='DS', help='Dataset')
    # Boolean flags: store_const gives True when the flag is present.
    add('--local', dest='local', action='store_const', const=True, default=False)
    add('--glob', dest='glob', action='store_const', const=True, default=False)
    add('--prior', dest='prior', action='store_const', const=True, default=False)
    add('--lr', dest='lr', type=float, help='Learning rate.')
    add('--num-gc-layers', dest='num_gc_layers', type=int, default=5,
        help='Number of graph convolution layers before each pooling')
    add('--hidden-dim', dest='hidden_dim', type=int, default=32, help='')
    add('--repeats', dest='repeats', type=int, default=1, help='')
    add('--batch_size', dest='batch_size', type=int, default=128, help='')
    return parser.parse_args()
|
def raise_measure_error(measure):
    """Raise NotImplementedError naming the unsupported measure and listing the supported ones."""
    supported_measures = ['GAN', 'JSD', 'JSD_hard', 'X2', 'KL', 'RKL', 'DV', 'H2', 'W1']
    message = 'Measure `{}` not supported. Supported: {}'.format(measure, supported_measures)
    raise NotImplementedError(message)
|
def get_positive_expectation(p_samples, measure, average=True, tau_plus=0.5):
    """Computes the positive part of a divergence / difference.

    Args:
        p_samples: Positive samples.
        measure: Measure to compute for.
        average: Average the result over samples.
        tau_plus: Positive-class prior used by the 'JSD_hard' measure.

    Returns:
        torch.Tensor
    """
    log_2 = math.log(2.0)
    # One lambda per measure; only the selected one is evaluated.
    formulas = {
        'GAN': lambda: -F.softplus(-p_samples),
        'JSD': lambda: log_2 - F.softplus(-p_samples),
        'JSD_hard': lambda: (log_2 - F.softplus(-p_samples))
                            - (tau_plus / (1 - tau_plus)) * (F.softplus(-p_samples) + p_samples),
        'X2': lambda: p_samples ** 2,
        'KL': lambda: p_samples + 1.0,
        'RKL': lambda: -torch.exp(-p_samples),
        'DV': lambda: p_samples,
        'H2': lambda: 1.0 - torch.exp(-p_samples),
        'W1': lambda: p_samples,
    }
    if measure not in formulas:
        raise_measure_error(measure)
    Ep = formulas[measure]()
    return Ep.mean() if average else Ep
|
def get_negative_expectation(q_samples, measure, average=True, beta=0, tau_plus=0.5):
    """Computes the negative part of a divergence / difference.

    Args:
        q_samples: Negative samples.
        measure: Measure to compute for.
        average: Average the result over samples.
        beta: Reweighting temperature for 'JSD_hard' (0 disables reweighting).
        tau_plus: Positive-class prior used by 'JSD_hard'.

    Returns:
        torch.Tensor
    """
    log_2 = math.log(2.0)
    if measure == 'GAN':
        Eq = F.softplus(-q_samples) + q_samples
    elif measure == 'JSD':
        Eq = (F.softplus(-q_samples) + q_samples) - log_2
    elif measure == 'JSD_hard':
        if beta == 0:
            # Debiased JSD without reweighting: rescale the plain JSD term.
            Eq = get_negative_expectation(q_samples, measure='JSD', average=average, beta=0)
            Eq = Eq / (1 - tau_plus)
        else:
            Eq = F.softplus(-q_samples) + q_samples
            # Exponential reweighting, normalized per row (dim=1 mean).
            reweight = (-2 * q_samples) / max(q_samples.max(), q_samples.min().abs())
            reweight = (beta * reweight).exp()
            reweight = reweight / reweight.mean(dim=1).view(-1, 1)
            Eq = (reweight * Eq) / (1 - tau_plus)
            Eq -= log_2
    elif measure == 'X2':
        Eq = -0.5 * ((torch.sqrt(q_samples ** 2) + 1.0) ** 2)
    elif measure == 'KL':
        Eq = torch.exp(q_samples)
    elif measure == 'RKL':
        Eq = q_samples - 1.0
    elif measure == 'DV':
        Eq = log_sum_exp(q_samples, 0) - math.log(q_samples.size(0))
    elif measure == 'H2':
        Eq = torch.exp(q_samples) - 1.0
    elif measure == 'W1':
        Eq = q_samples
    else:
        raise_measure_error(measure)
    return Eq.mean() if average else Eq
|
def infer_conv_size(w, k, s, p):
    """Infers the next size after convolution.

    Args:
        w: Input size.
        k: Kernel size.
        s: Stride.
        p: Padding.

    Returns:
        int: Output size.
    """
    return (w - k + 2 * p) // s + 1
|
class Convnet(nn.Module):
    """Basic convnet convenience class.

    Attributes:
        conv_layers: nn.Sequential of nn.Conv2d layers with batch norm,
            dropout, nonlinearity.
        fc_layers: nn.Sequential of nn.Linear layers with batch norm,
            dropout, nonlinearity.
        reshape: Simple reshape layer.
        conv_shape: Shape of the convolutional output.
    """
    def __init__(self, *args, **kwargs):
        super().__init__()
        # All construction is delegated so subclasses can override create_layers.
        self.create_layers(*args, **kwargs)
    def create_layers(self, shape, conv_args=None, fc_args=None):
        """Creates layers.

        conv_args are in format (dim_h, f_size, stride, pad, batch_norm, dropout, nonlinearity, pool)
        fc_args are in format (dim_h, batch_norm, dropout, nonlinearity)

        Args:
            shape: Shape of input, as (dim_x, dim_y, channels).
            conv_args: List of tuple of convolutional arguments.
            fc_args: List of tuple of fully-connected arguments.
        """
        (self.conv_layers, self.conv_shape) = self.create_conv_layers(shape, conv_args)
        (dim_x, dim_y, dim_out) = self.conv_shape
        # Flattened size of the final conv feature map feeds the FC stack.
        dim_r = ((dim_x * dim_y) * dim_out)
        self.reshape = View((- 1), dim_r)
        (self.fc_layers, _) = self.create_linear_layers(dim_r, fc_args)
    def create_conv_layers(self, shape, conv_args):
        """Creates a set of convolutional layers.

        Args:
            shape: Input shape as (dim_x, dim_y, channels).
            conv_args: List of tuple of convolutional arguments.

        Returns:
            (nn.Sequential, tuple): layers and the output (dim_x, dim_y, dim_out).
        """
        conv_layers = nn.Sequential()
        conv_args = (conv_args or [])
        (dim_x, dim_y, dim_in) = shape
        for (i, (dim_out, f, s, p, batch_norm, dropout, nonlinearity, pool)) in enumerate(conv_args):
            name = '({}/{})_{}'.format(dim_in, dim_out, (i + 1))
            conv_block = nn.Sequential()
            if (dim_out is not None):
                # Conv bias is redundant when followed by batch norm.
                conv = nn.Conv2d(dim_in, dim_out, kernel_size=f, stride=s, padding=p, bias=(not batch_norm))
                conv_block.add_module((name + 'conv'), conv)
                (dim_x, dim_y) = self.next_size(dim_x, dim_y, f, s, p)
            else:
                # dim_out None means "no conv": channels pass through unchanged.
                dim_out = dim_in
            if dropout:
                conv_block.add_module((name + 'do'), nn.Dropout2d(p=dropout))
            if batch_norm:
                bn = nn.BatchNorm2d(dim_out)
                conv_block.add_module((name + 'bn'), bn)
            if nonlinearity:
                # get_nonlinearity resolves a spec to a module; defined elsewhere in the project.
                nonlinearity = get_nonlinearity(nonlinearity)
                conv_block.add_module(nonlinearity.__class__.__name__, nonlinearity)
            if pool:
                (pool_type, kernel, stride) = pool
                # pool_type is the attribute name of an nn pooling class, e.g. 'MaxPool2d'.
                Pool = getattr(nn, pool_type)
                conv_block.add_module((name + 'pool'), Pool(kernel_size=kernel, stride=stride))
                (dim_x, dim_y) = self.next_size(dim_x, dim_y, kernel, stride, 0)
            conv_layers.add_module(name, conv_block)
            dim_in = dim_out
        dim_out = dim_in
        return (conv_layers, (dim_x, dim_y, dim_out))
    def create_linear_layers(self, dim_in, fc_args):
        """Creates a set of fully-connected layers.

        Args:
            dim_in: Number of input units.
            fc_args: List of tuple of fully-connected arguments.

        Returns:
            (nn.Sequential, int): layers and the output dimensionality.
        """
        fc_layers = nn.Sequential()
        fc_args = (fc_args or [])
        for (i, (dim_out, batch_norm, dropout, nonlinearity)) in enumerate(fc_args):
            name = '({}/{})_{}'.format(dim_in, dim_out, (i + 1))
            fc_block = nn.Sequential()
            if (dim_out is not None):
                fc_block.add_module((name + 'fc'), nn.Linear(dim_in, dim_out))
            else:
                # dim_out None: no linear layer, dimensionality unchanged.
                dim_out = dim_in
            if dropout:
                fc_block.add_module((name + 'do'), nn.Dropout(p=dropout))
            if batch_norm:
                bn = nn.BatchNorm1d(dim_out)
                fc_block.add_module((name + 'bn'), bn)
            if nonlinearity:
                nonlinearity = get_nonlinearity(nonlinearity)
                fc_block.add_module(nonlinearity.__class__.__name__, nonlinearity)
            fc_layers.add_module(name, fc_block)
            dim_in = dim_out
        return (fc_layers, dim_in)
    def next_size(self, dim_x, dim_y, k, s, p):
        """Infers the next size of a convolutional layer.

        Args:
            dim_x: First dimension.
            dim_y: Second dimension.
            k: Kernel size (int or (kx, ky)).
            s: Stride (int or (sx, sy)).
            p: Padding (int or (px, py)).

        Returns:
            (int, int): (First output dimension, Second output dimension)
        """
        if isinstance(k, int):
            (kx, ky) = (k, k)
        else:
            (kx, ky) = k
        if isinstance(s, int):
            (sx, sy) = (s, s)
        else:
            (sx, sy) = s
        if isinstance(p, int):
            (px, py) = (p, p)
        else:
            (px, py) = p
        return (infer_conv_size(dim_x, kx, sx, px), infer_conv_size(dim_y, ky, sy, py))
    def forward(self, x: torch.Tensor, return_full_list=False):
        """Forward pass.

        Args:
            x: Input.
            return_full_list: Optional, returns all layer outputs.

        Returns:
            (conv_out, fc_out): each a tensor, or a list of per-layer tensors
            when return_full_list is True.
        """
        if return_full_list:
            conv_out = []
            for conv_layer in self.conv_layers:
                x = conv_layer(x)
                conv_out.append(x)
        else:
            conv_out = self.conv_layers(x)
            x = conv_out
        x = self.reshape(x)
        if return_full_list:
            fc_out = []
            for fc_layer in self.fc_layers:
                x = fc_layer(x)
                fc_out.append(x)
        else:
            fc_out = self.fc_layers(x)
        return (conv_out, fc_out)
|
class FoldedConvnet(Convnet):
    """Convnet with strided crop input.

    Input images are unfolded into overlapping crops, processed as one large
    batch, then refolded into a spatial grid once crops reach 1x1.
    """
    def create_layers(self, shape, crop_size=8, conv_args=None, fc_args=None):
        """Creates layers.

        conv_args are in format (dim_h, f_size, stride, pad, batch_norm, dropout, nonlinearity, pool)
        fc_args are in format (dim_h, batch_norm, dropout, nonlinearity)

        Args:
            shape: Shape of input.
            crop_size: Size of crops.
            conv_args: List of tuple of convolutional arguments.
            fc_args: List of tuple of fully-connected arguments.
        """
        self.crop_size = crop_size
        (dim_x, dim_y, dim_in) = shape
        if (dim_x != dim_y):
            raise ValueError('x and y dimensions must be the same to use Folded encoders.')
        # Number of crop locations per axis (crop stride is half the crop size).
        self.final_size = ((2 * (dim_x // self.crop_size)) - 1)
        self.unfold = Unfold(dim_x, self.crop_size)
        self.refold = Fold(dim_x, self.crop_size)
        # Conv layers see individual crops, not the full image.
        shape = (self.crop_size, self.crop_size, dim_in)
        (self.conv_layers, self.conv_shape) = self.create_conv_layers(shape, conv_args)
        (dim_x, dim_y, dim_out) = self.conv_shape
        dim_r = ((dim_x * dim_y) * dim_out)
        self.reshape = View((- 1), dim_r)
        (self.fc_layers, _) = self.create_linear_layers(dim_r, fc_args)
    def create_conv_layers(self, shape, conv_args):
        """Creates a set of convolutional layers.

        Args:
            shape: Input shape.
            conv_args: List of tuple of convolutional arguments.

        Returns:
            (nn.Sequential, tuple): layers and the output (dim_x, dim_y, dim_out).
        """
        conv_layers = nn.Sequential()
        conv_args = (conv_args or [])
        (dim_x, dim_y, dim_in) = shape
        for (i, (dim_out, f, s, p, batch_norm, dropout, nonlinearity, pool)) in enumerate(conv_args):
            name = '({}/{})_{}'.format(dim_in, dim_out, (i + 1))
            conv_block = nn.Sequential()
            if (dim_out is not None):
                # Conv bias is redundant when followed by batch norm.
                conv = nn.Conv2d(dim_in, dim_out, kernel_size=f, stride=s, padding=p, bias=(not batch_norm))
                conv_block.add_module((name + 'conv'), conv)
                (dim_x, dim_y) = self.next_size(dim_x, dim_y, f, s, p)
            else:
                dim_out = dim_in
            if dropout:
                conv_block.add_module((name + 'do'), nn.Dropout2d(p=dropout))
            if batch_norm:
                bn = nn.BatchNorm2d(dim_out)
                conv_block.add_module((name + 'bn'), bn)
            if nonlinearity:
                nonlinearity = get_nonlinearity(nonlinearity)
                conv_block.add_module(nonlinearity.__class__.__name__, nonlinearity)
            if pool:
                (pool_type, kernel, stride) = pool
                Pool = getattr(nn, pool_type)
                conv_block.add_module('pool', Pool(kernel_size=kernel, stride=stride))
                (dim_x, dim_y) = self.next_size(dim_x, dim_y, kernel, stride, 0)
            conv_layers.add_module(name, conv_block)
            dim_in = dim_out
        if (dim_x != dim_y):
            raise ValueError('dim_x and dim_y do not match.')
        if (dim_x == 1):
            # Crops have collapsed to 1x1: after refolding, the spatial size
            # becomes the final_size grid of crop locations.
            dim_x = self.final_size
            dim_y = self.final_size
        dim_out = dim_in
        return (conv_layers, (dim_x, dim_y, dim_out))
    def forward(self, x: torch.Tensor, return_full_list=False):
        """Forward pass.

        Args:
            x: Input.
            return_full_list: Optional, returns all layer outputs.

        Returns:
            (conv_out, fc_out): tensors, or lists of per-layer tensors when
            return_full_list is True.
        """
        x = self.unfold(x)
        conv_out = []
        for conv_layer in self.conv_layers:
            x = conv_layer(x)
            # Once crops reach 1x1, refold them into a spatial grid.
            if (x.size(2) == 1):
                x = self.refold(x)
            conv_out.append(x)
        x = self.reshape(x)
        if return_full_list:
            fc_out = []
            for fc_layer in self.fc_layers:
                x = fc_layer(x)
                fc_out.append(x)
        else:
            fc_out = self.fc_layers(x)
        if (not return_full_list):
            conv_out = conv_out[(- 1)]
        return (conv_out, fc_out)
|
def create_encoder(Module):
    """Class factory: wraps a Convnet-style Module into a cortex_DIM encoder
    that extracts features from selected layers on forward."""
    class Encoder(Module):
        """Encoder used for cortex_DIM."""
        def __init__(self, *args, local_idx=None, multi_idx=None, conv_idx=None, fc_idx=None, **kwargs):
            """

            Args:
                args: Arguments for parent class.
                local_idx: Index in list of convolutional layers for local features.
                multi_idx: Index in list of convolutional layers for multiple globals.
                conv_idx: Index in list of convolutional layers for intermediate features.
                fc_idx: Index in list of fully-connected layers for intermediate features.
                kwargs: Keyword arguments for the parent class.
            """
            super().__init__(*args, **kwargs)
            if (local_idx is None):
                raise ValueError('`local_idx` must be set')
            # Intermediate-feature index defaults to the local-feature index.
            conv_idx = (conv_idx or local_idx)
            self.local_idx = local_idx
            self.multi_idx = multi_idx
            self.conv_idx = conv_idx
            self.fc_idx = fc_idx
        def forward(self, x: torch.Tensor):
            """

            Args:
                x: Input tensor.

            Returns:
                local_out, conv_out, multi_out, hidden_out, global_out
            """
            outs = super().forward(x, return_full_list=True)
            if (len(outs) == 2):
                # Convnet-style parent: (conv_out, fc_out).
                (conv_out, fc_out) = outs
            else:
                # ResNet-style parent: concatenate the three conv-stage lists
                # into a single list of conv outputs.
                (conv_before_out, res_out, conv_after_out, fc_out) = outs
                conv_out = ((conv_before_out + res_out) + conv_after_out)
            local_out = conv_out[self.local_idx]
            if (self.multi_idx is not None):
                multi_out = conv_out[self.multi_idx]
            else:
                multi_out = None
            if (len(fc_out) > 0):
                if (self.fc_idx is not None):
                    hidden_out = fc_out[self.fc_idx]
                else:
                    hidden_out = None
                # The last FC output is the global representation.
                global_out = fc_out[(- 1)]
            else:
                hidden_out = None
                global_out = None
            conv_out = conv_out[self.conv_idx]
            return (local_out, conv_out, multi_out, hidden_out, global_out)
    return Encoder
|
class ConvnetEncoder(create_encoder(Convnet)):
    """Convnet-backed encoder for cortex_DIM."""
|
class FoldedConvnetEncoder(create_encoder(FoldedConvnet)):
    """FoldedConvnet-backed encoder for cortex_DIM."""
|
class ResnetEncoder(create_encoder(ResNet)):
    """ResNet-backed encoder for cortex_DIM."""
|
class FoldedResnetEncoder(create_encoder(FoldedResNet)):
    """FoldedResNet-backed encoder for cortex_DIM."""
|
class MIFCNet(nn.Module):
    """Simple custom network for computing MI.

    A nonlinear MLP block plus a linear shortcut whose weight is pinned to
    the identity on its first n_input rows.
    """

    def __init__(self, n_input, n_units):
        """

        Args:
            n_input: Number of input units.
            n_units: Number of output units (must be >= n_input).
        """
        super().__init__()
        assert (n_units >= n_input)
        self.linear_shortcut = nn.Linear(n_input, n_units)
        self.block_nonlinear = nn.Sequential(
            nn.Linear(n_input, n_units),
            nn.BatchNorm1d(n_units),
            nn.ReLU(),
            nn.Linear(n_units, n_units))
        # Boolean mask with ones on the leading diagonal. masked_fill_ requires
        # a bool mask in current PyTorch (uint8 masks are deprecated/removed).
        eye_mask = np.zeros((n_units, n_input), dtype=bool)
        for i in range(n_input):
            eye_mask[(i, i)] = True
        self.linear_shortcut.weight.data.uniform_((- 0.01), 0.01)
        self.linear_shortcut.weight.data.masked_fill_(torch.tensor(eye_mask), 1.0)

    def forward(self, x):
        """

        Args:
            x: Input tensor.

        Returns:
            torch.Tensor: network output.
        """
        h = (self.block_nonlinear(x) + self.linear_shortcut(x))
        return h
|
class MI1x1ConvNet(nn.Module):
    """Simple custom 1x1 convnet.

    A 1x1 Conv1d MLP plus a 1x1-conv shortcut, followed by layer norm over
    the channel dimension.
    """

    def __init__(self, n_input, n_units):
        """

        Args:
            n_input: Number of input units.
            n_units: Number of output units.
        """
        super().__init__()
        self.block_nonlinear = nn.Sequential(
            nn.Conv1d(n_input, n_units, kernel_size=1, stride=1, padding=0, bias=False),
            nn.BatchNorm1d(n_units),
            nn.ReLU(),
            nn.Conv1d(n_units, n_units, kernel_size=1, stride=1, padding=0, bias=True))
        # LayerNorm normalizes the last axis, so move channels there and back.
        self.block_ln = nn.Sequential(Permute(0, 2, 1), nn.LayerNorm(n_units), Permute(0, 2, 1))
        self.linear_shortcut = nn.Conv1d(n_input, n_units, kernel_size=1, stride=1, padding=0, bias=False)
        if (n_units >= n_input):
            # Pin the shortcut weight to identity on the leading diagonal.
            # Boolean mask: masked_fill_ requires a bool mask in current
            # PyTorch (uint8 masks are deprecated/removed).
            eye_mask = np.zeros((n_units, n_input, 1), dtype=bool)
            for i in range(n_input):
                eye_mask[(i, i, 0)] = True
            self.linear_shortcut.weight.data.uniform_((- 0.01), 0.01)
            self.linear_shortcut.weight.data.masked_fill_(torch.tensor(eye_mask), 1.0)

    def forward(self, x):
        """

        Args:
            x: Input tensor.

        Returns:
            torch.Tensor: network output.
        """
        h = self.block_ln((self.block_nonlinear(x) + self.linear_shortcut(x)))
        return h
|
class View(torch.nn.Module):
    """Basic reshape module."""

    def __init__(self, *shape):
        """

        Args:
            *shape: Target shape.
        """
        super().__init__()
        self.shape = shape

    def forward(self, input):
        """Reshape the input tensor to the stored shape.

        Args:
            input: Input tensor.

        Returns:
            torch.Tensor: Reshaped tensor.
        """
        return input.view(*self.shape)
|
class Unfold(torch.nn.Module):
    """Module for unfolding tensor.

    Performs strided crops on 2d (image) tensors. Stride is assumed to be half the crop size.
    """

    def __init__(self, img_size, fold_size):
        """

        Args:
            img_size: Input size.
            fold_size: Crop size.
        """
        super().__init__()
        self.fold_size = fold_size
        self.fold_stride = fold_size // 2
        # Number of crop locations per axis.
        self.n_locs = 2 * (img_size // fold_size) - 1
        self.unfold = torch.nn.Unfold(
            (self.fold_size, self.fold_size),
            stride=(self.fold_stride, self.fold_stride))

    def forward(self, x):
        """Unfolds tensor.

        Args:
            x: Input tensor.

        Returns:
            torch.Tensor: Unfolded tensor with crops stacked into the batch axis.
        """
        batch = x.size(0)
        crops = self.unfold(x)
        crops = crops.reshape(batch, -1, self.fold_size, self.fold_size, self.n_locs * self.n_locs)
        crops = crops.permute(0, 4, 1, 2, 3)
        return crops.reshape(batch * self.n_locs * self.n_locs, -1, self.fold_size, self.fold_size)
|
class Fold(torch.nn.Module):
    """Module (re)folding tensor.

    Undoes the strided crops above. Works only on 1x1.
    """

    def __init__(self, img_size, fold_size):
        """

        Args:
            img_size: Images size.
            fold_size: Crop size.
        """
        super().__init__()
        # Number of crop locations per axis.
        self.n_locs = 2 * (img_size // fold_size) - 1

    def forward(self, x):
        """(Re)folds tensor.

        Args:
            x: Input tensor.

        Returns:
            torch.Tensor: Refolded tensor with crop locations as spatial axes.
        """
        dim_c, dim_x, dim_y = x.size()[1:]
        locs = self.n_locs * self.n_locs
        x = x.reshape(-1, locs, dim_c, dim_x * dim_y)
        x = x.permute(0, 2, 3, 1)
        return x.reshape(-1, dim_c * dim_x * dim_y, self.n_locs, self.n_locs).contiguous()
|
class Permute(torch.nn.Module):
    """Module for permuting axes."""

    def __init__(self, *perm):
        """

        Args:
            *perm: Axis permutation.
        """
        super().__init__()
        self.perm = perm

    def forward(self, input):
        """Permutes axes of tensor.

        Args:
            input: Input tensor.

        Returns:
            torch.Tensor: permuted tensor.
        """
        return input.permute(*self.perm)
|
class ResBlock(Convnet):
    """Residual block for ResNet."""
    def create_layers(self, shape, conv_args=None):
        """Creates layers.

        Args:
            shape: Shape of input.
            conv_args: Layer arguments for block (mutated in place).
        """
        # Strip the final layer's nonlinearity and append a pass-through
        # "layer" carrying only that nonlinearity, so it can be applied
        # after the residual addition in forward().
        # NOTE(review): `_nonlin_idx` is a module-level constant defined
        # elsewhere (index of the nonlinearity within a conv_args tuple) —
        # confirm against the defining module.
        final_nonlin = conv_args[(- 1)][_nonlin_idx]
        conv_args[(- 1)] = list(conv_args[(- 1)])
        conv_args[(- 1)][_nonlin_idx] = None
        conv_args.append((None, 0, 0, 0, False, False, final_nonlin, None))
        super().create_layers(shape, conv_args=conv_args)
        if (self.conv_shape != shape):
            # Shape changed: build a strided 1x1 conv + BN on the residual
            # path matching the block's spatial reduction.
            (dim_x, dim_y, dim_in) = shape
            (dim_x_, dim_y_, dim_out) = self.conv_shape
            stride = (dim_x // dim_x_)
            (next_x, _) = self.next_size(dim_x, dim_y, 1, stride, 0)
            assert (next_x == dim_x_), (self.conv_shape, shape)
            self.downsample = nn.Sequential(nn.Conv2d(dim_in, dim_out, kernel_size=1, stride=stride, padding=0, bias=False), nn.BatchNorm2d(dim_out))
        else:
            self.downsample = None
    def forward(self, x: torch.Tensor):
        """Forward pass.

        Args:
            x: Input.

        Returns:
            torch.Tensor: block output.
        """
        if (self.downsample is not None):
            residual = self.downsample(x)
        else:
            residual = x
        # Run all but the appended last sub-layer, add the residual, then
        # apply the final nonlinearity (the last sub-layer).
        x = self.conv_layers[(- 1)]((self.conv_layers[:(- 1)](x) + residual))
        return x
|
class ResNet(Convnet):
    """Convnet with a residual-block stage between two plain convolutional
    stacks, followed by fully-connected layers."""

    def create_layers(self, shape, conv_before_args=None, res_args=None, conv_after_args=None, fc_args=None):
        """Creates layers.

        Args:
            shape: Shape of the input.
            conv_before_args: Arguments for convolutional layers before residuals.
            res_args: Residual args.
            conv_after_args: Arguments for convolutional layers after residuals.
            fc_args: Fully-connected arguments.
        """
        (dim_x, dim_y, dim_in) = shape
        shape = (dim_x, dim_y, dim_in)
        (self.conv_before_layers, self.conv_before_shape) = self.create_conv_layers(shape, conv_before_args)
        (self.res_layers, self.res_shape) = self.create_res_layers(self.conv_before_shape, res_args)
        (self.conv_after_layers, self.conv_after_shape) = self.create_conv_layers(self.res_shape, conv_after_args)
        (dim_x, dim_y, dim_out) = self.conv_after_shape
        # Flattened size of the final conv output feeds the FC stack.
        dim_r = ((dim_x * dim_y) * dim_out)
        self.reshape = View((- 1), dim_r)
        (self.fc_layers, _) = self.create_linear_layers(dim_r, fc_args)

    def create_res_layers(self, shape, block_args=None):
        """Creates a set of residual blocks.

        Args:
            shape: input shape.
            block_args: List of (conv_args, n_blocks) tuples.

        Returns:
            (nn.Sequential, tuple): residual blocks and the output shape.
        """
        res_layers = nn.Sequential()
        block_args = (block_args or [])
        for (i, (conv_args, n_blocks)) in enumerate(block_args):
            block = ResBlock(shape, conv_args=conv_args)
            res_layers.add_module('block_{}_0'.format(i), block)
            for j in range(1, n_blocks):
                shape = block.conv_shape
                block = ResBlock(shape, conv_args=conv_args)
                res_layers.add_module('block_{}_{}'.format(i, j), block)
            shape = block.conv_shape
        return (res_layers, shape)

    def forward(self, x: torch.Tensor, return_full_list=False):
        """Forward pass.

        Args:
            x: Input.
            return_full_list: Optional, returns all layer outputs.

        Returns:
            (conv_before_out, res_out, conv_after_out, fc_out): tensors, or
            lists of per-layer tensors when return_full_list is True.
        """
        if return_full_list:
            conv_before_out = []
            for conv_layer in self.conv_before_layers:
                x = conv_layer(x)
                conv_before_out.append(x)
        else:
            # Fix: this class builds `conv_before_layers`; the original
            # referenced `self.conv_layers`, which ResNet.create_layers never
            # sets, raising AttributeError when return_full_list=False.
            conv_before_out = self.conv_before_layers(x)
            x = conv_before_out
        if return_full_list:
            res_out = []
            for res_layer in self.res_layers:
                x = res_layer(x)
                res_out.append(x)
        else:
            res_out = self.res_layers(x)
            x = res_out
        if return_full_list:
            conv_after_out = []
            for conv_layer in self.conv_after_layers:
                x = conv_layer(x)
                conv_after_out.append(x)
        else:
            conv_after_out = self.conv_after_layers(x)
            x = conv_after_out
        x = self.reshape(x)
        if return_full_list:
            fc_out = []
            for fc_layer in self.fc_layers:
                x = fc_layer(x)
                fc_out.append(x)
        else:
            fc_out = self.fc_layers(x)
        return (conv_before_out, res_out, conv_after_out, fc_out)
|
class FoldedResNet(ResNet):
    """Resnet with strided crop input."""
    def create_layers(self, shape, crop_size=8, conv_before_args=None, res_args=None, conv_after_args=None, fc_args=None):
        """Creates layers.

        Args:
            shape: Shape of the input.
            crop_size: Size of the crops.
            conv_before_args: Arguments for convolutional layers before residuals.
            res_args: Residual args.
            conv_after_args: Arguments for convolutional layers after residuals.
            fc_args: Fully-connected arguments.
        """
        self.crop_size = crop_size
        (dim_x, dim_y, dim_in) = shape
        # Number of crop locations per axis (crop stride is half the crop size).
        self.final_size = ((2 * (dim_x // self.crop_size)) - 1)
        self.unfold = Unfold(dim_x, self.crop_size)
        self.refold = Fold(dim_x, self.crop_size)
        # Layers operate on individual crops.
        shape = (self.crop_size, self.crop_size, dim_in)
        (self.conv_before_layers, self.conv_before_shape) = self.create_conv_layers(shape, conv_before_args)
        (self.res_layers, self.res_shape) = self.create_res_layers(self.conv_before_shape, res_args)
        (self.conv_after_layers, self.conv_after_shape) = self.create_conv_layers(self.res_shape, conv_after_args)
        # NOTE(review): this overwrites the shape computed on the line above
        # with the residual-stage shape — confirm whether conv_after_args is
        # expected to preserve shape here.
        self.conv_after_shape = self.res_shape
        (dim_x, dim_y, dim_out) = self.conv_after_shape
        dim_r = ((dim_x * dim_y) * dim_out)
        self.reshape = View((- 1), dim_r)
        (self.fc_layers, _) = self.create_linear_layers(dim_r, fc_args)
    def create_res_layers(self, shape, block_args=None):
        """Creates a set of residual blocks.

        Args:
            shape: input shape.
            block_args: List of (conv_args, n_blocks) tuples.

        Returns:
            (nn.Sequential, tuple): residual blocks and the output shape.
        """
        res_layers = nn.Sequential()
        block_args = (block_args or [])
        for (i, (conv_args, n_blocks)) in enumerate(block_args):
            block = ResBlock(shape, conv_args=conv_args)
            res_layers.add_module('block_{}_0'.format(i), block)
            for j in range(1, n_blocks):
                shape = block.conv_shape
                block = ResBlock(shape, conv_args=conv_args)
                res_layers.add_module('block_{}_{}'.format(i, j), block)
            shape = block.conv_shape
        (dim_x, dim_y) = shape[:2]
        if (dim_x != dim_y):
            raise ValueError('dim_x and dim_y do not match.')
        if (dim_x == 1):
            # Crops collapse to 1x1: after refolding the spatial size becomes
            # the final_size grid of crop locations.
            shape = (self.final_size, self.final_size, shape[2])
        return (res_layers, shape)
    def forward(self, x: torch.Tensor, return_full_list=False):
        """Forward pass.

        Args:
            x: Input.
            return_full_list: Optional, returns all layer outputs.

        Returns:
            (conv_before_out, res_out, conv_after_out, fc_out): tensors, or
            lists of per-layer tensors when return_full_list is True.
        """
        x = self.unfold(x)
        conv_before_out = []
        for conv_layer in self.conv_before_layers:
            x = conv_layer(x)
            # Once crops reach 1x1, refold them into a spatial grid.
            if (x.size(2) == 1):
                x = self.refold(x)
            conv_before_out.append(x)
        res_out = []
        for res_layer in self.res_layers:
            x = res_layer(x)
            res_out.append(x)
        # Refold after the residual stage if the crops are 1x1, replacing the
        # stage's last recorded output with the refolded tensor.
        if (x.size(2) == 1):
            x = self.refold(x)
            res_out[(- 1)] = x
        conv_after_out = []
        for conv_layer in self.conv_after_layers:
            x = conv_layer(x)
            if (x.size(2) == 1):
                x = self.refold(x)
            conv_after_out.append(x)
        x = self.reshape(x)
        if return_full_list:
            fc_out = []
            for fc_layer in self.fc_layers:
                x = fc_layer(x)
                fc_out.append(x)
        else:
            fc_out = self.fc_layers(x)
        if (not return_full_list):
            conv_before_out = conv_before_out[(- 1)]
            res_out = res_out[(- 1)]
            conv_after_out = conv_after_out[(- 1)]
        return (conv_before_out, res_out, conv_after_out, fc_out)
|
class NormalizedDegree(object):
    """Graph transform that sets node features to the standardized node degree."""

    def __init__(self, mean, std):
        self.mean = mean
        self.std = std

    def __call__(self, data):
        # Degree of each node taken from the first row of edge_index.
        d = degree(data.edge_index[0], dtype=torch.float)
        d = (d - self.mean) / self.std
        data.x = d.view(-1, 1)
        return data
|
class GcnInfomax(nn.Module):
    """InfoGraph-style model: maximizes local-global mutual information over
    graph encoder representations, with an optional prior discriminator."""
    def __init__(self, hidden_dim, num_gc_layers, alpha=0.5, beta=1.0, gamma=0.1):
        super(GcnInfomax, self).__init__()
        self.alpha = alpha
        self.beta = beta
        # Weight of the prior-matching term.
        self.gamma = gamma
        # NOTE(review): `args`, `Encoder`, `FF`, `PriorDiscriminator`,
        # `dataset_num_features`, `device`, and `local_global_loss_` are
        # module-level names defined elsewhere in this file/project.
        self.prior = args.prior
        self.embedding_dim = mi_units = (hidden_dim * num_gc_layers)
        self.encoder = Encoder(dataset_num_features, hidden_dim, num_gc_layers)
        self.local_d = FF(self.embedding_dim)
        self.global_d = FF(self.embedding_dim)
        if self.prior:
            self.prior_d = PriorDiscriminator(self.embedding_dim)
        self.init_emb()
    def init_emb(self):
        # Xavier-initialize every Linear layer's weight and zero its bias.
        # NOTE(review): `initrange` is computed but never used below.
        initrange = ((- 1.5) / self.embedding_dim)
        for m in self.modules():
            if isinstance(m, nn.Linear):
                torch.nn.init.xavier_uniform_(m.weight.data)
                if (m.bias is not None):
                    m.bias.data.fill_(0.0)
    def forward(self, x, edge_index, batch, num_graphs, beta):
        # Fall back to constant node features when the graph provides none.
        if (x is None):
            x = torch.ones(batch.shape[0]).to(device)
        # y: global (per-graph) embedding; M: local (per-node) embeddings.
        (y, M) = self.encoder(x, edge_index, batch)
        g_enc = self.global_d(y)
        l_enc = self.local_d(M)
        # NOTE(review): `mode` is assigned but unused in this method.
        mode = 'fd'
        measure = 'JSD_hard'
        local_global_loss = local_global_loss_(l_enc, g_enc, edge_index, batch, measure, beta)
        if self.prior:
            # GAN-style prior matching on the global representation.
            prior = torch.rand_like(y)
            term_a = torch.log(self.prior_d(prior)).mean()
            term_b = torch.log((1.0 - self.prior_d(y))).mean()
            PRIOR = ((- (term_a + term_b)) * self.gamma)
        else:
            PRIOR = 0
        return (local_global_loss + PRIOR)
|
def svc_classify(x, y, search):
    """Evaluate features with an SVC under 10-fold stratified CV.

    Args:
        x: Feature matrix.
        y: Label vector.
        search: If True, grid-search C with 5-fold CV; otherwise fix C=10.

    Returns:
        Mean test accuracy over the 10 folds.
    """
    splitter = StratifiedKFold(n_splits=10, shuffle=True, random_state=None)
    accuracies = []
    for train_index, test_index in splitter.split(x, y):
        x_train, y_train = x[train_index], y[train_index]
        x_test, y_test = x[test_index], y[test_index]
        if search:
            grid = {'C': [0.001, 0.01, 0.1, 1, 10, 100, 1000]}
            classifier = GridSearchCV(SVC(), grid, cv=5, scoring='accuracy', verbose=0)
        else:
            classifier = SVC(C=10)
        classifier.fit(x_train, y_train)
        accuracies.append(accuracy_score(y_test, classifier.predict(x_test)))
    return np.mean(accuracies)
|
def evaluate_embedding(embeddings, labels, search=True):
    """Label-encode targets, run the SVC evaluation, return mean accuracy."""
    encoded = preprocessing.LabelEncoder().fit_transform(labels)
    x = np.array(embeddings)
    y = np.array(encoded)
    print(x.shape, y.shape)
    svc_accuracies = [svc_classify(x, y, search) for _ in range(1)]
    print('svc', np.mean(svc_accuracies))
    return np.mean(svc_accuracies)
|
class Encoder(torch.nn.Module):
    """GIN graph encoder returning pooled graph features and node features.

    Both outputs are concatenations over all `num_gc_layers` layers, so their
    feature dimension is `dim * num_gc_layers`.
    """

    def __init__(self, num_features, dim, num_gc_layers):
        super(Encoder, self).__init__()
        self.num_gc_layers = num_gc_layers
        self.convs = torch.nn.ModuleList()
        self.bns = torch.nn.ModuleList()
        for i in range(num_gc_layers):
            # FIX: the local MLP was named `nn`, shadowing the `torch.nn`
            # module alias inside this scope (which is why BatchNorm1d had to
            # be spelled `torch.nn.BatchNorm1d`). Renamed to `mlp`.
            if i:
                mlp = Sequential(Linear(dim, dim), ReLU(), Linear(dim, dim))
            else:
                # First layer maps raw node features to `dim`.
                mlp = Sequential(Linear(num_features, dim), ReLU(), Linear(dim, dim))
            conv = GINConv(mlp)
            bn = torch.nn.BatchNorm1d(dim)
            self.convs.append(conv)
            self.bns.append(bn)

    def forward(self, x, edge_index, batch):
        """Return (graph_embeddings, node_embeddings), each concatenated over layers."""
        if (x is None):
            # Featureless graphs fall back to a constant scalar feature per node.
            x = torch.ones((batch.shape[0], 1)).to(device)
        xs = []
        for i in range(self.num_gc_layers):
            x = F.relu(self.convs[i](x, edge_index))
            x = self.bns[i](x)
            xs.append(x)
        # Sum-pool each layer's node features into per-graph features.
        xpool = [global_add_pool(layer_x, batch) for layer_x in xs]
        x = torch.cat(xpool, 1)
        return (x, torch.cat(xs, 1))

    def get_embeddings(self, loader):
        """Embed every graph in `loader`; returns (embeddings, labels) as numpy arrays."""
        device = torch.device(('cuda:0' if torch.cuda.is_available() else 'cpu'))
        ret = []
        y = []
        with torch.no_grad():
            for data in loader:
                data.to(device)
                (x, edge_index, batch) = (data.x, data.edge_index, data.batch)
                if (x is None):
                    x = torch.ones((batch.shape[0], 1)).to(device)
                (x, _) = self.forward(x, edge_index, batch)
                ret.append(x.cpu().numpy())
                y.append(data.y.cpu().numpy())
        ret = np.concatenate(ret, 0)
        y = np.concatenate(y, 0)
        return (ret, y)
|
class Net(torch.nn.Module):
    """Supervised graph classifier: a 5-layer GIN Encoder plus a 2-layer MLP head."""

    def __init__(self):
        super(Net, self).__init__()
        try:
            num_features = dataset.num_features
        except (NameError, AttributeError):
            # Narrowed from a bare `except`: featureless datasets fall back to 1.
            num_features = 1
        dim = 32
        # FIX: Encoder requires num_gc_layers; fc1's in-features (dim * 5)
        # imply 5 GIN layers, so pass 5 explicitly (the original call omitted
        # the argument and would raise TypeError).
        self.encoder = Encoder(num_features, dim, 5)
        self.fc1 = Linear((dim * 5), dim)
        self.fc2 = Linear(dim, dataset.num_classes)

    def forward(self, x, edge_index, batch):
        """Return per-graph log-probabilities over the dataset's classes."""
        if (x is None):
            # NOTE(review): 1-D ones here vs (N, 1) in Encoder.forward — confirm.
            x = torch.ones(batch.shape[0]).to(device)
        (x, _) = self.encoder(x, edge_index, batch)
        x = F.relu(self.fc1(x))
        x = F.dropout(x, p=0.5, training=self.training)
        x = self.fc2(x)
        return F.log_softmax(x, dim=(- 1))
|
def train(epoch):
    """Run one training epoch; return the mean per-graph NLL loss.

    Uses module-level ``model``, ``optimizer``, ``train_loader``,
    ``train_dataset`` and ``device``. The learning rate is halved once,
    at epoch 51.
    """
    model.train()
    if epoch == 51:
        # One-shot LR decay.
        for group in optimizer.param_groups:
            group['lr'] = 0.5 * group['lr']
    total_loss = 0
    for data in train_loader:
        data = data.to(device)
        optimizer.zero_grad()
        output = model(data.x, data.edge_index, data.batch)
        loss = F.nll_loss(output, data.y)
        loss.backward()
        total_loss += loss.item() * data.num_graphs
        optimizer.step()
    return total_loss / len(train_dataset)
|
def test(loader):
    """Return classification accuracy of the module-level ``model`` on ``loader``."""
    model.eval()
    correct = 0
    for data in loader:
        data = data.to(device)
        output = model(data.x, data.edge_index, data.batch)
        predictions = output.max(dim=1)[1]
        correct += predictions.eq(data.y).sum().item()
    return correct / len(loader.dataset)
|
def local_global_loss_(l_enc, g_enc, edge_index, batch, measure, beta=0):
    """Local-global mutual-information loss between node and graph encodings.

    Args:
        l_enc: Local (per-node) encodings, shape (num_nodes, d).
        g_enc: Global (per-graph) encodings, shape (num_graphs, d).
        edge_index: Unused here; kept for signature compatibility.
        batch: Maps each node index to its graph index.
        measure: Type of f-divergence passed to the expectation helpers.
        beta: Forwarded to get_negative_expectation.

    Returns:
        torch.Tensor: Scalar loss (E_neg - E_pos).
    """
    num_graphs = g_enc.shape[0]
    num_nodes = l_enc.shape[0]
    device = torch.device(('cuda:0' if torch.cuda.is_available() else 'cpu'))
    # pos_mask[i, j] = 1 iff node i belongs to graph j; neg_mask is its complement.
    pos_mask = torch.zeros((num_nodes, num_graphs)).to(device)
    neg_mask = torch.ones((num_nodes, num_graphs)).to(device)
    for (nodeidx, graphidx) in enumerate(batch):
        pos_mask[nodeidx][graphidx] = 1.0
        neg_mask[nodeidx][graphidx] = 0.0
    # Scores for every node-graph pair.
    res = torch.mm(l_enc, g_enc.t())
    E_pos = get_positive_expectation((res * pos_mask), measure, average=False).sum()
    E_pos = (E_pos / num_nodes)
    E_neg = get_negative_expectation((res * neg_mask), measure, average=False, beta=beta).sum()
    # Each node has (num_graphs - 1) negative pairs.
    E_neg = (E_neg / (num_nodes * (num_graphs - 1)))
    return (E_neg - E_pos)
|
def adj_loss_(l_enc, g_enc, edge_index, batch):
    """BCE loss between pairwise node-embedding similarities and the adjacency.

    Note: ``g_enc`` and ``batch`` are accepted for signature parity but unused.
    """
    num_graphs = g_enc.shape[0]
    num_nodes = l_enc.shape[0]
    # Dense symmetric adjacency built from the edge list.
    adj = torch.zeros((num_nodes, num_nodes)).cuda()
    mask = torch.eye(num_nodes).cuda()
    for node1, node2 in zip(edge_index[0], edge_index[1]):
        i, j = node1.item(), node2.item()
        adj[i][j] = 1.0
        adj[j][i] = 1.0
    similarity = torch.sigmoid(torch.mm(l_enc, l_enc.t()))
    # Zero the diagonal so self-similarity is ignored.
    similarity = (1 - mask) * similarity
    return nn.BCELoss()(similarity, adj)
|
class GlobalDiscriminator(nn.Module):
    """Discriminator over (global summary, encoded local map) pairs.

    NOTE(review): this class looks broken/unused as written:
    `self.encoder` is referenced in forward() but never assigned in
    __init__, and `self.l0` takes 32 inputs while `h` is a concatenation
    of `y` and `M` — confirm before relying on it.
    """

    def __init__(self, args, input_dim):
        super().__init__()
        # NOTE(review): `args` and `input_dim` are accepted but unused.
        self.l0 = nn.Linear(32, 32)
        self.l1 = nn.Linear(32, 32)
        self.l2 = nn.Linear(512, 1)

    def forward(self, y, M, data):
        # Dense adjacency and node counts supplied by the data batch dict.
        adj = Variable(data['adj'].float(), requires_grad=False).cuda()
        batch_num_nodes = data['num_nodes'].int().numpy()
        (M, _) = self.encoder(M, adj, batch_num_nodes)
        h = torch.cat((y, M), dim=1)
        h = F.relu(self.l0(h))
        h = F.relu(self.l1(h))
        return self.l2(h)
|
class PriorDiscriminator(nn.Module):
    """Three-layer MLP that scores a vector as coming from the prior.

    Maps an ``input_dim`` vector to a single probability in (0, 1).
    """

    def __init__(self, input_dim):
        super().__init__()
        # Two same-width hidden layers followed by a scalar head.
        self.l0 = nn.Linear(input_dim, input_dim)
        self.l1 = nn.Linear(input_dim, input_dim)
        self.l2 = nn.Linear(input_dim, 1)

    def forward(self, x):
        hidden = F.relu(self.l0(x))
        hidden = F.relu(self.l1(hidden))
        return torch.sigmoid(self.l2(hidden))
|
class FF(nn.Module):
    """Feed-forward projection head with a linear skip connection."""

    def __init__(self, input_dim):
        super().__init__()
        # Three Linear+ReLU stages of constant width.
        stages = []
        for _ in range(3):
            stages.append(nn.Linear(input_dim, input_dim))
            stages.append(nn.ReLU())
        self.block = nn.Sequential(*stages)
        self.linear_shortcut = nn.Linear(input_dim, input_dim)

    def forward(self, x):
        return self.block(x) + self.linear_shortcut(x)
|
class Model(nn.Module):
    """SimCLR network: modified ResNet-50 backbone f plus projection head g."""

    def __init__(self, feature_dim=128):
        super(Model, self).__init__()
        layers = []
        for name, module in resnet50().named_children():
            if name == 'conv1':
                # CIFAR-sized inputs: 3x3 stride-1 stem replaces the 7x7 stride-2 one.
                module = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)
            # Drop the classifier head and the stem max-pool.
            if not isinstance(module, (nn.Linear, nn.MaxPool2d)):
                layers.append(module)
        self.f = nn.Sequential(*layers)
        # Projection head: 2048 -> 512 -> feature_dim.
        self.g = nn.Sequential(
            nn.Linear(2048, 512, bias=False),
            nn.BatchNorm1d(512),
            nn.ReLU(inplace=True),
            nn.Linear(512, feature_dim, bias=True),
        )

    def forward(self, x):
        feature = torch.flatten(self.f(x), start_dim=1)
        out = self.g(feature)
        # Both outputs are L2-normalized.
        return F.normalize(feature, dim=-1), F.normalize(out, dim=-1)
|
class CIFAR10Pair(CIFAR10):
    """CIFAR-10 that yields two independently augmented views of each image."""

    def __getitem__(self, index):
        img, target = self.data[index], self.targets[index]
        img = Image.fromarray(img)
        if self.transform is not None:
            # Two independent stochastic augmentations of the same image.
            pos_1 = self.transform(img)
            pos_2 = self.transform(img)
        if self.target_transform is not None:
            target = self.target_transform(target)
        return pos_1, pos_2, target
|
class CIFAR100Pair_true_label(CIFAR100):
    """CIFAR-100 pairs where the second view is a different image of the same class."""

    def __init__(self, root='../data', train=True, transform=None):
        super().__init__(root=root, train=train, transform=transform)
        # Precompute, for every class id, the dataset indices holding that class.
        self.label_index = [
            [idx for idx in range(len(self)) if self.targets[idx] == cls]
            for cls in range(100)
        ]

    def __getitem__(self, index):
        img1, target = self.data[index], self.targets[index]
        # Draw a random partner example sharing the anchor's label.
        partner_index = sample(self.label_index[self.targets[index]], 1)[0]
        img2 = self.data[partner_index]
        img1 = Image.fromarray(img1)
        img2 = Image.fromarray(img2)
        if self.transform is not None:
            pos_1 = self.transform(img1)
            pos_2 = self.transform(img2)
        if self.target_transform is not None:
            target = self.target_transform(target)
        return pos_1, pos_2, target
|
class CIFAR100Pair(CIFAR100):
    """CIFAR-100 that yields two independently augmented views of each image."""

    def __getitem__(self, index):
        img, target = self.data[index], self.targets[index]
        img = Image.fromarray(img)
        if self.transform is not None:
            # Two independent stochastic augmentations of the same image.
            pos_1 = self.transform(img)
            pos_2 = self.transform(img)
        if self.target_transform is not None:
            target = self.target_transform(target)
        return pos_1, pos_2, target
|
class STL10Pair(STL10):
    """STL-10 that yields two augmented views of each image plus its label.

    Consistency fix: the sibling CIFAR pair datasets apply
    ``target_transform`` when it is set; this class previously ignored it.
    """

    def __getitem__(self, index):
        img, target = self.data[index], self.labels[index]
        # STL-10 stores images CHW; PIL expects HWC.
        img = Image.fromarray(np.transpose(img, (1, 2, 0)))
        if self.transform is not None:
            pos_1 = self.transform(img)
            pos_2 = self.transform(img)
        if self.target_transform is not None:
            target = self.target_transform(target)
        return pos_1, pos_2, target
|
class GaussianBlur(object):
    """Apply OpenCV Gaussian blur with probability 0.5 (SimCLR-style augmentation)."""

    def __init__(self, kernel_size, min=0.1, max=2.0):
        # NOTE: `min`/`max` shadow builtins but are kept for caller compatibility.
        self.min = min
        self.max = max
        self.kernel_size = kernel_size

    def __call__(self, sample):
        sample = np.array(sample)
        # First draw decides whether to blur at all.
        if np.random.random_sample() < 0.5:
            # Sigma drawn uniformly from [min, max).
            sigma = self.min + (self.max - self.min) * np.random.random_sample()
            sample = cv2.GaussianBlur(sample, (self.kernel_size, self.kernel_size), sigma)
        return sample
|
def get_dataset(dataset_name, root='../data', pair=True):
    """Build (train, memory, test) datasets for contrastive training.

    Args:
        dataset_name: With pair=True: 'cifar10', 'cifar100', 'stl10',
            'cifar100_true_label'. With pair=False the plain and
            '*_true_label' aliases map to the standard datasets.
        root: Dataset root directory.
        pair: If True, use the *Pair datasets that return two augmented views.

    Returns:
        Tuple (train_data, memory_data, test_data).

    Raises:
        Exception: For an unrecognized dataset name.

    NOTE(review): relies on module-level `train_transform` / `test_transform`.
    """
    if pair:
        if (dataset_name == 'cifar10'):
            train_data = CIFAR10Pair(root=root, train=True, transform=train_transform)
            memory_data = CIFAR10Pair(root=root, train=True, transform=test_transform)
            test_data = CIFAR10Pair(root=root, train=False, transform=test_transform)
        elif (dataset_name == 'cifar100'):
            train_data = CIFAR100Pair(root=root, train=True, transform=train_transform)
            memory_data = CIFAR100Pair(root=root, train=True, transform=test_transform)
            test_data = CIFAR100Pair(root=root, train=False, transform=test_transform)
        elif (dataset_name == 'stl10'):
            # STL-10 pretraining also uses the unlabeled split.
            train_data = STL10Pair(root=root, split='train+unlabeled', transform=train_transform)
            memory_data = STL10Pair(root=root, split='train', transform=test_transform)
            test_data = STL10Pair(root=root, split='test', transform=test_transform)
        elif (dataset_name == 'cifar100_true_label'):
            train_data = CIFAR100Pair_true_label(root=root, train=True, transform=train_transform)
            memory_data = CIFAR100Pair_true_label(root=root, train=True, transform=test_transform)
            test_data = CIFAR100Pair_true_label(root=root, train=False, transform=test_transform)
        else:
            raise Exception('Invalid dataset name')
    elif (dataset_name in ['cifar10', 'cifar10_true_label']):
        train_data = CIFAR10(root=root, train=True, transform=train_transform)
        memory_data = CIFAR10(root=root, train=True, transform=test_transform)
        test_data = CIFAR10(root=root, train=False, transform=test_transform)
    elif (dataset_name in ['cifar100', 'cifar100_true_label']):
        train_data = CIFAR100(root=root, train=True, transform=train_transform)
        memory_data = CIFAR100(root=root, train=True, transform=test_transform)
        test_data = CIFAR100(root=root, train=False, transform=test_transform)
    elif (dataset_name == 'stl10'):
        train_data = STL10(root=root, split='train', transform=train_transform)
        memory_data = STL10(root=root, split='train', transform=test_transform)
        test_data = STL10(root=root, split='test', transform=test_transform)
    else:
        raise Exception('Invalid dataset name')
    return (train_data, memory_data, test_data)
|
def main():
    """Entry point: parse CLI args, optionally seed RNGs, spawn training workers."""
    args = parser.parse_args()
    if (args.seed is not None):
        random.seed(args.seed)
        torch.manual_seed(args.seed)
        # Deterministic cuDNN trades speed for reproducibility.
        cudnn.deterministic = True
        warnings.warn('You have chosen to seed training. This will turn on the CUDNN deterministic setting, which can slow down your training considerably! You may see unexpected behavior when restarting from checkpoints.')
    if (args.gpu is not None):
        warnings.warn('You have chosen a specific GPU. This will completely disable data parallelism.')
    if ((args.dist_url == 'env://') and (args.world_size == (- 1))):
        # World size supplied by the launcher environment.
        args.world_size = int(os.environ['WORLD_SIZE'])
    args.distributed = ((args.world_size > 1) or args.multiprocessing_distributed)
    ngpus_per_node = torch.cuda.device_count()
    if args.multiprocessing_distributed:
        # Total world size = processes per node * number of nodes.
        args.world_size = (ngpus_per_node * args.world_size)
        mp.spawn(main_worker, nprocs=ngpus_per_node, args=(ngpus_per_node, args))
    else:
        main_worker(args.gpu, ngpus_per_node, args)
|
def main_worker(gpu, ngpus_per_node, args):
    """Per-process worker: builds the MoCo model and data pipeline, then trains.

    Args:
        gpu: GPU index for this process (or None).
        ngpus_per_node: Number of GPUs available on this node.
        args: Parsed CLI namespace.
    """
    args.gpu = gpu
    # Silence printing on all but the local rank-0 process.
    if (args.multiprocessing_distributed and (args.gpu != 0)):
        def print_pass(*args):
            pass
        builtins.print = print_pass
    if (args.gpu is not None):
        print('Use GPU: {} for training'.format(args.gpu))
    if args.distributed:
        if ((args.dist_url == 'env://') and (args.rank == (- 1))):
            args.rank = int(os.environ['RANK'])
        if args.multiprocessing_distributed:
            # Global rank = node rank * gpus-per-node + local gpu index.
            args.rank = ((args.rank * ngpus_per_node) + gpu)
        dist.init_process_group(backend=args.dist_backend, init_method=args.dist_url, world_size=args.world_size, rank=args.rank)
    print("=> creating model '{}'".format(args.arch))
    # NOTE(review): if adversarial_method is set but not 'ifm'/'ifm_only',
    # `model` is never assigned and the code below raises NameError.
    if (args.adversarial_method is None):
        model = moco.builder.MoCo(models.__dict__[args.arch], args.moco_dim, args.moco_k, args.moco_m, args.moco_t, args.mlp)
    if (args.adversarial_method in ['ifm', 'ifm_only']):
        model = moco.adv_builder.MoCo(models.__dict__[args.arch], args.moco_dim, args.moco_k, args.moco_m, args.moco_t, args.mlp, args.epsilon)
    if args.distributed:
        if (args.gpu is not None):
            torch.cuda.set_device(args.gpu)
            model.cuda(args.gpu)
            # Split the batch and data workers across this node's processes.
            args.batch_size = int((args.batch_size / ngpus_per_node))
            args.workers = int((((args.workers + ngpus_per_node) - 1) / ngpus_per_node))
            model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])
        else:
            model.cuda()
            model = torch.nn.parallel.DistributedDataParallel(model)
    elif (args.gpu is not None):
        torch.cuda.set_device(args.gpu)
        model = model.cuda(args.gpu)
        # NOTE(review): this branch assigns the model and then immediately
        # raises — single-GPU non-DDP training is intentionally unsupported.
        raise NotImplementedError('Only DistributedDataParallel is supported.')
    else:
        raise NotImplementedError('Only DistributedDataParallel is supported.')
    criterion = nn.CrossEntropyLoss().cuda(args.gpu)
    optimizer = torch.optim.SGD(model.parameters(), args.lr, momentum=args.momentum, weight_decay=args.weight_decay)
    if args.resume:
        # Optionally resume model/optimizer state from a checkpoint.
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            if (args.gpu is None):
                checkpoint = torch.load(args.resume)
            else:
                # Map checkpoint tensors straight onto this worker's GPU.
                loc = 'cuda:{}'.format(args.gpu)
                checkpoint = torch.load(args.resume, map_location=loc)
            args.start_epoch = checkpoint['epoch']
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            print("=> loaded checkpoint '{}' (epoch {})".format(args.resume, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))
    cudnn.benchmark = True
    traindir = os.path.join(args.dataset_root, args.dataset, 'train')
    # ImageNet normalization statistics.
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    if args.aug_plus:
        # MoCo v2 augmentation recipe (SimCLR-style jitter + blur).
        augmentation = [transforms.RandomResizedCrop(224, scale=(0.2, 1.0)), transforms.RandomApply([transforms.ColorJitter(0.4, 0.4, 0.4, 0.1)], p=0.8), transforms.RandomGrayscale(p=0.2), transforms.RandomApply([moco.loader.GaussianBlur([0.1, 2.0])], p=0.5), transforms.RandomHorizontalFlip(), transforms.ToTensor(), normalize]
    else:
        # MoCo v1 augmentation recipe.
        augmentation = [transforms.RandomResizedCrop(224, scale=(0.2, 1.0)), transforms.RandomGrayscale(p=0.2), transforms.ColorJitter(0.4, 0.4, 0.4, 0.4), transforms.RandomHorizontalFlip(), transforms.ToTensor(), normalize]
    train_dataset = datasets.ImageFolder(traindir, moco.loader.TwoCropsTransform(transforms.Compose(augmentation)))
    if args.distributed:
        train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)
    else:
        train_sampler = None
    train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=args.batch_size, shuffle=(train_sampler is None), num_workers=args.workers, pin_memory=True, sampler=train_sampler, drop_last=True)
    for epoch in range(args.start_epoch, args.epochs):
        if args.distributed:
            # Reshuffle with a different seed every epoch.
            train_sampler.set_epoch(epoch)
        adjust_learning_rate(optimizer, epoch, args)
        train(train_loader, model, criterion, optimizer, epoch, args)
        filename = f'/data/scratch/joshrob/opt/moco/{args.dataset}_{args.lr}_{args.batch_size}_{args.method}_{args.epsilon}_{args.epochs}.log'
        # Only rank 0 (or the single process) writes checkpoints.
        if ((not args.multiprocessing_distributed) or (args.multiprocessing_distributed and ((args.rank % ngpus_per_node) == 0))):
            save_checkpoint({'epoch': (epoch + 1), 'arch': args.arch, 'state_dict': model.state_dict(), 'optimizer': optimizer.state_dict()}, is_best=False, filename=filename)
|
def train(train_loader, model, criterion, optimizer, epoch, args):
    """Train MoCo for one epoch over `train_loader`.

    The objective is selected by args.method: None (standard contrastive
    loss), 'ifm' (clean + adversarial losses mixed with weight args.alpha),
    or 'ifm_only' (adversarial loss alone).
    """
    batch_time = AverageMeter('Time', ':6.3f')
    data_time = AverageMeter('Data', ':6.3f')
    losses = AverageMeter('Loss', ':.4e')
    top1 = AverageMeter('Acc@1', ':6.2f')
    top5 = AverageMeter('Acc@5', ':6.2f')
    progress = ProgressMeter(len(train_loader), [batch_time, data_time, losses, top1, top5], prefix='Epoch: [{}]'.format(epoch))
    model.train()
    end = time.time()
    for (i, (images, _)) in enumerate(train_loader):
        # Time spent waiting on the data loader.
        data_time.update((time.time() - end))
        if (args.gpu is not None):
            images[0] = images[0].cuda(args.gpu, non_blocking=True)
            images[1] = images[1].cuda(args.gpu, non_blocking=True)
        # NOTE(review): if args.method is none of None/'ifm'/'ifm_only',
        # `output` and `loss` stay unbound and the accuracy call below fails.
        if (args.method is None):
            (output, target) = model(im_q=images[0], im_k=images[1])
            loss = criterion(output, target)
        if (args.method == 'ifm'):
            (output, output_adv, target) = model(im_q=images[0], im_k=images[1])
            loss_orig = criterion(output, target)
            loss_adv = criterion(output_adv, target)
            # Weighted average of clean and adversarial losses.
            loss = (loss_orig + (args.alpha * loss_adv))
            loss /= (1 + args.alpha)
        if (args.method == 'ifm_only'):
            (output, output_adv, target) = model(im_q=images[0], im_k=images[1])
            loss_adv = criterion(output_adv, target)
            loss = loss_adv
        # Contrastive accuracy against the targets produced by the model.
        (acc1, acc5) = accuracy(output, target, topk=(1, 5))
        losses.update(loss.item(), images[0].size(0))
        top1.update(acc1[0], images[0].size(0))
        top5.update(acc5[0], images[0].size(0))
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        batch_time.update((time.time() - end))
        end = time.time()
        if ((i % args.print_freq) == 0):
            progress.display(i)
|
def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):
    """Serialize `state` to `filename`; copy to 'model_best.pth.tar' when best."""
    torch.save(state, filename)
    if not is_best:
        return
    shutil.copyfile(filename, 'model_best.pth.tar')
|
class AverageMeter(object):
    """Tracks the most recent value and a running average of a metric."""

    def __init__(self, name, fmt=':f'):
        self.name = name
        self.fmt = fmt
        self.reset()

    def reset(self):
        """Zero all statistics."""
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        """Record `val` observed `n` times and refresh the average."""
        self.val = val
        self.count += n
        self.sum += val * n
        self.avg = self.sum / self.count

    def __str__(self):
        # e.g. fmt=':.2f' -> '{name} {val:.2f} ({avg:.2f})'
        template = '{name} {val%s} ({avg%s})' % (self.fmt, self.fmt)
        return template.format(**self.__dict__)
|
class ProgressMeter(object):
    """Prints a tab-separated progress line for a set of AverageMeters."""

    def __init__(self, num_batches, meters, prefix=''):
        self.batch_fmtstr = self._get_batch_fmtstr(num_batches)
        self.meters = meters
        self.prefix = prefix

    def display(self, batch):
        """Print the prefix, the batch counter, and every meter."""
        parts = [self.prefix + self.batch_fmtstr.format(batch)]
        parts.extend(str(meter) for meter in self.meters)
        print('\t'.join(parts))

    def _get_batch_fmtstr(self, num_batches):
        # Width of the largest batch index, e.g. 100 -> '[{:3d}/100]'.
        num_digits = len(str(num_batches // 1))
        fmt = '{:' + str(num_digits) + 'd}'
        return '[' + fmt + '/' + fmt.format(num_batches) + ']'
|
def adjust_learning_rate(optimizer, epoch, args):
    """Set this epoch's learning rate: cosine schedule if args.cos, else step decay."""
    if args.cos:
        # Half-cosine anneal from args.lr down to 0 over args.epochs.
        lr = args.lr * 0.5 * (1.0 + math.cos(math.pi * epoch / args.epochs))
    else:
        # Multiply by 0.1 at every milestone already passed.
        lr = args.lr
        for milestone in args.schedule:
            if epoch >= milestone:
                lr *= 0.1
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr
|
def accuracy(output, target, topk=(1,)):
    """Top-k accuracies (percent) of `output` logits against `target` labels."""
    with torch.no_grad():
        maxk = max(topk)
        batch_size = target.size(0)
        _, pred = output.topk(maxk, 1, True, True)
        pred = pred.t()
        # correct[r, i]: whether sample i's (r+1)-th guess hit the label.
        correct = pred.eq(target.view(1, -1).expand_as(pred))
        results = []
        for k in topk:
            hits = correct[:k].contiguous().view(-1).float().sum(0, keepdim=True)
            results.append(hits.mul_(100.0 / batch_size))
        return results
|
class TwoCropsTransform():
    """Take two random crops of one image as the query and key."""

    def __init__(self, base_transform):
        self.base_transform = base_transform

    def __call__(self, x):
        # Two independent applications of the (stochastic) transform.
        return [self.base_transform(x), self.base_transform(x)]
|
class GaussianBlur(object):
    'Gaussian blur augmentation in SimCLR https://arxiv.org/abs/2002.05709'

    def __init__(self, sigma=[0.1, 2.0]):
        # NOTE: mutable default kept for caller compatibility; never mutated here.
        self.sigma = sigma

    def __call__(self, x):
        # Radius drawn uniformly from [sigma[0], sigma[1]].
        radius = random.uniform(self.sigma[0], self.sigma[1])
        return x.filter(ImageFilter.GaussianBlur(radius=radius))
|
def reformat_incre_equations(x):
    """Join '[[...]]'-wrapped equation strings into one comma-separated string."""
    # Strip the leading '[[' and trailing ']]' from each entry; '' for empty input.
    return ', '.join(eq[2:-2] for eq in x)
|
def reformat_equations_from_peano(eq_list):
    """Convert comma-separated peano 'eq .../answer ...' clauses to equation text."""
    parts = []
    for clause in eq_list.split(','):
        if 'eq' in clause:
            # Keep everything after the 'eq' keyword (leading space preserved).
            parts.append(clause[clause.index('eq') + 2:])
        elif 'answer' in clause:
            # The answer variable becomes an unknown: 'v = ?'.
            parts.append(clause[clause.index('answer') + 6:].strip() + ' = ?')
    return ', '.join(parts)
|
def get_declarative_equations(model, question, prompt, max_tokens, stop_token, temperature):
    """Query an OpenAI completion model to translate a word problem into equations.

    The prompt template is filled with `question`; '[[...]]'-delimited
    equations in the completion are reformatted via the peano helpers. If no
    such delimiters are found, the raw completion text is printed and returned.
    """
    prompt = prompt.format(question=question)
    response = openai.Completion.create(model=model, prompt=prompt, max_tokens=max_tokens, stop=stop_token, temperature=temperature, top_p=1)
    result = response['choices'][0]['text']
    # Non-greedy match of every '[[ ... ]]' span.
    eq_list = re.findall('\\[\\[.*?\\]\\]', result)
    if (len(eq_list) > 0):
        return reformat_equations_from_peano(reformat_incre_equations(eq_list))
    else:
        print()
        print(response['choices'][0]['text'])
        return response['choices'][0]['text']
|
def get_final_using_sympy(equations):
    """Solve a comma-separated system of equations with sympy; return a float.

    The last equation defines the goal: either its LHS is the goal variable,
    or a fresh unused letter is introduced for an anonymous expression.
    Returns a float answer, np.nan for 'nan' input, or one of the sentinel
    strings 'invalid equations', 'no goal found', 'no solution', 'bug'.
    """
    try:
        # Allow '2x'-style implicit multiplication and '^' as exponentiation.
        transformations = ((standard_transformations + (implicit_multiplication_application,)) + (convert_xor,))
        if (str(equations) == 'nan'):
            return np.nan
        equation_list = equations.split(',')
        # Reject any 3+ letter run: ambiguous under implicit multiplication.
        for eq in equation_list:
            for c in range(len(eq)):
                if (c < (len(eq) - 2)):
                    if (eq[c].isalpha() and eq[(c + 1)].isalpha() and eq[(c + 2)].isalpha()):
                        return 'invalid equations'
        goal_var = None
        goal_expression_list = []
        # Case 1: the last equation's LHS is a short (1-2 char) variable name.
        if (equation_list[(- 1)].split('=')[0].strip().isalpha() or (len(equation_list[(- 1)].split('=')[0].strip()) == 2)):
            goal_var = equation_list[(- 1)].split('=')[0].strip()
        elif ('=' in equation_list[(- 1)]):
            # Case 2: introduce any letter absent from the last equation as the goal.
            for l in (list(string.ascii_lowercase) + list(string.ascii_uppercase)):
                if (l not in equation_list[(- 1)]):
                    goal_var = l
                    break
            if (goal_var is not None):
                # 'g - (LHS)' == 0 encodes g = LHS.
                goal_expression = (((goal_var + ' - (') + equation_list[(- 1)].split('=')[0].strip()) + ')')
                goal_expression = parse_expr(goal_expression, transformations=transformations)
                goal_expression = sympify(goal_expression)
                try:
                    # If the goal is already fully determined, return it directly.
                    return float(solve(goal_expression)[0])
                except Exception as e:
                    pass
                goal_expression_list.append(goal_expression)
            else:
                return 'invalid equations'
        if (len(equation_list) == 1):
            # Single expression: evaluate its LHS numerically.
            try:
                goal_expression = parse_expr(equation_list[0].split('=')[0], transformations=transformations)
                return float(sympify(goal_expression))
            except Exception as e:
                return 'invalid equations'
        if (goal_var == None):
            return 'no goal found'
        # Convert every remaining 'lhs = rhs' into 'lhs - (rhs)' == 0 form.
        for i in range((len(equation_list) - 1)):
            sub_eqs = equation_list[i]
            if ('?' not in sub_eqs):
                try:
                    sub_eqs_split = sub_eqs.split('=')
                    sub_eqs = (((sub_eqs_split[0].strip() + ' - (') + sub_eqs_split[1].strip()) + ')')
                    sub_eqs = parse_expr(sub_eqs, transformations=transformations)
                    sub_eqs = sympify(sub_eqs)
                except Exception as e:
                    return 'invalid equations'
                goal_expression_list.append(sub_eqs)
        try:
            # solve() may return a dict or a list of dicts depending on the system.
            try:
                return float(solve(goal_expression_list)[Symbol(goal_var)])
            except Exception as e:
                return float(solve(goal_expression_list)[0][Symbol(goal_var)])
        except Exception as e:
            pass
        return 'no solution'
    except Exception as e:
        print(e)
        return 'bug'
|
def get_detector(opt=None):
    """Instantiate the detector backend named by ``opt.detector``.

    Supported values are 'yolo' and 'tracker'; anything else raises
    NotImplementedError. Imports are deferred so only the chosen backend's
    dependencies are loaded.
    """
    if opt.detector == 'yolo':
        from detector.yolo_api import YOLODetector
        from detector.yolo_cfg import cfg
        return YOLODetector(cfg, opt)
    if opt.detector == 'tracker':
        from detector.tracker_api import Tracker
        from detector.tracker_cfg import cfg
        return Tracker(cfg, opt)
    raise NotImplementedError
|
class BaseDetector(ABC):
    """Abstract interface that every detector backend must implement."""

    def __init__(self):
        pass

    @abstractmethod
    def image_preprocess(self, img_name):
        """Load and prepare a single image for detection."""
        pass

    @abstractmethod
    def images_detection(self, imgs, orig_dim_list):
        """Run batched detection on preprocessed images."""
        pass

    @abstractmethod
    def detect_one_img(self, img_name):
        """Detect objects in a single image file."""
        pass
|
class TrackState(object):
    """Enumeration of track lifecycle states (MOT-style state machine)."""
    New = 0       # freshly created, not yet confirmed
    Tracked = 1   # actively tracked in the current frame
    Lost = 2      # temporarily missing
    Removed = 3   # permanently dropped
|
class BaseTrack(object):
    """Base class holding shared bookkeeping and ids for tracked objects.

    NOTE(review): the defaults below are *class* attributes; the mutable
    ones (`history`, `features`) are shared across all instances unless a
    subclass rebinds them per instance — confirm subclasses do so.
    """
    _count = 0              # global id counter shared by all tracks
    track_id = 0
    is_activated = False
    state = TrackState.New
    history = OrderedDict()
    features = []
    curr_feature = None
    score = 0
    start_frame = 0
    frame_id = 0
    time_since_update = 0
    # Position for multi-camera use; inf means unknown.
    location = (np.inf, np.inf)

    @property
    def end_frame(self):
        """Last frame id at which this track was updated."""
        return self.frame_id

    @staticmethod
    def next_id():
        """Return a new globally unique track id."""
        BaseTrack._count += 1
        return BaseTrack._count

    def activate(self, *args):
        raise NotImplementedError

    def predict(self):
        raise NotImplementedError

    def update(self, *args, **kwargs):
        raise NotImplementedError

    def mark_lost(self):
        self.state = TrackState.Lost

    def mark_removed(self):
        self.state = TrackState.Removed
|
class Evaluator(object):
    """Computes MOT metrics for one sequence by accumulating frame-level matches."""

    def __init__(self, data_root, seq_name, data_type):
        self.data_root = data_root
        self.seq_name = seq_name
        self.data_type = data_type
        self.load_annotations()
        self.reset_accumulator()

    def load_annotations(self):
        """Read ground-truth and ignore-region annotations for the sequence."""
        assert (self.data_type == 'mot')
        gt_filename = os.path.join(self.data_root, self.seq_name, 'gt', 'gt.txt')
        self.gt_frame_dict = read_results(gt_filename, self.data_type, is_gt=True)
        self.gt_ignore_frame_dict = read_results(gt_filename, self.data_type, is_ignore=True)

    def reset_accumulator(self):
        # Fresh motmetrics accumulator; auto_id assigns frame ids sequentially.
        self.acc = mm.MOTAccumulator(auto_id=True)

    def eval_frame(self, frame_id, trk_tlwhs, trk_ids, rtn_events=False):
        """Match one frame's tracker boxes to GT and update the accumulator.

        Tracker boxes that IoU-match ignore regions are dropped first.
        """
        trk_tlwhs = np.copy(trk_tlwhs)
        trk_ids = np.copy(trk_ids)
        gt_objs = self.gt_frame_dict.get(frame_id, [])
        (gt_tlwhs, gt_ids) = unzip_objs(gt_objs)[:2]
        ignore_objs = self.gt_ignore_frame_dict.get(frame_id, [])
        ignore_tlwhs = unzip_objs(ignore_objs)[0]
        # Remove tracker results that fall inside ignore regions.
        keep = np.ones(len(trk_tlwhs), dtype=bool)
        iou_distance = mm.distances.iou_matrix(ignore_tlwhs, trk_tlwhs, max_iou=0.5)
        (match_is, match_js) = mm.lap.linear_sum_assignment(iou_distance)
        (match_is, match_js) = map((lambda a: np.asarray(a, dtype=int)), [match_is, match_js])
        match_ious = iou_distance[(match_is, match_js)]
        match_js = np.asarray(match_js, dtype=int)
        # NaN IoU means no real overlap; keep those tracker boxes.
        match_js = match_js[np.logical_not(np.isnan(match_ious))]
        keep[match_js] = False
        trk_tlwhs = trk_tlwhs[keep]
        trk_ids = trk_ids[keep]
        # Accumulate GT-vs-tracker assignment costs for this frame.
        iou_distance = mm.distances.iou_matrix(gt_tlwhs, trk_tlwhs, max_iou=0.5)
        self.acc.update(gt_ids, trk_ids, iou_distance)
        if (rtn_events and (iou_distance.size > 0) and hasattr(self.acc, 'last_mot_events')):
            events = self.acc.last_mot_events
        else:
            events = None
        return events

    def eval_file(self, filename):
        """Evaluate a whole results file against the loaded ground truth."""
        self.reset_accumulator()
        result_frame_dict = read_results(filename, self.data_type, is_gt=False)
        # Union of frames present in either GT or results.
        frames = sorted(list((set(self.gt_frame_dict.keys()) | set(result_frame_dict.keys()))))
        for frame_id in frames:
            trk_objs = result_frame_dict.get(frame_id, [])
            (trk_tlwhs, trk_ids) = unzip_objs(trk_objs)[:2]
            self.eval_frame(frame_id, trk_tlwhs, trk_ids, rtn_events=False)
        return self.acc

    @staticmethod
    def get_summary(accs, names, metrics=('mota', 'num_switches', 'idp', 'idr', 'idf1', 'precision', 'recall')):
        """Compute a metrics summary (with an OVERALL row) over several sequences."""
        names = copy.deepcopy(names)
        if (metrics is None):
            metrics = mm.metrics.motchallenge_metrics
        metrics = copy.deepcopy(metrics)
        mh = mm.metrics.create()
        summary = mh.compute_many(accs, metrics=metrics, names=names, generate_overall=True)
        return summary

    @staticmethod
    def save_summary(summary, filename):
        """Write the summary DataFrame to an Excel file."""
        import pandas as pd
        writer = pd.ExcelWriter(filename)
        summary.to_excel(writer)
        # NOTE(review): ExcelWriter.save() is removed in recent pandas — close().
        writer.save()
|
def write_results(filename, results_dict: Dict, data_type: str):
    """Write tracking results to `filename` in a MOT-family or KITTI format.

    Args:
        filename: Output path; falsy values make this a no-op.
        results_dict: Mapping frame_id -> iterable of (tlwh, track_id) pairs.
        data_type: One of 'mot', 'mcmot', 'lab', or 'kitti'.

    Raises:
        ValueError: For an unrecognized data_type.
    """
    if not filename:
        return
    path = os.path.dirname(filename)
    if path:
        # FIX: exist_ok avoids the check-then-create race, and guarding on a
        # non-empty dirname fixes a crash when `filename` has no directory
        # component (os.makedirs('') raises FileNotFoundError).
        os.makedirs(path, exist_ok=True)
    if data_type in ('mot', 'mcmot', 'lab'):
        save_format = '{frame},{id},{x1},{y1},{w},{h},1,-1,-1,-1\n'
    elif data_type == 'kitti':
        save_format = '{frame} {id} pedestrian -1 -1 -10 {x1} {y1} {x2} {y2} -1 -1 -1 -1000 -1000 -1000 -10 {score}\n'
    else:
        raise ValueError(data_type)
    with open(filename, 'w') as f:
        for frame_id, frame_data in results_dict.items():
            if data_type == 'kitti':
                # KITTI frame indices are 0-based.
                frame_id -= 1
            for tlwh, track_id in frame_data:
                # Negative ids mark invalid tracks.
                if track_id < 0:
                    continue
                x1, y1, w, h = tlwh
                x2, y2 = x1 + w, y1 + h
                line = save_format.format(frame=frame_id, id=track_id, x1=x1, y1=y1, x2=x2, y2=y2, w=w, h=h, score=1.0)
                f.write(line)
    logger.info('Save results to {}'.format(filename))
|
def read_results(filename, data_type: str, is_gt=False, is_ignore=False):
    """Dispatch result-file parsing by `data_type`; only 'mot'/'lab' are supported."""
    if data_type not in ('mot', 'lab'):
        raise ValueError('Unknown data type: {}'.format(data_type))
    return read_mot_results(filename, is_gt, is_ignore)
|
def read_mot_results(filename, is_gt, is_ignore):
    """Parse a MOT-format file into {frame_id: [(tlwh, target_id, score)]}.

    GT mode (is_gt): for MOT16/17 files, keep only label-1 entries with a
    nonzero mark; score is fixed at 1. Ignore mode (is_ignore): for MOT16/17
    files, keep only distractor labels {2, 7, 8, 12} or negative-visibility
    entries; otherwise the frame entry is skipped. Plain mode uses the raw
    detection score from column 7. Missing files yield an empty dict.
    """
    valid_labels = {1}
    ignore_labels = {2, 7, 8, 12}
    results_dict = dict()
    if not os.path.isfile(filename):
        return results_dict
    with open(filename, 'r') as f:
        for line in f:
            fields = line.split(',')
            if len(fields) < 7:
                continue
            fid = int(fields[0])
            # Frame ids start at 1.
            if fid < 1:
                continue
            results_dict.setdefault(fid, list())
            if is_gt:
                if ('MOT16-' in filename) or ('MOT17-' in filename):
                    label = int(float(fields[7]))
                    mark = int(float(fields[6]))
                    if (mark == 0) or (label not in valid_labels):
                        continue
                score = 1
            elif is_ignore:
                if ('MOT16-' in filename) or ('MOT17-' in filename):
                    label = int(float(fields[7]))
                    vis_ratio = float(fields[8])
                    if (label not in ignore_labels) and (vis_ratio >= 0):
                        continue
                else:
                    continue
                score = 1
            else:
                score = float(fields[6])
            tlwh = tuple(map(float, fields[2:6]))
            target_id = int(fields[1])
            results_dict[fid].append((tlwh, target_id, score))
    return results_dict
|
def unzip_objs(objs):
    """Split [(tlwh, id, score), ...] into (tlwh array of shape (N, 4), ids, scores)."""
    if len(objs) > 0:
        tlwhs, ids, scores = zip(*objs)
    else:
        # Empty input yields empty containers and a (0, 4) array.
        tlwhs, ids, scores = [], [], []
    boxes = np.asarray(tlwhs, dtype=float).reshape(-1, 4)
    return boxes, ids, scores
|
def get_logger(name='root'):
    """Return a DEBUG-level logger that writes timestamped records to stderr.

    FIX: a stream handler is attached only when the logger has none yet, so
    repeated calls with the same name no longer stack duplicate handlers
    (which made every record print multiple times).
    """
    logger = logging.getLogger(name)
    if not logger.handlers:
        formatter = logging.Formatter(fmt='%(asctime)s [%(levelname)s]: %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
        handler = logging.StreamHandler()
        handler.setFormatter(formatter)
        logger.addHandler(handler)
    logger.setLevel(logging.DEBUG)
    return logger
|
def parse_model_cfg(path):
    """Parses the yolo-v3 layer configuration file and returns module definitions.

    Returns a list of dicts, one per '[section]' header, each holding a 'type'
    key plus the section's key=value options (values kept as strings).
    Convolutional sections default 'batch_normalize' to 0.
    """
    # FIX: the file handle was opened without ever being closed; a context
    # manager guarantees the close.
    with open(path, 'r') as file:
        lines = file.read().split('\n')
    # Drop blanks and comment lines, then trim surrounding whitespace.
    lines = [x for x in lines if (x and (not x.startswith('#')))]
    lines = [x.strip() for x in lines]
    module_defs = []
    for line in lines:
        if line.startswith('['):
            # New section header.
            module_defs.append({})
            module_defs[-1]['type'] = line[1:-1].rstrip()
            if module_defs[-1]['type'] == 'convolutional':
                module_defs[-1]['batch_normalize'] = 0
        else:
            key, value = line.split('=')
            module_defs[-1][key.rstrip()] = value.strip()
    return module_defs
|
def parse_data_cfg(path):
    'Parses the data configuration file'
    # Defaults that the config file may override.
    options = {'gpus': '0', 'num_workers': '10'}
    with open(path, 'r') as fp:
        for raw_line in fp.readlines():
            stripped = raw_line.strip()
            # Skip blanks and comment lines.
            if not stripped or stripped.startswith('#'):
                continue
            key, value = stripped.split('=')
            options[key.strip()] = value.strip()
    return options
|
class Timer(object):
    """Accumulating wall-clock timer with a running average."""

    def __init__(self):
        # All statistics start at zero; clear() defines them.
        self.clear()

    def tic(self):
        """Start (or restart) an interval."""
        # time.time() is kept (vs perf_counter) to match existing call sites.
        self.start_time = time.time()

    def toc(self, average=True):
        """Stop the interval; return the average elapsed time (or just this interval)."""
        self.diff = time.time() - self.start_time
        self.total_time += self.diff
        self.calls += 1
        self.average_time = self.total_time / self.calls
        self.duration = self.average_time if average else self.diff
        return self.duration

    def clear(self):
        """Reset all accumulated statistics."""
        self.total_time = 0.0
        self.calls = 0
        self.start_time = 0.0
        self.diff = 0.0
        self.average_time = 0.0
        self.duration = 0.0
|
def add_path(path):
    """Prepend ``path`` to sys.path unless it is already present."""
    if path in sys.path:
        return
    sys.path.insert(0, path)
|
class BoundingBox():
    """A single 2D bounding box, stored internally as absolute corner
    coordinates (_x, _y, _x2, _y2) plus width/height (_w, _h)."""

    def __init__(self, imageName, classId, x, y, w, h, typeCoordinates=CoordinatesType.Absolute, imgSize=None, bbType=BBType.GroundTruth, classConfidence=None, format=BBFormat.XYWH):
        """Constructor.

        Args:
            imageName: String representing the image name.
            classId: String value representing class id.
            x: Float X upper-left coordinate of the bounding box.
            y: Float Y upper-left coordinate of the bounding box.
            w: Float width of the bounding box (or X2/right when format is XYX2Y2).
            h: Float height of the bounding box (or Y2/bottom when format is XYX2Y2).
            typeCoordinates: (optional) CoordinatesType (Relative or Absolute) telling
                whether (x, y, w, h) are absolute pixels or relative to the image size.
                Default: Absolute.
            imgSize: (optional) (width, height) of the image. Required when
                typeCoordinates is Relative.
            bbType: (optional) BBType (GroundTruth or Detected). Detections must
                inform classConfidence.
            classConfidence: (optional) Float confidence of the detected class.
            format: (optional) BBFormat. XYWH: <left> <top> <width> <height>;
                XYX2Y2: <left> <top> <right> <bottom>.

        Raises:
            IOError: when a required companion argument is missing, or relative
                coordinates are combined with a format other than XYWH.
        """
        self._imageName = imageName
        self._typeCoordinates = typeCoordinates
        if ((typeCoordinates == CoordinatesType.Relative) and (imgSize is None)):
            raise IOError("Parameter 'imgSize' is required. It is necessary to inform the image size.")
        if ((bbType == BBType.Detected) and (classConfidence is None)):
            raise IOError("For bbType='Detection', it is necessary to inform the classConfidence value.")
        self._classConfidence = classConfidence
        self._bbType = bbType
        self._classId = classId
        self._format = format
        if (typeCoordinates == CoordinatesType.Relative):
            # convertToAbsoluteValues returns absolute corners (x1, y1, x2, y2)
            (self._x, self._y, self._w, self._h) = convertToAbsoluteValues(imgSize, (x, y, w, h))
            self._width_img = imgSize[0]
            self._height_img = imgSize[1]
            if (format == BBFormat.XYWH):
                # the converted values are corners; derive width/height from them
                self._x2 = self._w
                self._y2 = self._h
                self._w = (self._x2 - self._x)
                self._h = (self._y2 - self._y)
            else:
                raise IOError('For relative coordinates, the format must be XYWH (x,y,width,height)')
        else:
            self._x = x
            self._y = y
            if (format == BBFormat.XYWH):
                self._w = w
                self._h = h
                self._x2 = (self._x + self._w)
                self._y2 = (self._y + self._h)
            else:
                # in XYX2Y2, the (w, h) parameters actually carry (x2, y2)
                self._x2 = w
                self._y2 = h
                self._w = (self._x2 - self._x)
                self._h = (self._y2 - self._y)
            if (imgSize is None):
                self._width_img = None
                self._height_img = None
            else:
                self._width_img = imgSize[0]
                self._height_img = imgSize[1]

    def getAbsoluteBoundingBox(self, format=BBFormat.XYWH):
        """Return the absolute box as (x, y, w, h) or (x1, y1, x2, y2)."""
        if (format == BBFormat.XYWH):
            return (self._x, self._y, self._w, self._h)
        elif (format == BBFormat.XYX2Y2):
            return (self._x, self._y, self._x2, self._y2)

    def getRelativeBoundingBox(self, imgSize=None):
        """Return the box relative to the image size.

        Uses ``imgSize`` when given, otherwise the size stored at construction.

        Raises:
            IOError: when no image size is available at all.
        """
        if ((imgSize is None) and (self._width_img is None) and (self._height_img is None)):
            raise IOError("Parameter 'imgSize' is required. It is necessary to inform the image size.")
        # BUGFIX: the original branch was inverted — it indexed imgSize exactly
        # when imgSize was None, which always raised TypeError.
        # NOTE(review): convertToRelativeValues expects (xmin, xmax, ymin, ymax)
        # per its arithmetic, but (x, y, w, h) is passed here — verify upstream.
        if (imgSize is not None):
            return convertToRelativeValues((imgSize[0], imgSize[1]), (self._x, self._y, self._w, self._h))
        return convertToRelativeValues((self._width_img, self._height_img), (self._x, self._y, self._w, self._h))

    def getImageName(self):
        """Return the name of the image this box belongs to."""
        return self._imageName

    def getConfidence(self):
        """Return the detection confidence (None for ground truths)."""
        return self._classConfidence

    def getFormat(self):
        """Return the BBFormat the box was created with."""
        return self._format

    def getClassId(self):
        """Return the class id."""
        return self._classId

    def getImageSize(self):
        """Return (image_width, image_height), possibly (None, None)."""
        return (self._width_img, self._height_img)

    def getCoordinatesType(self):
        """Return the CoordinatesType the box was created with."""
        return self._typeCoordinates

    def getBBType(self):
        """Return the BBType (GroundTruth or Detected)."""
        return self._bbType

    @staticmethod
    def compare(det1, det2):
        """Return True when both boxes agree on class, confidence, absolute
        coordinates and image size."""
        det1BB = det1.getAbsoluteBoundingBox()
        det1ImgSize = det1.getImageSize()
        det2BB = det2.getAbsoluteBoundingBox()
        det2ImgSize = det2.getImageSize()
        # BUGFIX: the original read the nonexistent attribute det1.classConfidence,
        # called the misspelled det2.classConfidenc(), and compared each image
        # size against itself instead of against the other box.
        if ((det1.getClassId() == det2.getClassId()) and (det1.getConfidence() == det2.getConfidence()) and (det1BB[0] == det2BB[0]) and (det1BB[1] == det2BB[1]) and (det1BB[2] == det2BB[2]) and (det1BB[3] == det2BB[3]) and (det1ImgSize[0] == det2ImgSize[0]) and (det1ImgSize[1] == det2ImgSize[1])):
            return True
        return False

    @staticmethod
    def clone(boundingBox):
        """Return a new BoundingBox carrying the same data as ``boundingBox``."""
        absBB = boundingBox.getAbsoluteBoundingBox(format=BBFormat.XYWH)
        newBoundingBox = BoundingBox(boundingBox.getImageName(), boundingBox.getClassId(), absBB[0], absBB[1], absBB[2], absBB[3], typeCoordinates=boundingBox.getCoordinatesType(), imgSize=boundingBox.getImageSize(), bbType=boundingBox.getBBType(), classConfidence=boundingBox.getConfidence(), format=BBFormat.XYWH)
        return newBoundingBox
|
class BoundingBoxes():
    """Container managing a collection of BoundingBox objects."""

    def __init__(self):
        self._boundingBoxes = []

    def addBoundingBox(self, bb):
        """Append a BoundingBox to the collection."""
        self._boundingBoxes.append(bb)

    def removeBoundingBox(self, _boundingBox):
        """Remove the first stored box equal to ``_boundingBox``
        (equality decided by BoundingBox.compare)."""
        for d in self._boundingBoxes:
            if BoundingBox.compare(d, _boundingBox):
                # BUGFIX: the original used `del self._boundingBoxes[d]`, which
                # indexes the list with an object and raises TypeError.
                self._boundingBoxes.remove(d)
                return

    def removeAllBoundingBoxes(self):
        """Empty the collection."""
        self._boundingBoxes = []

    def getBoundingBoxes(self):
        """Return the internal list of boxes."""
        return self._boundingBoxes

    def getBoundingBoxByClass(self, classId):
        """Return all boxes whose class id equals ``classId``."""
        return [d for d in self._boundingBoxes if (d.getClassId() == classId)]

    def getClasses(self):
        """Return the distinct class ids, in first-seen order."""
        classes = []
        for d in self._boundingBoxes:
            c = d.getClassId()
            if (c not in classes):
                classes.append(c)
        return classes

    def getBoundingBoxesByType(self, bbType):
        """Return all boxes of the given BBType."""
        return [d for d in self._boundingBoxes if (d.getBBType() == bbType)]

    def getBoundingBoxesByImageName(self, imageName):
        """Return all boxes belonging to ``imageName``."""
        return [d for d in self._boundingBoxes if (d.getImageName() == imageName)]

    def count(self, bbType=None):
        """Count the stored boxes, optionally restricted to one BBType."""
        if (bbType is None):
            return len(self._boundingBoxes)
        return sum(1 for d in self._boundingBoxes if d.getBBType() == bbType)

    def clone(self):
        """Return a deep copy (each box cloned via BoundingBox.clone)."""
        newBoundingBoxes = BoundingBoxes()
        for d in self._boundingBoxes:
            newBoundingBoxes.addBoundingBox(BoundingBox.clone(d))
        return newBoundingBoxes

    def drawAllBoundingBoxes(self, image, imageName):
        """Draw every box of ``imageName`` on ``image``: green for ground
        truths, red for detections."""
        for bb in self.getBoundingBoxesByImageName(imageName):
            if (bb.getBBType() == BBType.GroundTruth):
                image = add_bb_into_image(image, bb, color=(0, 255, 0))
            else:
                image = add_bb_into_image(image, bb, color=(255, 0, 0))
        return image
|
class Evaluator():
    """Computes PASCAL VOC detection metrics (precision, recall, AP) from a
    BoundingBoxes collection, plus IOU helpers and PR-curve plotting."""

    def GetPascalVOCMetrics(self, boundingboxes, IOUThreshold=0.5, method=MethodAveragePrecision.EveryPointInterpolation):
        """Get the metrics used by the VOC Pascal 2012 challenge.

        Args:
            boundingboxes: BoundingBoxes object holding ground-truth and detected boxes.
            IOUThreshold: IOU threshold above which a detection may count as a TP
                (default 0.5).
            method: MethodAveragePrecision.EveryPointInterpolation (official PASCAL
                VOC toolkit behaviour, default) or ElevenPointInterpolation (as in
                "The PASCAL Visual Object Classes (VOC) Challenge" paper).

        Returns:
            A list with one dict per class, keyed by: 'class', 'precision',
            'recall', 'AP', 'interpolated precision', 'interpolated recall',
            'total positives', 'total TP', 'total FP' (False Positive count).
        """
        ret = []
        # each entry: [imageName, classId, confidence (1 for GT), (x1, y1, x2, y2)]
        groundTruths = []
        detections = []
        classes = []
        for bb in boundingboxes.getBoundingBoxes():
            if (bb.getBBType() == BBType.GroundTruth):
                groundTruths.append([bb.getImageName(), bb.getClassId(), 1, bb.getAbsoluteBoundingBox(BBFormat.XYX2Y2)])
            else:
                detections.append([bb.getImageName(), bb.getClassId(), bb.getConfidence(), bb.getAbsoluteBoundingBox(BBFormat.XYX2Y2)])
            if (bb.getClassId() not in classes):
                classes.append(bb.getClassId())
        classes = sorted(classes)
        for c in classes:
            dects = [d for d in detections if (d[1] == c)]
            gts = [g for g in groundTruths if (g[1] == c)]
            npos = len(gts)
            # evaluate detections from most to least confident
            dects = sorted(dects, key=(lambda conf: conf[2]), reverse=True)
            TP = np.zeros(len(dects))
            FP = np.zeros(len(dects))
            # per-image arrays flagging ground truths that were already matched
            det = Counter([cc[0] for cc in gts])
            for (key, val) in det.items():
                det[key] = np.zeros(val)
            for d in range(len(dects)):
                # ground truths living in the same image as this detection
                gt = [gt for gt in gts if (gt[0] == dects[d][0])]
                iouMax = sys.float_info.min
                jmax = 0  # defensive default; only used after iouMax passes the threshold
                for j in range(len(gt)):
                    iou = Evaluator.iou(dects[d][3], gt[j][3])
                    if (iou > iouMax):
                        iouMax = iou
                        jmax = j
                if (iouMax >= IOUThreshold):
                    if (det[dects[d][0]][jmax] == 0):
                        TP[d] = 1
                        det[dects[d][0]][jmax] = 1  # mark this GT as consumed
                    else:
                        FP[d] = 1  # duplicate detection of an already-matched GT
                else:
                    FP[d] = 1
            acc_FP = np.cumsum(FP)
            acc_TP = np.cumsum(TP)
            rec = (acc_TP / npos)  # NOTE: npos == 0 yields a divide warning/NaNs, as before
            prec = np.divide(acc_TP, (acc_FP + acc_TP))
            if (method == MethodAveragePrecision.EveryPointInterpolation):
                [ap, mpre, mrec, ii] = Evaluator.CalculateAveragePrecision(rec, prec)
            else:
                [ap, mpre, mrec, _] = Evaluator.ElevenPointInterpolatedAP(rec, prec)
            r = {'class': c, 'precision': prec, 'recall': rec, 'AP': ap, 'interpolated precision': mpre, 'interpolated recall': mrec, 'total positives': npos, 'total TP': np.sum(TP), 'total FP': np.sum(FP)}
            ret.append(r)
        return ret

    def PlotPrecisionRecallCurve(self, boundingBoxes, IOUThreshold=0.5, method=MethodAveragePrecision.EveryPointInterpolation, showAP=False, showInterpolatedPrecision=False, savePath=None, showGraphic=True):
        """Plot the Precision x Recall curve for every class.

        Args:
            boundingBoxes: BoundingBoxes object with ground truths and detections.
            IOUThreshold: IOU threshold for TP/FP decisions (default 0.5).
            method: interpolation method, as in GetPascalVOCMetrics.
            showAP: include the AP value in the plot title (default False).
            showInterpolatedPrecision: also draw the interpolated precision
                (default False).
            savePath: directory where '<class>.png' plots are saved (default None).
            showGraphic: display each plot interactively (default True).

        Returns:
            The list of per-class metric dicts from GetPascalVOCMetrics.
        """
        results = self.GetPascalVOCMetrics(boundingBoxes, IOUThreshold, method)
        result = None
        for result in results:
            if (result is None):
                # BUGFIX: the original formatted an undefined `classId` into the
                # message here, masking the real error with a NameError.
                raise IOError('Error: a class result could not be found.')
            classId = result['class']
            precision = result['precision']
            recall = result['recall']
            average_precision = result['AP']
            mpre = result['interpolated precision']
            mrec = result['interpolated recall']
            npos = result['total positives']
            total_tp = result['total TP']
            total_fp = result['total FP']
            plt.close()
            if showInterpolatedPrecision:
                if (method == MethodAveragePrecision.EveryPointInterpolation):
                    plt.plot(mrec, mpre, '--r', label='Interpolated precision (every point)')
                elif (method == MethodAveragePrecision.ElevenPointInterpolation):
                    # deduplicate recall values, keeping the max precision of each
                    nrec = []
                    nprec = []
                    for idx in range(len(mrec)):
                        r = mrec[idx]
                        if (r not in nrec):
                            idxEq = np.argwhere((mrec == r))
                            nrec.append(r)
                            nprec.append(max([mpre[int(k)] for k in idxEq]))
                    plt.plot(nrec, nprec, 'or', label='11-point interpolated precision')
            plt.plot(recall, precision, label='Precision')
            plt.xlabel('recall')
            plt.ylabel('precision')
            if showAP:
                ap_str = '{0:.2f}%'.format((average_precision * 100))
                plt.title(('Precision x Recall curve \nClass: %s, AP: %s' % (str(classId), ap_str)))
            else:
                plt.title(('Precision x Recall curve \nClass: %s' % str(classId)))
            plt.legend(shadow=True)
            plt.grid()
            if (savePath is not None):
                plt.savefig(os.path.join(savePath, (classId + '.png')))
            if (showGraphic is True):
                plt.show()
                plt.pause(0.05)
        return results

    @staticmethod
    def CalculateAveragePrecision(rec, prec):
        """Every-point interpolated average precision (PASCAL VOC toolkit style).

        Returns:
            [ap, interpolated precision, interpolated recall, indices where
            recall changes].
        """
        mrec = [0]
        mrec.extend(rec)
        mrec.append(1)
        mpre = [0]
        mpre.extend(prec)
        mpre.append(0)
        # make the precision envelope monotonically non-increasing
        for i in range((len(mpre) - 1), 0, (- 1)):
            mpre[(i - 1)] = max(mpre[(i - 1)], mpre[i])
        # indices where recall changes value (O(n); the original re-sliced
        # the whole list on every iteration)
        ii = []
        for i in range((len(mrec) - 1)):
            if (mrec[(i + 1)] != mrec[i]):
                ii.append((i + 1))
        ap = 0
        for i in ii:
            ap = (ap + np.sum(((mrec[i] - mrec[(i - 1)]) * mpre[i])))
        return [ap, mpre[0:(len(mpre) - 1)], mrec[0:(len(mpre) - 1)], ii]

    @staticmethod
    def ElevenPointInterpolatedAP(rec, prec):
        """11-point interpolated average precision.

        Returns:
            [ap, interpolated precision points, interpolated recall points, None].
        """
        # arrays (not lists) so the >= comparison below is valid for any
        # numeric r; the original relied on numpy-scalar reflected comparison
        mrec = np.asarray(rec)
        mpre = np.asarray(prec)
        # recall levels 1.0, 0.9, ..., 0.0
        recallValues = list(np.linspace(0, 1, 11)[::(- 1)])
        rhoInterp = []
        recallValid = []
        for r in recallValues:
            # interpolated precision at r is the max precision at any recall >= r
            argGreaterRecalls = np.argwhere(mrec >= r)
            pmax = 0
            if (argGreaterRecalls.size != 0):
                pmax = max(mpre[argGreaterRecalls.min():])
            recallValid.append(r)
            rhoInterp.append(pmax)
        ap = (sum(rhoInterp) / 11)
        # build (recall, precision) pairs describing the step curve for plotting
        rvals = [recallValid[0]]
        rvals.extend(recallValid)
        rvals.append(0)
        pvals = [0]
        pvals.extend(rhoInterp)
        pvals.append(0)
        cc = []
        for i in range(len(rvals)):
            p = (rvals[i], pvals[(i - 1)])
            if (p not in cc):
                cc.append(p)
            p = (rvals[i], pvals[i])
            if (p not in cc):
                cc.append(p)
        recallValues = [i[0] for i in cc]
        rhoInterp = [i[1] for i in cc]
        return [ap, rhoInterp, recallValues, None]

    @staticmethod
    def _getAllIOUs(reference, detections):
        """Return (iou, reference, detection) triples sorted by decreasing IOU."""
        ret = []
        bbReference = reference.getAbsoluteBoundingBox(BBFormat.XYX2Y2)
        for d in detections:
            bb = d.getAbsoluteBoundingBox(BBFormat.XYX2Y2)
            iou = Evaluator.iou(bbReference, bb)
            ret.append((iou, reference, d))
        return sorted(ret, key=(lambda i: i[0]), reverse=True)

    @staticmethod
    def iou(boxA, boxB):
        """Intersection-over-union of two (x1, y1, x2, y2) boxes; 0 when disjoint."""
        if (Evaluator._boxesIntersect(boxA, boxB) is False):
            return 0
        interArea = Evaluator._getIntersectionArea(boxA, boxB)
        union = Evaluator._getUnionAreas(boxA, boxB, interArea=interArea)
        iou = (interArea / union)
        assert (iou >= 0)
        return iou

    @staticmethod
    def _boxesIntersect(boxA, boxB):
        """Return True when two (x1, y1, x2, y2) boxes overlap at all."""
        if (boxA[0] > boxB[2]):
            return False  # boxA is entirely right of boxB
        if (boxB[0] > boxA[2]):
            return False  # boxA is entirely left of boxB
        if (boxA[3] < boxB[1]):
            return False  # boxA is entirely above boxB
        if (boxA[1] > boxB[3]):
            return False  # boxA is entirely below boxB
        return True

    @staticmethod
    def _getIntersectionArea(boxA, boxB):
        """Area of the overlap region; assumes the boxes do intersect."""
        xA = max(boxA[0], boxB[0])
        yA = max(boxA[1], boxB[1])
        xB = min(boxA[2], boxB[2])
        yB = min(boxA[3], boxB[3])
        # +1 follows the PASCAL convention of inclusive pixel coordinates
        return (((xB - xA) + 1) * ((yB - yA) + 1))

    @staticmethod
    def _getUnionAreas(boxA, boxB, interArea=None):
        """Area of the union of the two boxes."""
        area_A = Evaluator._getArea(boxA)
        area_B = Evaluator._getArea(boxB)
        if (interArea is None):
            interArea = Evaluator._getIntersectionArea(boxA, boxB)
        return float(((area_A + area_B) - interArea))

    @staticmethod
    def _getArea(box):
        """Area of one (x1, y1, x2, y2) box (inclusive-coordinate convention)."""
        return (((box[2] - box[0]) + 1) * ((box[3] - box[1]) + 1))
|
class MethodAveragePrecision(Enum):
    """
    Method used to interpolate the precision x recall curve when computing
    the average precision: every-point interpolation (as in the official
    PASCAL VOC toolkit) or the classic 11-point interpolation.

    Developed by: Rafael Padilla
    Last modification: Apr 28 2018
    """
    EveryPointInterpolation = 1
    ElevenPointInterpolation = 2
|
class CoordinatesType(Enum):
    """
    Whether bounding-box coordinates are relative to the image size or are
    absolute pixel values.

    Developed by: Rafael Padilla
    Last modification: Apr 28 2018
    """
    Relative = 1
    Absolute = 2
|
class BBType(Enum):
    """
    Whether a bounding box is a ground truth or a detection.

    Developed by: Rafael Padilla
    Last modification: May 24 2018
    """
    GroundTruth = 1
    Detected = 2
|
class BBFormat(Enum):
    """
    Format of a bounding box:
    (X, Y, width, height) => XYWH, or (X1, Y1, X2, Y2) => XYX2Y2.

    Developed by: Rafael Padilla
    Last modification: May 24 2018
    """
    XYWH = 1
    XYX2Y2 = 2
|
def convertToRelativeValues(size, box):
    """Convert an absolute box into YOLO-style relative (cx, cy, w, h) values.

    Args:
        size: (image_width, image_height).
        box: absolute coordinates ordered (xmin, xmax, ymin, ymax), as the
            arithmetic below shows.

    Returns:
        (cx, cy, w, h) with every component normalized by the image size.
    """
    inv_w = 1.0 / size[0]
    inv_h = 1.0 / size[1]
    center_x = (box[1] + box[0]) / 2.0
    center_y = (box[3] + box[2]) / 2.0
    box_w = box[1] - box[0]
    box_h = box[3] - box[2]
    return (center_x * inv_w, center_y * inv_h, box_w * inv_w, box_h * inv_h)
|
def convertToAbsoluteValues(size, box):
    """Convert a relative (cx, cy, w, h) box into absolute corner coordinates.

    Args:
        size: (image_width, image_height).
        box: relative (center_x, center_y, width, height) values.

    Returns:
        Integer (xmin, ymin, xmax, ymax); the min corner is clamped to 0 and
        the max corner to size - 1.
    """
    x_min = round((((2 * float(box[0])) - float(box[2])) * size[0]) / 2)
    y_min = round((((2 * float(box[1])) - float(box[3])) * size[1]) / 2)
    x_max = x_min + round(float(box[2]) * size[0])
    y_max = y_min + round(float(box[3]) * size[1])
    # clamp to the valid pixel range
    x_min = max(x_min, 0)
    y_min = max(y_min, 0)
    if x_max >= size[0]:
        x_max = size[0] - 1
    if y_max >= size[1]:
        y_max = size[1] - 1
    return (x_min, y_min, x_max, y_max)
|
def add_bb_into_image(image, bb, color=(255, 0, 0), thickness=2, label=None):
    """Draw the bounding box ``bb`` (and an optional text label) on ``image``.

    Args:
        image: image array to draw on (modified in place and returned).
        bb: BoundingBox whose absolute XYX2Y2 coordinates are drawn.
        color: (R, G, B) box color; note cv2 calls receive it reordered as BGR.
        thickness: rectangle line thickness in pixels.
        label: optional text rendered on a filled background above the box.

    Returns:
        The image with the rectangle (and label) drawn.
    """
    red, green, blue = int(color[0]), int(color[1]), int(color[2])
    font = cv2.FONT_HERSHEY_SIMPLEX
    fontScale = 0.5
    fontThickness = 1
    x1, y1, x2, y2 = (int(v) for v in bb.getAbsoluteBoundingBox(BBFormat.XYX2Y2))
    cv2.rectangle(image, (x1, y1), (x2, y2), (blue, green, red), thickness)
    if label is not None:
        tw, th = cv2.getTextSize(label, font, fontScale, fontThickness)[0]
        xin_bb = x1 + thickness
        yin_bb = (y1 - th) + int(12.5 * fontScale)
        if (yin_bb - th) <= 0:
            # label would fall outside the image: draw it inside the box instead
            yin_bb = y1 + th
        r_Xin = x1 - int(thickness / 2)
        r_Yin = (y1 - th) - int(thickness / 2)
        # filled background rectangle behind the label text
        cv2.rectangle(image, (r_Xin, r_Yin - thickness),
                      ((r_Xin + tw) + (thickness * 3), (r_Yin + th) + int(12.5 * fontScale)),
                      (blue, green, red), (- 1))
        cv2.putText(image, label, (xin_bb, yin_bb), font, fontScale, (0, 0, 0), fontThickness, cv2.LINE_AA)
    return image
|
def ValidateFormats(argFormat, argName, errors):
    """Translate a command-line format string into a BBFormat member.

    'xywh' (or no value) maps to BBFormat.XYWH and 'xyrb' to BBFormat.XYX2Y2;
    any other value appends an error message to ``errors`` and returns None.
    """
    if argFormat == 'xywh' or argFormat is None:
        return BBFormat.XYWH
    if argFormat == 'xyrb':
        return BBFormat.XYX2Y2
    errors.append("argument %s: invalid value. It must be either 'xywh' or 'xyrb'" % argName)
|
def ValidateMandatoryArgs(arg, argName, errors):
    """Check that a mandatory argument was supplied.

    Returns True when ``arg`` is present; otherwise appends an error message
    to ``errors`` and returns None.
    """
    if arg is not None:
        return True
    errors.append('argument %s: required argument' % argName)
|
def ValidateImageSize(arg, argName, argInformed, errors):
    """Validate a 'width,height' image-size command-line argument.

    Args:
        arg: raw string such as '600,400' or '(600,400)', or None.
        argName: name of the size argument (used in error messages).
        argInformed: name of the argument that made the size mandatory.
        errors: list collecting human-readable error messages.

    Returns:
        (width, height) as ints on success, otherwise None (with the reason
        appended to ``errors``).
    """
    errorMsg = ('argument %s: required argument if %s is relative' % (argName, argInformed))
    ret = None
    if (arg is None):
        errors.append(errorMsg)
    else:
        arg = arg.replace('(', '').replace(')', '')
        args = arg.split(',')
        if (len(args) != 2):
            errors.append(("%s. It must be in the format 'width,height' (e.g. '600,400')" % errorMsg))
        elif ((not args[0].isdigit()) or (not args[1].isdigit())):
            # BUGFIX: the original message contained the garbled token 'INdiaTEGER'
            errors.append(("%s. It must be in INTEGER the format 'width,height' (e.g. '600,400')" % errorMsg))
        else:
            ret = (int(args[0]), int(args[1]))
    return ret
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.