code stringlengths 17 6.64M |
|---|
class NoiseLayer(nn.Module):
    """Adds fixed uniform noise in [-level, level] to the input, then applies
    ReLU -> BatchNorm -> 1x1 Conv.

    The noise tensor is lazily sized from the first input's per-sample shape
    on the first forward pass and reused (broadcast over the batch) afterwards.
    """

    def __init__(self, in_planes, out_planes, level):
        super(NoiseLayer, self).__init__()
        # BUG FIX: the original `nn.Parameter(torch.Tensor(0),
        # requires_grad=False).to(device)` left a plain, unregistered tensor
        # behind (`.to` returns a copy), so the noise neither moved with the
        # module on .cuda()/.to() nor appeared in the state dict.  A buffer is
        # the correct registration for non-trainable state.
        self.register_buffer('noise', torch.zeros(0))
        self.level = level
        self.layers = nn.Sequential(
            nn.ReLU(True),
            nn.BatchNorm2d(in_planes),
            nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=1),
        )

    def forward(self, x):
        # Lazily initialise the noise once, matching one sample's shape;
        # broadcasting then adds the same noise to every sample in the batch.
        if self.noise.numel() == 0:
            self.noise.resize_(x[0].shape).uniform_()
            # Map U[0, 1] -> U[-level, level].
            self.noise = (2 * self.noise - 1) * self.level
        y = x + self.noise
        return self.layers(y)
|
class NoiseBasicBlock(nn.Module):
    """Basic residual block whose convolutions are noise-injecting layers."""

    expansion = 1

    def __init__(self, in_planes, planes, stride=1, shortcut=None, level=0.2):
        super(NoiseBasicBlock, self).__init__()
        self.layers = nn.Sequential(
            NoiseLayer(in_planes, planes, level),
            nn.MaxPool2d(stride, stride),
            nn.BatchNorm2d(planes),
            nn.ReLU(True),
            NoiseLayer(planes, planes, level),
            nn.BatchNorm2d(planes),
        )
        self.shortcut = shortcut

    def forward(self, x):
        out = self.layers(x)
        # Project the identity when a shortcut module was supplied.
        skip = self.shortcut(x) if self.shortcut else x
        out += skip
        return F.relu(out)
|
class NoiseBottleneck(nn.Module):
    """Bottleneck residual block with a noise-injecting middle stage."""

    expansion = 4

    def __init__(self, in_planes, planes, stride=1, shortcut=None, level=0.2):
        super(NoiseBottleneck, self).__init__()
        self.layers = nn.Sequential(
            nn.Conv2d(in_planes, planes, kernel_size=1, bias=False),
            nn.BatchNorm2d(planes),
            nn.ReLU(True),
            NoiseLayer(planes, planes, level),
            nn.MaxPool2d(stride, stride),
            nn.BatchNorm2d(planes),
            nn.ReLU(True),
            nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False),
            nn.BatchNorm2d(planes * 4),
        )
        self.shortcut = shortcut

    def forward(self, x):
        out = self.layers(x)
        # Project the identity when a shortcut module was supplied.
        skip = self.shortcut(x) if self.shortcut else x
        out += skip
        return F.relu(out)
|
class NoiseResNet(nn.Module):
    """ResNet backbone assembled from noise-injecting residual blocks."""

    def __init__(self, block, nblocks, nchannels, nfilters, nclasses, pool, level):
        super(NoiseResNet, self).__init__()
        self.in_planes = nfilters
        self.pre_layers = nn.Sequential(
            nn.Conv2d(nchannels, nfilters, kernel_size=7, stride=2, padding=3, bias=False),
            nn.BatchNorm2d(nfilters),
            nn.ReLU(True),
            nn.MaxPool2d(kernel_size=3, stride=2, padding=1),
        )
        self.layer1 = self._make_layer(block, 1 * nfilters, nblocks[0], level=level)
        self.layer2 = self._make_layer(block, 2 * nfilters, nblocks[1], stride=2, level=level)
        self.layer3 = self._make_layer(block, 4 * nfilters, nblocks[2], stride=2, level=level)
        self.layer4 = self._make_layer(block, 8 * nfilters, nblocks[3], stride=2, level=level)
        self.avgpool = nn.AvgPool2d(pool, stride=1)
        self.linear = nn.Linear(8 * nfilters * block.expansion, nclasses)

    def _make_layer(self, block, planes, nblocks, stride=1, level=0.2):
        """Build one stage of `nblocks` residual blocks."""
        shortcut = None
        # A projection shortcut is needed whenever the spatial size or the
        # channel count changes across this stage.
        if stride != 1 or self.in_planes != planes * block.expansion:
            shortcut = nn.Sequential(
                nn.Conv2d(self.in_planes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion),
            )
        layers = [block(self.in_planes, planes, stride, shortcut, level=level)]
        self.in_planes = planes * block.expansion
        for _ in range(1, nblocks):
            layers.append(block(self.in_planes, planes, level=level))
        return nn.Sequential(*layers)

    def forward(self, x):
        out = self.pre_layers(x)
        out = self.layer1(out)
        out = self.layer2(out)
        out = self.layer3(out)
        out = self.layer4(out)
        out = self.avgpool(out)
        out = out.view(out.size(0), -1)
        return self.linear(out)
|
def noiseresnet18(nchannels, nfilters, nclasses, pool=7, level=0.1):
    """NoiseResNet-18: two NoiseBasicBlocks per stage."""
    nblocks = [2, 2, 2, 2]
    return NoiseResNet(NoiseBasicBlock, nblocks, nchannels=nchannels,
                       nfilters=nfilters, nclasses=nclasses, pool=pool, level=level)
|
def noiseresnet34(nchannels, nfilters, nclasses, pool=7, level=0.1):
    """NoiseResNet-34: [3, 4, 6, 3] NoiseBasicBlocks per stage."""
    nblocks = [3, 4, 6, 3]
    return NoiseResNet(NoiseBasicBlock, nblocks, nchannels=nchannels,
                       nfilters=nfilters, nclasses=nclasses, pool=pool, level=level)
|
def noiseresnet50(nchannels, nfilters, nclasses, pool=7, level=0.1):
    """NoiseResNet-50: [3, 4, 6, 3] NoiseBottlenecks per stage."""
    nblocks = [3, 4, 6, 3]
    return NoiseResNet(NoiseBottleneck, nblocks, nchannels=nchannels,
                       nfilters=nfilters, nclasses=nclasses, pool=pool, level=level)
|
def noiseresnet101(nchannels, nfilters, nclasses, pool=7, level=0.1):
    """NoiseResNet-101: [3, 4, 23, 3] NoiseBottlenecks per stage."""
    nblocks = [3, 4, 23, 3]
    return NoiseResNet(NoiseBottleneck, nblocks, nchannels=nchannels,
                       nfilters=nfilters, nclasses=nclasses, pool=pool, level=level)
|
def noiseresnet152(nchannels, nfilters, nclasses, pool=7, level=0.1):
    """NoiseResNet-152: [3, 8, 36, 3] NoiseBottlenecks per stage."""
    nblocks = [3, 8, 36, 3]
    return NoiseResNet(NoiseBottleneck, nblocks, nchannels=nchannels,
                       nfilters=nfilters, nclasses=nclasses, pool=pool, level=level)
|
class Net(nn.Module):
    """Small LeNet-style CNN: two conv+pool stages, three FC layers,
    10 output classes (sized for 32x32 RGB inputs)."""

    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(3, 6, 5)
        self.pool = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(6, 16, 5)
        self.fc1 = nn.Linear(16 * 5 * 5, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)

    def forward(self, x):
        out = self.pool(F.relu(self.conv1(x)))
        out = self.pool(F.relu(self.conv2(out)))
        # Flatten the 16x5x5 feature maps for the classifier head.
        out = out.view(-1, 16 * 5 * 5)
        out = F.relu(self.fc1(out))
        out = F.relu(self.fc2(out))
        return self.fc3(out)
|
def conv3x3(in_planes, out_planes, stride=1):
    """3x3 convolution with padding"""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=1,
        bias=False,
    )
|
class BasicBlock(nn.Module):
    """Standard two-3x3-conv residual block."""

    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(BasicBlock, self).__init__()
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = nn.BatchNorm2d(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = nn.BatchNorm2d(planes)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        # Project the identity when a downsample module was supplied.
        identity = self.downsample(x) if self.downsample is not None else x
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        out += identity
        return self.relu(out)
|
class Bottleneck(nn.Module):
    """Standard ResNet bottleneck: 1x1 reduce -> 3x3 -> 1x1 expand (x4)."""

    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(Bottleneck, self).__init__()
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(planes * 4)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        # Project the identity when a downsample module was supplied.
        identity = self.downsample(x) if self.downsample is not None else x
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        out += identity
        return self.relu(out)
|
class ResNet(nn.Module):
    """ResNet feature extractor.

    NOTE(review): forward() returns the intermediate feature maps
    [x0, x1, x2, x3, x4, x5] and never the classifier output.  The original
    code still computed avgpool + fc and threw the result away; that dead
    computation is removed here (return value unchanged).  The avgpool/fc
    modules are kept so checkpoints and callers stay compatible — if logits
    are needed, apply self.avgpool/self.fc explicitly.

    Args:
        block: residual block class (BasicBlock or Bottleneck).
        layers: number of blocks in each of the four stages.
        nchannels: input image channels.
        nfilters: base channel width.
        nclasses: classifier output size (fc layer only).
    """

    def __init__(self, block, layers, nchannels, nfilters, nclasses=1000):
        self.inplanes = nfilters
        super(ResNet, self).__init__()
        self.conv1 = nn.Conv2d(nchannels, nfilters, kernel_size=7, stride=2, padding=3, bias=False)
        self.bn1 = nn.BatchNorm2d(nfilters)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, nfilters, layers[0])
        self.layer2 = self._make_layer(block, 2 * nfilters, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 4 * nfilters, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 8 * nfilters, layers[3], stride=2)
        self.avgpool = nn.AvgPool2d(7, stride=1)
        self.fc = nn.Linear(8 * nfilters * block.expansion, nclasses)
        # He initialisation for convolutions, identity-like init for BN.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2.0 / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()

    def _make_layer(self, block, planes, blocks, stride=1):
        """Build one stage of `blocks` residual blocks."""
        downsample = None
        # A projection shortcut is needed whenever the spatial size or the
        # channel count changes across this stage.
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion),
            )
        layers = [block(self.inplanes, planes, stride, downsample)]
        self.inplanes = planes * block.expansion
        for _ in range(1, blocks):
            layers.append(block(self.inplanes, planes))
        return nn.Sequential(*layers)

    def forward(self, x0):
        """Return the input plus every stage's feature map."""
        x = self.conv1(x0)
        x = self.bn1(x)
        x = self.relu(x)
        x1 = self.maxpool(x)
        x2 = self.layer1(x1)
        x3 = self.layer2(x2)
        x4 = self.layer3(x3)
        x5 = self.layer4(x4)
        return [x0, x1, x2, x3, x4, x5]
|
def resnet18(nchannels, nfilters, nclasses):
    """ResNet-18: two BasicBlocks per stage."""
    layers = [2, 2, 2, 2]
    return ResNet(BasicBlock, layers, nchannels=nchannels,
                  nfilters=nfilters, nclasses=nclasses)
|
def resnet34(nchannels, nfilters, nclasses):
    """ResNet-34: [3, 4, 6, 3] BasicBlocks per stage."""
    layers = [3, 4, 6, 3]
    return ResNet(BasicBlock, layers, nchannels=nchannels,
                  nfilters=nfilters, nclasses=nclasses)
|
def resnet50(nchannels, nfilters, nclasses):
    """ResNet-50: [3, 4, 6, 3] Bottlenecks per stage."""
    layers = [3, 4, 6, 3]
    return ResNet(Bottleneck, layers, nchannels=nchannels,
                  nfilters=nfilters, nclasses=nclasses)
|
def resnet101(nchannels, nfilters, nclasses):
    """ResNet-101: [3, 4, 23, 3] Bottlenecks per stage."""
    layers = [3, 4, 23, 3]
    return ResNet(Bottleneck, layers, nchannels=nchannels,
                  nfilters=nfilters, nclasses=nclasses)
|
def resnet152(nchannels, nfilters, nclasses):
    """ResNet-152: [3, 8, 36, 3] Bottlenecks per stage."""
    layers = [3, 8, 36, 3]
    return ResNet(Bottleneck, layers, nchannels=nchannels,
                  nfilters=nfilters, nclasses=nclasses)
|
class Image():
    """Saves registered batches of images to disk as tiled grid images."""

    def __init__(self, path, ext='png'):
        # Create the output directory on demand.
        if not os.path.isdir(path):
            os.makedirs(path)
        self.path = path
        self.names = []   # one name per registered entry, in order
        self.ext = ext    # file extension used for saved images
        self.iteration = 1
        self.num = 0

    def register(self, modules):
        """Register a list of names; update() expects tensors in this order."""
        self.num = self.num + len(modules)
        for tmp in modules:
            self.names.append(tmp)

    def update(self, modules):
        """Save one grid image per registered entry, then bump the counter."""
        for i in range(self.num):
            # BUG FIX: the extension was hard-coded to 'png', ignoring the
            # `ext` constructor argument.
            name = os.path.join(self.path, '%s_%03d.%s' % (self.names[i], self.iteration, self.ext))
            nrow = math.ceil(math.sqrt(modules[i].size(0)))
            vutils.save_image(modules[i], name, nrow=nrow, padding=0, normalize=True, scale_each=True)
        self.iteration = self.iteration + 1
|
class Logger():
    """Appends tab-separated metric rows to a text log file.

    Files are opened with context managers (the original kept reopening and
    closing a `self.fid` handle manually, which leaked on a failed write; the
    always-closed `self.fid` attribute is dropped).
    """

    def __init__(self, path, filename):
        self.num = 0
        if not os.path.isdir(path):
            os.makedirs(path)
        self.filename = os.path.join(path, filename)
        # Truncate any previous log.
        with open(self.filename, 'w'):
            pass

    def register(self, modules):
        """Append a header row with the given column names."""
        self.num = self.num + len(modules)
        tmpstr = ''
        for tmp in modules:
            tmpstr = tmpstr + tmp + '\t'
        tmpstr = tmpstr + '\n'
        with open(self.filename, 'a') as fid:
            fid.write(tmpstr)

    def update(self, modules):
        """Append one row of '%.4f'-formatted values (dict iteration order)."""
        tmpstr = ''
        for tmp in modules:
            tmpstr = tmpstr + ('%.4f' % modules[tmp]) + '\t'
        tmpstr = tmpstr + '\n'
        with open(self.filename, 'a') as fid:
            fid.write(tmpstr)
|
class Monitor():
    """Tracks running (or exponentially smoothed) averages of named values."""

    def __init__(self, smoothing=True, smoothness=0.7):
        self.keys = []        # registration order, preserved by getvalues()
        self.losses = {}      # key -> current tracked value
        self.smoothing = smoothing
        self.smoothness = smoothness
        self.num = 0          # samples folded into the running averages

    def register(self, modules):
        """Register names to track, starting each at 0."""
        for m in modules:
            self.keys.append(m)
            self.losses[m] = 0

    def reset(self):
        """Zero the sample count and every tracked value.

        BUG FIX: the original rebound the loop variable (`value = 0`), which
        never modified the dict, so values survived a reset.
        """
        self.num = 0
        for key in self.losses:
            self.losses[key] = 0

    def update(self, modules, batch_size):
        """Fold one batch of values in (running mean or EMA per mode)."""
        if not self.smoothing:
            for key, value in modules.items():
                self.losses[key] = ((self.losses[key] * self.num) + (value * batch_size)) / (self.num + batch_size)
        else:
            for key, value in modules.items():
                # NOTE(review): batch_size does not enter the EMA — each call
                # counts once regardless of batch size (original behavior).
                self.losses[key] = (self.losses[key] * self.smoothness) + (value * (1 - self.smoothness))
        self.num += batch_size

    def getvalues(self, key=None):
        """Return one tracked value, or all of them as an OrderedDict."""
        if key is not None:
            return self.losses[key]
        return OrderedDict([(key, self.losses[key]) for key in self.keys])
|
class Visualizer():
    """Streams registered scalar plots and image grids to a visdom server."""
    def __init__(self, port, title):
        self.keys = []
        self.values = {}
        self.viz = visdom.Visdom(port=port)
        self.iteration = 0
        self.title = title
    def register(self, modules):
        """Declare entries to visualize.

        modules maps key -> {'dtype': 'scalar'|'image'|'images',
        'vtype': 'plot'|'image'|'images'}.
        """
        for key in modules:
            self.keys.append(key)
            self.values[key] = {}
            self.values[key]['dtype'] = modules[key]['dtype']
            self.values[key]['vtype'] = modules[key]['vtype']
            if (modules[key]['vtype'] == 'plot'):
                # Create the line-plot window up front with a dummy point.
                self.values[key]['value'] = []
                self.values[key]['win'] = self.viz.line(X=np.array([0]), Y=np.array([0]), opts=dict(title=self.title, xlabel='Epoch', ylabel=key))
            elif (modules[key]['vtype'] == 'image'):
                self.values[key]['value'] = None
            elif (modules[key]['vtype'] == 'images'):
                self.values[key]['value'] = None
            else:
                sys.exit('Data type not supported, please update the visualizer plugin and rerun !!')
    def update(self, modules):
        """Record the latest values, then push every registered entry to visdom."""
        # Stage incoming values according to their declared dtype.
        for key in modules:
            if (self.values[key]['dtype'] == 'scalar'):
                self.values[key]['value'].append(modules[key])
            elif (self.values[key]['dtype'] == 'image'):
                self.values[key]['value'] = modules[key]
            elif (self.values[key]['dtype'] == 'images'):
                self.values[key]['value'] = modules[key]
            else:
                sys.exit('Data type not supported, please update the visualizer plugin and rerun !!')
        for key in self.keys:
            if (self.values[key]['vtype'] == 'plot'):
                # NOTE(review): viz.updateTrace was removed in newer visdom
                # releases (viz.line(..., update='append') replaces it) —
                # confirm the installed visdom version.
                self.viz.updateTrace(X=np.array([self.iteration]), Y=np.array([self.values[key]['value'][(- 1)]]), win=self.values[key]['win'])
            elif (self.values[key]['vtype'] == 'image'):
                # Min-max normalize each sample/channel slice before display.
                temp = self.values[key]['value'].numpy()
                for i in range(temp.shape[0]):
                    temp[i] = (temp[i] - temp[i].min())
                    temp[i] = (temp[i] / temp[i].max())
                if (self.iteration == 0):
                    self.values[key]['win'] = self.viz.image(temp, opts=dict(title=key, caption=self.iteration))
                else:
                    self.viz.image(temp, opts=dict(title=key, caption=self.iteration), win=self.values[key]['win'])
            elif (self.values[key]['vtype'] == 'images'):
                # Min-max normalize each image in the batch independently.
                temp = self.values[key]['value'].numpy()
                for i in range(temp.shape[0]):
                    for j in range(temp.shape[1]):
                        temp[i][j] = (temp[i][j] - temp[i][j].min())
                        temp[i][j] = (temp[i][j] / temp[i][j].max())
                if (self.iteration == 0):
                    self.values[key]['win'] = self.viz.images(temp, opts=dict(title=key, caption=self.iteration))
                else:
                    self.viz.images(temp, opts=dict(title=key, caption=self.iteration), win=self.values[key]['win'])
            else:
                sys.exit('Visualization type not supported, please update the visualizer plugin and rerun !!')
        self.iteration = (self.iteration + 1)
|
class Trainer():
    """Drives training and evaluation epochs for a classification model.

    Wires together the optimizer and the Logger/Monitor/Visualizer plugins
    declared in ``args``.

    NOTE(review): written against the pre-0.4 PyTorch API (``Variable``,
    ``.volatile``, ``loss.data[0]``, in-place ``resize_``) and Python 2
    iterators (``data_iter.next()``) — confirm the runtime before reuse.
    """
    def __init__(self, args, model, criterion):
        self.args = args
        self.model = model
        self.criterion = criterion
        # Copy frequently-used options out of args.
        self.port = args.port
        self.dir_save = args.save
        self.cuda = args.cuda
        self.nepochs = args.nepochs
        self.nclasses = args.nclasses
        self.nchannels = args.nchannels
        self.batch_size = args.batch_size
        self.resolution_high = args.resolution_high
        self.resolution_wide = args.resolution_wide
        self.lr = args.learning_rate
        self.momentum = args.momentum
        self.adam_beta1 = args.adam_beta1
        self.adam_beta2 = args.adam_beta2
        self.weight_decay = args.weight_decay
        self.optim_method = args.optim_method
        self.dataset_train_name = args.dataset_train
        # Optimize only parameters that require gradients.
        parameters = filter((lambda p: p.requires_grad), model.parameters())
        if (self.optim_method == 'Adam'):
            self.optimizer = optim.Adam(parameters, lr=self.lr, betas=(self.adam_beta1, self.adam_beta2), weight_decay=self.weight_decay)
        elif (self.optim_method == 'RMSprop'):
            self.optimizer = optim.RMSprop(parameters, lr=self.lr, momentum=self.momentum, weight_decay=self.weight_decay)
        elif (self.optim_method == 'SGD'):
            self.optimizer = optim.SGD(parameters, lr=self.lr, momentum=self.momentum, weight_decay=self.weight_decay, nesterov=True)
        else:
            raise Exception('Unknown Optimization Method')
        # Preallocated input/label tensors; each minibatch is copied into them.
        self.label = torch.zeros(self.batch_size).long()
        self.input = torch.zeros(self.batch_size, self.nchannels, self.resolution_high, self.resolution_wide)
        if args.cuda:
            self.label = self.label.cuda()
            self.input = self.input.cuda()
        self.input = Variable(self.input)
        self.label = Variable(self.label)
        # File loggers for train/test metrics.
        self.log_loss_train = plugins.Logger(args.logs, 'TrainLogger.txt')
        self.params_loss_train = ['Loss', 'Accuracy']
        self.log_loss_train.register(self.params_loss_train)
        self.log_loss_test = plugins.Logger(args.logs, 'TestLogger.txt')
        self.params_loss_test = ['Loss', 'Accuracy']
        self.log_loss_test.register(self.params_loss_test)
        # Running-average monitors for the same metrics.
        self.monitor_train = plugins.Monitor()
        self.params_monitor_train = ['Loss', 'Accuracy']
        self.monitor_train.register(self.params_monitor_train)
        self.monitor_test = plugins.Monitor()
        self.params_monitor_test = ['Loss', 'Accuracy']
        self.monitor_test.register(self.params_monitor_test)
        # Visdom visualizers for live plots.
        self.visualizer_train = plugins.Visualizer(self.port, 'Train')
        self.params_visualizer_train = {'Loss': {'dtype': 'scalar', 'vtype': 'plot'}, 'Accuracy': {'dtype': 'scalar', 'vtype': 'plot'}}
        self.visualizer_train.register(self.params_visualizer_train)
        self.visualizer_test = plugins.Visualizer(self.port, 'Test')
        self.params_visualizer_test = {'Loss': {'dtype': 'scalar', 'vtype': 'plot'}, 'Accuracy': {'dtype': 'scalar', 'vtype': 'plot'}}
        self.visualizer_test.register(self.params_visualizer_test)
        # Console progress format strings, e.g. "[e/E][i/N] Loss 0.1234 ...".
        self.print_train = '[%d/%d][%d/%d] '
        for item in self.params_loss_train:
            self.print_train = ((self.print_train + item) + ' %.4f ')
        self.print_test = '[%d/%d][%d/%d] '
        for item in self.params_loss_test:
            self.print_test = ((self.print_test + item) + ' %.4f ')
        # Module types kept in train mode during evaluation (see model_eval).
        self.evalmodules = []
        self.giterations = 0
        self.losses_test = {}
        self.losses_train = {}
    def learning_rate(self, epoch):
        """Step-decay learning-rate schedule, keyed by training dataset.

        NOTE(review): implicitly returns None for unknown dataset names —
        confirm callers never pass one.
        """
        if (self.dataset_train_name == 'CIFAR10'):
            return (self.lr * (((0.1 ** int((epoch >= 60))) * (0.1 ** int((epoch >= 90)))) * (0.1 ** int((epoch >= 120)))))
        elif (self.dataset_train_name == 'CIFAR100'):
            return (self.lr * (((0.1 ** int((epoch >= 80))) * (0.1 ** int((epoch >= 120)))) * (0.1 ** int((epoch >= 160)))))
        elif (self.dataset_train_name == 'MNIST'):
            return (self.lr * (((0.1 ** int((epoch >= 80))) * (0.1 ** int((epoch >= 120)))) * (0.1 ** int((epoch >= 160)))))
        elif (self.dataset_train_name == 'FRGC'):
            return (self.lr * (((0.1 ** int((epoch >= 80))) * (0.1 ** int((epoch >= 120)))) * (0.1 ** int((epoch >= 160)))))
        elif (self.dataset_train_name == 'ImageNet'):
            # Decay by 10x every 30 epochs.
            decay = math.floor(((epoch - 1) / 30))
            return (self.lr * math.pow(0.1, decay))
    def get_optimizer(self, epoch, optimizer):
        """Write the scheduled learning rate into every param group."""
        lr = self.learning_rate(epoch)
        for param_group in optimizer.param_groups:
            param_group['lr'] = lr
        return optimizer
    def model_eval(self):
        """Switch to eval mode, keeping self.evalmodules types in train mode."""
        self.model.eval()
        for m in self.model.modules():
            for i in range(len(self.evalmodules)):
                if isinstance(m, self.evalmodules[i]):
                    m.train()
    def model_train(self):
        """Switch the model to training mode."""
        self.model.train()
    def train(self, epoch, dataloader):
        """Run one training epoch; return the epoch's monitored accuracy."""
        self.monitor_train.reset()
        data_iter = iter(dataloader)
        # Gradients are needed during training (pre-0.4 volatile flag).
        self.input.volatile = False
        self.label.volatile = False
        self.optimizer = self.get_optimizer((epoch + 1), self.optimizer)
        self.model_train()
        i = 0
        while (i < len(dataloader)):
            (input, label) = data_iter.next()
            i += 1
            batch_size = input.size(0)
            # NOTE(review): trailing batches smaller than batch_size are
            # silently skipped.
            if (batch_size == self.batch_size):
                self.input.data.resize_(input.size()).copy_(input)
                self.label.data.resize_(label.size()).copy_(label)
                output = self.model(self.input)
                loss = self.criterion(output, self.label)
                self.optimizer.zero_grad()
                loss.backward()
                self.optimizer.step()
                # Top-1 accuracy (percent) for this minibatch.
                pred = output.data.max(1)[1]
                acc = (float((pred.eq(self.label.data).cpu().sum() * 100.0)) / float(batch_size))
                self.losses_train['Accuracy'] = float(acc)
                self.losses_train['Loss'] = float(loss.data[0])
                self.monitor_train.update(self.losses_train, batch_size)
                print((self.print_train % tuple(([epoch, self.nepochs, i, len(dataloader)] + [self.losses_train[key] for key in self.params_monitor_train]))))
        # Log and plot the epoch-level averages.
        loss = self.monitor_train.getvalues()
        self.log_loss_train.update(loss)
        self.visualizer_train.update(loss)
        return self.monitor_train.getvalues('Accuracy')
    def test(self, epoch, dataloader):
        """Run one evaluation epoch; return the monitored accuracy."""
        self.monitor_test.reset()
        data_iter = iter(dataloader)
        # volatile=True: inference only, no graph kept (pre-0.4 API).
        self.input.volatile = True
        self.label.volatile = True
        self.model_eval()
        i = 0
        while (i < len(dataloader)):
            (input, label) = data_iter.next()
            i += 1
            batch_size = input.size(0)
            # NOTE(review): trailing batches smaller than batch_size are
            # silently skipped.
            if (batch_size == self.batch_size):
                self.input.data.resize_(input.size()).copy_(input)
                self.label.data.resize_(label.size()).copy_(label)
                self.model.zero_grad()
                output = self.model(self.input)
                loss = self.criterion(output, self.label)
                # Top-1 accuracy (percent) for this minibatch.
                pred = output.data.max(1)[1]
                acc = (float((pred.eq(self.label.data).cpu().sum() * 100.0)) / float(batch_size))
                self.losses_test['Accuracy'] = float(acc)
                self.losses_test['Loss'] = float(loss.data[0])
                self.monitor_test.update(self.losses_test, batch_size)
                print((self.print_test % tuple(([epoch, self.nepochs, i, len(dataloader)] + [self.losses_test[key] for key in self.params_monitor_test]))))
        # Log and plot the epoch-level averages.
        loss = self.monitor_test.getvalues()
        self.log_loss_test.update(loss)
        self.visualizer_test.update(loss)
        return self.monitor_test.getvalues('Accuracy')
|
def readtextfile(filename):
    """Return all lines of a text file (trailing newlines preserved)."""
    # The `with` block closes the file; the original's extra f.close() inside
    # the context was redundant.
    with open(filename) as f:
        return f.readlines()
|
def writetextfile(data, filename):
    """Write a sequence of strings to a file (no newlines are added)."""
    # The `with` block closes the file; the original's extra f.close() inside
    # the context was redundant.
    with open(filename, 'w') as f:
        f.writelines(data)
|
def delete_file(filename):
    """Remove the file if it exists; missing paths are silently ignored."""
    if os.path.isfile(filename):
        os.remove(filename)
|
def eformat(f, prec, exp_digits):
    """Scientific-notation format with `prec` mantissa digits and a signed,
    zero-padded exponent of `exp_digits` digits."""
    mantissa, exp = ('%.*e' % (prec, f)).split('e')
    # Width exp_digits + 1 accounts for the mandatory sign character.
    return '%se%+0*d' % (mantissa, exp_digits + 1, int(exp))
|
def saveargs(args):
    """Dump every attribute of the parsed args to <args.logs>/args.txt,
    one 'name value' pair per line."""
    path = args.logs
    if not os.path.isdir(path):
        os.makedirs(path)
    with open(os.path.join(path, 'args.txt'), 'w') as f:
        for arg in vars(args):
            f.write(arg + ' ' + str(getattr(args, arg)) + '\n')
|
class Dataloader():
    """Builds train/test torchvision datasets and DataLoaders from args.

    The dataset pair is selected by name (args.dataset_train /
    args.dataset_test); create() wraps them in torch DataLoaders.

    NOTE(review): several branches look broken as written — the FileList /
    FolderList branches read self.input_filename_train, self.split_train,
    self.loader_input, etc., which are never set on this class (presumably
    args.* was intended), and transforms.Scale / RandomSizedCrop are
    deprecated torchvision names — confirm before using those paths.
    """
    def __init__(self, args, input_size):
        self.args = args
        self.dataset_test_name = args.dataset_test
        self.dataset_train_name = args.dataset_train
        self.input_size = input_size
        # ----- training dataset, selected by name -----
        if (self.dataset_train_name == 'LSUN'):
            self.dataset_train = getattr(datasets, self.dataset_train_name)(db_path=args.dataroot, classes=['bedroom_train'], transform=transforms.Compose([transforms.Scale(self.input_size), transforms.CenterCrop(self.input_size), transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]))
        elif ((self.dataset_train_name == 'CIFAR10') or (self.dataset_train_name == 'CIFAR100')):
            # Standard CIFAR augmentation: pad-crop + horizontal flip.
            self.dataset_train = getattr(datasets, self.dataset_train_name)(root=self.args.dataroot, train=True, download=True, transform=transforms.Compose([transforms.RandomCrop(self.input_size, padding=4), transforms.RandomHorizontalFlip(), transforms.ToTensor(), transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.201))]))
        elif ((self.dataset_train_name == 'CocoCaption') or (self.dataset_train_name == 'CocoDetection')):
            self.dataset_train = getattr(datasets, self.dataset_train_name)(root=self.args.dataroot, train=True, download=True, transform=transforms.Compose([transforms.Scale(self.input_size), transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]))
        elif ((self.dataset_train_name == 'STL10') or (self.dataset_train_name == 'SVHN')):
            self.dataset_train = getattr(datasets, self.dataset_train_name)(root=self.args.dataroot, split='train', download=True, transform=transforms.Compose([transforms.Scale(self.input_size), transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]))
        elif (self.dataset_train_name == 'MNIST'):
            self.dataset_train = getattr(datasets, self.dataset_train_name)(root=self.args.dataroot, train=True, download=True, transform=transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))]))
        elif (self.dataset_train_name == 'ImageNet'):
            normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
            self.dataset_train = datasets.ImageFolder(root=os.path.join(self.args.dataroot, self.args.input_filename_train), transform=transforms.Compose([transforms.RandomSizedCrop(224), transforms.RandomHorizontalFlip(), transforms.ToTensor(), normalize]))
        elif (self.dataset_train_name == 'FRGC'):
            self.dataset_train = datasets.ImageFolder(root=(self.args.dataroot + self.args.input_filename_train), transform=transforms.Compose([transforms.Scale(self.input_size), transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]))
        elif (self.dataset_train_name == 'Folder'):
            self.dataset_train = datasets.ImageFolder(root=(self.args.dataroot + self.args.input_filename_train), transform=transforms.Compose([transforms.Scale(self.input_size), transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]))
        elif (self.dataset_train_name == 'FileList'):
            # NOTE(review): self.input_filename_train etc. are never assigned
            # on this class — this branch raises AttributeError as written.
            self.dataset_train = datasets.FileList(self.input_filename_train, self.label_filename_train, self.split_train, self.split_test, train=True, transform_train=transforms.Compose([transforms.Scale(self.input_size), transforms.CenterCrop(self.input_size), transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]), transform_test=transforms.Compose([transforms.Scale(self.input_size), transforms.CenterCrop(self.input_size), transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]), loader_input=self.loader_input, loader_label=self.loader_label)
        elif (self.dataset_train_name == 'FolderList'):
            # NOTE(review): same unset-attribute problem as the FileList branch.
            self.dataset_train = datasets.FileList(self.input_filename_train, self.label_filename_train, self.split_train, self.split_test, train=True, transform_train=transforms.Compose([transforms.Scale(self.input_size), transforms.CenterCrop(self.input_size), transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]), transform_test=transforms.Compose([transforms.Scale(self.input_size), transforms.CenterCrop(self.input_size), transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]), loader_input=self.loader_input, loader_label=self.loader_label)
        else:
            raise Exception('Unknown Dataset')
        # ----- test dataset, selected by name -----
        if (self.dataset_test_name == 'LSUN'):
            self.dataset_test = getattr(datasets, self.dataset_test_name)(db_path=args.dataroot, classes=['bedroom_val'], transform=transforms.Compose([transforms.Scale(self.input_size), transforms.CenterCrop(self.input_size), transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]))
        elif ((self.dataset_test_name == 'CIFAR10') or (self.dataset_test_name == 'CIFAR100')):
            self.dataset_test = getattr(datasets, self.dataset_test_name)(root=self.args.dataroot, train=False, download=True, transform=transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.201))]))
        elif ((self.dataset_test_name == 'CocoCaption') or (self.dataset_test_name == 'CocoDetection')):
            self.dataset_test = getattr(datasets, self.dataset_test_name)(root=self.args.dataroot, train=False, download=True, transform=transforms.Compose([transforms.Scale(self.input_size), transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]))
        elif ((self.dataset_test_name == 'STL10') or (self.dataset_test_name == 'SVHN')):
            self.dataset_test = getattr(datasets, self.dataset_test_name)(root=self.args.dataroot, split='test', download=True, transform=transforms.Compose([transforms.Scale(self.input_size), transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]))
        elif (self.dataset_test_name == 'MNIST'):
            self.dataset_test = getattr(datasets, self.dataset_test_name)(root=self.args.dataroot, train=False, download=True, transform=transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))]))
        elif (self.dataset_test_name == 'ImageNet'):
            normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
            self.dataset_test = datasets.ImageFolder(root=os.path.join(self.args.dataroot, self.args.input_filename_test), transform=transforms.Compose([transforms.Scale(256), transforms.CenterCrop(224), transforms.ToTensor(), normalize]))
        elif (self.dataset_test_name == 'FRGC'):
            self.dataset_test = datasets.ImageFolder(root=(self.args.dataroot + self.args.input_filename_test), transform=transforms.Compose([transforms.Scale(self.input_size), transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]))
        elif (self.dataset_test_name == 'Folder'):
            self.dataset_test = datasets.ImageFolder(root=(self.args.dataroot + self.args.input_filename_test), transform=transforms.Compose([transforms.Scale(self.input_size), transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]))
        elif (self.dataset_test_name == 'FileList'):
            # NOTE(review): unset attributes as above; also train=True looks
            # suspicious for a test split — confirm intent.
            self.dataset_test = datasets.FileList(self.input_filename_test, self.label_filename_test, self.split_train, self.split_test, train=True, transform_train=transforms.Compose([transforms.Scale(self.input_size), transforms.CenterCrop(self.input_size), transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]), loader_input=self.loader_input, loader_label=self.loader_label)
        elif (self.dataset_test_name == 'FolderList'):
            # NOTE(review): unset attributes as above; also train=True looks
            # suspicious for a test split — confirm intent.
            self.dataset_test = datasets.FileList(self.input_filename_test, self.label_filename_test, self.split_train, self.split_test, train=True, transform_train=transforms.Compose([transforms.Scale(self.input_size), transforms.CenterCrop(self.input_size), transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]), loader_input=self.loader_input, loader_label=self.loader_label)
        else:
            raise Exception('Unknown Dataset')
    def create(self, flag=None):
        """Return DataLoader(s): 'Train', 'Test', or both when flag is None."""
        if (flag == 'Train'):
            dataloader_train = torch.utils.data.DataLoader(self.dataset_train, batch_size=self.args.batch_size, shuffle=True, num_workers=int(self.args.nthreads), pin_memory=True)
            return dataloader_train
        if (flag == 'Test'):
            dataloader_test = torch.utils.data.DataLoader(self.dataset_test, batch_size=self.args.batch_size, shuffle=False, num_workers=int(self.args.nthreads), pin_memory=True)
            return dataloader_test
        if (flag == None):
            dataloader_train = torch.utils.data.DataLoader(self.dataset_train, batch_size=self.args.batch_size, shuffle=True, num_workers=int(self.args.nthreads), pin_memory=True)
            dataloader_test = torch.utils.data.DataLoader(self.dataset_test, batch_size=self.args.batch_size, shuffle=False, num_workers=int(self.args.nthreads), pin_memory=True)
            return (dataloader_train, dataloader_test)
|
class FileList(data.Dataset):
    """Dataset backed by text files listing input (and optional label) paths.

    `ifile`/`lfile` each name a text file with one path per line.  The lists
    are shuffled and split into train/test portions according to
    `split_train`/`split_test`.
    """

    def __init__(self, ifile, lfile=None, split_train=1.0, split_test=0.0, train=True, transform_train=None, transform_test=None, loader_input=loaders.loader_image, loader_label=loaders.loader_torch):
        self.ifile = ifile
        self.lfile = lfile
        self.train = train
        self.split_test = split_test
        self.split_train = split_train
        self.transform_test = transform_test
        self.transform_train = transform_train
        self.loader_input = loader_input
        self.loader_label = loader_label
        # String shorthands resolve to the corresponding loader functions.
        if loader_input == 'image':
            self.loader_input = loaders.loader_image
        if loader_input == 'torch':
            self.loader_input = loaders.loader_torch
        if loader_input == 'numpy':
            self.loader_input = loaders.loader_numpy
        if loader_label == 'image':
            self.loader_label = loaders.loader_image
        if loader_label == 'torch':
            self.loader_label = loaders.loader_torch
        if loader_label == 'numpy':
            self.loader_label = loaders.loader_numpy
        if ifile is not None:
            imagelist = [x.rstrip('\n') for x in utils.readtextfile(ifile)]
        else:
            imagelist = []
        if lfile is not None:
            labellist = [x.rstrip('\n') for x in utils.readtextfile(lfile)]
        else:
            labellist = []
        # BUG FIX: random.shuffle takes a single sequence; the original passed
        # both lists (`shuffle(imagelist, labellist)`), which does not shuffle
        # them in unison.  Shuffle (image, label) pairs so correspondence is
        # preserved.
        if len(imagelist) > 0 and len(labellist) == len(imagelist):
            pairs = list(zip(imagelist, labellist))
            shuffle(pairs)
            imagelist, labellist = (list(t) for t in zip(*pairs))
        elif len(imagelist) > 0:
            shuffle(imagelist)
        elif len(labellist) > 0:
            shuffle(labellist)
        if 0.0 < self.split_train < 1.0:
            # BUG FIX: the original read the nonexistent `self.split` and the
            # undefined name `images`, and dropped one element at the split
            # boundary by slicing from num + 1.
            if len(imagelist) > 0:
                num = int(math.floor(self.split_train * len(imagelist)))
                self.images_train = imagelist[:num]
                self.images_test = imagelist[num:]
            if len(labellist) > 0:
                num = int(math.floor(self.split_train * len(labellist)))
                self.labels_train = labellist[:num]
                self.labels_test = labellist[num:]
        elif self.split_train == 1.0:
            if len(imagelist) > 0:
                self.images_train = imagelist
            if len(labellist) > 0:
                self.labels_train = labellist
        elif self.split_test == 1.0:
            if len(imagelist) > 0:
                self.images_test = imagelist
            if len(labellist) > 0:
                self.labels_test = labellist

    def __len__(self):
        """Size of the active (train or test) split."""
        if self.train:
            return len(self.images_train)
        return len(self.images_test)

    def __getitem__(self, index):
        """Load the (image, label) pair at `index` from the active split,
        applying the matching transform."""
        input = {}
        if self.train:
            if len(self.images_train) > 0:
                path = self.images_train[index]
                input['inp'] = self.loader_input(path)
            if len(self.labels_train) > 0:
                path = self.labels_train[index]
                input['tgt'] = self.loader_label(path)
            if self.transform_train is not None:
                input = self.transform_train(input)
            image = input['inp']
            label = input['tgt']
        else:
            if len(self.images_test) > 0:
                path = self.images_test[index]
                input['inp'] = self.loader_input(path)
            if len(self.labels_test) > 0:
                path = self.labels_test[index]
                input['tgt'] = self.loader_label(path)
            if self.transform_test is not None:
                input = self.transform_test(input)
            image = input['inp']
            label = input['tgt']
        return (image, label)
|
def is_image_file(filename):
    """Return True if `filename` ends with a recognized image extension."""
    # str.endswith accepts a tuple of suffixes, replacing the any() scan.
    return filename.endswith(tuple(IMG_EXTENSIONS))
|
def make_dataset(classlist, labellist=None):
    """Collect image file names and per-image label dicts from class directories.

    Parameters:
        classlist: path to a text file listing one class directory per line.
        labellist: optional path to a text file listing label directories;
            when given, labels are rebuilt from those directories instead.

    Returns:
        (images, labels): parallel lists of file names and label entries.
    """
    images = []
    labels = []
    # BUG FIX: the original read an undefined name `ifile`; the parameter
    # `classlist` is clearly what was intended.
    classes = utils.readtextfile(classlist)
    classes = [x.rstrip('\n') for x in classes]
    classes.sort()
    # BUG FIX: `for i in len(classes)` raises TypeError; iterate over range().
    for i in range(len(classes)):
        for fname in os.listdir(classes[i]):
            if is_image_file(fname):
                label = {}
                label['class'] = os.path.split(classes[i])
                images.append(fname)
                labels.append(label)
    if labellist is not None:
        # BUG FIX: the original re-read `ifile` again and appended to the very
        # list it was iterating over; read the label file into its own list
        # and rebuild `labels` from the label directories.
        label_dirs = utils.readtextfile(labellist)
        label_dirs = [x.rstrip('\n') for x in label_dirs]
        label_dirs.sort()
        labels = []
        for i in range(len(label_dirs)):
            for fname in os.listdir(label_dirs[i]):
                if is_image_file(fname):
                    labels.append(os.path.split(label_dirs[i]))
    return (images, labels)
|
class FolderList(data.Dataset):
    """Dataset built from class folders listed in text files.

    Images and labels are discovered via make_dataset, optionally shuffled,
    and split into train/test partitions according to split_train/split_test.
    """

    def __init__(self, ifile, lfile=None, split_train=1.0, split_test=0.0, train=True, transform_train=None, transform_test=None, loader_input=loaders.loader_image, loader_label=loaders.loader_torch):
        (imagelist, labellist) = make_dataset(ifile, lfile)
        if len(imagelist) == 0:
            raise RuntimeError('No images found')
        if len(labellist) == 0:
            raise RuntimeError('No labels found')
        # BUG FIX: `train`, the split ratios and the partitioning below read
        # from an undefined global `args`; use the constructor arguments.
        self.train = train
        self.split_train = split_train
        self.split_test = split_test
        self.loader_input = loader_input
        self.loader_label = loader_label
        # String shorthands may be passed in place of loader callables.
        if loader_input == 'image':
            self.loader_input = loaders.loader_image
        if loader_input == 'torch':
            self.loader_input = loaders.loader_torch
        if loader_input == 'numpy':
            self.loader_input = loaders.loader_numpy
        if loader_label == 'image':
            self.loader_label = loaders.loader_image
        if loader_label == 'torch':
            self.loader_label = loaders.loader_torch
        if loader_label == 'numpy':
            self.loader_label = loaders.loader_numpy
        self.imagelist = imagelist
        self.labellist = labellist
        self.transform_test = transform_test
        self.transform_train = transform_train
        # Default every partition to empty so __len__/__getitem__ never hit
        # an AttributeError when a split leaves one side unpopulated.
        self.images_train = []
        self.images_test = []
        self.labels_train = []
        self.labels_test = []
        # `shuffle` here is the project helper (accepts parallel lists).
        if len(imagelist) == len(labellist):
            shuffle(imagelist, labellist)
        if (len(imagelist) > 0) and (len(labellist) == 0):
            shuffle(imagelist)
        if (len(labellist) > 0) and (len(imagelist) == 0):
            shuffle(labellist)
        if 0.0 < split_train < 1.0:
            if len(imagelist) > 0:
                # BUG FIX: the original used the undefined `args.split`;
                # `split_train` is the fraction assigned to the train side.
                num = math.floor(split_train * len(imagelist))
                self.images_train = imagelist[0:num]
                # BUG FIX: the original indexed an undefined name `images`.
                # NOTE(review): starting at num + 1 drops one sample between
                # the partitions — kept as-is, confirm whether intended.
                self.images_test = imagelist[(num + 1):len(imagelist)]
            if len(labellist) > 0:
                num = math.floor(split_train * len(labellist))
                self.labels_train = labellist[0:num]
                self.labels_test = labellist[(num + 1):len(labellist)]
        elif split_train == 1.0:
            if len(imagelist) > 0:
                self.images_train = imagelist
            if len(labellist) > 0:
                self.labels_train = labellist
        elif split_test == 1.0:
            if len(imagelist) > 0:
                self.images_test = imagelist
            if len(labellist) > 0:
                self.labels_test = labellist

    def __len__(self):
        """Return the size of the active (train or test) partition."""
        if self.train:
            return len(self.images_train)
        return len(self.images_test)

    def __getitem__(self, index):
        """Load, transform and return the (image, label) pair at `index`.

        BUG FIX: the original used an uninitialized local `input` (also a
        shadowed builtin) and could reference `image`/`label` unbound.
        """
        sample = {}
        if self.train:
            if len(self.images_train) > 0:
                sample['inp'] = self.loader_input(self.images_train[index])
            if len(self.labels_train) > 0:
                sample['tgt'] = self.loader_label(self.labels_train[index])
            if self.transform_train is not None:
                sample = self.transform_train(sample)
        else:
            if len(self.images_test) > 0:
                sample['inp'] = self.loader_input(self.images_test[index])
            if len(self.labels_test) > 0:
                sample['tgt'] = self.loader_label(self.labels_test[index])
            if self.transform_test is not None:
                sample = self.transform_test(sample)
        return (sample['inp'], sample['tgt'])
|
def loader_image(path):
    """Open the file at `path` and return it as an RGB PIL image."""
    img = Image.open(path)
    return img.convert('RGB')
|
def loader_torch(path):
    """Deserialize and return an object previously saved with torch.save."""
    obj = torch.load(path)
    return obj
|
def loader_numpy(path):
    """Load and return an array previously saved with numpy (np.save)."""
    arr = np.load(path)
    return arr
|
class Model():
    """Training harness: builds the network from parsed CLI args, sets up the
    loss and optimizer, and provides LR scheduling plus train/test loops.
    """

    def __init__(self, args):
        # `args` is an argparse-style namespace carrying the fields read below.
        self.cuda = torch.cuda.is_available()
        self.lr = args.learning_rate
        self.dataset_train_name = args.dataset_train
        self.nfilters = args.nfilters
        self.batch_size = args.batch_size
        self.level = args.level
        self.net_type = args.net_type
        self.nmasks = args.nmasks
        self.unique_masks = args.unique_masks
        self.filter_size = args.filter_size
        self.first_filter_size = args.first_filter_size
        self.scale_noise = args.scale_noise
        self.noise_type = args.noise_type
        self.act = args.act
        self.use_act = args.use_act
        self.dropout = args.dropout
        self.train_masks = args.train_masks
        self.debug = args.debug
        self.pool_type = args.pool_type
        self.mix_maps = args.mix_maps
        # Dataset-dependent geometry: input resolution, class count and the
        # kernel size of the final average pooling.
        if self.dataset_train_name.startswith('CIFAR'):
            self.input_size = 32
            self.nclasses = 10
            if (self.filter_size < 7):
                self.avgpool = 4
            elif (self.filter_size == 7):
                self.avgpool = 1
        elif self.dataset_train_name.startswith('MNIST'):
            self.nclasses = 10
            self.input_size = 28
            if (self.filter_size < 7):
                self.avgpool = 14
            elif (self.filter_size == 7):
                self.avgpool = 7
        # Instantiate the requested architecture by name from the `models` module.
        self.model = getattr(models, self.net_type)(nfilters=self.nfilters, avgpool=self.avgpool, nclasses=self.nclasses, nmasks=self.nmasks, unique_masks=self.unique_masks, level=self.level, filter_size=self.filter_size, first_filter_size=self.first_filter_size, act=self.act, scale_noise=self.scale_noise, noise_type=self.noise_type, use_act=self.use_act, dropout=self.dropout, train_masks=self.train_masks, pool_type=self.pool_type, debug=self.debug, input_size=self.input_size, mix_maps=self.mix_maps)
        self.loss_fn = nn.CrossEntropyLoss()
        if self.cuda:
            self.model = self.model.cuda()
            self.loss_fn = self.loss_fn.cuda()
        # Optimize only parameters that require gradients (frozen layers and
        # fixed noise masks are excluded).
        parameters = filter((lambda p: p.requires_grad), self.model.parameters())
        if (args.optim_method == 'Adam'):
            self.optimizer = optim.Adam(parameters, lr=self.lr, betas=(args.adam_beta1, args.adam_beta2), weight_decay=args.weight_decay)
        elif (args.optim_method == 'RMSprop'):
            self.optimizer = optim.RMSprop(parameters, lr=self.lr, momentum=args.momentum, weight_decay=args.weight_decay)
        elif (args.optim_method == 'SGD'):
            self.optimizer = optim.SGD(parameters, lr=self.lr, momentum=args.momentum, weight_decay=args.weight_decay, nesterov=True)
            # FIX: the original left its alternative setup as a bare string
            # expression (a no-op statement); kept here as a comment: to train
            # noise masks with a different learning rate, build optim.SGD with
            # two param groups, giving parameters whose name contains 'noise'
            # lr = self.lr * 10.
        else:
            raise Exception('Unknown Optimization Method')

    def learning_rate(self, epoch):
        """Return the scheduled learning rate for `epoch` (step decays)."""
        if (self.dataset_train_name == 'CIFAR10'):
            new_lr = (self.lr * (((((0.2 ** int((epoch >= 150))) * (0.2 ** int((epoch >= 250)))) * (0.2 ** int((epoch >= 300)))) * (0.2 ** int((epoch >= 350)))) * (0.2 ** int((epoch >= 400)))))
        elif (self.dataset_train_name == 'CIFAR100'):
            new_lr = (self.lr * (((0.1 ** int((epoch >= 80))) * (0.1 ** int((epoch >= 120)))) * (0.1 ** int((epoch >= 160)))))
        elif (self.dataset_train_name == 'MNIST'):
            new_lr = (self.lr * (((0.2 ** int((epoch >= 30))) * (0.2 ** int((epoch >= 60)))) * (0.2 ** int((epoch >= 90)))))
        elif (self.dataset_train_name == 'FRGC'):
            new_lr = (self.lr * (((0.1 ** int((epoch >= 80))) * (0.1 ** int((epoch >= 120)))) * (0.1 ** int((epoch >= 160)))))
        elif (self.dataset_train_name == 'ImageNet'):
            decay = math.floor(((epoch - 1) / 30))
            new_lr = (self.lr * math.pow(0.1, decay))
        else:
            # BUG FIX: the original fell through and raised UnboundLocalError
            # for unknown datasets; fail with an explicit message instead.
            raise ValueError('No learning-rate schedule for dataset {}'.format(self.dataset_train_name))
        return new_lr

    def train(self, epoch, dataloader):
        """Run one training epoch; return (mean loss, mean accuracy %)."""
        self.model.train()
        lr = self.learning_rate((epoch + 1))
        for param_group in self.optimizer.param_groups:
            param_group['lr'] = lr
        losses = []
        accuracies = []
        for (i, (input, label)) in enumerate(dataloader):
            if self.cuda:
                label = label.cuda()
                input = input.cuda()
            output = self.model(input)
            loss = self.loss_fn(output, label)
            if self.debug:
                print('\nBatch:', i)
            self.optimizer.zero_grad()
            loss.backward()
            self.optimizer.step()
            pred = output.data.max(1)[1]
            # BUG FIX: divide by the actual batch length, not the configured
            # batch size — the final batch of an epoch may be smaller.
            acc = ((pred.eq(label.data).cpu().sum() * 100.0) / label.size(0))
            losses.append(loss.item())
            accuracies.append(acc)
        return (np.mean(losses), np.mean(accuracies))

    def test(self, dataloader):
        """Evaluate on `dataloader`; return (mean loss, mean accuracy %)."""
        self.model.eval()
        losses = []
        accuracies = []
        with torch.no_grad():
            for (i, (input, label)) in enumerate(dataloader):
                if self.cuda:
                    label = label.cuda()
                    input = input.cuda()
                output = self.model(input)
                loss = self.loss_fn(output, label)
                pred = output.data.max(1)[1]
                # Same partial-batch fix as in train().
                acc = ((pred.eq(label.data).cpu().sum() * 100.0) / label.size(0))
                losses.append(loss.item())
                accuracies.append(acc)
        return (np.mean(losses), np.mean(accuracies))
|
class PerturbLayerFirst(nn.Module):
    """First-layer variant of PerturbLayer.

    With filter_size > 0 this is a plain conv-BN-activation block; with
    filter_size == 0 the input is perturbed by additive noise masks and the
    expanded maps are folded back through a grouped 1x1 convolution.

    NOTE(review): the activation is hard-coded to act_fn('sigmoid') and the
    `act` argument is ignored here, unlike PerturbLayer — confirm intended.
    """

    def __init__(self, in_channels=None, out_channels=None, nmasks=None, level=None, filter_size=None, debug=False, use_act=False, stride=1, act=None, unique_masks=False, mix_maps=None, train_masks=False, noise_type='uniform', input_size=None):
        super(PerturbLayerFirst, self).__init__()
        self.nmasks = nmasks              # noise masks applied per input channel
        self.unique_masks = unique_masks  # True: a separate mask set per channel
        self.train_masks = train_masks    # True: masks are learned by backprop
        self.level = level                # noise amplitude multiplier
        self.filter_size = filter_size    # 0 selects the noise-mask path
        self.use_act = use_act
        self.act = act_fn('sigmoid')
        self.debug = debug
        self.noise_type = noise_type      # 'uniform' or 'normal'
        self.in_channels = in_channels
        self.input_size = input_size      # spatial size of the (square) input
        self.mix_maps = mix_maps          # optional extra 1x1 map-mixing conv
        # Conv hyper-parameters by kernel size (used only when filter_size > 0).
        if (filter_size == 1):
            padding = 0
            bias = True
        elif ((filter_size == 3) or (filter_size == 5)):
            padding = 1
            bias = False
        elif (filter_size == 7):
            stride = 2
            padding = 3
            bias = False
        if (self.filter_size > 0):
            # Ordinary convolutional block; no noise masks are created.
            self.noise = None
            self.layers = nn.Sequential(nn.Conv2d(in_channels, out_channels, kernel_size=filter_size, padding=padding, stride=stride, bias=bias), nn.BatchNorm2d(out_channels), self.act)
        else:
            # One mask set per input channel when unique_masks, else shared.
            noise_channels = (in_channels if self.unique_masks else 1)
            shape = (1, noise_channels, self.nmasks, input_size, input_size)
            self.noise = nn.Parameter(torch.Tensor(*shape), requires_grad=self.train_masks)
            if (noise_type == 'uniform'):
                self.noise.data.uniform_((- 1), 1)
            elif (self.noise_type == 'normal'):
                self.noise.data.normal_()
            else:
                print('\n\nNoise type {} is not supported / understood\n\n'.format(self.noise_type))
            if (nmasks != 1):
                if ((out_channels % in_channels) != 0):
                    print('\n\n\nnfilters must be divisible by 3 if using multiple noise masks per input channel\n\n\n')
                groups = in_channels
            else:
                groups = 1
            # BN + activation on the expanded maps, then a grouped 1x1 conv
            # reduces (in_channels * nmasks) maps down to out_channels.
            self.layers = nn.Sequential(nn.BatchNorm2d((in_channels * self.nmasks)), self.act, nn.Conv2d((in_channels * self.nmasks), out_channels, kernel_size=1, stride=1, groups=groups), nn.BatchNorm2d(out_channels), self.act)
            if self.mix_maps:
                self.mix_layers = nn.Sequential(nn.Conv2d(out_channels, out_channels, kernel_size=1, stride=1, groups=1), nn.BatchNorm2d(out_channels), self.act)

    def forward(self, x):
        if (self.filter_size > 0):
            return self.layers(x)
        else:
            # Broadcast-add scaled masks: (B, C, 1, H, W) + (1, C|1, M, H, W).
            y = torch.add(x.unsqueeze(2), (self.noise * self.level))
            if self.debug:
                print_values(x, self.noise, y, self.unique_masks)
            y = y.view((- 1), (self.in_channels * self.nmasks), self.input_size, self.input_size)
            y = self.layers(y)
            if self.mix_maps:
                y = self.mix_layers(y)
            return y
|
class PerturbLayer(nn.Module):
    """Perturbation layer.

    With filter_size > 0 this is an ordinary conv-BN-activation block; with
    filter_size == 0 it adds fixed (or trainable) noise masks to the input
    and mixes the expanded maps with a grouped 1x1 convolution.
    """

    def __init__(self, in_channels=None, out_channels=None, nmasks=None, level=None, filter_size=None, debug=False, use_act=False, stride=1, act=None, unique_masks=False, mix_maps=None, train_masks=False, noise_type='uniform', input_size=None):
        super(PerturbLayer, self).__init__()
        self.nmasks = nmasks              # noise masks applied per input channel
        self.unique_masks = unique_masks  # True: a separate mask set per channel
        self.train_masks = train_masks    # True: masks are learned by backprop
        self.level = level                # noise amplitude multiplier
        self.filter_size = filter_size    # 0 selects the noise-mask path
        self.use_act = use_act            # apply activation right after the noise add
        self.act = act_fn(act)
        self.debug = debug
        self.noise_type = noise_type      # 'uniform' or 'normal'
        self.in_channels = in_channels
        self.input_size = input_size      # spatial size of the (square) input
        self.mix_maps = mix_maps          # optional extra 1x1 map-mixing conv
        # Conv hyper-parameters by kernel size (used only when filter_size > 0).
        if (filter_size == 1):
            padding = 0
            bias = True
        elif ((filter_size == 3) or (filter_size == 5)):
            padding = 1
            bias = False
        elif (filter_size == 7):
            stride = 2
            padding = 3
            bias = False
        if (self.filter_size > 0):
            # Ordinary convolutional block; no noise masks are created.
            self.noise = None
            self.layers = nn.Sequential(nn.Conv2d(in_channels, out_channels, kernel_size=filter_size, padding=padding, stride=stride, bias=bias), nn.BatchNorm2d(out_channels), self.act)
        else:
            # One mask set per input channel when unique_masks, else shared.
            noise_channels = (in_channels if self.unique_masks else 1)
            shape = (1, noise_channels, self.nmasks, input_size, input_size)
            self.noise = nn.Parameter(torch.Tensor(*shape), requires_grad=self.train_masks)
            if (noise_type == 'uniform'):
                self.noise.data.uniform_((- 1), 1)
            elif (self.noise_type == 'normal'):
                self.noise.data.normal_()
            else:
                print('\n\nNoise type {} is not supported / understood\n\n'.format(self.noise_type))
            if (nmasks != 1):
                if ((out_channels % in_channels) != 0):
                    print('\n\n\nnfilters must be divisible by 3 if using multiple noise masks per input channel\n\n\n')
                groups = in_channels
            else:
                groups = 1
            # Grouped 1x1 conv folds (in_channels * nmasks) maps to out_channels.
            self.layers = nn.Sequential(nn.Conv2d((in_channels * self.nmasks), out_channels, kernel_size=1, stride=1, groups=groups), nn.BatchNorm2d(out_channels), self.act)
            if self.mix_maps:
                self.mix_layers = nn.Sequential(nn.Conv2d(out_channels, out_channels, kernel_size=1, stride=1, groups=1), nn.BatchNorm2d(out_channels), self.act)

    def forward(self, x):
        if (self.filter_size > 0):
            return self.layers(x)
        else:
            # Broadcast-add scaled masks: (B, C, 1, H, W) + (1, C|1, M, H, W).
            y = torch.add(x.unsqueeze(2), (self.noise * self.level))
            if self.debug:
                print_values(x, self.noise, y, self.unique_masks)
            if self.use_act:
                y = self.act(y)
            y = y.view((- 1), (self.in_channels * self.nmasks), self.input_size, self.input_size)
            y = self.layers(y)
            if self.mix_maps:
                y = self.mix_layers(y)
            return y
|
class PerturbBasicBlock(nn.Module):
    """Residual block of two PerturbLayers separated by a pooling step."""

    expansion = 1

    def __init__(self, in_channels=None, out_channels=None, stride=1, shortcut=None, nmasks=None, train_masks=False, level=None, use_act=False, filter_size=None, act=None, unique_masks=False, noise_type=None, input_size=None, pool_type=None, mix_maps=None):
        super(PerturbBasicBlock, self).__init__()
        self.shortcut = shortcut
        # Dispatch the pooling class from the requested type.
        pool_classes = {'max': nn.MaxPool2d, 'avg': nn.AvgPool2d}
        if pool_type not in pool_classes:
            print('\n\nPool Type {} is not supported/understood\n\n'.format(pool_type))
            return
        pool = pool_classes[pool_type]
        # Two perturbation layers; the second sees the pooled resolution.
        self.layers = nn.Sequential(
            PerturbLayer(in_channels=in_channels, out_channels=out_channels, nmasks=nmasks, input_size=input_size, level=level, filter_size=filter_size, use_act=use_act, train_masks=train_masks, act=act, unique_masks=unique_masks, noise_type=noise_type, mix_maps=mix_maps),
            pool(stride, stride),
            PerturbLayer(in_channels=out_channels, out_channels=out_channels, nmasks=nmasks, input_size=(input_size // stride), level=level, filter_size=filter_size, use_act=use_act, train_masks=train_masks, act=act, unique_masks=unique_masks, noise_type=noise_type, mix_maps=mix_maps),
        )

    def forward(self, x):
        out = self.layers(x)
        skip = self.shortcut(x) if self.shortcut else x
        out += skip
        return F.relu(out)
|
class PerturbResNet(nn.Module):
    """ResNet built from perturbation blocks with a noise-based first layer.

    The stem (PerturbLayerFirst) expands the 3 input channels to 3*nfilters
    maps using nfilters*5 masks, then a 1x1 conv reduces back to nfilters
    before the four residual stages.
    """

    def __init__(self, block, nblocks=None, avgpool=None, nfilters=None, nclasses=None, nmasks=None, input_size=32, level=None, filter_size=None, first_filter_size=None, use_act=False, train_masks=False, mix_maps=None, act=None, scale_noise=1, unique_masks=False, debug=False, noise_type=None, pool_type=None):
        super(PerturbResNet, self).__init__()
        self.nfilters = nfilters
        self.unique_masks = unique_masks
        self.noise_type = noise_type
        self.train_masks = train_masks
        self.pool_type = pool_type
        self.mix_maps = mix_maps
        self.act = act_fn(act)
        # Stem: noise level is boosted by scale_noise * 20 for the first layer.
        layers = [PerturbLayerFirst(in_channels=3, out_channels=(3 * nfilters), nmasks=(nfilters * 5), level=((level * scale_noise) * 20), debug=debug, filter_size=first_filter_size, use_act=use_act, train_masks=train_masks, input_size=input_size, act=act, unique_masks=self.unique_masks, noise_type=self.noise_type, mix_maps=mix_maps)]
        if (first_filter_size == 7):
            layers.append(nn.MaxPool2d(kernel_size=3, stride=2, padding=1))
        self.pre_layers = nn.Sequential(*layers, nn.Conv2d(((self.nfilters * 3) * 1), self.nfilters, kernel_size=1, stride=1, bias=False), nn.BatchNorm2d(self.nfilters), self.act)
        # Residual stages; input_size reflects the map size each stage receives
        # (halved by the stride of the previous stage).
        self.layer1 = self._make_layer(block, (1 * nfilters), nblocks[0], stride=1, level=level, nmasks=nmasks, use_act=True, filter_size=filter_size, act=act, input_size=input_size)
        self.layer2 = self._make_layer(block, (2 * nfilters), nblocks[1], stride=2, level=level, nmasks=nmasks, use_act=True, filter_size=filter_size, act=act, input_size=input_size)
        self.layer3 = self._make_layer(block, (4 * nfilters), nblocks[2], stride=2, level=level, nmasks=nmasks, use_act=True, filter_size=filter_size, act=act, input_size=(input_size // 2))
        self.layer4 = self._make_layer(block, (8 * nfilters), nblocks[3], stride=2, level=level, nmasks=nmasks, use_act=True, filter_size=filter_size, act=act, input_size=(input_size // 4))
        self.avgpool = nn.AvgPool2d(avgpool, stride=1)
        self.linear = nn.Linear(((8 * nfilters) * block.expansion), nclasses)

    def _make_layer(self, block, out_channels, nblocks, stride=1, level=0.2, nmasks=None, use_act=False, filter_size=None, act=None, input_size=None):
        # Projection shortcut when the spatial size or channel count changes.
        shortcut = None
        if ((stride != 1) or (self.nfilters != (out_channels * block.expansion))):
            shortcut = nn.Sequential(nn.Conv2d(self.nfilters, (out_channels * block.expansion), kernel_size=1, stride=stride, bias=False), nn.BatchNorm2d((out_channels * block.expansion)))
        layers = []
        layers.append(block(self.nfilters, out_channels, stride, shortcut, level=level, nmasks=nmasks, use_act=use_act, filter_size=filter_size, act=act, unique_masks=self.unique_masks, noise_type=self.noise_type, train_masks=self.train_masks, input_size=input_size, pool_type=self.pool_type, mix_maps=self.mix_maps))
        # Track the running channel count for the next block/stage.
        self.nfilters = (out_channels * block.expansion)
        for i in range(1, nblocks):
            layers.append(block(self.nfilters, out_channels, level=level, nmasks=nmasks, use_act=use_act, train_masks=self.train_masks, filter_size=filter_size, act=act, unique_masks=self.unique_masks, noise_type=self.noise_type, input_size=(input_size // stride), pool_type=self.pool_type, mix_maps=self.mix_maps))
        return nn.Sequential(*layers)

    def forward(self, x):
        x = self.pre_layers(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.avgpool(x)
        x = x.view(x.size(0), (- 1))  # flatten for the classifier head
        x = self.linear(x)
        return x
|
class LeNet(nn.Module):
    """LeNet-style classifier built from PerturbLayers.

    Three perturbation stages, each followed by a stride-2 pooling, feed a
    two-layer classifier head with batch-norm and dropout.
    """

    def __init__(self, nfilters=None, nclasses=None, nmasks=None, level=None, filter_size=None, linear=128, input_size=28, debug=False, scale_noise=1, act='relu', use_act=False, first_filter_size=None, pool_type=None, dropout=None, unique_masks=False, train_masks=False, noise_type='uniform', mix_maps=None):
        super(LeNet, self).__init__()
        # n*n is the flattened spatial size of the final feature map; it
        # depends on the hidden-stage filter size (5 shrinks differently).
        if (filter_size == 5):
            n = 5
        else:
            n = 4
        # Infer input channels from resolution: 32 -> RGB, 28 -> grayscale.
        if (input_size == 32):
            first_channels = 3
        elif (input_size == 28):
            first_channels = 1
        if (pool_type == 'max'):
            pool = nn.MaxPool2d
        elif (pool_type == 'avg'):
            pool = nn.AvgPool2d
        else:
            print('\n\nPool Type {} is not supported/understood\n\n'.format(pool_type))
            return
        self.linear1 = nn.Linear(((nfilters * n) * n), linear)
        self.linear2 = nn.Linear(linear, nclasses)
        self.dropout = nn.Dropout(p=dropout)
        self.act = act_fn(act)
        self.batch_norm = nn.BatchNorm1d(linear)
        # Feature extractor; the first layer uses a boosted noise level
        # (level * scale_noise) and each stage halves the resolution.
        self.first_layers = nn.Sequential(PerturbLayer(in_channels=first_channels, out_channels=nfilters, nmasks=nmasks, level=(level * scale_noise), filter_size=first_filter_size, use_act=use_act, act=act, unique_masks=unique_masks, train_masks=train_masks, noise_type=noise_type, input_size=input_size, mix_maps=mix_maps), pool(kernel_size=3, stride=2, padding=1), PerturbLayer(in_channels=nfilters, out_channels=nfilters, nmasks=nmasks, level=level, filter_size=filter_size, use_act=True, act=act, unique_masks=unique_masks, debug=debug, train_masks=train_masks, noise_type=noise_type, input_size=(input_size // 2), mix_maps=mix_maps), pool(kernel_size=3, stride=2, padding=1), PerturbLayer(in_channels=nfilters, out_channels=nfilters, nmasks=nmasks, level=level, filter_size=filter_size, use_act=True, act=act, unique_masks=unique_masks, train_masks=train_masks, noise_type=noise_type, input_size=(input_size // 4), mix_maps=mix_maps), pool(kernel_size=3, stride=2, padding=1))
        self.last_layers = nn.Sequential(self.dropout, self.linear1, self.batch_norm, self.act, self.dropout, self.linear2)

    def forward(self, x):
        x = self.first_layers(x)
        x = x.view(x.size(0), (- 1))  # flatten for the classifier head
        x = self.last_layers(x)
        return x
|
class CifarNet(nn.Module):
    """CIFAR-scale classifier built from PerturbLayers.

    Seven perturbation layers grouped by three stride-2 poolings, feeding a
    two-layer classifier head with batch-norm and dropout.
    """

    def __init__(self, nfilters=None, nclasses=None, nmasks=None, level=None, filter_size=None, input_size=32, linear=256, scale_noise=1, act='relu', use_act=False, first_filter_size=None, pool_type=None, dropout=None, unique_masks=False, debug=False, train_masks=False, noise_type='uniform', mix_maps=None):
        super(CifarNet, self).__init__()
        # n*n is the flattened spatial size of the final feature map; it
        # depends on the hidden-stage filter size (5 shrinks differently).
        if (filter_size == 5):
            n = 5
        else:
            n = 4
        # Infer input channels from resolution: 32 -> RGB, 28 -> grayscale.
        if (input_size == 32):
            first_channels = 3
        elif (input_size == 28):
            first_channels = 1
        if (pool_type == 'max'):
            pool = nn.MaxPool2d
        elif (pool_type == 'avg'):
            pool = nn.AvgPool2d
        else:
            print('\n\nPool Type {} is not supported/understood\n\n'.format(pool_type))
            return
        self.linear1 = nn.Linear(((nfilters * n) * n), linear)
        self.linear2 = nn.Linear(linear, nclasses)
        self.dropout = nn.Dropout(p=dropout)
        self.act = act_fn(act)
        self.batch_norm = nn.BatchNorm1d(linear)
        # Feature extractor; the first layer uses a boosted noise level
        # (level * scale_noise); poolings halve the resolution three times.
        self.first_layers = nn.Sequential(PerturbLayer(in_channels=first_channels, out_channels=nfilters, nmasks=nmasks, level=(level * scale_noise), unique_masks=unique_masks, filter_size=first_filter_size, use_act=use_act, input_size=input_size, act=act, train_masks=train_masks, noise_type=noise_type, mix_maps=mix_maps), PerturbLayer(in_channels=nfilters, out_channels=nfilters, nmasks=nmasks, level=level, filter_size=filter_size, debug=debug, use_act=True, act=act, mix_maps=mix_maps, unique_masks=unique_masks, train_masks=train_masks, noise_type=noise_type, input_size=input_size), pool(kernel_size=3, stride=2, padding=1), PerturbLayer(in_channels=nfilters, out_channels=nfilters, nmasks=nmasks, level=level, filter_size=filter_size, use_act=True, act=act, unique_masks=unique_masks, mix_maps=mix_maps, train_masks=train_masks, noise_type=noise_type, input_size=(input_size // 2)), PerturbLayer(in_channels=nfilters, out_channels=nfilters, nmasks=nmasks, level=level, filter_size=filter_size, use_act=True, act=act, unique_masks=unique_masks, mix_maps=mix_maps, train_masks=train_masks, noise_type=noise_type, input_size=(input_size // 2)), pool(kernel_size=3, stride=2, padding=1), PerturbLayer(in_channels=nfilters, out_channels=nfilters, nmasks=nmasks, level=level, filter_size=filter_size, use_act=True, act=act, unique_masks=unique_masks, mix_maps=mix_maps, train_masks=train_masks, noise_type=noise_type, input_size=(input_size // 4)), PerturbLayer(in_channels=nfilters, out_channels=nfilters, nmasks=nmasks, level=level, filter_size=filter_size, use_act=True, act=act, unique_masks=unique_masks, mix_maps=mix_maps, train_masks=train_masks, noise_type=noise_type, input_size=(input_size // 4)), pool(kernel_size=3, stride=2, padding=1))
        self.last_layers = nn.Sequential(self.dropout, self.linear1, self.batch_norm, self.act, self.dropout, self.linear2)

    def forward(self, x):
        x = self.first_layers(x)
        x = x.view(x.size(0), (- 1))  # flatten for the classifier head
        x = self.last_layers(x)
        return x
|
class NoiseLayer(nn.Module):
    """Adds a fixed uniform noise mask to its input, then ReLU-BN-1x1 conv.

    The mask is sized lazily from the first input sample and is not trained.
    """

    def __init__(self, in_planes, out_planes, level):
        super(NoiseLayer, self).__init__()
        # `device` is expected to be a module-level global in this project.
        self.noise = nn.Parameter(torch.Tensor(0), requires_grad=False).to(device)
        self.level = level  # noise amplitude: mask values end up in [-level, level]
        self.layers = nn.Sequential(nn.ReLU(True), nn.BatchNorm2d(in_planes), nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=1))

    def forward(self, x):
        if (self.noise.numel() == 0):
            # Lazily create the mask with the shape of a single sample.
            # BUG FIX: the original rebound `self.noise` to a plain tensor via
            # `self.noise = (2 * self.noise - 1) * self.level`; when `noise` is
            # a registered Parameter, nn.Module.__setattr__ rejects assigning a
            # non-Parameter to it (TypeError). Update the underlying data
            # in place instead, preserving the registered attribute.
            self.noise.data.resize_(x.data[0].shape).uniform_()
            self.noise.data = ((2 * self.noise.data) - 1) * self.level
        y = torch.add(x, self.noise)
        return self.layers(y)
|
class NoiseBasicBlock(nn.Module):
    """Residual block built from two NoiseLayers with an optional shortcut."""

    expansion = 1

    def __init__(self, in_planes, planes, stride=1, shortcut=None, level=0.2):
        super(NoiseBasicBlock, self).__init__()
        # Main path: noise layer, downsampling pool, BN/ReLU, second noise layer.
        self.layers = nn.Sequential(
            NoiseLayer(in_planes, planes, level),
            nn.MaxPool2d(stride, stride),
            nn.BatchNorm2d(planes),
            nn.ReLU(True),
            NoiseLayer(planes, planes, level),
            nn.BatchNorm2d(planes),
        )
        self.shortcut = shortcut

    def forward(self, x):
        out = self.layers(x)
        skip = self.shortcut(x) if self.shortcut else x
        out += skip
        return F.relu(out)
|
class NoiseResNet(nn.Module):
    """ResNet-style network built from NoiseBasicBlocks.

    The stem depends on first_filter_size: 7 gives an ImageNet-style stem
    (stride-2 conv + max-pool, final pool size 1); 3 gives a CIFAR-style
    stem (final pool size 4); 0 is rejected because this model does not
    support noise masks in the first layer.
    """

    def __init__(self, block, nblocks, nfilters, nclasses, pool, level, first_filter_size=3):
        super(NoiseResNet, self).__init__()
        self.in_planes = nfilters
        if (first_filter_size == 7):
            pool = 1
            self.pre_layers = nn.Sequential(nn.Conv2d(3, nfilters, kernel_size=first_filter_size, stride=2, padding=3, bias=False), nn.BatchNorm2d(nfilters), nn.ReLU(True), nn.MaxPool2d(kernel_size=3, stride=2, padding=1))
        elif (first_filter_size == 3):
            pool = 4
            self.pre_layers = nn.Sequential(nn.Conv2d(3, nfilters, kernel_size=first_filter_size, stride=1, padding=1, bias=False), nn.BatchNorm2d(nfilters), nn.ReLU(True))
        elif (first_filter_size == 0):
            print('\n\nThe original noiseresnet18 model does not support noise masks in the first layer, use perturb_resnet18 model, or set first_filter_size to 3 or 7\n\n')
            return
        # The stem convolution is frozen (excluded from training).
        # NOTE(review): for a first_filter_size other than 7, 3 or 0,
        # pre_layers is never created and this line raises AttributeError.
        self.pre_layers[0].weight.requires_grad = False
        self.layer1 = self._make_layer(block, (1 * nfilters), nblocks[0], stride=1, level=level)
        self.layer2 = self._make_layer(block, (2 * nfilters), nblocks[1], stride=2, level=level)
        self.layer3 = self._make_layer(block, (4 * nfilters), nblocks[2], stride=2, level=level)
        self.layer4 = self._make_layer(block, (8 * nfilters), nblocks[3], stride=2, level=level)
        self.avgpool = nn.AvgPool2d(pool, stride=1)
        self.linear = nn.Linear(((8 * nfilters) * block.expansion), nclasses)

    def _make_layer(self, block, planes, nblocks, stride=1, level=0.2, filter_size=1):
        # Projection shortcut when the spatial size or channel count changes.
        shortcut = None
        if ((stride != 1) or (self.in_planes != (planes * block.expansion))):
            shortcut = nn.Sequential(nn.Conv2d(self.in_planes, (planes * block.expansion), kernel_size=1, stride=stride, bias=False), nn.BatchNorm2d((planes * block.expansion)))
        layers = []
        layers.append(block(self.in_planes, planes, stride, shortcut, level=level))
        # Track the running channel count for subsequent blocks.
        self.in_planes = (planes * block.expansion)
        for i in range(1, nblocks):
            layers.append(block(self.in_planes, planes, level=level))
        return nn.Sequential(*layers)

    def forward(self, x):
        x1 = self.pre_layers(x)
        x2 = self.layer1(x1)
        x3 = self.layer2(x2)
        x4 = self.layer3(x3)
        x5 = self.layer4(x4)
        x6 = self.avgpool(x5)
        x7 = x6.view(x6.size(0), (- 1))  # flatten for the classifier head
        x8 = self.linear(x7)
        return x8
|
class BasicBlock(nn.Module):
    """Standard ResNet basic block: two 3x3 convolutions plus a skip path."""

    expansion = 1

    def __init__(self, in_planes, planes, stride=1):
        super(BasicBlock, self).__init__()
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        # 1x1 projection shortcut only when shape changes; identity otherwise.
        needs_projection = (stride != 1) or (in_planes != (self.expansion * planes))
        if needs_projection:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, (self.expansion * planes), kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d((self.expansion * planes)),
            )
        else:
            self.shortcut = nn.Sequential()

    def forward(self, x):
        h = F.relu(self.bn1(self.conv1(x)))
        h = self.bn2(self.conv2(h))
        h = h + self.shortcut(x)
        return F.relu(h)
|
class ResNet(nn.Module):
def __init__(self, block, num_blocks, nfilters=64, avgpool=4, nclasses=10):
super(ResNet, self).__init__()
self.in_planes = nfilters
self.avgpool = avgpool
self.conv1 = nn.Conv2d(3, nfilters, kernel_size=3, stride=1, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(nfilters)
self.layer1 = self._make_layer(block, nfilters, num_blocks[0], stride=1)
self.layer2 = self._make_layer(block, (nfilters * 2), num_blocks[1], stride=2)
self.layer3 = self._make_layer(block, (nfilters * 4), num_blocks[2], stride=2)
self.layer4 = self._make_layer(block, (nfilters * 8), num_blocks[3], stride=2)
self.linear = nn.Linear(((nfilters * 8) * block.expansion), nclasses)
def _make_layer(self, block, planes, num_blocks, stride):
strides = ([stride] + ([1] * (num_blocks - 1)))
layers = []
for stride in strides:
layers.append(block(self.in_planes, planes, stride))
self.in_planes = (planes * block.expansion)
return nn.Sequential(*layers)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.layer1(out)
out = self.layer2(out)
out = self.layer3(out)
out = self.layer4(out)
out = F.avg_pool2d(out, self.avgpool)
out = out.view(out.size(0), (- 1))
out = self.linear(out)
return out
|
def resnet18(nfilters, avgpool=4, nclasses=10, nmasks=32, level=0.1, filter_size=0, first_filter_size=0, pool_type=None, input_size=None, scale_noise=1, act='relu', use_act=True, dropout=0.5, unique_masks=False, noise_type='uniform', train_masks=False, debug=False, mix_maps=None):
    """Build a plain ResNet-18 baseline.

    The perturbation-related keyword arguments are accepted only so every
    model factory shares the same interface; they are ignored here.
    """
    stage_depths = [2, 2, 2, 2]
    return ResNet(BasicBlock, stage_depths, nfilters=nfilters, avgpool=avgpool, nclasses=nclasses)
|
def noiseresnet18(nfilters, avgpool=4, nclasses=10, nmasks=32, level=0.1, filter_size=0, first_filter_size=7, pool_type=None, input_size=None, scale_noise=1, act='relu', use_act=True, dropout=0.5, unique_masks=False, debug=False, noise_type='uniform', train_masks=False, mix_maps=None):
    """Build a ResNet-18 made of NoiseBasicBlocks.

    Unused keyword arguments are kept so all factories share one interface.
    """
    stage_depths = [2, 2, 2, 2]
    return NoiseResNet(NoiseBasicBlock, stage_depths, nfilters=nfilters, pool=avgpool, nclasses=nclasses, level=level, first_filter_size=first_filter_size)
|
def perturb_resnet18(nfilters, avgpool=4, nclasses=10, nmasks=32, level=0.1, filter_size=0, first_filter_size=0, pool_type=None, input_size=None, scale_noise=1, act='relu', use_act=True, dropout=0.5, unique_masks=False, debug=False, noise_type='uniform', train_masks=False, mix_maps=None):
    """Build a ResNet-18 made of PerturbBasicBlocks (noise-mask layers)."""
    config = dict(nfilters=nfilters, avgpool=avgpool, nclasses=nclasses, pool_type=pool_type, scale_noise=scale_noise, nmasks=nmasks, level=level, filter_size=filter_size, train_masks=train_masks, first_filter_size=first_filter_size, act=act, use_act=use_act, unique_masks=unique_masks, debug=debug, noise_type=noise_type, input_size=input_size, mix_maps=mix_maps)
    return PerturbResNet(PerturbBasicBlock, [2, 2, 2, 2], **config)
|
def lenet(nfilters, avgpool=None, nclasses=10, nmasks=32, level=0.1, filter_size=3, first_filter_size=0, pool_type=None, input_size=None, scale_noise=1, act='relu', use_act=True, dropout=0.5, unique_masks=False, debug=False, noise_type='uniform', train_masks=False, mix_maps=None):
    """Build the LeNet-style perturbation model (avgpool is unused)."""
    config = dict(nfilters=nfilters, nclasses=nclasses, nmasks=nmasks, level=level, filter_size=filter_size, pool_type=pool_type, scale_noise=scale_noise, act=act, first_filter_size=first_filter_size, input_size=input_size, mix_maps=mix_maps, use_act=use_act, dropout=dropout, unique_masks=unique_masks, debug=debug, noise_type=noise_type, train_masks=train_masks)
    return LeNet(**config)
|
def cifarnet(nfilters, avgpool=None, nclasses=10, nmasks=32, level=0.1, filter_size=3, first_filter_size=0, pool_type=None, input_size=None, scale_noise=1, act='relu', use_act=True, dropout=0.5, unique_masks=False, debug=False, noise_type='uniform', train_masks=False, mix_maps=None):
    """Build the CifarNet perturbation model (avgpool is unused)."""
    config = dict(nfilters=nfilters, nclasses=nclasses, nmasks=nmasks, level=level, filter_size=filter_size, pool_type=pool_type, scale_noise=scale_noise, act=act, use_act=use_act, first_filter_size=first_filter_size, input_size=input_size, dropout=dropout, unique_masks=unique_masks, debug=debug, noise_type=noise_type, train_masks=train_masks, mix_maps=mix_maps)
    return CifarNet(**config)
|
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.linear = nn.Linear(((9 * 6) * 6), 10)
self.noise = nn.Parameter(torch.Tensor(1, 1, 28, 28), requires_grad=True)
self.noise.data.uniform_((- 1), 1)
self.layers = nn.Sequential(nn.Conv2d(1, 9, kernel_size=5, stride=2, bias=False), nn.MaxPool2d(2, 2), nn.ReLU())
def forward(self, x):
x = torch.add(x, self.noise)
x = self.layers(x)
x = x.view(x.size(0), (- 1))
x = self.linear(x)
print('{:.5f}'.format(self.noise.data[(0, 0, 0, 0)].cpu().numpy()))
return x
|
class Normalize(nn.Module):
    """Channel-wise (x - mean) / std normalization for 3-channel inputs."""

    def __init__(self, mean, std):
        super(Normalize, self).__init__()
        # Buffers follow the module across devices but are not trained.
        self.register_buffer('mean', torch.Tensor(mean))
        self.register_buffer('std', torch.Tensor(std))

    def forward(self, x):
        mu = self.mean.reshape(1, 3, 1, 1)
        sigma = self.std.reshape(1, 3, 1, 1)
        return (x - mu) / sigma
|
def add_data_normalization(model, mean, std):
    """Wrap `model` so inputs are normalized with `mean`/`std` before the
    forward pass; returns the wrapping nn.Sequential."""
    return torch.nn.Sequential(Normalize(mean=mean, std=std), model)
|
def apply_attack_on_dataset(model, dataloader, attack, epsilons, device, verbose=True):
    """Measure clean accuracy, then robust accuracy under `attack` per epsilon.

    Args:
        model: classifier returning logits; called as model(images).
        dataloader: yields (images, labels) batches.
        attack: adversarial attack callable with a mutable `eps` attribute
            (torchattacks-style); called as attack(images, labels).
        epsilons: iterable of perturbation budgets to sweep.
        device: device the batches are moved to.
        verbose: print progress (tqdm) and per-epsilon results.

    Returns:
        (clean_accuracy, robust_accuracy): mean clean accuracy (float) and a
        list with one mean robust accuracy per epsilon.

    Note: accuracies are means of per-batch accuracies, so a smaller final
    batch is weighted equally with full batches.
    """
    robust_accuracy = []
    c_a = []  # per-batch clean accuracies
    for (images, labels) in dataloader:
        (images, labels) = (images.to(device), labels.to(device))
        outputs = model(images)
        (_, pre) = torch.max(outputs.data, 1)
        correct_predictions = (pre == labels)
        c_a.append((correct_predictions.sum() / len(correct_predictions)).cpu().numpy())
    clean_accuracy = np.mean(c_a)
    print('Clean accuracy: ', clean_accuracy)
    for epsilon in epsilons:
        # The attack object is mutated in place for each budget.
        attack.eps = epsilon
        r_a = []  # per-batch robust accuracies
        if verbose:
            print('Epsilon: ', epsilon)
            # Progress bar is only bound when verbose; t.update below is
            # guarded by the same flag.
            t = trange(len(dataloader))
        for (images, labels) in dataloader:
            (images, labels) = (images.to(device), labels.to(device))
            adv_images = attack(images, labels)
            outputs = model(adv_images)
            (_, pre) = torch.max(outputs.data, 1)
            correct_predictions = (pre == labels)
            r_a.append((correct_predictions.sum() / len(correct_predictions)).cpu().numpy())
            if verbose:
                t.update(1)
        robust_acc = np.mean(r_a)
        if verbose:
            print('Robust accuracy: ', robust_acc)
        robust_accuracy.append(robust_acc)
    return (clean_accuracy, robust_accuracy)
|
def apply_attack_on_batch(model, images, labels, attack, device):
    """Run `attack` on a single batch and report clean vs. robust accuracy.

    Args:
        model: classifier returning logits; called as model(images).
        images, labels: one batch of inputs and targets.
        attack: adversarial attack callable; called as attack(images, labels).
        device: device the batch is moved to.

    Returns:
        (adv_images, adversarial_success, clean_accuracy, robust_accuracy):
        adversarial images (on CPU), a per-sample list where True means the
        sample flipped from correctly classified to misclassified, and the two
        accuracies as fractions in [0, 1].
    """
    images, labels = images.to(device), labels.to(device)
    # Clean predictions.
    outputs = model(images)
    _, pre = torch.max(outputs.data, 1)
    correct_predictions = (pre == labels).cpu().numpy()
    clean_accuracy = correct_predictions.sum() / len(correct_predictions)
    # Adversarial predictions.
    adv_images = attack(images, labels)
    outputs = model(adv_images)
    _, pre = torch.max(outputs.data, 1)
    correct_predictions_adv = (pre == labels).cpu().numpy()
    robust_accuracy = correct_predictions_adv.sum() / len(correct_predictions_adv)
    # A sample is an adversarial success iff it was correct on the clean input
    # but wrong on the adversarial one.
    adversarial_success = [bool(pred_c and (not pred_r))
                           for (pred_c, pred_r) in zip(correct_predictions, correct_predictions_adv)]
    # Bug fix: the accuracies are fractions, so scale by 100 to match the
    # '%' suffix in the message (previously printed e.g. '0.95%').
    print('Clean Accuracy on Batch: {}%'.format(clean_accuracy * 100))
    print('Robust Accuracy on Batch: {}%'.format(robust_accuracy * 100))
    return (adv_images.cpu(), adversarial_success, clean_accuracy, robust_accuracy)
|
def plot_accuracy(x, accuracy, methods, title, xlabel='x', ylabel='accuracy'):
    """Plot one accuracy curve per method on a shared x axis.

    Args:
        x: shared x-axis values.
        accuracy: sequence of y-value sequences, one per method.
        methods: legend labels, parallel to `accuracy`.
        title, xlabel, ylabel: figure annotations.
    """
    # zip pairs each curve with its label (idiomatic replacement for
    # indexing both sequences with range(len(methods))).
    for curve, method in zip(accuracy, methods):
        plt.plot(x, curve, label=method)
    plt.legend()
    plt.xlabel(xlabel)
    plt.ylabel(ylabel)
    plt.title(title)
    plt.show()
|
def imshow(img, title):
    """Display a CHW image tensor with matplotlib under the given title."""
    plt.figure(figsize=(15, 15))
    # matplotlib expects HWC channel ordering.
    plt.imshow(np.transpose(img.numpy(), (1, 2, 0)))
    plt.title(title)
    plt.show()
|
class Conv2dGrad(autograd.Function):
    """2D convolution as an explicit autograd Function with a hand-written
    backward pass computing the standard backpropagation gradients.

    NOTE(review): a second Conv2dGrad with a 9-argument forward appears later
    in this file and shadows this definition if both live in one module —
    likely these originate from separate source files.
    """
    @staticmethod
    def forward(context, input, weight, bias, stride, padding, dilation, groups):
        # Stash the conv hyperparameters on the context; only tensors may go
        # through save_for_backward.
        (context.stride, context.padding, context.dilation, context.groups) = (stride, padding, dilation, groups)
        context.save_for_backward(input, weight, bias)
        output = torch.nn.functional.conv2d(input, weight, bias=bias, stride=stride, padding=padding, dilation=dilation, groups=groups)
        return output
    @staticmethod
    def backward(context, grad_output):
        (input, weight, bias) = context.saved_tensors
        grad_input = grad_weight = grad_bias = None
        if context.needs_input_grad[0]:
            # dL/dinput via the transposed convolution with the forward weights.
            grad_input = torch.nn.grad.conv2d_input(input_size=input.shape, weight=weight, grad_output=grad_output, stride=context.stride, padding=context.padding, dilation=context.dilation, groups=context.groups)
        if context.needs_input_grad[1]:
            grad_weight = torch.nn.grad.conv2d_weight(input=input, weight_size=weight.shape, grad_output=grad_output, stride=context.stride, padding=context.padding, dilation=context.dilation, groups=context.groups)
        if ((bias is not None) and context.needs_input_grad[2]):
            # Bias gradient: sum grad_output over batch and both spatial dims.
            grad_bias = grad_output.sum(0).sum(2).sum(1)
        # One None per non-tensor forward argument (stride/padding/dilation/groups).
        return (grad_input, grad_weight, grad_bias, None, None, None, None)
|
class LinearGrad(autograd.Function):
    """Linear layer as an explicit autograd Function with a hand-written
    backward pass (standard backpropagation gradients)."""
    @staticmethod
    def forward(context, input, weight, bias=None):
        context.save_for_backward(input, weight, bias)
        return torch.nn.functional.linear(input, weight, bias)
    @staticmethod
    def backward(context, grad_output):
        input, weight, bias = context.saved_tensors
        grad_input, grad_weight, grad_bias = None, None, None
        # dL/dinput propagates through the weight matrix.
        if context.needs_input_grad[0]:
            grad_input = grad_output.mm(weight)
        # dL/dweight is the batched outer product of grad and input.
        if context.needs_input_grad[1]:
            grad_weight = grad_output.t().mm(input)
        # Bias gradient sums over the batch dimension.
        if bias is not None and context.needs_input_grad[2]:
            grad_bias = grad_output.sum(0).squeeze(0)
        return grad_input, grad_weight, grad_bias
|
class Conv2dGrad(autograd.Function):
    """2D convolution whose backward pass uses the layer's fixed
    `weight_backward` feedback tensor (feedback alignment) instead of the
    forward weights when computing the input gradient."""
    @staticmethod
    def forward(context, input, weight, weight_backward, bias, bias_backward, stride, padding, dilation, groups):
        # Stash conv hyperparameters; tensors go through save_for_backward.
        (context.stride, context.padding, context.dilation, context.groups) = (stride, padding, dilation, groups)
        context.save_for_backward(input, weight, weight_backward, bias, bias_backward)
        output = torch.nn.functional.conv2d(input, weight, bias=bias, stride=stride, padding=padding, dilation=dilation, groups=groups)
        return output
    @staticmethod
    def backward(context, grad_output):
        (input, weight, weight_backward, bias, bias_backward) = context.saved_tensors
        grad_input = grad_weight = grad_weight_backward = grad_bias = grad_bias_backward = None
        if context.needs_input_grad[0]:
            # Key feedback-alignment step: propagate through weight_backward,
            # not the forward weight.
            grad_input = torch.nn.grad.conv2d_input(input_size=input.shape, weight=weight_backward, grad_output=grad_output, stride=context.stride, padding=context.padding, dilation=context.dilation, groups=context.groups)
        if context.needs_input_grad[1]:
            # weight_backward.shape matches weight.shape here, so the forward
            # weight gradient keeps its usual size.
            grad_weight = torch.nn.grad.conv2d_weight(input=input, weight_size=weight_backward.shape, grad_output=grad_output, stride=context.stride, padding=context.padding, dilation=context.dilation, groups=context.groups)
        if ((bias is not None) and context.needs_input_grad[3]):
            grad_bias = grad_output.sum(0).sum(2).sum(1)
        # weight_backward/bias_backward are fixed feedbacks: their grads stay None.
        return (grad_input, grad_weight, grad_weight_backward, grad_bias, grad_bias_backward, None, None, None, None)
|
class LinearGrad(autograd.Function):
    """Linear autograd Function whose backward pass routes the input gradient
    through the fixed `weight_backward` feedback matrix (feedback alignment)
    instead of the forward weight."""
    @staticmethod
    def forward(context, input, weight, weight_backward, bias=None, bias_backward=None):
        context.save_for_backward(input, weight, weight_backward, bias, bias_backward)
        output = input.mm(weight.t())
        if bias is not None:
            output += bias.unsqueeze(0).expand_as(output)
        return output
    @staticmethod
    def backward(context, grad_output):
        input, weight, weight_backward, bias, bias_backward = context.saved_tensors
        grad_input = grad_weight = grad_bias = None
        # Feedback alignment: the fixed feedback matrix stands in for weight.T.
        if context.needs_input_grad[0]:
            grad_input = grad_output.mm(weight_backward)
        if context.needs_input_grad[1]:
            grad_weight = grad_output.t().mm(input)
        if bias is not None and context.needs_input_grad[3]:
            grad_bias = grad_output.sum(0).squeeze(0)
        # weight_backward/bias_backward are fixed: no gradient flows to them.
        return grad_input, grad_weight, None, grad_bias, None
|
def select_loss_function(loss_function_config):
    """Build the loss function described by the config.

    Args:
        loss_function_config: dict with a 'name' key identifying the loss.

    Returns:
        The instantiated loss module.

    Raises:
        ValueError: if the loss name is not supported. (Bug fix: previously
        an unknown name silently returned None, unlike the sibling
        create_optimizer/create_lr_scheduler factories which raise.)
    """
    if loss_function_config['name'] == 'cross_entropy':
        return torch.nn.CrossEntropyLoss()
    raise ValueError('Loss function {} not supported'.format(loss_function_config['name']))
|
def create_lr_scheduler(lr_scheduler_config, optimizer):
    """Build the learning-rate scheduler described by the config.

    Args:
        lr_scheduler_config: dict with 'type', 'gamma', and (for multistep_lr)
            'milestones' keys.
        optimizer: the optimizer the scheduler will drive.

    Returns:
        The instantiated scheduler.

    Raises:
        ValueError: if the scheduler type is not supported.
    """
    gamma = lr_scheduler_config['gamma']
    if lr_scheduler_config['type'] == 'multistep_lr':
        lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=lr_scheduler_config['milestones'], gamma=gamma, verbose=True)
    else:
        # Bug fix: the message previously said "Optimizer type" — this
        # function builds LR schedulers, not optimizers.
        raise ValueError('LR scheduler type {} not supported'.format(lr_scheduler_config['type']))
    return lr_scheduler
|
def create_optimizer(optimizer_config, model):
    """Build the optimizer described by the config for `model`'s parameters.

    Requires 'lr', 'weight_decay', and 'momentum' keys; 'betas' is read only
    for Adam. Raises ValueError for an unsupported 'type'.
    """
    lr = optimizer_config['lr']
    weight_decay = optimizer_config['weight_decay']
    momentum = optimizer_config['momentum']
    opt_type = optimizer_config['type']
    params = model.parameters()
    if opt_type == 'RMSProp':
        return torch.optim.RMSprop(params, lr=lr, momentum=momentum, weight_decay=weight_decay)
    if opt_type == 'Adam':
        return torch.optim.Adam(params, lr=lr, betas=optimizer_config['betas'], weight_decay=weight_decay)
    if opt_type == 'SGD':
        return torch.optim.SGD(params, lr=lr, momentum=momentum, weight_decay=weight_decay)
    raise ValueError('Optimizer type {} not supported'.format(opt_type))
|
class Benchmark():
    """Drives a full training or evaluation run from a YAML config file.

    The config selects the training mode (a submodule of `models`), the
    architecture or checkpoint, the dataset, the optimizer/scheduler, and
    the output directory. `benchmark_mode` is 'training' when a 'training'
    section is present in the config, else 'evaluation'.
    """
    def __init__(self, config_file):
        """Parse and validate the config, seed RNGs, pick the device, and
        prepare the output directory (the config is copied there)."""
        self.config_file_path = config_file
        self.config_file = read_yaml(config_file)
        validate_config(self.config_file, 'benchmark', defaults=True)
        # Seed every RNG source for reproducibility.
        torch.manual_seed(self.config_file['experiment']['seed'])
        random.seed(self.config_file['experiment']['seed'])
        np.random.seed(self.config_file['experiment']['seed'])
        self.deterministic = self.config_file['experiment']['deterministic']
        if self.deterministic:
            cudnn.benchmark = False
            torch.use_deterministic_algorithms(True)
        else:
            cudnn.benchmark = True
        self.gpus = self.config_file['infrastructure']['gpus']
        self.model_config = self.config_file['model']
        # Presence of a 'training' section selects the benchmark mode.
        if ('training' not in self.config_file):
            self.benchmark_mode = 'evaluation'
        else:
            self.benchmark_mode = 'training'
            self.hyperparameters = self.config_file['training']['hyperparameters']
            self.metrics_config = self.config_file['training']['metrics']
            self.optimizer_config = self.config_file['training']['optimizer']
            self.lr_scheduler_config = self.config_file['training']['lr_scheduler']
        # Valid "modes" are the lowercase submodules of the `models` package.
        self.mode_names = sorted((name for name in models.__dict__ if (name.islower() and (not name.startswith('__')) and isinstance(models.__dict__[name], ModuleType))))
        self.mode = self.model_config['mode']['type']
        self.layer_config = {}
        self.layer_config = {'type': self.mode}
        if ('options' in self.model_config['mode']):
            self.mode_options = self.model_config['mode']['options']
            self.layer_config['options'] = self.mode_options
        if (self.mode not in self.mode_names):
            raise ValueError('Mode not {} supported'.format(self.mode))
        # Valid architectures are the lowercase callables of the mode submodule.
        options = models.__dict__[self.mode].__dict__
        self.model_names = sorted((name for name in options if (name.islower() and (not name.startswith('__')) and callable(options[name]))))
        self.loss_function_config = self.config_file['model']['loss_function']
        self.data_config = self.config_file['data']
        self.num_workers = self.config_file['data']['num_workers']
        self.multi_gpu = False
        # Device selection: a negative gpu id means CPU; a list of ids enables
        # DataParallel with the first id as the primary device.
        if (isinstance(self.gpus, int) and (self.gpus <= (- 1))):
            self.device = 'cpu'
        else:
            if (not torch.cuda.is_available()):
                raise ValueError('You selected {} GPUs but there are no GPUs available'.format(self.gpus))
            self.device = 'cuda'
            if isinstance(self.gpus, int):
                self.device += (':' + str(self.gpus))
            elif isinstance(self.gpus, list):
                self.device += (':' + str(self.gpus[0]))
                self.multi_gpu = True
        self.output_dir = os.path.join(self.config_file['experiment']['output_dir'], self.config_file['experiment']['name'])
        mkdir(self.output_dir)
        # Keep a copy of the config next to the results for provenance.
        shutil.copy2(self.config_file_path, os.path.join(self.output_dir, 'config.yaml'))
    def run(self):
        """Full training run: build dataloaders and model, train, and
        optionally evaluate the best checkpoint on the test split."""
        self.epochs = self.hyperparameters['epochs']
        self.batch_size = self.hyperparameters['batch_size']
        self.target_size = self.data_config['target_size']
        self.dataset_creator = DatasetSelector(self.data_config['dataset']).get_dataset()
        if (self.data_config['dataset_path'] is not None):
            self.dataset = self.dataset_creator(self.target_size, dataset_path=self.data_config['dataset_path'])
        else:
            self.dataset = self.dataset_creator(self.target_size)
        self.train_dataloader = self.dataset.create_train_dataloader(self.batch_size, deterministic=self.deterministic, num_workers=self.num_workers)
        self.val_dataloader = self.dataset.create_val_dataloader(self.batch_size, deterministic=self.deterministic, num_workers=self.num_workers)
        self.num_classes = self.dataset.num_classes
        # Build a registered architecture, or fall back to a checkpoint.
        if ((self.model_config['architecture'] is not None) and (self.model_config['architecture'] in self.model_names)):
            arch = self.model_config['architecture']
            if self.model_config['pretrained']:
                print("=> Using pre-trained model '{}'".format(self.model_config['architecture']))
            else:
                print("=> Creating model from scratch '{}'".format(self.model_config['architecture']))
            self.model = models.__dict__[self.mode].__dict__[arch](pretrained=self.model_config['pretrained'], num_classes=self.num_classes, layer_config=self.layer_config)
        elif (self.model_config['checkpoint'] is not None):
            print('Loading model checkpoint from ', self.model_config['checkpoint'])
            self.model = torch.load(self.model_config['checkpoint'], map_location=self.device)
        self.model.to(self.device)
        if isinstance(self.gpus, list):
            self.model = nn.DataParallel(self.model, self.gpus)
        self.loss_function = select_loss_function(self.loss_function_config)
        self.optimizer = create_optimizer(self.optimizer_config, self.model)
        self.lr_scheduler = create_lr_scheduler(self.lr_scheduler_config, self.optimizer)
        print('\nBenchmarking model on {}'.format(str(self.dataset)))
        print(self.metrics_config)
        trainer = Trainer(model=self.model, mode=self.mode, loss_function=self.loss_function, optimizer=self.optimizer, lr_scheduler=self.lr_scheduler, train_dataloader=self.train_dataloader, val_dataloader=self.val_dataloader, device=self.device, epochs=self.epochs, output_dir=self.output_dir, metrics_config=self.metrics_config, multi_gpu=self.multi_gpu)
        trainer.run()
        if self.config_file['evaluation']:
            # Evaluate the best-accuracy checkpoint saved by the trainer.
            self.model = torch.load(os.path.join(self.output_dir, 'model_best_acc.pth'))
            self.test_dataloader = self.dataset.create_test_dataloader(self.batch_size, deterministic=self.deterministic, num_workers=self.num_workers)
            self.evaluator = Evaluator(self.model, self.mode, self.loss_function, self.test_dataloader, self.device, self.output_dir)
            self.evaluate(self.evaluator)
    def run_eval(self):
        """Evaluation-only run: load a checkpoint and score it on the test split."""
        if (self.model_config['checkpoint'] is not None):
            print('Loading model checkpoint from ', self.model_config['checkpoint'])
            self.model = torch.load(self.model_config['checkpoint'], map_location=self.device)
        else:
            raise ValueError('A model checkpoint must be specified')
        self.target_size = self.data_config['target_size']
        # In evaluation mode the batch size comes from the data section.
        self.batch_size = self.data_config['batch_size']
        self.dataset_creator = DatasetSelector(self.data_config['dataset']).get_dataset()
        if (self.data_config['dataset_path'] is not None):
            self.dataset = self.dataset_creator(self.target_size, dataset_path=self.data_config['dataset_path'])
        else:
            self.dataset = self.dataset_creator(self.target_size)
        self.test_dataloader = self.dataset.create_test_dataloader(self.batch_size, deterministic=self.deterministic, num_workers=self.num_workers)
        self.loss_function = select_loss_function(self.loss_function_config)
        self.evaluator = Evaluator(self.model, None, self.loss_function, self.test_dataloader, self.device, self.output_dir)
        self.evaluate(self.evaluator)
    def evaluate(self, evaluator):
        """Run the evaluator and persist accuracy/loss to CSV and JSON."""
        (self.test_acc, self.test_loss) = evaluator.run()
        self.results_df = pd.DataFrame({'model_name': [self.config_file['experiment']['name']], 'dataset': [self.data_config['dataset']], 'accuracy': [float(self.test_acc)], 'error': [(100.0 - float(self.test_acc))], 'loss': [self.test_loss]})
        print('Test Results')
        print(self.results_df)
        csv_results = os.path.join(self.output_dir, 'results.csv')
        json_results = os.path.join(self.output_dir, 'results.json')
        print('Test Results saved in: \n{}\n{}'.format(csv_results, json_results))
        self.results_df.to_csv(csv_results)
        self.results_df.to_json(json_results, indent=2, orient='records')
|
def __main__():
    """CLI entry point: parse --config_file, then train or evaluate.

    Raises:
        ValueError: wrapping any failure, chained to the original exception.
    """
    parser = argparse.ArgumentParser(description='BioTorch')
    parser.add_argument('--config_file', help='Path to the configuration file')
    try:
        args = parser.parse_args()
        benchmark = Benchmark(args.config_file)
        if (benchmark.benchmark_mode == 'training'):
            benchmark.run()
        else:
            benchmark.run_eval()
    except Exception as e:
        # `e.message` is a Python 2 relic; kept for exotic exception classes.
        message = 'an unexpected error occurred: {}: {}'.format(type(e).__name__, ((e.message if hasattr(e, 'message') else '') or str(e)))
        # Bug fix: chain the original exception so its traceback is preserved
        # instead of being replaced by this re-raise.
        raise ValueError(message) from e
|
class CIFAR100(Dataset):
    """CIFAR-100: 100-class 32x32 color images, downloaded via torchvision."""
    def __str__(self):
        return 'CIFAR-100 Dataset'
    def __init__(self, target_size, dataset_path='./datasets/cifar100', train_transforms=None, test_transforms=None):
        # Per-channel statistics of the CIFAR-100 training split.
        self.mean = (0.5071, 0.4867, 0.4408)
        self.std = (0.2675, 0.2565, 0.2761)
        self.num_classes = 100
        super(CIFAR100, self).__init__(target_size=target_size, dataset_path=dataset_path, mean=self.mean, std=self.std, train_transforms=train_transforms, test_transforms=test_transforms)
        print('Preparing {} and storing data in {}'.format(str(self), dataset_path))
        train_tf = transforms.Compose(self.train_transforms)
        eval_tf = transforms.Compose(self.test_transforms)
        self.train_dataset = datasets.CIFAR100(self.dataset_path, train=True, download=True, transform=train_tf)
        # Validation and test both use the official held-out test split.
        self.val_dataset = datasets.CIFAR100(self.dataset_path, train=False, download=True, transform=eval_tf)
        self.test_dataset = datasets.CIFAR100(self.dataset_path, train=False, download=True, transform=eval_tf)
|
class CIFAR10(Dataset):
    """CIFAR-10: 10-class 32x32 color images, downloaded via torchvision."""
    def __str__(self):
        return 'CIFAR-10 Dataset'
    def __init__(self, target_size, dataset_path='./datasets/cifar10', train_transforms=None, test_transforms=None):
        # Per-channel statistics of the CIFAR-10 training split.
        self.mean = (0.4914, 0.4821, 0.4465)
        self.std = (0.247, 0.2435, 0.2616)
        self.num_classes = 10
        super(CIFAR10, self).__init__(target_size=target_size, dataset_path=dataset_path, mean=self.mean, std=self.std, train_transforms=train_transforms, test_transforms=test_transforms)
        print('Preparing {} and storing data in {}'.format(str(self), dataset_path))
        train_tf = transforms.Compose(self.train_transforms)
        eval_tf = transforms.Compose(self.test_transforms)
        self.train_dataset = datasets.CIFAR10(self.dataset_path, train=True, download=True, transform=train_tf)
        # Validation and test both use the official held-out test split.
        self.val_dataset = datasets.CIFAR10(self.dataset_path, train=False, download=True, transform=eval_tf)
        self.test_dataset = datasets.CIFAR10(self.dataset_path, train=False, download=True, transform=eval_tf)
|
class CIFAR10Benchmark(Dataset):
    """CIFAR-10 with a fixed 5000-image validation split carved out of the
    training set, plus standard flip/crop training augmentation."""
    def __str__(self):
        return 'CIFAR-10 Benchmark Dataset'
    def __init__(self, target_size, dataset_path='./datasets/cifar10', train_transforms=None, test_transforms=None):
        self.mean = (0.4914, 0.4821, 0.4465)
        self.std = (0.247, 0.2435, 0.2616)
        self.num_classes = 10
        super(CIFAR10Benchmark, self).__init__(target_size=target_size, dataset_path=dataset_path, mean=self.mean, std=self.std, train_transforms=train_transforms, test_transforms=test_transforms)
        print('Preparing {} and storing data in {}'.format(str(self), dataset_path))
        # Fixed seed so the sampled validation indices are identical across
        # runs (note: reseeds the global `random` module).
        random.seed(0)
        # Augmented training pipeline; overrides any transforms set by
        # Dataset.__init__ or passed via `train_transforms`.
        self.train_transforms = [transforms.RandomHorizontalFlip(), transforms.RandomCrop(32, padding=4), transforms.ToTensor(), transforms.Normalize(self.mean, self.std)]
        self.train_dataset = datasets.CIFAR10(self.dataset_path, train=True, download=True, transform=transforms.Compose(self.train_transforms))
        # Sample 5000 training indices for validation, then remove them from
        # the training split so the two sets are disjoint.
        val_indices = random.sample(range(0, len(self.train_dataset.data)), 5000)
        self.train_dataset.data = np.delete(self.train_dataset.data, val_indices, axis=0)
        self.train_dataset.targets = np.delete(self.train_dataset.targets, val_indices, axis=0)
        # The validation set re-loads the train split (with eval transforms)
        # and keeps only the sampled indices.
        self.val_dataset = datasets.CIFAR10(self.dataset_path, train=True, download=True, transform=transforms.Compose(self.test_transforms))
        self.val_dataset.data = self.val_dataset.data[val_indices]
        self.val_dataset.targets = list(np.array(self.val_dataset.targets)[val_indices])
        self.test_dataset = datasets.CIFAR10(self.dataset_path, train=False, download=True, transform=transforms.Compose(self.test_transforms))
|
class Dataset(object):
    """Base dataset wrapper: holds transform pipelines and builds
    train/val/test DataLoaders.

    Subclasses must set `train_dataset`, `val_dataset`, and `test_dataset`.
    """
    def __init__(self, target_size, dataset_path, mean=None, std=None, train_transforms=None, test_transforms=None):
        self.dataset_path = dataset_path
        self.target_size = target_size
        self.mean = mean
        self.std = std
        self.train_transforms = train_transforms
        self.test_transforms = test_transforms
        # Only build the default pipeline when a custom one was not supplied
        # (previously the default transform objects were constructed even
        # when both custom pipelines were given and then discarded).
        if self.train_transforms is None or self.test_transforms is None:
            default_transforms = [transforms.Resize((self.target_size, self.target_size)), transforms.ToTensor()]
            if self.mean is not None and self.std is not None:
                default_transforms.append(transforms.Normalize(self.mean, self.std))
            if self.train_transforms is None:
                self.train_transforms = default_transforms
            if self.test_transforms is None:
                self.test_transforms = default_transforms
    @staticmethod
    def seed_worker(worker_id):
        """Seed numpy and `random` inside each DataLoader worker so
        multi-worker loading is reproducible (PyTorch reproducibility recipe)."""
        worker_seed = torch.initial_seed() % (2 ** 32)
        # NOTE(review): the rest of this file uses the `np` alias; confirm the
        # module imports numpy under the bare name `numpy` as used here.
        numpy.random.seed(worker_seed)
        random.seed(worker_seed)
    def _create_dataloader(self, mode, batch_size, deterministic=False, shuffle=True, drop_last=True, num_workers=0):
        """Build a DataLoader for `mode` in {'train', 'val', 'test'}.

        Raises:
            ValueError: for an unknown mode (previously returned None silently).
        """
        gen = None
        worker_init_fn = None
        if deterministic and num_workers > 0:
            # Fixed generator + seeded workers make worker-side RNG reproducible.
            worker_init_fn = self.seed_worker
            gen = torch.Generator()
            gen.manual_seed(0)
        if mode == 'train':
            return DataLoader(self.train_dataset, batch_size=batch_size, shuffle=shuffle, drop_last=drop_last, num_workers=num_workers, worker_init_fn=worker_init_fn, generator=gen)
        elif mode == 'val':
            return DataLoader(self.val_dataset, batch_size=batch_size, shuffle=False, drop_last=False, num_workers=num_workers, worker_init_fn=worker_init_fn, generator=gen)
        elif mode == 'test':
            return DataLoader(self.test_dataset, batch_size=batch_size, shuffle=False, drop_last=False, num_workers=num_workers, worker_init_fn=worker_init_fn, generator=gen)
        raise ValueError('Dataloader mode {} not supported'.format(mode))
    def create_train_dataloader(self, batch_size, deterministic=False, num_workers=0):
        """Shuffled, drop-last loader over the training split."""
        return self._create_dataloader('train', batch_size, deterministic=deterministic, num_workers=num_workers)
    def create_val_dataloader(self, batch_size, deterministic=False, num_workers=0):
        """Sequential loader over the validation split."""
        return self._create_dataloader('val', batch_size, deterministic=deterministic, num_workers=num_workers)
    def create_test_dataloader(self, batch_size, deterministic=False, num_workers=0):
        """Sequential loader over the test split."""
        return self._create_dataloader('test', batch_size, deterministic=deterministic, num_workers=num_workers)
|
class FashionMNIST(Dataset):
    """Fashion-MNIST: 10-class 28x28 grayscale images, via torchvision."""
    def __str__(self):
        return 'Fashion MNIST Dataset'
    def __init__(self, target_size, dataset_path='./datasets/fashion-mnist', train_transforms=None, test_transforms=None):
        # Single-channel statistics of the Fashion-MNIST training split.
        self.mean = (0.2859,)
        self.std = (0.353,)
        self.num_classes = 10
        super(FashionMNIST, self).__init__(target_size=target_size, dataset_path=dataset_path, mean=self.mean, std=self.std, train_transforms=train_transforms, test_transforms=test_transforms)
        print('Preparing {} and storing data in {}'.format(str(self), dataset_path))
        train_tf = transforms.Compose(self.train_transforms)
        eval_tf = transforms.Compose(self.test_transforms)
        self.train_dataset = datasets.FashionMNIST(self.dataset_path, train=True, download=True, transform=train_tf)
        # Validation and test both use the official held-out test split.
        self.val_dataset = datasets.FashionMNIST(self.dataset_path, train=False, download=True, transform=eval_tf)
        self.test_dataset = datasets.FashionMNIST(self.dataset_path, train=False, download=True, transform=eval_tf)
|
class ImageNet(Dataset):
    """ImageNet (1000 classes), read from an ImageFolder directory layout
    with 'train' and 'val' subdirectories."""
    def __str__(self):
        return 'Imagenet Dataset'
    def __init__(self, target_size, dataset_path='./datasets/imagenet', train_transforms=None, test_transforms=None):
        self.mean = (0.485, 0.456, 0.406)
        self.std = (0.229, 0.224, 0.225)
        self.num_classes = 1000
        normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
        if train_transforms is None:
            # Standard ImageNet training augmentation.
            train_transforms = [transforms.RandomResizedCrop(224), transforms.RandomHorizontalFlip(), transforms.ToTensor(), normalize]
        if test_transforms is None:
            test_transforms = [transforms.Resize(256), transforms.CenterCrop(224), transforms.ToTensor(), normalize]
        super(ImageNet, self).__init__(target_size=target_size, dataset_path=dataset_path, mean=self.mean, std=self.std, train_transforms=train_transforms, test_transforms=test_transforms)
        print('Reading {} data from {}'.format(str(self), dataset_path))
        train_tf = transforms.Compose(self.train_transforms)
        eval_tf = transforms.Compose(self.test_transforms)
        self.train_dataset = datasets.ImageFolder(os.path.join(self.dataset_path, 'train'), transform=train_tf)
        # Validation and test both read the 'val' directory.
        self.val_dataset = datasets.ImageFolder(os.path.join(self.dataset_path, 'val'), transform=eval_tf)
        self.test_dataset = datasets.ImageFolder(os.path.join(self.dataset_path, 'val'), transform=eval_tf)
|
class MNIST(Dataset):
    """MNIST: 10-class 28x28 grayscale digits, downloaded via torchvision."""
    def __str__(self):
        return 'MNIST Dataset'
    def __init__(self, target_size, dataset_path='./datasets/mnist', train_transforms=None, test_transforms=None):
        # Single-channel statistics of the MNIST training split.
        self.mean = (0.1307,)
        self.std = (0.3081,)
        self.num_classes = 10
        super(MNIST, self).__init__(target_size=target_size, dataset_path=dataset_path, mean=self.mean, std=self.std, train_transforms=train_transforms, test_transforms=test_transforms)
        print('Preparing {} and storing data in {}'.format(str(self), dataset_path))
        train_tf = transforms.Compose(self.train_transforms)
        eval_tf = transforms.Compose(self.test_transforms)
        self.train_dataset = datasets.MNIST(self.dataset_path, train=True, download=True, transform=train_tf)
        # Validation and test both use the official held-out test split.
        self.val_dataset = datasets.MNIST(self.dataset_path, train=False, download=True, transform=eval_tf)
        self.test_dataset = datasets.MNIST(self.dataset_path, train=False, download=True, transform=eval_tf)
|
class DatasetSelector():
    """Resolves a dataset-name string (validated against DATASETS_AVAILABLE)
    to its Dataset class."""
    def __init__(self, dataset_name):
        if dataset_name not in DATASETS_AVAILABLE:
            raise ValueError('Dataset name specified: {} not in the list of available datasets {}'.format(dataset_name, DATASETS_AVAILABLE))
        self.dataset_name = dataset_name
    def get_dataset(self):
        """Return the class (not an instance) for the selected dataset."""
        # Dispatch table replaces the if/elif chain; .get keeps the original
        # None fall-through for names outside the mapping.
        mapping = {
            'cifar10': CIFAR10,
            'cifar10_benchmark': CIFAR10Benchmark,
            'cifar100': CIFAR100,
            'mnist': MNIST,
            'fashion_mnist': FashionMNIST,
            'imagenet': ImageNet,
        }
        return mapping.get(self.dataset_name)
|
class Evaluator():
    """Thin wrapper that runs `test` over a dataloader and returns (acc, loss)."""
    def __init__(self, model, mode, loss_function, dataloader, device, output_dir, multi_gpu=False):
        self.model = model
        self.mode = mode
        self.loss_function = loss_function
        self.dataloader = dataloader
        self.device = device
        self.multi_gpu = multi_gpu
        self.output_dir = output_dir
        # Kept for parity with the trainer's logging layout.
        self.logs_dir = os.path.join(output_dir, 'logs')
    def run(self):
        """Evaluate the model once; returns (accuracy as numpy value, loss)."""
        acc, loss = test(model=self.model, loss_function=self.loss_function, test_dataloader=self.dataloader, device=self.device)
        return acc.cpu().numpy(), loss
|
class Conv2d(nn.Conv2d):
    """nn.Conv2d whose initialization is controlled by a `layer_config` dict.

    Defaults to {'type': 'backpropagation'} with xavier init; any other
    'init' value selects kaiming initialization.
    """
    def __init__(self, in_channels: int, out_channels: int, kernel_size: _size_2_t, stride: _size_2_t=1, padding: Union[(str, _size_2_t)]=0, dilation: _size_2_t=1, groups: int=1, bias: bool=True, padding_mode: str='zeros', layer_config: dict=None):
        super(Conv2d, self).__init__(in_channels, out_channels, kernel_size, stride, padding, dilation, groups, bias, padding_mode)
        if layer_config is None:
            layer_config = {'type': 'backpropagation'}
        self.layer_config = layer_config
        if 'options' not in self.layer_config:
            # Default options: plain backprop, xavier init, no weight constraints.
            self.layer_config['options'] = {'constrain_weights': False, 'gradient_clip': False, 'init': 'xavier'}
        self.options = self.layer_config['options']
        self.type = self.layer_config['type']
        self.init = self.options['init']
        self.init_parameters()
    def init_parameters(self) -> None:
        """Re-initialize weight/bias: xavier (bias=0) or kaiming (uniform bias)."""
        fan_in, _ = nn.init._calculate_fan_in_and_fan_out(self.weight)
        if self.init == 'xavier':
            nn.init.xavier_uniform_(self.weight)
            if self.bias is not None:
                nn.init.constant_(self.bias, 0)
        else:
            nn.init.kaiming_uniform_(self.weight, a=math.sqrt(5))
            if self.bias is not None:
                bound = 1 / math.sqrt(fan_in) if fan_in > 0 else 0
                nn.init.uniform_(self.bias, -bound, bound)
|
class Linear(nn.Linear):
    """nn.Linear whose initialization is controlled by a `layer_config` dict.

    Defaults to {'type': 'backpropagation'} with xavier init; any other
    'init' value selects kaiming initialization.
    """
    def __init__(self, in_features: int, out_features: int, bias: bool=True, layer_config: dict=None) -> None:
        super(Linear, self).__init__(in_features, out_features, bias)
        if layer_config is None:
            layer_config = {'type': 'backpropagation'}
        self.layer_config = layer_config
        if 'options' not in self.layer_config:
            # Default options: plain backprop, xavier init, no weight constraints.
            self.layer_config['options'] = {'constrain_weights': False, 'gradient_clip': False, 'init': 'xavier'}
        self.options = self.layer_config['options']
        self.type = self.layer_config['type']
        self.init = self.options['init']
        self.init_parameters()
    def init_parameters(self) -> None:
        """Re-initialize weight/bias: xavier (bias=0) or kaiming (uniform bias)."""
        fan_in, _ = nn.init._calculate_fan_in_and_fan_out(self.weight)
        if self.init == 'xavier':
            nn.init.xavier_uniform_(self.weight)
            if self.bias is not None:
                nn.init.constant_(self.bias, 0)
        else:
            nn.init.kaiming_uniform_(self.weight, a=math.sqrt(5))
            if self.bias is not None:
                bound = 1 / math.sqrt(fan_in) if fan_in > 0 else 0
                nn.init.uniform_(self.bias, -bound, bound)
|
class Conv2d(fa_constructor.Conv2d):
    """Sign-concordant feedback convolution (brSF).

    Implements the method from "How Important Is Weight Symmetry in
    Backpropagation?" (https://arxiv.org/pdf/1510.05067.pdf) with the
    modification of taking the absolute value of the backward matrix.

    Batchwise Random Magnitude Sign-concordant Feedbacks (brSF):
    weight_backward = |M| * sign(weight), where M is redrawn after each update
    of W (i.e., each mini-batch). This subclass only forces
    layer_config['type'] = 'brsf'; the mechanics live in fa_constructor.Conv2d.
    """
    def __init__(self, in_channels: int, out_channels: int, kernel_size: _size_2_t, stride: _size_2_t=1, padding: Union[(str, _size_2_t)]=0, dilation: _size_2_t=1, groups: int=1, bias: bool=True, padding_mode: str='zeros', layer_config: dict=None):
        # Bug fix: copy before mutating so a config dict shared by the caller
        # (e.g. across several layers) is not modified in place.
        layer_config = {} if layer_config is None else dict(layer_config)
        layer_config['type'] = 'brsf'
        super(Conv2d, self).__init__(in_channels, out_channels, kernel_size, stride, padding, dilation, groups, bias, padding_mode, layer_config)
|
class Linear(fa_constructor.Linear):
    """Sign-concordant feedback linear layer (brSF).

    Implements the method from "How Important Is Weight Symmetry in
    Backpropagation?" (https://arxiv.org/pdf/1510.05067.pdf) with the
    modification of taking the absolute value of the backward matrix.

    Batchwise Random Magnitude Sign-concordant Feedbacks (brSF):
    weight_backward = |M| * sign(weight), where M is redrawn after each update
    of W (i.e., each mini-batch). This subclass only forces
    layer_config['type'] = 'brsf'; the mechanics live in fa_constructor.Linear.
    """
    def __init__(self, in_features: int, out_features: int, bias: bool=True, layer_config: dict=None) -> None:
        # Bug fix: copy before mutating so a config dict shared by the caller
        # is not modified in place.
        layer_config = {} if layer_config is None else dict(layer_config)
        layer_config['type'] = 'brsf'
        super(Linear, self).__init__(in_features, out_features, bias, layer_config)
|
class Conv2d(nn.Conv2d):
    """Conv2d for Direct Feedback Alignment (DFA): a backward hook replaces
    the backpropagated input gradient with a projection of the network's
    output loss gradient through a fixed random feedback tensor.

    `output_dim` sizes the feedback tensors — presumably the dimensionality of
    the network output whose loss gradient is fed back (TODO confirm against
    the model builder).
    """
    def __init__(self, in_channels: int, out_channels: int, output_dim: int, kernel_size: _size_2_t, stride: _size_2_t=1, padding: Union[(str, _size_2_t)]=0, dilation: _size_2_t=1, groups: int=1, bias: bool=True, padding_mode: str='zeros', layer_config: dict=None):
        super(Conv2d, self).__init__(in_channels, out_channels, kernel_size, stride, padding, dilation, groups, bias, padding_mode)
        self.layer_config = layer_config
        if ('options' not in self.layer_config):
            self.layer_config['options'] = {'constrain_weights': False, 'init': 'xavier', 'gradient_clip': False}
        self.options = self.layer_config['options']
        self.init = self.options['init']
        # Set externally before backward — presumably by a hook on the output
        # layer that captures the loss gradient (TODO confirm).
        self.loss_gradient = None
        # Fixed random feedback weights; requires_grad=False keeps them frozen.
        self.weight_backward = nn.Parameter(torch.Tensor(size=(output_dim, self.in_channels, self.kernel_size[0], self.kernel_size[1])), requires_grad=False)
        self.bias_backward = None
        if (self.bias is not None):
            self.bias_backward = nn.Parameter(torch.Tensor(size=(output_dim, self.in_channels)), requires_grad=False)
        self.init_parameters()
        if (('constrain_weights' in self.options) and self.options['constrain_weights']):
            # Remember the initial weight norm for renormalization in forward.
            self.norm_initial_weights = torch.linalg.norm(self.weight)
        # NOTE(review): register_backward_hook is deprecated in recent PyTorch
        # in favor of register_full_backward_hook — confirm before upgrading.
        self.register_backward_hook(self.dfa_backward_hook)
        self.weight_ratio = 0
    def init_parameters(self) -> None:
        """Initialize forward and feedback weights with the configured scheme
        ('xavier' or kaiming fallback)."""
        (fan_in, fan_out) = nn.init._calculate_fan_in_and_fan_out(self.weight)
        if (self.init == 'xavier'):
            nn.init.xavier_uniform_(self.weight)
            nn.init.xavier_uniform_(self.weight_backward)
            self.scaling_factor = math.sqrt((2.0 / float((fan_in + fan_out))))
            if (self.bias is not None):
                nn.init.constant_(self.bias, 0)
                nn.init.constant_(self.bias_backward, 0)
        else:
            nn.init.kaiming_uniform_(self.weight, a=math.sqrt(5))
            nn.init.kaiming_uniform_(self.weight_backward, a=math.sqrt(5))
            self.scaling_factor = (1 / math.sqrt((3 * fan_in)))
            if (self.bias is not None):
                bound = ((1 / math.sqrt(fan_in)) if (fan_in > 0) else 0)
                nn.init.uniform_(self.bias, (- bound), bound)
                nn.init.uniform_(self.bias_backward, (- bound), bound)
    def compute_weight_ratio(self):
        """Return the norm ratio ||weight_backward|| / ||weight|| (also stored
        on self.weight_diff)."""
        with torch.no_grad():
            self.weight_diff = (torch.linalg.norm(self.weight_backward) / torch.linalg.norm(self.weight))
        return self.weight_diff
    def forward(self, x):
        with torch.no_grad():
            if (('constrain_weights' in self.options) and self.options['constrain_weights']):
                # Rescale the weights back to their initial norm.
                # NOTE(review): this rebinds self.weight to a NEW Parameter on
                # every forward; an optimizer holding the old object will not
                # track the replacement — confirm intended.
                self.weight = torch.nn.Parameter(((self.weight * self.norm_initial_weights) / torch.linalg.norm(self.weight)))
        # Calls the 7-argument Conv2dGrad variant (plain-backprop gradients);
        # the DFA substitution happens in dfa_backward_hook below.
        return Conv2dGrad.apply(x, self.weight, self.bias, self.stride, self.padding, self.dilation, self.groups)
    @staticmethod
    def dfa_backward_hook(module, grad_input, grad_output):
        """Backward hook: replace the incoming input gradient with the DFA
        projection of module.loss_gradient through module.weight_backward."""
        # First layer (no input gradient requested): leave gradients untouched.
        if (grad_input[0] is None):
            return grad_input
        else:
            # Broadcast the loss gradient across both spatial dimensions of
            # this layer's output before the transposed convolution.
            out_grad = module.loss_gradient.unsqueeze(2).repeat(1, 1, grad_output[0].size()[2])
            out_grad = out_grad.unsqueeze(3).repeat(1, 1, 1, grad_output[0].size()[3])
            grad_dfa = torch.nn.grad.conv2d_input(input_size=grad_input[0].shape, weight=module.weight_backward, grad_output=out_grad, stride=module.stride, padding=module.padding, dilation=module.dilation, groups=module.groups)
            # Preserve the remaining (weight/bias) gradients unchanged.
            if (len(grad_input) == 2):
                return (grad_dfa, grad_input[1])
            else:
                return (grad_dfa, grad_input[1], grad_input[2])
|
class Linear(nn.Linear):
    """Fully connected layer trained with Direct Feedback Alignment (DFA).

    The backward pass is intercepted by a hook that replaces the incoming
    gradient with a projection of the global loss gradient through a fixed
    random feedback matrix ``weight_backward`` of shape
    (output_dim, in_features), instead of the transposed forward weights.
    """

    def __init__(self, in_features: int, out_features: int, output_dim: int, bias: bool=True, layer_config: dict=None) -> None:
        super(Linear, self).__init__(in_features, out_features, bias)
        self.layer_config = layer_config
        # Fill in default options when the caller supplied none.
        if ('options' not in self.layer_config):
            self.layer_config['options'] = {'constrain_weights': False, 'gradient_clip': False, 'init': 'xavier'}
        self.options = self.layer_config['options']
        self.init = self.options['init']
        # Set externally (by the training loop) to the loss gradient wrt the
        # network output; consumed in dfa_backward_hook.
        self.loss_gradient = None
        # Fixed random feedback matrix used in place of W^T.
        self.weight_backward = nn.Parameter(torch.Tensor(size=(output_dim, self.in_features)), requires_grad=False)
        self.bias_backward = None
        if (self.bias is not None):
            # NOTE(review): shaped (output_dim, in_features) like
            # weight_backward rather than like self.bias — confirm intended.
            self.bias_backward = nn.Parameter(torch.Tensor(size=(output_dim, self.in_features)), requires_grad=False)
        self.init_parameters()
        if (('constrain_weights' in self.options) and self.options['constrain_weights']):
            with torch.no_grad():
                self.norm_initial_weights = torch.linalg.norm(self.weight)
        # NOTE(review): register_backward_hook is deprecated in recent
        # PyTorch in favor of register_full_backward_hook.
        self.register_backward_hook(self.dfa_backward_hook)
        self.weight_ratio = 0

    def init_parameters(self) -> None:
        """Initialize forward and feedback weights ('xavier' or kaiming fallback)."""
        (fan_in, fan_out) = nn.init._calculate_fan_in_and_fan_out(self.weight)
        if (self.init == 'xavier'):
            nn.init.xavier_uniform_(self.weight)
            nn.init.xavier_uniform_(self.weight_backward)
            # Xavier gain: sqrt(2 / (fan_in + fan_out)).
            self.scaling_factor = math.sqrt((2.0 / float((fan_in + fan_out))))
            if (self.bias is not None):
                nn.init.constant_(self.bias, 0)
                nn.init.constant_(self.bias_backward, 0)
        else:
            nn.init.kaiming_uniform_(self.weight, a=math.sqrt(5))
            nn.init.kaiming_uniform_(self.weight_backward, a=math.sqrt(5))
            self.scaling_factor = (1 / math.sqrt((3 * fan_in)))
            if (self.bias is not None):
                bound = ((1 / math.sqrt(fan_in)) if (fan_in > 0) else 0)
                nn.init.uniform_(self.bias, (- bound), bound)
                nn.init.uniform_(self.bias_backward, (- bound), bound)

    def forward(self, x):
        # Optionally renormalize the forward weights to their initial norm.
        # NOTE(review): re-wrapping in nn.Parameter replaces the parameter
        # object each call; optimizer state may be invalidated.
        with torch.no_grad():
            if (('constrain_weights' in self.options) and self.options['constrain_weights']):
                self.weight = torch.nn.Parameter(((self.weight * self.norm_initial_weights) / torch.linalg.norm(self.weight)))
        return LinearGrad.apply(x, self.weight, self.bias)

    def compute_weight_ratio(self):
        # Diagnostic: ratio ||B|| / ||W||, cached on self.weight_diff.
        with torch.no_grad():
            self.weight_diff = (torch.linalg.norm(self.weight_backward) / torch.linalg.norm(self.weight))
        return self.weight_diff

    @staticmethod
    def dfa_backward_hook(module, grad_input, grad_output):
        """Replace the input gradient with loss_gradient @ weight_backward."""
        if (grad_input[0] is None):
            # First trainable layer: no input gradient needed.
            return grad_input
        else:
            grad_dfa = module.loss_gradient.mm(module.weight_backward)
            # Preserve the remaining gradient entries unchanged.
            if (len(grad_input) == 2):
                return (grad_dfa, grad_input[1])
            else:
                return (grad_dfa, grad_input[1], grad_input[2])
|
class Conv2d(fa_constructor.Conv2d):
    """Feedback-alignment convolution: forces layer_config['type'] = 'fa'."""

    def __init__(self, in_channels: int, out_channels: int, kernel_size: _size_2_t, stride: _size_2_t=1, padding: Union[(str, _size_2_t)]=0, dilation: _size_2_t=1, groups: int=1, bias: bool=True, padding_mode: str='zeros', layer_config: dict=None):
        config = {} if layer_config is None else layer_config
        config['type'] = 'fa'
        super(Conv2d, self).__init__(in_channels, out_channels, kernel_size, stride, padding, dilation, groups, bias, padding_mode, config)
|
class Linear(fa_constructor.Linear):
    """Feedback-alignment linear layer: forces layer_config['type'] = 'fa'."""

    def __init__(self, in_features: int, out_features: int, bias: bool=True, layer_config: dict=None) -> None:
        config = {} if layer_config is None else layer_config
        config['type'] = 'fa'
        super(Linear, self).__init__(in_features, out_features, bias, config)
|
class Conv2d(nn.Conv2d):
    """Convolution whose backward pass uses a decoupled feedback matrix.

    The feedback type is chosen by layer_config['type']:
      - 'fa':   fixed random feedback weights (feedback alignment)
      - 'usf':  uniform sign-concordant feedback, scaling_factor * sign(W)
      - 'brsf': fresh random magnitudes each forward pass, times sign(W)
      - 'frsf': fixed random magnitudes |M|, times sign(W)
    """

    def __init__(self, in_channels: int, out_channels: int, kernel_size: _size_2_t, stride: _size_2_t=1, padding: Union[(str, _size_2_t)]=0, dilation: _size_2_t=1, groups: int=1, bias: bool=True, padding_mode: str='zeros', layer_config: dict=None):
        super(Conv2d, self).__init__(in_channels, out_channels, kernel_size, stride, padding, dilation, groups, bias, padding_mode)
        self.layer_config = layer_config
        if (self.layer_config is None):
            self.layer_config = {'type': 'fa'}
        if ('options' not in self.layer_config):
            self.layer_config['options'] = {'constrain_weights': False, 'gradient_clip': False, 'init': 'xavier'}
        self.options = self.layer_config['options']
        self.type = self.layer_config['type']
        self.init = self.options['init']
        # Feedback weights used in place of W^T during backpropagation.
        self.weight_backward = nn.Parameter(torch.Tensor(self.weight.size()), requires_grad=False)
        if (self.bias is not None):
            self.bias_backward = nn.Parameter(torch.Tensor(self.bias.size()), requires_grad=False)
        else:
            self.register_parameter('bias', None)
            self.bias_backward = None
        self.init_parameters()
        if (self.type == 'frsf'):
            # Fix the random magnitudes once; forward() re-applies sign(W).
            self.weight_backward = nn.Parameter(torch.abs(self.weight_backward), requires_grad=False)
        if (('constrain_weights' in self.options) and self.options['constrain_weights']):
            self.norm_initial_weights = torch.linalg.norm(self.weight)
        if ((self.type == 'usf') or (self.type == 'brsf')):
            with torch.no_grad():
                self.weight_backward = torch.nn.Parameter((self.scaling_factor * torch.sign(self.weight)), requires_grad=False)
        self.alignment = 0
        self.weight_ratio = 0
        if (('gradient_clip' in self.options) and self.options['gradient_clip']):
            # NOTE(review): register_backward_hook is deprecated in recent
            # PyTorch; register_full_backward_hook is the replacement.
            self.register_backward_hook(self.gradient_clip)

    def init_parameters(self) -> None:
        """Initialize forward and feedback weights ('xavier' or kaiming fallback)."""
        (fan_in, fan_out) = nn.init._calculate_fan_in_and_fan_out(self.weight)
        if (self.init == 'xavier'):
            nn.init.xavier_uniform_(self.weight)
            nn.init.xavier_uniform_(self.weight_backward)
            # Xavier gain: sqrt(2 / (fan_in + fan_out)).
            self.scaling_factor = math.sqrt((2.0 / float((fan_in + fan_out))))
            if (self.bias is not None):
                nn.init.constant_(self.bias, 0)
                nn.init.constant_(self.bias_backward, 0)
        else:
            nn.init.kaiming_uniform_(self.weight, a=math.sqrt(5))
            nn.init.kaiming_uniform_(self.weight_backward, a=math.sqrt(5))
            self.scaling_factor = (1 / math.sqrt((3 * fan_in)))
            if (self.bias is not None):
                bound = ((1 / math.sqrt(fan_in)) if (fan_in > 0) else 0)
                nn.init.uniform_(self.bias, (- bound), bound)
                nn.init.uniform_(self.bias_backward, (- bound), bound)

    def forward(self, x: Tensor) -> Tensor:
        weight_backward = None
        with torch.no_grad():
            if (('constrain_weights' in self.options) and self.options['constrain_weights']):
                # Renormalize forward weights to their initial norm.
                # NOTE(review): re-wrapping in nn.Parameter replaces the
                # parameter object; optimizer state may be invalidated.
                self.weight = torch.nn.Parameter(((self.weight * self.norm_initial_weights) / torch.linalg.norm(self.weight)))
            if (self.type == 'usf'):
                # Recompute sign-concordant feedback from current weights.
                weight_backward = torch.nn.Parameter(torch.sign(self.weight), requires_grad=False)
                weight_backward = torch.nn.Parameter((self.scaling_factor * weight_backward), requires_grad=False)
                self.weight_backward = weight_backward
            elif (self.type == 'brsf'):
                # Fresh random magnitudes every forward pass, sign of W.
                wb = torch.Tensor(self.weight.size()).to(self.weight.device)
                if (self.init == 'xavier'):
                    torch.nn.init.xavier_uniform_(wb)
                else:
                    init.kaiming_uniform_(wb, a=math.sqrt(5))
                weight_backward = torch.nn.Parameter((torch.abs(wb) * torch.sign(self.weight)), requires_grad=False)
                self.weight_backward = weight_backward
            elif (self.type == 'frsf'):
                # NOTE(review): unlike the Linear counterpart this does not
                # re-apply torch.abs to self.weight_backward; equivalent as
                # long as weight_backward keeps the abs applied in __init__
                # and is never reassigned for 'frsf' — confirm.
                weight_backward = torch.nn.Parameter((self.weight_backward * torch.sign(self.weight)), requires_grad=False)
        if (weight_backward is None):
            # 'fa' (or unrecognized type): fixed random feedback matrix.
            weight_backward = self.weight_backward
        return Conv2dGrad.apply(x, self.weight, weight_backward, self.bias, self.bias_backward, self.stride, self.padding, self.dilation, self.groups)

    def compute_alignment(self):
        # Angle (degrees) between feedback and forward weights.
        self.alignment = compute_matrix_angle(self.weight_backward, self.weight)
        return self.alignment

    def compute_weight_ratio(self):
        # Diagnostic: ratio ||B|| / ||W||, cached on self.weight_diff.
        with torch.no_grad():
            self.weight_diff = (torch.linalg.norm(self.weight_backward) / torch.linalg.norm(self.weight))
        return self.weight_diff

    @staticmethod
    def gradient_clip(module, grad_input, grad_output):
        """Backward hook: clamp every incoming gradient to [-1, 1]."""
        grad_input = list(grad_input)
        for i in range(len(grad_input)):
            if (grad_input[i] is not None):
                grad_input[i] = torch.clamp(grad_input[i], (- 1), 1)
        return tuple(grad_input)
|
class Linear(nn.Linear):
    """Linear layer whose backward pass uses a decoupled feedback matrix.

    The feedback type is chosen by layer_config['type']:
      - 'fa':   fixed random feedback weights (feedback alignment)
      - 'usf':  uniform sign-concordant feedback, scaling_factor * sign(W)
      - 'brsf': fresh random magnitudes each forward pass, times sign(W)
      - 'frsf': fixed random magnitudes |M|, times sign(W)
    """

    def __init__(self, in_features: int, out_features: int, bias: bool=True, layer_config: dict=None) -> None:
        super(Linear, self).__init__(in_features, out_features, bias)
        self.layer_config = layer_config
        if (self.layer_config is None):
            self.layer_config = {'type': 'fa'}
        if ('options' not in self.layer_config):
            self.layer_config['options'] = {'constrain_weights': False, 'gradient_clip': False, 'init': 'xavier'}
        self.options = self.layer_config['options']
        self.type = self.layer_config['type']
        self.init = self.options['init']
        # Feedback weights used in place of W^T during backpropagation.
        self.weight_backward = nn.Parameter(torch.Tensor(self.weight.size()), requires_grad=False)
        if (self.bias is not None):
            self.bias_backward = nn.Parameter(torch.Tensor(self.bias.size()), requires_grad=False)
        else:
            self.register_parameter('bias', None)
            self.bias_backward = None
        self.init_parameters()
        if (self.type == 'frsf'):
            # Fix the random magnitudes once; forward() re-applies sign(W).
            self.weight_backward = nn.Parameter(torch.abs(self.weight_backward), requires_grad=False)
        if (('constrain_weights' in self.options) and self.options['constrain_weights']):
            self.norm_initial_weights = torch.linalg.norm(self.weight)
        if ((self.type == 'usf') or (self.type == 'brsf')):
            with torch.no_grad():
                self.weight_backward = torch.nn.Parameter((self.scaling_factor * torch.sign(self.weight)), requires_grad=False)
        self.alignment = 0
        self.weight_ratio = 0
        if (('gradient_clip' in self.options) and self.options['gradient_clip']):
            # NOTE(review): register_backward_hook is deprecated in recent
            # PyTorch; register_full_backward_hook is the replacement.
            self.register_backward_hook(self.gradient_clip)

    def init_parameters(self) -> None:
        """Initialize forward and feedback weights ('xavier' or kaiming fallback)."""
        (fan_in, fan_out) = nn.init._calculate_fan_in_and_fan_out(self.weight)
        if (self.init == 'xavier'):
            nn.init.xavier_uniform_(self.weight)
            nn.init.xavier_uniform_(self.weight_backward)
            # Xavier gain: sqrt(2 / (fan_in + fan_out)).
            self.scaling_factor = math.sqrt((2.0 / float((fan_in + fan_out))))
            if (self.bias is not None):
                nn.init.constant_(self.bias, 0)
                nn.init.constant_(self.bias_backward, 0)
        else:
            nn.init.kaiming_uniform_(self.weight, a=math.sqrt(5))
            nn.init.kaiming_uniform_(self.weight_backward, a=math.sqrt(5))
            self.scaling_factor = (1 / math.sqrt((3 * fan_in)))
            if (self.bias is not None):
                bound = ((1 / math.sqrt(fan_in)) if (fan_in > 0) else 0)
                nn.init.uniform_(self.bias, (- bound), bound)
                nn.init.uniform_(self.bias_backward, (- bound), bound)

    def forward(self, x: Tensor) -> Tensor:
        weight_backward = None
        with torch.no_grad():
            if (('constrain_weights' in self.options) and self.options['constrain_weights']):
                # Renormalize forward weights to their initial norm.
                # NOTE(review): re-wrapping in nn.Parameter replaces the
                # parameter object; optimizer state may be invalidated.
                self.weight = torch.nn.Parameter(((self.weight * self.norm_initial_weights) / torch.linalg.norm(self.weight)))
            if (self.type == 'usf'):
                # Recompute sign-concordant feedback from current weights.
                weight_backward = torch.nn.Parameter(torch.sign(self.weight), requires_grad=False)
                weight_backward = torch.nn.Parameter((self.scaling_factor * weight_backward), requires_grad=False)
                self.weight_backward = weight_backward
            elif (self.type == 'brsf'):
                # Fresh random magnitudes every forward pass, sign of W.
                wb = torch.Tensor(self.weight.size()).to(self.weight.device)
                if (self.init == 'xavier'):
                    torch.nn.init.xavier_uniform_(wb)
                else:
                    init.kaiming_uniform_(wb, a=math.sqrt(5))
                weight_backward = torch.nn.Parameter((torch.abs(wb) * torch.sign(self.weight)), requires_grad=False)
                self.weight_backward = weight_backward
            elif (self.type == 'frsf'):
                # |M| ∘ sign(W); the abs here is redundant with the one in
                # __init__ but harmless (weight_backward is already >= 0).
                weight_backward = torch.nn.Parameter((torch.abs(self.weight_backward) * torch.sign(self.weight)), requires_grad=False)
        if (weight_backward is None):
            # 'fa' (or unrecognized type): fixed random feedback matrix.
            weight_backward = self.weight_backward
        return LinearGrad.apply(x, self.weight, weight_backward, self.bias, self.bias_backward)

    def compute_alignment(self):
        # Angle (degrees) between feedback and forward weights.
        self.alignment = compute_matrix_angle(self.weight_backward, self.weight)
        return self.alignment

    def compute_weight_ratio(self):
        # Diagnostic: ratio ||B|| / ||W||, cached on self.weight_diff.
        with torch.no_grad():
            self.weight_diff = (torch.linalg.norm(self.weight_backward) / torch.linalg.norm(self.weight))
        return self.weight_diff

    @staticmethod
    def gradient_clip(module, grad_input, grad_output):
        """Backward hook: clamp every incoming gradient to [-1, 1]."""
        grad_input = list(grad_input)
        for i in range(len(grad_input)):
            if (grad_input[i] is not None):
                grad_input[i] = torch.clamp(grad_input[i], (- 1), 1)
        return tuple(grad_input)
|
class Conv2d(fa_constructor.Conv2d):
    """Fixed Random magnitude Sign-concordant Feedback (frSF) convolution.

    From "How Important Is Weight Symmetry in Backpropagation?"
    (https://arxiv.org/pdf/1510.05067.pdf), with the modification of taking
    the absolute value of the backward matrix:
    weight_backward = |M| ∘ sign(weight), where M is drawn once and kept
    fixed for the whole experiment.  Forces layer_config['type'] = 'frsf'.
    """

    def __init__(self, in_channels: int, out_channels: int, kernel_size: _size_2_t, stride: _size_2_t=1, padding: Union[(str, _size_2_t)]=0, dilation: _size_2_t=1, groups: int=1, bias: bool=True, padding_mode: str='zeros', layer_config: dict=None):
        config = {} if layer_config is None else layer_config
        config['type'] = 'frsf'
        super(Conv2d, self).__init__(in_channels, out_channels, kernel_size, stride, padding, dilation, groups, bias, padding_mode, config)
|
class Linear(fa_constructor.Linear):
    """Fixed Random magnitude Sign-concordant Feedback (frSF) linear layer.

    From "How Important Is Weight Symmetry in Backpropagation?", with the
    modification of taking the absolute value of the backward matrix:
    weight_backward = |M| ∘ sign(weight), where M is drawn once and kept
    fixed for the whole experiment.  Forces layer_config['type'] = 'frsf'.
    """

    def __init__(self, in_features: int, out_features: int, bias: bool=True, layer_config: dict=None) -> None:
        config = {} if layer_config is None else layer_config
        config['type'] = 'frsf'
        super(Linear, self).__init__(in_features, out_features, bias, config)
|
def compute_matrix_angle(A, B):
    """Return the angle in degrees between tensors A and B.

    Both tensors are flattened and L2-normalized; the angle is the arccos
    of their dot product, clipped to [-1, 1] for numerical safety.
    """
    with torch.no_grad():
        a = torch.reshape(A, (-1,))
        b = torch.reshape(B, (-1,))
        a = a / torch.norm(a)
        b = b / torch.norm(b)
        cosine = torch.clip(torch.dot(a, b), -1.0, 1.0)
        angle = torch.arccos(cosine) * (180.0 / math.pi)
    return angle
|
class Conv2d(fa_constructor.Conv2d):
    """Uniform Sign-concordant Feedback (uSF) convolution.

    From "How Important Is Weight Symmetry in Backpropagation?"
    (https://arxiv.org/pdf/1510.05067.pdf): the backward weights are
    sign(W).  Forces layer_config['type'] = 'usf'.
    """

    def __init__(self, in_channels: int, out_channels: int, kernel_size: _size_2_t, stride: _size_2_t=1, padding: Union[(str, _size_2_t)]=0, dilation: _size_2_t=1, groups: int=1, bias: bool=True, padding_mode: str='zeros', layer_config: dict=None):
        config = {} if layer_config is None else layer_config
        config['type'] = 'usf'
        super(Conv2d, self).__init__(in_channels, out_channels, kernel_size, stride, padding, dilation, groups, bias, padding_mode, config)
|
class Linear(fa_constructor.Linear):
    """Uniform Sign-concordant Feedback (uSF) linear layer.

    Method from "How Important Is Weight Symmetry in Backpropagation?"
    (https://arxiv.org/pdf/1510.05067.pdf): weight_backward = sign(weight).
    Forces layer_config['type'] = 'usf'.
    """

    def __init__(self, in_features: int, out_features: int, bias: bool=True, layer_config: dict=None) -> None:
        config = {} if layer_config is None else layer_config
        config['type'] = 'usf'
        super(Linear, self).__init__(in_features, out_features, bias, config)
|
def convert_layer(layer, mode, copy_weights, layer_config=None, output_dim=None):
    """Convert a torch.nn layer into its biologically-plausible counterpart.

    Args:
        layer: source nn.Conv2d or nn.Linear.
        mode: 'fa', 'usf', 'brsf', 'frsf', 'dfa' or 'backpropagation'.
        copy_weights: if True, reuse the source layer's weight/bias tensors.
        layer_config: optional per-layer options dict; its 'type' key is
            overwritten with `mode` (the caller's dict is mutated, matching
            historical behavior).
        output_dim: network output dimension, required only for 'dfa'.

    Returns:
        The converted layer, or None when `layer` is not a Conv2d/Linear or
        `mode` is unrecognized.
    """
    (layer_bias, bias_weight) = (False, None)
    # Bug fix: `weight` was previously unbound (NameError at the copy step)
    # when copy_weights was True but the layer had no 'weight' parameter.
    weight = None
    if (('weight' in layer._parameters) and copy_weights):
        weight = layer.weight
    if (('bias' in layer._parameters) and (layer.bias is not None)):
        bias_weight = layer.bias
        layer_bias = True
    new_layer = None
    if (layer_config is None):
        layer_config = {}
    layer_config['type'] = mode
    if isinstance(layer, nn.Conv2d):
        if (mode in ['fa', 'usf', 'brsf', 'frsf']):
            new_layer = fa_constructor.Conv2d(layer.in_channels, layer.out_channels, layer.kernel_size, layer.stride, layer.padding, layer.dilation, layer.groups, layer_bias, layer.padding_mode, layer_config)
        elif (mode == 'dfa'):
            new_layer = dfa_layers.Conv2d(layer.in_channels, layer.out_channels, output_dim, layer.kernel_size, layer.stride, layer.padding, layer.dilation, layer.groups, layer_bias, layer.padding_mode, layer_config)
        elif (mode == 'backpropagation'):
            new_layer = bp_layers.Conv2d(layer.in_channels, layer.out_channels, layer.kernel_size, layer.stride, layer.padding, layer.dilation, layer.groups, layer_bias, layer.padding_mode, layer_config)
    elif isinstance(layer, nn.Linear):
        if (mode in ['fa', 'usf', 'brsf', 'frsf']):
            new_layer = fa_constructor.Linear(layer.in_features, layer.out_features, layer_bias, layer_config)
        elif (mode == 'dfa'):
            new_layer = dfa_layers.Linear(layer.in_features, layer.out_features, output_dim, layer_bias, layer_config)
        elif (mode == 'backpropagation'):
            new_layer = bp_layers.Linear(layer.in_features, layer.out_features, layer_bias, layer_config)
    if ((new_layer is not None) and copy_weights):
        # Only overwrite the converted layer's weight when one was captured.
        if (weight is not None):
            new_layer.weight = weight
        new_layer.bias = bias_weight
    return new_layer
|
def alexnet(pretrained: bool=False, progress: bool=True, num_classes: int=1000, layer_config=None) -> AlexNet:
    """AlexNet model architecture from the
    `"One weird trick..." <https://arxiv.org/abs/1404.5997>`_ paper,
    converted to the module's biologically plausible training mode.
    The required minimum input size of the model is 63x63.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
        num_classes (int): Output dimension of the last linear layer
        layer_config (dict): Custom biologically plausible method layer configuration
    """
    print('Converting AlexNet to {} mode'.format(MODE_STRING))
    return create_torchvision_biomodel(models.alexnet, MODE, layer_config, pretrained, progress, num_classes)
|
def densenet121(pretrained: bool=False, progress: bool=True, num_classes: int=1000, layer_config=None) -> DenseNet:
    """Densenet-121 model from
    `"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_,
    converted to the module's biologically plausible training mode.
    The required minimum input size of the model is 29x29.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
        num_classes (int): Output dimension of the last linear layer
        layer_config (dict): Custom biologically plausible method layer configuration
    """
    print('Converting Densenet-121 to {} mode'.format(MODE_STRING))
    return create_torchvision_biomodel(models.densenet121, MODE, layer_config, pretrained, progress, num_classes)
|
def densenet161(pretrained: bool=False, progress: bool=True, num_classes: int=1000, layer_config=None) -> DenseNet:
    """Densenet-161 model from
    `"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_,
    converted to the module's biologically plausible training mode.
    The required minimum input size of the model is 29x29.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
        num_classes (int): Output dimension of the last linear layer
        layer_config (dict): Custom biologically plausible method layer configuration
    """
    print('Converting Densenet-161 to {} mode'.format(MODE_STRING))
    return create_torchvision_biomodel(models.densenet161, MODE, layer_config, pretrained, progress, num_classes)
|
def densenet169(pretrained: bool=False, progress: bool=True, num_classes: int=1000, layer_config=None) -> DenseNet:
    """Densenet-169 model from
    `"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_,
    converted to the module's biologically plausible training mode.
    The required minimum input size of the model is 29x29.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
        num_classes (int): Output dimension of the last linear layer
        layer_config (dict): Custom biologically plausible method layer configuration
    """
    print('Converting Densenet-169 to {} mode'.format(MODE_STRING))
    return create_torchvision_biomodel(models.densenet169, MODE, layer_config, pretrained, progress, num_classes)
|
def densenet201(pretrained: bool=False, progress: bool=True, num_classes: int=1000, layer_config=None) -> DenseNet:
    """Densenet-201 model from
    `"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_,
    converted to the module's biologically plausible training mode.
    The required minimum input size of the model is 29x29.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
        num_classes (int): Output dimension of the last linear layer
        layer_config (dict): Custom biologically plausible method layer configuration
    """
    print('Converting Densenet-201 to {} mode'.format(MODE_STRING))
    return create_torchvision_biomodel(models.densenet201, MODE, layer_config, pretrained, progress, num_classes)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.