repo stringlengths 1 99 | file stringlengths 13 215 | code stringlengths 12 59.2M | file_length int64 12 59.2M | avg_line_length float64 3.82 1.48M | max_line_length int64 12 2.51M | extension_type stringclasses 1
value |
|---|---|---|---|---|---|---|
PT-MAP | PT-MAP-master/test_standard.py | import collections
import pickle
import random
import numpy as np
import matplotlib.pyplot as plt
import torch
from torch.autograd import Variable
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.optim as optim
import math
import torch.nn.functional as F
import torch.optim as optim
from numpy import linalg as LA
from tqdm.notebook import tqdm
use_gpu = torch.cuda.is_available()
# ========================================
# loading datas
def centerDatas(datas):
    """Centre and L2-normalise the support and query features separately.

    Operates in place on ``datas`` of shape [n_runs, n_samples, dim]; the
    first ``n_lsamples`` samples (support) and the rest (queries) are each
    mean-centred over their own group, then scaled to unit norm.
    """
    support = slice(None, n_lsamples)
    query = slice(n_lsamples, None)
    for part in (support, query):
        datas[:, part] = datas[:, part, :] - datas[:, part].mean(1, keepdim=True)
        datas[:, part] = datas[:, part, :] / torch.norm(datas[:, part, :], 2, 2)[:, :, None]
    return datas
def scaleEachUnitaryDatas(datas):
    """Project every feature vector onto the unit sphere (divide by its L2 norm)."""
    lengths = datas.norm(dim=2, keepdim=True)
    return datas / lengths
def QRreduction(datas):
    """Reduce feature dimensionality with a batched QR decomposition.

    For ``datas`` of shape [n_runs, n_samples, dim], returns the transposed R
    factors of shape [n_runs, n_samples, min(dim, n_samples)].  The Gram
    matrix of each run is preserved, so pairwise distances are unchanged.

    Fix: ``torch.qr`` is deprecated; ``torch.linalg.qr`` is the supported
    equivalent (same backend, same default 'reduced' mode and ``.R`` field).
    """
    ndatas = torch.linalg.qr(datas.permute(0, 2, 1)).R
    ndatas = ndatas.permute(0, 2, 1)
    return ndatas
class Model:
    """Minimal base class: records the number of classes per few-shot task."""

    def __init__(self, n_ways):
        # Number of classes ("ways") in each episode.
        self.n_ways = n_ways
# --------- GaussianModel
class GaussianModel(Model):
    """Gaussian class-centroid model used by the MAP procedure.

    Relies on module-level globals set by the ``__main__`` section:
    ``ndatas``, ``labels``, ``n_runs``, ``n_shot``, ``n_queries``,
    ``n_ways``, ``n_nfeat``, ``n_lsamples`` and ``n_usamples``.
    """

    def __init__(self, n_ways, lam):
        super(GaussianModel, self).__init__(n_ways)
        self.mus = None  # centroids, shape [n_runs][n_ways][n_nfeat]
        self.lam = lam   # Sinkhorn regularisation strength

    def clone(self):
        """Return an independent copy of this model.

        Bug fix: the original called ``GaussianModel(self.n_ways)`` (missing
        the required ``lam`` argument, a guaranteed TypeError) and then
        returned ``self`` instead of the copy it had just built.
        """
        other = GaussianModel(self.n_ways, self.lam)
        if self.mus is not None:
            other.mus = self.mus.clone()
        return other

    def cuda(self):
        self.mus = self.mus.cuda()

    def initFromLabelledDatas(self):
        # Initialise each class centroid as the mean of its support (shot) samples.
        self.mus = ndatas.reshape(n_runs, n_shot+n_queries,n_ways, n_nfeat)[:,:n_shot,].mean(1)

    def updateFromEstimate(self, estimate, alpha):
        # Move the centroids a fraction ``alpha`` of the way towards the estimate.
        Dmus = estimate - self.mus
        self.mus = self.mus + alpha * (Dmus)

    def compute_optimal_transport(self, M, r, c, epsilon=1e-6):
        """Sinkhorn iterations for the entropic optimal transport plan.

        M: cost matrix [n_runs, n, m]; r, c: target row/column marginals.
        Returns the transport plan P and the total transport cost sum(P * M).
        Note: moves r, c and the scaling vector to CUDA unconditionally, as in
        the rest of this script.
        """
        r = r.cuda()
        c = c.cuda()
        n_runs, n, m = M.shape
        P = torch.exp(- self.lam * M)
        P /= P.view((n_runs, -1)).sum(1).unsqueeze(1).unsqueeze(1)
        u = torch.zeros(n_runs, n).cuda()
        maxiters = 1000
        iters = 1
        # Alternately rescale rows and columns until the row marginals converge.
        while torch.max(torch.abs(u - P.sum(2))) > epsilon:
            u = P.sum(2)
            P *= (r / u).view((n_runs, -1, 1))
            P *= (c / P.sum(1)).view((n_runs, 1, -1))
            if iters == maxiters:
                break
            iters = iters + 1
        return P, torch.sum(P * M)

    def getProbas(self):
        """Return soft class assignments [n_runs][n_samples][n_ways].

        Queries get Sinkhorn-balanced assignments; support samples get their
        one-hot ground-truth labels.
        """
        # compute squared dist to centroids [n_runs][n_samples][n_ways]
        dist = (ndatas.unsqueeze(2)-self.mus.unsqueeze(1)).norm(dim=3).pow(2)
        p_xj = torch.zeros_like(dist)
        r = torch.ones(n_runs, n_usamples)
        c = torch.ones(n_runs, n_ways) * n_queries
        p_xj_test, _ = self.compute_optimal_transport(dist[:, n_lsamples:], r, c, epsilon=1e-6)
        p_xj[:, n_lsamples:] = p_xj_test
        p_xj[:,:n_lsamples].fill_(0)
        p_xj[:,:n_lsamples].scatter_(2,labels[:,:n_lsamples].unsqueeze(2), 1)
        return p_xj

    def estimateFromMask(self, mask):
        # Weighted mean of the features under the soft-assignment mask.
        emus = mask.permute(0,2,1).matmul(ndatas).div(mask.sum(dim=1).unsqueeze(2))
        return emus
# =========================================
# MAP
# =========================================
class MAP:
    """Iterative MAP estimation of class centroids via soft assignments.

    Reads the module-level globals ``labels``, ``n_lsamples`` and ``n_runs``
    set by the ``__main__`` section.
    """

    def __init__(self, alpha=None):
        self.verbose = False
        self.progressBar = False
        self.alpha = alpha  # step size for centroid updates

    def getAccuracy(self, probas):
        """Return (mean accuracy, 95% confidence half-width) over all runs."""
        olabels = probas.argmax(dim=2)
        matches = labels.eq(olabels).float()
        acc_test = matches[:,n_lsamples:].mean(1)
        m = acc_test.mean().item()
        pm = acc_test.std().item() *1.96 / math.sqrt(n_runs)
        return m, pm

    def performEpoch(self, model, epochInfo=None):
        """One MAP iteration: soft-assign samples, then update the centroids."""
        p_xj = model.getProbas()
        self.probas = p_xj
        if self.verbose:
            print("accuracy from filtered probas", self.getAccuracy(self.probas))
        m_estimates = model.estimateFromMask(self.probas)
        # update centroids
        model.updateFromEstimate(m_estimates, self.alpha)
        if self.verbose:
            op_xj = model.getProbas()
            acc = self.getAccuracy(op_xj)
            print("output model accuracy", acc)

    def loop(self, model, n_epochs=20):
        """Run ``n_epochs`` MAP iterations; return the final (mean, pm) accuracy."""
        self.probas = model.getProbas()
        if self.verbose:
            print("initialisation model accuracy", self.getAccuracy(self.probas))
        if self.progressBar:
            if type(self.progressBar) == bool:
                pb = tqdm(total = n_epochs)
            else:
                pb = self.progressBar
        for epoch in range(1, n_epochs+1):
            if self.verbose:
                # Bug fix: the original format string had three placeholders but
                # only two arguments, raising IndexError whenever verbose=True.
                print("----- epoch[{:3d}]  alpha: {:0.3f}".format(epoch, self.alpha))
            self.performEpoch(model, epochInfo=(epoch, n_epochs))
            if (self.progressBar): pb.update()
        # get final accuracy and return it
        op_xj = model.getProbas()
        acc = self.getAccuracy(op_xj)
        return acc
if __name__ == '__main__':
    # ---- data loading
    # Episode configuration: 5-way, 5-shot, 15 queries per class, 10000 runs.
    n_shot = 5
    n_ways = 5
    n_queries = 15
    n_runs=10000
    n_lsamples = n_ways * n_shot          # labelled (support) samples per run
    n_usamples = n_ways * n_queries       # unlabelled (query) samples per run
    n_samples = n_lsamples + n_usamples
    import FSLTask
    cfg = {'shot':n_shot, 'ways':n_ways, 'queries':n_queries}
    FSLTask.loadDataSet("miniimagenet")
    FSLTask.setRandomStates(cfg)
    ndatas = FSLTask.GenerateRunSet(cfg=cfg)
    # Flatten [runs, ways, shot+queries, dim] -> [runs, samples, dim].
    ndatas = ndatas.permute(0,2,1,3).reshape(n_runs, n_samples, -1)
    # NOTE(review): the literal 5 below duplicates n_ways — confirm before reuse.
    labels = torch.arange(n_ways).view(1,1,n_ways).expand(n_runs,n_shot+n_queries,5).clone().view(n_runs, n_samples)
    # Power transform (element-wise sqrt) to reduce feature skew.
    beta = 0.5
    ndatas[:,] = torch.pow(ndatas[:,]+1e-6, beta)
    ndatas = QRreduction(ndatas)
    n_nfeat = ndatas.size(2)
    ndatas = scaleEachUnitaryDatas(ndatas)
    # trans-mean-sub: centre support and query sets separately.
    ndatas = centerDatas(ndatas)
    print("size of the datas...", ndatas.size())
    # switch to cuda
    ndatas = ndatas.cuda()
    labels = labels.cuda()
    # MAP: Sinkhorn-regularised centroid estimation over all runs.
    lam = 10
    model = GaussianModel(n_ways, lam)
    model.initFromLabelledDatas()
    alpha = 0.2
    optim = MAP(alpha)
    optim.verbose=False
    optim.progressBar=True
    acc_test = optim.loop(model, n_epochs=20)
    print("final accuracy found {:0.2f} +- {:0.2f}".format(*(100*x for x in acc_test)))
| 6,764 | 28.159483 | 122 | py |
PT-MAP | PT-MAP-master/wrn_mixup_model.py | ### dropout has been removed in this code. original code had dropout#####
import torch
import torch.nn as nn
import torch.nn.init as init
import torch.nn.functional as F
from torch.autograd import Variable
import sys, os
import numpy as np
import random
act = torch.nn.ReLU()
import math
from torch.nn.utils.weight_norm import WeightNorm
class BasicBlock(nn.Module):
    """Pre-activation WideResNet basic block: (BN-ReLU-3x3conv) x2 + shortcut.

    When the channel counts differ, a 1x1 convolution projects the shortcut;
    otherwise the (un-activated) input is added directly.
    """

    def __init__(self, in_planes, out_planes, stride, dropRate=0.0):
        super(BasicBlock, self).__init__()
        self.bn1 = nn.BatchNorm2d(in_planes)
        self.relu1 = nn.ReLU(inplace=True)
        self.conv1 = nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(out_planes)
        self.relu2 = nn.ReLU(inplace=True)
        self.conv2 = nn.Conv2d(out_planes, out_planes, kernel_size=3, stride=1,
                               padding=1, bias=False)
        self.droprate = dropRate
        self.equalInOut = (in_planes == out_planes)
        # 1x1 projection for the shortcut only when shapes differ.
        self.convShortcut = None
        if not self.equalInOut:
            self.convShortcut = nn.Conv2d(in_planes, out_planes, kernel_size=1,
                                          stride=stride, padding=0, bias=False)

    def forward(self, x):
        if self.equalInOut:
            shortcut = x
            out = self.relu1(self.bn1(x))
            out = self.relu2(self.bn2(self.conv1(out)))
        else:
            # Pre-activate the input once and feed it to both branches.
            x = self.relu1(self.bn1(x))
            shortcut = self.convShortcut(x)
            out = self.relu2(self.bn2(self.conv1(x)))
        if self.droprate > 0:
            out = F.dropout(out, p=self.droprate, training=self.training)
        return torch.add(shortcut, self.conv2(out))
class distLinear(nn.Module):
    """Cosine-similarity classifier head (weight-normalised linear layer).

    Scores are cos(x, w_c) scaled by a fixed factor so softmax receives
    inputs of useful magnitude (see S2M2 repo issues #4 and #8).
    """

    def __init__(self, indim, outdim):
        super(distLinear, self).__init__()
        self.L = nn.Linear( indim, outdim, bias = False)
        self.class_wise_learnable_norm = True  # see the issue#4&8 in the github
        if self.class_wise_learnable_norm:
            # Split each class weight into a direction and a learnable norm.
            WeightNorm.apply(self.L, 'weight', dim=0)
        # Small label spaces use scale 2; very large ones (>200 classes,
        # e.g. omniglot) need a larger factor.
        self.scale_factor = 2 if outdim <= 200 else 10

    def forward(self, x):
        # L2-normalise the inputs before the (normalised) linear map.
        norm = torch.norm(x, p=2, dim =1).unsqueeze(1).expand_as(x)
        normalized = x.div(norm + 0.00001)
        if not self.class_wise_learnable_norm:
            w_norm = torch.norm(self.L.weight.data, p=2, dim =1).unsqueeze(1).expand_as(self.L.weight.data)
            self.L.weight.data = self.L.weight.data.div(w_norm + 0.00001)
        cos_dist = self.L(normalized)
        return self.scale_factor * cos_dist
class NetworkBlock(nn.Module):
    """A WideResNet stage: ``nb_layers`` stacked blocks; only the first block
    may change the channel count or apply the stage stride."""

    def __init__(self, nb_layers, in_planes, out_planes, block, stride, dropRate=0.0):
        super(NetworkBlock, self).__init__()
        self.layer = self._make_layer(block, in_planes, out_planes, nb_layers, stride, dropRate)

    def _make_layer(self, block, in_planes, out_planes, nb_layers, stride, dropRate):
        stages = []
        for i in range(int(nb_layers)):
            inp = in_planes if i == 0 else out_planes
            s = stride if i == 0 else 1
            stages.append(block(inp, out_planes, s, dropRate))
        return nn.Sequential(*stages)

    def forward(self, x):
        return self.layer(x)
def to_one_hot(inp, num_classes):
    """Return a float one-hot encoding of the integer labels ``inp``.

    Output shape is [batch, num_classes]; placed on GPU when available.
    """
    on_gpu = torch.cuda.is_available()
    onehot = torch.FloatTensor(inp.size(0), num_classes)
    if on_gpu:
        onehot = onehot.cuda()
    onehot.zero_()
    idx = inp.type(torch.LongTensor)
    if on_gpu:
        idx = idx.cuda()
    onehot.scatter_(1, torch.unsqueeze(idx, 1), 1)
    return Variable(onehot, requires_grad=False)
def mixup_data(x, y, lam):
    '''Mix a batch with a randomly permuted copy of itself.

    Returns (mixed inputs, original targets, permuted targets, lam).
    '''
    perm = torch.randperm(x.size()[0])
    if torch.cuda.is_available():
        perm = perm.cuda()
    mixed = lam * x + (1 - lam) * x[perm, :]
    return mixed, y, y[perm], lam
class WideResNet(nn.Module):
    """WideResNet backbone with an optional manifold-mixup training path.

    forward() has two modes:
      * target given  -> mixup is applied at a randomly chosen depth (input or
        after one of the three stages) and it returns
        (features, logits, target_a, target_b);
      * target absent -> plain inference, returns (features, logits).
    """
    def __init__(self, depth=28, widen_factor=10, num_classes= 200 , loss_type = 'dist', per_img_std = False, stride = 1 ):
        dropRate = 0.5
        flatten = True
        super(WideResNet, self).__init__()
        nChannels = [16, 16*widen_factor, 32*widen_factor, 64*widen_factor]
        assert((depth - 4) % 6 == 0)
        # Blocks per stage; note this is a float and is int()-cast in NetworkBlock.
        n = (depth - 4) / 6
        block = BasicBlock
        # 1st conv before any network block
        self.conv1 = nn.Conv2d(3, nChannels[0], kernel_size=3, stride=1,
                               padding=1, bias=False)
        # 1st block
        self.block1 = NetworkBlock(n, nChannels[0], nChannels[1], block, stride, dropRate)
        # 2nd block
        self.block2 = NetworkBlock(n, nChannels[1], nChannels[2], block, 2, dropRate)
        # 3rd block
        self.block3 = NetworkBlock(n, nChannels[2], nChannels[3], block, 2, dropRate)
        # global average pooling and linear
        self.bn1 = nn.BatchNorm2d(nChannels[3])
        self.relu = nn.ReLU(inplace=True)
        self.nChannels = nChannels[3]
        # Classifier head: plain linear for 'softmax', cosine head otherwise.
        if loss_type == 'softmax':
            self.linear = nn.Linear(nChannels[3], int(num_classes))
            self.linear.bias.data.fill_(0)
        else:
            self.linear = distLinear(nChannels[3], int(num_classes))
        self.num_classes = num_classes
        if flatten:
            self.final_feat_dim = 640
        # He initialisation for convs, constant init for batch-norms.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
    def forward(self, x, target= None, mixup=False, mixup_hidden=True, mixup_alpha=None , lam = 0.4):
        if target is not None:
            # Choose where to mix: 0 = input mixup, 1..3 = after that stage.
            if mixup_hidden:
                layer_mix = random.randint(0,3)
            elif mixup:
                layer_mix = 0
            else:
                layer_mix = None
            out = x
            target_a = target_b = target
            if layer_mix == 0:
                out, target_a , target_b , lam = mixup_data(out, target, lam=lam)
            out = self.conv1(out)
            out = self.block1(out)
            if layer_mix == 1:
                out, target_a , target_b , lam = mixup_data(out, target, lam=lam)
            out = self.block2(out)
            if layer_mix == 2:
                out, target_a , target_b , lam = mixup_data(out, target, lam=lam)
            out = self.block3(out)
            if layer_mix == 3:
                out, target_a , target_b , lam = mixup_data(out, target, lam=lam)
            out = self.relu(self.bn1(out))
            out = F.avg_pool2d(out, out.size()[2:])
            out = out.view(out.size(0), -1)
            out1 = self.linear(out)
            return out , out1 , target_a , target_b
        else:
            out = x
            out = self.conv1(out)
            out = self.block1(out)
            out = self.block2(out)
            out = self.block3(out)
            out = self.relu(self.bn1(out))
            out = F.avg_pool2d(out, out.size()[2:])
            out = out.view(out.size(0), -1)
            out1 = self.linear(out)
            return out, out1
def wrn28_10(num_classes=200 , loss_type = 'dist'):
    """Build a WideResNet-28-10 backbone with the requested classifier head."""
    return WideResNet(depth=28, widen_factor=10, num_classes=num_classes,
                      loss_type=loss_type, per_img_std=False, stride=1)
| 7,986 | 36.674528 | 206 | py |
PT-MAP | PT-MAP-master/res_mixup_model.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import random
from torch.nn.utils.weight_norm import WeightNorm
def conv3x3(in_planes, out_planes, stride=1):
    """Bias-free 3x3 convolution with padding 1 (spatial size preserved at stride 1)."""
    return nn.Conv2d(in_planes, out_planes, kernel_size=3,
                     stride=stride, padding=1, bias=False)
def conv1x1(in_planes, out_planes, stride=1):
    """Bias-free 1x1 (pointwise) convolution, used for shortcut projections."""
    return nn.Conv2d(in_planes, out_planes, kernel_size=1,
                     stride=stride, bias=False)
def mixup_data(x, y, lam):
    '''Mix a batch with a randomly permuted copy of itself.

    Returns (mixed inputs, original targets, permuted targets, lam).
    '''
    perm = torch.randperm(x.size()[0])
    if torch.cuda.is_available():
        perm = perm.cuda()
    mixed = lam * x + (1 - lam) * x[perm, :]
    return mixed, y, y[perm], lam
class distLinear(nn.Module):
    """Cosine-similarity classifier head (weight-normalised linear layer).

    Scores are cos(x, w_c) scaled by a fixed factor so softmax receives
    inputs of useful magnitude (see S2M2 repo issues #4 and #8).
    """

    def __init__(self, indim, outdim):
        super(distLinear, self).__init__()
        self.L = nn.Linear( indim, outdim, bias = False)
        self.class_wise_learnable_norm = True  # see the issue#4&8 in the github
        if self.class_wise_learnable_norm:
            # Split each class weight into a direction and a learnable norm.
            WeightNorm.apply(self.L, 'weight', dim=0)
        # Small label spaces use scale 2; very large ones (>200 classes,
        # e.g. omniglot) need a larger factor.
        self.scale_factor = 2 if outdim <= 200 else 10

    def forward(self, x):
        # L2-normalise the inputs before the (normalised) linear map.
        norm = torch.norm(x, p=2, dim =1).unsqueeze(1).expand_as(x)
        normalized = x.div(norm + 0.00001)
        if not self.class_wise_learnable_norm:
            w_norm = torch.norm(self.L.weight.data, p=2, dim =1).unsqueeze(1).expand_as(self.L.weight.data)
            self.L.weight.data = self.L.weight.data.div(w_norm + 0.00001)
        cos_dist = self.L(normalized)
        return self.scale_factor * cos_dist
class BasicBlock(nn.Module):
    """Standard post-activation ResNet basic block (two 3x3 convs + shortcut)."""
    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(BasicBlock, self).__init__()
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = nn.BatchNorm2d(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = nn.BatchNorm2d(planes)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        # Residual branch: conv-bn-relu, conv-bn.
        out = self.bn1(self.conv1(x))
        out = self.relu(out)
        out = self.bn2(self.conv2(out))
        # Shortcut branch, projected when shapes differ.
        identity = x if self.downsample is None else self.downsample(x)
        out += identity
        return self.relu(out)
class ResNet(nn.Module):
    """ResNet backbone (3x3 stem, no initial pooling) with an optional
    input/manifold mixup path in forward().

    forward() has two modes:
      * target given  -> mixup applied at a random depth (0..5), returns
        (features, logits, target_a, target_b);
      * target absent -> plain inference, returns (features, logits).
    """
    def __init__(self, block, layers, num_classes=200, zero_init_residual=False):
        super(ResNet, self).__init__()
        self.inplanes = 64
        self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        # Cosine-similarity classifier head instead of a plain linear layer.
        self.fc = distLinear(512 * block.expansion, num_classes)
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
        # Zero-initialize the last BN in each residual branch,
        # so that the residual branch starts with zeros, and each residual block behaves like an identity.
        # This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
        # NOTE(review): ``Bottleneck`` is not defined in this file; enabling
        # zero_init_residual would raise NameError here — confirm before use.
        if zero_init_residual:
            for m in self.modules():
                if isinstance(m, Bottleneck):
                    nn.init.constant_(m.bn3.weight, 0)
                elif isinstance(m, BasicBlock):
                    nn.init.constant_(m.bn2.weight, 0)
    def _make_layer(self, block, planes, blocks, stride=1):
        # Project the shortcut when the spatial size or channel count changes.
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                conv1x1(self.inplanes, planes * block.expansion, stride),
                nn.BatchNorm2d(planes * block.expansion),
            )
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = planes * block.expansion
        for _ in range(1, blocks):
            layers.append(block(self.inplanes, planes))
        return nn.Sequential(*layers)
    def forward(self, x, target=None, mixup=False, mixup_hidden = True, mixup_alpha=None, lam=0.4):
        if target is not None:
            # Choose where to mix: 0 = input, 1..4 = after each stage,
            # 5 = after the pooled features (applied after the classifier below).
            if mixup_hidden:
                layer_mix = random.randint(0,5)
            elif mixup:
                layer_mix = 0
            else:
                layer_mix = None
            out = x
            if layer_mix == 0:
                out, target_a, target_b, lam = mixup_data(out, target, lam=lam)
            out = self.relu(self.bn1(self.conv1(x)))
            out = self.layer1(out)
            if layer_mix == 1:
                out, target_a, target_b, lam = mixup_data(out, target, lam=lam)
            out = self.layer2(out)
            if layer_mix == 2:
                out, target_a, target_b, lam = mixup_data(out, target, lam=lam)
            out = self.layer3(out)
            if layer_mix == 3:
                out, target_a, target_b, lam = mixup_data(out, target, lam=lam)
            out = self.layer4(out)
            if layer_mix == 4:
                out, target_a, target_b, lam = mixup_data(out, target, lam=lam)
            out = self.avgpool(out)
            out = out.view(out.size(0), -1)
            out1 = self.fc.forward(out)
            # NOTE(review): if layer_mix is None (mixup_hidden and mixup both
            # False), target_a/target_b are never bound and the return below
            # raises UnboundLocalError; with layer_mix == 0 and the input
            # mixup, ``out`` is recomputed from the unmixed ``x`` on the stem
            # line above — confirm intended.
            if layer_mix == 5:
                out, target_a, target_b, lam = mixup_data(out, target, lam=lam)
            return out, out1, target_a, target_b
        else:
            out = self.conv1(x)
            out = self.bn1(out)
            out = self.relu(out)
            out = self.layer1(out)
            out = self.layer2(out)
            out = self.layer3(out)
            out = self.layer4(out)
            out = self.avgpool(out)
            out = out.view(out.size(0), -1)
            out1 = self.fc.forward(out)
            return out, out1
def resnet18(**kwargs):
    """Constructs a ResNet-18 model.
    """
    return ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)
| 7,280 | 35.58794 | 206 | py |
PT-MAP | PT-MAP-master/save_plk.py | from __future__ import print_function
import argparse
import csv
import os
import collections
import pickle
import random
import numpy as np
import torch
from torch.autograd import Variable
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.optim as optim
import torchvision.transforms as transforms
import torchvision.datasets as datasets
from io_utils import parse_args
from data.datamgr import SimpleDataManager , SetDataManager
import configs
import wrn_mixup_model
import res_mixup_model
import torch.nn.functional as F
from io_utils import parse_args, get_resume_file ,get_assigned_file
from os import path
use_gpu = torch.cuda.is_available()
class WrappedModel(nn.Module):
    """Expose a sub-module under the ``module`` attribute so checkpoints saved
    from nn.DataParallel (keys prefixed with 'module.') load directly."""

    def __init__(self, module):
        super(WrappedModel, self).__init__()
        self.module = module

    def forward(self, x):
        return self.module(x)
def save_pickle(file, data):
    """Serialise ``data`` to the path ``file`` with pickle."""
    with open(file, 'wb') as handle:
        pickle.dump(data, handle)
def load_pickle(file):
    """Deserialise and return the pickled object stored at path ``file``."""
    with open(file, 'rb') as handle:
        return pickle.load(handle)
def extract_feature(val_loader, model, checkpoint_dir, tag='last'):
    """Run ``model`` over ``val_loader`` and cache per-class features on disk.

    Returns a dict {label: [feature, ...]}.  If ``checkpoint_dir/tag/output.plk``
    already exists it is loaded and returned instead of recomputing.
    """
    save_dir = '{}/{}'.format(checkpoint_dir, tag)
    cache_file = save_dir + '/output.plk'
    if os.path.isfile(cache_file):
        return load_pickle(cache_file)
    if not os.path.isdir(save_dir):
        os.makedirs(save_dir)
    #model.eval()
    with torch.no_grad():
        output_dict = collections.defaultdict(list)
        for inputs, labels in val_loader:
            # compute output features on GPU, collect them on CPU as numpy
            inputs = inputs.cuda()
            labels = labels.cuda()
            outputs, _ = model(inputs)
            outputs = outputs.cpu().data.numpy()
            for feat, label in zip(outputs, labels):
                output_dict[label.item()].append(feat)
        save_pickle(cache_file, output_dict)
        return output_dict
if __name__ == '__main__':
    # Extract and cache novel-set features from a trained backbone checkpoint.
    params = parse_args('test')
    loadfile = configs.data_dir[params.dataset] + 'novel.json'
    if params.dataset == 'miniImagenet' or params.dataset == 'CUB':
        datamgr = SimpleDataManager(84, batch_size = 256)
        novel_loader = datamgr.get_data_loader(loadfile, aug = False)
    checkpoint_dir = '%s/checkpoints/%s/%s_%s' %(configs.save_dir, params.dataset, params.model, params.method)
    modelfile = get_resume_file(checkpoint_dir)
    # Build the requested backbone.
    if params.model == 'WideResNet28_10':
        model = wrn_mixup_model.wrn28_10(num_classes=params.num_classes)
    elif params.model == 'ResNet18':
        model = res_mixup_model.resnet18(num_classes=params.num_classes)
    model = model.cuda()
    cudnn.benchmark = True
    checkpoint = torch.load(modelfile)
    state = checkpoint['state']
    state_keys = list(state.keys())
    # Checkpoints saved from nn.DataParallel carry 'module.'-prefixed keys;
    # wrap the model so the state dict lines up.
    callwrap = False
    if 'module' in state_keys[0]:
        callwrap = True
    if callwrap:
        model = WrappedModel(model)
    model_dict_load = model.state_dict()
    model_dict_load.update(state)
    model.load_state_dict(model_dict_load)
    model.eval()
    output_dict=extract_feature(novel_loader, model, checkpoint_dir, tag='last')
    print("features saved!")
| 3,145 | 27.342342 | 108 | py |
PT-MAP | PT-MAP-master/FSLTask.py | import os
import pickle
import numpy as np
import torch
# from tqdm import tqdm
# ========================================================
# Usefull paths
_datasetFeaturesFiles = {"miniimagenet": "./checkpoints/miniImagenet/WideResNet28_10_S2M2_R/last/output.plk",
"cub": "./checkpoints/CUB/WideResNet28_10_S2M2_R/last/output.plk",
"cifar": "./checkpoints/cifar/WideResNet28_10_S2M2_R/last/output.plk",
"cross": "./checkpoints/cross/WideResNet28_10_S2M2_R/last/output.plk"}
_cacheDir = "./cache"
_maxRuns = 10000
_min_examples = -1
# ========================================================
# Module internal functions and variables
_randStates = None
_rsCfg = None
def _load_pickle(file):
with open(file, 'rb') as f:
data = pickle.load(f)
labels = [np.full(shape=len(data[key]), fill_value=key)
for key in data]
data = [features for key in data for features in data[key]]
dataset = dict()
dataset['data'] = torch.FloatTensor(np.stack(data, axis=0))
dataset['labels'] = torch.LongTensor(np.concatenate(labels))
return dataset
# =========================================================
# Callable variables and functions from outside the module
data = None
labels = None
dsName = None
def loadDataSet(dsname):
    """Load the pre-extracted feature file for ``dsname`` and build the
    module-level ``data`` tensor of shape [n_classes, _min_examples, dim].

    Also resets the cached random states so setRandomStates() regenerates or
    reloads them for the new dataset.  Raises NameError for unknown names.
    """
    if dsname not in _datasetFeaturesFiles:
        raise NameError('Unknwown dataset: {}'.format(dsname))
    global dsName, data, labels, _randStates, _rsCfg, _min_examples
    dsName = dsname
    _randStates = None
    _rsCfg = None
    # Loading data from files on computer
    # home = expanduser("~")
    dataset = _load_pickle(_datasetFeaturesFiles[dsname])
    # Computing the number of items per class in the dataset
    _min_examples = dataset["labels"].shape[0]
    for i in range(dataset["labels"].shape[0]):
        if torch.where(dataset["labels"] == dataset["labels"][i])[0].shape[0] > 0:
            _min_examples = min(_min_examples, torch.where(
                dataset["labels"] == dataset["labels"][i])[0].shape[0])
    print("Guaranteed number of items per class: {:d}\n".format(_min_examples))
    # Generating data tensors: one row of _min_examples features per class,
    # consuming ``labels`` class by class until empty.
    data = torch.zeros((0, _min_examples, dataset["data"].shape[1]))
    labels = dataset["labels"].clone()
    while labels.shape[0] > 0:
        indices = torch.where(dataset["labels"] == labels[0])[0]
        data = torch.cat([data, dataset["data"][indices, :]
                          [:_min_examples].view(1, _min_examples, -1)], dim=0)
        indices = torch.where(labels != labels[0])[0]
        labels = labels[indices]
    print("Total of {:d} classes, {:d} elements each, with dimension {:d}\n".format(
        data.shape[0], data.shape[1], data.shape[2]))
def GenerateRun(iRun, cfg, regenRState=False, generate=True):
    """Draw one few-shot episode (run ``iRun``) from the loaded dataset.

    Restores the numpy RNG state recorded for this run (unless regenRState,
    used while the states are first being recorded) so runs are reproducible.
    With generate=False only advances the RNG and returns None; otherwise
    returns a tensor [ways, shot+queries, dim].
    """
    global _randStates, data, _min_examples
    if not regenRState:
        np.random.set_state(_randStates[iRun])
    classes = np.random.permutation(np.arange(data.shape[0]))[:cfg["ways"]]
    shuffle_indices = np.arange(_min_examples)
    dataset = None
    if generate:
        dataset = torch.zeros(
            (cfg['ways'], cfg['shot']+cfg['queries'], data.shape[2]))
    for i in range(cfg['ways']):
        # Re-shuffle per class, then take the first shot+queries examples.
        shuffle_indices = np.random.permutation(shuffle_indices)
        if generate:
            dataset[i] = data[classes[i], shuffle_indices,
                              :][:cfg['shot']+cfg['queries']]
    return dataset
def ClassesInRun(iRun, cfg):
    """Return the class indices drawn for run ``iRun`` (restores its RNG state)."""
    global _randStates, data
    np.random.set_state(_randStates[iRun])
    return np.random.permutation(np.arange(data.shape[0]))[:cfg["ways"]]
def setRandomStates(cfg):
    """Load (or generate and cache) the per-run numpy RNG states for ``cfg``.

    States are cached under _cacheDir keyed by dataset and episode shape so
    the same episodes are reproduced across processes.  No-op if the states
    for this cfg are already loaded.
    """
    global _randStates, _maxRuns, _rsCfg
    if _rsCfg == cfg:
        return
    rsFile = os.path.join(_cacheDir, "RandStates_{}_s{}_q{}_w{}".format(
        dsName, cfg['shot'], cfg['queries'], cfg['ways']))
    if not os.path.exists(rsFile):
        print("{} does not exist, regenerating it...".format(rsFile))
        np.random.seed(0)
        _randStates = []
        # Record the RNG state before each run, then advance the RNG exactly
        # as GenerateRun would (generate=False skips the data copy).
        for iRun in range(_maxRuns):
            _randStates.append(np.random.get_state())
            GenerateRun(iRun, cfg, regenRState=True, generate=False)
        torch.save(_randStates, rsFile)
    else:
        print("reloading random states from file....")
        _randStates = torch.load(rsFile)
    _rsCfg = cfg
def GenerateRunSet(start=None, end=None, cfg=None):
    """Materialise runs [start, end) as one tensor
    [runs, ways, shot+queries, dim]."""
    global dataset, _maxRuns
    start = 0 if start is None else start
    end = _maxRuns if end is None else end
    if cfg is None:
        cfg = {"shot": 1, "ways": 5, "queries": 15}
    setRandomStates(cfg)
    print("generating task from {} to {}".format(start, end))
    dataset = torch.zeros(
        (end-start, cfg['ways'], cfg['shot']+cfg['queries'], data.shape[2]))
    for iRun in range(end-start):
        dataset[iRun] = GenerateRun(start+iRun, cfg)
    return dataset
# define a main code to test this module
if __name__ == "__main__":
print("Testing Task loader for Few Shot Learning")
loadDataSet('miniimagenet')
cfg = {"shot": 1, "ways": 5, "queries": 15}
setRandomStates(cfg)
run10 = GenerateRun(10, cfg)
print("First call:", run10[:2, :2, :2])
run10 = GenerateRun(10, cfg)
print("Second call:", run10[:2, :2, :2])
ds = GenerateRunSet(start=2, end=12, cfg=cfg)
print("Third call:", ds[8, :2, :2, :2])
print(ds.size())
| 5,459 | 32.090909 | 109 | py |
PT-MAP | PT-MAP-master/train_cifar.py | #!/usr/bin/env python3 -u
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree.
from __future__ import print_function
import argparse
import csv
import os
import numpy as np
import torch
from torch.autograd import Variable
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.optim as optim
import torchvision.transforms as transforms
import torchvision.datasets as datasets
from data.datamgr import SimpleDataManager
import configs
import wrn_mixup_model
import res_mixup_model
from io_utils import parse_args, get_resume_file ,get_assigned_file
use_gpu = torch.cuda.is_available()
def train_manifold_mixup(base_loader, base_loader_test, model, start_epoch, stop_epoch, params):
    """Train ``model`` with manifold mixup (S2M2 fine-tuning stage).

    Each epoch: mixup training pass over base_loader, periodic checkpointing
    to params.checkpoint_dir, then a plain evaluation pass over
    base_loader_test.  Returns the trained model.
    """
    def mixup_criterion(criterion, pred, y_a, y_b, lam):
        # Mixup loss: convex combination of the losses against both target sets.
        return lam * criterion(pred, y_a) + (1 - lam) * criterion(pred, y_b)
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.Adam(model.parameters())
    print("stop_epoch", start_epoch, stop_epoch)
    for epoch in range(start_epoch, stop_epoch):
        print('\nEpoch: %d' % epoch)
        model.train()
        train_loss = 0
        reg_loss = 0
        correct = 0
        correct1 = 0.0
        total = 0
        for batch_idx, (input_var, target_var) in enumerate(base_loader):
            if use_gpu:
                input_var, target_var = input_var.cuda(), target_var.cuda()
            input_var, target_var = Variable(input_var), Variable(target_var)
            # Mixing coefficient drawn from Beta(alpha, alpha) per batch.
            lam = np.random.beta(params.alpha, params.alpha)
            _ , outputs , target_a , target_b = model(input_var, target_var, mixup_hidden= True, mixup_alpha = params.alpha , lam = lam)
            loss = mixup_criterion(criterion, outputs, target_a, target_b, lam)
            train_loss += loss.data.item()
            _, predicted = torch.max(outputs.data, 1)
            total += target_var.size(0)
            # Accuracy is itself mixed: lam-weighted matches against both targets.
            correct += (lam * predicted.eq(target_a.data).cpu().sum().float()
                        + (1 - lam) * predicted.eq(target_b.data).cpu().sum().float())
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            if batch_idx%50 ==0 :
                print('{0}/{1}'.format(batch_idx,len(base_loader)), 'Loss: %.3f | Acc: %.3f%% '
                      % (train_loss/(batch_idx+1),100.*correct/total))
        if not os.path.isdir(params.checkpoint_dir):
            os.makedirs(params.checkpoint_dir)
        # Save every save_freq epochs and always on the last epoch.
        if (epoch % params.save_freq==0) or (epoch==stop_epoch-1):
            outfile = os.path.join(params.checkpoint_dir, '{:d}.tar'.format(epoch))
            torch.save({'epoch':epoch, 'state':model.state_dict() }, outfile)
        model.eval()
        with torch.no_grad():
            test_loss = 0
            correct = 0
            total = 0
            for batch_idx, (inputs, targets) in enumerate(base_loader_test):
                if use_gpu:
                    inputs, targets = inputs.cuda(), targets.cuda()
                inputs, targets = Variable(inputs), Variable(targets)
                f , outputs = model.forward(inputs)
                loss = criterion(outputs, targets)
                test_loss += loss.data.item()
                _, predicted = torch.max(outputs.data, 1)
                total += targets.size(0)
                correct += predicted.eq(targets.data).cpu().sum()
            print('Loss: %.3f | Acc: %.3f%%'
                  % (test_loss/(batch_idx+1), 100.*correct/total ))
        torch.cuda.empty_cache()
    return model
def train_rotation(base_loader, base_loader_test, model, start_epoch, stop_epoch, params , tmp):
    """Stage-1 training: class prediction plus an auxiliary 4-way rotation task.

    Each image is expanded into its 0/90/180/270-degree rotations; a small
    linear head predicts the rotation from the backbone features, and the
    total loss is the equal-weight sum of both cross-entropies.  Checkpoints
    include the rotation head.  Returns the trained model.
    """
    if params.model == 'WideResNet28_10':
        rotate_classifier = nn.Sequential(nn.Linear(640,4))
    elif params.model == 'ResNet18':
        rotate_classifier = nn.Sequential(nn.Linear(512,4))
    if use_gpu:
        rotate_classifier.cuda()
    # Resume the rotation head as well, when the checkpoint carries it.
    if tmp is not None and 'rotate' in tmp:
        print("loading rotate model")
        rotate_classifier.load_state_dict(tmp['rotate'])
    optimizer = torch.optim.Adam([
            {'params': model.parameters()},
            {'params': rotate_classifier.parameters()}
        ])
    lossfn = nn.CrossEntropyLoss()
    max_acc = 0
    print("stop_epoch" , start_epoch, stop_epoch )
    for epoch in range(start_epoch,stop_epoch):
        rotate_classifier.train()
        model.train()
        avg_loss=0
        avg_rloss=0
        for i, (x,y) in enumerate(base_loader):
            bs = x.size(0)
            x_ = []
            y_ = []
            a_ = []
            # Build the 4x-augmented batch: each image at 0/90/180/270 degrees,
            # with its class label repeated and a rotation label 0..3.
            for j in range(bs):
                x90 = x[j].transpose(2,1).flip(1)
                x180 = x90.transpose(2,1).flip(1)
                x270 = x180.transpose(2,1).flip(1)
                x_ += [x[j], x90, x180, x270]
                y_ += [y[j] for _ in range(4)]
                a_ += [torch.tensor(0),torch.tensor(1),torch.tensor(2),torch.tensor(3)]
            x_ = Variable(torch.stack(x_,0))
            y_ = Variable(torch.stack(y_,0))
            a_ = Variable(torch.stack(a_,0))
            if use_gpu:
                x_ = x_.cuda()
                y_ = y_.cuda()
                a_ = a_.cuda()
            f,scores = model.forward(x_)
            rotate_scores = rotate_classifier(f)
            optimizer.zero_grad()
            rloss = lossfn(rotate_scores,a_)
            closs = lossfn(scores, y_)
            # Equal-weight sum of classification and rotation losses.
            loss = 0.5*closs + 0.5*rloss
            loss.backward()
            optimizer.step()
            avg_loss = avg_loss+closs.data.item()
            avg_rloss = avg_rloss+rloss.data.item()
            if i % 50 ==0:
                print('Epoch {:d} | Batch {:d}/{:d} | Loss {:f} | Rotate Loss {:f}'.format(epoch, i, len(base_loader), avg_loss/float(i+1),avg_rloss/float(i+1) ))
        if not os.path.isdir(params.checkpoint_dir):
            os.makedirs(params.checkpoint_dir)
        if (epoch % params.save_freq==0) or (epoch==stop_epoch-1):
            outfile = os.path.join(params.checkpoint_dir, '{:d}.tar'.format(epoch))
            torch.save({'epoch':epoch, 'state':model.state_dict() , 'rotate': rotate_classifier.state_dict()}, outfile)
        model.eval()
        rotate_classifier.eval()
        with torch.no_grad():
            correct = rcorrect = total = 0
            # Evaluate on only the first 10 test batches to keep epochs fast.
            for i,(x,y) in enumerate(base_loader_test):
                if i<10:
                    bs = x.size(0)
                    x_ = []
                    y_ = []
                    a_ = []
                    for j in range(bs):
                        x90 = x[j].transpose(2,1).flip(1)
                        x180 = x90.transpose(2,1).flip(1)
                        x270 = x180.transpose(2,1).flip(1)
                        x_ += [x[j], x90, x180, x270]
                        y_ += [y[j] for _ in range(4)]
                        a_ += [torch.tensor(0),torch.tensor(1),torch.tensor(2),torch.tensor(3)]
                    x_ = Variable(torch.stack(x_,0))
                    y_ = Variable(torch.stack(y_,0))
                    a_ = Variable(torch.stack(a_,0))
                    if use_gpu:
                        x_ = x_.cuda()
                        y_ = y_.cuda()
                        a_ = a_.cuda()
                    f,scores = model(x_)
                    rotate_scores = rotate_classifier(f)
                    p1 = torch.argmax(scores,1)
                    correct += (p1==y_).sum().item()
                    total += p1.size(0)
                    p2 = torch.argmax(rotate_scores,1)
                    rcorrect += (p2==a_).sum().item()
            print("Epoch {0} : Accuracy {1}, Rotate Accuracy {2}".format(epoch,(float(correct)*100)/total,(float(rcorrect)*100)/total))
        torch.cuda.empty_cache()
    return model
if __name__ == '__main__':
    # CIFAR training entry point: stage 'rotation' pre-trains with the
    # rotation auxiliary task; stage 'S2M2_R' fine-tunes with manifold mixup,
    # resuming either from its own checkpoints or from the rotation stage.
    params = parse_args('train')
    params.dataset = 'cifar'
    image_size = 32
    base_file = configs.data_dir[params.dataset] + 'base.json'
    params.checkpoint_dir = '%s/checkpoints/%s/%s_%s' %(configs.save_dir, params.dataset, params.model, params.method)
    start_epoch = params.start_epoch
    stop_epoch = params.stop_epoch
    base_datamgr = SimpleDataManager(image_size, batch_size = params.batch_size)
    base_loader = base_datamgr.get_data_loader( base_file , aug = params.train_aug )
    # Evaluation uses the same split without augmentation.
    base_datamgr_test = SimpleDataManager(image_size, batch_size = params.test_batch_size)
    base_loader_test = base_datamgr_test.get_data_loader( base_file , aug = False )
    if params.model == 'WideResNet28_10':
        model = wrn_mixup_model.wrn28_10(num_classes=64)
    elif params.model == 'ResNet18':
        model = res_mixup_model.resnet18(num_classes=64)
    if params.method =='S2M2_R':
        if use_gpu:
            if torch.cuda.device_count() > 1:
                model = torch.nn.DataParallel(model, device_ids = range(torch.cuda.device_count()))
            model.cuda()
        if params.resume:
            # Resume S2M2 fine-tuning from its own latest checkpoint.
            resume_file = get_resume_file(params.checkpoint_dir )
            print("resume_file" , resume_file)
            tmp = torch.load(resume_file)
            start_epoch = tmp['epoch']+1
            print("restored epoch is" , tmp['epoch'])
            state = tmp['state']
            model.load_state_dict(state)
        else:
            # Warm-start from the rotation-pretraining checkpoint instead.
            resume_rotate_file_dir = params.checkpoint_dir.replace("S2M2_R","rotation")
            resume_file = get_resume_file( resume_rotate_file_dir )
            print("resume_file" , resume_file)
            tmp = torch.load(resume_file)
            start_epoch = tmp['epoch']+1
            print("restored epoch is" , tmp['epoch'])
            state = tmp['state']
            state_keys = list(state.keys())
            '''
            for i, key in enumerate(state_keys):
                if "feature." in key:
                    newkey = key.replace("feature.","") # an architecture model has attribute 'feature', load architecture feature to backbone by casting name from 'feature.trunk.xx' to 'trunk.xx'
                    state[newkey] = state.pop(key)
                else:
                    state[key.replace("classifier.","linear.")] = state[key]
                    state.pop(key)
            '''
            model.load_state_dict(state)
        model = train_manifold_mixup(base_loader, base_loader_test, model, start_epoch, start_epoch+stop_epoch, params)
    elif params.method =='rotation':
        if use_gpu:
            if torch.cuda.device_count() > 1:
                model = torch.nn.DataParallel(model, device_ids = range(torch.cuda.device_count()))
            model.cuda()
        if params.resume:
            resume_file = get_resume_file(params.checkpoint_dir )
            print("resume_file" , resume_file)
            tmp = torch.load(resume_file)
            start_epoch = tmp['epoch']+1
            print("restored epoch is" , tmp['epoch'])
            state = tmp['state']
            model.load_state_dict(state)
        model = train_rotation(base_loader, base_loader_test, model, start_epoch, stop_epoch, params, None)
| 11,421 | 35.375796 | 199 | py |
PT-MAP | PT-MAP-master/train.py | #!/usr/bin/env python3 -u
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree.
from __future__ import print_function
import argparse
import csv
import os
import numpy as np
import torch
from torch.autograd import Variable
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.optim as optim
import torchvision.transforms as transforms
import torchvision.datasets as datasets
from data.datamgr import SimpleDataManager , SetDataManager
import configs
import wrn_mixup_model
import res_mixup_model
from io_utils import parse_args, get_resume_file ,get_assigned_file
from os import path
use_gpu = torch.cuda.is_available()
image_size = 84
def train_s2m2(base_loader, base_loader_test, model, start_epoch, stop_epoch, params, tmp):
    """Train `model` with the S2M2 recipe: a manifold-mixup classification loss
    plus an auxiliary 4-way rotation-prediction loss on a quarter of each batch.

    base_loader / base_loader_test: train / eval DataLoaders of (images, labels).
    model: backbone whose mixup forward returns (features, logits, target_a,
        target_b) and whose plain forward returns (features, logits).
    start_epoch, stop_epoch: half-open epoch range.
    params: parsed CLI namespace (model, alpha, checkpoint_dir, save_freq, ...).
    tmp: checkpoint dict; if it contains 'rotate', the rotation head resumes
        from it. NOTE(review): a None `tmp` would crash on `'rotate' in tmp`;
        callers in this file pass {} — confirm other call sites.
    Returns the trained model. Side effects: saves '{epoch}.tar' checkpoints
    under params.checkpoint_dir and prints train/test metrics.
    """
    def mixup_criterion(criterion, pred, y_a, y_b, lam):
        # Mixup loss: convex combination of losses against both mixed targets.
        return lam * criterion(pred, y_a) + (1 - lam) * criterion(pred, y_b)
    criterion = nn.CrossEntropyLoss()
    # Rotation head width matches the backbone feature size (640 WRN / 512 ResNet18).
    if params.model == 'WideResNet28_10':
        rotate_classifier = nn.Sequential(nn.Linear(640,4))
    elif params.model == 'ResNet18':
        rotate_classifier = nn.Sequential(nn.Linear(512,4))
    rotate_classifier.cuda()
    if 'rotate' in tmp:
        print("loading rotate model")
        rotate_classifier.load_state_dict(tmp['rotate'])
    # Single optimizer over both the backbone and the rotation head.
    optimizer = torch.optim.Adam([
            {'params': model.parameters()},
            {'params': rotate_classifier.parameters()}
        ])
    print("stop_epoch", start_epoch, stop_epoch)
    for epoch in range(start_epoch, stop_epoch):
        print('\nEpoch: %d' % epoch)
        model.train()
        train_loss = 0
        rotate_loss = 0
        correct = 0
        total = 0
        torch.cuda.empty_cache()
        for batch_idx, (inputs, targets) in enumerate(base_loader):
            if use_gpu:
                inputs, targets = inputs.cuda(), targets.cuda()
            # --- manifold-mixup pass: first backward, gradients kept until step()
            lam = np.random.beta(params.alpha, params.alpha)
            f , outputs , target_a , target_b = model(inputs, targets, mixup_hidden= True , mixup_alpha = params.alpha , lam = lam)
            loss = mixup_criterion(criterion, outputs, target_a, target_b, lam)
            train_loss += loss.data.item()
            optimizer.zero_grad()
            loss.backward()
            _, predicted = torch.max(outputs.data, 1)
            total += targets.size(0)
            # Soft accuracy: credit split between both mixup targets by lam.
            correct += (lam * predicted.eq(target_a.data).cpu().sum().float()
                        + (1 - lam) * predicted.eq(target_b.data).cpu().sum().float())
            # --- rotation pass on a random quarter of the batch, 4 rotations each
            bs = inputs.size(0)
            inputs_ = []
            targets_ = []
            a_ = []
            indices = np.arange(bs)
            np.random.shuffle(indices)
            split_size = int(bs/4)
            for j in indices[0:split_size]:
                # transpose(2,1).flip(1) rotates a CHW image by 90 degrees.
                x90 = inputs[j].transpose(2,1).flip(1)
                x180 = x90.transpose(2,1).flip(1)
                x270 = x180.transpose(2,1).flip(1)
                inputs_ += [inputs[j], x90, x180, x270]
                targets_ += [targets[j] for _ in range(4)]
                a_ += [torch.tensor(0),torch.tensor(1),torch.tensor(2),torch.tensor(3)]
            inputs = Variable(torch.stack(inputs_,0))
            targets = Variable(torch.stack(targets_,0))
            a_ = Variable(torch.stack(a_,0))
            if use_gpu:
                inputs = inputs.cuda()
                targets = targets.cuda()
                a_ = a_.cuda()
            rf , outputs = model(inputs)
            rotate_outputs = rotate_classifier(rf)
            rloss = criterion(rotate_outputs,a_)
            closs = criterion(outputs, targets)
            loss = (rloss+closs)/2.0
            rotate_loss += rloss.data.item()
            # Second backward accumulates onto the mixup gradients; one step applies both.
            loss.backward()
            optimizer.step()
            if batch_idx%50 ==0 :
                print('{0}/{1}'.format(batch_idx,len(base_loader)),
                             'Loss: %.3f | Acc: %.3f%% | RotLoss: %.3f '
                             % (train_loss/(batch_idx+1),
                                100.*correct/total,rotate_loss/(batch_idx+1)))
        # Per-epoch checkpoint (every save_freq epochs and at the final epoch).
        if not os.path.isdir(params.checkpoint_dir):
            os.makedirs(params.checkpoint_dir)
        if (epoch % params.save_freq==0) or (epoch==stop_epoch-1):
            outfile = os.path.join(params.checkpoint_dir, '{:d}.tar'.format(epoch))
            torch.save({'epoch':epoch, 'state':model.state_dict() }, outfile)
        # Evaluation on the (non-augmented) base split.
        model.eval()
        with torch.no_grad():
            test_loss = 0
            correct = 0
            total = 0
            for batch_idx, (inputs, targets) in enumerate(base_loader_test):
                if use_gpu:
                    inputs, targets = inputs.cuda(), targets.cuda()
                inputs, targets = Variable(inputs), Variable(targets)
                f , outputs = model.forward(inputs)
                loss = criterion(outputs, targets)
                test_loss += loss.data.item()
                _, predicted = torch.max(outputs.data, 1)
                total += targets.size(0)
                correct += predicted.eq(targets.data).cpu().sum()
            print('Loss: %.3f | Acc: %.3f%%'
                         % (test_loss/(batch_idx+1), 100.*correct/total ))
    return model
def train_rotation(base_loader, base_loader_test, model, start_epoch, stop_epoch, params, tmp):
    """Pretrain `model` with joint classification + 4-way rotation prediction.

    Every image in a batch is expanded into its 0/90/180/270-degree rotations;
    the backbone is trained on class labels while a small linear head predicts
    the rotation index from the backbone features.

    base_loader / base_loader_test: train / eval DataLoaders of (images, labels).
    model: backbone whose forward returns (features, logits).
    tmp: checkpoint dict; resumes the rotation head from tmp['rotate'] if present.
        NOTE(review): a None `tmp` would crash on `'rotate' in tmp` — this file
        passes {}, but at least one other call site appears to pass None.
    Returns the trained model. Side effects: saves '{epoch}.tar' checkpoints
    (including the rotation head) and prints train/eval metrics.
    """
    if params.model == 'WideResNet28_10':
        rotate_classifier = nn.Sequential(nn.Linear(640,4))
    elif params.model == 'ResNet18':
        rotate_classifier = nn.Sequential(nn.Linear(512,4))
    if use_gpu:
        rotate_classifier.cuda()
    if 'rotate' in tmp:
        print("loading rotate model")
        rotate_classifier.load_state_dict(tmp['rotate'])
    # Single optimizer over both the backbone and the rotation head.
    optimizer = torch.optim.Adam([
            {'params': model.parameters()},
            {'params': rotate_classifier.parameters()}
        ])
    lossfn = nn.CrossEntropyLoss()
    max_acc = 0
    print("stop_epoch" , start_epoch, stop_epoch )
    for epoch in range(start_epoch,stop_epoch):
        rotate_classifier.train()
        model.train()
        avg_loss=0
        avg_rloss=0
        for i, (x,y) in enumerate(base_loader):
            # Expand the batch 4x: each image with its 0/90/180/270 rotations.
            bs = x.size(0)
            x_ = []
            y_ = []
            a_ = []
            for j in range(bs):
                # transpose(2,1).flip(1) rotates a CHW image by 90 degrees.
                x90 = x[j].transpose(2,1).flip(1)
                x180 = x90.transpose(2,1).flip(1)
                x270 = x180.transpose(2,1).flip(1)
                x_ += [x[j], x90, x180, x270]
                y_ += [y[j] for _ in range(4)]
                a_ += [torch.tensor(0),torch.tensor(1),torch.tensor(2),torch.tensor(3)]
            x_ = Variable(torch.stack(x_,0))
            y_ = Variable(torch.stack(y_,0))
            a_ = Variable(torch.stack(a_,0))
            if use_gpu:
                x_ = x_.cuda()
                y_ = y_.cuda()
                a_ = a_.cuda()
            f,scores = model.forward(x_)
            rotate_scores = rotate_classifier(f)
            optimizer.zero_grad()
            rloss = lossfn(rotate_scores,a_)
            closs = lossfn(scores, y_)
            # Equal weighting of classification and rotation losses.
            loss = closs + rloss
            loss.backward()
            optimizer.step()
            avg_loss = avg_loss+closs.data.item()
            avg_rloss = avg_rloss+rloss.data.item()
            if i % 50 ==0:
                print('Epoch {:d} | Batch {:d}/{:d} | Loss {:f} | Rotate Loss {:f}'.format(epoch, i, len(base_loader), avg_loss/float(i+1),avg_rloss/float(i+1) ))
        # Per-epoch checkpoint (backbone + rotation head).
        if not os.path.isdir(params.checkpoint_dir):
            os.makedirs(params.checkpoint_dir)
        if (epoch % params.save_freq==0) or (epoch==stop_epoch-1):
            outfile = os.path.join(params.checkpoint_dir, '{:d}.tar'.format(epoch))
            torch.save({'epoch':epoch, 'state':model.state_dict() , 'rotate': rotate_classifier.state_dict()}, outfile)
        # Quick eval: only the first 2 test batches (a cheap sanity check).
        model.eval()
        rotate_classifier.eval()
        with torch.no_grad():
            correct = rcorrect = total = 0
            for i,(x,y) in enumerate(base_loader_test):
                if i<2:
                    bs = x.size(0)
                    x_ = []
                    y_ = []
                    a_ = []
                    for j in range(bs):
                        x90 = x[j].transpose(2,1).flip(1)
                        x180 = x90.transpose(2,1).flip(1)
                        x270 = x180.transpose(2,1).flip(1)
                        x_ += [x[j], x90, x180, x270]
                        y_ += [y[j] for _ in range(4)]
                        a_ += [torch.tensor(0),torch.tensor(1),torch.tensor(2),torch.tensor(3)]
                    x_ = Variable(torch.stack(x_,0))
                    y_ = Variable(torch.stack(y_,0))
                    a_ = Variable(torch.stack(a_,0))
                    if use_gpu:
                        x_ = x_.cuda()
                        y_ = y_.cuda()
                        a_ = a_.cuda()
                    f,scores = model(x_)
                    rotate_scores = rotate_classifier(f)
                    p1 = torch.argmax(scores,1)
                    correct += (p1==y_).sum().item()
                    total += p1.size(0)
                    p2 = torch.argmax(rotate_scores,1)
                    rcorrect += (p2==a_).sum().item()
            print("Epoch {0} : Accuracy {1}, Rotate Accuracy {2}".format(epoch,(float(correct)*100)/total,(float(rcorrect)*100)/total))
        torch.cuda.empty_cache()
    return model
if __name__ == '__main__':
    # Entry point: build data loaders and the backbone, optionally resume from
    # a checkpoint, then run S2M2_R or rotation pretraining.
    params = parse_args('train')
    base_file = configs.data_dir[params.dataset] + 'base.json'
    params.checkpoint_dir = '%s/checkpoints/%s/%s_%s' %(configs.save_dir, params.dataset, params.model, params.method)
    start_epoch = params.start_epoch
    stop_epoch = params.stop_epoch
    # Train loader (optionally augmented) and a non-augmented eval loader over
    # the same base split.
    base_datamgr = SimpleDataManager(image_size, batch_size = params.batch_size)
    base_loader = base_datamgr.get_data_loader( base_file , aug = params.train_aug )
    base_datamgr_test = SimpleDataManager(image_size, batch_size = params.test_batch_size)
    base_loader_test = base_datamgr_test.get_data_loader( base_file , aug = False )
    if params.model == 'WideResNet28_10':
        model = wrn_mixup_model.wrn28_10(num_classes=params.num_classes)
    elif params.model == 'ResNet18':
        model = res_mixup_model.resnet18(num_classes=params.num_classes)
    if params.method =='S2M2_R':
        if use_gpu:
            if torch.cuda.device_count() > 1:
                model = torch.nn.DataParallel(model, device_ids = range(torch.cuda.device_count()))
            model.cuda()
        if params.resume:
            # Resume S2M2 training from its own latest checkpoint.
            resume_file = get_resume_file(params.checkpoint_dir )
            print("resume_file" , resume_file)
            tmp = torch.load(resume_file)
            start_epoch = tmp['epoch']+1
            print("restored epoch is" , tmp['epoch'])
            state = tmp['state']
            model.load_state_dict(state)
        else:
            # Warm-start S2M2 from a rotation-pretrained checkpoint of the same
            # dataset/model (checkpoint dir differs only in the method suffix).
            resume_rotate_file_dir = params.checkpoint_dir.replace("S2M2_R","rotation")
            resume_file = get_resume_file( resume_rotate_file_dir )
            print("resume_file" , resume_file)
            tmp = torch.load(resume_file)
            start_epoch = tmp['epoch']+1
            print("restored epoch is" , tmp['epoch'])
            state = tmp['state']
            state_keys = list(state.keys())
            # The triple-quoted block below is disabled key-renaming logic for
            # checkpoints saved with 'feature.' / 'classifier.' prefixes.
            '''
            for i, key in enumerate(state_keys):
                if "feature." in key:
                    newkey = key.replace("feature.","") # an architecture model has attribute 'feature', load architecture feature to backbone by casting name from 'feature.trunk.xx' to 'trunk.xx'
                    state[newkey] = state.pop(key)
                else:
                    state[key.replace("classifier.","linear.")] = state[key]
                    state.pop(key)
            '''
        model.load_state_dict(state)
        # Note: stop_epoch is relative here (start_epoch + stop_epoch more epochs).
        model = train_s2m2(base_loader, base_loader_test, model, start_epoch, start_epoch+stop_epoch, params, {})
    elif params.method =='rotation':
        if use_gpu:
            if torch.cuda.device_count() > 1:
                model = torch.nn.DataParallel(model, device_ids = range(torch.cuda.device_count()))
            model.cuda()
        if params.resume:
            resume_file = get_resume_file(params.checkpoint_dir )
            print("resume_file" , resume_file)
            tmp = torch.load(resume_file)
            start_epoch = tmp['epoch']+1
            print("restored epoch is" , tmp['epoch'])
            state = tmp['state']
            model.load_state_dict(state)
        model = train_rotation(base_loader, base_loader_test, model, start_epoch, stop_epoch, params, {})
| 13,168 | 35.278237 | 199 | py |
PT-MAP | PT-MAP-master/data/additional_transforms.py | # Copyright 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch
from PIL import ImageEnhance
# Map config keys to the PIL enhancement classes they select.
transformtypedict = dict(Brightness=ImageEnhance.Brightness, Contrast=ImageEnhance.Contrast, Sharpness=ImageEnhance.Sharpness, Color=ImageEnhance.Color)
class ImageJitter(object):
    """Applies each configured PIL enhancement with a random strength.

    transformdict maps an enhancement name (key of transformtypedict) to a
    jitter amplitude alpha; each call enhances the image by a factor drawn
    uniformly from [1 - alpha, 1 + alpha].
    """
    def __init__(self, transformdict):
        self.transforms = [(transformtypedict[name], transformdict[name]) for name in transformdict]
    def __call__(self, img):
        out = img
        # One random strength per configured enhancement.
        strengths = torch.rand(len(self.transforms))
        for idx, (enhancer, alpha) in enumerate(self.transforms):
            factor = alpha * (strengths[idx] * 2.0 - 1.0) + 1
            out = enhancer(out).enhance(factor).convert('RGB')
        return out
| 850 | 24.787879 | 150 | py |
PT-MAP | PT-MAP-master/data/dataset.py | # This code is modified from https://github.com/facebookresearch/low-shot-shrink-hallucinate
import torch
from PIL import Image
import json
import numpy as np
import torchvision.transforms as transforms
import os
def identity(x):
    """Default target transform: pass labels through unchanged."""
    return x
class SimpleDataset:
    """Flat image dataset backed by a JSON meta file.

    The meta file holds parallel lists 'image_names' (paths) and
    'image_labels'. __getitem__ returns (transformed image, transformed label).
    """
    def __init__(self, data_file, transform, target_transform=identity):
        with open(data_file, 'r') as handle:
            self.meta = json.load(handle)
        self.transform = transform
        self.target_transform = target_transform
    def __getitem__(self, i):
        path = os.path.join(self.meta['image_names'][i])
        image = self.transform(Image.open(path).convert('RGB'))
        label = self.target_transform(self.meta['image_labels'][i])
        return image, label
    def __len__(self):
        return len(self.meta['image_names'])
class SetDataset:
    """Episode-style dataset: one inner DataLoader per class.

    __getitem__(i) returns a single shuffled batch drawn from class i;
    __len__ is the number of distinct classes.
    """
    def __init__(self, data_file, batch_size, transform):
        with open(data_file, 'r') as handle:
            self.meta = json.load(handle)
        self.cl_list = np.unique(self.meta['image_labels']).tolist()
        # Group image paths by their class label.
        self.sub_meta = {cl: [] for cl in self.cl_list}
        for name, label in zip(self.meta['image_names'], self.meta['image_labels']):
            self.sub_meta[label].append(name)
        loader_params = dict(batch_size = batch_size,
                             shuffle = True,
                             num_workers = 0, # main thread only, or batches may interleave
                             pin_memory = False)
        self.sub_dataloader = [
            torch.utils.data.DataLoader(SubDataset(self.sub_meta[cl], cl, transform = transform),
                                        **loader_params)
            for cl in self.cl_list
        ]
    def __getitem__(self, i):
        return next(iter(self.sub_dataloader[i]))
    def __len__(self):
        return len(self.cl_list)
class SubDataset:
    """Images of a single class; yields (transformed image, transformed label).

    Note: the ToTensor() default is evaluated once at class-definition time
    and shared across instances (kept for interface compatibility).
    """
    def __init__(self, sub_meta, cl, transform=transforms.ToTensor(), target_transform=identity):
        self.sub_meta = sub_meta
        self.cl = cl
        self.transform = transform
        self.target_transform = target_transform
    def __getitem__(self, i):
        path = os.path.join(self.sub_meta[i])
        image = self.transform(Image.open(path).convert('RGB'))
        return image, self.target_transform(self.cl)
    def __len__(self):
        return len(self.sub_meta)
class EpisodicBatchSampler(object):
    """Batch sampler for episodic training: each of `n_episodes` iterations
    yields `n_way` distinct class indices drawn uniformly from `n_classes`."""
    def __init__(self, n_classes, n_way, n_episodes):
        self.n_classes = n_classes
        self.n_way = n_way
        self.n_episodes = n_episodes
    def __len__(self):
        return self.n_episodes
    def __iter__(self):
        # A fresh random permutation per episode; keep the first n_way entries.
        return (torch.randperm(self.n_classes)[:self.n_way]
                for _ in range(self.n_episodes))
| 2,920 | 32.193182 | 108 | py |
PT-MAP | PT-MAP-master/data/datamgr.py | # This code is modified from https://github.com/facebookresearch/low-shot-shrink-hallucinate
import torch
from PIL import Image
import numpy as np
import torchvision.transforms as transforms
import data.additional_transforms as add_transforms
from data.dataset import SimpleDataset, SetDataset, EpisodicBatchSampler
from abc import abstractmethod
class TransformLoader:
    """Factory for torchvision preprocessing pipelines.

    Defaults use ImageNet normalization statistics; `aug=True` builds the
    train-time augmented pipeline, otherwise a deterministic resize+crop one.
    """
    def __init__(self, image_size,
                 normalize_param = dict(mean= [0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225]),
                 jitter_param = dict(Brightness=0.4, Contrast=0.4, Color=0.4)):
        self.image_size = image_size
        self.normalize_param = normalize_param
        self.jitter_param = jitter_param
    def parse_transform(self, transform_type):
        """Instantiate one transform by its torchvision name (or 'ImageJitter')."""
        if transform_type == 'ImageJitter':
            return add_transforms.ImageJitter(self.jitter_param)
        factory = getattr(transforms, transform_type)
        if transform_type in ('RandomSizedCrop', 'CenterCrop'):
            return factory(self.image_size)
        if transform_type == 'Scale':
            # Resize slightly larger than the crop so CenterCrop has margin.
            side = int(self.image_size * 1.15)
            return factory([side, side])
        if transform_type == 'Normalize':
            return factory(**self.normalize_param)
        return factory()
    def get_composed_transform(self, aug = False):
        if aug:
            names = ['RandomSizedCrop', 'ImageJitter', 'RandomHorizontalFlip', 'ToTensor', 'Normalize']
        else:
            names = ['Scale', 'CenterCrop', 'ToTensor', 'Normalize']
        return transforms.Compose([self.parse_transform(name) for name in names])
class DataManager:
    """Common interface for dataset managers: subclasses build a DataLoader
    from a JSON meta file."""

    @abstractmethod
    def get_data_loader(self, data_file, aug):
        """Return a DataLoader over `data_file`; `aug` toggles augmentation."""
class SimpleDataManager(DataManager):
    """Plain shuffled-batch loader over a whole split."""
    def __init__(self, image_size, batch_size):
        super(SimpleDataManager, self).__init__()
        self.batch_size = batch_size
        self.trans_loader = TransformLoader(image_size)
    def get_data_loader(self, data_file, aug):
        """Build the loader; `aug` selects the train-time augmented pipeline."""
        transform = self.trans_loader.get_composed_transform(aug)
        dataset = SimpleDataset(data_file, transform)
        loader_kwargs = dict(batch_size = self.batch_size, shuffle = True, num_workers = 12, pin_memory = True)
        return torch.utils.data.DataLoader(dataset, **loader_kwargs)
class SetDataManager(DataManager):
    """Episodic loader: each batch is an n_way episode of (support + query)
    images per class."""
    def __init__(self, image_size, n_way, n_support, n_query, n_eposide =100):
        super(SetDataManager, self).__init__()
        self.image_size = image_size
        self.n_way = n_way
        self.batch_size = n_support + n_query
        self.n_eposide = n_eposide
        self.trans_loader = TransformLoader(image_size)
    def get_data_loader(self, data_file, aug):
        """Build the episodic loader; `aug` selects train-time augmentation."""
        transform = self.trans_loader.get_composed_transform(aug)
        dataset = SetDataset(data_file, self.batch_size, transform)
        sampler = EpisodicBatchSampler(len(dataset), self.n_way, self.n_eposide)
        loader_kwargs = dict(batch_sampler = sampler, num_workers = 12, pin_memory = True)
        return torch.utils.data.DataLoader(dataset, **loader_kwargs)
| 3,515 | 40.857143 | 123 | py |
GSA | GSA-main/GSA_CVPR/utils.py | import torch
import torch.nn.functional as F
def cutmix_data(x, y, Basic_model,alpha=1.0, cutmix_prob=0.5,):
assert alpha > 0
# generate mixed sample
lam = np.random.beta(alpha, alpha)
batch_size = x.size()[0]
index = torch.randperm(batch_size)
if torch.cuda.is_available():
index = index.cuda()
y_a, y_b = y, y[index]
bbx1, bby1, bbx2, bby2 = rand_bbox(x.size(), lam,x,Basic_model)
#for ii in range(batch_size):x[ii,:,bbx1[ii]:bbx2[ii],bby1[ii]:bby2[ii]]=x[index][ii,:,bbx1[index][ii]:bbx2[index][ii],bby1[index][ii]:bby2[index][ii]]
x[:, :, bbx1:bbx2, bby1:bby2] = x[index, :, bbx1:bbx2, bby1:bby2]
# adjust lambda to exactly match pixel ratio
lam = 1 - ((bbx2 - bbx1) * (bby2 - bby1) / (x.size()[-1] * x.size()[-2]))
return x, y_a, y_b, lam
def rand_bbox(size, lam, x, Basic_model):
    """Sample a random CutMix box for a batch of images.

    size: x.size(), with spatial dims at indices 2 and 3.
    lam: mixing coefficient; the box covers roughly a (1 - lam) area fraction.
    x, Basic_model: used only for the (currently unused) saliency map below.
    Returns (bbx1, bby1, bbx2, bby2), clipped to the image bounds.
    """
    W = size[2]
    H = size[3]
    cut_rat = np.sqrt(1.0 - lam)
    # Fix: np.int was deprecated in NumPy 1.20 and removed in 1.24 — use the
    # builtin int (identical truncation behavior for non-negative floats here).
    cut_w = int(W * cut_rat)
    cut_h = int(H * cut_rat)
    # NOTE(review): this saliency map is computed but never used (the centre
    # below is sampled uniformly; the saliency-based centre lines were
    # commented out). Kept because the forward pass may update BatchNorm
    # running statistics in train mode — confirm whether that side effect is
    # intended before removing it.
    feat = feat_normalized(Basic_model, x).reshape(-1, W, H)
    # Uniformly random box centre.
    cx = np.random.randint(W)
    cy = np.random.randint(H)
    bbx1 = np.clip(cx - cut_w // 2, 0, W)
    bby1 = np.clip(cy - cut_h // 2, 0, H)
    bbx2 = np.clip(cx + cut_w // 2, 0, W)
    bby2 = np.clip(cy + cut_h // 2, 0, H)
    return bbx1, bby1, bbx2, bby2
def flip_inner(x, flip1, flip2):
    """Optionally mirror (left-right) the top and/or bottom 16-row half of
    each image.

    x: (N, 3, 32, 32) tensor. flip1 mirrors rows 0-15, flip2 mirrors rows
    16-31. Returns a new (N, 3, 32, 32) tensor.
    """
    batch = x.shape[0]
    # Split each image into its top and bottom 16-row strips.
    halves = x.view(batch, 3, 2, 16, 32).permute(2, 0, 1, 3, 4)
    top, bottom = halves[0], halves[1]
    if flip1:
        top = top.flip(3)       # mirror the top strip horizontally
    if flip2:
        bottom = bottom.flip(3)  # mirror the bottom strip horizontally
    # Reassemble: stack strips back along the height dimension.
    return torch.stack((top, bottom), dim=2).reshape(batch, 3, 32, 32)
def RandomFlip(x, num):
    """Return the first `num` of four augmented views of the batch, stacked
    along the batch dimension.

    Views, in order: simclr_aug(x); inner-flipped simclr_aug(x) (both halves
    mirrored); x with only the bottom half mirrored; x with only the top half
    mirrored. NOTE(review): simclr_aug is expected to be defined at module
    scope elsewhere — confirm it is in scope where this is called.
    """
    views = [
        simclr_aug(x),
        flip_inner(simclr_aug(x), 1, 1),
        flip_inner(x, 0, 1),
        flip_inner(x, 1, 0),
    ]
    return torch.cat(views[:num], dim=0)
def rot_inner(x):
    """Return four variants per image, stacked on the batch dim:
    [0:N]  the original images;
    [N:2N] right 16-column half rotated 180 degrees in place;
    [2N:3N] both halves rotated 180 degrees;
    [3N:4N] left half rotated 180 degrees.

    x: (N, 3, 32, 32) tensor. Returns (4N, 3, 32, 32).
    """
    batch = x.shape[0]
    # Transpose so the width axis can be split into left/right 16-col strips.
    halves = x.permute(0, 1, 3, 2).view(batch, 3, 2, 16, 32).permute(2, 0, 1, 3, 4)
    left, right = halves[0], halves[1]
    turned = torch.rot90(halves, 2, (3, 4))  # each strip rotated 180 degrees
    left_r, right_r = turned[0], turned[1]

    def _merge(a, b):
        # Reassemble two strips into a full image (undo the transpose).
        joined = torch.stack((a, b), dim=2).reshape(batch, 3, 32, 32)
        return joined.permute(0, 1, 3, 2)

    out = x.repeat(4, 1, 1, 1)
    out[batch:2 * batch] = _merge(left, right_r)
    out[2 * batch:3 * batch] = _merge(left_r, right_r)
    out[3 * batch:] = _merge(left_r, right)
    return out
def square_diagonal_16(x):
    """Build 16 variants of each 32x32 image by re-arranging its four 16x16
    quadrants (quadrant indices: 0=top-left, 1=top-right, 2=bottom-left,
    3=bottom-right).

    x: (N, 3, 32, 32) tensor. Returns (16N, 3, 32, 32): rows [0:N] are x
    unchanged; segment k (k = 1..15) places the quadrants in the k-th order
    listed below. Refactor: the original 15 copy-pasted reassembly blocks
    (whose `.squeeze(2)` calls were no-ops on size-2 dims) are replaced by a
    single loop with identical output.
    """
    def _reassemble(quads):
        # quads: (N, 3, 4, 16, 16) -> (N, 3, 32, 32); inverse of the unfold below.
        grid = quads.reshape(-1, 3, 2, 2, 16, 16)  # (row-block, col-block)
        strips = torch.cat((grid[:, :, 0], grid[:, :, 1]), dim=3)      # stack row-blocks
        return torch.cat((strips[:, :, 0], strips[:, :, 1]), dim=3)    # stack col-blocks

    orders = (
        [0, 1, 2, 3], [0, 1, 3, 2], [0, 2, 3, 1], [0, 2, 1, 3], [0, 3, 1, 2],
        [0, 3, 2, 1], [1, 0, 2, 3], [1, 0, 3, 2], [1, 2, 3, 0], [1, 2, 0, 3],
        [1, 3, 2, 0], [1, 3, 0, 2], [2, 0, 1, 3], [2, 0, 3, 1], [2, 1, 0, 3],
    )
    num = x.shape[0]
    # Split each image into its four quadrants, ordered TL, TR, BL, BR.
    quads = x.unfold(2, 16, 16).unfold(3, 16, 16).reshape(-1, 3, 4, 16, 16)
    R = x.repeat(16, 1, 1, 1)
    for k, order in enumerate(orders, start=1):
        R[k * num:(k + 1) * num] = _reassemble(quads[:, :, order])
    return R
def square_diagonal(x):
    """Build 4 variants of each 32x32 image by permuting its four 16x16
    quadrants (quadrant indices: 0=top-left, 1=top-right, 2=bottom-left,
    3=bottom-right).

    x: (N, 3, 32, 32) tensor. Returns (4N, 3, 32, 32): rows [0:N] are x
    unchanged, then the three quadrant orders listed below. Refactor: the
    original three copy-pasted reassembly blocks (with no-op `.squeeze(2)`
    calls) are replaced by a single loop with identical output.
    """
    def _reassemble(quads):
        # quads: (N, 3, 4, 16, 16) -> (N, 3, 32, 32); inverse of the unfold below.
        grid = quads.reshape(-1, 3, 2, 2, 16, 16)  # (row-block, col-block)
        strips = torch.cat((grid[:, :, 0], grid[:, :, 1]), dim=3)      # stack row-blocks
        return torch.cat((strips[:, :, 0], strips[:, :, 1]), dim=3)    # stack col-blocks

    orders = (
        [0, 2, 1, 3],  # swap top-right and bottom-left
        [3, 1, 2, 0],  # swap top-left and bottom-right
        [3, 2, 1, 0],  # reverse all quadrants (180-degree block shuffle)
    )
    num = x.shape[0]
    # Split each image into its four quadrants, ordered TL, TR, BL, BR.
    quads = x.unfold(2, 16, 16).unfold(3, 16, 16).reshape(-1, 3, 4, 16, 16)
    R = x.repeat(4, 1, 1, 1)
    for k, order in enumerate(orders, start=1):
        R[k * num:(k + 1) * num] = _reassemble(quads[:, :, order])
    return R
def square_diagonal_repeat(x):
    """Build 4 variants of each 32x32 image: the original, then one of its
    16x16 quadrants tiled over the whole image (quadrant indices:
    0=top-left, 1=top-right, 2=bottom-left).

    x: (N, 3, 32, 32) tensor. Returns (4N, 3, 32, 32): rows [0:N] are x
    unchanged; segments 1/2/3 tile quadrant 0/1/2 into all four positions.
    Refactor: the original three copy-pasted reassembly blocks (with no-op
    `.squeeze(2)` calls) are replaced by a single loop with identical output.
    """
    def _reassemble(quads):
        # quads: (N, 3, 4, 16, 16) -> (N, 3, 32, 32); inverse of the unfold below.
        grid = quads.reshape(-1, 3, 2, 2, 16, 16)  # (row-block, col-block)
        strips = torch.cat((grid[:, :, 0], grid[:, :, 1]), dim=3)      # stack row-blocks
        return torch.cat((strips[:, :, 0], strips[:, :, 1]), dim=3)    # stack col-blocks

    selections = (
        [0, 0, 0, 0],  # tile the top-left quadrant
        [1, 1, 1, 1],  # tile the top-right quadrant
        [2, 2, 2, 2],  # tile the bottom-left quadrant
    )
    num = x.shape[0]
    # Split each image into its four quadrants, ordered TL, TR, BL, BR.
    quads = x.unfold(2, 16, 16).unfold(3, 16, 16).reshape(-1, 3, 4, 16, 16)
    R = x.repeat(4, 1, 1, 1)
    for k, sel in enumerate(selections, start=1):
        R[k * num:(k + 1) * num] = _reassemble(quads[:, :, sel])
    return R
def rot_inner_hlip(x):
    """Return four variants per image, stacked on the batch dim:
    [0:N]  the original images;
    [N:2N] bottom 16-row half rotated 180 degrees in place;
    [2N:3N] both halves rotated 180 degrees;
    [3N:4N] top half rotated 180 degrees.

    Like rot_inner, but splits along the height axis (no transpose).
    x: (N, 3, 32, 32) tensor. Returns (4N, 3, 32, 32).
    """
    batch = x.shape[0]
    strips = x.view(batch, 3, 2, 16, 32).permute(2, 0, 1, 3, 4)
    top, bottom = strips[0], strips[1]
    turned = torch.rot90(strips, 2, (3, 4))  # each strip rotated 180 degrees
    top_r, bottom_r = turned[0], turned[1]

    def _merge(upper, lower):
        # Stack two 16-row strips back into one 32x32 image.
        return torch.stack((upper, lower), dim=2).reshape(batch, 3, 32, 32)

    out = x.repeat(4, 1, 1, 1)
    out[batch:2 * batch] = _merge(top, bottom_r)
    out[2 * batch:3 * batch] = _merge(top_r, bottom_r)
    out[3 * batch:] = _merge(top_r, bottom)
    return out
def Rotation(x, oop):
    """Stack quadrant-shuffled (square_diagonal) and half-rotated (rot_inner)
    variants of x, each also globally rotated by multiples of 90 degrees,
    and return the first num * oop rows.

    x: (N, 3, 32, 32). oop: number of variants kept per image.
    """
    num = x.shape[0]
    diag = square_diagonal(x)
    inner = rot_inner(x)
    variants = torch.cat(
        (diag,
         torch.rot90(diag, 1, (2, 3)),
         torch.rot90(diag, 2, (2, 3)),
         torch.rot90(diag, 3, (2, 3)),
         inner,
         torch.rot90(inner, 1, (2, 3))),
        dim=0)
    return variants[:num * oop]
import matplotlib.pyplot as plt
import numpy as np
def imshow(img):
    """Un-normalize a CHW tensor from [-1, 1] to [0, 1] and display it with
    matplotlib (converts to HWC for plt.imshow)."""
    unnormalized = img / 2 + 0.5
    pixels = unnormalized.cpu().numpy()
    plt.imshow(np.transpose(pixels, (1, 2, 0)))
    plt.show()
def feat_normalized_hat(model, x, task_id):
    """Per-image spatial attention map from a HAT-style backbone (task-gated
    forward with t=task_id, s=1), min-max normalized and upsampled to the
    input resolution. Returns (N, 1, H, W) with values in [0, 1].
    Requires CUDA (moves x to the GPU)."""
    images = x.cuda(non_blocking=True)
    feat_map = model.f_train_feat_map(images, t=task_id, s=1)  # (N, C, Hf, Wf)
    batch, _, height, width = feat_map.shape
    # Collapse channels, then min-max normalize per image.
    attn = feat_map.sum(1).view(batch, -1)
    attn = attn - attn.min(1, keepdim=True)[0]
    attn = attn / attn.max(1, keepdim=True)[0]
    attn = attn.view(batch, 1, height, width)
    return F.interpolate(attn, size=images.shape[-2:], mode='bilinear')
def feat_cam_normalized(model,x,y):
    """Class-activation-style map: weight the feature map by the classifier
    row for each image's label y, min-max normalize per image, and upsample
    to the input resolution. Returns (N, 1, H, W) in [0, 1].

    model: DataParallel-wrapped backbone (accessed via model.module) with
        f_train_feat_map and a `linear` classifier. Requires CUDA.
    y: label indices used to pick classifier weight rows.
    """
    images = x.cuda(non_blocking=True)
    feat_map = model.module.f_train_feat_map(images) # (N, C, H, W)
    N, Cf, Hf, Wf = feat_map.shape
    #import pdb
    #pdb.set_trace()
    # Per-image dot product of the class-y weight vector with each spatial
    # feature column: (N, 1, Cf) x (N, Cf, Hf*Wf) -> (N, 1, Hf*Wf).
    feat_map=torch.bmm(model.module.linear.weight[y].unsqueeze(1),feat_map.reshape(N,Cf,Hf*Wf))
    eval_train_map = feat_map.sum(1).view(N, -1) # (N, Hf*Wf)
    # Per-image min-max normalization to [0, 1].
    eval_train_map = eval_train_map - eval_train_map.min(1, keepdim=True)[0]
    eval_train_map = eval_train_map / eval_train_map.max(1, keepdim=True)[0]
    eval_train_map = eval_train_map.view(N, 1, Hf, Wf)
    eval_train_map = F.interpolate(eval_train_map, size=images.shape[-2:], mode='bilinear')
    return eval_train_map
def feat_normalized(model, x):
    """Like feat_normalized_hat but without a task id: channel-summed map,
    min-max normalized per image, upsampled to the input size."""
    images = x.cuda(non_blocking=True)
    fmap = model.f_train_feat_map(images)                   # (N, C, Hf, Wf)
    n, _, hf, wf = fmap.shape
    heat = fmap.sum(1).view(n, -1)
    heat = heat - heat.min(1, keepdim=True)[0]
    heat = heat / heat.max(1, keepdim=True)[0]
    heat = heat.view(n, 1, hf, wf)
    return F.interpolate(heat, size=images.shape[-2:], mode='bilinear')
def Hbeta_torch(D, beta=1.0):
    """Gaussian kernel row at precision `beta` and its Shannon entropy.

    :param D: 1-D tensor of (squared) distances to the other points
    :param beta: precision (1 / (2 sigma^2))
    :return: (entropy H, normalized probability row P)
    """
    kernel = torch.exp(-D.clone() * beta)
    total = torch.sum(kernel)
    entropy = torch.log(total) + beta * torch.sum(D * kernel) / total
    return entropy, kernel / total
def x2p_torch(X, tol=1e-5, perplexity=30.0):
    """
    Performs a binary search to get P-values in such a way that each
    conditional Gaussian has the same perplexity.
    """
    # Initialize some variables
    print("Computing pairwise distances...")
    (n, d) = X.shape
    sum_X = torch.sum(X*X, 1)
    # Squared Euclidean distances: D[i, j] = ||x_i||^2 - 2 x_i.x_j + ||x_j||^2
    D = torch.add(torch.add(-2 * torch.mm(X, X.t()), sum_X).t(), sum_X)
    P = torch.zeros(n, n)
    beta = torch.ones(n, 1)  # per-point precision beta = 1 / (2 sigma^2)
    logU = torch.log(torch.tensor([perplexity]))  # target entropy = log(perplexity)
    n_list = [i for i in range(n)]
    # Loop over all datapoints
    for i in range(n):
        # Print progress
        if i % 500 == 0:
            print("Computing P-values for point %d of %d..." % (i, n))
        # Compute the Gaussian kernel and entropy for the current precision
        # there may be something wrong with this setting None
        betamin = None
        betamax = None
        # Distances from point i to every other point (self excluded).
        Di = D[i, n_list[0:i]+n_list[i+1:n]]
        (H, thisP) = Hbeta_torch(Di, beta[i])
        # Evaluate whether the perplexity is within tolerance
        Hdiff = H - logU
        tries = 0
        # Bisection on beta[i]: entropy is monotone in beta, so double/halve
        # until bracketed, then take midpoints (at most 50 refinements).
        while torch.abs(Hdiff) > tol and tries < 50:
            # If not, increase or decrease precision
            if Hdiff > 0:
                betamin = beta[i].clone()
                if betamax is None:
                    beta[i] = beta[i] * 2.
                else:
                    beta[i] = (beta[i] + betamax) / 2.
            else:
                betamax = beta[i].clone()
                if betamin is None:
                    beta[i] = beta[i] / 2.
                else:
                    beta[i] = (beta[i] + betamin) / 2.
            # Recompute the values
            (H, thisP) = Hbeta_torch(Di, beta[i])
            Hdiff = H - logU
            tries += 1
        # Set the final row of P
        P[i, n_list[0:i]+n_list[i+1:n]] = thisP
    # Return final P-matrix
    return P
def pca_torch(X, no_dims=50):
    """Project X (n, d) onto its top `no_dims` principal components.

    :param X: 2-D float tensor of shape (n, d)
    :param no_dims: number of components to keep
    :return: projected data of shape (n, min(no_dims, d))

    Fix vs. original: `torch.eig` was deprecated and removed in modern PyTorch,
    and it returned eigenpairs in no particular order — so taking the leading
    columns did not select the top components. X^T X is symmetric, so we use
    `torch.linalg.eigh` (real eigenpairs, ascending) and sort descending.
    """
    print("Preprocessing the data using PCA...")
    (n, d) = X.shape
    X = X - torch.mean(X, 0)                      # center the data
    eigvals, eigvecs = torch.linalg.eigh(torch.mm(X.t(), X))
    order = torch.argsort(eigvals, descending=True)
    M = eigvecs[:, order]                         # columns = components, largest variance first
    Y = torch.mm(X, M[:, 0:no_dims])
    return Y
def tsne(X, no_dims=2, initial_dims=50, perplexity=30.0):
    """
    Runs t-SNE on the dataset in the NxD array X to reduce its
    dimensionality to no_dims dimensions. The syntaxis of the function is
    `Y = tsne.tsne(X, no_dims, perplexity), where X is an NxD NumPy array.
    """
    # Check inputs
    if isinstance(no_dims, float):
        print("Error: array X should not have type float.")
        return -1
    if round(no_dims) != no_dims:
        print("Error: number of dimensions should be an integer.")
        return -1
    # Initialize variables
    X = pca_torch(X, initial_dims)  # PCA pre-reduction to initial_dims
    (n, d) = X.shape
    max_iter = 1000
    initial_momentum = 0.5
    final_momentum = 0.8
    eta = 500        # learning rate
    min_gain = 0.01  # floor for the adaptive per-coordinate gains
    Y = torch.randn(n, no_dims)    # low-dimensional embedding (random init)
    dY = torch.zeros(n, no_dims)   # gradient
    iY = torch.zeros(n, no_dims)   # momentum-accumulated update
    gains = torch.ones(n, no_dims)
    # Compute P-values
    P = x2p_torch(X, 1e-5, perplexity)
    P = P + P.t()         # symmetrize the conditional probabilities
    P = P / torch.sum(P)
    P = P * 4.    # early exaggeration
    print("get P shape", P.shape)
    P = torch.max(P, torch.tensor([1e-21]))  # numerical floor
    # Run iterations
    for iter in range(max_iter):
        # Compute pairwise affinities (Student-t kernel in the embedding)
        sum_Y = torch.sum(Y*Y, 1)
        num = -2. * torch.mm(Y, Y.t())
        num = 1. / (1. + torch.add(torch.add(num, sum_Y).t(), sum_Y))
        num[range(n), range(n)] = 0.  # zero the diagonal (no self-affinity)
        Q = num / torch.sum(num)
        Q = torch.max(Q, torch.tensor([1e-12]))
        # Compute gradient
        PQ = P - Q
        for i in range(n):
            dY[i, :] = torch.sum((PQ[:, i] * num[:, i]).repeat(no_dims, 1).t() * (Y[i, :] - Y), 0)
        # Perform the update
        if iter < 20:
            momentum = initial_momentum
        else:
            momentum = final_momentum
        # Increase gain where gradient sign flipped, decay it where it persists.
        gains = (gains + 0.2) * ((dY > 0.) != (iY > 0.)).double() + (gains * 0.8) * ((dY > 0.) == (iY > 0.)).double()
        gains[gains < min_gain] = min_gain
        iY = momentum * iY - eta * (gains * dY)
        Y = Y + iY
        Y = Y - torch.mean(Y, 0)  # re-center the embedding
        # Compute current value of cost function
        if (iter + 1) % 10 == 0:
            C = torch.sum(P * torch.log(P / Q))  # KL(P || Q)
            print("Iteration %d: error is %f" % (iter + 1, C))
        # Stop lying about P-values
        if iter == 100:
            P = P / 4.
    # Return solution
    return Y
def test_model_conti(Basic_model,Loder,j):
    """Accuracy over `Loder` with head j's logits masked by the one-hot label.

    NOTE(review): the outer `kk` loop repeats the identical evaluation
    len(Loder) times and the sum is divided by task_num, so the result equals
    a single pass — presumably a leftover of a per-task loop; confirm intent.
    Indentation below is reconstructed (whitespace was mangled) — TODO confirm
    against the upstream source.
    """
    test_accuracy = 0
    task_num=len(Loder)
    for kk in range(len(Loder)):
        k=j
        correct = 0
        num = 0
        for batch_idx, (data, target) in enumerate(Loder):
            data, target = data.cuda(), target.cuda()
            # data, target = Variable(data, volatile=True), Variable(target)
            Basic_model.eval()
            mask=torch.nn.functional.one_hot(target%10,num_classes=10)
            # pdb.set_trace()
            pred = Basic_model.forward(data)#[:,:10*task_num]#torch.cat((Basic_model.forward(data)[:,10*(i):10*(i+1)]*mask,Basic_model.forward(data)[:,10*(j):10*(j+1)]),dim=1)
            # Keep only the true-label logit inside head k's 10-way block.
            pred[:,10*k:10*(k+1)]=pred[:,10*k:10*(k+1)]*mask
            Pred = pred.data.max(1, keepdim=True)[1]
            num += data.size()[0]
            target=target
            # print("final", Pred, target.data.view_as(Pred))
            # print(target,"True",pred)
            correct += Pred.eq(target.data.view_as(Pred)).cpu().sum()
        test_accuracy += (100. * correct / num)#*0.5 # len(data_loader.dataset)
        # print(
        #     'Test set{}: Average loss: {:.4f}, Accuracy: {}/{} ({:.2f}%)'
        #     .format(i,
        #             test_loss, correct, num,
        #             100. * correct / num, ))
    return test_accuracy/task_num
def test_model_task(Basic_model, loder1, loder2, i, j):
    """Joint accuracy over tasks i and j, each contributing half the score.

    For loader1 (task i) head i's logits are masked by the one-hot label; for
    loader2 (task j) head j's logits are masked. Predictions are taken over
    the 20-way concatenation of the two heads.

    Fix: the original ran the forward pass TWICE per batch (once per slice of
    the concatenation); the logits are now computed once and sliced.
    """
    Basic_model.eval()
    correct = 0
    num = 0
    for batch_idx, (data, target) in enumerate(loder1):
        data, target = data.cuda(), target.cuda()
        mask = torch.nn.functional.one_hot(target % 10, num_classes=10)
        logits = Basic_model.forward(data)  # single forward pass
        pred = torch.cat((logits[:, 10 * i:10 * (i + 1)] * mask,
                          logits[:, 10 * j:10 * (j + 1)]), dim=1)
        Pred = pred.data.max(1, keepdim=True)[1]
        num += data.size()[0]
        target = target - 10 * i  # map task-i labels into [0, 10)
        correct += Pred.eq(target.data.view_as(Pred)).cpu().sum()
    test_accuracy = (100. * correct / num) * 0.5
    correct = 0
    num = 0
    for batch_idx, (data, target) in enumerate(loder2):
        data, target = data.cuda(), target.cuda()
        mask = torch.nn.functional.one_hot(target % 10, num_classes=10)
        logits = Basic_model.forward(data)  # single forward pass
        pred = torch.cat((logits[:, 10 * i:10 * (i + 1)],
                          logits[:, 10 * j:10 * (j + 1)] * mask), dim=1)
        Pred = pred.data.max(1, keepdim=True)[1]
        num += data.size()[0]
        target = target - 10 * j + 10  # map task-j labels into [10, 20)
        correct += Pred.eq(target.data.view_as(Pred)).cpu().sum()
    test_accuracy += (100. * correct / num) * 0.5
    return test_accuracy
def test_model_cur(Basic_model, loder, i):
    """Accuracy on `loder` using only task i's 10-way head (labels shifted to [0, 10))."""
    Basic_model.eval()
    n_correct, n_seen = 0, 0
    for data, target in loder:
        data, target = data.cuda(), target.cuda()
        head = Basic_model.forward(data)[:, 10 * i:10 * (i + 1)]
        guess = head.data.max(1, keepdim=True)[1]
        n_seen += data.size()[0]
        shifted = target - 10 * i
        n_correct += guess.eq(shifted.data.view_as(guess)).cpu().sum()
    return 100. * n_correct / n_seen
def test_model_past(Basic_model, loder, i):
    """Accuracy on `loder` over every head learned so far (logits 0 .. 10*(i+1))."""
    Basic_model.eval()
    n_correct, n_seen = 0, 0
    for data, target in loder:
        data, target = data.cuda(), target.cuda()
        scores = Basic_model.forward(data)[:, :10 * (i + 1)]
        guess = scores.data.max(1, keepdim=True)[1]
        n_seen += data.size()[0]
        n_correct += guess.eq(target.data.view_as(guess)).cpu().sum()
    return 100. * n_correct / n_seen
def test_model_mix(Basic_model, loder, i):
    """Accuracy on task i over the concatenation of head i and the LAST 10
    logits (labels shifted into [0, 10)).

    Fix: the original called the forward pass twice per batch to build the
    concatenation; the logits are now computed once and sliced.
    """
    Basic_model.eval()
    correct = 0
    num = 0
    for batch_idx, (data, target) in enumerate(loder):
        data, target = data.cuda(), target.cuda()
        logits = Basic_model.forward(data)  # single forward pass
        pred = torch.cat((logits[:, 10 * i:10 * (i + 1)], logits[:, -10:]), dim=1)
        Pred = pred.data.max(1, keepdim=True)[1]
        num += data.size()[0]
        target = target - 10 * i
        correct += Pred.eq(target.data.view_as(Pred)).cpu().sum()
    test_accuracy = 100. * correct / num
    return test_accuracy
def test_model_future(Basic_model, loder, i):
    """Accuracy on `loder` using every logit from head i onward (labels shifted by 10*i)."""
    test_loss = 0
    n_correct, n_seen = 0, 0
    Basic_model.eval()
    for data, target in loder:
        data, target = data.cuda(), target.cuda()
        scores = Basic_model.forward(data)[:, 10 * i:]
        guess = scores.data.max(1, keepdim=True)[1]
        n_seen += data.size()[0]
        shifted = target - 10 * i
        n_correct += guess.eq(shifted.data.view_as(guess)).cpu().sum()
    test_accuracy = 100. * n_correct / n_seen
    print(
        'Test set{}: Average loss: {:.4f}, Accuracy: {}/{} ({:.2f}%)'
        .format(i,
                test_loss, n_correct, n_seen,
                100. * n_correct / n_seen, ))
    return test_accuracy
def test_model(Basic_model, loder, i):
    """Plain accuracy of the full classifier on `loder` (no label shift, no masking)."""
    test_loss = 0
    n_correct, n_seen = 0, 0
    Basic_model.eval()
    for data, target in loder:
        data, target = data.cuda(), target.cuda()
        guess = Basic_model.forward(data).data.max(1, keepdim=True)[1]
        n_seen += data.size()[0]
        n_correct += guess.eq(target.data.view_as(guess)).cpu().sum()
    test_accuracy = 100. * n_correct / n_seen
    print(
        'Test set{}: Average loss: {:.4f}, Accuracy: {}/{} ({:.2f}%)'
        .format(i,
                test_loss, n_correct, n_seen,
                100. * n_correct / n_seen, ))
    return test_accuracy
def get_true_prob(x, y, llabel):
    """Per-sample binary indicator: 1.0 if y[i] is in `llabel`, else 0.0.

    `x` is only used for its batch size. Returns a cuda FloatTensor.
    """
    flags = [1 if y[i] in llabel else 0 for i in range(x.size()[0])]
    return torch.FloatTensor(flags).cuda()
def get_prob_rate(x, logits, label):
    """Per-sample ratio of the true-class softmax probability to the maximum
    softmax probability. `x` is only used for its batch size.

    Fix: the max was kept as a 0-d tensor while the true probability was a
    Python float, so the list handed to torch.FloatTensor mixed tensors and
    floats; both values are now extracted with .item().
    """
    num = x.size()[0]
    probs = F.softmax(logits, dim=1)
    rate = []
    for i in range(num):
        true_prob = probs[i][label[i]].item()
        max_prob = torch.max(probs[i]).item()
        rate.append(true_prob / max_prob)
    return torch.FloatTensor(rate).cuda()
def get_prob_rate_cross(logits, label, t):
    """Per-sample ratio of the true-class softmax probability to the maximum
    softmax probability among all but the last `t` logits.

    Fix: the max was a 0-d tensor while the true probability was a float, so
    the list handed to torch.FloatTensor mixed tensors and floats; both are
    now plain floats via .item().
    """
    probs = F.softmax(logits, dim=1)
    rate = []
    num = logits.size()[0]
    for i in range(num):
        true_prob = probs[i][label[i]].item()
        max_prob = torch.max(probs[i, :-t]).item()
        rate.append(true_prob / max_prob)
    return torch.FloatTensor(rate).cuda()
def get_mean_rate_cross( logits, label, t):
logits = F.softmax(logits, dim=1)
rate = []
num = logits.size()[0]
# true2=[]
# import pdb
# pdb.set_trace()
for i in range(num):
true_prob = logits[i][label[i]].item()
# import pdb
# pdb.set_trace()
max_prob = torch.max(logits[i, :-t])
rate.append(true_prob / max_prob)
return torch.FloatTensor(rate).cuda() | 32,557 | 37.759524 | 175 | py |
GSA | GSA-main/GSA_CVPR/buffer.py | import numpy as np
import math
import pdb
import torch
import torch.nn as nn
import torch.nn.functional as F
class Buffer(nn.Module):
    """Fixed-size replay buffer holding images (bx), labels (by), task ids (bt)
    and optional per-sample logits, filled by reservoir sampling."""
    def __init__(self, args, input_size=None):
        """Allocate all storage up front on the GPU and register it as buffers.

        :param args: namespace providing input_size, gen, dataset, buffer_size,
            n_classes (as read below)
        :param input_size: per-sample shape; defaults to args.input_size
        """
        super().__init__()
        self.args = args
        self.k = 0.03
        self.place_left = True
        if input_size is None:
            input_size = args.input_size
        # TODO(change this:)
        if args.gen:
            if 'mnist' in args.dataset:
                img_size = 784
                economy = img_size // input_size[0]
            elif 'cifar' in args.dataset:
                img_size = 32 * 32 * 3
                economy = img_size // (input_size[0] ** 2)
            elif 'imagenet' in args.dataset:
                img_size = 84 * 84 * 3
                economy = img_size // (input_size[0] ** 2)
        else:
            economy = 1
        # NOTE(review): `economy` is computed but never used below — confirm
        # whether it was meant to scale buffer_size.
        buffer_size = args.buffer_size
        print('buffer has %d slots' % buffer_size,args.buffer_size)
        bx = torch.FloatTensor(buffer_size, *input_size).fill_(0)
        print("bx",bx.shape)
        by = torch.LongTensor(buffer_size).fill_(0)
        bt = torch.LongTensor(buffer_size).fill_(0)
        logits = torch.FloatTensor(buffer_size, args.n_classes).fill_(0)
        feature= torch.FloatTensor(buffer_size, 512).fill_(0)
        #if args.cuda:
        bx = bx.cuda()#to(args.device)
        by = by.cuda()#to(args.device)
        bt = bt.cuda()#to(args.device)
        logits = logits.cuda()#to(args.device)
        feature=feature.cuda()
        # Becomes a bool on the first add_reservoir call (True when logits stored).
        self.save_logits=None
        self.current_index = 0    # next free slot / number of filled slots
        self.n_seen_so_far = 0    # total examples offered (for reservoir sampling)
        self.is_full = 0
        # registering as buffer allows us to save the object using `torch.save`
        self.register_buffer('bx', bx)
        self.register_buffer('by', by)
        self.register_buffer('bt', bt)
        self.register_buffer('logits', logits)
        self.register_buffer('feature',feature)
        self.to_one_hot = lambda x : x.new(x.size(0), args.n_classes).fill_(0).scatter_(1, x.unsqueeze(1), 1)
        self.arange_like = lambda x : torch.arange(x.size(0)).to(x.device)
        self.shuffle = lambda x : x[torch.randperm(x.size(0))]
    @property
    def x(self):
        # Stored images up to the current fill level.
        return self.bx[:self.current_index]
def is_empty(self) -> bool:
"""
Returns true if the buffer is empty, false otherwise.
"""
if self.n_seen_so_far == 0:
return True
else:
return False
    @property
    def y(self):
        # One-hot labels (n_classes columns) for the filled slots.
        return self.to_one_hot(self.by[:self.current_index])
    @property
    def t(self):
        # Task ids for the filled slots.
        return self.bt[:self.current_index]
    @property
    def valid(self):
        # NOTE(review): `self.is_valid` is never assigned in this class, so this
        # property raises AttributeError unless it is set externally — confirm.
        return self.is_valid[:self.current_index]
    def display(self, gen=None, epoch=-1):
        """Save a grid of the buffer's images to samples/buffer_<epoch>.png,
        decoding through `gen` first when given, and print per-class counts."""
        from torchvision.utils import save_image
        from PIL import Image
        if 'cifar' in self.args.dataset:
            shp = (-1, 3, 32, 32)
        elif 'tinyimagenet' in self.args.dataset:
            shp = (-1, 3, 64, 64)
        else:
            shp = (-1, 1, 28, 28)
        if gen is not None:
            x = gen.decode(self.x)
        else:
            x = self.x
        # De-normalize from [-1, 1] back to [0, 1] before saving.
        save_image((x.reshape(shp) * 0.5 + 0.5), 'samples/buffer_%d.png' % epoch, nrow=int(self.current_index ** 0.5))
        #Image.open('buffer_%d.png' % epoch).show()
        print(self.y.sum(dim=0))
def add_reservoir(self, x, y, logits, t):
n_elem = x.size(0)
# x=x.reshape(x.size(0),1,1,-1)
place_left = max(0, self.bx.size(0) - self.current_index)
offset = min(place_left, n_elem)
# print(self.bx.shape,x[:offset].shape)
save_logits = logits is not None
self.save_logits=logits is not None
# add whatever still fits in the buffer
place_left = max(0, self.bx.size(0) - self.current_index)
if place_left:
offset = min(place_left, n_elem)
# print(offset)
# print(self.bx[self.current_index: self.current_index + offset].data.shape)
# print(x[:offset].shape)
self.bx[self.current_index: self.current_index + offset].data.copy_(x[:offset])
self.by[self.current_index: self.current_index + offset].data.copy_(y[:offset])
self.bt[self.current_index: self.current_index + offset].fill_(t)
if save_logits:
#print("存")
self.logits[self.current_index: self.current_index + offset].data.copy_(logits[:offset])
#self.feature[self.current_index: self.current_index+offset].data.copy_(feature[:offset])
self.current_index += offset
self.n_seen_so_far += offset
# everything was added
if offset == x.size(0):
return
self.place_left = False
# remove what is already in the buffer
x, y = x[place_left:], y[place_left:]
indices = torch.FloatTensor(x.size(0)).to(x.device).uniform_(0, self.n_seen_so_far).long()
valid_indices = (indices < self.bx.size(0)).long()
idx_new_data = valid_indices.nonzero().squeeze(-1)
idx_buffer = indices[idx_new_data]
self.n_seen_so_far += x.size(0)
if idx_buffer.numel() == 0:
return
assert idx_buffer.max() < self.bx.size(0), pdb.set_trace()
assert idx_buffer.max() < self.by.size(0), pdb.set_trace()
assert idx_buffer.max() < self.bt.size(0), pdb.set_trace()
assert idx_new_data.max() < x.size(0), pdb.set_trace()
assert idx_new_data.max() < y.size(0), pdb.set_trace()
# perform overwrite op
self.bx[idx_buffer] = x[idx_new_data].cuda()
self.by[idx_buffer] = y[idx_new_data].cuda()
self.bt[idx_buffer] = t
if save_logits:
self.logits[idx_buffer] = logits[idx_new_data]
#self.feature[idx_buffer] = feature[idx_new_data]
    def measure_valid(self, generator, classifier):
        """Evaluate `classifier` on decoded buffer entries flagged valid and
        store per-class accuracy, its complement (class_weight) and counts
        on self. Runs under no_grad."""
        with torch.no_grad():
            # fetch valid examples
            valid_indices = self.valid.nonzero()
            valid_x, valid_y = self.bx[valid_indices], self.by[valid_indices]
            one_hot_y = self.to_one_hot(valid_y.flatten())
            hid_x = generator.idx_2_hid(valid_x)
            x_hat = generator.decode(hid_x)
            logits = classifier(x_hat)
            _, pred = logits.max(dim=1)
            one_hot_pred = self.to_one_hot(pred)
            correct = one_hot_pred * one_hot_y  # 1 exactly where prediction matches label
            per_class_correct = correct.sum(dim=0)
            per_class_deno = one_hot_y.sum(dim=0)
            per_class_acc = per_class_correct.float() / per_class_deno.float()
            self.class_weight = 1. - per_class_acc  # harder classes get larger weight
            self.valid_acc = per_class_acc
            self.valid_deno = per_class_deno
def shuffle_(self):
indices = torch.randperm(self.current_index).to(self.args.device)
self.bx = self.bx[indices]
self.by = self.by[indices]
self.bt = self.bt[indices]
def delete_up_to(self, remove_after_this_idx):
self.bx = self.bx[:remove_after_this_idx]
self.by = self.by[:remove_after_this_idx]
self.br = self.bt[:remove_after_this_idx]
    def sample(self, amt, exclude_task = None, ret_ind = False):
        """Uniformly sample up to `amt` stored examples, optionally excluding
        one task.

        Returns (x, y, logits, t[, indices]) when logits were stored, else
        (x, y, t[, indices]). If fewer than `amt` items qualify, all of them
        are returned (with a CPU index tensor when ret_ind).
        """
        if self.save_logits:
            if exclude_task is not None:
                # self.t covers only the filled prefix, so indices stay in range.
                valid_indices = (self.t != exclude_task)
                valid_indices = valid_indices.nonzero().squeeze()
                bx, by, bt, logits= self.bx[valid_indices], self.by[valid_indices], self.bt[valid_indices],self.logits[valid_indices]
            else:
                bx, by, bt, logits = self.bx[:self.current_index], self.by[:self.current_index], self.bt[:self.current_index],self.logits[:self.current_index]#,self.feature[:self.current_index]
            if bx.size(0) < amt:
                if ret_ind:
                    return bx, by, logits,bt, torch.from_numpy(np.arange(bx.size(0)))
                else:
                    return bx, by, logits,bt
            else:
                # Uniform draw without replacement.
                indices = torch.from_numpy(np.random.choice(bx.size(0), amt, replace=False))
                #if self.args.cuda:
                indices = indices.cuda()#to(self.args.device)
                # import pdb
                # pdb.set_trace()
                if ret_ind:
                    return bx[indices], by[indices],logits[indices],bt[indices], indices
                else:
                    return bx[indices], by[indices],logits[indices], bt[indices]
        else:
            # return 0
            if exclude_task is not None:
                valid_indices = (self.t != exclude_task)
                valid_indices = valid_indices.nonzero().squeeze()
                bx, by, bt = self.bx[valid_indices], self.by[valid_indices], self.bt[valid_indices]
            else:
                bx, by, bt = self.bx[:self.current_index], self.by[:self.current_index], self.bt[:self.current_index]
            if bx.size(0) < amt:
                if ret_ind:
                    return bx, by, bt, torch.from_numpy(np.arange(bx.size(0)))
                else:
                    return bx, by, bt
            else:
                indices = torch.from_numpy(np.random.choice(bx.size(0), amt, replace=False))
                #if self.args.cuda:
                indices = indices.cuda()#to(self.args.device)
                if ret_ind:
                    return bx[indices], by[indices], bt[indices], indices
                else:
                    return bx[indices], by[indices], bt[indices]
def split(self, amt):
indices = torch.randperm(self.current_index).to(self.args.device)
return indices[:amt], indices[amt:]
    def presample(self, amt, task = None, ret_ind = False):
        """Uniformly sample up to `amt` examples from tasks <= `task` (all
        filled slots when `task` is None).

        Only implemented for the logits-saving mode; returns 0 otherwise.
        """
        if self.save_logits:
            if task is not None:
                valid_indices = (self.t <= task)
                valid_indices = valid_indices.nonzero().squeeze()
                bx, by, bt, logits= self.bx[valid_indices], self.by[valid_indices], self.bt[valid_indices],self.logits[valid_indices]
            else:
                bx, by, bt, logits = self.bx[:self.current_index], self.by[:self.current_index], self.bt[:self.current_index],self.logits[:self.current_index]
            if bx.size(0) < amt:
                if ret_ind:
                    return bx, by, logits,bt, torch.from_numpy(np.arange(bx.size(0)))
                else:
                    return bx, by, logits,bt
            else:
                # Uniform draw without replacement.
                indices = torch.from_numpy(np.random.choice(bx.size(0), amt, replace=False))
                #if self.args.cuda:
                indices = indices.cuda()#to(self.args.device)
                if ret_ind:
                    return bx[indices], by[indices],logits[indices],bt[indices], indices
                else:
                    return bx[indices], by[indices],logits[indices], bt[indices]
        else:
            return 0
    def prob_index(self,distribution,amt):
        """Concatenate per-task index subsets, task i contributing
        int(amt * (distribution[i] + distribution[i+1])) samples drawn without
        replacement from the filled slots of that task.

        NOTE(review): with n = len(distribution) // 2 tasks, consecutive tasks
        SHARE distribution entries (task i uses d[i] + d[i+1]); if one pair per
        task was intended this should presumably be d[2*i] + d[2*i+1] — confirm
        against the caller.
        """
        n=int(len(distribution)/2)
        valid_sum_indices=None
        for task_index in range(n):
            prob_cur_task=distribution[task_index]+distribution[task_index+1]
            va_cur_index=(self.t==task_index)
            valid_cur_indices = va_cur_index.nonzero().squeeze()
            indices = torch.from_numpy(np.random.choice(len(valid_cur_indices), int(amt*prob_cur_task), replace=False))
            valid_cur_indices=valid_cur_indices[indices]
            if valid_sum_indices is None:
                valid_sum_indices=(valid_cur_indices)
            else:
                valid_sum_indices = torch.cat((valid_cur_indices,valid_sum_indices))
        return valid_sum_indices
def pro_sample(self, amt, distribution, ret_ind = False):
#task=exclude_task
#if task>=2:
# import pdb
# pdb.set_trace()
#
if self.save_logits:
#if task is not None:
# valid_indices = (self.t == task)
# valid_indices = valid_indices.nonzero().squeeze()
# bx, by, bt, logits= self.bx[valid_indices], self.by[valid_indices], self.bt[valid_indices],self.logits[valid_indices]
# else:
probi_index= self.prob_index(distribution, amt)
return self.bx[probi_index], self.by[probi_index], self.bt[probi_index],self.logits[probi_index]
else:
probi_index = self.prob_index(distribution, amt)
return self.bx[probi_index], self.by[probi_index], self.bt[probi_index]
    def prob_class_index(self,distribution,amt):
        """Concatenate per-class index subsets, class c contributing
        int(amt * distribution[c]) samples drawn without replacement.

        NOTE(review): this scans the FULL self.by (not just the filled prefix
        like prob_index does) — fine once the buffer is full, but before that,
        empty slots (label 0) are candidates for class 0; confirm.
        """
        n=int(len(distribution))
        valid_sum_indices=None
        # import pdb
        #pdb.set_trace()
        for class_index in range(n):
            prob_cur_class=distribution[class_index]#+distribution[task_index+1]
            va_cur_index=(self.by==class_index)
            valid_cur_indices = va_cur_index.nonzero().squeeze()
            indices = torch.from_numpy(np.random.choice(len(valid_cur_indices), int(amt*prob_cur_class), replace=False))
            valid_cur_indices=valid_cur_indices[indices]
            if valid_sum_indices is None:
                valid_sum_indices=(valid_cur_indices)
            else:
                valid_sum_indices = torch.cat((valid_cur_indices,valid_sum_indices))
        return valid_sum_indices
def pro_class_sample(self, amt, distribution, ret_ind = False):
#task=exclude_task
#if task>=2:
# import pdb
# pdb.set_trace()
#
if self.save_logits:
#if task is not None:
# valid_indices = (self.t == task)
# valid_indices = valid_indices.nonzero().squeeze()
# bx, by, bt, logits= self.bx[valid_indices], self.by[valid_indices], self.bt[valid_indices],self.logits[valid_indices]
# else:
# pdb.set_trace()
probi_index= self.prob_class_index(distribution, amt)
# bx,by,bt,logits=self.bx.squeeze(0), self.by.squeeze(0), self.bt.squeeze(0), self.logits.squeeze(0)
# if probi_index is None:probi_index=torch.tensor([], device='cuda:0', dtype=torch.int64)
# import pdb
# pdb.set_trace()
return self.bx[probi_index],self.by[probi_index],self.bt[probi_index],self.logits[probi_index]
else:
probi_index = self.prob_class_index(distribution, amt)
return self.bx[probi_index], self.by[probi_index], self.bt[probi_index]
    def onlysample(self, amt, task = None, ret_ind = False):
        """Uniformly sample up to `amt` examples drawn ONLY from the given task
        (all filled slots when `task` is None).

        Returns (x, y, logits, t[, indices]) when logits were stored, else
        (x, y, t[, indices]); returns everything when fewer than `amt` qualify.
        """
        if self.save_logits:
            if task is not None:
                valid_indices = (self.t == task)
                valid_indices = valid_indices.nonzero().squeeze()
                bx, by, bt, logits= self.bx[valid_indices], self.by[valid_indices], self.bt[valid_indices],self.logits[valid_indices]
            else:
                bx, by, bt, logits = self.bx[:self.current_index], self.by[:self.current_index], self.bt[:self.current_index],self.logits[:self.current_index]
            if bx.size(0) < amt:
                if ret_ind:
                    return bx, by, logits,bt, torch.from_numpy(np.arange(bx.size(0)))
                else:
                    return bx, by, logits,bt
            else:
                # Uniform draw without replacement.
                indices = torch.from_numpy(np.random.choice(bx.size(0), amt, replace=False))
                #if self.args.cuda:
                indices = indices.cuda()#to(self.args.device)
                if ret_ind:
                    return bx[indices], by[indices],logits[indices],bt[indices], indices
                else:
                    return bx[indices], by[indices],logits[indices], bt[indices]
        else:
            if task is not None:
                valid_indices = (self.t == task)
                valid_indices = valid_indices.nonzero().squeeze()
                bx, by, bt = self.bx[valid_indices], self.by[valid_indices], self.bt[valid_indices]
            else:
                bx, by, bt= self.bx[:self.current_index], self.by[:self.current_index], self.bt[
                    :self.current_index]
            if bx.size(0) < amt:
                if ret_ind:
                    return bx, by, bt, torch.from_numpy(np.arange(bx.size(0)))
                else:
                    return bx, by, bt
            else:
                indices = torch.from_numpy(np.random.choice(bx.size(0), amt, replace=False))
                # if self.args.cuda:
                indices = indices.cuda()  # to(self.args.device)
                if ret_ind:
                    return bx[indices], by[indices], bt[indices], indices
                else:
                    return bx[indices], by[indices], bt[indices]
def get_cifar_buffer(args, hH=8, gen=None):
    """Build a Buffer for hH x hH inputs (generator-latent mode enabled on args).

    Fix: Buffer.__init__ accepts no `gen` keyword, so the original call
    `Buffer(args, gen=gen)` raised TypeError at runtime.
    NOTE(review): `gen` itself is unused by Buffer — confirm whether a
    generator-aware buffer variant was intended here.
    """
    args.input_size = (hH, hH)
    args.gen = True
    return Buffer(args)
| 16,903 | 38.962175 | 193 | py |
GSA | GSA-main/GSA_CVPR/Resnet18.py | # Copyright 2020-present, Pietro Buzzega, Matteo Boschini, Angelo Porrello, Davide Abati, Simone Calderara.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.functional import relu, avg_pool2d
from typing import List
#from modified_linear import *
from torch.nn import functional as F
def conv3x3(in_planes: int, out_planes: int, stride: int = 1) -> nn.Conv2d:
    """
    Instantiates a 3x3 convolutional layer with no bias.
    :param in_planes: number of input channels
    :param out_planes: number of output channels
    :param stride: stride of the convolution
    :return: convolutional layer
    """
    # Fix: the return annotation was `F.conv2d` (the functional op), but this
    # function returns an `nn.Conv2d` module.
    return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
                     padding=1, bias=False)
class BasicBlock(nn.Module):
    """
    The basic block of ResNet: two 3x3 convolutions with batch norm and an
    identity (or 1x1-projection) shortcut.
    """
    expansion = 1

    def __init__(self, in_planes: int, planes: int, stride: int = 1) -> None:
        """
        Instantiates the basic block of the network.
        :param in_planes: the number of input channels
        :param planes: the number of channels (to be possibly expanded)
        :param stride: stride of the first convolution
        """
        super(BasicBlock, self).__init__()
        self.conv1 = conv3x3(in_planes, planes, stride)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = nn.BatchNorm2d(planes)
        # Project the shortcut only when the spatial size or channel count changes.
        if stride != 1 or in_planes != self.expansion * planes:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, self.expansion * planes, kernel_size=1,
                          stride=stride, bias=False),
                nn.BatchNorm2d(self.expansion * planes),
            )
        else:
            self.shortcut = nn.Sequential()

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """
        Compute a forward pass.
        :param x: input tensor (batch_size, in_planes, H, W)
        :return: output tensor (batch_size, planes, H', W')
        """
        residual = self.shortcut(x)
        h = relu(self.bn1(self.conv1(x)))
        h = self.bn2(self.conv2(h))
        return relu(h + residual)
class ResNet(nn.Module):
    """
    ResNet network architecture. Designed for complex datasets.
    """
    def __init__(self, block: BasicBlock, num_blocks: List[int],
                 num_classes: int, nf: int) -> None:
        """
        Instantiates the layers of the network.
        :param block: the basic ResNet block
        :param num_blocks: the number of blocks per layer
        :param num_classes: the number of output classes
        :param nf: the number of filters
        """
        super(ResNet, self).__init__()
        self.in_planes = nf
        self.block = block
        self.num_classes = num_classes
        self.nf = nf
        self.conv1 = conv3x3(3, nf * 1)
        self.bn1 = nn.BatchNorm2d(nf * 1)
        self.layer1 = self._make_layer(block, nf * 1, num_blocks[0], stride=1)
        self.layer2 = self._make_layer(block, nf * 2, num_blocks[1], stride=2)
        self.layer3 = self._make_layer(block, nf * 4, num_blocks[2], stride=2)
        self.layer4 = self._make_layer(block, nf * 8, num_blocks[3], stride=2)
        self.num_classes=num_classes
        self.linear = nn.Linear(nf * 8 * block.expansion, self.num_classes)#nn.utils.weight_norm(nn.Linear(nf * 8 * block.expansion, self.num_classes))
        # torch.nn.init.xavier_uniform(self.linear.weight)
        self.out_dim = nf * 8 * block.expansion  # feature dimension after pooling
        self.drop = nn.Dropout(p=0.2)
        # self.drop2 = nn.Dropout(p=0.3)
        # Projection heads (128-d) for contrastive / SimCLR-style objectives.
        self.simclr=nn.Linear(nf * 8 * block.expansion, 128)
        self.simclr2 = nn.Linear(nf * 8 * block.expansion, 128)
        # Convenience view over the backbone; shares the modules above (no copies).
        self._features = nn.Sequential(self.conv1,
                                       self.bn1,
                                       self.layer1,
                                       self.layer2,
                                       self.layer3,
                                       self.layer4
                                       )
        self.classifier = self.linear  # alias to the classification head
    def f_train_feat_map(self, x: torch.Tensor,mask=None) -> torch.Tensor:
        """Backbone forward through layer4; returns the spatial feature map
        WITHOUT pooling or flattening.

        NOTE(review): `mask` is accepted but never used in this body.
        :param x: input tensor (batch_size, 3, H, W)
        :return: feature map (batch_size, nf*8*expansion, H/8, W/8)
        """
        out = relu(self.bn1(self.conv1(x)))
        # pdb.set_trace()
        out = self.layer1(out)#,None)#,mask) # 64, 32, 32
        out = self.layer2(out)#,None)#,mask) # 128, 16, 16
        out = self.layer3(out)#,None) # 256, 8, 8
        # pdb.set_trace()
        #out = self.layer4.BasicBlock0
        out = self.layer4(out)#,None) # 512, 4, 4
        #out = avg_pool2d(out, out.shape[2]) # 512, 1, 1
        #out = out.view(out.size(0), -1) # 512
        return out
def _make_layer(self, block: BasicBlock, planes: int,
num_blocks: int, stride: int) -> nn.Module:
"""
Instantiates a ResNet layer.
:param block: ResNet basic block
:param planes: channels across the network
:param num_blocks: number of blocks
:param stride: stride
:return: ResNet layer
"""
strides = [stride] + [1] * (num_blocks - 1)
layers = []
for stride in strides:
layers.append(block(self.in_planes, planes, stride))
self.in_planes = planes * block.expansion
return nn.Sequential(*layers)
def f_train(self, x: torch.Tensor) -> torch.Tensor:
out = relu(self.bn1(self.conv1(x)))
# out = self.drop(out)
out = self.layer1(out) # 64, 32, 32
# out = self.drop(out)
out = self.layer2(out) # 128, 16, 16
# out = self.drop(out)
out = self.layer3(out) # 256, 8, 8
# out = self.drop(out)
out = self.layer4(out) # 512, 4, 4
# out = self.drop(out)
out = avg_pool2d(out, out.shape[2]) # 512, 1, 1
out = out.view(out.size(0), -1) # 512
return out
def f_inter(self, x: torch.Tensor) -> torch.Tensor:
out = relu(self.bn1(self.conv1(x)),inplace=True)
out = self.layer1(out) # 64, 32, 32
out = self.layer2(out) # 128, 16, 16
# 512, 1, 1
out = self.layer3(out) # 256, 8, 8
out = self.layer4(out)
out = out.view(out.size(0), -1) # 512
return out
def forward(self, x: torch.Tensor, is_simclr=False,is_simclr2=False,is_drop=False) -> torch.Tensor:
"""
Compute a forward pass.
:param x: input tensor (batch_size, *input_shape)
:return: output tensor (output_classes)
"""
'''
out = relu(self.bn1(self.conv1(x)))
out = self.layer1(out) # 64, 32, 32
out = self.layer2(out) # 128, 16, 16
out = self.layer3(out) # 256, 8, 8
'''
out = self.f_train(x)
#out = self.drop(out)
'''
out = self.layer4(out) # 512, 4, 4
out = avg_pool2d(out, out.shape[2]) # 512, 1, 1
out = out.view(out.size(0), -1) # 512
'''
if is_simclr:
# out=self.drop2(out)
out = self.simclr(out)
elif is_drop:
#out=nn.dropout
out=self.drop(out)
out = self.linear(out)
# out=out.detach()
# out = self.drop(out)
else:
# out=out / (out.norm(dim=1, keepdim=True) + 1e-8)
# out = self.drop(out)
out = self.linear(out)
return out
    def change_output_dim(self, new_dim, second_iter=False):
        """Grow the classifier head by `new_dim` additional classes, copying the
        old weights/biases into the leading rows of the new head.

        NOTE(review): `self.prev_weights` is a freshly-initialized CPU Linear of
        the new size — the lines that would copy the old head into it are
        commented out, so `prev_logit` sees random weights; confirm intent.
        `second_iter` is unused in this body.
        """
        self.prev_weights = nn.Linear(self.out_dim, self.num_classes+new_dim)
        in_features = self.out_dim
        out_features = self.num_classes+new_dim
        # old_embedding_weights = self.embedding.weight.data
        # create a new embedding of the new size
        #nn.Embedding(new_vocab_size, embedding_dim)
        # initialize the values for the new embedding. this does random, but you might want to use something like GloVe
        new_weights =nn.Linear(in_features,out_features)#nn.Linear(in_features,out_features,bias=False)
        # as your old values may have been updated, you want to retrieve these updates values
        # new_weights[:old_vocab_size] = old_embedding_weights
        print("in_features:", in_features, "out_features:", out_features)
        ## self.weight_new =Parameter(torch.Tensor(out_features,in_features))
        # new_out_features = new_dim
        # num_new_classes = new_dim - out_features
        #new_fc = SplitCosineLinear(in_features, out_features, num_new_classes)
        # new_fc= nn.Linear(in_features,out_features)
        # torch.nn.init.xavier_uniform(new_fc.weight)
        # self.weight_new.data[:self.num_classes] = self.linear.weight.data
        # Preserve the already-learned rows of the old head.
        new_weights.weight.data[:self.num_classes] = self.linear.weight.data
        new_weights.bias.data[:self.num_classes] = self.linear.bias.data
        # self.prev_weights.weight.data[:self.num_classes] = self.linear.weight.data
        # self.prev_weights.bias.data[:self.num_classes] = self.linear.bias.data
        # self.linear.weight = self.weight_new#nn.Linear(in_features, out_features)
        #self.linear.weight.data.copy_(new_weights.weight.data)
        #elf.linear.bias.data.copy_(new_weights.bias.data)
        #new_fc.sigma.data = self.fc.sigma.data
        from torch.nn.parameter import Parameter
        self.linear = new_weights.cuda()
        # NOTE(review): requires_grad on a Module (not its parameters) has no
        # effect on training — presumably intended for the parameters; confirm.
        self.linear.requires_grad=True
        self.num_classes = out_features
        # return prev_weights
def features(self, x: torch.Tensor) -> torch.Tensor:
"""
Returns the non-activated output of the second-last layer.
:param x: input tensor (batch_size, *input_shape)
:return: output tensor (??)
"""
out = self._features(x)
out = avg_pool2d(out, out.shape[2])
feat = out.view(out.size(0), -1)
return feat
def prev_logit(self, x: torch.Tensor) -> torch.Tensor:
"""
Returns the non-activated output of the second-last layer.
:param x: input tensor (batch_size, *input_shape)
:return: output tensor (??)
"""
out = self.prev_weights(x)
return out
def get_params(self) -> torch.Tensor:
"""
Returns all the parameters concatenated in a single tensor.
:return: parameters tensor (??)
"""
params = []
for pp in list(self.parameters()):
params.append(pp.view(-1))
return torch.cat(params)
def set_params(self, new_params: torch.Tensor) -> None:
"""
Sets the parameters to a given value.
:param new_params: concatenated values to be set (??)
"""
assert new_params.size() == self.get_params().size()
progress = 0
for pp in list(self.parameters()):
cand_params = new_params[progress: progress +
torch.tensor(pp.size()).prod()].view(pp.size())
progress += torch.tensor(pp.size()).prod()
pp.data = cand_params
def get_grads(self) -> torch.Tensor:
"""
Returns all the gradients concatenated in a single tensor.
:return: gradients tensor (??)
"""
grads = []
for pp in list(self.parameters()):
grads.append(pp.grad.view(-1))
return torch.cat(grads)
def resnet18(nclasses: int, nf: int = 64) -> ResNet:
    """
    Instantiates a ResNet18 network.
    :param nclasses: number of output classes
    :param nf: number of filters in the first block (default 64)
    :return: ResNet network
    """
    # Bug fix: `nf` was accepted but ignored (hard-coded 64); forward it so
    # callers passing a different width actually get it. Default unchanged.
    return ResNet(BasicBlock, [2, 2, 2, 2], nclasses, nf=nf)
| 11,584 | 37.108553 | 151 | py |
GSA | GSA-main/GSA_CVPR/conf.py | # Copyright 2020-present, Pietro Buzzega, Matteo Boschini, Angelo Porrello, Davide Abati, Simone Calderara.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import random
import torch
import numpy as np
from abc import abstractmethod
from argparse import Namespace
from torch import nn as nn
from torchvision.transforms import transforms
from torch.utils.data import DataLoader
from typing import Tuple
from torchvision import datasets
import numpy as np
def get_device() -> torch.device:
    """
    Returns the GPU device if available else CPU.
    """
    if torch.cuda.is_available():
        return torch.device("cuda:0")
    return torch.device("cpu")
def base_path() -> str:
    """
    Base directory used for logging accuracies and tensorboard data.
    """
    return './data/'
def set_random_seed(seed: int) -> None:
    """
    Seed every RNG the project uses (python, numpy, torch, all CUDA devices).
    :param seed: the value to be set
    """
    for seeder in (random.seed, np.random.seed,
                   torch.manual_seed, torch.cuda.manual_seed_all):
        seeder(seed)
class ContinualDataset:
    """
    Abstract description of a continual-learning evaluation setting.

    Concrete subclasses fill in the class constants and implement the
    factory methods below.
    """
    NAME = None
    SETTING = None
    N_CLASSES_PER_TASK = None
    N_TASKS = None
    TRANSFORM = None

    def __init__(self, args: Namespace) -> None:
        """
        Initializes the train and test lists of dataloaders.
        :param args: the arguments which contains the hyperparameters
        """
        self.train_loader = None
        self.test_loaders = []
        self.i = 0  # index of the current task
        self.args = args

    @abstractmethod
    def get_data_loaders(self) -> Tuple[DataLoader, DataLoader]:
        """
        Builds the loaders for the current task and records them on self
        (current training loader plus the accumulated test loaders).
        :return: the current training and test loaders
        """
        pass

    @abstractmethod
    def not_aug_dataloader(self, batch_size: int) -> DataLoader:
        """
        Builds the current task's training loader with no augmentation.
        :param batch_size: the batch size of the loader
        :return: the current training loader
        """
        pass

    @staticmethod
    @abstractmethod
    def get_backbone() -> nn.Module:
        """
        Returns the backbone network to use for this dataset.
        """
        pass

    @staticmethod
    @abstractmethod
    def get_transform() -> transforms:
        """
        Returns the (augmenting) transform for this dataset.
        """
        pass

    @staticmethod
    @abstractmethod
    def get_loss() -> nn.functional:
        """
        Returns the loss function for this dataset.
        """
        pass

    @staticmethod
    @abstractmethod
    def get_normalization_transform() -> transforms:
        """
        Returns the normalization transform for this dataset.
        """
        pass

    @staticmethod
    @abstractmethod
    def get_denormalization_transform() -> transforms:
        """
        Returns the inverse-normalization transform for this dataset.
        """
        pass
| 3,209 | 25.311475 | 107 | py |
GSA | GSA-main/GSA_CVPR/test_cifar100.py | import ipaddress
import sys, argparse
import numpy as np
import torch
from torch.nn.functional import relu, avg_pool2d
from buffer import Buffer
# import utils
import datetime
from torch.nn.functional import relu
import torch
import torch.nn as nn
import torch.nn.functional as F
from CSL import tao as TL
from CSL import classifier as C
from CSL.utils import normalize
from CSL.contrastive_learning import get_similarity_matrix, NT_xent, Supervised_NT_xent, SupConLoss
import torch.optim.lr_scheduler as lr_scheduler
from CSL.shedular import GradualWarmupScheduler
import torch
import torchvision.transforms as transforms
import torchvision
# Arguments
# Command-line configuration for the online continual-learning run.
parser = argparse.ArgumentParser()
parser.add_argument('--seed', type=int, default=0, help='(default=%(default)d)')
parser.add_argument('--experiment', default='cifar-10', type=str, required=False, help='(default=%(default)s)')
parser.add_argument('--lr', default=0.02, type=float, required=False, help='(default=%(default)f)')
parser.add_argument('--parameter', type=str, default='', help='(default=%(default)s)')
parser.add_argument('--dataset', type=str, default='cifar', help='(default=%(default)s)')
parser.add_argument('--input_size', type=str, default=[3, 32, 32], help='(default=%(default)s)')
parser.add_argument('--buffer_size', type=int, default=1000, help='(default=%(default)s)')
parser.add_argument('--gen', type=str, default=True, help='(default=%(default)s)')
parser.add_argument('--p1', type=float, default=0.1, help='(default=%(default)s)')
parser.add_argument('--cuda', type=str, default='1', help='(default=%(default)s)')
parser.add_argument('--n_classes', type=int, default=512, help='(default=%(default)s)')
parser.add_argument('--buffer_batch_size', type=int, default=64, help='(default=%(default)s)')
args = parser.parse_args()
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # ignore warning
# Restrict visible GPUs to the one selected on the command line.
os.environ["CUDA_VISIBLE_DEVICES"] = args.cuda # use gpu0,1
def flip_inner(x, flip1, flip2):
    """Split each image into two horizontal strips and optionally mirror each.

    ``x`` is a (N, 3, 32, 32) batch; every image is viewed as two 3x16x32
    strips.  Strip 1 / strip 2 is flipped along its last (width) axis when
    ``flip1`` / ``flip2`` is truthy, then the strips are reassembled into
    (N, 3, 32, 32).
    """
    batch = x.shape[0]
    strips = x.view(batch, 3, 2, 16, 32).permute(2, 0, 1, 3, 4)
    top, bottom = strips[0], strips[1]
    if flip1:
        top = torch.flip(top, (3,))
    if flip2:
        bottom = torch.flip(bottom, (3,))
    merged = torch.cat((top.unsqueeze(2), bottom.unsqueeze(2)), dim=2)
    return merged.reshape(batch, 3, 32, 32)
def RandomFlip(x, num):
    """Return `num` views of batch `x` concatenated along the batch axis.

    The batch is first passed through the module-level ``simclr_aug``
    pipeline; the views are the augmented batch itself plus up to three
    inner-flip variants (both strips, bottom only, top only).
    """
    x = simclr_aug(x)
    views = [x,
             flip_inner(x, 1, 1),
             flip_inner(x, 0, 1),
             flip_inner(x, 1, 0)]
    return torch.cat(views[:num], dim=0)
def rot_inner(x, rot1, rot2):
    """Rotate the two inner halves of each (3, 32, 32) image by 180° steps.

    Each image is transposed, viewed as two 3x16x32 strips, each strip is
    rotated by ``2 * rot1`` / ``2 * rot2`` quarter turns (i.e. multiples of
    180 degrees), then the strips are reassembled and transposed back.
    """
    batch = x.shape[0]
    halves = x.permute(0, 1, 3, 2).view(batch, 3, 2, 16, 32).permute(2, 0, 1, 3, 4)
    first = torch.rot90(halves[0], 2 * rot1, (2, 3))
    second = torch.rot90(halves[1], 2 * rot2, (2, 3))
    joined = torch.cat((first.unsqueeze(2), second.unsqueeze(2)), dim=2)
    return joined.reshape(batch, 3, 32, 32).permute(0, 1, 3, 2)
def Rotation(x, r):
    """Rotate the whole batch `x` by ``r`` * 90 degrees in the H/W plane.

    NOTE(review): the original also built a list of four rot_inner()
    variants that was never used (the function returned plain ``x``); that
    dead, side-effect-free computation is removed here.  The returned
    value is unchanged: only the global 90-degree rotation.
    """
    return torch.rot90(x, r, (2, 3))
oop = 4
# Echo the full configuration and a timestamp before training starts.
print('=' * 100)
print('Arguments =')
for arg in vars(args):
    print('\t' + arg + ':', getattr(args, arg))
print('=' * 100)
print(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'), 'GPU ' + os.environ["CUDA_VISIBLE_DEVICES"])
print('=' * 100)
########################################################################################################################
# Seed numpy/torch for reproducibility; the script requires CUDA and
# aborts otherwise.
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if torch.cuda.is_available():
    torch.cuda.manual_seed(args.seed)
else:
    print('[CUDA unavailable]')
    sys.exit()
# Project-local data loader, backbone and replay-buffer implementations.
import cifar as dataloader
from Resnet18 import resnet18 as b_model
from buffer import Buffer as buffer
# imagenet200 import SequentialTinyImagenet as STI
from torch.optim import Adam, SGD # ,SparseAdam
import torch.nn.functional as F
from copy import deepcopy
import matplotlib.pyplot as plt
def test_model_cur(loder, i):
    """Task-incremental accuracy for task `i`: logits are restricted to the
    task's own two output columns and targets are shifted by ``2 * i``.

    Uses the module-level ``Basic_model``; the printed loss is always 0
    (never accumulated), matching the original reporting.
    """
    test_loss = 0
    correct = 0
    num = 0
    for data, target in loder:
        data, target = data.cuda(), target.cuda()
        Basic_model.eval()
        logits = Basic_model.forward(data)[:, 2 * (i):2 * (i + 1)]
        predicted = logits.data.max(1, keepdim=True)[1]
        num += data.size()[0]
        target = target - 2 * i
        correct += predicted.eq(target.data.view_as(predicted)).cpu().sum()
    test_accuracy = 100. * correct / num
    print(
        'Test set{}: Average loss: {:.4f}, Accuracy: {}/{} ({:.2f}%)'
        .format(i,
                test_loss, correct, num,
                100. * correct / num, ))
    return test_accuracy
def test_model_past(loder, i):
    """Accuracy over all classes seen up to and including task `i`: logits
    are restricted to the first ``2 * (i + 1)`` columns, targets unchanged.

    Uses the module-level ``Basic_model``; printed loss is always 0.
    """
    test_loss = 0
    correct = 0
    num = 0
    for data, target in loder:
        data, target = data.cuda(), target.cuda()
        Basic_model.eval()
        logits = Basic_model.forward(data)[:, :2 * (i + 1)]
        predicted = logits.data.max(1, keepdim=True)[1]
        num += data.size()[0]
        correct += predicted.eq(target.data.view_as(predicted)).cpu().sum()
    test_accuracy = 100. * correct / num
    print(
        'Test set{}: Average loss: {:.4f}, Accuracy: {}/{} ({:.2f}%)'
        .format(i,
                test_loss, correct, num,
                100. * correct / num, ))
    return test_accuracy
def test_model_future(loder, i):
    """Accuracy using only logits from task `i` onward (columns ``2*i:``);
    targets are shifted by ``2 * i`` so they index into the slice.

    Uses the module-level ``Basic_model``; printed loss is always 0.
    """
    test_loss = 0
    correct = 0
    num = 0
    for data, target in loder:
        data, target = data.cuda(), target.cuda()
        Basic_model.eval()
        logits = Basic_model.forward(data)[:, 2 * i:]
        predicted = logits.data.max(1, keepdim=True)[1]
        num += data.size()[0]
        target = target - 2 * i
        correct += predicted.eq(target.data.view_as(predicted)).cpu().sum()
    test_accuracy = 100. * correct / num
    print(
        'Test set{}: Average loss: {:.4f}, Accuracy: {}/{} ({:.2f}%)'
        .format(i,
                test_loss, correct, num,
                100. * correct / num, ))
    return test_accuracy
def test_model(loder, i):
    """Class-incremental accuracy over the full output head for loader `i`.

    Uses the module-level ``Basic_model``; printed loss is always 0.
    """
    test_loss = 0
    correct = 0
    num = 0
    for data, target in loder:
        data, target = data.cuda(), target.cuda()
        Basic_model.eval()
        logits = Basic_model.forward(data)
        predicted = logits.data.max(1, keepdim=True)[1]
        num += data.size()[0]
        correct += predicted.eq(target.data.view_as(predicted)).cpu().sum()
    test_accuracy = 100. * correct / num
    print(
        'Test set{}: Average loss: {:.4f}, Accuracy: {}/{} ({:.2f}%)'
        .format(i,
                test_loss, correct, num,
                100. * correct / num, ))
    return test_accuracy
def get_true_prob(x, y, llabel):
    """Return a CUDA float tensor with 1.0 where ``y[i]`` is in `llabel`
    and 0.0 otherwise.

    `x` is used only for its batch size.
    """
    flags = [1 if y[i] in llabel else 0 for i in range(x.size()[0])]
    return torch.FloatTensor(flags).cuda()
def get_prob_rate(x, logits, label):
    """Per-sample ratio of the true-class softmax probability to the
    highest softmax probability (1.0 iff the true class is the argmax).

    `x` is used only for its batch size.
    """
    probs = F.softmax(logits, dim=1)
    ratios = [probs[i][label[i]].item() / torch.max(probs[i])
              for i in range(x.size()[0])]
    return torch.FloatTensor(ratios).cuda()
def get_prob_rate_cross(logits, label, t):
    """Per-sample ratio of the true-class softmax probability to the
    maximum softmax probability among all but the last `t` classes
    (i.e. excluding the newest task's outputs)."""
    probs = F.softmax(logits, dim=1)
    ratios = [probs[i][label[i]].item() / torch.max(probs[i, :-t])
              for i in range(probs.size()[0])]
    return torch.FloatTensor(ratios).cuda()
def get_mean_rate_cross(logits, label, t):
    """Same computation as ``get_prob_rate_cross``: ratio of the
    true-class probability to the max probability outside the last `t`
    classes.

    NOTE(review): despite the name, no mean is taken — this is a verbatim
    duplicate of get_prob_rate_cross, kept for compatibility.
    """
    probs = F.softmax(logits, dim=1)
    ratios = [probs[i][label[i]].item() / torch.max(probs[i, :-t])
              for i in range(probs.size()[0])]
    return torch.FloatTensor(ratios).cuda()
print('Load data...')
num_class_per_task=10
# Two views of CIFAR-100: 10 tasks of 10 classes, and a 100-way per-class
# split used for the final per-class accuracy report.
data, taskcla, inputsize, Loder, test_loder = dataloader.get_cifar100_10(seed=args.seed)
data2, taskcla2, inputsize2, Loder2, test_loder2 = dataloader.get_cifar100_100d(seed=args.seed)
print('Input size =', inputsize, '\nTask info =', taskcla)
# Replay buffer, backbone (starts with a 10-way head) and optimizer.
buffero = buffer(args).cuda()
Basic_model = b_model(num_class_per_task).cuda()
llabel = {}
Optimizer = Adam(Basic_model.parameters(), lr=0.001, betas=(0.9, 0.99),
                 weight_decay=1e-4) # SGD(Basic_model.parameters(), lr=0.02, momentum=0.9)
# Mixed-precision training via NVIDIA apex (O1 = conservative mixed precision).
from apex import amp
Basic_model, Optimizer = amp.initialize(Basic_model, Optimizer,opt_level="O1")
hflip = TL.HorizontalFlipLayer().cuda()
cutperm = TL.CutPerm().cuda()
with torch.no_grad():
    resize_scale = (0.6, 1.0) # resize scaling factor,default [0.08,1]
    # if P.resize_fix: # if resize_fix is True, use same scale
    #     resize_scale = (P.resize_factor, P.resize_factor)
    # Align augmentation
    # color_jitter = TL.ColorJitterLayer(brightness=0.4, contrast=0.4, saturation=0.4, hue=0.1, p=0.8).cuda()
    color_gray = TL.RandomColorGrayLayer(p=0.2).cuda()
    resize_crop = TL.RandomResizedCropLayer(scale=resize_scale, size=[32, 32, 3]).cuda()
    # GPU-side SimCLR augmentation pipeline used throughout training.
    simclr_aug = transform = torch.nn.Sequential(color_gray, resize_crop,
                                                 # color_jitter, # does not change size, but alters channel values, mixing old and new
                                                 # resize_crop,
                                                 )
    #color_gray, # also size-preserving; mixes styles
    # resize_crop,
# for n,w in Basic_model.named_parameters():
#     print(n,w.shape)
Max_acc = []
print('=' * 100)
print(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'), 'GPU ' + os.environ["CUDA_VISIBLE_DEVICES"])
print('=' * 100)
# Global bookkeeping: classes seen so far and running positive/negative
# softmax-mass statistics used for the logit re-weighting loss.
class_holder = []
class_prototype = {}
buffer_per_class = 7
flip_num = 2
negative_logits_SUM = None
positive_logits_SUM = None
num_SUM = 0
Category_sum=None
import pdb
#pdb.set_trace()
for run in range(1):
    # rank = torch.randperm(len(Loder))
    # Fixed task order 0..9 (no permutation).
    rank = torch.arange(0,10)#tensor([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
    for i in range(len(Loder)):
        new_class_holder = []
        print(i)
        task_id = i
        prev_index=True
        # For every task after the first, grow the classifier head and
        # extend the global positive/negative statistics accumulators.
        if i > 0:
            Basic_model.change_output_dim(num_class_per_task)
            Category_sum = torch.cat((Category_sum, torch.zeros(num_class_per_task)))
            negative_logits_SUM = torch.cat(
                (negative_logits_SUM, torch.zeros(num_class_per_task).cuda()))
            positive_logits_SUM = torch.cat(
                (positive_logits_SUM, torch.zeros(num_class_per_task).cuda()))
        # Category_sum = torch.cat((Category_sum, torch.zeros(num_class_per_task)))
        # negative_logits_SUM = torch.cat((negative_logits_SUM, torch.zeros(num_class_per_task).cuda()))
        # positive_logits_SUM = torch.cat((positive_logits_SUM, torch.zeros(num_class_per_task).cuda()))
        #if task_id>=2:
        #    for name,param in Basic_model.named_parameters():
        #        if "layer1.0" in name:
        #            param.requires_grad=False
        #        if "layer2.0" in name:
        #            param.requires_grad=False
        #        if "layer3.0" in name:
        #            param.requires_grad=False
        train_loader = Loder[rank[i].item()]['train']
        # Per-task statistics (lower-case) reset at every task boundary.
        negative_logits_sum=None
        positive_logits_sum=None
        sum_num=0
        category_sum = None
        for epoch in range(1):
            Basic_model.train()
            num_d = 0
            for batch_idx, (x, y) in enumerate(train_loader):
                # if batch_idx>=10:
                #     continue
                num_d += x.shape[0]
                if num_d % 5000 == 0:
                    print(num_d, num_d / 10000)
                llabel[i] = []
                # Register any labels not seen before as new classes.
                Y = deepcopy(y)
                for j in range(len(Y)):
                    if Y[j] not in class_holder:
                        class_holder.append(Y[j].detach())
                        class_prototype[Y[j].detach()] = 0
                        new_class_holder.append(Y[j].detach())
                #if i > 0:
                #    Basic_model.change_output_dim(num_class_per_task)
                # if i > 0:
                #Basic_model.change_output_dim(1)
                Optimizer.zero_grad()
                # if args.cuda:
                x, y = x.cuda(), y.cuda()
                ori_x = x.detach()
                ori_y = y.detach()
                x = x.requires_grad_()
                # Augment the incoming batch with current-task samples from
                # the buffer.  NOTE(review): `batch_idx==0&task_id==0`
                # parses as a chained comparison with `0 & task_id` (always
                # 0), so this condition is effectively `batch_idx == 0`.
                if batch_idx==0&task_id==0:
                    cur_x, cur_y,_ = torch.zeros(1),torch.zeros(1),torch.zeros(1)#,None,None#buffero.onlysample(22, task=task_id)
                else:
                    cur_x, cur_y, _,_ = buffero.onlysample(22, task=task_id)
                if len(cur_x.shape) > 3:
                    x = torch.cat((x, cur_x), dim=0)
                    y = torch.cat((y, cur_y))
                # Build a (clean, augmented) pair of views for the
                # supervised contrastive loss on the current batch.
                images1 = torch.cat([torch.rot90(x, rot, (2, 3)) for rot in range(1)]) # 4B
                images2 = torch.cat([torch.rot90(x, rot, (2, 3)) for rot in range(1)]) # 4B
                images_pair = torch.cat([images1, simclr_aug(images2)], dim=0) # 8B
                labels1 = y.cuda()
                # print("LLLL",labels1.shape)
                rot_sim_labels = torch.cat([labels1 + 100 * i for i in range(1)], dim=0)
                Rot_sim_labels = torch.cat([labels1 + 0 * i for i in range(1)], dim=0)
                rot_sim_labels = rot_sim_labels.cuda()
                outputs_aux = Basic_model(images_pair, is_simclr=True)
                simclr = normalize(outputs_aux) # normalize
                sim_matrix = get_similarity_matrix(simclr)
                loss_sim1 = Supervised_NT_xent(sim_matrix, labels=rot_sim_labels,
                                               temperature=0.07)
                # Replay branch: once the buffer has data, mix buffer and
                # current samples and build the cross-task losses.
                if not buffero.is_empty():
                    buffer_batch_size = 64
                    # x = x.requires_grad_()
                    x = RandomFlip(x, flip_num)
                    y = y.repeat(flip_num)
                    x = x.requires_grad_()
                    hidden_pred = Basic_model.f_train(simclr_aug(x))
                    pred_y = Basic_model.linear(hidden_pred)
                    #
                    t = num_class_per_task#len(new_class_holder)
                    # New-task logits only; old logits are pushed to zero
                    # via the balance term.
                    if task_id>0:
                        pred_y_new = pred_y[:, -t:]#torch.cat([Basic_model.linear(hidden_pred)[:, :-t].data.detach(),pred_y[:, -t:]],dim=1)
                        loss_balance = (pred_y[:,:-t]**2).mean()
                    else:
                        pred_y_new=pred_y
                        loss_balance=0
                    min_y = min(new_class_holder)
                    y_new = y - num_class_per_task*i#min_y
                    num_x=ori_y.size()[0]
                    # Fraction of classes that belong to the current task;
                    # controls the buffer/current mixing ratio below.
                    rate=len(new_class_holder)/len(class_holder)
                    mem_x, mem_y, logits, bt = buffero.sample(int(buffer_batch_size*(1-rate))*1, exclude_task=task_id)
                    #if task_id>0:
                    #distribution = torch.ones(2 * task_id).cuda()
                    #distribution /= distribution.sum()
                    #    pdb.set_trace()
                    # if task_id>=3:
                    #    pdb.set_trace()
                    # mem_x, mem_y, _, bt = buffero.pro_class_sample(int(buffer_batch_size*(1-rate))*1, distribution=distribution)
                    # index_only = torch.randperm(mem_y_only.size()[0])
                    # mem_x_only = mem_x_only[index_only][:]
                    #mem_y_only = mem_y_only[index_only][:]
                    index_x=ori_x
                    index_y=ori_y
                    if len(cur_x.shape) > 3:
                        index_x = torch.cat((index_x, cur_x), dim=0)
                        index_y = torch.cat((index_y, cur_y))
                    all_x = torch.cat((mem_x, index_x), dim=0)
                    all_y = torch.cat((mem_y, index_y))
                    # index_cur = torch.randperm(index_y.size()[0])
                    # index_x = index_x[index_cur][:]
                    #index_y = index_y[index_cur][:]
                    # if len(class_holder)>len(new_class_holder):
                    #    prev_hiddens=Previous_model.forward(mem_x)
                    #    cur_hiddens=Basic_model.forward(mem_x)[:,:-len(new_class_holder)]
                    #    cur_logits=torch.sum(F.softmax(Basic_model.forward(mem_x))[:,:-len(new_class_holder)],dim=1)
                    #    _,idx_cur=torch.sort(cur_logits)
                    #    mem_x=mem_x[idx_cur]
                    #    mem_y=mem_y[idx_cur]
                    # import pdb
                    # pdb.set_trace()
                    #    logits_cur=F.softmax(Basic_model.forward(ori_x))
                    #    logits_pre=torch.sum(logits_cur[:,:-len(new_class_holder)],dim=1)
                    #    _,idx_pre=torch.sort(logits_pre,descending=True)
                    #    ori_x=ori_x[idx_pre]
                    #    ori_y=ori_y[idx_pre]
                    # Assemble a mixed mini-batch: (1-rate) buffer samples
                    # plus rate current samples, then shuffle.
                    mem_x = torch.cat((mem_x[:int(buffer_batch_size*(1-rate))],index_x[:int(buffer_batch_size*rate)]),dim=0)
                    mem_y = torch.cat((mem_y[:int(buffer_batch_size*(1-rate))],index_y[:int(buffer_batch_size*rate)]))
                    logits = torch.cat((logits[:int(buffer_batch_size*(1-rate))],Basic_model.f_train(index_x[:int(buffer_batch_size*rate)])),dim=0)
                    index = torch.randperm(mem_y.size()[0])
                    mem_x=mem_x[index][:]
                    mem_y=mem_y[index][:]
                    logits=logits[index][:]
                    mem_dif = torch.zeros_like(mem_x)
                    mem_dif.data = deepcopy(mem_x.data)
                    loss_div = 0
                    # Background-swapping augmentation: for each class,
                    # splice non-object pixels from other-class images into
                    # the background regions (feature activation <= 0.5).
                    with torch.no_grad():
                        from utils import feat_normalized
                        feat = feat_normalized(Basic_model, mem_x)
                        feat_all = feat_normalized(Basic_model, all_x)
                        num = mem_x.shape[0]
                        # repeat_num=2
                        # mem_x = mem_x.repeat(repeat_num, 1, 1, 1)
                        mask_object = feat > 0.5#args.p2
                        mask_object_2 = feat_all > 0.5#0.5args.p2
                        for ii in range((task_id) * 2):
                            # index_mix=[]
                            index = mem_y == ii
                            index_dif = all_y != ii # .float()#
                            if index.sum() > 0:
                                # for tt in range(repeat_num-1):
                                #    index_mix.append(mem_y==ii+1)
                                # pdb.set_trace()
                                random_id = torch.from_numpy(
                                    np.random.choice(index_dif.sum().cpu().item(), index.sum().cpu().item(),
                                                     replace=True)).cuda() # torch.randperm(index.sum())
                                mask_background1 = ((mask_object[index]).float() + (
                                        ~mask_object_2[index_dif][random_id]).float() == 2)
                                mask_background2 = mask_object[index].float() - mask_background1.float()
                                # pdb.set_trace()
                                mem_dif[index] = mem_x[:num][index] * (
                                        1 - mask_object[index].float() + mask_background2.float()) + all_x[index_dif][
                                                     random_id] * mask_background1
                    # pdb.set_trace()
                    # mem_y=mem_y.repeat(repeat_num)
                    # Relational distillation between the clean batch
                    # (teacher, no grad) and the background-swapped batch.
                    teacher_temperature = 0.1
                    student_temperature = 0.07
                    # mem_x = mem_x.requires_grad_()
                    with torch.no_grad():
                        hidden_normal = normalize(Basic_model.simclr(Basic_model.f_train(mem_x)))
                        hidden_same_normal = normalize(Basic_model.simclr(Basic_model.f_train(mem_x)))
                        hidden_same_batch = torch.matmul(hidden_same_normal, hidden_normal.t()) / teacher_temperature
                        relation_sam = F.softmax(hidden_same_batch, dim=0)
                    mem_dif = mem_dif.requires_grad_()
                    hidden_dif_normal = normalize(Basic_model.simclr(Basic_model.f_train(mem_dif)))
                    hidden_dif_batch = torch.matmul(hidden_dif_normal, hidden_normal.t()) / student_temperature
                    relation_dif = F.log_softmax(hidden_dif_batch, dim=0)
                    loss_dif = F.kl_div(relation_dif, relation_sam,
                                        reduction='batchmean') # -(relation_sam * torch.nn.functional.log_softmax(relation_dif, 1)).sum()/relation_dif.shape[0]
                    mem_y = mem_y.reshape(-1)
                    mem_x = mem_x.requires_grad_()
                    # Supervised contrastive loss on the mixed memory batch.
                    images1_r = torch.cat([Rotation(mem_x, r) for r in range(1)])
                    images2_r = torch.cat([Rotation(mem_x, r) for r in range(1)])
                    images_pair_r = torch.cat([images1_r, simclr_aug(images2_r)], dim=0)
                    u = Basic_model(images_pair_r, is_simclr=True)
                    images_out_r = u
                    simclr_r = normalize(images_out_r)
                    rot_sim_labels_r = torch.cat([mem_y.cuda() + 100 * i for i in range(1)], dim=0)
                    sim_matrix_r = get_similarity_matrix(simclr_r)
                    loss_sim_r = Supervised_NT_xent(sim_matrix_r, labels=rot_sim_labels_r, temperature=0.07)
                    lo1 = 1 * loss_sim_r + 1*loss_sim1
                    hidden = Basic_model.f_train(mem_x)
                    # if len(class_holder) > len(new_class_holder):
                    #    T=2
                    #    loss_kd= 1.0*((hidden-logits)**2).mean()+2.0*((prev_hiddens-cur_hiddens)**2).mean()
                    #else:
                    #    loss_kd = 1.0*((hidden-logits)**2).mean()
                    # if len(class_holder) > len(new_class_holder):
                    # import pdb
                    # pdb.set_trace()
                    mem_x = RandomFlip(mem_x, flip_num)
                    mem_y = mem_y.repeat(flip_num)
                    y_pred = Basic_model.forward(mem_x)
                    y_pred_hidden=Basic_model.f_train(mem_x)
                    loss_old=0
                    #if i >0:
                    #    pdb.set_trace()
                    #    prev_logits= Previous_model.linear(y_pred_hidden)
                    #    loss_old=F.mse_loss(prev_logits,y_pred[:,:-2])
                    y_pred_new = y_pred
                    loss_only=0
                    # category_matrix_new = torch.zeros(logits_new.shape)
                    # Hand-rolled softmax so per-class re-weighting can be
                    # applied to the resulting probabilities.
                    exp_new = torch.exp(y_pred_new)
                    # positive_matrix = torch.ones_like(exp_new)
                    # Negative_matrix = torch.ones_like(exp_new)
                    # for i_v in range(int(exp_new.shape[0])):
                    #    category_matrix_new[i_v][mem_y[i_v]] = 1
                    #    Negative_matrix[i_v][:-len(new_class_holder)] = 1 / (torch.exp(-NT[:-len(new_class_holder)] - 0.1))
                    #    if mem_y[i_v] in new_class_holder:
                    #        continue
                    #1 / NT[:-len(new_class_holder)]
                    #    else:
                    #        positive_matrix[i_v][mem_y[i_v]] = 1#1/(NT[mem_y[i_v]])
                    #    if mem_y[i_v] in new_class_holder:
                    #        Negative_matrix[i_v][:-len(new_class_holder)] = 1 / NT[:-len(new_class_holder)]
                    #        positive_matrix[i_v][mem_y[i_v]] = 1 # 1 / (NT[mem_y[i_v]])
                    #else:
                    #    positive_matrix[i_v][mem_y[i_v]] = 1 / (torch.exp(-ANT[mem_y[i_v]] - 0.1))
                    # pdb.set_trace()
                    # if task_id > 0:
                    # print(Negative_matrix)
                    exp_new = exp_new# * Negative_matrix
                    # pdb.set_trace()
                    exp_new_sum = torch.sum(exp_new, dim=1)
                    logits_new = (exp_new / exp_new_sum.unsqueeze(1))
                    # One-hot matrix of the ground-truth classes.
                    category_matrix_new = torch.zeros(logits_new.shape)
                    for i_v in range(int(logits_new.shape[0])):
                        category_matrix_new[i_v][mem_y[i_v]] = 1
                    # positive_matrix[i_v][mem_y[i_v]]=0
                    # if task_id>0:
                    # import pdb
                    # pdb.set_trace()
                    # import pdb
                    # pdb.set_trace()
                    # Split probability mass into true-class (positive) and
                    # everything else (false/negative) per sample.
                    positive_prob = torch.zeros(logits_new.shape)
                    false_prob = deepcopy(logits_new.detach())
                    for i_t in range(int(logits_new.shape[0])):
                        false_prob[i_t][mem_y[i_t]] = 0
                        positive_prob[i_t][mem_y[i_t]] = logits_new[i_t][mem_y[i_t]].detach()
                    # Accumulate per-task (lower-case) statistics.
                    if negative_logits_sum is None:
                        negative_logits_sum = torch.sum(false_prob, dim=0)
                        positive_logits_sum = torch.sum(positive_prob, dim=0)
                        if i == 0:
                            Category_sum = torch.sum(category_matrix_new, dim=0)
                        else:
                            Category_sum += torch.sum(category_matrix_new, dim=0) # .cuda()
                        category_sum = torch.sum(category_matrix_new, dim=0)
                    else:
                        Category_sum += torch.sum(category_matrix_new, dim=0) # .cuda()
                        negative_logits_sum += torch.sum(false_prob, dim=0)
                        positive_logits_sum += torch.sum(positive_prob, dim=0)
                        category_sum += torch.sum(category_matrix_new, dim=0)
                    # Accumulate run-wide (upper-case) statistics.
                    if negative_logits_SUM is None:
                        negative_logits_SUM = torch.sum(false_prob, dim=0).cuda()
                        positive_logits_SUM = torch.sum(positive_prob, dim=0).cuda()
                    else:
                        negative_logits_SUM += torch.sum(false_prob, dim=0).cuda()
                        positive_logits_SUM += torch.sum(positive_prob, dim=0).cuda()
                    sum_num += int(logits_new.shape[0])
                    # ANT/NT: per-class re-weighting factors derived from
                    # the accumulated statistics (identity for warm-up).
                    if batch_idx < 5:
                        ANT = torch.ones(len(class_holder))
                        NT = torch.ones(len(class_holder))
                    else:
                        # pdb.set_trace()
                        ANT = (Category_sum.cuda() - positive_logits_SUM).cuda()/negative_logits_SUM.cuda() #/ (Category_sum.cuda() - positive_logits_SUM).cuda()
                        NT = negative_logits_sum.cuda() / (category_sum - positive_logits_sum).cuda()
                    # ttt: per-sample weight on the true-class log-prob.
                    ttt = torch.zeros(logits_new.shape)
                    for qqq in range(mem_y.shape[0]):
                        if mem_y[qqq]>=len(ANT):
                            ttt[qqq][mem_y[qqq]] = 1
                        else:
                            ttt[qqq][mem_y[qqq]] = 2 / (1+torch.exp(1-(ANT[mem_y[qqq]])))
                    # if mem_y[qqq] in new_class_holder:
                    #    ttt[qqq][mem_y[qqq]] = 1 # (ANT[mem_y[qqq]])
                    #else:
                    #    ttt[qqq][mem_y[qqq]] = 1 / (1+torch.exp(-ANT[mem_y[qqq]] - 1))
                    # logits_new==logits_new_p
                    #import pdb
                    #pdb.set_trace()
                    # if len(class_holder) > len(new_class_holder):
                    # identity_matrix_new=torch.ones(logits_new.shape)
                    # logits_=F.softmax(y_pred_new,dim=1)
                    #if batch_idx>0:
                    # ANT=negative_logits_SUM.cuda() / (Category_sum.cuda() - positive_logits_SUM).cuda()
                    #.detach()
                    # aaa=F.nll_loss(torch.log(logits_new),mem_y)
                    # if batch_idx>3:
                    #    pdb.set_trace()
                    #+0.05#1+torch.exp(-mem_y[qqq].float())
                    # print(ttt)
                    # Weighted NLL over the mixed batch + CE on the current
                    # task's own logits.
                    loss_n=-torch.sum(torch.log(logits_new)*ttt.cuda())/mem_y.shape[0]
                    loss =2* loss_n + 1 * F.cross_entropy(
                        pred_y_new, y_new)#+loss_balance#+2*loss_sim_r+loss_sim1#+loss_dif#+loss_old#+2*loss_only
                else:
                    # No buffer data yet (first batches of the run): plain
                    # cross-entropy on the current task's logits only.
                    x = RandomFlip(x, flip_num)
                    y = y.repeat(flip_num)
                    x = x.requires_grad_()
                    hidden_pred = Basic_model.f_train(simclr_aug(x))
                    pred_y = Basic_model.linear(hidden_pred)
                    t = num_class_per_task#len(new_class_holder)
                    pred_y_new = pred_y[:, -t:]
                    min_y = num_class_per_task*i#min(new_class_holder)
                    y_new = y - min_y
                    loss = F.cross_entropy(pred_y_new, y_new)
                # Snapshot the raw (unaugmented) batch and its features for
                # the reservoir buffer, then take the optimizer step with
                # apex loss scaling.
                copy_x = ori_x
                copy_y = ori_y.unsqueeze(1)
                copy_hidden = Basic_model.f_train(copy_x).detach()
                with amp.scale_loss(loss, Optimizer) as scaled_loss:
                    scaled_loss.backward()
                # loss.backward()
                Optimizer.step()
                buffero.add_reservoir(x=copy_x.detach(), y=copy_y.squeeze(1).detach(), logits=copy_hidden.float().detach(),
                                      t=i)
        # End of task: checkpoint the model and keep a frozen copy.
        weights_path = 'weights_pre.pt'
        torch.save(Basic_model.state_dict(), weights_path)
        Previous_model = deepcopy(Basic_model)
        print(len(class_holder))
        # import pdb
        # pdb.set_trace()
        #if task_id>0:
        print(negative_logits_SUM.cuda(),(Category_sum.cuda()-positive_logits_SUM).cuda(),category_sum,sum_num,negative_logits_SUM.cuda()/(Category_sum.cuda()-positive_logits_SUM).cuda())
        # Evaluate every task seen so far; Max_acc tracks the best
        # accuracy each task has reached across the run.
        for j in range(i + 1):
            print("ori", rank[j].item())
            a = test_model(Loder[rank[j].item()]['test'], j)
            if j == i:
                Max_acc.append(a)
            if a > Max_acc[j]:
                Max_acc[j] = a
        # if task_id>=1:
        # import pdb
        # pdb.set_trace()
        import pdb
        # Per-class accuracy on the 100-way split, then drop into pdb
        # (interactive inspection point left in by the authors).
        class_acc=[]
        for j in range(100):
            acc = test_model(Loder2[j]['test'], j)
            class_acc.append(acc)
        print(class_acc,'!')
        pdb.set_trace()
print('=' * 100)
print(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'), 'GPU ' + os.environ["CUDA_VISIBLE_DEVICES"])
print('=' * 100)
import pdb
# Final class-incremental evaluation over the whole held-out test set.
test_loss = 0
correct = 0
num = 0
for batch_idx, (data, target) in enumerate(test_loder):
    data, target = data.cuda(), target.cuda()
    # data, target = Variable(data, volatile=True), Variable(target)
    Basic_model.eval()
    pred = F.softmax(Basic_model.forward(data),dim=1)
    Pred = pred.data.max(1, keepdim=True)[1]
    num += data.size()[0]
    # print("final", Pred, target.data.view_as(Pred))
    # print(target,"True",pred)
    correct += Pred.eq(target.data.view_as(Pred)).cpu().sum()
test_accuracy = 100. * correct / num # len(data_loader.dataset)
print(
    'Test set: Average loss: {:.4f}, Accuracy: {}/{} ({:.2f}%)'
    .format(
        test_loss, correct, num,
        100. * correct / num, ))
print(Max_acc)
import pdb
pdb.set_trace()
# Average of the per-task best accuracies (note: `sum` shadows the builtin).
n = 0
sum = 0
for m in range(len(Max_acc)):
    sum += Max_acc[m]
    n += 1
print(sum / n)
| 33,352 | 38.65874 | 187 | py |
GSA | GSA-main/GSA_CVPR/cifar.py | import os,sys
import numpy as np
import torch
#import utils
from torchvision import datasets,transforms
from sklearn.utils import shuffle
import torch.utils.data as Data
def get(seed=0,pc_valid=0.10):
    """Build the split-CIFAR10 benchmark (5 tasks of 2 classes each).

    On first run, downloads CIFAR10, splits it per task and caches the
    tensors under ./data/binary_cifar_; later runs load the cached binaries.

    :param seed: unused here; kept for a uniform loader signature
    :param pc_valid: validation fraction (computed but not used to split)
    :return: (data dict, task list, input size, per-task loaders,
              full test loader)
    """
    data = {}
    taskcla = []
    size = [3, 32, 32]
    t_num=2
    # CIFAR10
    # One-time preprocessing: split the dataset into 2-class tasks and
    # cache each split as raw tensors on disk.
    if not os.path.isdir('./data/binary_cifar_/'):
        os.makedirs('./data/binary_cifar_')
        t_num = 2
        mean = [x / 255 for x in [125.3, 123.0, 113.9]]
        std = [x / 255 for x in [63.0, 62.1, 66.7]]
        dat={}
        dat['train']=datasets.CIFAR10('./data/', train=True, download=True, transform=transforms.Compose([transforms.ToTensor(),transforms.Normalize(mean,std)]))
        dat['test']=datasets.CIFAR10('./data/', train=False, download=True, transform=transforms.Compose([transforms.ToTensor(),transforms.Normalize(mean,std)]))
        # Tasks 0..4 each hold classes [2t, 2t+1].
        for t in range(10//t_num):
            data[t] = {}
            data[t]['name'] = 'cifar10-' + str(t_num*t) + '-' + str(t_num*(t+1)-1)
            data[t]['ncla'] = t_num
            for s in ['train', 'test']:
                loader = torch.utils.data.DataLoader(dat[s], batch_size=1, shuffle=False)
                data[t][s] = {'x': [], 'y': []}
                for image, target in loader:
                    label = target.numpy()[0]
                    if label in range(t_num*t, t_num*(t+1)):
                        data[t][s]['x'].append(image)
                        data[t][s]['y'].append(label)
        # Extra split holding all 10 classes together.
        t = 10 // t_num
        data[t] = {}
        data[t]['name'] = 'cifar10-all'
        data[t]['ncla'] = 10
        for s in ['train', 'test']:
            loader = torch.utils.data.DataLoader(dat[s], batch_size=1, shuffle=False)
            data[t][s] = {'x': [], 'y': []}
            for image, target in loader:
                label = target.numpy()[0]
                data[t][s]['x'].append(image)
                data[t][s]['y'].append(label)
        # "Unify" and save
        for t in data.keys():
            for s in ['train', 'test']:
                data[t][s]['x'] = torch.stack(data[t][s]['x']).view(-1, size[0], size[1], size[2])
                data[t][s]['y'] = torch.LongTensor(np.array(data[t][s]['y'], dtype=int)).view(-1)
                torch.save(data[t][s]['x'],
                           os.path.join(os.path.expanduser('./data/binary_cifar_'), 'data' + str(t) + s + 'x.bin'))
                torch.save(data[t][s]['y'],
                           os.path.join(os.path.expanduser('./data/binary_cifar_'), 'data' + str(t) + s + 'y.bin'))
    # Load binary files
    # Reload the cached splits (5 tasks + the all-class split = 6).
    data = {}
    ids = list(np.arange(6))
    print('Task order =', ids)
    for i in range(6):
        data[i] = dict.fromkeys(['name','ncla','train','test'])
        for s in ['train','test']:
            data[i][s]={'x':[],'y':[]}
            data[i][s]['x']=torch.load(os.path.join(os.path.expanduser('./data/binary_cifar_'),'data'+str(ids[i])+s+'x.bin'))
            data[i][s]['y']=torch.load(os.path.join(os.path.expanduser('./data/binary_cifar_'),'data'+str(ids[i])+s+'y.bin'))
        data[i]['ncla'] = len(np.unique(data[i]['train']['y'].numpy()))
        data[i]['name'] = 'cifar10->>>' + str(i * data[i]['ncla']) + '-' + str(data[i]['ncla'] * (i + 1) - 1)
    # Others
    # Collect (task, num-classes) pairs and the class total.
    n=0
    for t in data.keys():
        print("T",t)
        taskcla.append((t, data[t]['ncla']))
        n+=data[t]['ncla']
    data['ncla'] = n
    # Wrap the first 5 (2-class) tasks in DataLoaders.
    Loder={}
    Loder_test={}
    for t in range(5):
        print("t",t)
        Loder[t] = dict.fromkeys(['name', 'ncla', 'train', 'test', 'valid'])
        Loder_test[t] = dict.fromkeys(['name', 'ncla', 'train', 'test', 'valid'])
        u1 = torch.tensor(data[t]['train']['x'].reshape(-1, 3, 32, 32)) # .item()
        u2 = torch.tensor(data[t]['test']['x'].reshape(-1, 3, 32, 32))
        # print("u1",u1.size())
        TOTAL_NUM = u1.size()[0]
        NUM_VALID = int(round(TOTAL_NUM * 0.1))
        NUM_TRAIN = int(round(TOTAL_NUM - NUM_VALID))
        # u1.size()[0]
        # u2=torch.tensor(data[t]['train']['y'].reshape(-1))
        #u3 = data[t]['valid']['x']
        # print("u3",u3.size(),s)
        # u4=data[t]['valid']['y']
        dataset_new_train = Data.TensorDataset(data[t]['train']['x'], data[t]['train']['y'])
        dataset_new_test = Data.TensorDataset(data[t]['test']['x'], data[t]['test']['y'])
        train_loader = torch.utils.data.DataLoader(
            dataset_new_train,
            batch_size=10,
            shuffle=True,
        )
        test_loader = torch.utils.data.DataLoader(
            dataset_new_test,
            batch_size=64,
            shuffle=True,
        )
        Loder[t]['train'] = train_loader
        Loder[t]['test'] = test_loader
        #Loder[t]['valid'] = valid_loader
    # Full 10-class test loader for the final evaluation.
    mean = [x / 255 for x in [125.3, 123.0, 113.9]]
    std = [x / 255 for x in [63.0, 62.1, 66.7]]
    test_dataset= datasets.CIFAR10('./data/', train=False, download=True, transform=transforms.Compose([transforms.ToTensor(),transforms.Normalize(mean,std)]))#Data.TensorDataset(data[10//t_num]['test']['x'], data[10//t_num]['test']['y'])
    '''
    mean = [x / 255 for x in [125.3, 123.0, 113.9]]
    std = [x / 255 for x in [63.0, 62.1, 66.7]]
    train_dataset=datasets.CIFAR10('./data/', train=True, download=True, transform=transforms.Compose([transforms.ToTensor(),transforms.Normalize(mean,std)]))
    train_loader = torch.utils.data.DataLoader(
        train_dataset,
        batch_size=64,
        shuffle=True,
    )
    Loder={}
    Loder[0] = dict.fromkeys(['name', 'ncla', 'train', 'test', 'valid'])
    Loder[0]['train']=train_loader
    '''
    test_dataset=datasets.CIFAR10('./data/', train=False, download=True,
                                  transform=transforms.Compose([transforms.ToTensor(), transforms.Normalize(mean, std)]))
    test_loader = torch.utils.data.DataLoader(
        test_dataset,
        batch_size=64,
        shuffle=True,
    )
    print("Loder is prepared")
    return data, taskcla[:10//data[0]['ncla']], size,Loder,test_loader
def get_pretrain_AOP(seed=0,pc_valid=0.10):
    """Build CLIP-feature versions of split CIFAR-10 tasks (AOP variant).

    Every CIFAR-10 image is encoded with a frozen CLIP ViT-B/32 image
    encoder into a 512-d feature vector. Tasks 0..4 hold 2 classes each
    (classes 2t..2t+1); task 5 holds all 10 classes. Features are cached
    as binary files under ./data/binary_cifar_p and re-loaded from there.

    Args:
        seed: unused, kept for interface compatibility with siblings.
        pc_valid: unused, kept for interface compatibility.

    Returns:
        (data, taskcla, size) where data maps task id -> {'train','test'}
        feature/label tensors, taskcla is [(task_id, n_classes), ...]
        truncated to the 2-class tasks, and size is the raw image shape.
    """
    data = {}
    taskcla = []
    size = [3, 32, 32]
    t_num = 2  # classes per task
    # CIFAR10 encoded through CLIP
    import clip
    device = "cuda" if torch.cuda.is_available() else "cpu"
    model, preprocess = clip.load('ViT-B/32', device)
    # BUGFIX: the cached features are saved/loaded under
    # './data/binary_cifar_p', but the original code only created
    # './data/binary_cifar_pretr', so torch.save failed on a fresh run.
    # Create the directory that is actually used.
    if not os.path.isdir('./data/binary_cifar_p/'):
        os.makedirs('./data/binary_cifar_p')
    mean = [x / 255 for x in [125.3, 123.0, 113.9]]
    std = [x / 255 for x in [63.0, 62.1, 66.7]]
    dat={}
    dat['train']=datasets.CIFAR10('./data/', train=True, download=True, transform=transforms.Compose([transforms.ToTensor(),transforms.Normalize(mean,std)]))
    dat['test']=datasets.CIFAR10('./data/', train=False, download=True, transform=transforms.Compose([transforms.ToTensor(),transforms.Normalize(mean,std)]))
    # Tasks 0..4: two consecutive classes each, images replaced by CLIP features.
    for t in range(10//t_num):
        print(t,"t")
        num=0
        data[t] = {}
        data[t]['name'] = 'cifar10-' + str(t_num*t) + '-' + str(t_num*(t+1)-1)
        data[t]['ncla'] = t_num
        for s in ['train', 'test']:
            loader = torch.utils.data.DataLoader(dat[s], batch_size=1, shuffle=False)
            data[t][s] = {'x': [], 'y': []}
            for image, target in loader:
                label = target.numpy()[0]
                if label in range(t_num*t, t_num*(t+1)):
                    num+=1
                    if num%100==0:
                        print(num)  # progress indicator; CLIP encoding is slow
                    with torch.no_grad():
                        # Undo tensor conversion so CLIP's own preprocess pipeline applies.
                        image=transforms.ToPILImage()(image.squeeze(0))
                        image_input = preprocess(image).unsqueeze(0).to(device)
                        image_features = model.encode_image(image_input)
                        image=image_features.squeeze(0)
                    data[t][s]['x'].append(image)
                    data[t][s]['y'].append(label)
    # Final task: all 10 classes.
    t = 10 // t_num
    data[t] = {}
    data[t]['name'] = 'cifar10-all'
    data[t]['ncla'] = 10
    for s in ['train', 'test']:
        loader = torch.utils.data.DataLoader(dat[s], batch_size=1, shuffle=False)
        data[t][s] = {'x': [], 'y': []}
        for image, target in loader:
            label = target.numpy()[0]
            with torch.no_grad():
                image = transforms.ToPILImage()(image.squeeze(0))
                image_input = preprocess(image).unsqueeze(0).to(device)
                image_features = model.encode_image(image_input)
                image = image_features.squeeze(0)
            data[t][s]['x'].append(image)
            data[t][s]['y'].append(label)
    # "Unify" and save: stack per-image 512-d features into one tensor per split.
    for t in data.keys():
        for s in ['train', 'test']:
            data[t][s]['x'] = torch.stack(data[t][s]['x']).view(-1, 512)
            data[t][s]['y'] = torch.LongTensor(np.array(data[t][s]['y'], dtype=int)).view(-1)
            torch.save(data[t][s]['x'],
                       os.path.join(os.path.expanduser('./data/binary_cifar_p'), 'data' + str(t) + s + 'x.bin'))
            torch.save(data[t][s]['y'],
                       os.path.join(os.path.expanduser('./data/binary_cifar_p'), 'data' + str(t) + s + 'y.bin'))
    # Load binary files back (tasks 0..5, i.e. the 5 splits plus the all-class task).
    data = {}
    ids = list(np.arange(6))
    print('Task order =', ids)
    for i in range(6):
        data[i] = dict.fromkeys(['name','ncla','train','test'])
        for s in ['train','test']:
            data[i][s]={'x':[],'y':[]}
            data[i][s]['x']=torch.load(os.path.join(os.path.expanduser('./data/binary_cifar_p'),'data'+str(ids[i])+s+'x.bin'))
            data[i][s]['y']=torch.load(os.path.join(os.path.expanduser('./data/binary_cifar_p'),'data'+str(ids[i])+s+'y.bin'))
        data[i]['ncla'] = len(np.unique(data[i]['train']['y'].numpy()))
        data[i]['name'] = 'cifar10->>>' + str(i * data[i]['ncla']) + '-' + str(data[i]['ncla'] * (i + 1) - 1)
    # Others: total class count and (task, ncla) pairs.
    n = 0
    for t in data.keys():
        taskcla.append((t, data[t]['ncla']))
        n += data[t]['ncla']
    data['ncla'] = n
    return data, taskcla[:10 // data[0]['ncla']], size
def get_pretrain(seed=0,pc_valid=0.10):
    """Build CLIP-feature split CIFAR-10 tasks plus ready-made DataLoaders.

    Same feature-extraction/caching scheme as get_pretrain_AOP (CLIP
    ViT-B/32 features cached under ./data/binary_cifar_p), but also wraps
    each 2-class task in train/test DataLoaders (batch size 32) and builds
    a raw-image CIFAR-10 test loader over the full test set.

    Args:
        seed: unused, kept for interface compatibility with siblings.
        pc_valid: unused, kept for interface compatibility.

    Returns:
        (data, taskcla, size, Loder, test_loader) where Loder[t] holds
        'train'/'test' DataLoaders over CLIP features for task t, and
        test_loader iterates the normalized raw-image CIFAR-10 test set.
    """
    data = {}
    taskcla = []
    size = [3, 32, 32]
    t_num = 2  # classes per task
    # CIFAR10 encoded through CLIP
    import clip
    device = "cuda" if torch.cuda.is_available() else "cpu"
    model, preprocess = clip.load('ViT-B/32', device)
    # BUGFIX: features are saved/loaded under './data/binary_cifar_p', but
    # only './data/binary_cifar_pretr' was created, so torch.save failed on
    # a fresh run. Create the directory that is actually used.
    if not os.path.isdir('./data/binary_cifar_p/'):
        os.makedirs('./data/binary_cifar_p')
    mean = [x / 255 for x in [125.3, 123.0, 113.9]]
    std = [x / 255 for x in [63.0, 62.1, 66.7]]
    dat={}
    dat['train']=datasets.CIFAR10('./data/', train=True, download=True, transform=transforms.Compose([transforms.ToTensor(),transforms.Normalize(mean,std)]))
    dat['test']=datasets.CIFAR10('./data/', train=False, download=True, transform=transforms.Compose([transforms.ToTensor(),transforms.Normalize(mean,std)]))
    # Tasks 0..4: two consecutive classes each, images replaced by CLIP features.
    for t in range(10//t_num):
        print(t,"t")
        num=0
        data[t] = {}
        data[t]['name'] = 'cifar10-' + str(t_num*t) + '-' + str(t_num*(t+1)-1)
        data[t]['ncla'] = t_num
        for s in ['train', 'test']:
            loader = torch.utils.data.DataLoader(dat[s], batch_size=1, shuffle=False)
            data[t][s] = {'x': [], 'y': []}
            for image, target in loader:
                label = target.numpy()[0]
                if label in range(t_num*t, t_num*(t+1)):
                    num+=1
                    if num%100==0:
                        print(num)  # progress indicator; CLIP encoding is slow
                    with torch.no_grad():
                        # Undo tensor conversion so CLIP's own preprocess pipeline applies.
                        image=transforms.ToPILImage()(image.squeeze(0))
                        image_input = preprocess(image).unsqueeze(0).to(device)
                        image_features = model.encode_image(image_input)
                        image=image_features.squeeze(0)
                    data[t][s]['x'].append(image)
                    data[t][s]['y'].append(label)
    # Final task: all 10 classes.
    t = 10 // t_num
    data[t] = {}
    data[t]['name'] = 'cifar10-all'
    data[t]['ncla'] = 10
    for s in ['train', 'test']:
        loader = torch.utils.data.DataLoader(dat[s], batch_size=1, shuffle=False)
        data[t][s] = {'x': [], 'y': []}
        for image, target in loader:
            label = target.numpy()[0]
            with torch.no_grad():
                image = transforms.ToPILImage()(image.squeeze(0))
                image_input = preprocess(image).unsqueeze(0).to(device)
                image_features = model.encode_image(image_input)
                image = image_features.squeeze(0)
            data[t][s]['x'].append(image)
            data[t][s]['y'].append(label)
    # "Unify" and save: stack per-image 512-d features into one tensor per split.
    for t in data.keys():
        for s in ['train', 'test']:
            data[t][s]['x'] = torch.stack(data[t][s]['x']).view(-1, 512)
            data[t][s]['y'] = torch.LongTensor(np.array(data[t][s]['y'], dtype=int)).view(-1)
            torch.save(data[t][s]['x'],
                       os.path.join(os.path.expanduser('./data/binary_cifar_p'), 'data' + str(t) + s + 'x.bin'))
            torch.save(data[t][s]['y'],
                       os.path.join(os.path.expanduser('./data/binary_cifar_p'), 'data' + str(t) + s + 'y.bin'))
    # Load binary files back (tasks 0..5).
    data = {}
    ids = list(np.arange(6))
    print('Task order =', ids)
    for i in range(6):
        data[i] = dict.fromkeys(['name','ncla','train','test'])
        for s in ['train','test']:
            data[i][s]={'x':[],'y':[]}
            data[i][s]['x']=torch.load(os.path.join(os.path.expanduser('./data/binary_cifar_p'),'data'+str(ids[i])+s+'x.bin'))
            data[i][s]['y']=torch.load(os.path.join(os.path.expanduser('./data/binary_cifar_p'),'data'+str(ids[i])+s+'y.bin'))
        data[i]['ncla'] = len(np.unique(data[i]['train']['y'].numpy()))
        data[i]['name'] = 'cifar10->>>' + str(i * data[i]['ncla']) + '-' + str(data[i]['ncla'] * (i + 1) - 1)
    # Others: total class count and (task, ncla) pairs.
    n=0
    for t in data.keys():
        print("T",t)
        taskcla.append((t, data[t]['ncla']))
        n+=data[t]['ncla']
    data['ncla'] = n
    # Wrap the five 2-class feature tasks in DataLoaders.
    Loder={}
    for t in range(5):
        print("t",t)
        Loder[t] = dict.fromkeys(['name', 'ncla', 'train', 'test', 'valid'])
        dataset_new_train = Data.TensorDataset(data[t]['train']['x'], data[t]['train']['y'])
        dataset_new_test = Data.TensorDataset(data[t]['test']['x'], data[t]['test']['y'])
        train_loader = torch.utils.data.DataLoader(
            dataset_new_train,
            batch_size=32,
            shuffle=True,
        )
        test_loader = torch.utils.data.DataLoader(
            dataset_new_test,
            batch_size=32,
            shuffle=True,
        )
        Loder[t]['train'] = train_loader
        Loder[t]['test'] = test_loader
    # Full raw-image CIFAR-10 test loader (normalized), used for evaluation.
    mean = [x / 255 for x in [125.3, 123.0, 113.9]]
    std = [x / 255 for x in [63.0, 62.1, 66.7]]
    test_dataset=datasets.CIFAR10('./data/', train=False, download=True,transform=transforms.Compose([transforms.ToTensor(), transforms.Normalize(mean, std)]))
    test_loader = torch.utils.data.DataLoader(
        test_dataset,
        batch_size=32,
        shuffle=True,
    )
    print("Loder is prepared")
    return data, taskcla[:10//data[0]['ncla']], size,Loder,test_loader
def get_a_order(seed=0,pc_valid=0.10):
    """Build split CIFAR-10 tasks with a fixed random class order.

    Classes are shuffled once (np seed 101) and relabelled by their
    position in the shuffled list; tasks 0..4 each hold 2 consecutive
    relabelled classes, task 5 holds all 10. Tensors are cached under
    ./data/binary_cifar_a1 and re-loaded, then wrapped in DataLoaders.

    NOTE(review): the `seed` and `pc_valid` parameters are unused here;
    the class order comes from the hard-coded np.random.seed(101).

    Returns:
        (data, taskcla, size, Loder, test_loader) — Loder[t] has
        'train'/'test' DataLoaders per 2-class task; test_loader covers
        the relabelled all-class test split (task 5).
    """
    data = {}
    taskcla = []
    size = [3, 32, 32]
    t_num=2
    # CIFAR10
    if not os.path.isdir('./data/binary_cifar_a1/'):
        os.makedirs('./data/binary_cifar_a1')
    t_num = 2  # classes per task (re-assigned; same value as above)
    # Fixed shuffle of the 10 class ids defines the task order.
    np.random.seed(101)
    cls_list = [i for i in range(10)]
    np.random.shuffle(cls_list)
    # NOTE(review): class_mapping is computed but never used below.
    class_mapping = np.array(cls_list, copy=True)
    mean = [x / 255 for x in [125.3, 123.0, 113.9]]
    std = [x / 255 for x in [63.0, 62.1, 66.7]]
    dat={}
    dat['train']=datasets.CIFAR10('./data/', train=True, download=True, transform=transforms.Compose([transforms.ToTensor(),transforms.Normalize(mean,std)]))
    dat['test']=datasets.CIFAR10('./data/', train=False, download=True, transform=transforms.Compose([transforms.ToTensor(),transforms.Normalize(mean,std)]))
    # Tasks 0..4: labels are remapped to the class's index in cls_list.
    for t in range(10//t_num):
        data[t] = {}
        data[t]['name'] = 'cifar10-' + str(t_num*t) + '-' + str(t_num*(t+1)-1)
        data[t]['ncla'] = t_num
        for s in ['train', 'test']:
            loader = torch.utils.data.DataLoader(dat[s], batch_size=1, shuffle=False)
            data[t][s] = {'x': [], 'y': []}
            for image, target in loader:
                label = target.numpy()[0]
                # Select samples whose *shuffled* position falls in this task's range.
                if cls_list.index(label) in range(t_num*t, t_num*(t+1)):
                    data[t][s]['x'].append(image)
                    data[t][s]['y'].append(cls_list.index(label))
    # Final task: all 10 classes, same relabelling.
    t = 10 // t_num
    data[t] = {}
    data[t]['name'] = 'cifar10-all'
    data[t]['ncla'] = 10
    for s in ['train', 'test']:
        loader = torch.utils.data.DataLoader(dat[s], batch_size=1, shuffle=False)
        data[t][s] = {'x': [], 'y': []}
        for image, target in loader:
            label = target.numpy()[0]
            data[t][s]['x'].append(image)
            data[t][s]['y'].append(cls_list.index(label))
    # "Unify" and save: per-split tensors cached to disk.
    for t in data.keys():
        for s in ['train', 'test']:
            data[t][s]['x'] = torch.stack(data[t][s]['x']).view(-1, size[0], size[1], size[2])
            data[t][s]['y'] = torch.LongTensor(np.array(data[t][s]['y'], dtype=int)).view(-1)
            torch.save(data[t][s]['x'],
                       os.path.join(os.path.expanduser('./data/binary_cifar_a1'), 'data' + str(t) + s + 'x.bin'))
            torch.save(data[t][s]['y'],
                       os.path.join(os.path.expanduser('./data/binary_cifar_a1'), 'data' + str(t) + s + 'y.bin'))
    # Load binary files (tasks 0..5)
    data = {}
    ids = list(np.arange(6))
    print('Task order =', ids)
    for i in range(6):
        data[i] = dict.fromkeys(['name','ncla','train','test'])
        for s in ['train','test']:
            data[i][s]={'x':[],'y':[]}
            data[i][s]['x']=torch.load(os.path.join(os.path.expanduser('./data/binary_cifar_a1'),'data'+str(ids[i])+s+'x.bin'))
            data[i][s]['y']=torch.load(os.path.join(os.path.expanduser('./data/binary_cifar_a1'),'data'+str(ids[i])+s+'y.bin'))
        data[i]['ncla'] = len(np.unique(data[i]['train']['y'].numpy()))
        data[i]['name'] = 'cifar10->>>' + str(i * data[i]['ncla']) + '-' + str(data[i]['ncla'] * (i + 1) - 1)
    # Others: accumulate (task, ncla) pairs and total class count.
    n=0
    for t in data.keys():
        print("T",t)
        taskcla.append((t, data[t]['ncla']))
        n+=data[t]['ncla']
    data['ncla'] = n
    Loder={}
    # NOTE(review): Loder_test is filled with empty dicts but never populated or returned.
    Loder_test={}
    for t in range(5):
        print("t",t)
        Loder[t] = dict.fromkeys(['name', 'ncla', 'train', 'test', 'valid'])
        Loder_test[t] = dict.fromkeys(['name', 'ncla', 'train', 'test', 'valid'])
        u1 = torch.tensor(data[t]['train']['x'].reshape(-1, 3, 32, 32))  # .item()
        u2 = torch.tensor(data[t]['test']['x'].reshape(-1, 3, 32, 32))
        # print("u1",u1.size())
        # NUM_VALID/NUM_TRAIN are computed for a 90/10 split but not used below.
        TOTAL_NUM = u1.size()[0]
        NUM_VALID = int(round(TOTAL_NUM * 0.1))
        NUM_TRAIN = int(round(TOTAL_NUM - NUM_VALID))
        # u1.size()[0]
        # u2=torch.tensor(data[t]['train']['y'].reshape(-1))
        #u3 = data[t]['valid']['x']
        # print("u3",u3.size(),s)
        # u4=data[t]['valid']['y']
        dataset_new_train = Data.TensorDataset(data[t]['train']['x'], data[t]['train']['y'])
        dataset_new_test = Data.TensorDataset(data[t]['test']['x'], data[t]['test']['y'])
        train_loader = torch.utils.data.DataLoader(
            dataset_new_train,
            batch_size=32,
            shuffle=True,
        )
        test_loader = torch.utils.data.DataLoader(
            dataset_new_test,
            batch_size=64,
            shuffle=True,
        )
        Loder[t]['train'] = train_loader
        Loder[t]['test'] = test_loader
        #Loder[t]['valid'] = valid_loader
    # mean = [x / 255 for x in [125.3, 123.0, 113.9]]
    # std = [x / 255 for x in [63.0, 62.1, 66.7]]
    # test_dataset= datasets.CIFAR10('./data/', train=False, download=True, transform=transforms.Compose([transforms.ToTensor(),transforms.Normalize(mean,std)]))#Data.TensorDataset(data[10//t_num]['test']['x'], data[10//t_num]['test']['y'])
    # test_dataset=datasets.CIFAR10('./data/', train=False, download=True,
    #                               transform=transforms.Compose([transforms.ToTensor(), transforms.Normalize(mean, std)]))
    # Global test loader over the all-class (relabelled) test split.
    dataset_new_test = Data.TensorDataset(data[5]['test']['x'], data[5]['test']['y'])
    test_loader = torch.utils.data.DataLoader(
        dataset_new_test,
        batch_size=64,
        shuffle=True,
    )
    #test_loader = torch.utils.data.DataLoader(
    #    test_dataset,
    #    batch_size=64,
    #    shuffle=True,
    #)
    print("Loder is prepared")
    return data, taskcla[:10//data[0]['ncla']], size,Loder,test_loader
def get_revisit(seed=0,pc_valid=0.10):
    """Build split CIFAR-10 tasks where each task is halved into two visits.

    Standard split CIFAR-10 (2 classes per task, cached under
    ./data/binary_cifar_), but each task's training set is split into two
    halves so that Loder[2t] and Loder[2t+1] "revisit" the same classes
    with disjoint training data (10 loaders total for 5 tasks).

    NOTE(review): `seed` and `pc_valid` are unused here.

    Returns:
        (data, taskcla, size, Loder, test_loader) — Loder holds
        train-only DataLoaders per half-task; test_loader covers the
        full raw CIFAR-10 test set.
    """
    data = {}
    taskcla = []
    size = [3, 32, 32]
    t_num=2
    # CIFAR10
    if not os.path.isdir('./data/binary_cifar_/'):
        os.makedirs('./data/binary_cifar_')
    t_num = 2  # classes per task (re-assigned; same value as above)
    mean = [x / 255 for x in [125.3, 123.0, 113.9]]
    std = [x / 255 for x in [63.0, 62.1, 66.7]]
    dat={}
    dat['train']=datasets.CIFAR10('./data/', train=True, download=True, transform=transforms.Compose([transforms.ToTensor(),transforms.Normalize(mean,std)]))
    dat['test']=datasets.CIFAR10('./data/', train=False, download=True, transform=transforms.Compose([transforms.ToTensor(),transforms.Normalize(mean,std)]))
    # Tasks 0..4: two consecutive original labels each.
    for t in range(10//t_num):
        data[t] = {}
        data[t]['name'] = 'cifar10-' + str(t_num*t) + '-' + str(t_num*(t+1)-1)
        data[t]['ncla'] = t_num
        for s in ['train', 'test']:
            loader = torch.utils.data.DataLoader(dat[s], batch_size=1, shuffle=False)
            data[t][s] = {'x': [], 'y': []}
            for image, target in loader:
                label = target.numpy()[0]
                if label in range(t_num*t, t_num*(t+1)):
                    data[t][s]['x'].append(image)
                    data[t][s]['y'].append(label)
    # Final task: all 10 classes.
    t = 10 // t_num
    data[t] = {}
    data[t]['name'] = 'cifar10-all'
    data[t]['ncla'] = 10
    for s in ['train', 'test']:
        loader = torch.utils.data.DataLoader(dat[s], batch_size=1, shuffle=False)
        data[t][s] = {'x': [], 'y': []}
        for image, target in loader:
            label = target.numpy()[0]
            data[t][s]['x'].append(image)
            data[t][s]['y'].append(label)
    # "Unify" and save: per-split tensors cached to disk.
    for t in data.keys():
        for s in ['train', 'test']:
            data[t][s]['x'] = torch.stack(data[t][s]['x']).view(-1, size[0], size[1], size[2])
            data[t][s]['y'] = torch.LongTensor(np.array(data[t][s]['y'], dtype=int)).view(-1)
            torch.save(data[t][s]['x'],
                       os.path.join(os.path.expanduser('./data/binary_cifar_'), 'data' + str(t) + s + 'x.bin'))
            torch.save(data[t][s]['y'],
                       os.path.join(os.path.expanduser('./data/binary_cifar_'), 'data' + str(t) + s + 'y.bin'))
    # Load binary files (tasks 0..5)
    data = {}
    ids = list(np.arange(6))
    print('Task order =', ids)
    for i in range(6):
        data[i] = dict.fromkeys(['name','ncla','train','test'])
        for s in ['train','test']:
            data[i][s]={'x':[],'y':[]}
            data[i][s]['x']=torch.load(os.path.join(os.path.expanduser('./data/binary_cifar_'),'data'+str(ids[i])+s+'x.bin'))
            data[i][s]['y']=torch.load(os.path.join(os.path.expanduser('./data/binary_cifar_'),'data'+str(ids[i])+s+'y.bin'))
        data[i]['ncla'] = len(np.unique(data[i]['train']['y'].numpy()))
        data[i]['name'] = 'cifar10->>>' + str(i * data[i]['ncla']) + '-' + str(data[i]['ncla'] * (i + 1) - 1)
    # Others: accumulate (task, ncla) pairs and total class count.
    n=0
    for t in data.keys():
        print("T",t)
        taskcla.append((t, data[t]['ncla']))
        n+=data[t]['ncla']
    data['ncla'] = n
    Loder={}
    for t in range(5):
        print("t",t)
        u1 = torch.tensor(data[t]['train']['x'].reshape(-1, 3, 32, 32))  # .item()
        # print("u1",u1.size())
        # NUM_VALID/NUM_TRAIN are computed for a 90/10 split but not used below.
        TOTAL_NUM = u1.size()[0]
        NUM_VALID = int(round(TOTAL_NUM * 0.1))
        NUM_TRAIN = int(round(TOTAL_NUM - NUM_VALID))
        # u1.size()[0]
        # u2=torch.tensor(data[t]['train']['y'].reshape(-1))
        #u3 = data[t]['valid']['x']
        # print("u3",u3.size(),s)
        # u4=data[t]['valid']['y']
        # Split this task's training data into two disjoint halves: the
        # loaders at 2t and 2t+1 revisit the same classes with new samples.
        for i in range(2):
            dataset_new_train = Data.TensorDataset(data[t]['train']['x'][i*int(TOTAL_NUM/2):(i+1)*int(TOTAL_NUM/2)], data[t]['train']['y'][i*int(TOTAL_NUM/2):(i+1)*int(TOTAL_NUM/2)])
            #dataset_new_valid = Data.TensorDataset(data[t]['valid']['x'], data[t]['valid']['y'])
            train_loader = torch.utils.data.DataLoader(
                dataset_new_train,
                batch_size=10,
                shuffle=True,
            )
            Loder[2 * t+ i] = dict.fromkeys(['name', 'ncla', 'train', 'test', 'valid'])
            Loder[2*t+i]['train'] = train_loader
        #Loder[t]['valid'] = valid_loader
    # Full raw-image CIFAR-10 test loader (normalized).
    mean = [x / 255 for x in [125.3, 123.0, 113.9]]
    std = [x / 255 for x in [63.0, 62.1, 66.7]]
    test_dataset= datasets.CIFAR10('./data/', train=False, download=True, transform=transforms.Compose([transforms.ToTensor(),transforms.Normalize(mean,std)]))#Data.TensorDataset(data[10//t_num]['test']['x'], data[10//t_num]['test']['y'])
    '''
    mean = [x / 255 for x in [125.3, 123.0, 113.9]]
    std = [x / 255 for x in [63.0, 62.1, 66.7]]
    train_dataset=datasets.CIFAR10('./data/', train=True, download=True, transform=transforms.Compose([transforms.ToTensor(),transforms.Normalize(mean,std)]))
    train_loader = torch.utils.data.DataLoader(
        train_dataset,
        batch_size=64,
        shuffle=True,
    )
    Loder={}
    Loder[0] = dict.fromkeys(['name', 'ncla', 'train', 'test', 'valid'])
    Loder[0]['train']=train_loader
    '''
    test_dataset=datasets.CIFAR10('./data/', train=False, download=True,
                                  transform=transforms.Compose([transforms.ToTensor(), transforms.Normalize(mean, std)]))
    test_loader = torch.utils.data.DataLoader(
        test_dataset,
        batch_size=64,
        shuffle=True,
    )
    print("Loder is prepared")
    return data, taskcla[:10//data[0]['ncla']], size,Loder,test_loader
def get_cifar100(seed=0,pc_valid=0.10):
    """Build split CIFAR-100 as 10 tasks of 10 classes each, plus loaders.

    Tasks 0..9 each hold 10 consecutive labels; task 10 holds all 100.
    Tensors are cached under ./data/binary_cifar100_100, re-loaded, and
    wrapped in per-task DataLoaders.

    NOTE(review): `seed` and `pc_valid` are unused here.
    NOTE(review): the return slices taskcla[:10 // data[0]['ncla']],
    which is taskcla[:1] for 10-class tasks — possibly intended to be
    100 // ncla; verify against callers.

    Returns:
        (data, taskcla, size, Loder, test_loader).
    """
    data = {}
    taskcla = []
    size = [3, 32, 32]
    # CIFAR10
    if not os.path.isdir('./data/binary_cifar100_100/'):
        os.makedirs('./data/binary_cifar100_100')
    t_num = 10  # classes per task
    mean = [x / 255 for x in [125.3, 123.0, 113.9]]
    std = [x / 255 for x in [63.0, 62.1, 66.7]]
    dat={}
    dat['train']=datasets.CIFAR100('./data/', train=True, download=True, transform=transforms.Compose([transforms.ToTensor(),transforms.Normalize(mean,std)]))
    dat['test']=datasets.CIFAR100('./data/', train=False, download=True, transform=transforms.Compose([transforms.ToTensor(),transforms.Normalize(mean,std)]))
    # Tasks 0..9: ten consecutive original labels each.
    for t in range(100//t_num):
        print(t)
        data[t] = {}
        data[t]['name'] = 'cifar100-' + str(t_num*t) + '-' + str(t_num*(t+1)-1)
        data[t]['ncla'] = t_num
        for s in ['train', 'test']:
            loader = torch.utils.data.DataLoader(dat[s], batch_size=1, shuffle=False)
            data[t][s] = {'x': [], 'y': []}
            for image, target in loader:
                label = target.numpy()[0]
                if label in range(t_num*t, t_num*(t+1)):
                    data[t][s]['x'].append(image)
                    data[t][s]['y'].append(label)
    # Final task: all 100 classes.
    t = 100 // t_num
    data[t] = {}
    data[t]['name'] = 'cifar100-all'
    data[t]['ncla'] = 100
    for s in ['train', 'test']:
        loader = torch.utils.data.DataLoader(dat[s], batch_size=1, shuffle=False)
        data[t][s] = {'x': [], 'y': []}
        for image, target in loader:
            label = target.numpy()[0]
            data[t][s]['x'].append(image)
            data[t][s]['y'].append(label)
    # "Unify" and save: per-split tensors cached to disk.
    for t in data.keys():
        for s in ['train', 'test']:
            data[t][s]['x'] = torch.stack(data[t][s]['x']).view(-1, size[0], size[1], size[2])
            data[t][s]['y'] = torch.LongTensor(np.array(data[t][s]['y'], dtype=int)).view(-1)
            torch.save(data[t][s]['x'],
                       os.path.join(os.path.expanduser('./data/binary_cifar100_100'), 'data' + str(t) + s + 'x.bin'))
            torch.save(data[t][s]['y'],
                       os.path.join(os.path.expanduser('./data/binary_cifar100_100'), 'data' + str(t) + s + 'y.bin'))
    # Load binary files (tasks 0..10)
    data = {}
    ids = list(np.arange(11))
    print('Task order =', ids)
    for i in range(11):
        data[i] = dict.fromkeys(['name','ncla','train','test'])
        for s in ['train','test']:
            data[i][s]={'x':[],'y':[]}
            data[i][s]['x']=torch.load(os.path.join(os.path.expanduser('./data/binary_cifar100_100'),'data'+str(ids[i])+s+'x.bin'))
            data[i][s]['y']=torch.load(os.path.join(os.path.expanduser('./data/binary_cifar100_100'),'data'+str(ids[i])+s+'y.bin'))
        data[i]['ncla'] = len(np.unique(data[i]['train']['y'].numpy()))
        data[i]['name'] = 'cifar100->>>' + str(i * data[i]['ncla']) + '-' + str(data[i]['ncla'] * (i + 1) - 1)
    # Others: accumulate (task, ncla) pairs and total class count.
    n = 0
    for t in data.keys():
        print("T", t)
        taskcla.append((t, data[t]['ncla']))
        n += data[t]['ncla']
    data['ncla'] = n
    # Per-task DataLoaders for the ten 10-class tasks.
    Loder = {}
    for t in range(10):
        print("t", t)
        Loder[t] = dict.fromkeys(['name', 'ncla', 'train', 'test', 'valid'])
        u1 = torch.tensor(data[t]['train']['x'].reshape(-1, 3, 32, 32))  # .item()
        # print("u1",u1.size())
        # NUM_VALID/NUM_TRAIN are computed for a 90/10 split but not used below.
        TOTAL_NUM = u1.size()[0]
        NUM_VALID = int(round(TOTAL_NUM * 0.1))
        NUM_TRAIN = int(round(TOTAL_NUM - NUM_VALID))
        # u1.size()[0]
        # u2=torch.tensor(data[t]['train']['y'].reshape(-1))
        # u3 = data[t]['valid']['x']
        # print("u3",u3.size(),s)
        # u4=data[t]['valid']['y']
        dataset_new_train = Data.TensorDataset(data[t]['train']['x'], data[t]['train']['y'])
        dataset_new_test = Data.TensorDataset(data[t]['test']['x'], data[t]['test']['y'])
        train_loader = torch.utils.data.DataLoader(
            dataset_new_train,
            batch_size=10,
            shuffle=True,
        )
        test_loader = torch.utils.data.DataLoader(
            dataset_new_test,
            batch_size=64,
            shuffle=True,
        )
        Loder[t]['train'] = train_loader
        Loder[t]['test'] = test_loader
    # Full raw-image CIFAR-100 test loader (normalized).
    mean = [x / 255 for x in [125.3, 123.0, 113.9]]
    std = [x / 255 for x in [63.0, 62.1, 66.7]]
    # test_dataset = datasets.CIFAR100('./data/', train=False, download=True, transform=transforms.Compose(
    #     [transforms.ToTensor(), transforms.Normalize(mean,
    #                                                  std)]))  # Data.TensorDataset(data[10//t_num]['test']['x'], data[10//t_num]['test']['y'])
    '''
    mean = [x / 255 for x in [125.3, 123.0, 113.9]]
    std = [x / 255 for x in [63.0, 62.1, 66.7]]
    train_dataset=datasets.CIFAR10('./data/', train=True, download=True, transform=transforms.Compose([transforms.ToTensor(),transforms.Normalize(mean,std)]))
    train_loader = torch.utils.data.DataLoader(
        train_dataset,
        batch_size=64,
        shuffle=True,
    )
    Loder={}
    Loder[0] = dict.fromkeys(['name', 'ncla', 'train', 'test', 'valid'])
    Loder[0]['train']=train_loader
    '''
    test_dataset = datasets.CIFAR100('./data/', train=False, download=True,
                                     transform=transforms.Compose(
                                         [transforms.ToTensor(), transforms.Normalize(mean, std)]))
    test_loader = torch.utils.data.DataLoader(
        test_dataset,
        batch_size=64,
        shuffle=True,
    )
    print("Loder is prepared")
    return data, taskcla[:10 // data[0]['ncla']], size, Loder, test_loader
def get_cifar100_joint(seed=0,pc_valid=0.10):
    """Build CIFAR-100 as a single joint task (all 100 classes at once).

    With t_num = 100 the split loop produces exactly one task containing
    every class; task 1 is a duplicate all-class copy. Tensors are
    cached under ./data/binary_cifar100_j, re-loaded, and wrapped in a
    single train/test DataLoader pair (Loder[0]).

    NOTE(review): `seed` and `pc_valid` are unused here.

    Returns:
        (data, taskcla, size, Loder, test_loader).
    """
    data = {}
    taskcla = []
    size = [3, 32, 32]
    # CIFAR10
    if not os.path.isdir('./data/binary_cifar100_j/'):
        os.makedirs('./data/binary_cifar100_j')
    t_num = 100  # all classes in one task -> joint training
    mean = [x / 255 for x in [125.3, 123.0, 113.9]]
    std = [x / 255 for x in [63.0, 62.1, 66.7]]
    dat={}
    dat['train']=datasets.CIFAR100('./data/', train=True, download=True, transform=transforms.Compose([transforms.ToTensor(),transforms.Normalize(mean,std)]))
    dat['test']=datasets.CIFAR100('./data/', train=False, download=True, transform=transforms.Compose([transforms.ToTensor(),transforms.Normalize(mean,std)]))
    # range(100//t_num) == range(1): a single task with all labels.
    for t in range(100//t_num):
        print(t)
        data[t] = {}
        data[t]['name'] = 'cifar100-' + str(t_num*t) + '-' + str(t_num*(t+1)-1)
        data[t]['ncla'] = t_num
        for s in ['train', 'test']:
            loader = torch.utils.data.DataLoader(dat[s], batch_size=1, shuffle=False)
            data[t][s] = {'x': [], 'y': []}
            for image, target in loader:
                label = target.numpy()[0]
                if label in range(t_num*t, t_num*(t+1)):
                    data[t][s]['x'].append(image)
                    data[t][s]['y'].append(label)
    # Task 1: a second all-class copy (same content as task 0 here).
    t = 100 // t_num
    data[t] = {}
    data[t]['name'] = 'cifar100-all'
    data[t]['ncla'] = 100
    for s in ['train', 'test']:
        loader = torch.utils.data.DataLoader(dat[s], batch_size=1, shuffle=False)
        data[t][s] = {'x': [], 'y': []}
        for image, target in loader:
            label = target.numpy()[0]
            data[t][s]['x'].append(image)
            data[t][s]['y'].append(label)
    # "Unify" and save: per-split tensors cached to disk.
    for t in data.keys():
        for s in ['train', 'test']:
            data[t][s]['x'] = torch.stack(data[t][s]['x']).view(-1, size[0], size[1], size[2])
            data[t][s]['y'] = torch.LongTensor(np.array(data[t][s]['y'], dtype=int)).view(-1)
            torch.save(data[t][s]['x'],
                       os.path.join(os.path.expanduser('./data/binary_cifar100_j'), 'data' + str(t) + s + 'x.bin'))
            torch.save(data[t][s]['y'],
                       os.path.join(os.path.expanduser('./data/binary_cifar100_j'), 'data' + str(t) + s + 'y.bin'))
    # Load binary files (tasks 0..1)
    data = {}
    ids = list(np.arange(2))
    print('Task order =', ids)
    for i in range(2):
        data[i] = dict.fromkeys(['name','ncla','train','test'])
        for s in ['train','test']:
            data[i][s]={'x':[],'y':[]}
            data[i][s]['x']=torch.load(os.path.join(os.path.expanduser('./data/binary_cifar100_j'),'data'+str(ids[i])+s+'x.bin'))
            data[i][s]['y']=torch.load(os.path.join(os.path.expanduser('./data/binary_cifar100_j'),'data'+str(ids[i])+s+'y.bin'))
        data[i]['ncla'] = len(np.unique(data[i]['train']['y'].numpy()))
        data[i]['name'] = 'cifar100->>>' + str(i * data[i]['ncla']) + '-' + str(data[i]['ncla'] * (i + 1) - 1)
    # Others: accumulate (task, ncla) pairs and total class count.
    n = 0
    for t in data.keys():
        print("T", t)
        taskcla.append((t, data[t]['ncla']))
        n += data[t]['ncla']
    data['ncla'] = n
    # A single DataLoader pair for the joint task.
    Loder = {}
    for t in range(1):
        print("t", t)
        Loder[t] = dict.fromkeys(['name', 'ncla', 'train', 'test', 'valid'])
        u1 = torch.tensor(data[t]['train']['x'].reshape(-1, 3, 32, 32))  # .item()
        # print("u1",u1.size())
        # NUM_VALID/NUM_TRAIN are computed for a 90/10 split but not used below.
        TOTAL_NUM = u1.size()[0]
        NUM_VALID = int(round(TOTAL_NUM * 0.1))
        NUM_TRAIN = int(round(TOTAL_NUM - NUM_VALID))
        # u1.size()[0]
        # u2=torch.tensor(data[t]['train']['y'].reshape(-1))
        # u3 = data[t]['valid']['x']
        # print("u3",u3.size(),s)
        # u4=data[t]['valid']['y']
        dataset_new_train = Data.TensorDataset(data[t]['train']['x'], data[t]['train']['y'])
        dataset_new_test = Data.TensorDataset(data[t]['test']['x'], data[t]['test']['y'])
        train_loader = torch.utils.data.DataLoader(
            dataset_new_train,
            batch_size=10,
            shuffle=True,
        )
        test_loader = torch.utils.data.DataLoader(
            dataset_new_test,
            batch_size=64,
            shuffle=True,
        )
        Loder[t]['train'] = train_loader
        Loder[t]['test'] = test_loader
    # Full raw-image CIFAR-100 test loader (normalized).
    mean = [x / 255 for x in [125.3, 123.0, 113.9]]
    std = [x / 255 for x in [63.0, 62.1, 66.7]]
    # test_dataset = datasets.CIFAR100('./data/', train=False, download=True, transform=transforms.Compose(
    #     [transforms.ToTensor(), transforms.Normalize(mean,
    #                                                  std)]))  # Data.TensorDataset(data[10//t_num]['test']['x'], data[10//t_num]['test']['y'])
    '''
    mean = [x / 255 for x in [125.3, 123.0, 113.9]]
    std = [x / 255 for x in [63.0, 62.1, 66.7]]
    train_dataset=datasets.CIFAR10('./data/', train=True, download=True, transform=transforms.Compose([transforms.ToTensor(),transforms.Normalize(mean,std)]))
    train_loader = torch.utils.data.DataLoader(
        train_dataset,
        batch_size=64,
        shuffle=True,
    )
    Loder={}
    Loder[0] = dict.fromkeys(['name', 'ncla', 'train', 'test', 'valid'])
    Loder[0]['train']=train_loader
    '''
    test_dataset = datasets.CIFAR100('./data/', train=False, download=True,
                                     transform=transforms.Compose(
                                         [transforms.ToTensor(), transforms.Normalize(mean, std)]))
    test_loader = torch.utils.data.DataLoader(
        test_dataset,
        batch_size=64,
        shuffle=True,
    )
    print("Loder is prepared")
    return data, taskcla[:10 // data[0]['ncla']], size, Loder, test_loader
def get_cifar100_50(seed=0,pc_valid=0.10):
    """Build split CIFAR-100 as 50 tasks of 2 classes each, plus loaders.

    Tasks 0..49 each hold 2 consecutive labels; task 50 holds all 100.
    Tensors are cached under ./data/binary_cifar100_2, re-loaded, and
    wrapped in per-task DataLoaders.

    Args:
        seed: unused, kept for interface compatibility with siblings.
        pc_valid: unused, kept for interface compatibility.

    Returns:
        (data, taskcla, size, Loder, test_loader) — Loder[t] has
        'train'/'test' DataLoaders per 2-class task; test_loader covers
        the full raw CIFAR-100 test set.
    """
    data = {}
    taskcla = []
    size = [3, 32, 32]
    # BUGFIX: the cache is saved/loaded under './data/binary_cifar100_2',
    # but the original code only created './data/binary_cifar100_22', so
    # torch.save failed on a fresh run. Create the directory actually used.
    if not os.path.isdir('./data/binary_cifar100_2/'):
        os.makedirs('./data/binary_cifar100_2')
    t_class_num = 2  # classes per task
    mean = [x / 255 for x in [125.3, 123.0, 113.9]]
    std = [x / 255 for x in [63.0, 62.1, 66.7]]
    dat={}
    dat['train']=datasets.CIFAR100('./data/', train=True, download=True, transform=transforms.Compose([transforms.ToTensor(),transforms.Normalize(mean,std)]))
    dat['test']=datasets.CIFAR100('./data/', train=False, download=True, transform=transforms.Compose([transforms.ToTensor(),transforms.Normalize(mean,std)]))
    # Tasks 0..49: two consecutive original labels each.
    for t in range(100//t_class_num):
        print(t)
        data[t] = {}
        data[t]['name'] = 'cifar100-' + str(t_class_num*t) + '-' + str(t_class_num*(t+1)-1)
        data[t]['ncla'] = t_class_num
        for s in ['train', 'test']:
            loader = torch.utils.data.DataLoader(dat[s], batch_size=1, shuffle=False)
            data[t][s] = {'x': [], 'y': []}
            for image, target in loader:
                label = target.numpy()[0]
                if label in range(t_class_num*t, t_class_num*(t+1)):
                    data[t][s]['x'].append(image)
                    data[t][s]['y'].append(label)
    # Final task: all 100 classes.
    t = 100 // t_class_num
    data[t] = {}
    data[t]['name'] = 'cifar100-all'
    data[t]['ncla'] = 100
    for s in ['train', 'test']:
        loader = torch.utils.data.DataLoader(dat[s], batch_size=1, shuffle=False)
        data[t][s] = {'x': [], 'y': []}
        for image, target in loader:
            label = target.numpy()[0]
            data[t][s]['x'].append(image)
            data[t][s]['y'].append(label)
    # "Unify" and save: per-split tensors cached to disk.
    for t in data.keys():
        for s in ['train', 'test']:
            data[t][s]['x'] = torch.stack(data[t][s]['x']).view(-1, size[0], size[1], size[2])
            data[t][s]['y'] = torch.LongTensor(np.array(data[t][s]['y'], dtype=int)).view(-1)
            torch.save(data[t][s]['x'],
                       os.path.join(os.path.expanduser('./data/binary_cifar100_2'), 'data' + str(t) + s + 'x.bin'))
            torch.save(data[t][s]['y'],
                       os.path.join(os.path.expanduser('./data/binary_cifar100_2'), 'data' + str(t) + s + 'y.bin'))
    # Load binary files back (tasks 0..50).
    data = {}
    ids = list(np.arange(51))
    print('Task order =', ids)
    for i in range(51):
        data[i] = dict.fromkeys(['name','ncla','train','test'])
        for s in ['train','test']:
            data[i][s]={'x':[],'y':[]}
            data[i][s]['x']=torch.load(os.path.join(os.path.expanduser('./data/binary_cifar100_2'),'data'+str(ids[i])+s+'x.bin'))
            data[i][s]['y']=torch.load(os.path.join(os.path.expanduser('./data/binary_cifar100_2'),'data'+str(ids[i])+s+'y.bin'))
        data[i]['ncla'] = len(np.unique(data[i]['train']['y'].numpy()))
        data[i]['name'] = 'cifar100->>>' + str(i * data[i]['ncla']) + '-' + str(data[i]['ncla'] * (i + 1) - 1)
    # Others: accumulate (task, ncla) pairs and total class count.
    n = 0
    for t in data.keys():
        print("T", t)
        taskcla.append((t, data[t]['ncla']))
        n += data[t]['ncla']
    data['ncla'] = n
    # Per-task DataLoaders for the fifty 2-class tasks.
    Loder = {}
    for t in range(50):
        print("t", t)
        Loder[t] = dict.fromkeys(['name', 'ncla', 'train', 'test', 'valid'])
        dataset_new_train = Data.TensorDataset(data[t]['train']['x'], data[t]['train']['y'])
        dataset_new_test = Data.TensorDataset(data[t]['test']['x'], data[t]['test']['y'])
        train_loader = torch.utils.data.DataLoader(
            dataset_new_train,
            batch_size=10,
            shuffle=True,
        )
        test_loader = torch.utils.data.DataLoader(
            dataset_new_test,
            batch_size=64,
            shuffle=True,
        )
        Loder[t]['train'] = train_loader
        Loder[t]['test'] = test_loader
    # Full raw-image CIFAR-100 test loader (normalized).
    mean = [x / 255 for x in [125.3, 123.0, 113.9]]
    std = [x / 255 for x in [63.0, 62.1, 66.7]]
    test_dataset = datasets.CIFAR100('./data/', train=False, download=True,
                                     transform=transforms.Compose(
                                         [transforms.ToTensor(), transforms.Normalize(mean, std)]))
    test_loader = torch.utils.data.DataLoader(
        test_dataset,
        batch_size=64,
        shuffle=True,
    )
    print("Loder is prepared")
    return data, taskcla[:10 // data[0]['ncla']], size, Loder, test_loader
def get_mnist(seed=0,pc_valid=0.10):
    """Build a 5-task class-incremental split of MNIST (2 classes per task).

    MNIST is partitioned into 5 binary tasks (digits 0-1, 2-3, ..., 8-9) plus
    one extra "all 10 classes" entry, cached to disk as binary tensor files,
    reloaded, and wrapped in per-task DataLoaders.

    NOTE(review): `seed` and `pc_valid` are accepted but never used, and the
    NUM_VALID/NUM_TRAIN values computed below are unused too — presumably
    leftovers from an earlier validation-split version; confirm before use.

    Returns:
        data: dict mapping task id -> {'name', 'ncla', 'train', 'test'} with
            raw tensors, plus data['ncla'] = total class count.
        taskcla: list of (task_id, n_classes), truncated to the 5 per-task
            entries (10 // 2).
        size: input shape [1, 28, 28].
        Loder: dict of per-task {'train', 'test'} DataLoaders.
        test_loader: DataLoader over the full (all-class) MNIST test set.
    """
    data = {}
    taskcla = []
    size = [1, 28, 28]
    # Cache directory for the per-task binary tensor files.  Note the cache
    # is rebuilt on every call; this guard only creates the directory.
    if not os.path.isdir('./data/binary_mnist/'):
        os.makedirs('./data/binary_mnist')
    t_class_num = 2  # classes per task
    mean = (0.1307,)
    std = (0.3081,)
    dat={}
    dat['train']=datasets.MNIST('./data/', train=True, download=True, transform=transforms.Compose([transforms.ToTensor(),transforms.Normalize(mean,std)]))
    dat['test']=datasets.MNIST('./data/', train=False, download=True, transform=transforms.Compose([transforms.ToTensor(),transforms.Normalize(mean,std)]))
    # Slice the dataset into 10 // 2 = 5 tasks of two consecutive digits each.
    for t in range(10//t_class_num):
        print(t)
        data[t] = {}
        data[t]['name'] = 'mnist' + str(t_class_num*t) + '-' + str(t_class_num*(t+1)-1)
        data[t]['ncla'] = t_class_num
        for s in ['train', 'test']:
            # batch_size=1 so each (image, target) pair can be routed by label.
            loader = torch.utils.data.DataLoader(dat[s], batch_size=1, shuffle=False)
            data[t][s] = {'x': [], 'y': []}
            for image, target in loader:
                label = target.numpy()[0]
                if label in range(t_class_num*t, t_class_num*(t+1)):
                    data[t][s]['x'].append(image)
                    data[t][s]['y'].append(label)
    # One extra entry holding all 10 classes together.
    t = 10 // t_class_num
    data[t] = {}
    data[t]['name'] = 'mnist-all'
    data[t]['ncla'] = 10
    for s in ['train', 'test']:
        loader = torch.utils.data.DataLoader(dat[s], batch_size=1, shuffle=False)
        data[t][s] = {'x': [], 'y': []}
        for image, target in loader:
            label = target.numpy()[0]
            data[t][s]['x'].append(image)
            data[t][s]['y'].append(label)
    # "Unify" and save: stack per-sample tensors and cache them to disk.
    for t in data.keys():
        for s in ['train', 'test']:
            data[t][s]['x'] = torch.stack(data[t][s]['x']).view(-1, size[0], size[1], size[2])
            data[t][s]['y'] = torch.LongTensor(np.array(data[t][s]['y'], dtype=int)).view(-1)
            torch.save(data[t][s]['x'],
                       os.path.join(os.path.expanduser('./data/binary_mnist'), 'data' + str(t) + s + 'x.bin'))
            torch.save(data[t][s]['y'],
                       os.path.join(os.path.expanduser('./data/binary_mnist'), 'data' + str(t) + s + 'y.bin'))
    # Load binary files: the 5 per-task entries plus the all-class one (6 total).
    data = {}
    ids = list(np.arange(6))
    print('Task order =', ids)
    for i in range(6):
        data[i] = dict.fromkeys(['name','ncla','train','test'])
        for s in ['train','test']:
            data[i][s]={'x':[],'y':[]}
            data[i][s]['x']=torch.load(os.path.join(os.path.expanduser('./data/binary_mnist'),'data'+str(ids[i])+s+'x.bin'))
            data[i][s]['y']=torch.load(os.path.join(os.path.expanduser('./data/binary_mnist'),'data'+str(ids[i])+s+'y.bin'))
        # Recompute ncla from the actual labels found in the cached tensors.
        data[i]['ncla'] = len(np.unique(data[i]['train']['y'].numpy()))
        data[i]['name'] = 'mnist->>>' + str(i * data[i]['ncla']) + '-' + str(data[i]['ncla'] * (i + 1) - 1)
    # Others: accumulate (task, ncla) pairs and the total class count.
    n = 0
    for t in data.keys():
        print("T", t)
        taskcla.append((t, data[t]['ncla']))
        n += data[t]['ncla']
    data['ncla'] = n
    # Wrap the 5 per-task splits in DataLoaders.
    Loder = {}
    for t in range(5):
        print("t", t)
        Loder[t] = dict.fromkeys(['name', 'ncla', 'train', 'test', 'valid'])
        u1 = torch.tensor(data[t]['train']['x'].reshape(-1, 1, 28, 28))  # .item()
        # print("u1",u1.size())
        TOTAL_NUM = u1.size()[0]
        # NOTE(review): computed but unused — leftover validation split sizes.
        NUM_VALID = int(round(TOTAL_NUM * 0.1))
        NUM_TRAIN = int(round(TOTAL_NUM - NUM_VALID))
        # u1.size()[0]
        # u2=torch.tensor(data[t]['train']['y'].reshape(-1))
        # u3 = data[t]['valid']['x']
        # print("u3",u3.size(),s)
        # u4=data[t]['valid']['y']
        dataset_new_train = Data.TensorDataset(data[t]['train']['x'], data[t]['train']['y'])
        dataset_new_test = Data.TensorDataset(data[t]['test']['x'], data[t]['test']['y'])
        train_loader = torch.utils.data.DataLoader(
            dataset_new_train,
            batch_size=10,
            shuffle=True,
        )
        test_loader = torch.utils.data.DataLoader(
            dataset_new_test,
            batch_size=64,
            shuffle=True,
        )
        Loder[t]['train'] = train_loader
        Loder[t]['test'] = test_loader
    mean = (0.1307,)
    std = (0.3081,)
    # Global test loader over the full (all-class) MNIST test set.
    test_dataset = datasets.MNIST('./data/', train=False, download=True,
                                  transform=transforms.Compose(
                                      [transforms.ToTensor(), transforms.Normalize(mean, std)]))
    test_loader = torch.utils.data.DataLoader(
        test_dataset,
        batch_size=64,
        shuffle=True,
    )
    print("Loder is prepared")
    # 10 // ncla == 5 here, so only the per-task entries of taskcla are returned.
    return data, taskcla[:10 // data[0]['ncla']], size, Loder, test_loader
def get_cifar100_20(seed=0,pc_valid=0.10):
    """Build a 20-task class-incremental split of CIFAR-100 (5 classes/task).

    CIFAR-100 is partitioned into 20 tasks of 5 consecutive labels each plus
    one "all 100 classes" entry, cached to disk as binary tensors under
    './data/binary_cifar100_5', reloaded, and wrapped in DataLoaders.

    NOTE(review): `seed`/`pc_valid` and the NUM_VALID/NUM_TRAIN values below
    are unused.  Also, the returned taskcla is truncated to
    10 // ncla == 2 entries although 20 tasks are built — this looks like a
    copy-paste from a 10-class variant (possibly intended 100 //); confirm
    with callers before changing.

    Returns:
        data, taskcla[:2], size ([3, 32, 32]), Loder (per-task loaders),
        and a DataLoader over the full CIFAR-100 test set.
    """
    data = {}
    taskcla = []
    size = [3, 32, 32]
    # Cache directory for the per-task binary tensor files.
    if not os.path.isdir('./data/binary_cifar100_5/'):
        os.makedirs('./data/binary_cifar100_5')
    t_class_num = 5  # classes per task
    mean = [x / 255 for x in [125.3, 123.0, 113.9]]
    std = [x / 255 for x in [63.0, 62.1, 66.7]]
    dat={}
    dat['train']=datasets.CIFAR100('./data/', train=True, download=True, transform=transforms.Compose([transforms.ToTensor(),transforms.Normalize(mean,std)]))
    dat['test']=datasets.CIFAR100('./data/', train=False, download=True, transform=transforms.Compose([transforms.ToTensor(),transforms.Normalize(mean,std)]))
    # Slice into 100 // 5 = 20 tasks of five consecutive labels each.
    for t in range(100//t_class_num):
        print(t)
        data[t] = {}
        data[t]['name'] = 'cifar100-' + str(t_class_num*t) + '-' + str(t_class_num*(t+1)-1)
        data[t]['ncla'] = t_class_num
        for s in ['train', 'test']:
            # batch_size=1 so each sample can be routed by its label.
            loader = torch.utils.data.DataLoader(dat[s], batch_size=1, shuffle=False)
            data[t][s] = {'x': [], 'y': []}
            for image, target in loader:
                label = target.numpy()[0]
                if label in range(t_class_num*t, t_class_num*(t+1)):
                    data[t][s]['x'].append(image)
                    data[t][s]['y'].append(label)
    # One extra entry holding all 100 classes together.
    t = 100 // t_class_num
    data[t] = {}
    data[t]['name'] = 'cifar100-all'
    data[t]['ncla'] = 100
    for s in ['train', 'test']:
        loader = torch.utils.data.DataLoader(dat[s], batch_size=1, shuffle=False)
        data[t][s] = {'x': [], 'y': []}
        for image, target in loader:
            label = target.numpy()[0]
            data[t][s]['x'].append(image)
            data[t][s]['y'].append(label)
    # "Unify" and save: stack per-sample tensors and cache them to disk.
    for t in data.keys():
        for s in ['train', 'test']:
            data[t][s]['x'] = torch.stack(data[t][s]['x']).view(-1, size[0], size[1], size[2])
            data[t][s]['y'] = torch.LongTensor(np.array(data[t][s]['y'], dtype=int)).view(-1)
            torch.save(data[t][s]['x'],
                       os.path.join(os.path.expanduser('./data/binary_cifar100_5'), 'data' + str(t) + s + 'x.bin'))
            torch.save(data[t][s]['y'],
                       os.path.join(os.path.expanduser('./data/binary_cifar100_5'), 'data' + str(t) + s + 'y.bin'))
    # Load binary files: 20 per-task entries plus the all-class one (21 total).
    data = {}
    ids = list(np.arange(21))
    print('Task order =', ids)
    for i in range(21):
        data[i] = dict.fromkeys(['name','ncla','train','test'])
        for s in ['train','test']:
            data[i][s]={'x':[],'y':[]}
            data[i][s]['x']=torch.load(os.path.join(os.path.expanduser('./data/binary_cifar100_5'),'data'+str(ids[i])+s+'x.bin'))
            data[i][s]['y']=torch.load(os.path.join(os.path.expanduser('./data/binary_cifar100_5'),'data'+str(ids[i])+s+'y.bin'))
        # Recompute ncla from the actual labels found in the cached tensors.
        data[i]['ncla'] = len(np.unique(data[i]['train']['y'].numpy()))
        data[i]['name'] = 'cifar100->>>' + str(i * data[i]['ncla']) + '-' + str(data[i]['ncla'] * (i + 1) - 1)
    # Others: accumulate (task, ncla) pairs and the total class count.
    n = 0
    for t in data.keys():
        print("T", t)
        taskcla.append((t, data[t]['ncla']))
        n += data[t]['ncla']
    data['ncla'] = n
    # Wrap the 20 per-task splits in DataLoaders.
    Loder = {}
    for t in range(20):
        print("t", t)
        Loder[t] = dict.fromkeys(['name', 'ncla', 'train', 'test', 'valid'])
        u1 = torch.tensor(data[t]['train']['x'].reshape(-1, 3, 32, 32))  # .item()
        # print("u1",u1.size())
        TOTAL_NUM = u1.size()[0]
        # NOTE(review): computed but unused — leftover validation split sizes.
        NUM_VALID = int(round(TOTAL_NUM * 0.1))
        NUM_TRAIN = int(round(TOTAL_NUM - NUM_VALID))
        # u1.size()[0]
        # u2=torch.tensor(data[t]['train']['y'].reshape(-1))
        # u3 = data[t]['valid']['x']
        # print("u3",u3.size(),s)
        # u4=data[t]['valid']['y']
        dataset_new_train = Data.TensorDataset(data[t]['train']['x'], data[t]['train']['y'])
        dataset_new_test = Data.TensorDataset(data[t]['test']['x'], data[t]['test']['y'])
        train_loader = torch.utils.data.DataLoader(
            dataset_new_train,
            batch_size=32,
            shuffle=True,
        )
        test_loader = torch.utils.data.DataLoader(
            dataset_new_test,
            batch_size=64,
            shuffle=True,
        )
        Loder[t]['train'] = train_loader
        Loder[t]['test'] = test_loader
    mean = [x / 255 for x in [125.3, 123.0, 113.9]]
    std = [x / 255 for x in [63.0, 62.1, 66.7]]
    # test_dataset = datasets.CIFAR100('./data/', train=False, download=True, transform=transforms.Compose(
    #    [transforms.ToTensor(), transforms.Normalize(mean,
    #                  std)]))  # Data.TensorDataset(data[10//t_num]['test']['x'], data[10//t_num]['test']['y'])
    '''
    mean = [x / 255 for x in [125.3, 123.0, 113.9]]
    std = [x / 255 for x in [63.0, 62.1, 66.7]]
    train_dataset=datasets.CIFAR10('./data/', train=True, download=True, transform=transforms.Compose([transforms.ToTensor(),transforms.Normalize(mean,std)]))
    train_loader = torch.utils.data.DataLoader(
        train_dataset,
        batch_size=64,
        shuffle=True,
    )
    Loder={}
    Loder[0] = dict.fromkeys(['name', 'ncla', 'train', 'test', 'valid'])
    Loder[0]['train']=train_loader
    '''
    # Global test loader over the full CIFAR-100 test set.
    test_dataset = datasets.CIFAR100('./data/', train=False, download=True,
                                     transform=transforms.Compose(
                                         [transforms.ToTensor(), transforms.Normalize(mean, std)]))
    test_loader = torch.utils.data.DataLoader(
        test_dataset,
        batch_size=64,
        shuffle=True,
    )
    print("Loder is prepared")
    return data, taskcla[:10 // data[0]['ncla']], size, Loder, test_loader
def get_cifar100_10(seed=0,pc_valid=0.10):
    """Build a 10-task class-incremental split of CIFAR-100 (10 classes/task).

    CIFAR-100 is partitioned into 10 tasks of 10 consecutive labels each plus
    one "all 100 classes" entry, cached to disk under
    './data/binary_cifar100_10', reloaded, and wrapped in DataLoaders.

    NOTE(review): `seed`/`pc_valid` and the NUM_VALID/NUM_TRAIN values below
    are unused.  The returned taskcla is truncated to 10 // ncla == 1 entry
    although 10 tasks are built — looks like a copy-paste (possibly intended
    100 //); confirm with callers before changing.

    Returns:
        data, taskcla[:1], size ([3, 32, 32]), Loder (per-task loaders),
        and a DataLoader over the full CIFAR-100 test set.
    """
    data = {}
    taskcla = []
    size = [3, 32, 32]
    # Cache directory for the per-task binary tensor files.
    if not os.path.isdir('./data/binary_cifar100_10/'):
        os.makedirs('./data/binary_cifar100_10')
    t_class_num = 10  # classes per task
    mean = [x / 255 for x in [125.3, 123.0, 113.9]]
    std = [x / 255 for x in [63.0, 62.1, 66.7]]
    dat={}
    dat['train']=datasets.CIFAR100('./data/', train=True, download=True, transform=transforms.Compose([transforms.ToTensor(),transforms.Normalize(mean,std)]))
    dat['test']=datasets.CIFAR100('./data/', train=False, download=True, transform=transforms.Compose([transforms.ToTensor(),transforms.Normalize(mean,std)]))
    # Slice into 100 // 10 = 10 tasks of ten consecutive labels each.
    for t in range(100//t_class_num):
        print(t)
        data[t] = {}
        data[t]['name'] = 'cifar100-' + str(t_class_num*t) + '-' + str(t_class_num*(t+1)-1)
        data[t]['ncla'] = t_class_num
        for s in ['train', 'test']:
            # batch_size=1 so each sample can be routed by its label.
            loader = torch.utils.data.DataLoader(dat[s], batch_size=1, shuffle=False)
            data[t][s] = {'x': [], 'y': []}
            for image, target in loader:
                label = target.numpy()[0]
                if label in range(t_class_num*t, t_class_num*(t+1)):
                    data[t][s]['x'].append(image)
                    data[t][s]['y'].append(label)
    # One extra entry holding all 100 classes together.
    t = 100 // t_class_num
    data[t] = {}
    data[t]['name'] = 'cifar100-all'
    data[t]['ncla'] = 100
    for s in ['train', 'test']:
        loader = torch.utils.data.DataLoader(dat[s], batch_size=1, shuffle=False)
        data[t][s] = {'x': [], 'y': []}
        for image, target in loader:
            label = target.numpy()[0]
            data[t][s]['x'].append(image)
            data[t][s]['y'].append(label)
    # "Unify" and save: stack per-sample tensors and cache them to disk.
    for t in data.keys():
        for s in ['train', 'test']:
            data[t][s]['x'] = torch.stack(data[t][s]['x']).view(-1, size[0], size[1], size[2])
            data[t][s]['y'] = torch.LongTensor(np.array(data[t][s]['y'], dtype=int)).view(-1)
            torch.save(data[t][s]['x'],
                       os.path.join(os.path.expanduser('./data/binary_cifar100_10'), 'data' + str(t) + s + 'x.bin'))
            torch.save(data[t][s]['y'],
                       os.path.join(os.path.expanduser('./data/binary_cifar100_10'), 'data' + str(t) + s + 'y.bin'))
    # Load binary files: 10 per-task entries plus the all-class one (11 total).
    data = {}
    ids = list(np.arange(11))
    print('Task order =', ids)
    for i in range(11):
        data[i] = dict.fromkeys(['name','ncla','train','test'])
        for s in ['train','test']:
            data[i][s]={'x':[],'y':[]}
            data[i][s]['x']=torch.load(os.path.join(os.path.expanduser('./data/binary_cifar100_10'),'data'+str(ids[i])+s+'x.bin'))
            data[i][s]['y']=torch.load(os.path.join(os.path.expanduser('./data/binary_cifar100_10'),'data'+str(ids[i])+s+'y.bin'))
        # Recompute ncla from the actual labels found in the cached tensors.
        data[i]['ncla'] = len(np.unique(data[i]['train']['y'].numpy()))
        data[i]['name'] = 'cifar100->>>' + str(i * data[i]['ncla']) + '-' + str(data[i]['ncla'] * (i + 1) - 1)
    # Others: accumulate (task, ncla) pairs and the total class count.
    n = 0
    for t in data.keys():
        print("T", t)
        taskcla.append((t, data[t]['ncla']))
        n += data[t]['ncla']
    data['ncla'] = n
    # Wrap the 10 per-task splits in DataLoaders.
    Loder = {}
    for t in range(10):
        print("t", t)
        Loder[t] = dict.fromkeys(['name', 'ncla', 'train', 'test', 'valid'])
        u1 = torch.tensor(data[t]['train']['x'].reshape(-1, 3, 32, 32))  # .item()
        # print("u1",u1.size())
        TOTAL_NUM = u1.size()[0]
        # NOTE(review): computed but unused — leftover validation split sizes.
        NUM_VALID = int(round(TOTAL_NUM * 0.1))
        NUM_TRAIN = int(round(TOTAL_NUM - NUM_VALID))
        # u1.size()[0]
        # u2=torch.tensor(data[t]['train']['y'].reshape(-1))
        # u3 = data[t]['valid']['x']
        # print("u3",u3.size(),s)
        # u4=data[t]['valid']['y']
        dataset_new_train = Data.TensorDataset(data[t]['train']['x'], data[t]['train']['y'])
        dataset_new_test = Data.TensorDataset(data[t]['test']['x'], data[t]['test']['y'])
        train_loader = torch.utils.data.DataLoader(
            dataset_new_train,
            batch_size=10,
            shuffle=True,
        )
        test_loader = torch.utils.data.DataLoader(
            dataset_new_test,
            batch_size=64,
            shuffle=True,
        )
        Loder[t]['train'] = train_loader
        Loder[t]['test'] = test_loader
    mean = [x / 255 for x in [125.3, 123.0, 113.9]]
    std = [x / 255 for x in [63.0, 62.1, 66.7]]
    # test_dataset = datasets.CIFAR100('./data/', train=False, download=True, transform=transforms.Compose(
    #    [transforms.ToTensor(), transforms.Normalize(mean,
    #                  std)]))  # Data.TensorDataset(data[10//t_num]['test']['x'], data[10//t_num]['test']['y'])
    '''
    mean = [x / 255 for x in [125.3, 123.0, 113.9]]
    std = [x / 255 for x in [63.0, 62.1, 66.7]]
    train_dataset=datasets.CIFAR10('./data/', train=True, download=True, transform=transforms.Compose([transforms.ToTensor(),transforms.Normalize(mean,std)]))
    train_loader = torch.utils.data.DataLoader(
        train_dataset,
        batch_size=64,
        shuffle=True,
    )
    Loder={}
    Loder[0] = dict.fromkeys(['name', 'ncla', 'train', 'test', 'valid'])
    Loder[0]['train']=train_loader
    '''
    # Global test loader over the full CIFAR-100 test set.
    test_dataset = datasets.CIFAR100('./data/', train=False, download=True,
                                     transform=transforms.Compose(
                                         [transforms.ToTensor(), transforms.Normalize(mean, std)]))
    test_loader = torch.utils.data.DataLoader(
        test_dataset,
        batch_size=64,
        shuffle=True,
    )
    print("Loder is prepared")
    return data, taskcla[:10 // data[0]['ncla']], size, Loder, test_loader
def get_cifar100_5_5(seed=0,pc_valid=0.10):
    """Build a mixed base/few-shot CIFAR-100 split: one large base task with
    labels 0-59, then 8 few-shot tasks of 5 classes with at most 5 samples
    per class per split.

    Data is cached to './data/binary_cifar100_5_5' and wrapped in
    DataLoaders (batch 128 for the base task, 25 for the few-shot tasks).

    NOTE(review): several inconsistencies are preserved as-is —
    * t_num is 9 and the per-task 'name'/'ncla' fields are written with it,
      but task 0 actually contains 60 classes (ncla is recomputed correctly
      after reload);
    * the "all classes" entry is saved under index 100 // 9 == 11 but never
      reloaded (only ids 0-8 are read back);
    * the return truncates taskcla to 10 // data[0]['ncla'] == 10 // 60 == 0
      entries, i.e. an empty list — confirm with callers;
    * `seed`/`pc_valid` and NUM_VALID/NUM_TRAIN are unused;
    * Loder entries only carry a 'train' loader here (no per-task 'test').

    Returns:
        data, taskcla[:0], size ([3, 32, 32]), Loder (train loaders only),
        and a DataLoader (batch 2000) over the full CIFAR-100 test set.
    """
    data = {}
    taskcla = []
    size = [3, 32, 32]
    # Cache directory for the per-task binary tensor files.
    if not os.path.isdir('./data/binary_cifar100_5_5/'):
        os.makedirs('./data/binary_cifar100_5_5')
    t_num = 9  # number of tasks (1 base + 8 few-shot)
    mean = [x / 255 for x in [125.3, 123.0, 113.9]]
    std = [x / 255 for x in [63.0, 62.1, 66.7]]
    dat={}
    dat['train']=datasets.CIFAR100('./data/', train=True, download=True, transform=transforms.Compose([transforms.ToTensor(),transforms.Normalize(mean,std)]))
    dat['test']=datasets.CIFAR100('./data/', train=False, download=True, transform=transforms.Compose([transforms.ToTensor(),transforms.Normalize(mean,std)]))
    for t in range(9):
        print(t)
        if t==0:
            # Base task: every sample whose label falls in [0, 60).
            data[t] = {}
            data[t]['name'] = 'cifar100-' + str(t_num * t) + '-' + str(t_num * (t + 1) - 1)
            data[t]['ncla'] = t_num
            for s in ['train', 'test']:
                loader = torch.utils.data.DataLoader(dat[s], batch_size=1, shuffle=False)
                data[t][s] = {'x': [], 'y': []}
                for image, target in loader:
                    label = target.numpy()[0]
                    if label in range(0, 60):
                        data[t][s]['x'].append(image)
                        data[t][s]['y'].append(label)
        else:
            # Few-shot task t: labels [60+5*(t-1), 60+5*t), capped at 5
            # samples per class in each split.
            data[t] = {}
            data[t]['name'] = 'cifar100-' + str(t_num*t) + '-' + str(t_num*(t+1)-1)
            data[t]['ncla'] = t_num
            for s in ['train', 'test']:
                loader = torch.utils.data.DataLoader(dat[s], batch_size=1, shuffle=False)
                data[t][s] = {'x': [], 'y': []}
                class_num={}
                for i in range(60+5*(t-1),60+5*t):
                    class_num[i]=0
                for image, target in loader:
                    label = target.numpy()[0]
                    if label in range(60+5*(t-1), 60+5*t):
                        if class_num[label]<5:
                            data[t][s]['x'].append(image)
                            data[t][s]['y'].append(label)
                            class_num[label]+=1
                        else:
                            continue
    # Extra entry holding all 100 classes (saved but never reloaded below).
    t = 100 // t_num
    data[t] = {}
    data[t]['name'] = 'cifar100-all'
    data[t]['ncla'] = 100
    for s in ['train', 'test']:
        loader = torch.utils.data.DataLoader(dat[s], batch_size=1, shuffle=False)
        data[t][s] = {'x': [], 'y': []}
        for image, target in loader:
            label = target.numpy()[0]
            data[t][s]['x'].append(image)
            data[t][s]['y'].append(label)
    # "Unify" and save: stack per-sample tensors and cache them to disk.
    for t in data.keys():
        for s in ['train', 'test']:
            data[t][s]['x'] = torch.stack(data[t][s]['x']).view(-1, size[0], size[1], size[2])
            data[t][s]['y'] = torch.LongTensor(np.array(data[t][s]['y'], dtype=int)).view(-1)
            torch.save(data[t][s]['x'],
                       os.path.join(os.path.expanduser('./data/binary_cifar100_5_5'), 'data' + str(t) + s + 'x.bin'))
            torch.save(data[t][s]['y'],
                       os.path.join(os.path.expanduser('./data/binary_cifar100_5_5'), 'data' + str(t) + s + 'y.bin'))
    # Load binary files: only the 9 per-task entries.
    data = {}
    ids = list(np.arange(9))
    print('Task order =', ids)
    for i in range(9):
        data[i] = dict.fromkeys(['name','ncla','train','test'])
        for s in ['train','test']:
            data[i][s]={'x':[],'y':[]}
            data[i][s]['x']=torch.load(os.path.join(os.path.expanduser('./data/binary_cifar100_5_5'),'data'+str(ids[i])+s+'x.bin'))
            data[i][s]['y']=torch.load(os.path.join(os.path.expanduser('./data/binary_cifar100_5_5'),'data'+str(ids[i])+s+'y.bin'))
        # Recompute ncla from the actual labels (60 for task 0, 5 otherwise).
        data[i]['ncla'] = len(np.unique(data[i]['train']['y'].numpy()))
        data[i]['name'] = 'cifar100->>>' + str(i * data[i]['ncla']) + '-' + str(data[i]['ncla'] * (i + 1) - 1)
    # Others: accumulate (task, ncla) pairs and the total class count.
    n = 0
    for t in data.keys():
        print("T", t)
        taskcla.append((t, data[t]['ncla']))
        n += data[t]['ncla']
    data['ncla'] = n
    # Wrap the 9 training splits in DataLoaders (no per-task test loaders).
    Loder = {}
    for t in range(9):
        print("t", t)
        Loder[t] = dict.fromkeys(['name', 'ncla', 'train', 'test', 'valid'])
        u1 = torch.tensor(data[t]['train']['x'].reshape(-1, 3, 32, 32))  # .item()
        # print("u1",u1.size())
        TOTAL_NUM = u1.size()[0]
        # NOTE(review): computed but unused — leftover validation split sizes.
        NUM_VALID = int(round(TOTAL_NUM * 0.1))
        NUM_TRAIN = int(round(TOTAL_NUM - NUM_VALID))
        # u1.size()[0]
        # u2=torch.tensor(data[t]['train']['y'].reshape(-1))
        # u3 = data[t]['valid']['x']
        # print("u3",u3.size(),s)
        # u4=data[t]['valid']['y']
        dataset_new_train = Data.TensorDataset(data[t]['train']['x'], data[t]['train']['y'])
        # dataset_new_valid = Data.TensorDataset(data[t]['valid']['x'], data[t]['valid']['y'])
        if t==0:
            # Large base task: bigger batches.
            train_loader = torch.utils.data.DataLoader(
                dataset_new_train,
                batch_size=128,
                shuffle=True,
            )
        else:
            # Few-shot tasks: one batch covers all 25 samples.
            train_loader = torch.utils.data.DataLoader(
                dataset_new_train,
                batch_size=25,
                shuffle=True,
            )
        Loder[t]['train'] = train_loader
        # Loder[t]['valid'] = valid_loader
    mean = [x / 255 for x in [125.3, 123.0, 113.9]]
    std = [x / 255 for x in [63.0, 62.1, 66.7]]
    # test_dataset = datasets.CIFAR100('./data/', train=False, download=True, transform=transforms.Compose(
    #    [transforms.ToTensor(), transforms.Normalize(mean,
    #                  std)]))  # Data.TensorDataset(data[10//t_num]['test']['x'], data[10//t_num]['test']['y'])
    '''
    mean = [x / 255 for x in [125.3, 123.0, 113.9]]
    std = [x / 255 for x in [63.0, 62.1, 66.7]]
    train_dataset=datasets.CIFAR10('./data/', train=True, download=True, transform=transforms.Compose([transforms.ToTensor(),transforms.Normalize(mean,std)]))
    train_loader = torch.utils.data.DataLoader(
        train_dataset,
        batch_size=64,
        shuffle=True,
    )
    Loder={}
    Loder[0] = dict.fromkeys(['name', 'ncla', 'train', 'test', 'valid'])
    Loder[0]['train']=train_loader
    '''
    # Global test loader over the full CIFAR-100 test set.
    test_dataset = datasets.CIFAR100('./data/', train=False, download=True,
                                     transform=transforms.Compose(
                                         [transforms.ToTensor(), transforms.Normalize(mean, std)]))
    test_loader = torch.utils.data.DataLoader(
        test_dataset,
        batch_size=2000,
        shuffle=True,
    )
    print("Loder is prepared")
    return data, taskcla[:10 // data[0]['ncla']], size, Loder, test_loader
from tinyimagenet import MyTinyImagenet
from conf import base_path
def get_tinyimagenet_100(seed=0,pc_valid=0.10):
    """Build a 100-task class-incremental split of TinyImagenet-200
    (2 classes per task), cached to disk and wrapped in DataLoaders.

    Bug fix: the directory-existence check used './data/binary_tiny200_222'
    while the tensors were saved and loaded under './data/binary_tiny200_22',
    so a fresh run crashed with FileNotFoundError on the first torch.save.
    A single `cache_dir` is now used for the check, the saves and the loads
    (kept as 'binary_tiny200_22' so existing caches remain valid).

    NOTE(review): `seed`/`pc_valid` and NUM_VALID/NUM_TRAIN are unused, the
    per-task 'name' strings still read 'cifar100-…', and the returned taskcla
    is truncated to 10 // 2 == 5 entries although 100 tasks are built — these
    mirror the sibling loaders; confirm with callers before changing them.

    Returns:
        data: dict mapping task id -> {'name', 'ncla', 'train', 'test'},
            plus data['ncla'] = total class count.
        taskcla: list of (task_id, n_classes), truncated to 5 entries.
        size: input shape [3, 64, 64].
        Loder: dict of per-task {'train', 'test'} DataLoaders.
        test_loader: DataLoader over the full TinyImagenet test set.
    """
    data = {}
    taskcla = []
    size = [3, 64, 64]
    # One cache directory, used consistently for the check, saves and loads.
    cache_dir = './data/binary_tiny200_22'
    if not os.path.isdir(cache_dir):
        os.makedirs(cache_dir)
    t_class_num = 2  # classes per task
    #mean = [x / 255 for x in [125.3, 123.0, 113.9]]
    #std = [x / 255 for x in [63.0, 62.1, 66.7]]
    dat={}
    transform = transforms.Normalize((0.4802, 0.4480, 0.3975),
                                     (0.2770, 0.2691, 0.2821))
    test_transform = transforms.Compose(
        [transforms.ToTensor(), transform])
    train = MyTinyImagenet(base_path() + 'TINYIMG',
                           train=True, download=True, transform=test_transform)
    # train = datasets.CIFAR100('Data/', train=True, download=True)
    test = MyTinyImagenet(base_path() + 'TINYIMG',
                          train=False, download=True, transform=test_transform)
    dat['train']=train
    dat['test']=test
    # Slice into 200 // 2 = 100 tasks of two consecutive labels each.
    for t in range(200//t_class_num):
        print(t)
        data[t] = {}
        data[t]['name'] = 'cifar100-' + str(t_class_num*t) + '-' + str(t_class_num*(t+1)-1)
        data[t]['ncla'] = t_class_num
        for s in ['train', 'test']:
            # batch_size=1 so each sample can be routed by its label.
            loader = torch.utils.data.DataLoader(dat[s], batch_size=1, shuffle=False)
            data[t][s] = {'x': [], 'y': []}
            for image, target in loader:
                label = target.numpy()[0]
                if label in range(t_class_num*t, t_class_num*(t+1)):
                    data[t][s]['x'].append(image)
                    data[t][s]['y'].append(label)
    # One extra entry holding all 200 classes together.
    t = 200 // t_class_num
    data[t] = {}
    data[t]['name'] = 'tiny200-all'
    data[t]['ncla'] = 200
    for s in ['train', 'test']:
        loader = torch.utils.data.DataLoader(dat[s], batch_size=1, shuffle=False)
        data[t][s] = {'x': [], 'y': []}
        for image, target in loader:
            label = target.numpy()[0]
            data[t][s]['x'].append(image)
            data[t][s]['y'].append(label)
    # "Unify" and save: stack per-sample tensors and cache them to disk.
    for t in data.keys():
        for s in ['train', 'test']:
            data[t][s]['x'] = torch.stack(data[t][s]['x']).view(-1, size[0], size[1], size[2])
            data[t][s]['y'] = torch.LongTensor(np.array(data[t][s]['y'], dtype=int)).view(-1)
            torch.save(data[t][s]['x'],
                       os.path.join(os.path.expanduser(cache_dir), 'data' + str(t) + s + 'x.bin'))
            torch.save(data[t][s]['y'],
                       os.path.join(os.path.expanduser(cache_dir), 'data' + str(t) + s + 'y.bin'))
    # Load binary files: 100 per-task entries plus the all-class one.
    data = {}
    ids = list(np.arange(101))
    print('Task order =', ids)
    for i in range(101):
        data[i] = dict.fromkeys(['name','ncla','train','test'])
        for s in ['train','test']:
            data[i][s]={'x':[],'y':[]}
            data[i][s]['x']=torch.load(os.path.join(os.path.expanduser(cache_dir),'data'+str(ids[i])+s+'x.bin'))
            data[i][s]['y']=torch.load(os.path.join(os.path.expanduser(cache_dir),'data'+str(ids[i])+s+'y.bin'))
        # Recompute ncla from the actual labels found in the cached tensors.
        data[i]['ncla'] = len(np.unique(data[i]['train']['y'].numpy()))
        data[i]['name'] = 'cifar100->>>' + str(i * data[i]['ncla']) + '-' + str(data[i]['ncla'] * (i + 1) - 1)
    # Others: accumulate (task, ncla) pairs and the total class count.
    n = 0
    for t in data.keys():
        print("T", t)
        taskcla.append((t, data[t]['ncla']))
        n += data[t]['ncla']
    data['ncla'] = n
    # Wrap the 100 per-task splits in DataLoaders.
    Loder = {}
    for t in range(100):
        print("t", t)
        Loder[t] = dict.fromkeys(['name', 'ncla', 'train', 'test', 'valid'])
        u1 = torch.tensor(data[t]['train']['x'].reshape(-1, 3, 64, 64))
        TOTAL_NUM = u1.size()[0]
        # NOTE(review): computed but unused — leftover validation split sizes.
        NUM_VALID = int(round(TOTAL_NUM * 0.1))
        NUM_TRAIN = int(round(TOTAL_NUM - NUM_VALID))
        dataset_new_train = Data.TensorDataset(data[t]['train']['x'], data[t]['train']['y'])
        dataset_new_test = Data.TensorDataset(data[t]['test']['x'], data[t]['test']['y'])
        train_loader = torch.utils.data.DataLoader(
            dataset_new_train,
            batch_size=10,
            shuffle=True,
        )
        test_loader = torch.utils.data.DataLoader(
            dataset_new_test,
            batch_size=64,
            shuffle=True,
        )
        Loder[t]['train'] = train_loader
        Loder[t]['test'] = test_loader
    # Global test loader over the full TinyImagenet test set.
    transform = transforms.Normalize((0.4802, 0.4480, 0.3975),
                                     (0.2770, 0.2691, 0.2821))
    test_transform = transforms.Compose(
        [transforms.ToTensor(), transform])
    test = MyTinyImagenet(base_path() + 'TINYIMG',
                          train=False, download=True, transform=test_transform)
    test_loader = torch.utils.data.DataLoader(
        test,
        batch_size=64,
        shuffle=True,
    )
    print("Loder is prepared")
    return data, taskcla[:10 // data[0]['ncla']], size, Loder, test_loader
| 75,193 | 45.04654 | 239 | py |
GSA | GSA-main/GSA_CVPR/CSL/base_model.py | from abc import *
import torch.nn as nn
import torch
import torch.nn.functional as F
class BaseModel(nn.Module, metaclass=ABCMeta):
    """Abstract backbone wrapper exposing the auxiliary heads used by CSL.

    Subclasses implement `penultimate`, which maps inputs to feature vectors;
    this base class attaches the main (single-logit) head plus the SimCLR
    projection, shift-prediction and joint-distribution heads.

    NOTE(review): `weight3` hard-codes a 300-dim feature size, so the main
    head assumes last_dim == 300 — confirm against the concrete backbones.
    """

    def __init__(self, last_dim=300, num_classes=10, simclr_dim=400):
        super(BaseModel, self).__init__()
        self.linear = nn.Linear(last_dim, num_classes)
        self.out_num = 1  # number of logits actually returned by forward()
        # Main-head weight matrix; forward() keeps only the first `out_num`
        # output rows.  Fix: this parameter was previously created from raw
        # torch.Tensor memory and never initialized, so the main head started
        # from arbitrary garbage values; initialize it like a Linear weight.
        self.weight3 = nn.Parameter(torch.Tensor(3 + self.out_num, 300))
        nn.init.xavier_uniform_(self.weight3)
        self.simclr_layer = nn.Sequential(
            nn.Linear(last_dim, last_dim),
            nn.ReLU(),
            nn.Linear(last_dim, simclr_dim),
        )
        self.shift_cls_layer = nn.Linear(last_dim, 4)
        self.joint_distribution_layer = nn.Linear(last_dim, 4 * num_classes)

    @abstractmethod
    def penultimate(self, inputs, all_features=False):
        """Return backbone features for `inputs` (implemented by subclasses)."""
        pass

    def forward(self, inputs, penultimate=False, simclr=False, shift=False):
        """Run the backbone and, optionally, the requested auxiliary heads.

        Returns the main-head logits (sliced to `out_num` columns); when any
        auxiliary flag is set, also returns a dict with those head outputs.
        """
        _aux = {}
        _return_aux = False

        features = self.penultimate(inputs)  # backbone (MLP) features
        # Main head: plain linear map without bias.
        output = F.linear(features, self.weight3)

        if penultimate:
            _return_aux = True
            _aux['penultimate'] = features  # raw features, before any head
        if simclr:
            _return_aux = True
            _aux['simclr'] = self.simclr_layer(features)  # SimCLR projection
        if shift:
            _return_aux = True
            _aux['shift'] = self.shift_cls_layer(features)  # 4-way shift logits

        if _return_aux:
            return output[:, :self.out_num], _aux

        return output[:, :self.out_num]
| 1,540 | 29.82 | 76 | py |
GSA | GSA-main/GSA_CVPR/CSL/tao.py | import math
import numbers
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Function
if torch.__version__ >= '1.4.0':
kwargs = {'align_corners': False}
else:
kwargs = {}
def rgb2hsv(rgb):
    """Convert a 4-d RGB tensor (N, 3, H, W) to its HSV counterpart.

    Hue is computed with atan2() following the hexagonal definition of hue
    [1], rather than the usual lookup-table formulation [2, 3]; the two agree
    at multiples of 30° and differ by at most ~1.2° elsewhere.

    References
        [1] https://en.wikipedia.org/wiki/Hue
        [2] https://www.rapidtables.com/convert/color/rgb-to-hsv.html
        [3] https://github.com/scikit-image/scikit-image/blob/master/skimage/color/colorconv.py#L212
    """
    red, green, blue = rgb[:, 0, :, :], rgb[:, 1, :, :], rgb[:, 2, :, :]
    c_max = rgb.max(1)[0]
    c_min = rgb.min(1)[0]
    chroma = c_max - c_min

    # Hexagonal hue via atan2, wrapped into [0, 1).
    hue = torch.atan2(math.sqrt(3) * (green - blue), 2 * red - green - blue)
    hue = torch.remainder(hue, 2 * math.pi) / (2 * math.pi)

    sat = chroma / c_max  # NaN where c_max == 0; zeroed below
    out = torch.stack([hue, sat, c_max], dim=1)
    # Grey/black pixels produce NaN/Inf entries; force them to 0.
    out[~torch.isfinite(out)] = 0.
    return out
def hsv2rgb(hsv):
    """Convert a 4-d HSV tensor (N, 3, H, W) back to RGB.

    Uses the "alternative" HSV-to-RGB formulation
    (https://en.wikipedia.org/wiki/HSL_and_HSV#HSV_to_RGB_alternative):
    each channel is value minus chroma times a clamped triangular ramp of
    the hue, evaluated at per-channel offsets (5, 3, 1).
    """
    hue, sat, val = hsv[:, [0]], hsv[:, [1]], hsv[:, [2]]
    chroma = val * sat
    # Per-channel offsets of the triangular ramp, broadcast over (N, 3, H, W).
    offsets = hsv.new_tensor([5, 3, 1]).view(3, 1, 1)
    k = torch.remainder(offsets + hue * 6, 6)
    ramp = torch.clamp(torch.min(k, 4 - k), 0, 1)
    return val - chroma * ramp
class RandomResizedCropLayer(nn.Module):
    def __init__(self, size=None, scale=(0.08, 1.0), ratio=(3. / 4., 4. / 3.)):
        '''
        Inception-style random resized crop, implemented as a batched affine
        warp so it runs directly on tensors (and on GPU).

        size (tuple): size of forwarded image (C, W, H); if given, the output
            is adaptively average-pooled to this size
        scale (tuple): range of size of the origin size cropped
        ratio (tuple): range of aspect ratio of the origin aspect ratio cropped
        '''
        super(RandomResizedCropLayer, self).__init__()
        _eye = torch.eye(2, 3)  # 2x3 identity affine matrix
        self.size = size
        self.register_buffer('_eye', _eye)
        self.scale = scale
        self.ratio = ratio

    def forward(self, inputs, whbias=None):
        # `whbias` columns are (w, h, w_bias, h_bias): per-sample crop scale
        # (affine diagonal) and crop offset (affine translation column).
        _device = inputs.device
        N = inputs.size(0)
        _theta = self._eye.repeat(N, 1, 1)

        if whbias is None:
            whbias = self._sample_latent(inputs)

        _theta[:, 0, 0] = whbias[:, 0]
        _theta[:, 1, 1] = whbias[:, 1]
        _theta[:, 0, 2] = whbias[:, 2]
        _theta[:, 1, 2] = whbias[:, 3]

        # Resample through the affine grid; _theta carries the crop parameters.
        grid = F.affine_grid(_theta, inputs.size(), **kwargs).to(_device)
        output = F.grid_sample(inputs, grid, padding_mode='reflection', **kwargs)

        if self.size is not None:
            output = F.adaptive_avg_pool2d(output, self.size)

        return output

    def _clamp(self, whbias):
        # Project sampled (w, h, w_bias, h_bias) back into the valid region
        # using relu-based soft clamps; the order matters (scale, then ratio,
        # then bias range, which depends on the clamped w/h).
        w = whbias[:, 0]
        h = whbias[:, 1]
        w_bias = whbias[:, 2]
        h_bias = whbias[:, 3]

        # Clamp with scale
        w = torch.clamp(w, *self.scale)
        h = torch.clamp(h, *self.scale)

        # Clamp with ratio: forces w into [ratio[0] * h, ratio[1] * h]
        w = self.ratio[0] * h + torch.relu(w - self.ratio[0] * h)
        w = self.ratio[1] * h - torch.relu(self.ratio[1] * h - w)

        # Clamp with bias range: w_bias \in (w - 1, 1 - w), h_bias \in (h - 1, 1 - h)
        w_bias = w - 1 + torch.relu(w_bias - w + 1)
        w_bias = 1 - w - torch.relu(1 - w - w_bias)
        h_bias = h - 1 + torch.relu(h_bias - h + 1)
        h_bias = 1 - h - torch.relu(1 - h - h_bias)

        whbias = torch.stack([w, h, w_bias, h_bias], dim=0).t()

        return whbias

    def _sample_latent(self, inputs):
        # Rejection-sample crop sizes as in torchvision's RandomResizedCrop:
        # draw N * 10 candidates and keep those that fit inside the image.
        _device = inputs.device
        N, _, width, height = inputs.shape

        # N * 10 trial
        area = width * height
        target_area = np.random.uniform(*self.scale, N * 10) * area
        log_ratio = (math.log(self.ratio[0]), math.log(self.ratio[1]))
        aspect_ratio = np.exp(np.random.uniform(*log_ratio, N * 10))

        # If doesn't satisfy ratio condition, then do central crop
        w = np.round(np.sqrt(target_area * aspect_ratio))
        h = np.round(np.sqrt(target_area / aspect_ratio))
        cond = (0 < w) * (w <= width) * (0 < h) * (h <= height)
        w = w[cond]
        h = h[cond]
        cond_len = w.shape[0]
        if cond_len >= N:
            w = w[:N]
            h = h[:N]
        else:
            # Not enough valid candidates: pad with full-size (no-op) crops.
            w = np.concatenate([w, np.ones(N - cond_len) * width])
            h = np.concatenate([h, np.ones(N - cond_len) * height])

        # NOTE(review): float bounds are passed to np.random.randint here —
        # accepted by older numpy but deprecated/stricter in newer releases;
        # confirm the pinned numpy version before upgrading.
        w_bias = np.random.randint(w - width, width - w + 1) / width
        h_bias = np.random.randint(h - height, height - h + 1) / height
        w = w / width
        h = h / height

        whbias = np.column_stack([w, h, w_bias, h_bias])
        whbias = torch.tensor(whbias, device=_device)

        return whbias
class HorizontalFlipRandomCrop(nn.Module):
    """Random horizontal flip plus a small random translation.

    Both operations are folded into a single per-sample affine warp: the
    x-scale of the matrix is a random sign in {-1, +1} (a flip when -1) and
    the translation column is a uniform shift in [-max_range, max_range].
    Callers may pass `sign` / `bias` / `rotation` explicitly to make the
    transform deterministic.
    """

    def __init__(self, max_range):
        super(HorizontalFlipRandomCrop, self).__init__()
        self.max_range = max_range
        # 2x3 identity affine matrix, registered so it follows .to(device).
        self.register_buffer('_eye', torch.eye(2, 3))

    def forward(self, input, sign=None, bias=None, rotation=None):
        device = input.device
        batch = input.size(0)
        theta = self._eye.repeat(batch, 1, 1)

        if sign is None:
            # Random flip sign in {-1, +1}, each with probability 0.5.
            sign = torch.bernoulli(torch.ones(batch, device=device) * 0.5) * 2 - 1
        if bias is None:
            # Random (x, y) translation in [-max_range, max_range].
            bias = torch.empty((batch, 2), device=device).uniform_(-self.max_range, self.max_range)

        theta[:, 0, 0] = sign
        theta[:, :, 2] = bias
        if rotation is not None:
            # Optional explicit 2x2 linear part (overrides flip scaling).
            theta[:, 0:2, 0:2] = rotation

        grid = F.affine_grid(theta, input.size(), **kwargs).to(device)
        return F.grid_sample(input, grid, padding_mode='reflection', **kwargs)

    def _sample_latent(self, N, device=None):
        """Sample (sign, bias) pairs without applying them."""
        flip = torch.bernoulli(torch.ones(N, device=device) * 0.5) * 2 - 1
        shift = torch.empty((N, 2), device=device).uniform_(-self.max_range, self.max_range)
        return flip, shift
class Rotation(nn.Module):
    """Rotates images by a multiple of 90 degrees.

    With an explicit `aug_index`, rotation by `aug_index % 4` quarter-turns
    is applied deterministically.  Without one, a rotation is drawn uniformly
    from the four options and each sample then keeps its *original*
    orientation with probability `prob` (per-sample Bernoulli mask).
    """

    def __init__(self, max_range=4):
        super(Rotation, self).__init__()
        self.max_range = max_range
        self.prob = 0.5

    def forward(self, input, aug_index=None):
        if aug_index is not None:
            # Deterministic path: rotate by the requested multiple of 90°.
            return torch.rot90(input, aug_index % self.max_range, (2, 3))

        # Random path: one shared rotation, then a per-sample keep mask.
        quarter_turns = np.random.randint(4)
        rotated = torch.rot90(input, quarter_turns, (2, 3))
        keep = torch.bernoulli(input.new_full((input.size(0),), self.prob)).view(-1, 1, 1, 1)
        # keep == 1 -> original sample, keep == 0 -> rotated sample.
        return keep * input + (1 - keep) * rotated
class CutPerm(nn.Module):
    """Cyclically swaps image halves (a 4-way quadrant permutation).

    `aug_index % 4` is decoded as two bits: the high bit swaps the top and
    bottom halves, the low bit swaps the left and right halves.  With an
    explicit `aug_index` the permutation is deterministic; otherwise one of
    the four is drawn at random and each sample keeps its original layout
    with probability `prob` (per-sample Bernoulli mask).
    """

    def __init__(self, max_range=4):
        super(CutPerm, self).__init__()
        self.max_range = max_range
        self.prob = 0.5

    def forward(self, input, aug_index=None):
        if aug_index is not None:
            # Deterministic path: apply the requested permutation.
            return self._cutperm(input, aug_index % self.max_range)

        # Random path: one shared permutation, then a per-sample keep mask.
        permuted = self._cutperm(input, np.random.randint(4))
        keep = torch.bernoulli(input.new_full((input.size(0),), self.prob)).view(-1, 1, 1, 1)
        # keep == 1 -> original sample, keep == 0 -> permuted sample.
        return keep * input + (1 - keep) * permuted

    def _cutperm(self, inputs, aug_index):
        _, _, H, W = inputs.size()
        if aug_index // 2 == 1:
            # Swap top and bottom halves.
            h_mid = int(H / 2)
            inputs = torch.cat((inputs[:, :, h_mid:, :], inputs[:, :, 0:h_mid, :]), dim=2)
        if aug_index % 2 == 1:
            # Swap left and right halves.
            w_mid = int(W / 2)
            inputs = torch.cat((inputs[:, :, :, w_mid:], inputs[:, :, :, 0:w_mid]), dim=3)
        return inputs
class HorizontalFlipLayer(nn.Module):
    """Mirrors each sample horizontally with probability 0.5.

    The flip is realized as an affine warp whose x-scale is a per-sample
    random sign, so the entire batch is handled by one grid_sample call.
    Height and width must be powers of 2, e.g. (32, 32, 1) or (64, 128, 3);
    the last number is the channel count (1 grayscale, 3 RGB).
    """

    def __init__(self):
        super(HorizontalFlipLayer, self).__init__()
        # 2x3 identity affine matrix, registered so it follows .to(device).
        self.register_buffer('_eye', torch.eye(2, 3))

    def forward(self, inputs):
        device = inputs.device
        batch = inputs.size(0)

        theta = self._eye.repeat(batch, 1, 1)
        # Random sign in {-1, +1}, each with probability 0.5; -1 flips x.
        signs = torch.bernoulli(torch.ones(batch, device=device) * 0.5) * 2 - 1
        theta[:, 0, 0] = signs

        grid = F.affine_grid(theta, inputs.size(), **kwargs).to(device)
        return F.grid_sample(inputs, grid, padding_mode='reflection', **kwargs)
class RandomColorGrayLayer(nn.Module):
    """Randomly converts images to 3-channel grayscale with probability `p`.

    aug_index == 0 returns the input untouched; any other non-None aug_index
    forces grayscale for every sample; None mixes per-sample via Bernoulli(p).
    """

    def __init__(self, p):
        super(RandomColorGrayLayer, self).__init__()
        self.prob = p
        # Luma coefficients shaped as a 1x3x1x1 depth-collapsing conv kernel.
        _weight = torch.tensor([[0.299, 0.587, 0.114]])
        self.register_buffer('_weight', _weight.view(1, 3, 1, 1))

    def forward(self, inputs, aug_index=None):
        if aug_index == 0:
            return inputs
        # Collapse RGB to one luma channel, then tile it back to 3 channels.
        luma = F.conv2d(inputs, self._weight)
        gray = torch.cat([luma, luma, luma], dim=1)
        if aug_index is None:
            # Per-sample mask: take the gray version with probability `prob`.
            pick = torch.bernoulli(inputs.new_full((inputs.size(0),), self.prob))
            pick = pick.view(-1, 1, 1, 1)
            gray = inputs * (1 - pick) + gray * pick
        return gray
class ColorJitterLayer(nn.Module):
    """Batched color jitter: randomly perturbs contrast, hue, saturation and
    brightness, applied per-sample with probability `p`."""

    def __init__(self, p, brightness, contrast, saturation, hue):
        super(ColorJitterLayer, self).__init__()
        self.prob = p  # per-sample probability of applying the jitter (e.g. 0.8)
        self.brightness = self._check_input(brightness, 'brightness')  # e.g. 0.4 -> [0.6, 1.4]
        self.contrast = self._check_input(contrast, 'contrast')  # e.g. 0.4 -> [0.6, 1.4]
        self.saturation = self._check_input(saturation, 'saturation')  # e.g. 0.4 -> [0.6, 1.4]
        self.hue = self._check_input(hue, 'hue', center=0, bound=(-0.5, 0.5),
                                     clip_first_on_zero=False)  # e.g. 0.1 -> [-0.1, 0.1]

    def _check_input(self, value, name, center=1, bound=(0, float('inf')), clip_first_on_zero=True):
        # Normalize a jitter spec to a [lo, hi] range, or None for a no-op.
        if isinstance(value, numbers.Number):
            if value < 0:
                raise ValueError("If {} is a single number, it must be non negative.".format(name))
            value = [center - value, center + value]
            if clip_first_on_zero:
                value[0] = max(value[0], 0)
        elif isinstance(value, (tuple, list)) and len(value) == 2:
            if not bound[0] <= value[0] <= value[1] <= bound[1]:
                raise ValueError("{} values should be between {}".format(name, bound))
        else:
            raise TypeError("{} should be a single number or a list/tuple with lenght 2.".format(name))
        # if value is 0 or (1., 1.) for brightness/contrast/saturation
        # or (0., 0.) for hue, do nothing
        if value[0] == value[1] == center:
            value = None
        return value

    def adjust_contrast(self, x):
        # Blend each image with its per-channel mean by a random factor.
        if self.contrast:
            factor = x.new_empty(x.size(0), 1, 1, 1).uniform_(*self.contrast)
            means = torch.mean(x, dim=[2, 3], keepdim=True)  # (B, C, 1, 1)
            x = (x - means) * factor + means
        return torch.clamp(x, 0, 1)  # keep pixel values in [0, 1]

    def adjust_hsv(self, x):
        # Draw per-sample hue shift / saturation / brightness scaling factors.
        f_h = x.new_zeros(x.size(0), 1, 1)
        f_s = x.new_ones(x.size(0), 1, 1)
        f_v = x.new_ones(x.size(0), 1, 1)
        if self.hue:
            f_h.uniform_(*self.hue)  # e.g. in [-0.1, 0.1]
        if self.saturation:
            f_s = f_s.uniform_(*self.saturation)  # e.g. in [0.6, 1.4]
        if self.brightness:
            f_v = f_v.uniform_(*self.brightness)
        # Apply the random HSV perturbation (gradient is a pass-through).
        return RandomHSVFunction.apply(x, f_h, f_s, f_v)

    def transform(self, inputs):
        # Shuffle transform: apply contrast and HSV jitter in random order.
        if np.random.rand() > 0.5:
            transforms = [self.adjust_contrast, self.adjust_hsv]
        else:
            transforms = [self.adjust_hsv, self.adjust_contrast]
        for t in transforms:
            inputs = t(inputs)
        return inputs

    def forward(self, inputs):
        # Per-sample Bernoulli mask: jitter with probability `prob`, else keep.
        _prob = inputs.new_full((inputs.size(0),), self.prob)
        _mask = torch.bernoulli(_prob).view(-1, 1, 1, 1)
        return inputs * (1 - _mask) + self.transform(inputs) * _mask
class RandomHSVFunction(Function):
    """Autograd function applying a random hue/saturation/value perturbation.

    Backward is a straight pass-through: gradients are NOT propagated through
    the RGB<->HSV conversion (treated as an identity for autograd).
    """

    @staticmethod
    def forward(ctx, x, f_h, f_s, f_v):
        # ctx is a context object that can be used to stash information
        # for backward computation
        x = rgb2hsv(x)  # RGB -> HSV (external helper, defined elsewhere in this file)
        h = x[:, 0, :, :]  # hue channel
        h += (f_h * 255. / 360.)  # per-sample hue shift, scaled to the [0, 1) hue range
        h = (h % 1)  # wrap hue around
        x[:, 0, :, :] = h
        x[:, 1, :, :] = x[:, 1, :, :] * f_s  # scale saturation
        x[:, 2, :, :] = x[:, 2, :, :] * f_v  # scale value/brightness
        x = torch.clamp(x, 0, 1)  # clip out-of-range channels
        x = hsv2rgb(x)  # back to RGB
        return x

    @staticmethod
    def backward(ctx, grad_output):
        # We return as many input gradients as there were arguments.
        # Gradients of non-Tensor arguments to forward must be None.
        grad_input = None
        if ctx.needs_input_grad[0]:
            grad_input = grad_output.clone()
        return grad_input, None, None, None
class NormalizeLayer(nn.Module):
    """
    In order to certify radii in original coordinates rather than standardized coordinates, we
    add the Gaussian noise _before_ standardizing, which is why we have standardization be the first
    layer of the classifier rather than as a part of preprocessing as is typical.
    """

    def __init__(self):
        super(NormalizeLayer, self).__init__()

    def forward(self, inputs):
        # Map [0, 1] inputs to [-1, 1] (mean 0.5, std 0.5 per channel);
        # multiplying by 2.0 is bit-identical to dividing by 0.5.
        return (inputs - 0.5) * 2.0
import torch
from torch import Tensor
from torchvision.transforms.functional import to_pil_image, to_tensor
from torch.nn.functional import conv2d, pad as torch_pad
from typing import Any, List, Sequence, Optional
import numbers
import numpy as np
import torch
from PIL import Image
from typing import Tuple
class GaussianBlur(torch.nn.Module):
    """Blurs image with randomly chosen Gaussian blur.
    The image can be a PIL Image or a Tensor, in which case it is expected
    to have [..., C, H, W] shape, where ... means an arbitrary number of leading
    dimensions
    Args:
        kernel_size (int or sequence): Size of the Gaussian kernel.
        sigma (float or tuple of float (min, max)): Standard deviation to be used for
            creating kernel to perform blurring. If float, sigma is fixed. If it is tuple
            of float (min, max), sigma is chosen uniformly at random to lie in the
            given range.
    Returns:
        PIL Image or Tensor: Gaussian blurred version of the input image.
    """

    def __init__(self, kernel_size, sigma=(0.1, 2.0)):
        super().__init__()
        # Normalize kernel_size to an (h, w) pair; each side must be odd and positive.
        self.kernel_size = _setup_size(kernel_size, "Kernel size should be a tuple/list of two integers")
        for ks in self.kernel_size:
            if ks <= 0 or ks % 2 == 0:
                raise ValueError("Kernel size value should be an odd and positive number.")
        # Normalize sigma to a (min, max) range of positive floats.
        if isinstance(sigma, numbers.Number):
            if sigma <= 0:
                raise ValueError("If sigma is a single number, it must be positive.")
            sigma = (sigma, sigma)
        elif isinstance(sigma, Sequence) and len(sigma) == 2:
            if not 0. < sigma[0] <= sigma[1]:
                raise ValueError("sigma values should be positive and of the form (min, max).")
        else:
            raise ValueError("sigma should be a single number or a list/tuple with length 2.")
        self.sigma = sigma

    @staticmethod
    def get_params(sigma_min: float, sigma_max: float) -> float:
        """Choose sigma for random gaussian blurring.
        Args:
            sigma_min (float): Minimum standard deviation that can be chosen for blurring kernel.
            sigma_max (float): Maximum standard deviation that can be chosen for blurring kernel.
        Returns:
            float: Standard deviation to be passed to calculate kernel for gaussian blurring.
        """
        return torch.empty(1).uniform_(sigma_min, sigma_max).item()

    def forward(self, img: Tensor) -> Tensor:
        """
        Args:
            img (PIL Image or Tensor): image to be blurred.
        Returns:
            PIL Image or Tensor: Gaussian blurred image
        """
        # Draw one sigma per call and blur isotropically (same sigma in x and y).
        sigma = self.get_params(self.sigma[0], self.sigma[1])
        return gaussian_blur(img, self.kernel_size, [sigma, sigma])

    def __repr__(self):
        # e.g. "GaussianBlur(kernel_size=(3, 3), sigma=(0.1, 2.0))"
        s = '(kernel_size={}, '.format(self.kernel_size)
        s += 'sigma={})'.format(self.sigma)
        return self.__class__.__name__ + s
@torch.jit.unused
def _is_pil_image(img: Any) -> bool:
    # True when `img` is a PIL Image; excluded from TorchScript (PIL is not scriptable).
    return isinstance(img, Image.Image)
def _setup_size(size, error_msg):
if isinstance(size, numbers.Number):
return int(size), int(size)
if isinstance(size, Sequence) and len(size) == 1:
return size[0], size[0]
if len(size) != 2:
raise ValueError(error_msg)
return size
def _is_tensor_a_torch_image(x: Tensor) -> bool:
return x.ndim >= 2
def _get_gaussian_kernel1d(kernel_size: int, sigma: float) -> Tensor:
ksize_half = (kernel_size - 1) * 0.5
x = torch.linspace(-ksize_half, ksize_half, steps=kernel_size)
pdf = torch.exp(-0.5 * (x / sigma).pow(2))
kernel1d = pdf / pdf.sum()
return kernel1d
def _cast_squeeze_in(img: Tensor, req_dtype: torch.dtype) -> Tuple[Tensor, bool, bool, torch.dtype]:
need_squeeze = False
# make image NCHW
if img.ndim < 4:
img = img.unsqueeze(dim=0)
need_squeeze = True
out_dtype = img.dtype
need_cast = False
if out_dtype != req_dtype:
need_cast = True
img = img.to(req_dtype)
return img, need_cast, need_squeeze, out_dtype
def _cast_squeeze_out(img: Tensor, need_cast: bool, need_squeeze: bool, out_dtype: torch.dtype):
if need_squeeze:
img = img.squeeze(dim=0)
if need_cast:
# it is better to round before cast
img = torch.round(img).to(out_dtype)
return img
def _get_gaussian_kernel2d(
        kernel_size: List[int], sigma: List[float], dtype: torch.dtype, device: torch.device
) -> Tensor:
    """Build a 2-D Gaussian kernel as the outer product of two 1-D kernels
    (rows follow the y axis, columns the x axis)."""
    kernel_x = _get_gaussian_kernel1d(kernel_size[0], sigma[0]).to(device, dtype=dtype)
    kernel_y = _get_gaussian_kernel1d(kernel_size[1], sigma[1]).to(device, dtype=dtype)
    return torch.mm(kernel_y[:, None], kernel_x[None, :])
def _gaussian_blur(img: Tensor, kernel_size: List[int], sigma: List[float]) -> Tensor:
    """PRIVATE METHOD. Performs Gaussian blurring on the img by given kernel.
    .. warning::
        Module ``transforms.functional_tensor`` is private and should not be used in user application.
        Please, consider instead using methods from `transforms.functional` module.
    Args:
        img (Tensor): Image to be blurred
        kernel_size (sequence of int or int): Kernel size of the Gaussian kernel ``(kx, ky)``.
        sigma (sequence of float or float, optional): Standard deviation of the Gaussian kernel ``(sx, sy)``.
    Returns:
        Tensor: An image that is blurred using gaussian kernel of given parameters
    """
    if not (isinstance(img, torch.Tensor) or _is_tensor_a_torch_image(img)):
        raise TypeError('img should be Tensor Image. Got {}'.format(type(img)))
    # Compute the kernel in float; integer images are cast in and rounded back out.
    dtype = img.dtype if torch.is_floating_point(img) else torch.float32
    kernel = _get_gaussian_kernel2d(kernel_size, sigma, dtype=dtype, device=img.device)
    # One identical kernel per channel, applied depthwise via groups=C below.
    kernel = kernel.expand(img.shape[-3], 1, kernel.shape[0], kernel.shape[1])
    img, need_cast, need_squeeze, out_dtype = _cast_squeeze_in(img, kernel.dtype)
    # padding = (left, right, top, bottom)
    padding = [kernel_size[0] // 2, kernel_size[0] // 2, kernel_size[1] // 2, kernel_size[1] // 2]
    img = torch_pad(img, padding, mode="reflect")  # reflect-pad so output keeps H x W
    img = conv2d(img, kernel, groups=img.shape[-3])
    img = _cast_squeeze_out(img, need_cast, need_squeeze, out_dtype)
    return img
def gaussian_blur(img: Tensor, kernel_size: List[int], sigma: Optional[List[float]] = None) -> Tensor:
    """Performs Gaussian blurring on the img by given kernel.
    The image can be a PIL Image or a Tensor, in which case it is expected
    to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions
    Args:
        img (PIL Image or Tensor): Image to be blurred
        kernel_size (sequence of ints or int): Gaussian kernel size. Can be a sequence of integers
            like ``(kx, ky)`` or a single integer for square kernels.
            In torchscript mode kernel_size as single int is not supported, use a tuple or
            list of length 1: ``[ksize, ]``.
        sigma (sequence of floats or float, optional): Gaussian kernel standard deviation. Can be a
            sequence of floats like ``(sigma_x, sigma_y)`` or a single float to define the
            same sigma in both X/Y directions. If None, then it is computed using
            ``kernel_size`` as ``sigma = 0.3 * ((kernel_size - 1) * 0.5 - 1) + 0.8``.
            Default, None. In torchscript mode sigma as single float is
            not supported, use a tuple or list of length 1: ``[sigma, ]``.
    Returns:
        PIL Image or Tensor: Gaussian Blurred version of the image.
    """
    # --- validate / normalize kernel_size to [kx, ky] ---
    if not isinstance(kernel_size, (int, list, tuple)):
        raise TypeError('kernel_size should be int or a sequence of integers. Got {}'.format(type(kernel_size)))
    if isinstance(kernel_size, int):
        kernel_size = [kernel_size, kernel_size]
    if len(kernel_size) != 2:
        raise ValueError('If kernel_size is a sequence its length should be 2. Got {}'.format(len(kernel_size)))
    for ksize in kernel_size:
        if ksize % 2 == 0 or ksize < 0:
            raise ValueError('kernel_size should have odd and positive integers. Got {}'.format(kernel_size))
    # --- derive / validate sigma as [sx, sy] ---
    if sigma is None:
        # Algebraically identical to the docstring formula 0.3*((k-1)*0.5-1)+0.8.
        sigma = [ksize * 0.15 + 0.35 for ksize in kernel_size]
    if sigma is not None and not isinstance(sigma, (int, float, list, tuple)):
        raise TypeError('sigma should be either float or sequence of floats. Got {}'.format(type(sigma)))
    if isinstance(sigma, (int, float)):
        sigma = [float(sigma), float(sigma)]
    if isinstance(sigma, (list, tuple)) and len(sigma) == 1:
        sigma = [sigma[0], sigma[0]]
    if len(sigma) != 2:
        raise ValueError('If sigma is a sequence, its length should be 2. Got {}'.format(len(sigma)))
    for s in sigma:
        if s <= 0.:
            raise ValueError('sigma should have positive values. Got {}'.format(sigma))
    # --- convert PIL input to tensor, blur, convert back if needed ---
    t_img = img
    if not isinstance(img, torch.Tensor):
        if not _is_pil_image(img):
            raise TypeError('img should be PIL Image or Tensor. Got {}'.format(type(img)))
        t_img = to_tensor(img)
    output = _gaussian_blur(t_img, kernel_size, sigma)
    if not isinstance(img, torch.Tensor):
        output = to_pil_image(output)
    return output
| 23,890 | 36.388106 | 117 | py |
GSA | GSA-main/GSA_CVPR/CSL/utils.py | import os
import pickle
import random
import shutil
import sys
from datetime import datetime
import numpy as np
import torch
from matplotlib import pyplot as plt
from tensorboardX import SummaryWriter
class Logger(object):
    """Reference: https://gist.github.com/gyglim/1f8dfb1b5c82627ae3efcfbbadb9f514"""

    def __init__(self, fn, ask=True, local_rank=0):
        # Only rank 0 creates directories and writes logs (distributed training).
        self.local_rank = local_rank
        if self.local_rank == 0:
            if not os.path.exists("./logs/"):
                os.mkdir("./logs/")
            logdir = self._make_dir(fn)
            if not os.path.exists(logdir):
                os.mkdir(logdir)
            if len(os.listdir(logdir)) != 0 and ask:
                # Refuse to silently clobber an existing run's logs.
                ans = input("log_dir is not empty. All data inside log_dir will be deleted. "
                            "Will you proceed [y/N]? ")
                if ans in ['y', 'Y']:
                    shutil.rmtree(logdir)
                else:
                    exit(1)
            self.set_dir(logdir)

    def _make_dir(self, fn):
        # NOTE(review): `today` is computed but unused — the dir is just logs/<fn>.
        today = datetime.today().strftime("%y%m%d")
        logdir = 'logs/' + fn
        return logdir

    def set_dir(self, logdir, log_fn='log.txt'):
        """Point the logger at `logdir`, creating it, a SummaryWriter, and the text log."""
        self.logdir = logdir
        if not os.path.exists(logdir):
            os.mkdir(logdir)
        self.writer = SummaryWriter(logdir)
        self.log_file = open(os.path.join(logdir, log_fn), 'a')

    def log(self, string):
        """Write a timestamped line to both the log file and stdout (rank 0 only)."""
        if self.local_rank == 0:
            self.log_file.write('[%s] %s' % (datetime.now(), string) + '\n')
            self.log_file.flush()
            print('[%s] %s' % (datetime.now(), string))
            sys.stdout.flush()

    def log_dirname(self, string):
        """Log a message suffixed with the current log directory."""
        if self.local_rank == 0:
            self.log_file.write('%s (%s)' % (string, self.logdir) + '\n')
            self.log_file.flush()
            print('%s (%s)' % (string, self.logdir))
            sys.stdout.flush()

    def scalar_summary(self, tag, value, step):
        """Log a scalar variable."""
        if self.local_rank == 0:
            self.writer.add_scalar(tag, value, step)

    def image_summary(self, tag, images, step):
        """Log a list of images."""
        if self.local_rank == 0:
            self.writer.add_image(tag, images, step)

    def histo_summary(self, tag, values, step):
        """Log a histogram of the tensor of values."""
        if self.local_rank == 0:
            self.writer.add_histogram(tag, values, step, bins='auto')
class AverageMeter(object):
    """Computes and stores the average and current value"""

    def __init__(self):
        self.reset()

    def reset(self):
        # Clear all running statistics.
        self.value = 0
        self.average = 0
        self.sum = 0
        self.count = 0

    def update(self, value, n=1):
        """Record `value` observed `n` times and refresh the running mean."""
        self.value = value
        self.sum += value * n
        self.count += n
        self.average = self.sum / self.count
def load_checkpoint(logdir, mode='last'):
    """Load (model_state, optim_state, cfg) saved under `logdir`.

    `mode` selects the 'last' or 'best' snapshot; any other mode raises
    NotImplementedError. Returns (None, None, None) when no checkpoint exists.
    """
    if mode not in ('last', 'best'):
        raise NotImplementedError()
    model_path = os.path.join(logdir, mode + '.model')
    optim_path = os.path.join(logdir, mode + '.optim')
    config_path = os.path.join(logdir, mode + '.config')
    print("=> Loading checkpoint from '{}'".format(logdir))
    if not os.path.exists(model_path):
        return None, None, None
    model_state = torch.load(model_path)
    optim_state = torch.load(optim_path)
    with open(config_path, 'rb') as handle:
        cfg = pickle.load(handle)
    return model_state, optim_state, cfg
def save_checkpoint(epoch, model_state, optim_state, logdir):
    """Persist the latest model/optimizer states plus the epoch marker under `logdir`."""
    last_model = os.path.join(logdir, 'last.model')
    last_optim = os.path.join(logdir, 'last.optim')
    last_config = os.path.join(logdir, 'last.config')
    torch.save(model_state, last_model)
    torch.save(optim_state, last_optim)
    # Store bookkeeping info (currently just the epoch) alongside the weights.
    with open(last_config, 'wb') as handle:
        pickle.dump({'epoch': epoch}, handle, protocol=pickle.HIGHEST_PROTOCOL)
def load_linear_checkpoint(logdir, mode='last'):
    """Load the linear-head optimizer state from `logdir`, or None when absent.

    `mode` selects the 'last' or 'best' snapshot; anything else raises
    NotImplementedError.
    """
    if mode not in ('last', 'best'):
        raise NotImplementedError()
    linear_optim_path = os.path.join(logdir, mode + '.linear_optim')
    print("=> Loading linear optimizer checkpoint from '{}'".format(logdir))
    if not os.path.exists(linear_optim_path):
        return None
    return torch.load(linear_optim_path)
def save_linear_checkpoint(linear_optim_state, logdir):
    """Persist the linear-head optimizer state as `last.linear_optim` in `logdir`."""
    target = os.path.join(logdir, 'last.linear_optim')
    torch.save(linear_optim_state, target)
def set_random_seed(seed):
    """Seed Python's, NumPy's and torch's (CPU + CUDA) RNGs for reproducibility."""
    for seeder in (random.seed, np.random.seed, torch.manual_seed, torch.cuda.manual_seed):
        seeder(seed)
def normalize(x, dim=1, eps=1e-8):
    """L2-normalize `x` along `dim`, guarding against zero norms with `eps`."""
    denom = x.norm(dim=dim, keepdim=True) + eps
    return x / denom
def make_model_diagrams(probs, labels, n_bins=10):
    """
    Plot a reliability diagram for a batch of predictions.

    probs  - a torch tensor (size n x num_classes) with the predicted
             probabilities (softmax outputs, NOT the raw logits)
    labels - a torch tensor (size n) with the ground-truth labels
    Returns the matplotlib figure.
    """
    confidences, predictions = probs.max(1)
    accuracies = torch.eq(predictions, labels)
    # BUG FIX: plt.subplots(1, 2) returns an ARRAY of axes; the original called
    # .bar() on the array itself (AttributeError). Draw on the first axis.
    f, (rel_ax, _) = plt.subplots(1, 2, figsize=(4, 2.5))
    # Reliability diagram
    bins = torch.linspace(0, 1, n_bins + 1)
    bins[-1] = 1.0001  # make the last bin closed so confidence == 1.0 is counted
    width = bins[1] - bins[0]
    bin_indices = [confidences.ge(bin_lower) * confidences.lt(bin_upper)
                   for bin_lower, bin_upper in zip(bins[:-1], bins[1:])]
    # BUG FIX: the original kept Python lists, which support neither `.numpy()`
    # nor elementwise subtraction; stack the per-bin stats into tensors.
    # `.float()` is needed because torch.mean is undefined for bool tensors.
    # Empty bins yield NaN means and simply draw no bar.
    bin_corrects = torch.stack([torch.mean(accuracies[idx].float()) for idx in bin_indices])
    bin_scores = torch.stack([torch.mean(confidences[idx]) for idx in bin_indices])
    confs = rel_ax.bar(bins[:-1].numpy(), bin_corrects.numpy(), width=width.item())
    gaps = rel_ax.bar(bins[:-1].numpy(), (bin_scores - bin_corrects).numpy(),
                      bottom=bin_corrects.numpy(), color=[1, 0.7, 0.7],
                      alpha=0.5, width=width.item(), hatch='//', edgecolor='r')
    rel_ax.plot([0, 1], [0, 1], '--', color='gray')  # perfect-calibration diagonal
    rel_ax.legend([confs, gaps], ['Outputs', 'Gap'], loc='best', fontsize='small')
    # Clean up
    rel_ax.set_ylabel('Accuracy')
    rel_ax.set_xlabel('Confidence')
    f.tight_layout()
    return f
| 6,511 | 30.61165 | 119 | py |
GSA | GSA-main/GSA_CVPR/CSL/classifier.py | import torch.nn as nn
#from models.resnet import ResNet18, ResNet34, ResNet50
#from models.resnet_imagenet import resnet18, resnet50
from CSL import tao as TL
def get_simclr_augmentation(P, image_size):
    """Build the SimCLR augmentation pipeline (as an nn.Sequential of tensor
    transforms from the TL module) for the dataset configured in `P`."""
    # parameter for resizecrop
    # P.resize_fix = False
    resize_scale = (P.resize_factor, 1.0)  # resize scaling factor, default [0.08, 1]
    # if P.resize_fix:  # if resize_fix is True, use same scale
    #     resize_scale = (P.resize_factor, P.resize_factor)
    # Align augmentation
    color_jitter = TL.ColorJitterLayer(brightness=0.4, contrast=0.4, saturation=0.4, hue=0.1, p=0.8)
    color_gray = TL.RandomColorGrayLayer(p=0.2)
    resize_crop = TL.RandomResizedCropLayer(scale=resize_scale, size=image_size)
    # Transform define #
    print("P", P.dataset)
    if P.dataset == 'imagenet':  # Using RandomResizedCrop at PIL transform
        transform = nn.Sequential(
            color_jitter,
            color_gray,
        )
    elif P.dataset == 'split_mnist':
        print("MNOSTYYY")
        transform = nn.Sequential(
            # Grayscale inputs: only the random resized crop (keeps spatial size).
            resize_crop,
        )
    elif P.dataset == "mnist":
        transform = nn.Sequential(
            # Grayscale inputs: only the random resized crop (keeps spatial size).
            resize_crop,
        )
    elif P.dataset == "cifar10":
        transform = nn.Sequential(
            color_jitter,  # perturbs channel values, does not change size
            color_gray,  # random grayscale, size unchanged
            resize_crop,  # affine re-sampling, size unchanged
        )
    return transform
def get_shift_module(P, eval=False):
    """Return (shift_transform, K_shift) for the configured shifting transform.

    K_shift is the number of shifted views the transform produces: 4 for
    rotation/cutperm, 1 for the identity fallback.
    """
    if P.shift_trans_type == 'rotation':
        shift_transform, K_shift = TL.Rotation(), 4
    elif P.shift_trans_type == 'cutperm':
        shift_transform, K_shift = TL.CutPerm(), 4
    else:
        # No shifting transform: identity, a single "shift class".
        shift_transform, K_shift = nn.Identity(), 1
    if not eval and 'sup' not in P.mode:
        # The contrastive batch is split evenly across the K shifted views.
        assert P.batch_size == int(128 / K_shift)
    return shift_transform, K_shift
def get_shift_classifer(model, K_shift):
    """Attach a linear head that predicts which of the K_shift shifts was applied."""
    head = nn.Linear(model.last_dim, K_shift)
    model.shift_cls_layer = head
    return model
def get_classifier(mode, n_classes=10):
    """Instantiate a backbone network by name.

    NOTE(review): the ResNet constructors referenced here come from imports
    that are commented out at the top of this file — they must be provided
    elsewhere before this function is called, or a NameError will be raised.
    """
    if mode == 'resnet18':
        classifier = ResNet18(num_classes=n_classes)
    elif mode == 'resnet34':
        classifier = ResNet34(num_classes=n_classes)
    elif mode == 'resnet50':
        classifier = ResNet50(num_classes=n_classes)
    elif mode == 'resnet18_imagenet':
        classifier = resnet18(num_classes=n_classes)
    elif mode == 'resnet50_imagenet':
        classifier = resnet50(num_classes=n_classes)
    else:
        raise NotImplementedError()
    return classifier
| 2,686 | 27.585106 | 100 | py |
GSA | GSA-main/GSA_CVPR/CSL/shedular.py | from torch.optim.lr_scheduler import _LRScheduler
from torch.optim.lr_scheduler import ReduceLROnPlateau
class GradualWarmupScheduler(_LRScheduler):
    """ Gradually warm-up(increasing) learning rate in optimizer.
    Proposed in 'Accurate, Large Minibatch SGD: Training ImageNet in 1 Hour'.
    Args:
        optimizer (Optimizer): Wrapped optimizer.
        multiplier: target learning rate = base lr * multiplier if multiplier > 1.0. if multiplier = 1.0, lr starts from 0 and ends up with the base_lr.
        total_epoch: target learning rate is reached at total_epoch, gradually
        after_scheduler: after target_epoch, use this scheduler(eg. ReduceLROnPlateau)
    """

    def __init__(self, optimizer, multiplier, total_epoch, after_scheduler=None):
        self.multiplier = multiplier
        if self.multiplier < 1.:
            raise ValueError('multiplier should be greater thant or equal to 1.')
        self.total_epoch = total_epoch
        self.after_scheduler = after_scheduler
        self.finished = False  # set once warm-up hands over to after_scheduler
        super(GradualWarmupScheduler, self).__init__(optimizer)

    def get_lr(self):
        if self.last_epoch > self.total_epoch:
            # Warm-up finished: delegate to the follow-up scheduler (rescaling
            # its base lrs exactly once), or hold at base_lr * multiplier.
            if self.after_scheduler:
                if not self.finished:
                    self.after_scheduler.base_lrs = [base_lr * self.multiplier for base_lr in self.base_lrs]
                    self.finished = True
                return self.after_scheduler.get_lr()
            return [base_lr * self.multiplier for base_lr in self.base_lrs]
        if self.multiplier == 1.0:
            # Linear ramp from 0 up to base_lr.
            return [base_lr * (float(self.last_epoch) / self.total_epoch) for base_lr in self.base_lrs]
        else:
            # Linear ramp from base_lr up to base_lr * multiplier.
            return [base_lr * ((self.multiplier - 1.) * self.last_epoch / self.total_epoch + 1.) for base_lr in self.base_lrs]

    def step_ReduceLROnPlateau(self, metrics, epoch=None):
        # ReduceLROnPlateau needs the metric value, so it cannot go through the
        # regular step(); warm up the lr manually, then forward to it.
        if epoch is None:
            epoch = self.last_epoch + 1
        self.last_epoch = epoch if epoch != 0 else 1  # ReduceLROnPlateau is called at the end of epoch, whereas others are called at beginning
        if self.last_epoch <= self.total_epoch:
            warmup_lr = [base_lr * ((self.multiplier - 1.) * self.last_epoch / self.total_epoch + 1.) for base_lr in self.base_lrs]
            for param_group, lr in zip(self.optimizer.param_groups, warmup_lr):
                param_group['lr'] = lr
        else:
            # NOTE(review): `epoch` can no longer be None here (reassigned above),
            # so only the second branch is reachable.
            if epoch is None:
                self.after_scheduler.step(metrics, None)
            else:
                self.after_scheduler.step(metrics, epoch - self.total_epoch)

    def step(self, epoch=None, metrics=None):
        if type(self.after_scheduler) != ReduceLROnPlateau:
            if self.finished and self.after_scheduler:
                # Forward post-warm-up steps to the follow-up scheduler,
                # shifted so it starts counting from its own epoch 0.
                if epoch is None:
                    self.after_scheduler.step(None)
                else:
                    self.after_scheduler.step(epoch - self.total_epoch)
            else:
                return super(GradualWarmupScheduler, self).step(epoch)
        else:
            self.step_ReduceLROnPlateau(metrics, epoch)
| 3,069 | 46.96875 | 152 | py |
GSA | GSA-main/GSA_CVPR/CSL/contrastive_learning.py | import torch
import torch.distributed as dist
import diffdist.functional as distops
import torch.nn as nn
import torch.nn.functional as F
def get_similarity_matrix(outputs, chunk=2, multi_gpu=False):
    '''
    Compute similarity matrix
    - outputs: (B', d) tensor for B' = B * chunk
    - sim_matrix: (B', B') tensor
    '''
    if multi_gpu:
        # Gather each chunk from every worker so the matrix covers the global batch.
        gathered = []
        for part in outputs.chunk(chunk):
            buffers = [torch.empty_like(part) for _ in range(dist.get_world_size())]
            gathered.append(torch.cat(distops.all_gather(buffers, part)))
        outputs = torch.cat(gathered)
    # Inner products between every pair of embeddings: sim(z(x), z(x')).
    return torch.mm(outputs, outputs.t())
def NT_xent(sim_matrix, temperature=0.5, chunk=2, eps=1e-8):
    '''
    Compute NT_xent loss
    - sim_matrix: (B', B') tensor for B' = B * chunk (first 2B are pos samples)
    '''
    device = sim_matrix.device
    B = sim_matrix.size(0) // chunk  # per-view batch size
    self_mask = torch.eye(B * chunk).to(device)
    # Temperature-scaled similarities with self-similarities zeroed out.
    scaled = torch.exp(sim_matrix / temperature) * (1 - self_mask)
    denom = torch.sum(scaled, dim=1, keepdim=True)  # softmax denominator per anchor
    log_probs = -torch.log(scaled / (denom + eps) + eps)
    # Positive pairs sit at offset B: (i, i+B) and (i+B, i); average both directions.
    positives = log_probs[:B, B:].diag() + log_probs[B:, :B].diag()
    return torch.sum(positives) / (2 * B)
def Supervised_NT_xent(sim_matrix, labels, temperature=0.5, chunk=2, eps=1e-8, multi_gpu=False):
    '''
    Compute NT_xent loss
    - sim_matrix: (B', B') tensor for B' = B * chunk (first 2B are pos samples)
    '''
    device = sim_matrix.device
    view_labels = labels.repeat(2)  # labels duplicated for both augmented views
    # Subtract the per-row max before exponentiating, for numerical stability.
    row_max, _ = torch.max(sim_matrix, dim=1, keepdim=True)
    sim_matrix = sim_matrix - row_max.detach()
    B = sim_matrix.size(0) // chunk
    self_mask = torch.eye(B * chunk).to(device)
    sim_matrix = torch.exp(sim_matrix / temperature) * (1 - self_mask)  # drop self-pairs
    denom = torch.sum(sim_matrix, dim=1, keepdim=True)
    sim_matrix = -torch.log(sim_matrix / (denom + eps) + eps)  # per-pair NLL matrix
    view_labels = view_labels.contiguous().view(-1, 1)
    same_class = torch.eq(view_labels, view_labels.t()).float().to(device)
    same_class = same_class / (same_class.sum(dim=1, keepdim=True) + eps)  # row-normalized
    a = 1
    b = 1  # all is 1 means 2:1,-0.5&1 is1:2 no,all 1 is 1+1/n:n-1/n
    # Weighted sum of the supervised same-class term and the SimCLR positive-pair term.
    sup_term = torch.sum(same_class * sim_matrix) / (2 * B)
    simclr_term = torch.sum(sim_matrix[:B, B:].diag() + sim_matrix[B:, :B].diag()) / (2 * B)
    return a * simclr_term + b * sup_term
def Sup(sim_matrix, labels, temperature=0.5, chunk=2, eps=1e-8, multi_gpu=False):
    '''
    Supervised contrastive-style loss over a precomputed similarity matrix.
    - sim_matrix: (B', B') tensor for B' = B * chunk (first 2B are pos samples)
    - labels: (B,) tensor; repeated internally across the two views.
    Returns the mean -log(sim) over same-label pairs, self-pairs excluded.
    '''
    device = sim_matrix.device
    labels1 = labels
    if multi_gpu:
        # Collective gather across workers. NOTE(review): the gathered result
        # was never used by the original code either; the collective call is
        # kept so distributed workers stay in sync — confirm intent.
        gather_t = [torch.empty_like(labels1) for _ in range(dist.get_world_size())]
        labels = torch.cat(distops.all_gather(gather_t, labels1))
    labels1 = labels1.repeat(2)
    B = sim_matrix.size(0) // chunk  # B = B' / chunk
    eye = torch.eye(B * chunk).to(device)
    # BUG FIX: the original called `torch.max(sim_matrix, eps)[0]` with a float
    # `eps`, which raises a TypeError (and the `[0]` would have taken only the
    # first row of an elementwise max). The intent is an elementwise floor
    # before the log, i.e. a clamp; the diagonal (self-pairs) is then zeroed.
    log_sim = -torch.log(torch.clamp(sim_matrix, min=eps)) * (1 - eye)
    labels1 = labels1.contiguous().view(-1, 1)
    mask = torch.eq(labels1, labels1.t()).float().to(device)
    mask = mask / (mask.sum(dim=1, keepdim=True) + eps)  # average over same-label pairs
    return torch.sum(mask * log_sim) / (2 * B)
class SupConLoss(nn.Module):
    """Supervised Contrastive Learning: https://arxiv.org/pdf/2004.11362.pdf.
    It also supports the unsupervised contrastive loss in SimCLR"""

    def __init__(self, temperature=0.07, contrast_mode='all',
                 base_temperature=0.07):
        super(SupConLoss, self).__init__()
        self.temperature = temperature
        self.contrast_mode = contrast_mode
        self.base_temperature = base_temperature

    def forward(self, features, labels=None, mask=None):
        """Compute loss for model. If both `labels` and `mask` are None,
        it degenerates to SimCLR unsupervised loss:
        https://arxiv.org/pdf/2002.05709.pdf
        Args:
            features: hidden vector of shape [bsz, n_views, ...].
            labels: ground truth of shape [bsz].
            mask: contrastive mask of shape [bsz, bsz], mask_{i,j}=1 if sample j
                has the same class as sample i. Can be asymmetric.
        Returns:
            A loss scalar.
        """
        device = (torch.device('cuda')
                  if features.is_cuda
                  else torch.device('cpu'))
        if len(features.shape) < 3:
            raise ValueError('`features` needs to be [bsz, n_views, ...],'
                             'at least 3 dimensions are required')
        if len(features.shape) > 3:
            # Flatten any trailing feature dims into a single vector per view.
            features = features.view(features.shape[0], features.shape[1], -1)
        batch_size = features.shape[0]
        if labels is not None and mask is not None:
            raise ValueError('Cannot define both `labels` and `mask`')
        elif labels is None and mask is None:
            # Unsupervised case: each sample is its own positive class.
            mask = torch.eye(batch_size, dtype=torch.float32).to(device)
        elif labels is not None:
            labels = labels.contiguous().view(-1, 1)
            if labels.shape[0] != batch_size:
                raise ValueError('Num of labels does not match num of features')
            # mask[i, j] = 1 when samples i and j share a label.
            mask = torch.eq(labels, labels.T).float().to(device)
        else:
            mask = mask.float().to(device)
        contrast_count = features.shape[1]  # number of views per sample (e.g. 2)
        # Unstack the views and concatenate them along the batch dimension.
        contrast_feature = torch.cat(torch.unbind(features, dim=1), dim=0)
        if self.contrast_mode == 'one':
            # Only the first view of each sample serves as an anchor.
            anchor_feature = features[:, 0]
            anchor_count = 1
        elif self.contrast_mode == 'all':
            # Every view of every sample is an anchor.
            anchor_feature = contrast_feature
            anchor_count = contrast_count
        else:
            raise ValueError('Unknown mode: {}'.format(self.contrast_mode))
        # compute logits: temperature-scaled inner products
        anchor_dot_contrast = torch.div(
            torch.matmul(anchor_feature, contrast_feature.T),
            self.temperature)
        # for numerical stability, subtract the per-row max before exp
        logits_max, _ = torch.max(anchor_dot_contrast, dim=1, keepdim=True)
        logits = anchor_dot_contrast - logits_max.detach()
        # tile mask across the anchor/contrast view copies
        mask = mask.repeat(anchor_count, contrast_count)
        # mask-out self-contrast cases (zeros on the diagonal)
        logits_mask = torch.scatter(
            torch.ones_like(mask),
            1,
            torch.arange(batch_size * anchor_count).view(-1, 1).to(device),
            0
        )
        mask = mask * logits_mask  # drop each anchor's comparison with itself
        # compute log_prob
        exp_logits = torch.exp(logits) * logits_mask
        log_prob = logits - torch.log(exp_logits.sum(1, keepdim=True))
        # compute mean of log-likelihood over the positive pairs
        mean_log_prob_pos = (mask * log_prob).sum(1) / mask.sum(1)
        # loss
        loss = - (self.temperature / self.base_temperature) * mean_log_prob_pos
        loss = loss.view(anchor_count, batch_size).mean()
        return loss
| 9,602 | 37.107143 | 112 | py |
GSA | GSA-main/GSA_CVPR/CSL/general_loss.py | import torch
import numpy
def generalized_contrastive_loss(
        hidden1,
        hidden2,
        lambda_weight=0.5,
        temperature=0.5,
        dist='normal',
        hidden_norm=True,
        loss_scaling=2.0):
    """Generalized contrastive loss.
    Both hidden1 and hidden2 should have shape of (n, d).
    Configurations to get following losses:
    * decoupled NT-Xent loss: set dist='logsumexp', hidden_norm=True
    * SWD with normal distribution: set dist='normal', hidden_norm=False
    * SWD with uniform hypersphere: set dist='normal', hidden_norm=True
    * SWD with uniform hypercube: set dist='uniform', hidden_norm=False

    Returns (loss, loss_align, loss_dist_match). NOTE(review): requires CUDA —
    the random projection is created directly on the GPU.
    """
    hidden_dim = hidden1.shape[-1]  # get hidden dimension
    if hidden_norm:
        # L2-normalize both views onto the unit sphere.
        hidden1 = hidden1 / (hidden1.norm(dim=1, keepdim=True) + 1e-8)
        hidden2 = hidden2 / (hidden2.norm(dim=1, keepdim=True) + 1e-8)
    # Alignment term: half the mean squared distance between the two views.
    loss_align = torch.mean((hidden1 - hidden2)**2)/2
    hiddens = torch.cat([hidden1, hidden2], 0)
    if dist == 'logsumexp':
        loss_dist_match = get_logsumexp_loss(hiddens, temperature)
    else:
        # Random orthogonal projection for the sliced-Wasserstein distance term.
        a = torch.empty([hidden_dim, hidden_dim]).normal_(0, 1).cuda()
        rand_w = torch.nn.init.orthogonal_(a).cuda()
        # get_swd_loss is an external helper (defined elsewhere in this file).
        loss_dist_match = get_swd_loss(hiddens, rand_w,
                                       prior=dist,
                                       hidden_norm=hidden_norm)
    # Final loss: distribution-matching term minus the alignment term, scaled.
    a = loss_scaling * (-loss_align + lambda_weight * loss_dist_match)
    return a, loss_align, loss_dist_match
def get_logsumexp_loss(states, temperature):
    """Decoupled NT-Xent style distribution term: mean log-sum-exp of
    pairwise similarity scores over the batch."""
    pairwise = torch.matmul(states, states.t()).cuda()  # (bsz, bsz)
    # NOTE(review): `bias` is computed but never used downstream;
    # kept only to preserve the original behaviour exactly.
    bias = torch.log(torch.tensor(states.shape[1]).float()).cuda()
    exp_sum = torch.sum(torch.exp(pairwise / temperature), dim=1) + 1e-8
    return torch.mean(torch.log(exp_sum).cuda()).cuda()
def sort(x):
    """Return the matrix x where each row is sorted (ascending).

    Fix/generalisation: the original detached to NumPy, ran `argsort` on the
    CPU, moved the index tensor back with a hard `.cuda()` and gathered —
    which crashed for CPU tensors and cost a host<->device round-trip.
    `torch.sort` yields the same sorted values, stays on the input's own
    device (CPU or CUDA), and keeps the op differentiable.
    """
    return torch.sort(x, dim=-1)[0]
'''
xshape = x.shape
print(xshape[1])
rank = torch.sum((x.unsqueeze(2) > x.unsqueeze(1)), dim=2).cuda()
print("r",rank)
for i in range(128):
for j in range(128):
if rank[i][j] < 0:
print(rank[i][j])
elif rank[i][j] >= 128:
print("r",rank[i][j])
rank_inv = torch.einsum(
'dbc,c->db',
torch.Tensor.permute(torch.nn.functional.one_hot(rank.long(), xshape[1]), [0, 2, 1]).float().cuda(),
torch.arange(xshape[1]).float().cuda()).cuda() # (dim, bsz)
# x = gather_nd(x, rank_inv.int(), axis=-1, batch_dims=-1)
q= torch.nn.functional.one_hot(rank, xshape[1]).transpose(2,1).float().cpu()
print("a")
#q=torch.from_numpy(numpy.transpose(torch.nn.functional.one_hot(rank, xshape[1]).int().cpu().numpy(), [0, 2, 1])).float().cuda()
for i in range(128):
print(torch.sum(q[31][i]),i)
print(q.shape)
print(torch.sum(q[31][60]))
t = torch.matmul(q[31][60], torch.arange(xshape[1]).float())
print(t)
t = numpy.array(t.cpu())
q = numpy.array(q.cpu())
#numpy.savetxt('/home/guoyd/Dataset/np2.txt', t)
numpy.savetxt('/home/guoyd/Dataset/np.txt', q[31][60])
# t=torch.matmul(q[31],torch.arange(xshape[1]).float().cuda())
# torch.arange(xshape[1]).float().cuda().cuda())
#print("rr",q==rank_inv)
#l=[]
# w=False
# s=0
for i in range(128):
for j in range(128):
if rank_inv[i][j]<0:
print(rank_inv[i][j])
elif rank_inv[i][j]>=128:
print(rank_inv[i][j],i,j)
w=True
s=i
#for s in range(128):
# print(rank_inv[31][s])
#if w:
# for j in range(128):
# l.append(rank_inv[s][j])
#l=l.sort()
#for i in range(128):
# print(l[i])
p=list(rank_inv[s][:])
p.sort()
n=0
for i in range(len(p)):
print(p[i],len(p),n)
n=n+1
#print(rank_inv[i][s])
b = torch.gather(x, -1, rank_inv.long().cuda())
#print("b",b)
'''
# return b
def get_swd_loss(states, rand_w, prior='normal', stddev=1., hidden_norm=True):
    """Sliced Wasserstein distance between `states` and samples of `prior`.

    states: (bsz, d) features; rand_w: (d, d) random orthogonal projection.
    NOTE(review): `stddev` is accepted but not used — the normal prior is
    drawn with mean 1e-6 / std 1+1e-8 regardless; kept for compatibility.
    """
    batch_shape = states.shape
    projected = torch.matmul(states, rand_w)
    projected_sorted = sort(projected.t())  # (dim, bsz)
    # Draw reference samples from the requested prior distribution.
    if prior == 'normal':
        reference = torch.empty(batch_shape).normal_(mean=1e-6, std=1 + 1e-8)
    elif prior == 'uniform':
        reference = torch.empty(batch_shape).uniform_(-1.0, 1.0)
    else:
        raise ValueError('Unknown prior {}'.format(prior))
    if hidden_norm:
        reference = reference / (reference.norm(dim=1, keepdim=True) + 1e-8)
    reference = torch.matmul(reference.cuda(), rand_w)
    reference_sorted = sort(reference.t())  # (dim, bsz)
    # Mean squared distance between the sorted 1-D projections.
    return torch.mean((reference_sorted - projected_sorted) ** 2)
'''
def get_contrastive_loss(z1, z2, nt_xent_temp): # [batch_size, dim]
batch_size = tf.shape(z1)[0]
dim = tf.shape(z1)[1]
z1 = tf.math.l2_normalize(z1, -1)
z2 = tf.math.l2_normalize(z2, -1)
sim = tf.matmul(z1, z2, transpose_b=True) # [batch_size, batch_size]
sim /= nt_xent_temp
labels = tf.eye(batch_size)
loss = (
get_cls_loss(labels, sim) +
get_cls_loss(labels, tf.transpose(sim))
)
return tf.reduce_mean(loss), sim
def get_cls_loss(labels, outputs):
return tf.reduce_mean(cls_loss_object(labels, outputs))
cls_loss_object = tf.keras.losses.CategoricalCrossentropy(
from_logits=True,
reduction=tf.keras.losses.Reduction.NONE)
'''
| 6,102 | 30.621762 | 130 | py |
FMLD | FMLD-main/mask-test.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 31 22:57:43 2020
@author: borut batagelj
"""
import os
import torch
from torchvision import transforms, datasets
from torch.utils.data import DataLoader
from torch import nn
# Applying Transforms to the Data
# Test-time pipeline only: resize to the network's 224x224 input and
# normalise with the standard ImageNet channel means/stds.
image_transforms = {
    'test': transforms.Compose([
        transforms.Resize(size=(224, 224)),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406],
                             [0.229, 0.224, 0.225])
    ])
}
# Load the Data from faces/test
dataset = 'faces'
test_directory = os.path.join(dataset, 'test')
# Batch size
bs = 128
# Number of classes (binary: mask / no-mask)
num_classes = 2
# Load Data from folders (ImageFolder infers each label from the subfolder name)
data = {
    'test': datasets.ImageFolder(root=test_directory, transform=image_transforms['test']),
}
class_names = data['test'].classes
transform=image_transforms['test']
# Get a mapping of the indices to the class names, in order to see the output classes of the test images.
idx_to_class = {v: k for k, v in data['test'].class_to_idx.items()}
print('Classes: ',idx_to_class)
# Size of Data, to be used for calculating Average Loss and Accuracy
test_data_size = len(data['test'])
# Create iterators for the Data loaded using DataLoader module
# (shuffle=False: order is irrelevant for accuracy, keeps runs reproducible)
test_data_loader = DataLoader(data['test'], batch_size=bs, shuffle=False)
# Print the test set data sizes
print('Number of faces: ',test_data_size)
def computeTestSetAccuracy(model, loss_criterion, data_loader, data_size):
    """Compute the mean classification accuracy of `model` over a loader.

    :param model: model to evaluate
    :param loss_criterion: kept for interface compatibility; the loss
        bookkeeping is disabled (as in the original, where it is commented out)
    :param data_loader: iterable of (inputs, labels) batches
    :param data_size: total sample count used to average the accuracy
    :return: average accuracy over all samples
    """
    weighted_acc = 0.0
    running_loss = 0.0  # retained for parity with the original accounting
    # Evaluation mode: freezes dropout / batch-norm statistics.
    model.eval()
    # No gradient tracking is needed while scoring.
    with torch.no_grad():
        for batch_inputs, batch_labels in data_loader:
            batch_inputs = batch_inputs.to(device)
            batch_labels = batch_labels.to(device)
            # Forward pass - compute outputs on input data using the model
            logits = model(batch_inputs)
            # Predicted class = arg-max over the class dimension.
            _, predicted = torch.max(logits.data, 1)
            hits = predicted.eq(batch_labels.data.view_as(predicted))
            # Batch accuracy, weighted by batch size so the final mean is
            # over samples rather than over batches.
            batch_acc = torch.mean(hits.type(torch.FloatTensor))
            weighted_acc += batch_acc.item() * batch_inputs.size(0)
    return weighted_acc / data_size
# Run on GPU when available, otherwise fall back to the CPU.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
loss_func = nn.CrossEntropyLoss() #for a multi-class classification problem
# Pre-trained classifier checkpoint expected in the working directory.
model_file = 'resnet152.pt'
if os.path.exists(model_file):
    model = torch.load(model_file)
    model = model.to(device)
    avg_test_acc=computeTestSetAccuracy(model, loss_func, test_data_loader, test_data_size)
    print("Test accuracy : " + str(avg_test_acc))
else:
    # NOTE(review): "Warrning" typo in this user-facing message.
    print("Warrning: No Pytorch model for classification: resnet152.pt. Please Download it from GitHub link.\n")
| 3,464 | 28.615385 | 112 | py |
GNNImpute | GNNImpute-main/GNNImpute/layer.py | import math
import torch
import torch.nn as nn
import torch.nn.functional as F
def layer(layer_type, **kwargs):
    """Factory for graph layers.

    'GCNConv' -> GraphConvolution, 'GATConv' -> MultiHeadAttentionLayer.
    Any other layer_type falls through and returns None (matching the
    original behaviour). kwargs are read lazily inside each branch so an
    unknown type never raises KeyError.
    """
    if layer_type == 'GCNConv':
        return GraphConvolution(in_features=kwargs['in_channels'],
                                out_features=kwargs['out_channels'])
    if layer_type == 'GATConv':
        return MultiHeadAttentionLayer(in_features=kwargs['in_channels'],
                                       out_features=kwargs['out_channels'],
                                       heads=kwargs['heads'],
                                       concat=kwargs['concat'])
class GraphConvolution(torch.nn.Module):
    """
    Simple GCN layer, similar to https://arxiv.org/abs/1609.02907
    """

    def __init__(self, in_features, out_features, bias=True):
        super(GraphConvolution, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        # Dense weight matrix; adjacency aggregation happens in forward().
        self.weight = nn.Parameter(torch.FloatTensor(in_features, out_features))
        if bias:
            self.bias = nn.Parameter(torch.FloatTensor(out_features))
        else:
            self.register_parameter('bias', None)
        self.reset_parameters()

    def reset_parameters(self):
        """Uniform init in [-1/sqrt(out_features), 1/sqrt(out_features)]."""
        bound = 1. / math.sqrt(self.weight.size(1))
        self.weight.data.uniform_(-bound, bound)
        if self.bias is not None:
            self.bias.data.uniform_(-bound, bound)

    def forward(self, input, adj):
        """Return adj @ (input @ W) (+ bias)."""
        transformed = torch.mm(input, self.weight)
        aggregated = torch.spmm(adj, transformed)
        if self.bias is None:
            return aggregated
        return aggregated + self.bias

    def __repr__(self):
        return '{} ({} -> {})'.format(self.__class__.__name__,
                                      self.in_features,
                                      self.out_features)
class GraphAttentionLayer(nn.Module):
    """
    Simple GAT layer, similar to https://arxiv.org/abs/1710.10903
    """

    def __init__(self, in_features, out_features, dropout=0.6, alpha=0.2, concat=True):
        super(GraphAttentionLayer, self).__init__()
        self.dropout = dropout
        self.in_features = in_features
        self.out_features = out_features
        self.alpha = alpha
        self.concat = concat
        # Feature transform W and attention vector a, Xavier-initialised.
        self.W = nn.Parameter(torch.empty(size=(in_features, out_features)))
        nn.init.xavier_uniform_(self.W.data, gain=1.414)
        self.a = nn.Parameter(torch.empty(size=(2 * out_features, 1)))
        nn.init.xavier_uniform_(self.a.data, gain=1.414)
        self.leakyrelu = nn.LeakyReLU(self.alpha)

    def forward(self, h, adj):
        """Attention-weighted aggregation of transformed node features.

        h: (N, in_features); adj: (N, N) with nonzero entries marking edges.
        """
        transformed = torch.mm(h, self.W)  # (N, out_features)
        scores = self._prepare_attentional_mechanism_input(transformed)
        # Mask non-edges with a huge negative so softmax drives them to ~0.
        masked = torch.where(adj > 0, scores, -9e15 * torch.ones_like(scores))
        coeffs = F.softmax(masked, dim=1)
        coeffs = F.dropout(coeffs, self.dropout, training=self.training)
        out = torch.matmul(coeffs, transformed)
        return F.elu(out) if self.concat else out

    def _prepare_attentional_mechanism_input(self, Wh):
        # Split `a` into source/target halves; the broadcast sum of the two
        # (N, 1) projections yields all pairwise scores at once, shape (N, N).
        src = torch.matmul(Wh, self.a[:self.out_features, :])
        dst = torch.matmul(Wh, self.a[self.out_features:, :])
        return self.leakyrelu(src + dst.T)

    def __repr__(self):
        return '{} ({} -> {})'.format(self.__class__.__name__,
                                      self.in_features,
                                      self.out_features)
class MultiHeadAttentionLayer(nn.Module):
    """Averages the outputs of `heads` independent GraphAttentionLayer heads."""

    def __init__(self, in_features, out_features, heads, concat=True):
        super(MultiHeadAttentionLayer, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        # Independent heads; add_module registers their parameters with this module.
        self.attentions = [GraphAttentionLayer(in_features, out_features, concat=concat)
                           for _ in range(heads)]
        for idx, head in enumerate(self.attentions):
            self.add_module('attention_{}'.format(idx), head)

    def forward(self, x, adj):
        """Run every head on (x, adj) and return the element-wise mean."""
        stacked = torch.cat([head(x, adj).unsqueeze(0) for head in self.attentions])
        return stacked.mean(dim=0)

    def __repr__(self):
        return '{} ({} -> {})'.format(self.__class__.__name__,
                                      self.in_features,
                                      self.out_features)
| 4,298 | 35.74359 | 111 | py |
GNNImpute | GNNImpute-main/GNNImpute/utils.py | import torch
import numpy as np
import scanpy as sc
import scipy.sparse as sp
from sklearn.decomposition import PCA
from sklearn.neighbors import kneighbors_graph
def normalize(adata, filter_min_counts=True, size_factors=True, normalize_input=True, logtrans_input=True):
    """Standard scanpy preprocessing pipeline, applied in place to `adata`.

    Optional steps, in order: filter zero-count genes/cells, per-cell count
    normalisation plus size-factor computation, log1p transform, z-score
    scaling. Returns the same AnnData object for chaining.
    """
    if filter_min_counts:
        # Drop genes and cells with no counts at all.
        sc.pp.filter_genes(adata, min_counts=1)
        sc.pp.filter_cells(adata, min_counts=1)
    if not size_factors:
        adata.obs['size_factors'] = 1.0
    else:
        sc.pp.normalize_per_cell(adata)
        # Size factor = per-cell total counts relative to the median cell.
        adata.obs['size_factors'] = adata.obs.n_counts / np.median(adata.obs.n_counts)
    if logtrans_input:
        sc.pp.log1p(adata)
    if normalize_input:
        sc.pp.scale(adata)
    return adata
def train_val_split(adata, train_size=0.6, val_size=0.2, test_size=0.2):
    """Randomly assign each cell of `adata` to train/val/test splits.

    Adds three boolean masks ('idx_train', 'idx_val', 'idx_test') to
    `adata.obs` on a copy of `adata` and returns that copy.

    Fixes vs. the original:
    * the split-fraction check used exact float equality (`== 1`), which
      rejected valid combinations such as 0.7/0.2/0.1 whose binary sum is
      0.9999999999999999 — now a tolerant `np.isclose` check;
    * membership tests against NumPy index arrays were O(n) each (O(n^2)
      overall) — now set-based O(1) lookups.
    """
    assert np.isclose(train_size + val_size + test_size, 1.0)
    adata = adata.copy()
    cell_nums = adata.n_obs
    # Sample the held-out (val+test) indices without replacement.
    test_val = np.random.choice(cell_nums, int(cell_nums * (val_size + test_size)), replace=False)
    test_val_set = set(test_val)
    idx_train = [i for i in range(cell_nums) if i not in test_val_set]
    # Carve the test subset out of the held-out pool.
    idx_test = np.random.choice(test_val, int(len(test_val) * (test_size / (val_size + test_size))), replace=False)
    idx_test_set = set(idx_test)
    idx_val = [i for i in test_val if i not in idx_test_set]

    tmp = np.zeros(cell_nums, dtype=bool)
    tmp[idx_train] = True
    adata.obs['idx_train'] = tmp

    tmp = np.zeros(cell_nums, dtype=bool)
    tmp[idx_val] = True
    adata.obs['idx_val'] = tmp

    tmp = np.zeros(cell_nums, dtype=bool)
    tmp[idx_test] = True
    adata.obs['idx_test'] = tmp
    return adata
def row_normalize(mx):
    """Row-normalize sparse matrix (rows summing to zero are left as zeros)."""
    # Reciprocal of each row sum; zero rows produce inf, reset to 0 below.
    row_sums = np.array(mx.sum(1))
    inv = np.power(row_sums, -1).flatten()
    inv[np.isinf(inv)] = 0.
    # Left-multiplying by diag(inv) rescales every row of mx.
    return sp.diags(inv).dot(mx)
def kneighbor(adata, n_components=50, k=5):
    """Row-normalised k-NN connectivity graph of the cells, built in PCA space."""
    # Reduce to `n_components` principal components before measuring neighbours.
    reduced = PCA(n_components=n_components).fit_transform(adata.X)
    graph = kneighbors_graph(reduced, k, mode='connectivity', include_self=True)
    return row_normalize(graph)
def adata2gdata(adata, use_raw=True):
    """Pack an AnnData object into the tensor dict consumed by `train`.

    Produces features ('x'), reconstruction targets ('y'), per-cell size
    factors, the dense kNN adjacency, and the train/val boolean masks.
    With use_raw=True the targets come from the raw (pre-normalisation)
    counts instead of the processed matrix.
    """
    adjacency = torch.tensor(kneighbor(adata, n_components=50, k=5).A, dtype=torch.float)
    features = torch.tensor(adata.X, dtype=torch.float)
    size_factors = torch.tensor(adata.obs.size_factors, dtype=torch.float).reshape(-1, 1)
    if use_raw:
        targets = torch.tensor(adata.raw.X.A, dtype=torch.float)
    else:
        targets = torch.tensor(adata.X, dtype=torch.float)
    return {
        'x': features,
        'y': targets,
        'size_factors': size_factors,
        'adj': adjacency,
        'train_mask': torch.tensor(adata.obs.idx_train, dtype=torch.bool),
        'val_mask': torch.tensor(adata.obs.idx_val, dtype=torch.bool),
    }
| 2,914 | 28.15 | 115 | py |
GNNImpute | GNNImpute-main/GNNImpute/model.py | import torch
import torch.nn.functional as F
from .layer import layer
class GNNImpute(torch.nn.Module):
    """Graph autoencoder for scRNA-seq imputation.

    Encoder: two graph conv/attention layers (input_dim -> h_dim -> z_dim),
    each followed by batch-norm, ReLU and dropout. Decoder: two dense layers
    back to input_dim; the output is rescaled by per-cell size factors.
    """

    def __init__(self, input_dim, h_dim=512, z_dim=50, layerType='GATConv', heads=3):
        super(GNNImpute, self).__init__()
        #### Encoder ####
        self.encode_conv1 = layer(layerType, in_channels=input_dim, out_channels=h_dim,
                                  heads=heads, concat=False)
        self.encode_bn1 = torch.nn.BatchNorm1d(h_dim)
        self.encode_conv2 = layer(layerType, in_channels=h_dim, out_channels=z_dim,
                                  heads=heads, concat=False)
        self.encode_bn2 = torch.nn.BatchNorm1d(z_dim)
        #### Decoder ####
        self.decode_linear1 = torch.nn.Linear(z_dim, h_dim)
        self.decode_bn1 = torch.nn.BatchNorm1d(h_dim)
        self.decode_linear2 = torch.nn.Linear(h_dim, input_dim)

    def encode(self, x, edge_index):
        """Map expression profiles to the latent space (conv -> bn -> relu -> dropout, twice)."""
        for conv, bn in ((self.encode_conv1, self.encode_bn1),
                         (self.encode_conv2, self.encode_bn2)):
            x = F.dropout(F.relu(bn(conv(x, edge_index))), p=0.5, training=self.training)
        return x

    def decode(self, x):
        """Reconstruct expression profiles from latent codes."""
        x = F.relu(self.decode_bn1(self.decode_linear1(x)))
        return F.relu(self.decode_linear2(x))

    def forward(self, x, edge_index, size_factors):
        reconstructed = self.decode(self.encode(x, edge_index))
        # Undo the per-cell library-size normalisation on the way out.
        return reconstructed * size_factors
| 1,491 | 32.155556 | 87 | py |
GNNImpute | GNNImpute-main/GNNImpute/train.py | import os
import time
import glob
import torch
def train(gdata, model,
          no_cuda=False,
          epochs=3000,
          lr=0.001,
          weight_decay=0.0005,
          patience=200,
          fastmode=False,
          verbose=True):
    """Train `model` on `gdata` with early stopping on the monitored MSE.

    gdata: dict with 'x', 'y', 'adj', 'size_factors', 'train_mask',
    'val_mask' tensors (moved to the chosen device in place).
    Checkpoints are written to '<epoch>.pkl' in the CWD; all but the best
    are deleted afterwards and the best weights are restored into `model`.

    Bug fix: with fastmode=True the original referenced `loss_val` before
    assignment and crashed with NameError; early stopping now monitors the
    training loss in that mode. The validation pass also runs under
    torch.no_grad() to avoid building an unused graph.
    """
    device = torch.device('cuda' if torch.cuda.is_available() and not no_cuda else 'cpu')
    model = model.to(device)
    optimizer = torch.optim.Adam(model.parameters(), lr=lr, weight_decay=weight_decay)
    loss_fn = torch.nn.MSELoss(reduction='mean')

    for key in gdata.keys():
        gdata[key] = gdata[key].to(device)

    def run_epoch(epoch):
        # One optimisation step on the training mask.
        model.train()
        optimizer.zero_grad()
        pred = model(gdata['x'], gdata['adj'], gdata['size_factors'])
        loss_train = loss_fn(pred[gdata['train_mask']], gdata['y'][gdata['train_mask']])
        loss_train.backward()
        optimizer.step()

        if fastmode:
            # No separate validation pass: monitor the training loss.
            return loss_train.data.item()

        model.eval()
        with torch.no_grad():
            pred = model(gdata['x'], gdata['adj'], gdata['size_factors'])
            loss_val = loss_fn(pred[gdata['val_mask']], gdata['y'][gdata['val_mask']])
        if (epoch + 1) % 10 == 0 and verbose:
            print('Epoch: {:04d}'.format(epoch + 1),
                  'loss_train: {:.4f}'.format(loss_train.data.item()),
                  'loss_val: {:.4f}'.format(loss_val.data.item()))
        return loss_val.data.item()

    t_total = time.time()
    loss_values = []
    bad_counter = 0
    best = float('inf')
    best_epoch = 0
    for epoch in range(epochs):
        loss_values.append(run_epoch(epoch))
        if loss_values[-1] < best:
            # New best: checkpoint and reset the patience counter.
            torch.save(model.state_dict(), '{}.pkl'.format(epoch))
            best = loss_values[-1]
            best_epoch = epoch
            bad_counter = 0
        else:
            bad_counter += 1
        if bad_counter == patience:
            break

    # NOTE: this sweeps every '*.pkl' in the CWD, not only files written by
    # this run — unchanged from the original behaviour.
    for file in glob.glob('*.pkl'):
        if int(file.split('.')[0]) != best_epoch:
            os.remove(file)
    print('Total time elapsed: {:.4f}s'.format(time.time() - t_total))

    # Restore best model
    model.load_state_dict(torch.load('{}.pkl'.format(best_epoch)))
| 2,321 | 27.317073 | 89 | py |
dcstfn | dcstfn-master/experiment/run.py | import sys
sys.path.append('..')
import os
os.environ['KERAS_BACKEND'] = 'tensorflow'
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import argparse
from functools import partial
import json
from keras import optimizers
from pathlib import Path
from toolbox.data import load_train_set
from toolbox.model import get_model
from toolbox.experiment import Experiment
# One positional argument: path of the JSON experiment configuration.
parser = argparse.ArgumentParser()
parser.add_argument('config', type=Path)
args = parser.parse_args()
param = json.load(args.config.open())
# Model: bind the configured hyper-parameters onto the chosen builder.
scale = param['scale']
build_model = partial(get_model(param['model']['name']),
                      **param['model']['params'])
if 'optimizer' in param:
    # Instantiate the Keras optimizer class named (lower-cased) in the config.
    optimizer = getattr(optimizers, param['optimizer']['name'].lower())
    optimizer = optimizer(**param['optimizer']['params'])
else:
    optimizer = 'adam'
lr_block_size = tuple(param['lr_block_size'])
# Data
# NOTE: this rebinding shadows the imported load_train_set with a configured version.
load_train_set = partial(load_train_set,
                         lr_sub_size=param['lr_sub_size'],
                         lr_sub_stride=param['lr_sub_stride'])
# Training
expt = Experiment(scale=param['scale'], load_set=load_train_set,
                  build_model=build_model, optimizer=optimizer,
                  save_dir=param['save_dir'])
print('training process...')
expt.train(train_set=param['train_set'], val_set=param['val_set'],
           epochs=param['epochs'], resume=True)
# Evaluation: score every configured test set with block-wise prediction.
print('evaluation process...')
for test_set in param['test_sets']:
    expt.test(test_set=test_set, lr_block_size=lr_block_size)
| 1,517 | 28.192308 | 71 | py |
dcstfn | dcstfn-master/toolbox/experiment.py | from functools import partial
from pathlib import Path
import time
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from keras import backend as K
from keras.callbacks import CSVLogger, ModelCheckpoint
from keras.utils.vis_utils import plot_model
from keras.preprocessing.image import img_to_array
from osgeo import gdal_array
from toolbox.data import data_dir, load_image_pairs, load_test_set
from toolbox.metrics import psnr, r2
class Experiment(object):
    """Orchestrates one DCSTFN experiment: training with checkpoint/resume,
    metric plotting, and block-wise prediction on full-size test scenes.

    All artefacts (config, model, weights, history, test outputs) live
    under `save_dir`.
    """

    def __init__(self, scale=16, load_set=None, build_model=None,
                 optimizer='adam', save_dir='.'):
        # `scale` is the resolution ratio between fine (Landsat) and coarse
        # (MODIS) imagery; it is baked into the data loader here.
        self.scale = scale
        self.load_set = partial(load_set, scale=scale)
        self.build_model = partial(build_model)
        self.optimizer = optimizer
        # Directory layout: save_dir/{config.yaml, model.hdf5, model.eps,
        # train/{history.csv, weights/}, test/}.
        self.save_dir = Path(save_dir)
        self.save_dir.mkdir(parents=True, exist_ok=True)
        self.config_file = self.save_dir / 'config.yaml'
        self.model_file = self.save_dir / 'model.hdf5'
        self.visual_file = self.save_dir / 'model.eps'
        self.train_dir = self.save_dir / 'train'
        self.train_dir.mkdir(exist_ok=True)
        self.history_file = self.train_dir / 'history.csv'
        self.weights_dir = self.train_dir / 'weights'
        self.weights_dir.mkdir(exist_ok=True)
        self.test_dir = self.save_dir / 'test'
        self.test_dir.mkdir(exist_ok=True)

    def weights_file(self, epoch=None):
        """Per-epoch weights path; with epoch=None, the Keras template
        string that ModelCheckpoint fills in per epoch."""
        if epoch is None:
            return self.weights_dir / 'ep{epoch:04d}.hdf5'
        else:
            return self.weights_dir / 'ep{:04d}.hdf5'.format(epoch)

    @property
    def latest_epoch(self):
        """Last epoch recorded in history.csv, or -1 when there is none.

        NOTE(review): `pd.io.common.EmptyDataError` moved to
        `pandas.errors.EmptyDataError` in modern pandas — confirm the
        installed version still exposes the old alias.
        """
        try:
            return pd.read_csv(str(self.history_file))['epoch'].iloc[-1]
        except (FileNotFoundError, pd.io.common.EmptyDataError):
            pass
        return -1

    @staticmethod
    def _ensure_dimension(array, dim):
        """Left-pad `array` with singleton axes until it has `dim` dimensions."""
        while len(array.shape) < dim:
            array = array[np.newaxis, ...]
        return array

    @staticmethod
    def _ensure_channel(array, c):
        """Keep only channel `c`, preserved as a length-1 trailing axis."""
        return array[..., c:c + 1]

    @staticmethod
    def validate(array):
        """Normalise an array to the 4-D single-channel shape the model expects."""
        array = Experiment._ensure_dimension(array, 4)
        array = Experiment._ensure_channel(array, 0)
        return array

    def compile(self, model):
        """Compile model with default settings."""
        model.compile(optimizer=self.optimizer, loss='mse', metrics=[psnr, r2])
        return model

    def train(self, train_set, val_set, epochs=10, resume=True):
        """Train on `train_set` with validation on `val_set`, optionally
        resuming from the last recorded epoch, then plot metric history."""
        # Load and process data
        x_train, y_train = self.load_set(train_set)
        x_val, y_val = self.load_set(val_set)
        assert len(x_train) == 3 and len(x_val) == 3
        for i in range(3):
            x_train[i], x_val[i] = [self.validate(x) for x in [x_train[i], x_val[i]]]
        y_train, y_val = [self.validate(y) for y in [y_train, y_val]]
        # Compile model
        model = self.compile(self.build_model(*x_train))
        model.summary()
        self.config_file.write_text(model.to_yaml())
        plot_model(model, to_file=str(self.visual_file), show_shapes=True)
        # Inherit weights when resuming from a previous run
        if resume:
            latest_epoch = self.latest_epoch
            if latest_epoch > -1:
                weights_file = self.weights_file(epoch=latest_epoch)
                model.load_weights(str(weights_file))
            initial_epoch = latest_epoch + 1
        else:
            initial_epoch = 0
        # Set up callbacks: best-model file, per-epoch weights, CSV history
        callbacks = []
        callbacks += [ModelCheckpoint(str(self.model_file))]
        callbacks += [ModelCheckpoint(str(self.weights_file()),
                                      save_weights_only=True)]
        callbacks += [CSVLogger(str(self.history_file), append=resume)]
        # Train
        model.fit(x_train, y_train, batch_size=320, epochs=epochs, callbacks=callbacks,
                  validation_data=(x_val, y_val), initial_epoch=initial_epoch)
        # Plot metrics history (one EPS figure per tracked metric)
        prefix = str(self.history_file).rsplit('.', maxsplit=1)[0]
        df = pd.read_csv(str(self.history_file))
        epoch = df['epoch']
        for metric in ['Loss', 'PSNR', 'R2']:
            train = df[metric.lower()]
            val = df['val_' + metric.lower()]
            plt.figure()
            plt.plot(epoch, train, label='train')
            plt.plot(epoch, val, label='val')
            plt.legend(loc='best')
            plt.xlabel('Epoch')
            plt.ylabel(metric)
            plt.savefig('.'.join([prefix, metric.lower(), 'eps']))
            plt.close()

    def test(self, test_set, lr_block_size=(20, 20), metrics=[psnr, r2]):
        """Evaluate every sample directory of `test_set` and write per-image
        plus averaged metrics to test/<test_set>/metrics.csv.

        NOTE(review): the mutable default `metrics=[psnr, r2]` is shared
        across calls — harmless here since it is never mutated.
        """
        print('Test on', test_set)
        output_dir = self.test_dir / test_set
        output_dir.mkdir(exist_ok=True)
        # Evaluate metrics on each image
        rows = []
        for image_path in (data_dir / test_set).glob('*'):
            if image_path.is_dir():
                rows += [self.test_on_image(image_path, output_dir, lr_block_size=lr_block_size, metrics=metrics)]
        df = pd.DataFrame(rows)
        # Compute average metrics over all images
        row = pd.Series()
        row['name'] = 'average'
        for col in df:
            if col != 'name':
                row[col] = df[col].mean()
        # NOTE(review): DataFrame.append was removed in pandas 2.0; this
        # requires an older pandas (or switching to pd.concat).
        df = df.append(row, ignore_index=True)
        df.to_csv(str(self.test_dir / '{}/metrics.csv'.format(test_set)))

    def test_on_image(self, image_dir, output_dir, lr_block_size=(20, 20), metrics=[psnr, r2]):
        """Predict one full scene block-by-block, time it, score it against
        the reference image, and save the mosaicked GeoTIFF prediction."""
        # Load images
        print('loading image pairs from {}'.format(image_dir))
        input_images, valid_image = load_image_pairs(image_dir, scale=self.scale)
        assert len(input_images) == 3
        name = input_images[-1].filename.name if hasattr(input_images[-1], 'filename') else ''
        print('Predict on image {}'.format(name))
        # Generate output image and measure run time
        # x_inputs are 4-D arrays shaped (count, height, width, channels)
        x_inputs = [self.validate(img_to_array(im)) for im in input_images]
        # The scene must tile evenly into lr_block_size blocks.
        assert x_inputs[0].shape[1] % lr_block_size[0] == 0
        assert x_inputs[0].shape[2] % lr_block_size[1] == 0
        x_train, _ = load_test_set((input_images, valid_image),
                                   lr_block_size=lr_block_size, scale=self.scale)
        model = self.compile(self.build_model(*x_train))
        if self.model_file.exists():
            model.load_weights(str(self.model_file))
        t_start = time.perf_counter()
        y_preds = model.predict(x_train, batch_size=1)  # predictions are 4-D
        # Reassemble the full-size image from the predicted blocks, laying
        # them back in the same column-major order they were generated.
        y_pred = np.empty(x_inputs[1].shape[-3:], dtype=np.float32)
        row_step = lr_block_size[0] * self.scale
        col_step = lr_block_size[1] * self.scale
        rows = x_inputs[0].shape[2] // lr_block_size[1]
        cols = x_inputs[0].shape[1] // lr_block_size[0]
        count = 0
        for j in range(rows):
            for i in range(cols):
                y_pred[i * row_step: (i + 1) * row_step, j * col_step: (j + 1) * col_step] = y_preds[count]
                count += 1
        assert count == rows * cols
        t_end = time.perf_counter()
        # Record metrics (name, wall-clock prediction time, each metric fn)
        row = pd.Series()
        row['name'] = name
        row['time'] = t_end - t_start
        y_true = self.validate(img_to_array(valid_image))
        y_pred = self.validate(y_pred)
        for metric in metrics:
            row[metric.__name__] = K.eval(metric(y_true, y_pred))
        # Save the prediction as int16 GeoTIFF, copying georeferencing from
        # the validation image when available.
        prototype = str(valid_image.filename) if hasattr(valid_image, 'filename') else None
        gdal_array.SaveArray(y_pred[0].squeeze().astype(np.int16),
                             str(output_dir / name),
                             prototype=prototype)
        return row
| 7,661 | 37.119403 | 114 | py |
dcstfn | dcstfn-master/toolbox/model.py | import keras.layers
from keras.layers import Input, Conv2D, Conv2DTranspose, MaxPooling2D, Dense
from keras.models import Model, Sequential
##################################################################
# Deep Convolutional SpatioTemporal Fusion Network (DCSTFN)
##################################################################
def dcstfn(coarse_input, fine_input, coarse_pred, d=[32, 64, 128]):
    """Build the Deep Convolutional SpatioTemporal Fusion Network.

    The two MODIS (coarse) inputs share one encoder; the Landsat (fine)
    input has its own. The branches are fused as
    fine - coarse_input + coarse_pred, then a stride-2 transposed conv and
    two dense layers produce the fine-resolution prediction.
    """
    pool_size = 2
    coarse_net = _htls_cnet(coarse_input, coarse_pred, d)
    fine_net = _hslt_cnet(fine_input, d)
    # Fuse the three branches (the coarse encoder is shared by both MODIS inputs).
    coarse_in = Input(shape=coarse_input.shape[-3:])
    coarse_branch = coarse_net(coarse_in)
    fine_in = Input(shape=fine_input.shape[-3:])
    fine_branch = fine_net(fine_in)
    diff = keras.layers.subtract([fine_branch, coarse_branch])
    pred_in = Input(shape=coarse_pred.shape[-3:])
    pred_branch = coarse_net(pred_in)
    fused = keras.layers.add([diff, pred_branch])
    upsampled = Conv2DTranspose(d[1], 3, strides=pool_size,
                                padding='same',
                                kernel_initializer='he_normal',
                                activation='relu')(fused)
    hidden = Dense(d[0], activation='relu')(upsampled)
    output = Dense(fine_input.shape[-1])(hidden)
    return Model([coarse_in, fine_in, pred_in], output)
def _hslt_cnet(fine_input, d, pool_size=2):
    """Encoder for the high-spatial / low-temporal (Landsat) branch.

    Conv(d0) -> Conv(d1) -> MaxPool -> Conv(d1) -> Conv(d2); all convs are
    3x3, he_normal-initialised, ReLU-activated.
    """
    net = Sequential()
    net.add(Conv2D(d[0], 3, padding='same',
                   kernel_initializer='he_normal',
                   activation='relu', input_shape=fine_input.shape[-3:]))
    net.add(Conv2D(d[1], 3, padding='same',
                   kernel_initializer='he_normal',
                   activation='relu'))
    net.add(MaxPooling2D(pool_size=pool_size, padding='same'))
    for width in (d[1], d[2]):
        net.add(Conv2D(width, 3, padding='same',
                       kernel_initializer='he_normal',
                       activation='relu'))
    return net
def _htls_cnet(coarse_input, coarse_pred, d):
    """Shared encoder for the two (identically shaped) MODIS inputs.

    Two 3x3 convs, then three stride-2 transposed convs (8x spatial
    upsampling) and a final conv.
    """
    # Both MODIS images pass through the same weights, so shapes must match.
    assert coarse_input.shape == coarse_pred.shape
    net = Sequential()
    net.add(Conv2D(d[0], 3, padding='same',
                   kernel_initializer='he_normal',
                   activation='relu', input_shape=coarse_input.shape[-3:]))
    net.add(Conv2D(d[1], 3, padding='same',
                   kernel_initializer='he_normal',
                   activation='relu'))
    for stride in (2, 2, 2):
        net.add(Conv2DTranspose(d[1], 3, strides=stride, padding='same',
                                kernel_initializer='he_normal'))
    net.add(Conv2D(d[2], 3, padding='same',
                   kernel_initializer='he_normal',
                   activation='relu'))
    return net
def get_model(name):
    """Resolve a builder function defined in this module from its string
    name, so config files can reference models by name."""
    module_namespace = globals()
    return module_namespace[name]
| 3,336 | 43.493333 | 87 | py |
dcstfn | dcstfn-master/toolbox/data.py | from pathlib import Path
import numpy as np
from functools import partial
from keras.preprocessing.image import img_to_array
from osgeo import gdal_array
from PIL import Image
repo_dir = Path(__file__).parents[1]
data_dir = repo_dir / 'data'
input_suffix = 'input'
pred_suffix = 'pred'
valid_suffix = 'valid'
modis_prefix = 'MOD09A1'
landsat_prefix = 'LC08'
def gen_patches(image, size, stride=None):
    """Yield crops of `size` from `image`, stepping by `stride`.

    `size` and `stride` may each be a scalar (square) or an (x, y) tuple;
    stride defaults to `size`, i.e. non-overlapping tiles. Iteration is
    column-major: the x offset advances in the outer loop.
    """
    if not isinstance(size, tuple):
        size = (size, size)
    if stride is None:
        stride = size
    elif not isinstance(stride, tuple):
        stride = (stride, stride)
    x_positions = range(0, image.size[0] - size[0] + 1, stride[0])
    y_positions = range(0, image.size[1] - size[1] + 1, stride[1])
    for x in x_positions:
        for y in y_positions:
            yield image.crop([x, y, x + size[0], y + size[1]])
def load_image_pairs(directory, scale=16):
    """Load one sample's image quadruple from `directory`.

    The directory must contain exactly four GeoTIFFs, identified by name:
    the 'pred'-suffixed MODIS image, the 'valid'-suffixed Landsat target,
    and the two 'input' images (MODIS/Landsat told apart by filename
    prefix). Returns ([modis_input, landsat_input, modis_pred],
    landsat_valid) as PIL images carrying a `filename` attribute.

    Bug fix: the original size assertions compared each image with itself
    (`image_list[0].size == image_list[0].size`), which is always true;
    they now verify that the paired coarse/fine images actually agree.
    """
    path_list = []
    for path in Path(directory).glob('*.tif'):
        path_list.append(path)
    assert len(path_list) == 4
    for path in path_list:
        img_name = path.name
        if pred_suffix in img_name:
            modis_pred_path = path
        elif valid_suffix in img_name:
            landsat_valid_path = path
        elif input_suffix in img_name:
            if img_name.startswith(modis_prefix):
                modis_input_path = path
            elif img_name.startswith(landsat_prefix):
                landsat_input_path = path
    path_list = [modis_input_path, landsat_input_path, modis_pred_path, landsat_valid_path]
    image_list = []
    for path in path_list:
        data = gdal_array.LoadFile(str(path)).astype(np.int32)
        image = Image.fromarray(data)
        # Keep the source path so downstream code can name its outputs.
        setattr(image, 'filename', path)
        image_list.append(image)
    # Both MODIS images must share a size, both Landsat images must share a
    # size, and Landsat must be `scale` times MODIS in each dimension.
    assert image_list[0].size == image_list[2].size
    assert image_list[1].size == image_list[3].size
    assert image_list[1].size[0] == image_list[0].size[0] * scale
    assert image_list[1].size[1] == image_list[0].size[1] * scale
    return image_list[:3], image_list[-1]
def sample_to_array(samples, lr_gen_sub, hr_gen_sub, patches):
    """Tile the four sample images and append the patch arrays to `patches`.

    `samples` holds the current batch of four images; `patches` is a list of
    four accumulator lists. Even indices (MODIS, coarse) use the low-res
    tiler; odd indices (Landsat, fine) use the high-res tiler.
    """
    assert len(samples) == 4
    for idx, image in enumerate(samples):
        tiler = lr_gen_sub if idx % 2 == 0 else hr_gen_sub
        patches[idx] += [img_to_array(patch) for patch in tiler(image)]
def load_train_set(image_dir, lr_sub_size=10, lr_sub_stride=5, scale=16):
    """Collect training patches from every sample directory under `image_dir`.

    High-res patch size/stride are the low-res values times `scale`.
    Returns ([modis_input, landsat_input, modis_pred], landsat_valid) as
    stacked 4-D arrays (count, height, width, channels).
    """
    lr_tiler = partial(gen_patches, size=lr_sub_size, stride=lr_sub_stride)
    hr_tiler = partial(gen_patches, size=lr_sub_size * scale, stride=lr_sub_stride * scale)
    patches = [[] for _ in range(4)]
    for path in (data_dir / image_dir).glob('*'):
        if not path.is_dir():
            continue
        print('loading image pairs from {}'.format(path))
        inputs, target = load_image_pairs(path, scale=scale)
        sample_to_array([*inputs, target], lr_tiler, hr_tiler, patches)
    patches = [np.stack(group) for group in patches]
    return patches[:3], patches[-1]
def load_test_set(samples, lr_block_size=(20, 20), scale=16):
    """Tile one (inputs, target) sample into non-overlapping test blocks.

    `samples` is ([modis_input, landsat_input, modis_pred], landsat_valid).
    Returns the three stacked input patch arrays and the target patch array.
    """
    assert len(samples) == 2
    hr_block_size = [edge * scale for edge in lr_block_size]
    lr_tiler = partial(gen_patches, size=tuple(lr_block_size))
    hr_tiler = partial(gen_patches, size=tuple(hr_block_size))
    patches = [[] for _ in range(4)]
    flattened = [*samples[0], samples[1]]
    sample_to_array(flattened, lr_tiler, hr_tiler, patches)
    patches = [np.stack(group) for group in patches]
    return patches[:3], patches[-1]
| 3,794 | 32 | 91 | py |
dcstfn | dcstfn-master/toolbox/metrics.py | from keras import backend as K
import tensorflow as tf
import numpy as np
def cov(x, y):
    """Covariance-style statistic used by `ssim`: mean of the centred
    elementwise product against the transposed centred tensor.

    NOTE(review): the transpose makes this differ from plain elementwise
    covariance for non-square tensors — confirm this is intended.
    """
    x_centred = x - K.mean(x)
    y_centred = y - K.mean(y)
    return K.mean(x_centred * K.transpose(y_centred))
def psnr(y_true, y_pred, data_range=10000):
    """Peak signal-to-noise ratio averaged over samples and channels."""
    # Per-image MSE over the spatial axes, then 20*log10(range/RMSE).
    per_image_mse = K.mean(K.square(y_true - y_pred), axis=(-3, -2))
    ratio = data_range / K.sqrt(per_image_mse)
    return K.mean(20 * K.log(ratio) / np.log(10))
def ssim(y_true, y_pred, data_range=10000):
    """Structural similarity index (SSIM) between two images.

    Bug fixes vs. the original expression:
    * the contrast term used `2 * sig_xy * C2` where the standard SSIM
      definition has `2 * sig_xy + C2`;
    * operator precedence made the formula `A * B / C * D`, so the second
      stabiliser term multiplied instead of dividing — the denominator is
      now the explicit product (mu_x^2+mu_y^2+C1)(sig_x^2+sig_y^2+C2).
    """
    K1 = 0.01
    K2 = 0.03
    mu_x = K.mean(y_pred)
    mu_y = K.mean(y_true)
    sig_x = K.std(y_pred)
    sig_y = K.std(y_true)
    sig_xy = cov(y_true, y_pred)
    L = data_range
    C1 = (K1 * L) ** 2
    C2 = (K2 * L) ** 2
    numerator = (2 * mu_x * mu_y + C1) * (2 * sig_xy + C2)
    denominator = (mu_x ** 2 + mu_y ** 2 + C1) * (sig_x ** 2 + sig_y ** 2 + C2)
    return numerator / denominator
def r2(y_true, y_pred):
    """Coefficient of determination R^2 = 1 - SS_res / SS_tot."""
    # K.mean relies on tensor attributes, so a plain ndarray must be
    # converted to a tf.Tensor first.
    if isinstance(y_true, tf.Tensor):
        tf_true = y_true
    else:
        tf_true = tf.convert_to_tensor(y_true)
    ss_res = K.sum(K.square(y_true - y_pred))
    ss_tot = K.sum(K.square(y_true - K.mean(tf_true)))
    return 1 - ss_res / (ss_tot + K.epsilon())
| 1,180 | 25.840909 | 74 | py |
MRI-ROI-prediction | MRI-ROI-prediction-main/lrmain.py | import os
import numpy as np
import time
import glob
import random
import tensorflow as tf
tf.compat.v1.disable_eager_execution()
FLAGS = tf.compat.v1.flags.FLAGS
tf.compat.v1.flags.DEFINE_string('EXP','temp',"exp. name")
tf.compat.v1.flags.DEFINE_integer('mod', 0, "model") # 0=share, 1=chstack, 2=3D
class ConvNet(object):
    """Regressor for the left/right ROI bounds of 512x512 MRI volumes.

    The backbone is selected at construction time via the --mod flag
    (0 = slice-shared encoder, 1 = channel-stacked 2D convs, 2 = 3D convs).
    Input comes from TFRecords; random pixel shifts are applied as
    augmentation and the coordinate labels are shifted accordingly.
    """

    def __init__(self):
        self.lr = 0.0001
        self.batch_size = 1
        self.gstep = tf.Variable(0, dtype=tf.int32, trainable=False, name='global_step')
        self.image_size = 512
        if FLAGS.mod == 1:
            from bmbn2D import inference
        elif FLAGS.mod == 2:
            from bmbn import inference
        else:
            from share import inference
        # BUGFIX: `from ... import inference` only binds a local name inside
        # __init__, so build()'s call to self.inference() raised
        # AttributeError.  Bind the selected backbone-builder to this
        # instance as a bound method.
        self.inference = inference.__get__(self)

    def parser(self, serialized_example):
        """Parses a single tf.Example into image and label tensors."""
        features = tf.io.parse_single_example(serialized_example,
                                              features={
                                                  'top': tf.io.FixedLenFeature([], tf.float32),
                                                  'bottom': tf.io.FixedLenFeature([], tf.float32),
                                                  'right': tf.io.FixedLenFeature([], tf.float32),
                                                  'left': tf.io.FixedLenFeature([], tf.float32),
                                                  'image': tf.io.FixedLenFeature([], tf.string),
                                              }, name='features')
        image = tf.io.decode_raw(features['image'], tf.float32)
        image = tf.reshape(image, [self.image_size, self.image_size, -1])
        label = tf.stack([features['top'], features['bottom'], features['right'], features['left']])
        return image, label

    def get_data(self):
        """Build the tf.data pipeline plus the shift-augmentation ops."""
        with tf.name_scope('data'):
            self.filenames = tf.compat.v1.placeholder(tf.string, shape=[None])
            dataset = tf.data.TFRecordDataset(self.filenames)
            dataset = dataset.map(self.parser, num_parallel_calls=4)
            if FLAGS.mod != 1:
                dataset = dataset.batch(1)
            else:
                # Channel-stacked model needs padding to a fixed 40 slices.
                dataset = dataset.padded_batch(self.batch_size, padded_shapes=([512, 512, 40], [4]))
            dataset = dataset.shuffle(100)
            self.iterator = tf.compat.v1.data.make_initializable_iterator(dataset)
            self.img, self.label = self.iterator.get_next()
            self.img = tf.image.per_image_standardization(self.img)
            self.shift = tf.compat.v1.placeholder(tf.int32, name='shift')
            self.img = tf.roll(self.img, self.shift, [0, 1])
            # The horizontal component of the shift moves the left/right labels.
            self.label += tf.cast(self.shift[1], tf.float32)

    def loss(self):
        """MSE between the predicted pair and the (left, right) labels."""
        with tf.name_scope('loss'):
            self.loss = tf.keras.losses.MSE(self.label[:, 3], (self.logits[0])) + tf.keras.losses.MSE(self.label[:, 2], self.logits[1])

    def optimize(self):
        """Adam step on the loss, incrementing the global step."""
        self.opt = tf.compat.v1.train.AdamOptimizer(self.lr).minimize(self.loss,
                                                                      global_step=self.gstep)

    def summary(self):
        """Register the scalar loss summary for TensorBoard."""
        with tf.name_scope('summaries'):
            tf.compat.v1.summary.scalar('loss', self.loss)
            self.summary_op = tf.compat.v1.summary.merge_all()

    def build(self):
        """Assemble the full graph: data, backbone, loss, optimizer, summaries."""
        self.get_data()
        self.inference()
        self.loss()
        self.optimize()
        self.summary()

    def train_one_epoch(self, sess, saver, init, writer, epoch, step):
        """Run one pass over the training TFRecords with random shifts."""
        train_filenames = sorted(glob.glob("/mnt/raid5/Loc/trainC/*.tfrecord"))
        sess.run(init.initializer, feed_dict={self.filenames: train_filenames})
        try:
            while True:
                shiftof = [-10, -5, 0, 5, 10]
                dx, dy = (random.choice(shiftof), random.choice(shiftof))
                _, l, summaries, tsnr, tscore, img = sess.run([self.opt, self.loss, self.summary_op, self.label, self.logits, self.img], feed_dict={self.drop_prob: 0.2, self.shift: [dy, dx]})
                writer.add_summary(summaries, global_step=step)
                if step % 100 == 0:
                    print('Loss at step {0}: {1}'.format(step, l))
                step += 1
        except tf.errors.OutOfRangeError:
            # End of the epoch: the dataset iterator is exhausted.
            pass
        return step

    def eval_once(self, sess, init, writer, step):
        """Evaluate mean coordinate error and IoU on the test TFRecords."""
        eval_filenames = sorted(glob.glob("./testC/*.tfrecord"))
        sess.run(init.initializer, feed_dict={self.filenames: eval_filenames})
        scores = []
        truepf = []
        IoUs = []
        try:
            while True:
                score, btrue_pf = sess.run([self.logits, self.label], feed_dict={self.drop_prob: 0.0, self.shift: [0, 0]})
                # Clamp predictions to the valid [0, 512] coordinate range.
                score = [max(0.0, score[0]), min(512.0, score[1])]
                scores += [score[0], score[1]]
                truepf += [btrue_pf[0][3], btrue_pf[0][2]]
                # 1-D IoU of predicted vs. true [left, right] interval.
                IoUs += [(min(score[1], btrue_pf[0][2]) - max(score[0], btrue_pf[0][3])) / (max(score[1], btrue_pf[0][2]) - min(score[0], btrue_pf[0][3]))]
        except tf.errors.OutOfRangeError:
            pass
        print('score= ', scores, 'label= ', truepf)
        pf_error = np.mean(abs(np.array(scores) - np.array(truepf)))
        IoU = np.mean(np.array(IoUs))
        evalsum = tf.compat.v1.Summary()
        evalsum.value.add(tag='pf_error', simple_value=pf_error)
        evalsum.value.add(tag='IoU', simple_value=IoU)
        writer.add_summary(evalsum, global_step=step)
        return pf_error

    def train(self, n_epochs):
        """Full training loop with periodic evaluation and best-checkpoint saving."""
        # makedirs with exist_ok replaces the previous bare try/except,
        # and also creates the parent 'checkpoints' directory if missing.
        os.makedirs('checkpoints/' + FLAGS.EXP, exist_ok=True)
        writer = tf.compat.v1.summary.FileWriter('./graphs/' + FLAGS.EXP, tf.compat.v1.get_default_graph())
        config = tf.compat.v1.ConfigProto(log_device_placement=False)
        config.gpu_options.per_process_gpu_memory_fraction = 0.9
        with tf.compat.v1.Session(config=config) as sess:
            sess.run(tf.compat.v1.global_variables_initializer())
            saver = tf.compat.v1.train.Saver()
            ckpt = tf.train.get_checkpoint_state('checkpoints/' + FLAGS.EXP)
            if ckpt and ckpt.model_checkpoint_path:
                saver.restore(sess, ckpt.model_checkpoint_path)
            step = self.gstep.eval()
            best_error = 200
            for epoch in range(n_epochs):
                step = self.train_one_epoch(sess, saver, self.iterator, writer, epoch, step)
                if (epoch + 1) % 10 == 1:
                    pf_error = self.eval_once(sess, self.iterator, writer, step)
                    if pf_error <= best_error:
                        best_error = min(pf_error, best_error)
                        saver.save(sess, 'checkpoints/' + FLAGS.EXP + '/ckpt', step)
            saver.save(sess, 'checkpoints/' + FLAGS.EXP + '/ckpt', step)
            print('DONE with best error ', best_error)
        writer.close()
# Script entry point: build the graph and run the training loop.
if __name__ == '__main__':
    model = ConvNet()
    model.build()
    model.train(n_epochs=2000)
| 7,202 | 42.920732 | 201 | py |
MRI-ROI-prediction | MRI-ROI-prediction-main/main.py | import os
import numpy as np
import time
import glob
import random
import tensorflow as tf
tf.compat.v1.disable_eager_execution()
FLAGS = tf.compat.v1.flags.FLAGS
tf.compat.v1.flags.DEFINE_string('EXP','temp',"exp. name")
tf.compat.v1.flags.DEFINE_integer('mod', 0, "model") # 0=share, 1=chstack, 2=3D
class ConvNet(object):
    """Regressor for the top/bottom ROI bounds of 512x512 MRI volumes.

    The backbone is selected at construction time via the --mod flag
    (0 = slice-shared encoder, 1 = channel-stacked 2D convs, 2 = 3D convs).
    Input comes from TFRecords; random pixel shifts are applied as
    augmentation and the coordinate labels are shifted accordingly.
    """

    def __init__(self):
        self.lr = 0.0001
        self.batch_size = 1
        self.gstep = tf.Variable(0, dtype=tf.int32, trainable=False, name='global_step')
        self.image_size = 512
        if FLAGS.mod == 1:
            from bmbn2D import inference
        elif FLAGS.mod == 2:
            from bmbn import inference
        else:
            from share import inference
        # BUGFIX: `from ... import inference` only binds a local name inside
        # __init__, so build()'s call to self.inference() raised
        # AttributeError.  Bind the selected backbone-builder to this
        # instance as a bound method.
        self.inference = inference.__get__(self)

    def parser(self, serialized_example):
        """Parses a single tf.Example into image and label tensors."""
        features = tf.io.parse_single_example(serialized_example,
                                              features={
                                                  'top': tf.io.FixedLenFeature([], tf.float32),
                                                  'bottom': tf.io.FixedLenFeature([], tf.float32),
                                                  'right': tf.io.FixedLenFeature([], tf.float32),
                                                  'left': tf.io.FixedLenFeature([], tf.float32),
                                                  'image': tf.io.FixedLenFeature([], tf.string),
                                              }, name='features')
        image = tf.io.decode_raw(features['image'], tf.float32)
        image = tf.reshape(image, [self.image_size, self.image_size, -1])
        label = tf.stack([features['top'], features['bottom'], features['right'], features['left']])
        return image, label

    def get_data(self):
        """Build the tf.data pipeline plus the shift-augmentation ops."""
        with tf.name_scope('data'):
            self.filenames = tf.compat.v1.placeholder(tf.string, shape=[None])
            dataset = tf.data.TFRecordDataset(self.filenames)
            dataset = dataset.map(self.parser, num_parallel_calls=4)
            if FLAGS.mod != 1:
                dataset = dataset.batch(1)
            else:
                # Channel-stacked model needs padding to a fixed 40 slices.
                dataset = dataset.padded_batch(self.batch_size, padded_shapes=([512, 512, 40], [4]))
            dataset = dataset.shuffle(100)
            self.iterator = tf.compat.v1.data.make_initializable_iterator(dataset)
            self.img, self.label = self.iterator.get_next()
            self.img = tf.image.per_image_standardization(self.img)
            self.shift = tf.compat.v1.placeholder(tf.int32, name='shift')
            self.img = tf.roll(self.img, self.shift, [0, 1])
            # The vertical component of the shift moves the top/bottom labels.
            self.label += tf.cast(self.shift[0], tf.float32)

    def loss(self):
        """MSE between the predicted pair and the (bottom, top) labels."""
        with tf.name_scope('loss'):
            self.loss = tf.keras.losses.MSE(self.label[:, 1], (self.logits[0])) + tf.keras.losses.MSE(self.label[:, 0], self.logits[1])

    def optimize(self):
        """Adam step on the loss, incrementing the global step."""
        self.opt = tf.compat.v1.train.AdamOptimizer(self.lr).minimize(self.loss,
                                                                      global_step=self.gstep)

    def summary(self):
        """Register the scalar loss summary for TensorBoard."""
        with tf.name_scope('summaries'):
            tf.compat.v1.summary.scalar('loss', self.loss)
            self.summary_op = tf.compat.v1.summary.merge_all()

    def build(self):
        """Assemble the full graph: data, backbone, loss, optimizer, summaries."""
        self.get_data()
        self.inference()
        self.loss()
        self.optimize()
        self.summary()

    def train_one_epoch(self, sess, saver, init, writer, epoch, step):
        """Run one pass over the training TFRecords with random shifts."""
        train_filenames = sorted(glob.glob("/mnt/raid5/kllei/Loc/trainab/*.tfrecord"))
        sess.run(init.initializer, feed_dict={self.filenames: train_filenames})
        try:
            while True:
                shiftof = [-30, -20, -10, 0, 10, 20, 30]
                shiftofx = [-6, -3, 0, 3, 6]
                dx = random.choice(shiftofx)
                dy = random.choice(shiftof)
                _, l, summaries, tsnr, tscore, img = sess.run([self.opt, self.loss, self.summary_op, self.label, self.logits, self.img], feed_dict={self.drop_prob: 0.2, self.shift: [dy, dx]})
                writer.add_summary(summaries, global_step=step)
                if step % 100 == 0:
                    print('Loss at step {0}: {1}'.format(step, l))
                step += 1
        except tf.errors.OutOfRangeError:
            # End of the epoch: the dataset iterator is exhausted.
            pass
        return step

    def eval_once(self, sess, init, writer, step):
        """Evaluate mean coordinate error and IoU on the test TFRecords."""
        eval_filenames = sorted(glob.glob("./testab/*.tfrecord"))
        sess.run(init.initializer, feed_dict={self.filenames: eval_filenames})
        scores = []
        truepf = []
        IoUs = []
        try:
            while True:
                score, btrue_pf = sess.run([self.logits, self.label], feed_dict={self.drop_prob: 0.0, self.shift: [0, 0]})
                scores += [score[0], score[1]]
                truepf += [btrue_pf[0][1], btrue_pf[0][0]]
                # 1-D IoU of predicted vs. true [bottom, top] interval.
                IoUs += [(min(score[1], btrue_pf[0][0]) - max(score[0], btrue_pf[0][1])) / (max(score[1], btrue_pf[0][0]) - min(score[0], btrue_pf[0][1]))]
        except tf.errors.OutOfRangeError:
            pass
        print('score= ', scores, 'label= ', truepf)
        pf_error = np.mean(abs(np.array(scores) - np.array(truepf)))
        IoU = np.mean(np.array(IoUs))
        evalsum = tf.compat.v1.Summary()
        evalsum.value.add(tag='pf_error', simple_value=pf_error)
        evalsum.value.add(tag='IoU', simple_value=IoU)
        writer.add_summary(evalsum, global_step=step)
        return pf_error

    def train(self, n_epochs):
        """Full training loop with periodic evaluation and best-checkpoint saving."""
        # makedirs with exist_ok replaces the previous bare try/except,
        # and also creates the parent 'checkpoints' directory if missing.
        os.makedirs('checkpoints/' + FLAGS.EXP, exist_ok=True)
        writer = tf.compat.v1.summary.FileWriter('./graphs/' + FLAGS.EXP, tf.compat.v1.get_default_graph())
        config = tf.compat.v1.ConfigProto(log_device_placement=False)
        config.gpu_options.per_process_gpu_memory_fraction = 0.9
        with tf.compat.v1.Session(config=config) as sess:
            sess.run(tf.compat.v1.global_variables_initializer())
            saver = tf.compat.v1.train.Saver()
            ckpt = tf.train.get_checkpoint_state('checkpoints/' + FLAGS.EXP)
            if ckpt and ckpt.model_checkpoint_path:
                saver.restore(sess, ckpt.model_checkpoint_path)
            step = self.gstep.eval()
            best_error = 200
            for epoch in range(n_epochs):
                step = self.train_one_epoch(sess, saver, self.iterator, writer, epoch, step)
                if (epoch + 1) % 10 == 1:
                    pf_error = self.eval_once(sess, self.iterator, writer, step)
                    if pf_error <= best_error:
                        best_error = min(pf_error, best_error)
                        saver.save(sess, 'checkpoints/' + FLAGS.EXP + '/ckpt', step)
            saver.save(sess, 'checkpoints/' + FLAGS.EXP + '/ckpt', step)
            print('DONE with best error ', best_error)
        writer.close()
# Script entry point: build the graph and run the training loop.
if __name__ == '__main__':
    model = ConvNet()
    model.build()
    model.train(n_epochs=3000)
| 7,211 | 42.709091 | 201 | py |
MRI-ROI-prediction | MRI-ROI-prediction-main/bmbn2D.py | import tensorflow as tf
def inference(self):
    """2D backbone over channel-stacked slices with one residual stage.

    Consumes self.img, creates the self.drop_prob placeholder and sets
    self.logits to a pair of regressed scalars.
    """
    x = tf.keras.layers.Conv2D(filters=16, kernel_size=[5, 5],
                               padding='SAME', name='conv0')(self.img)
    x = tf.keras.layers.MaxPool2D(pool_size=[2, 2], strides=2, name='pool0')(x)
    x = tf.keras.layers.BatchNormalization()(x)
    x = tf.keras.layers.ReLU()(x)
    x = tf.keras.layers.Conv2D(filters=32, kernel_size=[5, 5],
                               padding='SAME', name='conv1')(x)
    x = tf.keras.layers.MaxPool2D(pool_size=[2, 2], strides=2, name='pool1')(x)
    x = tf.keras.layers.BatchNormalization()(x)
    x = tf.keras.layers.ReLU()(x)
    x = tf.keras.layers.Conv2D(filters=32, kernel_size=[5, 5], strides=[2, 2],
                               padding='SAME', name='conv2')(x)
    x = tf.keras.layers.BatchNormalization()(x)
    res_in = tf.keras.layers.ReLU()(x)
    # Residual stage: two stacked convs plus a strided 1x1 shortcut.
    y = tf.keras.layers.Conv2D(filters=32, kernel_size=[3, 3], strides=2,
                               padding='SAME', name='conv3')(res_in)
    y = tf.keras.layers.BatchNormalization()(y)
    y = tf.keras.layers.ReLU()(y)
    y = tf.keras.layers.Conv2D(filters=32, kernel_size=[3, 3], strides=1,
                               padding='SAME', name='conv31')(y)
    y = tf.keras.layers.BatchNormalization()(y)
    y = tf.keras.layers.ReLU()(y)
    shortcut = tf.keras.layers.Conv2D(filters=32, kernel_size=[1, 1], strides=2,
                                      padding='SAME', name='conv30')(res_in)
    z = tf.keras.layers.BatchNormalization()(y + shortcut)
    z = tf.keras.layers.ReLU()(z)
    z = tf.keras.layers.Conv2D(filters=16, kernel_size=[3, 3], strides=2,
                               padding='SAME', name='conv4')(z)
    z = tf.keras.layers.BatchNormalization()(z)
    z = tf.keras.layers.ReLU()(z)
    self.drop_prob = tf.compat.v1.placeholder(tf.float32, name='keep_prob')
    z = tf.keras.layers.Dropout(self.drop_prob, name='dropout')(z, training=True)
    flat = tf.keras.layers.Flatten()(z)
    self.logits = tf.squeeze(tf.keras.layers.Dense(2)(flat))
| 2,898 | 44.296875 | 91 | py |
MRI-ROI-prediction | MRI-ROI-prediction-main/bmbn.py | import tensorflow as tf
def inference(self):
    """3D-conv backbone with two parallel dropout heads.

    Consumes self.img (expanded with a trailing channel axis), creates the
    self.drop_prob placeholder, and sets self.logits to a pair of scalars —
    one from each head's mean-pooled features.
    """
    conv0 = tf.keras.layers.Conv3D(filters=16,
                                   kernel_size=[5,5,5],
                                   padding='SAME',
                                   name='conv0')(tf.expand_dims(self.img, axis=-1))
    pool0 = tf.keras.layers.MaxPool3D(pool_size=[2, 2,1], strides=2, name='pool0')(conv0)
    n0=tf.keras.layers.BatchNormalization()(pool0)
    a0=tf.keras.layers.ReLU()(n0)
    conv1 = tf.keras.layers.Conv3D(filters=32,
                                   kernel_size=[5, 5,5],
                                   padding='SAME',
                                   name='conv1')(a0)
    pool1 = tf.keras.layers.MaxPool3D(pool_size=[2,2, 1], strides=2, name='pool1')(conv1)
    n1=tf.keras.layers.BatchNormalization()(pool1)
    a1=tf.keras.layers.ReLU()(n1)
    conv2 = tf.keras.layers.Conv3D(filters=32,
                                   kernel_size=[5, 5,5],
                                   strides=[2,2,1],
                                   padding='SAME',
                                   name='conv2')(a1)
    n2=tf.keras.layers.BatchNormalization()(conv2)
    a2=tf.keras.layers.ReLU()(n2)
    conv3 = tf.keras.layers.Conv3D(filters=32,
                                   kernel_size=[3,3,3],
                                   strides=2,
                                   padding='SAME',
                                   name='conv3')(a2)
    n3=tf.keras.layers.BatchNormalization()(conv3)
    a3=tf.keras.layers.ReLU()(n3)
    # Two sibling heads branch from a3.
    conv4 = tf.keras.layers.Conv3D(filters=16,
                                   kernel_size=[3, 3,3],
                                   strides=2,
                                   padding='SAME',
                                   name='conv4')(a3)
    n4=tf.keras.layers.BatchNormalization()(conv4)
    a4=tf.keras.layers.ReLU()(n4)
    conv42 = tf.keras.layers.Conv3D(filters=16,
                                    kernel_size=[3, 3,3],
                                    strides=2,
                                    padding='SAME',
                                    name='conv42')(a3)
    n42=tf.keras.layers.BatchNormalization()(conv42)
    a42=tf.keras.layers.ReLU()(n42)
    self.drop_prob = tf.compat.v1.placeholder(tf.float32, name='keep_prob')
    dropout = tf.keras.layers.Dropout(self.drop_prob,
                                      name='dropout')(a4,training=True)
    dropout2 = tf.keras.layers.Dropout(self.drop_prob,
                                       name='dropout2')(a42,training=True)
    flat=tf.keras.layers.Flatten()(dropout)
    mean=tf.math.reduce_mean(flat,keepdims=True)
    flat2=tf.keras.layers.Flatten()(dropout2)
    # BUGFIX: the second head previously averaged `flat` (the first head's
    # features) instead of its own `flat2`, so it ignored the conv42 branch.
    mean2=tf.math.reduce_mean(flat2,keepdims=True)
    self.logits=(tf.squeeze(tf.keras.layers.Dense(2)(mean))[0],tf.squeeze(tf.keras.layers.Dense(2)(mean2))[0])
| 2,932 | 44.828125 | 114 | py |
MRI-ROI-prediction | MRI-ROI-prediction-main/share.py | import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
def inference(self):
    """Slice-shared encoder with optional attention fusion across slices.

    Every slice of self.img is passed through one weight-shared encoder;
    per-slice attention scores weight the fused feature map, which is then
    reduced to two regression outputs in self.logits.
    """
    # Per-slice encoder, applied with shared weights to every slice.
    encoder_input = keras.Input(shape=(512, 512, 1), name="one_slice")
    feat = layers.Conv2D(16, 5, activation="relu", strides=2)(encoder_input)
    feat = layers.LayerNormalization()(feat)
    feat = layers.Conv2D(32, 5, activation="relu", strides=2)(feat)
    encoder_output = layers.LayerNormalization()(feat)
    # Extra branch kept for layer-name parity; its output is never consumed.
    deep = layers.Conv2D(32, 3, activation="relu", strides=2)(encoder_output)
    deep = layers.BatchNormalization()(deep)
    encoder = keras.Model(encoder_input, encoder_output, name="encoder")
    # Attention head producing one scalar score per slice.
    h = layers.Conv2D(32, 3, activation="relu", strides=2)(encoder_output)
    h = layers.LayerNormalization()(h)
    h = layers.Flatten()(h)
    attnn_output = layers.Dense(1)(h)
    attnder = keras.Model(encoder_input, attnn_output, name="attentionnet")
    use_attn = True
    # Move the slice axis to the front: (slices, H, W, batch).
    self.img = tf.transpose(self.img, [3, 1, 2, 0])
    stack = tf.vectorized_map(lambda x0: encoder(tf.expand_dims(x0, axis=0)), self.img)
    if use_attn:
        attention = tf.vectorized_map(lambda x0: attnder(tf.expand_dims(x0, axis=0)), self.img)
        self.alpha = layers.Softmax()(tf.squeeze(attention, [1, 2]))
        fused = tf.math.reduce_sum(stack * tf.reshape(self.alpha, (-1, 1, 1, 1, 1)), axis=0)
    else:
        fused = tf.math.reduce_mean(stack, axis=0)
    out = layers.Conv2D(32, 3, activation="relu", strides=2)(fused)
    out = layers.BatchNormalization()(out)
    flat = layers.Flatten()(out)
    self.drop_prob = tf.compat.v1.placeholder(tf.float32, name='keep_prob')
    dropout = layers.Dropout(self.drop_prob, name='dropout')(flat, training=True)
    self.logits = tf.squeeze(layers.Dense(2)(dropout))
| 1,724 | 42.125 | 92 | py |
self-adaptive | self-adaptive-master/eval.py | import glob
from datetime import datetime
from tqdm import tqdm
from torch.utils.data import DataLoader
from utils.parser import val_parser
from loss.semantic_seg import CrossEntropyLoss
import models.backbone
import models
from utils.modeling import freeze_layers
from utils.self_adapt_norm import reinit_alpha
from utils.metrics import *
from utils.calibration import *
from datasets.labels import *
from datasets.self_adapt_augment import TrainTestAugDataset
torch.backends.cudnn.benchmark = True
# We set a maximum image size which can be fit on the GPU, in case the image is larger, we first downsample it
# to then upsample the prediction back to the original resolution. This is especially required for high resolution
# Mapillary images
img_max_size = [1024, 2048]
def main(opts):
    """Run self-adaptive inference/evaluation over the test split.

    Depending on `opts`, this either (a) runs plain inference
    (`opts.only_inf`), (b) evaluates the test-time-augmentation consensus
    directly (`opts.tta`), or (c) fine-tunes the model per image on its own
    augmentation-derived pseudo ground truth before predicting.  Reports
    mean IoU and, optionally, the expected calibration error.
    """
    # Setup metric
    time_stamp = datetime.now().strftime('%Y_%m_%d_%H_%M_%S')
    iou_meter = runningScore(opts.num_classes)
    print(f"Current inference run {time_stamp} has started!")
    # Set device
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    # Setup dataset and transforms
    test_dataset = TrainTestAugDataset(device=device,
                                       root=opts.dataset_root,
                                       only_inf=opts.only_inf,
                                       source=opts.source,
                                       crop_size=img_max_size,
                                       split=opts.dataset_split,
                                       threshold=opts.threshold,
                                       tta=opts.tta,
                                       flips=opts.flips,
                                       scales=opts.scales,
                                       grayscale=opts.grayscale)
    test_loader = DataLoader(test_dataset,
                             batch_size=opts.batch_size,
                             shuffle=False,
                             num_workers=opts.num_workers)
    # Load and setup model
    model = models.__dict__[opts.arch_type](backbone_name=opts.backbone_name,
                                            num_classes=opts.num_classes,
                                            update_source_bn=False,
                                            dropout=opts.dropout)
    model = torch.nn.DataParallel(model)
    # Pick newest checkpoints
    if os.path.exists(opts.checkpoints_root):
        checkpoint = max(glob.glob(os.path.join(opts.checkpoints_root, opts.checkpoint)), key=os.path.getctime)
        model.load_state_dict(torch.load(checkpoint, map_location=device), strict=True)
        # Reinitialize alpha if a custom alpha other than the one in the checkpoints is given
        if opts.alpha is not None:
            reinit_alpha(model, alpha=opts.alpha, device=device)
    else:
        raise ValueError(f"Checkpoints directory {opts.checkpoints_root} does not exist")
    model = model.to(device)
    # Set up Self-adaptive learning optimizer and loss
    optimizer = torch.optim.SGD(
        model.parameters(),
        lr=opts.base_lr,
        momentum=opts.momentum,
        weight_decay=opts.weight_decay
    )
    criterion = CrossEntropyLoss().to(device)
    if opts.calibration:
        # Calibration meter
        cal_meter = CalibrationMeter(
            device,
            n_bins=10,
            num_classes=opts.num_classes,
            num_images=len(test_loader)
        )
    model.eval()
    # Create GradScaler for mixed precision
    if opts.mixed_precision:
        scaler = torch.cuda.amp.GradScaler()
    for test_idx, (img_test, gt_test, crop_test, crop_transforms) in enumerate(tqdm(test_loader)):
        # Put img on GPU if available
        img_test = img_test.to(device)
        if opts.only_inf:
            # Forward pass with original image
            with torch.no_grad():
                if opts.mixed_precision:
                    with torch.cuda.amp.autocast():
                        out_test = model(img=img_test)['pred']
                else:
                    out_test = model(img=img_test)['pred']
        else:
            # Reload checkpoints
            # The source checkpoint is restored for every image so the
            # per-image adaptation never accumulates across the dataset.
            model.load_state_dict(torch.load(checkpoint, map_location=device), strict=True)
            # Reinitialize alpha if a custom alpha other than the one in the checkpoints is given
            if opts.alpha is not None:
                reinit_alpha(model, alpha=opts.alpha, device=device)
            model = model.to(device)
            # Compute augmented predictions
            crop_test_fused = []
            for crop_test_sub in crop_test:
                with torch.no_grad():
                    if opts.mixed_precision:
                        with torch.cuda.amp.autocast():
                            out_test = model(img=crop_test_sub)['pred']
                    else:
                        out_test = model(img=crop_test_sub)['pred']
                crop_test_fused.append(torch.nn.functional.softmax(out_test, dim=1))
            # Create pseudo gt from augmentations based on their softmax probabilities
            pseudo_gt = test_dataset.create_pseudo_gt(
                crop_test_fused, crop_transforms, [1, opts.num_classes, *img_test.shape[-2:]]
            )
            pseudo_gt = pseudo_gt.to(device)
            if opts.tta:
                # Use pseudo gt for evaluation
                out_test = pseudo_gt
            else:
                model.train()
                # Freeze layers if given
                freeze_layers(opts, model)
                # Self-adaptive learning loop
                model = model.to(device)
                for epoch in range(opts.num_epochs):
                    if opts.mixed_precision:
                        with torch.cuda.amp.autocast():
                            out_test = model(img=img_test)['pred']
                    else:
                        out_test = model(img=img_test)['pred']
                    if opts.mixed_precision:
                        with torch.cuda.amp.autocast():
                            loss_train = criterion(out_test, pseudo_gt)
                    else:
                        loss_train = criterion(out_test, pseudo_gt)
                    optimizer.zero_grad()
                    if opts.mixed_precision:
                        scaler.scale(loss_train).backward()
                        scaler.step(optimizer)
                        scaler.update()
                    else:
                        loss_train.backward()
                        optimizer.step()
                # Do actual forward pass with updated model
                model.eval()
                with torch.no_grad():
                    if opts.mixed_precision:
                        with torch.cuda.amp.autocast():
                            out_test = model(img=img_test)['pred']
                    else:
                        out_test = model(img=img_test)['pred']
        # Upsample prediction to gt resolution
        out_test = torch.nn.functional.interpolate(out_test, size=gt_test.shape[-2:], mode='bilinear')
        # Update calibration meter
        if opts.calibration:
            cal_meter.calculate_bins(out_test, gt_test.to(device))
        # Add prediction
        iou_meter.update(gt_test.cpu().numpy(), torch.argmax(out_test, dim=1).cpu().numpy())
    # Save output
    score, _, _, _ = iou_meter.get_scores()
    mean_iou = score['Mean IoU :']
    # Compute ECE
    if opts.calibration:
        cal_meter.calculate_mean_over_dataset()
        print(f"ECE: {cal_meter.overall_ece}")
    print(f"Mean IoU: {mean_iou}")
    print(f"Current inference run {time_stamp} is finished!")
# Script entry point: parse evaluation flags, echo them, and run inference.
if __name__ == '__main__':
    args = val_parser()
    print(args)
    main(args)
self-adaptive | self-adaptive-master/train.py | import pathlib, os
from torch.utils.data import DataLoader
from torch.nn import SyncBatchNorm
from datetime import datetime
from tqdm import tqdm
from shutil import copyfile
from utils.parser import train_parser
import models.backbone
from loss.semantic_seg import CrossEntropyLoss
import datasets
from optimizer.schedulers import *
from utils.metrics import *
from utils.distributed import init_process, clean_up
from utils import transforms
from utils.self_adapt_norm import reinit_alpha
import torch.distributed
import torch.multiprocessing as mp
from torch.utils.data.distributed import DistributedSampler
# We set a maximum image size which can be fit on the GPU, in case the image is larger, we first downsample it
# to then upsample the prediction back to the original resolution. This is especially required for high resolution
# Mapillary images
img_max_size = (1024, 2048)
def main(opts):
    """Dispatch training either to distributed workers or a single process.

    Without CUDA, distributed mode is forced off.  In distributed mode the
    global batch size is divided across the GPUs and one `train` worker is
    spawned per GPU; otherwise `train` runs directly on device 0.
    """
    if not torch.cuda.is_available():
        # Distributed training requires CUDA.
        opts.distributed = False
    if not opts.distributed:
        # Single process: DataParallel (GPU) or CPU.
        train(gpu=0, opts=opts)
    else:
        opts.batch_size = opts.batch_size // opts.gpus
        mp.spawn(train,
                 nprocs=opts.gpus,
                 args=(opts,))
def train(gpu: int,
          opts):
    """Train the segmentation model on one worker (rank `gpu`).

    Sets up transforms, datasets, loaders, the model (optionally wrapped in
    SyncBatchNorm + DistributedDataParallel), loss, optimizer, and LR
    scheduler; periodically validates over a sweep of alpha values and keeps
    the best checkpoint per alpha, finally copying the overall best one.
    """
    # Create checkpoints directory
    pathlib.Path(opts.checkpoints_root).mkdir(parents=True, exist_ok=True)
    # Setup dataset
    # Get target domain from dataset path
    target_train = os.path.basename(opts.dataset_root)
    target_val = os.path.basename(opts.val_dataset_root)
    train_transforms = transforms.Compose([transforms.RandomResizedCrop(opts.crop_size),
                                           transforms.RandomHFlip(),
                                           transforms.RandGaussianBlur(),
                                           transforms.ColorJitter(),
                                           transforms.MaskGrayscale(),
                                           transforms.ToTensor(),
                                           transforms.IdsToTrainIds(source=target_train, target=target_train),
                                           transforms.Normalize()])
    val_transforms = transforms.Compose([transforms.ToTensor(),
                                         transforms.IdsToTrainIds(source=target_train, target=target_val),
                                         transforms.ImgResize(img_max_size),
                                         transforms.Normalize()])
    train_dataset = datasets.__dict__[target_train](root=opts.dataset_root,
                                                    split="train",
                                                    transforms=train_transforms)
    val_dataset = datasets.__dict__[target_val](root=opts.val_dataset_root,
                                                split="val",
                                                transforms=val_transforms)
    # Setup model
    model = models.__dict__[opts.arch_type](backbone_name=opts.backbone_name,
                                            num_classes=opts.num_classes,
                                            alpha=opts.alpha,
                                            dropout=opts.dropout,
                                            update_source_bn=True)
    if opts.distributed:
        # Initialize process group
        rank = init_process(opts, gpu)
        # Convert batch normalization to SyncBatchNorm and setup CUDA
        model = SyncBatchNorm.convert_sync_batchnorm(model)
        torch.cuda.set_device(gpu)
        model.cuda(gpu)
        # Wrap model in DistributedDataParallel
        model = torch.nn.parallel.DistributedDataParallel(module=model, device_ids=[gpu], find_unused_parameters=True)
        # Setup data sampler and loader
        train_sampler = DistributedSampler(dataset=train_dataset, num_replicas=opts.world_size, rank=rank, shuffle=True)
        val_sampler = DistributedSampler(dataset=val_dataset, num_replicas=opts.world_size, rank=rank, shuffle=False)
    else:
        # Run on GPU if available else on CPU
        device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
        model = torch.nn.DataParallel(model).to(device)
        train_sampler = None
        val_sampler = None
    # Set main process and device
    main_process = not opts.distributed or (opts.distributed and rank == 0)
    device = gpu if opts.distributed else device
    # Add tensorboard writer and setup metric
    time_stamp = datetime.now().strftime('%Y_%m_%d_%H_%M_%S')
    if main_process:
        print(f"Current training run {time_stamp} has started!")
    iou_meter = runningScore(opts.num_classes)
    # Either sweep alpha over [0, 1] or validate only the configured alpha.
    alphas = np.round(np.linspace(0, 1, opts.num_alphas), 5) if opts.num_alphas > 1 else [opts.alpha]
    # Setup dataloader
    train_loader = DataLoader(train_dataset,
                              batch_size=opts.batch_size,
                              num_workers=opts.num_workers,
                              sampler=train_sampler,
                              shuffle=(train_sampler is None),
                              pin_memory=True if torch.cuda.is_available() else False)
    val_loader = DataLoader(val_dataset,
                            batch_size=1,
                            num_workers=opts.num_workers,
                            sampler=val_sampler,
                            shuffle=False,
                            pin_memory=True if torch.cuda.is_available() else False)
    # Setup loss
    criterion = CrossEntropyLoss().to(device)
    # Setup lr scheduler, optimizer and loss
    optimizer = torch.optim.SGD(model.parameters(),
                                lr=opts.base_lr,
                                momentum=opts.momentum,
                                weight_decay=opts.weight_decay)
    scheduler = get_scheduler(scheduler_type=opts.lr_scheduler,
                              optimizer=optimizer,
                              max_iter=len(train_loader) * opts.num_epochs + 1)
    # Training
    mean_iou_best_alphas = [0] * opts.num_alphas
    model.train()
    for epoch in tqdm(range(opts.num_epochs)):
        if opts.distributed:
            train_sampler.set_epoch(epoch)
        for train_idx, (img_train, gt_train) in enumerate(train_loader):
            # Put img and gt on GPU if available
            img_train, gt_train = img_train.to(device), gt_train.to(device)
            # Forward pass, backward pass and optimization
            out_train = model(img=img_train)
            loss_train = criterion(out_train['pred'], gt_train)
            # Zero the parameter gradients
            optimizer.zero_grad()
            loss_train.backward()
            optimizer.step()
            scheduler.step()
        # Validation
        # Only the main process validates; one full pass per candidate alpha.
        if epoch >= opts.validation_start and epoch % opts.validation_step == 0:
            if main_process:
                # Set model to eval
                model.eval()
                with torch.no_grad():
                    score_alphas, class_iou_epoch_alphas = [], []
                    for alpha_idx, alpha in enumerate(alphas):
                        reinit_alpha(model, alpha, device)
                        for val_idx, (img_val, gt_val) in enumerate(val_loader):
                            # Put img and gt on GPU if available
                            img_val, gt_val = img_val.to(device), gt_val.to(device)
                            # Forward pass and loss calculation
                            out_val = model(img=img_val)['pred']
                            # Upsample prediction to gt resolution
                            out_val = torch.nn.functional.interpolate(out_val,
                                                                      size=gt_val.shape[-2:],
                                                                      mode='bilinear')
                            # Update iou meter
                            iou_meter.update(gt_val.cpu().numpy(), torch.argmax(out_val, dim=1).cpu().numpy())
                        score, class_iou_epoch, _, _ = iou_meter.get_scores()
                        mean_iou_epoch = score['Mean IoU :']
                        score_alphas.append(mean_iou_epoch)
                        iou_meter.reset()
                        # Save model if mean iou higher than before
                        if mean_iou_epoch > mean_iou_best_alphas[alpha_idx]:
                            checkpoints_path = os.path.join(opts.checkpoints_root,
                                                            time_stamp + f'_alpha_{alpha}.pth')
                            if os.path.isfile(checkpoints_path):
                                os.remove(checkpoints_path)
                            torch.save(model.state_dict(), checkpoints_path)
                            mean_iou_best_alphas[alpha_idx] = mean_iou_epoch
                # Switch model to train
                model.train()
        # Final result
        if main_process and epoch == opts.num_epochs - 1:
            print(f"alphas: {[i for i in alphas]}:")
            print(f"IoUs: {mean_iou_best_alphas}")
            checkpoints_path = os.path.join(opts.checkpoints_root, time_stamp + '.pth')
            if os.path.isfile(checkpoints_path):
                os.remove(checkpoints_path)
            alpha_ind_max = torch.argmax(torch.tensor(mean_iou_best_alphas)).item()
            alpha = alphas[alpha_ind_max]
            # NOTE(review): assumes at least one validation pass ran and saved
            # a per-alpha checkpoint (validation_start < num_epochs) — confirm.
            checkpoints_alpha_path = os.path.join(opts.checkpoints_root,
                                                  time_stamp + f'_alpha_{alpha}.pth')
            copyfile(checkpoints_alpha_path, checkpoints_path)
            print(f"Saved checkpoint based on alpha = {alpha}")
            print(f"Current training run {time_stamp} is finished!")
    if opts.distributed:
        clean_up()
# Script entry point: parse training flags, echo them, and launch training.
if __name__ == '__main__':
    args = train_parser()
    print(args)
    main(args)
self-adaptive | self-adaptive-master/models/hrnet.py | """Source: https://github.com/HRNet/HRNet-Semantic-Segmentation"""
# ------------------------------------------------------------------------------
# Copyright (c) Microsoft
# Licensed under the MIT License.
# Written by RainbowSecret (yhyuan@pku.edu.cn)
# ------------------------------------------------------------------------------
import torch
import numpy as np
import logging
from typing import Dict
import torch.nn as nn
import torch.nn.functional as F
from torch.hub import load_state_dict_from_url
from utils.dropout import add_dropout
from utils.self_adapt_norm import replace_batchnorm
logger = logging.getLogger('hrnet_backbone')
ALIGN_CORNERS = None
__all__ = ['hrnet18', 'hrnet32', 'hrnet48']
model_urls = {
'hrnet18': 'https://opr0mq.dm.files.1drv.com/y4mIoWpP2n-LUohHHANpC0jrOixm1FZgO2OsUtP2DwIozH5RsoYVyv_De5wDgR6XuQmirMV3C0AljLeB-zQXevfLlnQpcNeJlT9Q8LwNYDwh3TsECkMTWXCUn3vDGJWpCxQcQWKONr5VQWO1hLEKPeJbbSZ6tgbWwJHgHF7592HY7ilmGe39o5BhHz7P9QqMYLBts6V7QGoaKrr0PL3wvvR4w',
'hrnet32': 'https://opr74a.dm.files.1drv.com/y4mKOuRSNGQQlp6wm_a9bF-UEQwp6a10xFCLhm4bqjDu6aSNW9yhDRM7qyx0vK0WTh42gEaniUVm3h7pg0H-W0yJff5qQtoAX7Zze4vOsqjoIthp-FW3nlfMD0-gcJi8IiVrMWqVOw2N3MbCud6uQQrTaEAvAdNjtjMpym1JghN-F060rSQKmgtq5R-wJe185IyW4-_c5_ItbhYpCyLxdqdEQ',
'hrnet48': 'https://optgaw.dm.files.1drv.com/y4mWNpya38VArcDInoPaL7GfPMgcop92G6YRkabO1QTSWkCbo7djk8BFZ6LK_KHHIYE8wqeSAChU58NVFOZEvqFaoz392OgcyBrq_f8XGkusQep_oQsuQ7DPQCUrdLwyze_NlsyDGWot0L9agkQ-M_SfNr10ETlCF5R7BdKDZdupmcMXZc-IE3Ysw1bVHdOH4l-XEbEKFAi6ivPUbeqlYkRMQ'
}
# model_urls = {
# 'resnet18_ibn_a': 'https://github.com/XingangPan/IBN-Net/releases/download/v1.0/resnet18_ibn_a-2f571257.pth',
# 'resnet34_ibn_a': 'https://github.com/XingangPan/IBN-Net/releases/download/v1.0/resnet34_ibn_a-94bc1577.pth',
# 'resnet50_ibn_a': 'https://github.com/XingangPan/IBN-Net/releases/download/v1.0/resnet50_ibn_a-d9d0bb7b.pth',
# 'resnet101_ibn_a': 'https://github.com/XingangPan/IBN-Net/releases/download/v1.0/resnet101_ibn_a-59ea0ac6.pth',
# 'resnet18_ibn_b': 'https://github.com/XingangPan/IBN-Net/releases/download/v1.0/resnet18_ibn_b-bc2f3c11.pth',
# 'resnet34_ibn_b': 'https://github.com/XingangPan/IBN-Net/releases/download/v1.0/resnet34_ibn_b-04134c37.pth',
# 'resnet50_ibn_b': 'https://github.com/XingangPan/IBN-Net/releases/download/v1.0/resnet50_ibn_b-9ca61e85.pth',
# 'resnet101_ibn_b': 'https://github.com/XingangPan/IBN-Net/releases/download/v1.0/resnet101_ibn_b-c55f6dba.pth',
# }
def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
    """Build a bias-free 3x3 convolution whose padding equals its dilation
    (spatial size is preserved at stride 1)."""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=dilation,
        groups=groups,
        dilation=dilation,
        bias=False,
    )
def conv1x1(in_planes, out_planes, stride=1):
    """Build a bias-free pointwise (1x1) convolution."""
    return nn.Conv2d(
        in_channels=in_planes,
        out_channels=out_planes,
        kernel_size=1,
        stride=stride,
        bias=False,
    )
class BasicBlock(nn.Module):
    """Two-layer residual block: conv3x3 -> BN -> ReLU -> conv3x3 -> BN, plus skip."""

    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
                 base_width=64, dilation=1, norm_layer=None):
        super(BasicBlock, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        if groups != 1 or base_width != 64:
            raise ValueError('BasicBlock only supports groups=1 and base_width=64')
        if dilation > 1:
            raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
        # The first conv (and the optional downsample projection) carries the stride.
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = norm_layer(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = norm_layer(planes)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        # Project the shortcut when shape changes, otherwise pass the input through.
        shortcut = x if self.downsample is None else self.downsample(x)
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        out += shortcut
        return self.relu(out)
class Bottleneck(nn.Module):
    """Three-layer residual bottleneck: 1x1 reduce -> 3x3 (strided) -> 1x1 expand, plus skip."""

    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
                 base_width=64, dilation=1, norm_layer=None):
        super(Bottleneck, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        width = int(planes * (base_width / 64.)) * groups
        # conv2 (and the optional downsample projection) carries the stride.
        self.conv1 = conv1x1(inplanes, width)
        self.bn1 = norm_layer(width)
        self.conv2 = conv3x3(width, width, stride, groups, dilation)
        self.bn2 = norm_layer(width)
        self.conv3 = conv1x1(width, planes * self.expansion)
        self.bn3 = norm_layer(planes * self.expansion)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        shortcut = x if self.downsample is None else self.downsample(x)
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        out += shortcut
        return self.relu(out)
class HighResolutionModule(nn.Module):
    """One HRNet stage module.

    Runs ``num_branches`` parallel residual branches (one per resolution) and
    then fuses them across resolutions: lower-resolution maps are 1x1-convolved
    and bilinearly upsampled, higher-resolution maps are downsampled with
    chains of stride-2 3x3 convolutions.
    """
    def __init__(self, num_branches, blocks, num_blocks, num_inchannels,
                 num_channels, fuse_method, multi_scale_output=True, norm_layer=None):
        super(HighResolutionModule, self).__init__()
        # Fail fast on inconsistent per-branch configuration.
        self._check_branches(
            num_branches, blocks, num_blocks, num_inchannels, num_channels)
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        self.norm_layer = norm_layer
        self.num_inchannels = num_inchannels
        self.fuse_method = fuse_method
        self.num_branches = num_branches
        # When False, only the highest-resolution output is produced.
        self.multi_scale_output = multi_scale_output
        self.branches = self._make_branches(
            num_branches, blocks, num_blocks, num_channels)
        self.fuse_layers = self._make_fuse_layers()
        self.relu = nn.ReLU(inplace=True)
    def _check_branches(self, num_branches, blocks, num_blocks,
                        num_inchannels, num_channels):
        # Every per-branch list must have exactly num_branches entries.
        if num_branches != len(num_blocks):
            error_msg = 'NUM_BRANCHES({}) <> NUM_BLOCKS({})'.format(
                num_branches, len(num_blocks))
            logger.error(error_msg)
            raise ValueError(error_msg)
        if num_branches != len(num_channels):
            error_msg = 'NUM_BRANCHES({}) <> NUM_CHANNELS({})'.format(
                num_branches, len(num_channels))
            logger.error(error_msg)
            raise ValueError(error_msg)
        if num_branches != len(num_inchannels):
            error_msg = 'NUM_BRANCHES({}) <> NUM_INCHANNELS({})'.format(
                num_branches, len(num_inchannels))
            logger.error(error_msg)
            raise ValueError(error_msg)
    def _make_one_branch(self, branch_index, block, num_blocks, num_channels,
                         stride=1):
        """Build one branch: ``num_blocks[branch_index]`` residual blocks at a fixed resolution."""
        # A 1x1 projection is needed when the stride or channel width changes.
        downsample = None
        if stride != 1 or \
           self.num_inchannels[branch_index] != num_channels[branch_index] * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.num_inchannels[branch_index],
                          num_channels[branch_index] * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                self.norm_layer(num_channels[branch_index] * block.expansion),
            )
        layers = []
        layers.append(block(self.num_inchannels[branch_index],
                            num_channels[branch_index], stride, downsample, norm_layer=self.norm_layer))
        # Record the branch's output width; fuse layers and later blocks use it.
        self.num_inchannels[branch_index] = \
            num_channels[branch_index] * block.expansion
        for i in range(1, num_blocks[branch_index]):
            layers.append(block(self.num_inchannels[branch_index],
                                num_channels[branch_index], norm_layer=self.norm_layer))
        return nn.Sequential(*layers)
    def _make_branches(self, num_branches, block, num_blocks, num_channels):
        branches = []
        for i in range(num_branches):
            branches.append(
                self._make_one_branch(i, block, num_blocks, num_channels))
        return nn.ModuleList(branches)
    def _make_fuse_layers(self):
        """Build fuse_layers[i][j]: maps branch j's output to branch i's width.

        j > i: 1x1 conv + BN (upsampling is done in forward());
        j == i: identity (stored as None);
        j < i: chain of (i - j) stride-2 3x3 convs, the last without ReLU and
        switching to branch i's channel count.
        """
        if self.num_branches == 1:
            return None
        num_branches = self.num_branches
        num_inchannels = self.num_inchannels
        fuse_layers = []
        for i in range(num_branches if self.multi_scale_output else 1):
            fuse_layer = []
            for j in range(num_branches):
                if j > i:
                    fuse_layer.append(nn.Sequential(
                        nn.Conv2d(num_inchannels[j],
                                  num_inchannels[i],
                                  1,
                                  1,
                                  0,
                                  bias=False),
                        self.norm_layer(num_inchannels[i])))
                elif j == i:
                    fuse_layer.append(None)
                else:
                    conv3x3s = []
                    for k in range(i - j):
                        if k == i - j - 1:
                            # Final downsampling conv: match branch i's channels, no ReLU.
                            num_outchannels_conv3x3 = num_inchannels[i]
                            conv3x3s.append(nn.Sequential(
                                nn.Conv2d(num_inchannels[j],
                                          num_outchannels_conv3x3,
                                          3, 2, 1, bias=False),
                                self.norm_layer(num_outchannels_conv3x3)))
                        else:
                            # Intermediate downsampling conv keeps branch j's channels.
                            num_outchannels_conv3x3 = num_inchannels[j]
                            conv3x3s.append(nn.Sequential(
                                nn.Conv2d(num_inchannels[j],
                                          num_outchannels_conv3x3,
                                          3, 2, 1, bias=False),
                                self.norm_layer(num_outchannels_conv3x3),
                                nn.ReLU(inplace=True)))
                    fuse_layer.append(nn.Sequential(*conv3x3s))
            fuse_layers.append(nn.ModuleList(fuse_layer))
        return nn.ModuleList(fuse_layers)
    def get_num_inchannels(self):
        # Per-branch output channel counts after this module.
        return self.num_inchannels
    def forward(self, x):
        """``x`` is a list of tensors, one per branch; returns the fused list."""
        if self.num_branches == 1:
            return [self.branches[0](x[0])]
        for i in range(self.num_branches):
            x[i] = self.branches[i](x[i])
        x_fuse = []
        for i in range(len(self.fuse_layers)):
            y = x[0] if i == 0 else self.fuse_layers[i][0](x[0])
            for j in range(1, self.num_branches):
                if i == j:
                    y = y + x[j]
                elif j > i:
                    width_output = x[i].shape[-1]
                    height_output = x[i].shape[-2]
                    # NOTE(review): align_corners=True here, while the final
                    # upsampling elsewhere in this file uses the module-level
                    # ALIGN_CORNERS (None) — confirm the asymmetry is intended.
                    y = y + F.interpolate(
                        self.fuse_layers[i][j](x[j]),
                        size=[height_output, width_output],
                        mode='bilinear',
                        align_corners=True
                    )
                else:
                    y = y + self.fuse_layers[i][j](x[j])
            x_fuse.append(self.relu(y))
        return x_fuse
# Maps the config's BLOCK string to the corresponding residual block class.
blocks_dict = {
    'BASIC': BasicBlock,
    'BOTTLENECK': Bottleneck
}
class HighResolutionNet(nn.Module):
    """HRNet semantic-segmentation network.

    A stem of two stride-2 3x3 convs is followed by four stages of
    HighResolutionModules that maintain parallel multi-resolution streams.
    The stage-4 outputs are upsampled to the highest resolution, concatenated
    and mapped to class logits by a small 1x1-conv head.

    Args:
        cfg: dict with 'STAGE1'..'STAGE4' sub-dicts (NUM_CHANNELS, BLOCK,
            NUM_BLOCKS, NUM_MODULES, NUM_BRANCHES, FUSE_METHOD).
        norm_layer: normalization layer factory; defaults to nn.BatchNorm2d.
        num_classes: number of output semantic classes.
    """
    def __init__(self,
                 cfg,
                 norm_layer=None,
                 num_classes: int = 19):
        super(HighResolutionNet, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        self.norm_layer = norm_layer
        # stem net: 3 -> 64 channels at 1/4 of the input resolution.
        self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=2, padding=1,
                               bias=False)
        self.bn1 = self.norm_layer(64)
        self.conv2 = nn.Conv2d(64, 64, kernel_size=3, stride=2, padding=1,
                               bias=False)
        self.bn2 = self.norm_layer(64)
        self.relu = nn.ReLU(inplace=True)
        # stage 1: a plain single-resolution residual stage on the stem output.
        self.stage1_cfg = cfg['STAGE1']
        num_channels = self.stage1_cfg['NUM_CHANNELS'][0]
        block = blocks_dict[self.stage1_cfg['BLOCK']]
        num_blocks = self.stage1_cfg['NUM_BLOCKS'][0]
        self.layer1 = self._make_layer(block, 64, num_channels, num_blocks)
        stage1_out_channel = block.expansion * num_channels
        # stage 2
        self.stage2_cfg = cfg['STAGE2']
        num_channels = self.stage2_cfg['NUM_CHANNELS']
        block = blocks_dict[self.stage2_cfg['BLOCK']]
        num_channels = [
            num_channels[i] * block.expansion for i in range(len(num_channels))]
        self.transition1 = self._make_transition_layer(
            [stage1_out_channel], num_channels)
        self.stage2, pre_stage_channels = self._make_stage(
            self.stage2_cfg, num_channels)
        # stage 3
        self.stage3_cfg = cfg['STAGE3']
        num_channels = self.stage3_cfg['NUM_CHANNELS']
        block = blocks_dict[self.stage3_cfg['BLOCK']]
        num_channels = [
            num_channels[i] * block.expansion for i in range(len(num_channels))]
        self.transition2 = self._make_transition_layer(
            pre_stage_channels, num_channels)
        self.stage3, pre_stage_channels = self._make_stage(
            self.stage3_cfg, num_channels)
        # stage 4
        self.stage4_cfg = cfg['STAGE4']
        num_channels = self.stage4_cfg['NUM_CHANNELS']
        block = blocks_dict[self.stage4_cfg['BLOCK']]
        num_channels = [
            num_channels[i] * block.expansion for i in range(len(num_channels))]
        self.transition3 = self._make_transition_layer(
            pre_stage_channels, num_channels)
        self.stage4, pre_stage_channels = self._make_stage(
            self.stage4_cfg, num_channels, multi_scale_output=True)
        # BUG FIX: np.int was deprecated in NumPy 1.20 and removed in 1.24,
        # making the original `np.int(np.sum(...))` raise AttributeError.
        # pre_stage_channels is a plain list of ints, so builtin sum/int suffice.
        last_inp_channels = int(sum(pre_stage_channels))
        # Segmentation head over the concatenated multi-resolution features.
        self.last_layer = nn.Sequential(
            nn.Conv2d(
                in_channels=last_inp_channels,
                out_channels=last_inp_channels,
                kernel_size=1,
                stride=1,
                padding=0),
            self.norm_layer(last_inp_channels),
            nn.ReLU(inplace=True),
            nn.Conv2d(
                in_channels=last_inp_channels,
                out_channels=num_classes,
                kernel_size=1,
                stride=1,
                padding=0)
        )
    def _make_transition_layer(
            self, num_channels_pre_layer, num_channels_cur_layer):
        """Build per-branch adapters between consecutive stages.

        Existing branches get a 3x3 conv only when the channel count changes
        (otherwise None, i.e. identity); each new, lower-resolution branch is
        created from the previous stage's last branch with stride-2 convs.
        """
        num_branches_cur = len(num_channels_cur_layer)
        num_branches_pre = len(num_channels_pre_layer)
        transition_layers = []
        for i in range(num_branches_cur):
            if i < num_branches_pre:
                if num_channels_cur_layer[i] != num_channels_pre_layer[i]:
                    transition_layers.append(nn.Sequential(
                        nn.Conv2d(num_channels_pre_layer[i],
                                  num_channels_cur_layer[i],
                                  3,
                                  1,
                                  1,
                                  bias=False),
                        self.norm_layer(num_channels_cur_layer[i]),
                        nn.ReLU(inplace=True)))
                else:
                    transition_layers.append(None)
            else:
                # New branch: chain of stride-2 convs; only the last changes width.
                conv3x3s = []
                for j in range(i + 1 - num_branches_pre):
                    inchannels = num_channels_pre_layer[-1]
                    outchannels = num_channels_cur_layer[i] \
                        if j == i - num_branches_pre else inchannels
                    conv3x3s.append(nn.Sequential(
                        nn.Conv2d(
                            inchannels, outchannels, 3, 2, 1, bias=False),
                        self.norm_layer(outchannels),
                        nn.ReLU(inplace=True)))
                transition_layers.append(nn.Sequential(*conv3x3s))
        return nn.ModuleList(transition_layers)
    def _make_layer(self, block, inplanes, planes, blocks, stride=1):
        """Stack ``blocks`` residual blocks, projecting the skip when needed."""
        downsample = None
        if stride != 1 or inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                self.norm_layer(planes * block.expansion),
            )
        layers = []
        layers.append(block(inplanes, planes, stride, downsample, norm_layer=self.norm_layer))
        inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(inplanes, planes, norm_layer=self.norm_layer))
        return nn.Sequential(*layers)
    def _make_stage(self, layer_config, num_inchannels,
                    multi_scale_output=True):
        """Build a stage as a sequence of HighResolutionModules.

        Returns the stage module and the per-branch output channel counts.
        """
        num_modules = layer_config['NUM_MODULES']
        num_branches = layer_config['NUM_BRANCHES']
        num_blocks = layer_config['NUM_BLOCKS']
        num_channels = layer_config['NUM_CHANNELS']
        block = blocks_dict[layer_config['BLOCK']]
        fuse_method = layer_config['FUSE_METHOD']
        modules = []
        for i in range(num_modules):
            # multi_scale_output is only used last module
            if not multi_scale_output and i == num_modules - 1:
                reset_multi_scale_output = False
            else:
                reset_multi_scale_output = True
            modules.append(
                HighResolutionModule(num_branches,
                                     block,
                                     num_blocks,
                                     num_inchannels,
                                     num_channels,
                                     fuse_method,
                                     reset_multi_scale_output,
                                     norm_layer=self.norm_layer)
            )
            num_inchannels = modules[-1].get_num_inchannels()
        return nn.Sequential(*modules), num_inchannels
    def forward(self, x):
        """Return per-pixel class logits at 1/4 of the input resolution."""
        # Stem: two stride-2 convs.
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.conv2(x)
        x = self.bn2(x)
        x = self.relu(x)
        x = self.layer1(x)
        # Fan out into the stage-2 branches.
        x_list = []
        for i in range(self.stage2_cfg['NUM_BRANCHES']):
            if self.transition1[i] is not None:
                x_list.append(self.transition1[i](x))
            else:
                x_list.append(x)
        y_list = self.stage2(x_list)
        x_list = []
        for i in range(self.stage3_cfg['NUM_BRANCHES']):
            if self.transition2[i] is not None:
                if i < self.stage2_cfg['NUM_BRANCHES']:
                    x_list.append(self.transition2[i](y_list[i]))
                else:
                    # A newly created branch derives from the lowest-resolution one.
                    x_list.append(self.transition2[i](y_list[-1]))
            else:
                x_list.append(y_list[i])
        y_list = self.stage3(x_list)
        x_list = []
        for i in range(self.stage4_cfg['NUM_BRANCHES']):
            if self.transition3[i] is not None:
                if i < self.stage3_cfg['NUM_BRANCHES']:
                    x_list.append(self.transition3[i](y_list[i]))
                else:
                    x_list.append(self.transition3[i](y_list[-1]))
            else:
                x_list.append(y_list[i])
        x = self.stage4(x_list)
        # Upsample every branch to the highest resolution and concatenate.
        x0_h, x0_w = x[0].size(2), x[0].size(3)
        x1 = F.interpolate(x[1], size=(x0_h, x0_w), mode='bilinear', align_corners=ALIGN_CORNERS)
        x2 = F.interpolate(x[2], size=(x0_h, x0_w), mode='bilinear', align_corners=ALIGN_CORNERS)
        x3 = F.interpolate(x[3], size=(x0_h, x0_w), mode='bilinear', align_corners=ALIGN_CORNERS)
        x = torch.cat([x[0], x1, x2, x3], 1)
        x = self.last_layer(x)
        return x
def _hrnet(arch, pretrained, progress, num_classes: int = 19):
    """Instantiate an HRNet variant by config name, optionally loading pretrained weights."""
    from models.hrnet_config import MODEL_CONFIGS
    model = HighResolutionNet(MODEL_CONFIGS[arch], num_classes=num_classes)
    if pretrained:
        state_dict = load_state_dict_from_url(model_urls[arch], progress=progress)
        # strict=False tolerates missing/unexpected keys between the
        # checkpoint and this model's state dict.
        model.load_state_dict(state_dict, strict=False)
    return model
class HRNet(torch.nn.Module):
    """HighResolutionNet wrapper that adds optional dropout and self-adaptive
    normalization (SaN), and upsamples predictions to the input resolution."""

    def __init__(self,
                 hrnet_name: str,
                 num_classes: int = 19,
                 dropout: bool = False,
                 alpha: float = 0.0,
                 update_source_bn: bool = True):
        super(HRNet, self).__init__()
        self.model = _hrnet(hrnet_name, pretrained=True, progress=True, num_classes=num_classes)
        if dropout:
            # Add dropout layers after relu
            add_dropout(model=self)
        # Replace BN layers with SaN layers
        replace_batchnorm(self, alpha=alpha, update_source_bn=update_source_bn)

    def forward(self,
                img: torch.Tensor) -> Dict[str, torch.Tensor]:
        """
        Args:
            img: Batch of input images
        Returns:
            output:
                'pred': Segmentation output of images
        """
        logits = self.model(img)
        # The network predicts at reduced resolution; upsample to the input size.
        logits = torch.nn.functional.interpolate(
            logits, size=img.shape[2:], mode='bilinear', align_corners=True)
        return {'pred': logits}
def hrnet18(backbone_name: str = None,
            num_classes: int = 19,
            alpha: float = 0.5,
            update_source_bn: bool = True,
            dropout: bool = False):
    """Factory for the HRNet-W18 segmentation model (``backbone_name`` is ignored)."""
    return HRNet("hrnet18",
                 num_classes=num_classes,
                 dropout=dropout,
                 alpha=alpha,
                 update_source_bn=update_source_bn)
def hrnet32(backbone_name: str = None,
            num_classes: int = 19,
            alpha: float = 0.5,
            update_source_bn: bool = True,
            dropout: bool = False):
    """Factory for the HRNet-W32 segmentation model (``backbone_name`` is ignored)."""
    return HRNet("hrnet32",
                 num_classes=num_classes,
                 dropout=dropout,
                 alpha=alpha,
                 update_source_bn=update_source_bn)
def hrnet48(backbone_name: str = None,
            num_classes: int = 19,
            alpha: float = 0.5,
            update_source_bn: bool = True,
            dropout: bool = False):
    """Factory for the HRNet-W48 segmentation model (``backbone_name`` is ignored)."""
    return HRNet("hrnet48",
                 num_classes=num_classes,
                 dropout=dropout,
                 alpha=alpha,
                 update_source_bn=update_source_bn)
self-adaptive | self-adaptive-master/models/deeplabv3.py | """Source: https://github.com/VainF/DeepLabV3Plus-Pytorch"""
from torch import nn
from torch.nn import functional as F
import torch
from typing import Dict
from collections import OrderedDict
from utils.dropout import add_dropout
from utils.self_adapt_norm import replace_batchnorm
from models.backbone_v3 import resnet
__all__ = ["DeepLabV3"]
class _SimpleSegmentationModel(nn.Module):
    """Backbone + classifier pair whose output is upsampled to the input size."""

    def __init__(self, backbone, classifier):
        super(_SimpleSegmentationModel, self).__init__()
        self.backbone = backbone
        self.classifier = classifier

    def forward(self, x):
        out_size = x.shape[-2:]
        logits = self.classifier(self.backbone(x))
        # Bilinear upsampling back to the spatial size of the input.
        return F.interpolate(logits, size=out_size, mode='bilinear', align_corners=False)
class IntermediateLayerGetter(nn.ModuleDict):
    """
    Module wrapper whose forward() returns selected intermediate activations.

    Strong assumptions: submodules were registered on the model in execution
    order, each is used at most once during forward, and only direct children
    of ``model`` can be queried (``model.feature1`` works,
    ``model.feature1.layer2`` does not).

    Arguments:
        model (nn.Module): model on which we will extract the features
        return_layers (Dict[name, new_name]): maps child-module names to the
            keys under which their activations appear in the returned dict.
    """

    def __init__(self, model, return_layers):
        child_names = {name for name, _ in model.named_children()}
        if not set(return_layers).issubset(child_names):
            raise ValueError("return_layers are not present in model")
        orig_return_layers = return_layers
        pending = {k: v for k, v in return_layers.items()}
        layers = OrderedDict()
        # Keep children up to (and including) the last requested layer.
        for name, module in model.named_children():
            layers[name] = module
            if name in pending:
                del pending[name]
            if not pending:
                break
        super(IntermediateLayerGetter, self).__init__(layers)
        self.return_layers = orig_return_layers

    def forward(self, x):
        collected = OrderedDict()
        for name, module in self.named_children():
            x = module(x)
            if name in self.return_layers:
                collected[self.return_layers[name]] = x
        return collected
class DeepLabV3(_SimpleSegmentationModel):
    """
    Implements DeepLabV3 model from
    `"Rethinking Atrous Convolution for Semantic Image Segmentation"
    <https://arxiv.org/abs/1706.05587>`_.

    Arguments:
        backbone (nn.Module): network computing the features; it should return
            an OrderedDict[Tensor] keyed by "out" for the last feature map used
            (and "aux" if an auxiliary classifier is used).
        classifier (nn.Module): maps the backbone's "out" element to a dense
            prediction.
        aux_classifier (nn.Module, optional): auxiliary classifier used during training
    """
    pass
class DeepLabHeadV3Plus(nn.Module):
    """DeepLabV3+ decoder: ASPP on deep features fused with projected low-level features."""

    def __init__(self, in_channels, low_level_channels, num_classes, aspp_dilate=[12, 24, 36]):
        super(DeepLabHeadV3Plus, self).__init__()
        # 1x1 projection shrinks the low-level features to 48 channels.
        self.project = nn.Sequential(
            nn.Conv2d(low_level_channels, 48, 1, bias=False),
            nn.BatchNorm2d(48),
            nn.ReLU(inplace=True),
        )
        self.aspp = ASPP(in_channels, aspp_dilate)
        # 304 = 48 (projected low-level) + 256 (ASPP output).
        self.classifier = nn.Sequential(
            nn.Conv2d(304, 256, 3, padding=1, bias=False),
            nn.BatchNorm2d(256),
            nn.ReLU(inplace=True),
            nn.Conv2d(256, num_classes, 1)
        )
        self._init_weight()

    def forward(self, feature):
        low = self.project(feature['low_level'])
        deep = F.interpolate(self.aspp(feature['out']), size=low.shape[2:],
                             mode='bilinear', align_corners=False)
        return self.classifier(torch.cat([low, deep], dim=1))

    def _init_weight(self):
        # Kaiming init for convs; unit weight / zero bias for norm layers.
        for module in self.modules():
            if isinstance(module, nn.Conv2d):
                nn.init.kaiming_normal_(module.weight)
            elif isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.constant_(module.weight, 1)
                nn.init.constant_(module.bias, 0)
class DeepLabHead(nn.Module):
    """Plain DeepLabV3 head: ASPP followed by a 3x3 conv classifier."""

    def __init__(self, in_channels, num_classes, aspp_dilate=[12, 24, 36]):
        super(DeepLabHead, self).__init__()
        self.classifier = nn.Sequential(
            ASPP(in_channels, aspp_dilate),
            nn.Conv2d(256, 256, 3, padding=1, bias=False),
            nn.BatchNorm2d(256),
            nn.ReLU(inplace=True),
            nn.Conv2d(256, num_classes, 1)
        )
        self._init_weight()

    def forward(self, feature):
        # Only the deep "out" feature map feeds this head.
        return self.classifier(feature['out'])

    def _init_weight(self):
        # Kaiming init for convs; unit weight / zero bias for norm layers.
        for module in self.modules():
            if isinstance(module, nn.Conv2d):
                nn.init.kaiming_normal_(module.weight)
            elif isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.constant_(module.weight, 1)
                nn.init.constant_(module.bias, 0)
class AtrousSeparableConvolution(nn.Module):
    """Depthwise (possibly dilated) convolution followed by a pointwise 1x1 convolution."""

    def __init__(self, in_channels, out_channels, kernel_size,
                 stride=1, padding=0, dilation=1, bias=True):
        super(AtrousSeparableConvolution, self).__init__()
        depthwise = nn.Conv2d(in_channels, in_channels, kernel_size=kernel_size,
                              stride=stride, padding=padding, dilation=dilation,
                              bias=bias, groups=in_channels)
        pointwise = nn.Conv2d(in_channels, out_channels, kernel_size=1,
                              stride=1, padding=0, bias=bias)
        self.body = nn.Sequential(depthwise, pointwise)
        self._init_weight()

    def forward(self, x):
        return self.body(x)

    def _init_weight(self):
        # Kaiming init for convs; unit weight / zero bias for norm layers.
        for module in self.modules():
            if isinstance(module, nn.Conv2d):
                nn.init.kaiming_normal_(module.weight)
            elif isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.constant_(module.weight, 1)
                nn.init.constant_(module.bias, 0)
class ASPPConv(nn.Sequential):
    """3x3 dilated conv -> BN -> ReLU branch used inside ASPP (padding keeps the size)."""

    def __init__(self, in_channels, out_channels, dilation):
        super(ASPPConv, self).__init__(
            nn.Conv2d(in_channels, out_channels, 3, padding=dilation,
                      dilation=dilation, bias=False),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(inplace=True),
        )
class ASPPPooling(nn.Sequential):
    """Image-level ASPP branch: global average pool -> 1x1 conv -> BN -> ReLU -> upsample."""

    def __init__(self, in_channels, out_channels):
        super(ASPPPooling, self).__init__(
            nn.AdaptiveAvgPool2d(1),
            nn.Conv2d(in_channels, out_channels, 1, bias=False),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(inplace=True))

    def forward(self, x):
        in_size = x.shape[-2:]
        pooled = super(ASPPPooling, self).forward(x)
        # Broadcast the pooled statistics back to the input resolution.
        return F.interpolate(pooled, size=in_size, mode='bilinear', align_corners=False)
class ASPP(nn.Module):
    """Atrous Spatial Pyramid Pooling: parallel 1x1, three dilated 3x3, and
    image-pooling branches, concatenated and projected back to 256 channels."""

    def __init__(self, in_channels, atrous_rates):
        super(ASPP, self).__init__()
        out_channels = 256
        branches = [nn.Sequential(
            nn.Conv2d(in_channels, out_channels, 1, bias=False),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(inplace=True))]
        rate1, rate2, rate3 = tuple(atrous_rates)
        branches.append(ASPPConv(in_channels, out_channels, rate1))
        branches.append(ASPPConv(in_channels, out_channels, rate2))
        branches.append(ASPPConv(in_channels, out_channels, rate3))
        branches.append(ASPPPooling(in_channels, out_channels))
        self.convs = nn.ModuleList(branches)
        # Fuse the five concatenated branch outputs back down to out_channels.
        self.project = nn.Sequential(
            nn.Conv2d(5 * out_channels, out_channels, 1, bias=False),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(inplace=True),
            nn.Dropout(0.1), )

    def forward(self, x):
        pyramids = [branch(x) for branch in self.convs]
        return self.project(torch.cat(pyramids, dim=1))
def convert_to_separable_conv(module):
    """Recursively replace every Conv2d with kernel size > 1 by an
    AtrousSeparableConvolution.

    Args:
        module: root module to convert; its children are converted recursively
            and re-attached via add_module.

    Returns:
        The converted module: a new AtrousSeparableConvolution when ``module``
        itself is an eligible Conv2d, otherwise ``module`` with its converted
        children attached.
    """
    new_module = module
    if isinstance(module, nn.Conv2d) and module.kernel_size[0] > 1:
        new_module = AtrousSeparableConvolution(module.in_channels,
                                                module.out_channels,
                                                module.kernel_size,
                                                module.stride,
                                                module.padding,
                                                module.dilation,
                                                module.bias)
    for name, child in module.named_children():
        new_module.add_module(name, convert_to_separable_conv(child))
    # BUG FIX: the original fell off the end and implicitly returned None, so
    # both top-level callers and the recursive add_module calls above received
    # None instead of the converted module.
    return new_module
def _segm_resnet(name, backbone_name, num_classes, output_stride, pretrained_backbone):
    """Assemble a DeepLab head ('deeplabv3' or 'deeplabv3plus') on a dilated ResNet backbone."""
    if output_stride == 8:
        dilate_stages = [False, True, True]
        aspp_dilate = [12, 24, 36]
    else:
        dilate_stages = [False, False, True]
        aspp_dilate = [6, 12, 18]
    backbone = resnet.__dict__[backbone_name](
        pretrained=pretrained_backbone,
        replace_stride_with_dilation=dilate_stages)
    inplanes = 2048
    low_level_planes = 256
    if name == 'deeplabv3plus':
        # V3+ additionally taps low-level features from layer1.
        return_layers = {'layer4': 'out', 'layer1': 'low_level'}
        classifier = DeepLabHeadV3Plus(inplanes, low_level_planes, num_classes, aspp_dilate)
    elif name == 'deeplabv3':
        return_layers = {'layer4': 'out'}
        classifier = DeepLabHead(inplanes, num_classes, aspp_dilate)
    backbone = IntermediateLayerGetter(backbone, return_layers=return_layers)
    return DeepLabV3(backbone, classifier)
def _load_model(arch_type, backbone, num_classes, output_stride, pretrained_backbone):
    """Dispatch model construction by backbone family; only ResNet backbones are supported."""
    if not backbone.startswith('resnet'):
        raise NotImplementedError
    return _segm_resnet(arch_type, backbone, num_classes,
                        output_stride=output_stride,
                        pretrained_backbone=pretrained_backbone)
class DeepLabV3Plus(torch.nn.Module):
    """DeepLabV3+ segmentation model (output stride 8, pretrained ResNet
    backbone) with optional dropout and self-adaptive normalization (SaN)."""
    def __init__(self,
                 backbone_name: str,
                 num_classes: int = 19,
                 dropout: bool = False,
                 alpha: float = 0.0,
                 update_source_bn: bool = True):
        """
        Args:
            backbone_name: ResNet backbone name passed to the model loader
            num_classes: Number of semantic classes
            dropout: If True, insert dropout layers after ReLU activations
            alpha: Mixing weight forwarded to the SaN replacement layers
            update_source_bn: Whether SaN layers keep updating source statistics
        """
        super(DeepLabV3Plus, self).__init__()
        self.model = _load_model('deeplabv3plus', backbone_name, num_classes, output_stride=8, pretrained_backbone=True)
        # Add dropout layers after relu
        if dropout:
            add_dropout(model=self)
        # Replace BN layers with SaN layers
        replace_batchnorm(self, alpha=alpha, update_source_bn=update_source_bn)
    def forward(self,
                img: torch.Tensor) -> Dict[str, torch.Tensor]:
        """
        Args:
            img: Batch of input images
        Returns:
            output:
                'pred': Segmentation output of images
        """
        # Create output dict of forward pass
        output_dict = {}
        # Compute probabilities for semantic classes
        if self.training and img.shape[0] == 1:
            # A single-sample training batch is duplicated and only the first
            # prediction is kept — presumably so batch statistics can be
            # computed from more than one sample; confirm against the SaN/BN
            # layers' requirements.
            output_dict['pred'] = self.model(torch.cat((img, img), dim=0))[0].unsqueeze(0)
        else:
            output_dict['pred'] = self.model(img)
        return output_dict
def deeplabv3plus(backbone_name: str,
                  num_classes: int = 19,
                  alpha: float = 0.5,
                  update_source_bn: bool = True,
                  dropout: bool = False):
    """Factory for a DeepLabV3+ model with self-adaptive normalization options."""
    return DeepLabV3Plus(backbone_name,
                         num_classes=num_classes,
                         dropout=dropout,
                         alpha=alpha,
                         update_source_bn=update_source_bn)
self-adaptive | self-adaptive-master/models/deeplab.py | import torch
from typing import Dict
from utils.dropout import add_dropout
from utils.self_adapt_norm import replace_batchnorm
import models.backbone
class DeepLab(torch.nn.Module):
    """DeepLab-style model: a pretrained backbone plus a 1x1 classification head,
    with optional dropout and self-adaptive normalization (SaN)."""

    def __init__(self,
                 backbone_name: str,
                 num_classes: int = 19,
                 dropout: bool = False,
                 alpha: float = 0.0,
                 update_source_bn: bool = True):
        super(DeepLab, self).__init__()
        self.backbone = models.backbone.__dict__[backbone_name](pretrained=True)
        # 1x1 classification head, N(0, 0.01) weights and zero bias.
        self.cls_head = torch.nn.Conv2d(
            self.backbone.out_channels, num_classes, kernel_size=1, stride=1, padding=0
        )
        torch.nn.init.normal_(self.cls_head.weight.data, mean=0, std=0.01)
        torch.nn.init.constant_(self.cls_head.bias.data, 0.0)
        # Output size is recorded per batch in forward().
        self.img_size = None
        if dropout:
            # Add dropout layers after relu
            add_dropout(model=self)
        # Replace BN layers with SaN layers
        replace_batchnorm(self, alpha=alpha, update_source_bn=update_source_bn)

    def forward(self,
                img: torch.Tensor) -> Dict[str, torch.Tensor]:
        """
        Args:
            img: Batch of input images
        Returns:
            output:
                'backbone': Output features of backbone
                'pred': Segmentation output of images
        """
        self.img_size = img.shape[2:]
        features = self.backbone(img)
        logits = self._backbone_to_logits(features)
        return {'backbone': features, 'pred': logits}

    def _backbone_to_logits(self,
                            x: torch.Tensor) -> torch.Tensor:
        """Apply the class head and bilinearly upsample to the stored input size."""
        return torch.nn.functional.interpolate(self.cls_head(x),
                                               size=self.img_size,
                                               mode='bilinear',
                                               align_corners=True)
def deeplab(backbone_name: str,
            num_classes: int = 19,
            alpha: float = 0.5,
            update_source_bn: bool = True,
            dropout: bool = False):
    """Factory for a DeepLab model with self-adaptive normalization options."""
    return DeepLab(backbone_name,
                   num_classes=num_classes,
                   dropout=dropout,
                   alpha=alpha,
                   update_source_bn=update_source_bn)
self-adaptive | self-adaptive-master/models/backbone/resnet.py | '''
Source: torchvision
'''
import torch
import torch.nn as nn
from torch.hub import load_state_dict_from_url
# __all__ = {'resnet18': resnet18, 'resnet50': resnet50}
model_urls = {
'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
'resnext50_32x4d': 'https://download.pytorch.org/models/resnext50_32x4d-7cdf4587.pth',
'resnext101_32x8d': 'https://download.pytorch.org/models/resnext101_32x8d-8ba56ff5.pth',
'wide_resnet50_2': 'https://download.pytorch.org/models/wide_resnet50_2-95faca4d.pth',
'wide_resnet101_2': 'https://download.pytorch.org/models/wide_resnet101_2-32ee1156.pth',
}
def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
    """Build a bias-free 3x3 convolution; padding equals dilation so spatial
    size is preserved at stride 1."""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=dilation,
        groups=groups,
        dilation=dilation,
        bias=False,
    )
def conv1x1(in_planes, out_planes, stride=1):
    """Build a bias-free pointwise (1x1) convolution."""
    return nn.Conv2d(
        in_channels=in_planes,
        out_channels=out_planes,
        kernel_size=1,
        stride=stride,
        bias=False,
    )
class BasicBlock(nn.Module):
    """Standard ResNet basic block: two 3x3 convs with BN/ReLU and a residual skip."""

    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
                 base_width=64, dilation=1, norm_layer=None):
        super(BasicBlock, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        if groups != 1 or base_width != 64:
            raise ValueError('BasicBlock only supports groups=1 and base_width=64')
        if dilation > 1:
            raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
        # conv1 (and the optional downsample projection) carries the stride.
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = norm_layer(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = norm_layer(planes)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        shortcut = x if self.downsample is None else self.downsample(x)
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        out += shortcut
        return self.relu(out)
class Bottleneck(nn.Module):
    """ResNet bottleneck block (the torchvision "V1.5" variant).

    Places the stride on the 3x3 conv (self.conv2) rather than on the first
    1x1 conv as in the original paper
    ("Deep residual learning for image recognition",
    https://arxiv.org/abs/1512.03385); per
    https://ngc.nvidia.com/catalog/model-scripts/nvidia:resnet_50_v1_5_for_pytorch
    this improves accuracy by roughly 0.2-0.3%.
    """

    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
                 base_width=64, dilation=1, norm_layer=None):
        super(Bottleneck, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        width = int(planes * (base_width / 64.)) * groups
        # conv2 (and the optional downsample projection) carries the stride.
        self.conv1 = conv1x1(inplanes, width)
        self.bn1 = norm_layer(width)
        self.conv2 = conv3x3(width, width, stride, groups, dilation)
        self.bn2 = norm_layer(width)
        self.conv3 = conv1x1(width, planes * self.expansion)
        self.bn3 = norm_layer(planes * self.expansion)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        shortcut = x if self.downsample is None else self.downsample(x)
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        out += shortcut
        return self.relu(out)
class ResNet(nn.Module):
def __init__(self, block, layers, num_classes=1000, zero_init_residual=False,
groups=1, width_per_group=64, replace_stride_with_dilation=None,
norm_layer=None):
super(ResNet, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
self._norm_layer = norm_layer
self.inplanes = 64
self.dilation = 1
if replace_stride_with_dilation is None:
# each element in the tuple indicates if we should replace
# the 2x2 stride with a dilated convolution instead
replace_stride_with_dilation = [False, False, False]
if len(replace_stride_with_dilation) != 3:
raise ValueError("replace_stride_with_dilation should be None "
"or a 3-element tuple, got {}".format(replace_stride_with_dilation))
self.groups = groups
self.base_width = width_per_group
self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = norm_layer(self.inplanes)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0], stride=1, dilation=1)
self.layer2 = self._make_layer(block, 128, layers[1], stride=2, dilation=1)
self.layer3 = self._make_layer(block, 256, layers[2], stride=1, dilation=2)
self.layer4 = self._make_layer(block, 512, layers[3], stride=1, dilation=4)
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.fc = nn.Linear(512 * block.expansion, num_classes)
self.out_channels = list(module
for module in self.modules()
if isinstance(module, torch.nn.Conv2d))[-1].out_channels
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
# Zero-initialize the last BN in each residual branch,
# so that the residual branch starts with zeros, and each residual block behaves like an identity.
# This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
if zero_init_residual:
for m in self.modules():
if isinstance(m, Bottleneck):
nn.init.constant_(m.bn3.weight, 0)
elif isinstance(m, BasicBlock):
nn.init.constant_(m.bn2.weight, 0)
    def _make_layer(self, block, planes, blocks, stride=1, dilation=False):
        """Stack ``blocks`` residual blocks into one ResNet stage.

        NOTE(review): despite the ``dilation=False`` default, every call site
        in this file passes an int (1, 2 or 4). The first block always gets
        ``previous_dilation``, which stays 1 because ``self.dilation`` is never
        updated here (the stride-to-dilation logic below is commented out) —
        confirm this asymmetry is intentional for this dilated variant.
        """
        norm_layer = self._norm_layer
        downsample = None
        previous_dilation = self.dilation
        # Original torchvision stride-to-dilation conversion, disabled here:
        #if dilate:
        #    self.dilation *= stride
        #    stride = 1
        if stride != 1 or self.inplanes != planes * block.expansion:
            # Projection shortcut: match spatial resolution and channel count.
            downsample = nn.Sequential(
                conv1x1(self.inplanes, planes * block.expansion, stride),
                norm_layer(planes * block.expansion),
            )
        layers = []
        # First block may downsample; remaining blocks keep shape, using the
        # caller-supplied dilation.
        layers.append(block(self.inplanes, planes, stride, downsample, self.groups,
                            self.base_width, previous_dilation, norm_layer))
        self.inplanes = planes * block.expansion
        for _ in range(1, blocks):
            layers.append(block(self.inplanes, planes, groups=self.groups,
                                base_width=self.base_width, dilation=dilation,
                                norm_layer=norm_layer))
        return nn.Sequential(*layers)
def _forward_impl(self, x):
# See note [TorchScript super()]
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
return x
    def forward(self, x):
        """Forward pass; returns backbone features (no pooling / fc head)."""
        return self._forward_impl(x)
def _resnet(arch, block, layers, pretrained, progress, **kwargs):
    """Build a ResNet for *arch* and optionally load its ImageNet weights."""
    net = ResNet(block, layers, **kwargs)
    if not pretrained:
        return net
    weights = load_state_dict_from_url(model_urls[arch], progress=progress)
    net.load_state_dict(weights)
    return net
def resnet101(pretrained=False, progress=True, **kwargs):
    r"""ResNet-101 ("Deep Residual Learning for Image Recognition",
    https://arxiv.org/pdf/1512.03385.pdf).

    Args:
        pretrained (bool): load ImageNet-pretrained weights when True.
        progress (bool): show a download progress bar on stderr when True.
    """
    model = _resnet('resnet101', Bottleneck, [3, 4, 23, 3], pretrained, progress,
                    **kwargs)
    return model
def resnet50(pretrained=False, progress=True, **kwargs):
    r"""ResNet-50 ("Deep Residual Learning for Image Recognition",
    https://arxiv.org/pdf/1512.03385.pdf).

    Args:
        pretrained (bool): load ImageNet-pretrained weights when True.
        progress (bool): show a download progress bar on stderr when True.
    """
    return _resnet('resnet50', Bottleneck, [3, 4, 6, 3], pretrained, progress,
                   **kwargs)
def resnet18(pretrained=False, progress=True, **kwargs):
    r"""ResNet-18 ("Deep Residual Learning for Image Recognition",
    https://arxiv.org/pdf/1512.03385.pdf).

    Args:
        pretrained (bool): load ImageNet-pretrained weights when True.
        progress (bool): show a download progress bar on stderr when True.
    """
    return _resnet('resnet18', BasicBlock, [2, 2, 2, 2], pretrained, progress,
                   **kwargs)
| 10,374 | 38.150943 | 106 | py |
self-adaptive | self-adaptive-master/models/backbone_v3/resnet.py | import torch
import torch.nn as nn
from torch.hub import load_state_dict_from_url
__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101',
'resnet152', 'resnext50_32x4d', 'resnext101_32x8d',
'wide_resnet50_2', 'wide_resnet101_2']
model_urls = {
'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
'resnext50_32x4d': 'https://download.pytorch.org/models/resnext50_32x4d-7cdf4587.pth',
'resnext101_32x8d': 'https://download.pytorch.org/models/resnext101_32x8d-8ba56ff5.pth',
'wide_resnet50_2': 'https://download.pytorch.org/models/wide_resnet50_2-95faca4d.pth',
'wide_resnet101_2': 'https://download.pytorch.org/models/wide_resnet101_2-32ee1156.pth',
}
def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
    """Return a bias-free 3x3 convolution whose padding equals its dilation,
    so spatial size is preserved when stride == 1."""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=dilation,
        dilation=dilation,
        groups=groups,
        bias=False,
    )
def conv1x1(in_planes, out_planes, stride=1):
    """Return a bias-free 1x1 (pointwise) convolution."""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=1,
        stride=stride,
        bias=False,
    )
class BasicBlock(nn.Module):
    """Two 3x3 convolutions with a residual (identity or projection) shortcut.

    Used by ResNet-18/34. Output channels equal ``planes * expansion``.
    """
    # Output-channel multiplier relative to ``planes``.
    expansion = 1
    def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
                 base_width=64, dilation=1, norm_layer=None):
        super(BasicBlock, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        # BasicBlock has no bottleneck width to scale, so grouped/wide
        # configurations are rejected explicitly.
        if groups != 1 or base_width != 64:
            raise ValueError('BasicBlock only supports groups=1 and base_width=64')
        if dilation > 1:
            raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
        # Both self.conv1 and self.downsample layers downsample the input when stride != 1
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = norm_layer(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = norm_layer(planes)
        self.downsample = downsample
        self.stride = stride
    def forward(self, x):
        """conv-bn-relu, conv-bn, add shortcut, final relu."""
        identity = x
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)
        out = self.conv2(out)
        out = self.bn2(out)
        if self.downsample is not None:
            # Projection shortcut when shape changes (stride or channels).
            identity = self.downsample(x)
        out += identity
        out = self.relu(out)
        return out
class Bottleneck(nn.Module):
    """1x1 reduce, 3x3, 1x1 expand convolutions with a residual shortcut.

    Used by ResNet-50/101/152 and the ResNeXt/wide variants; output channels
    equal ``planes * expansion``.
    """
    # Output-channel multiplier relative to ``planes``.
    expansion = 4
    def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
                 base_width=64, dilation=1, norm_layer=None):
        super(Bottleneck, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        # Inner (bottleneck) width; scaled for wide/grouped variants.
        width = int(planes * (base_width / 64.)) * groups
        # Both self.conv2 and self.downsample layers downsample the input when stride != 1
        self.conv1 = conv1x1(inplanes, width)
        self.bn1 = norm_layer(width)
        self.conv2 = conv3x3(width, width, stride, groups, dilation)
        self.bn2 = norm_layer(width)
        self.conv3 = conv1x1(width, planes * self.expansion)
        self.bn3 = norm_layer(planes * self.expansion)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride
    def forward(self, x):
        """Three conv-bn stages (relu after the first two), add shortcut, relu."""
        identity = x
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)
        out = self.conv2(out)
        out = self.bn2(out)
        out = self.relu(out)
        out = self.conv3(out)
        out = self.bn3(out)
        if self.downsample is not None:
            # Projection shortcut when shape changes (stride or channels).
            identity = self.downsample(x)
        out += identity
        out = self.relu(out)
        return out
class ResNet(nn.Module):
    """Standard torchvision-style ResNet classifier.

    Builds stem (7x7 conv, bn, relu, maxpool), four residual stages, global
    average pooling and a linear head producing ``num_classes`` logits.
    """
    def __init__(self, block, layers, num_classes=1000, zero_init_residual=False,
                 groups=1, width_per_group=64, replace_stride_with_dilation=None,
                 norm_layer=None):
        super(ResNet, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        self._norm_layer = norm_layer
        # Running channel count, advanced by _make_layer as stages are built.
        self.inplanes = 64
        # Current dilation; grows when a stage's stride is replaced by dilation.
        self.dilation = 1
        if replace_stride_with_dilation is None:
            # each element in the tuple indicates if we should replace
            # the 2x2 stride with a dilated convolution instead
            replace_stride_with_dilation = [False, False, False]
        if len(replace_stride_with_dilation) != 3:
            raise ValueError("replace_stride_with_dilation should be None "
                             "or a 3-element tuple, got {}".format(replace_stride_with_dilation))
        self.groups = groups
        self.base_width = width_per_group
        self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3,
                               bias=False)
        self.bn1 = norm_layer(self.inplanes)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2,
                                       dilate=replace_stride_with_dilation[0])
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2,
                                       dilate=replace_stride_with_dilation[1])
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2,
                                       dilate=replace_stride_with_dilation[2])
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.fc = nn.Linear(512 * block.expansion, num_classes)
        # He initialization for convs; unit-scale/zero-shift for norm layers.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
        # Zero-initialize the last BN in each residual branch,
        # so that the residual branch starts with zeros, and each residual block behaves like an identity.
        # This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
        if zero_init_residual:
            for m in self.modules():
                if isinstance(m, Bottleneck):
                    nn.init.constant_(m.bn3.weight, 0)
                elif isinstance(m, BasicBlock):
                    nn.init.constant_(m.bn2.weight, 0)
    def _make_layer(self, block, planes, blocks, stride=1, dilate=False):
        """Stack ``blocks`` residual blocks; optionally trade stride for dilation."""
        norm_layer = self._norm_layer
        downsample = None
        previous_dilation = self.dilation
        if dilate:
            # Keep spatial resolution: convert this stage's stride to dilation.
            self.dilation *= stride
            stride = 1
        if stride != 1 or self.inplanes != planes * block.expansion:
            # Projection shortcut: match spatial resolution and channel count.
            downsample = nn.Sequential(
                conv1x1(self.inplanes, planes * block.expansion, stride),
                norm_layer(planes * block.expansion),
            )
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample, self.groups,
                            self.base_width, previous_dilation, norm_layer))
        self.inplanes = planes * block.expansion
        for _ in range(1, blocks):
            layers.append(block(self.inplanes, planes, groups=self.groups,
                                base_width=self.base_width, dilation=self.dilation,
                                norm_layer=norm_layer))
        return nn.Sequential(*layers)
    def forward(self, x):
        """Stem, four stages, global average pool, flatten, linear head."""
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.avgpool(x)
        x = torch.flatten(x, 1)
        x = self.fc(x)
        return x
def _resnet(arch, block, layers, pretrained, progress, **kwargs):
    """Build a ResNet for *arch* and optionally load its ImageNet weights."""
    net = ResNet(block, layers, **kwargs)
    if not pretrained:
        return net
    weights = load_state_dict_from_url(model_urls[arch], progress=progress)
    net.load_state_dict(weights)
    return net
def resnet18(pretrained=False, progress=True, **kwargs):
    r"""ResNet-18 ("Deep Residual Learning for Image Recognition",
    https://arxiv.org/pdf/1512.03385.pdf).

    Args:
        pretrained (bool): load ImageNet-pretrained weights when True.
        progress (bool): show a download progress bar on stderr when True.
    """
    model = _resnet('resnet18', BasicBlock, [2, 2, 2, 2], pretrained, progress,
                    **kwargs)
    return model
def resnet34(pretrained=False, progress=True, **kwargs):
    r"""ResNet-34 ("Deep Residual Learning for Image Recognition",
    https://arxiv.org/pdf/1512.03385.pdf).

    Args:
        pretrained (bool): load ImageNet-pretrained weights when True.
        progress (bool): show a download progress bar on stderr when True.
    """
    model = _resnet('resnet34', BasicBlock, [3, 4, 6, 3], pretrained, progress,
                    **kwargs)
    return model
def resnet50(pretrained=False, progress=True, **kwargs):
    r"""ResNet-50 ("Deep Residual Learning for Image Recognition",
    https://arxiv.org/pdf/1512.03385.pdf).

    Args:
        pretrained (bool): load ImageNet-pretrained weights when True.
        progress (bool): show a download progress bar on stderr when True.
    """
    model = _resnet('resnet50', Bottleneck, [3, 4, 6, 3], pretrained, progress,
                    **kwargs)
    return model
def resnet101(pretrained=False, progress=True, **kwargs):
    r"""ResNet-101 ("Deep Residual Learning for Image Recognition",
    https://arxiv.org/pdf/1512.03385.pdf).

    Args:
        pretrained (bool): load ImageNet-pretrained weights when True.
        progress (bool): show a download progress bar on stderr when True.
    """
    model = _resnet('resnet101', Bottleneck, [3, 4, 23, 3], pretrained, progress,
                    **kwargs)
    return model
def resnet152(pretrained=False, progress=True, **kwargs):
    r"""ResNet-152 ("Deep Residual Learning for Image Recognition",
    https://arxiv.org/pdf/1512.03385.pdf).

    Args:
        pretrained (bool): load ImageNet-pretrained weights when True.
        progress (bool): show a download progress bar on stderr when True.
    """
    model = _resnet('resnet152', Bottleneck, [3, 8, 36, 3], pretrained, progress,
                    **kwargs)
    return model
def resnext50_32x4d(pretrained=False, progress=True, **kwargs):
    r"""ResNeXt-50 32x4d ("Aggregated Residual Transformation for Deep Neural
    Networks", https://arxiv.org/pdf/1611.05431.pdf).

    Args:
        pretrained (bool): load ImageNet-pretrained weights when True.
        progress (bool): show a download progress bar on stderr when True.
    """
    # Cardinality 32, bottleneck width 4 per group.
    kwargs.update(groups=32, width_per_group=4)
    model = _resnet('resnext50_32x4d', Bottleneck, [3, 4, 6, 3],
                    pretrained, progress, **kwargs)
    return model
def resnext101_32x8d(pretrained=False, progress=True, **kwargs):
    r"""ResNeXt-101 32x8d ("Aggregated Residual Transformation for Deep Neural
    Networks", https://arxiv.org/pdf/1611.05431.pdf).

    Args:
        pretrained (bool): load ImageNet-pretrained weights when True.
        progress (bool): show a download progress bar on stderr when True.
    """
    # Cardinality 32, bottleneck width 8 per group.
    kwargs.update(groups=32, width_per_group=8)
    model = _resnet('resnext101_32x8d', Bottleneck, [3, 4, 23, 3],
                    pretrained, progress, **kwargs)
    return model
def wide_resnet50_2(pretrained=False, progress=True, **kwargs):
    r"""Wide ResNet-50-2 ("Wide Residual Networks",
    https://arxiv.org/pdf/1605.07146.pdf).

    Same as ResNet-50 except the bottleneck (3x3) width is doubled in every
    block; the outer 1x1 widths are unchanged, e.g. the last stage is
    2048-1024-2048 instead of 2048-512-2048.

    Args:
        pretrained (bool): load ImageNet-pretrained weights when True.
        progress (bool): show a download progress bar on stderr when True.
    """
    kwargs.update(width_per_group=64 * 2)
    model = _resnet('wide_resnet50_2', Bottleneck, [3, 4, 6, 3],
                    pretrained, progress, **kwargs)
    return model
def wide_resnet101_2(pretrained=False, progress=True, **kwargs):
    r"""Wide ResNet-101-2 ("Wide Residual Networks",
    https://arxiv.org/pdf/1605.07146.pdf).

    Same as ResNet-101 except the bottleneck (3x3) width is doubled in every
    block; the outer 1x1 widths are unchanged, e.g. the last stage is
    2048-1024-2048 instead of 2048-512-2048.

    Args:
        pretrained (bool): load ImageNet-pretrained weights when True.
        progress (bool): show a download progress bar on stderr when True.
    """
    kwargs.update(width_per_group=64 * 2)
    model = _resnet('wide_resnet101_2', Bottleneck, [3, 4, 23, 3],
                    pretrained, progress, **kwargs)
    return model
self-adaptive | self-adaptive-master/datasets/labels.py | import torch
from collections import namedtuple
from cityscapesscripts.helpers.labels import labels as cs_labels
from cityscapesscripts.helpers.labels import Label
synthia_cs_labels = [
# name id trainId category catId hasInstances ignoreInEval color
Label('unlabeled', 0, 255, 'void', 0, False, True, (0, 0, 0)),
Label('ego vehicle', 1, 255, 'void', 0, False, True, (0, 0, 0)),
Label('rectification border', 2, 255, 'void', 0, False, True, (0, 0, 0)),
Label('out of roi', 3, 255, 'void', 0, False, True, (0, 0, 0)),
Label('static', 4, 255, 'void', 0, False, True, (0, 0, 0)),
Label('dynamic', 5, 255, 'void', 0, False, True, (111, 74, 0)),
Label('ground', 6, 255, 'void', 0, False, True, (81, 0, 81)),
Label('road', 7, 0, 'flat', 1, False, False, (128, 64, 128)),
Label('sidewalk', 8, 1, 'flat', 1, False, False, (244, 35, 232)),
Label('parking', 9, 255, 'flat', 1, False, True, (250, 170, 160)),
Label('rail track', 10, 255, 'flat', 1, False, True, (230, 150, 140)),
Label('building', 11, 2, 'construction', 2, False, False, (70, 70, 70)),
Label('wall', 12, 3, 'construction', 2, False, False, (102, 102, 156)),
Label('fence', 13, 4, 'construction', 2, False, False, (190, 153, 153)),
Label('guard rail', 14, 255, 'construction', 2, False, True, (180, 165, 180)),
Label('bridge', 15, 255, 'construction', 2, False, True, (150, 100, 100)),
Label('tunnel', 16, 255, 'construction', 2, False, True, (150, 120, 90)),
Label('pole', 17, 5, 'object', 3, False, False, (153, 153, 153)),
Label('polegroup', 18, 255, 'object', 3, False, True, (153, 153, 153)),
Label('traffic light', 19, 6, 'object', 3, False, False, (250, 170, 30)),
Label('traffic sign', 20, 7, 'object', 3, False, False, (220, 220, 0)),
Label('vegetation', 21, 8, 'nature', 4, False, False, (107, 142, 35)),
Label('terrain', 22, 255, 'nature', 4, False, True, (152, 251, 152)),
# Removed because not present in Synthia dataset
Label('sky', 23, 9, 'sky', 5, False, False, (70, 130, 180)),
Label('person', 24, 10, 'human', 6, True, False, (220, 20, 60)),
Label('rider', 25, 11, 'human', 6, True, False, (255, 0, 0)),
Label('car', 26, 12, 'vehicle', 7, True, False, (0, 0, 142)),
Label('truck', 27, 255, 'vehicle', 7, True, True, (0, 0, 70)), # Removed because not present in Synthia dataset
Label('bus', 28, 13, 'vehicle', 7, True, False, (0, 60, 100)),
Label('caravan', 29, 255, 'vehicle', 7, True, True, (0, 0, 90)),
Label('trailer', 30, 255, 'vehicle', 7, True, True, (0, 0, 110)),
Label('train', 31, 255, 'vehicle', 7, True, True, (0, 80, 100)), # Removed because not present in Synthia dataset
Label('motorcycle', 32, 14, 'vehicle', 7, True, False, (0, 0, 230)),
Label('bicycle', 33, 15, 'vehicle', 7, True, False, (119, 11, 32)),
Label('license plate', -1, -1, 'vehicle', 7, False, True, (0, 0, 142)),
]
synthia_bdd_labels = [
Label('unlabeled', 255, 255, 'void', 0, False, True, (0, 0, 0)),
Label('dynamic', 255, 255, 'void', 0, False, True, (111, 74, 0)),
Label('ego vehicle', 255, 255, 'void', 0, False, True, (0, 0, 0)),
Label('ground', 255, 255, 'void', 0, False, True, (81, 0, 81)),
Label('static', 255, 255, 'void', 0, False, True, (0, 0, 0)),
Label('parking', 255, 255, 'flat', 1, False, True, (250, 170, 160)),
Label('rail track', 255, 255, 'flat', 1, False, True, (230, 150, 140)),
Label('road', 0, 0, 'flat', 1, False, False, (128, 64, 128)),
Label('sidewalk', 1, 1, 'flat', 1, False, False, (244, 35, 232)),
Label('bridge', 255, 255, 'construction', 2, False, True, (150, 100, 100)),
Label('building', 2, 2, 'construction', 2, False, False, (70, 70, 70)),
Label('wall', 3, 3, 'construction', 2, False, False, (102, 102, 156)),
Label('fence', 4, 4, 'construction', 2, False, False, (190, 153, 153)),
Label('garage', 255, 255, 'construction', 2, False, True, (180, 100, 180)),
Label('guard rail', 255, 255, 'construction', 2, False, True, (180, 165, 180)),
Label('tunnel', 255, 255, 'construction', 2, False, True, (150, 120, 90)),
Label('banner', 255, 255, 'object', 3, False, True, (250, 170, 100)),
Label('billboard', 255, 255, 'object', 3, False, True, (220, 220, 250)),
Label('lane divider', 255, 255, 'object', 3, False, True, (255, 165, 0)),
Label('parking sign', 255, 255, 'object', 3, False, False, (220, 20, 60)),
Label('pole', 5, 5, 'object', 3, False, False, (153, 153, 153)),
Label('polegroup', 255, 255, 'object', 3, False, True, (153, 153, 153)),
Label('street light', 255, 255, 'object', 3, False, True, (220, 220, 100)),
Label('traffic cone', 255, 255, 'object', 3, False, True, (255, 70, 0)),
Label('traffic device', 255, 255, 'object', 3, False, True, (220, 220, 220)),
Label('traffic light', 6, 6, 'object', 3, False, False, (250, 170, 30)),
Label('traffic sign', 7, 7, 'object', 3, False, False, (220, 220, 0)),
Label('traffic sign frame', 255, 255, 'object', 3, False, True, (250, 170, 250)),
Label('vegetation', 8, 8, 'nature', 4, False, False, (107, 142, 35)),
Label('terrain', 9, 255, 'nature', 4, False, True, (152, 251, 152)), # Removed from dataset
Label('sky', 10, 9, 'sky', 5, False, False, (70, 130, 180)),
Label('person', 11, 10, 'human', 6, True, False, (220, 20, 60)),
Label('rider', 12, 11, 'human', 6, True, False, (255, 0, 0)),
Label('car', 13, 12, 'vehicle', 7, True, False, (0, 0, 142)),
Label('bus', 15, 13, 'vehicle', 7, True, False, (0, 60, 100)),
Label('motorcycle', 17, 14, 'vehicle', 7, True, False, (0, 0, 230)),
Label('bicycle', 18, 15, 'vehicle', 7, True, False, (119, 11, 32)),
Label('caravan', 255, 255, 'vehicle', 7, True, True, (0, 0, 90)),
Label('trailer', 255, 255, 'vehicle', 7, True, True, (0, 0, 110)),
Label('truck', 14, 255, 'vehicle', 7, True, False, (0, 0, 70)),
Label('train', 16, 255, 'vehicle', 7, True, False, (0, 80, 100)),
]
SynthiaClass = namedtuple(
"SynthiaClass",
["name", "id", "trainId", "ignoreInEval", "color"]
)
synthia_labels = [
SynthiaClass("road", 3, 0, False, (128, 64, 128)),
SynthiaClass("sidewalk", 4, 1, False, (244, 35, 232)),
SynthiaClass("building", 2, 2, False, (70, 70, 70)),
SynthiaClass("wall", 21, 3, False, (102, 102, 156)),
SynthiaClass("fence", 5, 4, False, (64, 64, 128)),
SynthiaClass("pole", 7, 5, False, (153, 153, 153)),
SynthiaClass("traffic light", 15, 6, False, (250, 170, 30)),
SynthiaClass("traffic sign", 9, 7, False, (220, 220, 0)),
SynthiaClass("vegetation", 6, 8, False, (107, 142, 35)),
SynthiaClass("terrain", 16, 255, True, (152, 251, 152)),
SynthiaClass("sky", 1, 9, False, (70, 130, 180)),
SynthiaClass("pedestrian", 10, 10, False, (220, 20, 60)),
SynthiaClass("rider", 17, 11, False, (255, 0, 0)),
SynthiaClass("car", 8, 12, False, (0, 0, 142)),
SynthiaClass("truck", 18, 255, True, (0, 0, 70)),
SynthiaClass("bus", 19, 13, False, (0, 60, 100)),
SynthiaClass("train", 20, 255, True, (0, 80, 100)),
SynthiaClass("motorcycle", 12, 14, False, (0, 0, 230)),
SynthiaClass("bicycle", 11, 15, False, (119, 11, 32)),
SynthiaClass("void", 0, 255, True, (0, 0, 0)),
SynthiaClass("parking slot", 13, 255, True, (250, 170, 160)),
SynthiaClass("road-work", 14, 255, True, (128, 64, 64)),
SynthiaClass("lanemarking", 22, 255, True, (102, 102, 156))
]
MapillaryClass = namedtuple(
"MapillaryClass",
["id", "trainId"]
)
mapillary_labels = [
MapillaryClass(13, 0),
MapillaryClass(24, 0),
MapillaryClass(41, 0),
MapillaryClass(2, 1),
MapillaryClass(15, 1),
MapillaryClass(17, 2),
MapillaryClass(6, 3),
MapillaryClass(3, 4),
MapillaryClass(45, 5),
MapillaryClass(47, 5),
MapillaryClass(48, 6),
MapillaryClass(50, 7),
MapillaryClass(30, 8),
MapillaryClass(29, 9),
MapillaryClass(27, 10),
MapillaryClass(19, 11),
MapillaryClass(20, 12),
MapillaryClass(21, 12),
MapillaryClass(22, 12),
MapillaryClass(55, 13),
MapillaryClass(61, 14),
MapillaryClass(54, 15),
MapillaryClass(58, 16),
MapillaryClass(57, 17),
MapillaryClass(52, 18),
]
mapillary_synthia_labels = [
MapillaryClass(13, 0),
MapillaryClass(24, 0),
MapillaryClass(41, 0),
MapillaryClass(2, 1),
MapillaryClass(15, 1),
MapillaryClass(17, 2),
MapillaryClass(6, 3),
MapillaryClass(3, 4),
MapillaryClass(45, 5),
MapillaryClass(47, 5),
MapillaryClass(48, 6),
MapillaryClass(50, 7),
MapillaryClass(30, 8),
MapillaryClass(29, 255), #terrain
MapillaryClass(27, 9),
MapillaryClass(19, 10),
MapillaryClass(20, 11),
MapillaryClass(21, 11),
MapillaryClass(22, 11),
MapillaryClass(55, 12),
MapillaryClass(61, 255), #truck
MapillaryClass(54, 13),
MapillaryClass(58, 255), #train
MapillaryClass(57, 14),
MapillaryClass(52, 15),
]
WilddashClass = namedtuple(
"WilddashClass",
["id", "trainId"]
)
wilddash_labels = [
WilddashClass(0, 255),
WilddashClass(1, 255),
WilddashClass(2, 255),
WilddashClass(3, 255),
WilddashClass(4, 255),
WilddashClass(5, 255),
WilddashClass(6, 255),
WilddashClass(7, 0),
WilddashClass(8, 1),
WilddashClass(9, 255),
WilddashClass(10, 255),
WilddashClass(11, 2),
WilddashClass(12, 3),
WilddashClass(13, 4),
WilddashClass(14, 255),
WilddashClass(15, 255),
WilddashClass(16, 255),
WilddashClass(17, 5),
WilddashClass(18, 255),
WilddashClass(19, 6),
WilddashClass(20, 7),
WilddashClass(21, 8),
WilddashClass(22, 9),
WilddashClass(23, 10),
WilddashClass(24, 11),
WilddashClass(25, 12),
WilddashClass(26, 13),
WilddashClass(27, 14),
WilddashClass(28, 15),
WilddashClass(29, 255),
WilddashClass(30, 255),
WilddashClass(31, 16),
WilddashClass(32, 17),
WilddashClass(33, 18),
WilddashClass(34, 13),
WilddashClass(35, 13),
WilddashClass(36, 255),
WilddashClass(37, 255),
WilddashClass(38, 0),
]
wilddash_synthia_labels = [
WilddashClass(0, 255),
WilddashClass(1, 255),
WilddashClass(2, 255),
WilddashClass(3, 255),
WilddashClass(4, 255),
WilddashClass(5, 255),
WilddashClass(6, 255),
WilddashClass(7, 0),
WilddashClass(8, 1),
WilddashClass(9, 255),
WilddashClass(10, 255),
WilddashClass(11, 2),
WilddashClass(12, 3),
WilddashClass(13, 4),
WilddashClass(14, 255),
WilddashClass(15, 255),
WilddashClass(16, 255),
WilddashClass(17, 5),
WilddashClass(18, 255),
WilddashClass(19, 6),
WilddashClass(20, 7),
WilddashClass(21, 8),
WilddashClass(22, 255), #terrain
WilddashClass(23, 9),
WilddashClass(24, 10),
WilddashClass(25, 11),
WilddashClass(26, 12),
WilddashClass(27, 255), #truck
WilddashClass(28, 13),
WilddashClass(29, 255),
WilddashClass(30, 255),
WilddashClass(31, 255), #train
WilddashClass(32, 14),
WilddashClass(33, 15),
WilddashClass(34, 12),
WilddashClass(35, 12),
WilddashClass(36, 255),
WilddashClass(37, 255),
WilddashClass(38, 0),
]
def convert_ids_to_trainids(gt: torch.Tensor,
                            source: str,
                            target: str) -> torch.Tensor:
    """
    Map raw dataset label ids in ``gt`` to training ids for the given
    source/target domain pair, using the matching lookup table.

    Args:
        gt: Ground truth tensor with labels from 0 to 34 / 0 to 33 and -1
        source: Name of source domain
        target: Name of target domain
    Returns:
        gt: Groundtruth tensor with labels from 0 to 18 and 255 for non training ids
        (a new tensor; the input is not modified)
    """
    # Check if target domain is BDD, if true check source domain
    if target == "bdd":
        # Check if source domain is GTA, if true, return gt without conversion, because BDD has train_ids -> [0, 19]
        if source == "gta":
            return gt
        else:
            # If source domain is Synthia, use Synthia/BDD lookup table
            labels = synthia_bdd_labels
    # Check if target domain is IDD, if true check source domain
    elif target == "idd":
        # Check if source domain is GTA, if true, return gt without conversion, because IDD has train_ids -> [0, 19]
        if source == "gta":
            return gt
        else:
            # NOTE(review): this reuses the Synthia/BDD table for IDD — the
            # comment below claims a Synthia/IDD table; confirm the BDD
            # mapping is actually valid for IDD labels.
            # If source domain is Synthia, use Synthia/IDD lookup table
            labels = synthia_bdd_labels
    # If target is not BDD, check source domain
    elif target == "synthia" and source == "synthia":
        labels = synthia_labels
    elif target == "mapillary":
        # If source domain is GTA, use standard CS lookup table
        if source == "gta":
            labels = mapillary_labels
        # If source domain is Synthia, use Synthia/CS lookup table
        else:
            labels = mapillary_synthia_labels
    elif target == "wilddash":
        # If source domain is GTA, use standard CS lookup table
        if source == "gta":
            labels = wilddash_labels
        # If source domain is Synthia, use Synthia/CS lookup table
        else:
            labels = wilddash_synthia_labels
    elif target in ["cityscapes", "gta"]:
        # If source domain is GTA, use standard CS lookup table
        if source == "gta":
            labels = cs_labels
        # If source domain is Synthia, use Synthia/CS lookup table
        else:
            labels = synthia_cs_labels
    else:
        raise ValueError(f"Target domain {target} unknown")
    # Start from all-ignore (255) so any id missing from the table stays ignored.
    gt_copy = torch.ones_like(gt) * 255
    for cs_label in labels:
        orig_id = cs_label.id
        new_id = cs_label.trainId
        # Manually set license plate to id 34 and trainId 255
        if orig_id == -1:
            orig_id = 34
            new_id = 255
        gt_copy[gt == orig_id] = new_id
    return gt_copy
def convert_trainids_to_ids(pred: torch.Tensor,
                            source: str,
                            target: str) -> torch.Tensor:
    """
    Args:
        pred: Prediction tensor to be remapped (MODIFIED IN PLACE)
        source: Name of source domain
        target: Name of target domain
    Returns:
        pred: the same tensor, remapped.

    NOTE(review): despite the name, the loop below replaces raw ids with
    trainIds (``pred[pred == cs_label.id] = cs_label.trainId``) — the same
    direction as convert_ids_to_trainids, merely iterating the table in
    reverse. An inverse mapping would assign ids where trainIds match.
    Also, the in-place sequential replacement can cascade when a trainId
    value collides with a later id value. Verify against callers before use.
    """
    # Check if target domain is BDD, if true check source domain
    if target == "bdd":
        # Check if source domain is GTA, if true, return gt without conversion, because BDD has train_ids -> [0, 19]
        if source == "gta":
            return pred
        else:
            # If source domain is Synthia, use Synthia/BDD lookup table
            labels = synthia_bdd_labels
    # If target is not BDD, check source domain
    else:
        # If source domain is GTA, use standard CS lookup table
        if source == "gta":
            labels = cs_labels
        # If source domain is Synthia, use Synthia/CS lookup table
        else:
            labels = synthia_cs_labels
    for cs_label in labels[::-1]:
        orig_id = cs_label.id
        new_id = cs_label.trainId
        # Manually set license plate to id 34 and trainId 255
        if orig_id == -1:
            orig_id = 34
            new_id = 255
        pred[pred == orig_id] = new_id
    return pred
| 15,789 | 38.673367 | 118 | py |
self-adaptive | self-adaptive-master/datasets/wilddash.py | import os
import torch
from PIL import Image
from typing import Callable, Optional, Tuple, List
class WilddashDataset(object):
    """
    Image/label-pair dataset over a Wilddash directory.

    Unzip the downloaded wd_public_02.zip to /path/to/wilddash
    The wilddash dataset is required to have following folder structure after unzipping:
    wilddash/
         /images/*.jpg
         /labels/*.png

    NOTE(review): sample order follows os.listdir, which is not guaranteed to
    be deterministic across filesystems; the `split` argument is stored but
    not used to filter files.
    """
    def __init__(self,
                 root,
                 split,
                 transforms: Optional[Callable] = None):
        super(WilddashDataset, self).__init__()
        self.split = split
        self.transforms = transforms
        images_root = os.path.join(root, "images")
        self.images = []
        targets_root = os.path.join(root, "labels")
        self.targets = []
        # Pair each image with its label by swapping the file extension.
        for img_name in os.listdir(images_root):
            target_name = img_name.replace(".jpg", ".png")
            self.images.append(os.path.join(images_root, img_name))
            self.targets.append(os.path.join(targets_root, target_name))
    def __getitem__(self, index: int) -> Tuple[torch.Tensor, torch.Tensor]:
        """Load the (image, label) pair at ``index``, applying transforms if set."""
        image = Image.open(self.images[index]).convert('RGB')
        target = Image.open(self.targets[index])
        if self.transforms is not None:
            image, target = self.transforms(image, target)
        return image, target
    def __len__(self) -> int:
        """Number of image/label pairs found on disk."""
        return len(self.images)
def wilddash(root: str,
             split: str,
             transforms: List[Callable]):
    """Factory used by the datasets.__dict__[name] lookup convention."""
    return WilddashDataset(root=root, split=split, transforms=transforms)
self-adaptive | self-adaptive-master/datasets/cityscapes.py | import torchvision
from typing import Any, List, Callable
class CityscapesDataset(torchvision.datasets.Cityscapes):
    """torchvision Cityscapes fixed to semantic targets, with the joint
    image/target transform stored AFTER parent init so it overrides the
    parent's transform handling."""
    def __init__(self,
                 transforms: List[Callable],
                 *args: Any,
                 **kwargs: Any):
        super(CityscapesDataset, self).__init__(*args,
                                                **kwargs,
                                                target_type="semantic")
        # Overwrite whatever the parent stored: transforms here is a joint
        # callable applied as transforms(image, target).
        self.transforms = transforms
def cityscapes(root: str,
               split: str,
               transforms: List[Callable]):
    """Factory used by the datasets.__dict__[name] lookup convention."""
    return CityscapesDataset(root=root, split=split, transforms=transforms)
| 704 | 29.652174 | 71 | py |
self-adaptive | self-adaptive-master/datasets/idd.py | import os
from typing import Tuple, List, Callable, Optional
from PIL import Image
import torch
class IDDDataset(object):
    """
    Image/label-pair dataset over the IDD segmentation directory.

    Follow these steps to prepare the IDD dataset:
     - Unpack the downloaded dataset: tar -xf idd-segmentation.tar.gz -C /path/to/IDD_Segmentation/
     - Rename the directory from IDD_Segmentation to idd: mv /path/to/IDD_Segmentation /path/to/idd
    Create train IDs from polygon annotations:
     - wget https://github.com/AutoNUE/public-code/archive/refs/heads/master.zip
     - unzip master.zip -d iddscripts
     - export PYTHONPATH="${PYTHONPATH}:iddscripts/public-code-master/helpers/"
     - pip install -r iddscripts/public-code-master/requirements.txt
     - python iddscripts/public-code-master/preperation/createLabels.py --datadir /path/to/idd --id-type csTrainId --num-workers 1
     - rm -rf iddscripts
    The IDD dataset is required to have following folder structure:
    idd/
        leftImg8bit/
            train/city/*.png
            test/city/*.png
            val/city/*.png
        gtFine/
            train/city/*.png
            test/city/*.png
            val/city/*.png
    """
    def __init__(self,
                 root,
                 split,
                 transforms: Optional[Callable] = None):
        super(IDDDataset, self).__init__()
        self.mode = 'gtFine'
        self.images_dir = os.path.join(root, 'leftImg8bit', split)
        self.targets_dir = os.path.join(root, self.mode, split)
        self.split = split
        self.images = []
        self.targets = []
        self.transforms = transforms
        # Walk per-city subfolders; derive each label filename from the image
        # filename using the Cityscapes-style naming convention.
        for city in os.listdir(self.images_dir):
            img_dir = os.path.join(self.images_dir, city)
            target_dir = os.path.join(self.targets_dir, city)
            for file_name in os.listdir(img_dir):
                target_name = file_name.split("_leftImg8bit.png")[0] + "_gtFine_labelcsTrainIds.png"
                self.images.append(os.path.join(img_dir, file_name))
                self.targets.append(os.path.join(target_dir, target_name))
    def __getitem__(self, index: int) -> Tuple[torch.Tensor, torch.Tensor]:
        """Load the (image, label) pair at ``index``, applying transforms if set."""
        image = Image.open(self.images[index]).convert('RGB')
        target = Image.open(self.targets[index])
        if self.transforms is not None:
            image, target = self.transforms(image, target)
        return image, target
    def __len__(self) -> int:
        """Number of image/label pairs found on disk."""
        return len(self.images)
def idd(root: str,
        split: str,
        transforms: List[Callable]):
    """Factory used by the datasets.__dict__[name] lookup convention."""
    return IDDDataset(root=root, split=split, transforms=transforms)
self-adaptive | self-adaptive-master/datasets/self_adapt_augment.py | import torchvision.transforms.functional as F
import torchvision.transforms as tf
from PIL import Image, ImageFilter
import torch
from typing import List, Any
import os
import datasets
from utils import transforms
class TrainTestAugDataset:
    """Dataset wrapper that yields, per sample, the plain (image, target) pair
    plus a list of augmented duplicates (scale / flip / rotation / color
    jitter / Gaussian blur / grayscale) together with the parameters needed to
    invert each augmentation. `create_pseudo_gt` fuses per-duplicate model
    outputs back into a single pseudo ground truth for self-training or
    test-time augmentation."""
    def __init__(self,
                 device,
                 source,
                 crop_size: List[int],
                 transforms_list: transforms.Compose = transforms.Compose([]),
                 only_inf: bool = False,
                 combined_augmentation: bool = True,
                 ignore_index: int = 255,
                 threshold: float = 0.7,
                 getinfo: bool = False,
                 tta: bool = False,
                 flip_all_augs: bool = False,
                 flips: bool = True,
                 scales: list = [1.0],
                 grayscale: bool = False,
                 colorjitter: bool = False,
                 gaussblur: bool = False,
                 rotation: bool = False,
                 rot_angle: int = 30,
                 jitter_factor: float = 0.4,
                 gauss_radius: float = 1.0,
                 *args: Any,
                 **kwargs: Any):
        # kwargs must provide 'root' (path to the target dataset) and 'split'.
        # NOTE(review): `scales: list = [1.0]` is a mutable default argument;
        # it is only read below, never mutated, so it is harmless in practice.
        self.root = kwargs['root']
        self.device = device
        self.source = source
        # The target dataset name is derived from the root directory name and
        # used to look up the dataset factory in the `datasets` package.
        self.target = os.path.basename(self.root)
        self.dataset = datasets.__dict__[self.target](root=self.root,
                                                      split=kwargs['split'],
                                                      transforms=transforms_list)
        self.combined_augmentation=combined_augmentation
        self.dataset.transforms = transforms_list
        self.ignore_index = ignore_index
        self.threshold = threshold
        self.getinfo = getinfo
        self.tta = tta
        self.scales = scales
        self.flip_all_augs = flip_all_augs
        self.flips = flips
        self.grayscale = grayscale
        self.colorjitter = colorjitter
        self.gaussblur = gaussblur
        self.rotation = rotation
        self.rot_angle = int(rot_angle)
        self.jitter_factor = jitter_factor
        self.gauss_radius = gauss_radius
        # First entry None = the un-augmented duplicate; the remaining
        # entries name the single augmentation applied to each extra duplicate.
        self.augs = [None]
        if self.flips: self.augs.append("flip")
        if self.grayscale: self.augs.append("gray")
        if self.colorjitter: self.augs.append("jitter")
        if self.gaussblur: self.augs.append("gauss")
        if self.rotation: self.augs.append("rot")
        self.resize_image_pre = transforms.ImgResizePIL(crop_size)
        self.only_inf = only_inf
    def __getitem__(self, idx: int):
        """Return (image, target, crop_imgs, transforms_list) where crop_imgs
        holds the augmented duplicates and transforms_list records, for each
        duplicate, (i, j, w, h, flip, rot, rot_angle, gray, jitter, gauss)."""
        image, target = self.dataset.__getitem__(idx)
        # Resize image
        image = self.resize_image_pre(image)
        crop_imgs = []
        transforms_list = []
        trans = transforms.Compose([transforms.ToTensor(),
                                    transforms.IdsToTrainIds(source=self.source, target=self.target),
                                    transforms.Normalize()])
        if self.only_inf:
            # Inference-only mode: no augmented duplicates are produced.
            image, target = trans(image, target)
            return image, target, [], []
        # Combined mode: exactly two duplicates per scale — one plain, one
        # with all enabled augmentations applied at once.
        if self.combined_augmentation: self.augs = [None, None]
        for scale in self.scales:
            # NOTE(review): this loop variable `idx` shadows the sample-index
            # parameter of __getitem__ (the parameter is not used below).
            for idx, aug in enumerate(self.augs):
                # Apply scaling
                i, j = 0, 0
                w, h = [int(i*scale) for i in image.size]
                crop_img = image.resize((w, h), Image.BILINEAR)
                # Additional augmentations on every duplicate of the scale
                flip_action, rot_action, gray_action, jitter_action, gauss_action = False, False, False, False, False
                if self.flip_all_augs and idx != 0:
                    flip_action = True
                    crop_img = F.hflip(crop_img)
                if self.combined_augmentation and idx == 1:
                    # Second duplicate gets every enabled augmentation at once.
                    if self.flips:
                        flip_action = True
                        crop_img = F.hflip(crop_img)
                    if self.rotation:
                        rot_action = True
                        crop_img = F.rotate(crop_img, angle=self.rot_angle, expand=True, fill=0)
                    if self.colorjitter:
                        jitter_action = True
                        crop_img = tf.ColorJitter(brightness=self.jitter_factor,
                                                  contrast=self.jitter_factor,
                                                  saturation=self.jitter_factor,
                                                  hue=min(0.1, self.jitter_factor))(crop_img)
                    if self.gaussblur:
                        gauss_action = True
                        crop_img = crop_img.filter(ImageFilter.GaussianBlur(self.gauss_radius))
                    if self.grayscale:
                        gray_action = True
                        crop_img = F.to_grayscale(crop_img, num_output_channels=3)
                if not self.combined_augmentation:
                    # One-augmentation-per-duplicate mode.
                    if aug == "flip":
                        flip_action = True
                        crop_img = F.hflip(crop_img)
                    if aug == "rot":
                        rot_action = True
                        crop_img = F.rotate(crop_img, angle=self.rot_angle, expand=True, fill=0)
                    if aug == "jitter":
                        jitter_action = True
                        crop_img = tf.ColorJitter(brightness=self.jitter_factor,
                                                  contrast=self.jitter_factor,
                                                  saturation=self.jitter_factor,
                                                  hue=min(0.1, self.jitter_factor))(crop_img)
                    if aug == "gauss":
                        gauss_action = True
                        crop_img = crop_img.filter(ImageFilter.GaussianBlur(self.gauss_radius))
                    if aug == "gray":
                        gray_action = True
                        crop_img = F.to_grayscale(crop_img, num_output_channels=3)
                # The target argument is a dummy here; only the image is used.
                crop_img, _ = trans(crop_img, crop_img)
                transforms_list.append((i, j, w, h, flip_action, rot_action,
                                        self.rot_angle, gray_action, jitter_action, gauss_action))
                crop_imgs.append(crop_img)
        image, target = trans(image, target)
        return image, target, crop_imgs, transforms_list
    def __len__(self) -> int:
        return len(self.dataset.images)
    def create_pseudo_gt(self,
                         crops_soft: torch.Tensor,
                         crop_transforms: List[List[torch.Tensor]],
                         out_shape: torch.Tensor) -> torch.Tensor:
        """
        Args:
            crops_soft: Tensor with model outputs of crops (N, C, H, W)
            crop_transforms: List with transformations (e.g. random crop and hflip parameters)
            out_shape: Tensor with output shape
        Returns:
            pseudo_gt: Pseudo ground truth based on softmax probabilities
        """
        num_classes = crops_soft[0].shape[1]
        # Cells never written below keep the ignore_index value.
        crops_soft_all = torch.ones(len(crops_soft), num_classes, *out_shape[-2:]) * self.ignore_index
        for crop_idx, (crop_soft, crop_transform) in enumerate(zip(crops_soft, crop_transforms)):
            i, j, h, w, flip_action, rot_action, rot_angle, gray_action, jitter_action, gauss_action = crop_transform
            # Reaugment Images
            if rot_action:
                # Rotate back
                crop_soft = F.rotate(crop_soft, angle=-int(rot_angle))
                crop_soft = tf.CenterCrop(size=(h, w))(crop_soft)
            if flip_action:
                crop_soft = F.hflip(crop_soft)
            # Scale to original scale
            crop_soft = torch.nn.functional.interpolate(
                crop_soft, size=[*out_shape[-2:]], mode='bilinear', align_corners=True
            )
            h, w = out_shape[-2:]
            crops_soft_all[crop_idx, :, i:i+h, j:j+w] = crop_soft.squeeze(0)
        # Average the (de-augmented) soft predictions over all duplicates.
        pseudo_gt = torch.mean(crops_soft_all, dim=0)
        if self.tta:
            pseudo_gt = pseudo_gt.unsqueeze(0)
        else:
            # Create mask to compare only max predictions
            compare_mask = torch.amax(pseudo_gt, dim=0, keepdim=True) == pseudo_gt
            # Per-class threshold: fraction of that class's maximum confidence.
            class_threshold = self.threshold * torch.max(torch.max(pseudo_gt, dim=1)[0], dim=1)[0]
            if self.getinfo: print(f"Class thresholds: {class_threshold}")
            class_threshold = class_threshold.unsqueeze(1).unsqueeze(1).repeat(1, pseudo_gt.shape[1],
                                                                               pseudo_gt.shape[2])
            # Set ignore indices for pixels having not enough pixels or ignore indices
            threshold_mask = class_threshold < pseudo_gt
            threshold_mask = torch.amax(torch.mul(threshold_mask, compare_mask), dim=0)
            final_mask = threshold_mask.unsqueeze(0).unsqueeze(0)
            pseudo_gt = torch.argmax(pseudo_gt, dim=0, keepdim=True).unsqueeze(0)
            pseudo_gt[~final_mask] = self.ignore_index
        return pseudo_gt
| 9,098 | 43.385366 | 117 | py |
self-adaptive | self-adaptive-master/datasets/gta.py | import os
import glob
import argparse
import pathlib
import PIL.Image
import torch
from typing import List, Callable, Optional, Tuple
from tqdm import tqdm
import urllib.request
import shutil
import scipy.io
class GTADataset(object):
    """
    GTA5 semantic segmentation dataset.

    Download, unzip, and split data:
    ``python datasets/gta.py --dataset-root /path/to/gta --download-data --split-data``
    (this also removes samples with size mismatches between image and annotation).

    Expected layout:
    gta/
        images/{train,test,val}/*.png
        labels/{train,test,val}/*.png
    """
    def __init__(self,
                 root,
                 split,
                 transforms: Optional[Callable] = None):
        super(GTADataset, self).__init__()
        self.split = split
        self.transforms = transforms
        self.images_dir = os.path.join(root, "images", split)
        self.targets_dir = os.path.join(root, "labels", split)
        self.images, self.targets = [], []
        # GTA images and labels share the exact same file name.
        for sample_name in os.listdir(self.images_dir):
            self.images.append(os.path.join(self.images_dir, sample_name))
            self.targets.append(os.path.join(self.targets_dir, sample_name))
    def __getitem__(self, index: int) -> Tuple[torch.Tensor, torch.Tensor]:
        """Return the index-th (image, label) pair with optional joint transforms."""
        image = PIL.Image.open(self.images[index]).convert('RGB')
        target = PIL.Image.open(self.targets[index])
        if self.transforms is None:
            return image, target
        return self.transforms(image, target)
    def __len__(self) -> int:
        """Number of indexed samples."""
        return len(self.images)
def gta(root: str,
        split: str,
        transforms: List[Callable]):
    """Factory wrapper building a :class:`GTADataset`."""
    dataset = GTADataset(root=root, split=split, transforms=transforms)
    return dataset
def preprocess(dataset_root: str):
    """
    Remove GTA data sample pairs whose image and annotation sizes mismatch.

    Walks the train/val/test splits, compares every image against the label
    file of the same name, and deletes both files of a mismatching pair.

    Args:
        dataset_root: Root directory containing ``images/`` and ``labels/``.
    """
    dataset_split = ["train", "val", "test"]
    # Count deleted sample pairs
    count_del = 0
    for split in dataset_split:
        images = sorted(glob.glob(os.path.join(dataset_root, "images", split, "*.png")))
        labels = sorted(glob.glob(os.path.join(dataset_root, "labels", split, "*.png")))
        assert len(images) == len(labels), "Length of catalogs does not match!"
        print("Preprocessing images and labels")
        for image, label in tqdm(zip(images, labels)):
            # Assert that label corresponds to current image.
            # Fix: use os.path.basename instead of split("/") so the check
            # also works with Windows path separators.
            image_name = os.path.basename(image)
            label_name = os.path.basename(label)
            assert image_name == label_name
            # Load image and label (PIL reads the size lazily from the header)
            img = PIL.Image.open(image)
            gt = PIL.Image.open(label)
            if img.size != gt.size:
                print(f"Found data sample pair with unmatching size. Deleting file with name: {image_name} and {label_name}.")
                # Delete mismatching data samples
                os.remove(path=image)
                os.remove(path=label)
                count_del += 1
    print(f"{count_del} images have been removed from the dataset")
def download_dataset(dataset_root: str, download_path_main: str ="https://download.visinf.tu-darmstadt.de/data/from_games"):
    """Download and unpack all ten GTA image/label archives plus the mapping files."""
    data_url = os.path.join(download_path_main, "data")
    pathlib.Path(dataset_root).mkdir(exist_ok=True, parents=True)
    # Archives are numbered 01..10, one images and one labels zip each.
    for part in tqdm(range(1, 11)):
        prefix = f"{part:02}"
        for kind in ["images", "labels"]:
            archive = f"{prefix}_{kind}.zip"
            src = os.path.join(data_url, archive)
            dst = os.path.join(dataset_root, archive)
            urllib.request.urlretrieve(src, filename=dst)
            shutil.unpack_archive(dst, dataset_root)
            os.remove(dst)
    # Fetch the split/mapping .mat files shipped with the dataset code.
    mapping_name = "read_mapping.zip"
    map_src = os.path.join(download_path_main, "code", mapping_name)
    map_dst = os.path.join(dataset_root, mapping_name)
    urllib.request.urlretrieve(map_src, filename=map_dst)
    shutil.unpack_archive(map_dst, os.path.join(dataset_root, "read_mapping"))
    os.remove(map_dst)
def load_split(path: str):
    """Load the GTA train/val/test id arrays from a ``split.mat`` file."""
    split_mat = scipy.io.loadmat(path)
    return split_mat['trainIds'], split_mat['valIds'], split_mat['testIds']
def load_mapping(path: str):
    """Load class names and the Cityscapes/CamVid color maps from a .mat file."""
    mapping_mat = scipy.io.loadmat(path)
    return mapping_mat['classes'], mapping_mat['cityscapesMap'], mapping_mat['camvidMap']
def split_dataset(dataset_root):
    """Move the flat images/ and labels/ files into train/val/test
    sub-directories according to the ids in read_mapping/split.mat, then
    delete the mapping directory."""
    # Get trainIds, valIds, testIds
    path_to_map = os.path.join(dataset_root, "read_mapping")
    path_to_mat = os.path.join(path_to_map, "split.mat")
    trainIds, valIds, testIds = load_split(path=path_to_mat)
    split_ids = [trainIds.squeeze(), valIds.squeeze(), testIds.squeeze()]
    split_paths = ['train', 'val', 'test']
    img_dir = os.path.join(dataset_root, "images")
    label_dir = os.path.join(dataset_root, "labels")
    img_out_dir = os.path.join(dataset_root, "images")
    label_out_dir = os.path.join(dataset_root, "labels")
    for split_id, split_path in zip(split_ids, split_paths):
        path_split_image = os.path.join(img_out_dir, split_path)
        path_split_label = os.path.join(label_out_dir, split_path)
        pathlib.Path(path_split_label).mkdir(parents=True, exist_ok=True)
        pathlib.Path(path_split_image).mkdir(parents=True, exist_ok=True)
        for img_id in tqdm(split_id):
            # GTA file names are the zero-padded numeric id, e.g. 00001.png.
            img_name = str(img_id).zfill(5) + '.png'
            shutil.move(os.path.join(img_dir, img_name), os.path.join(path_split_image, img_name))
            shutil.move(os.path.join(label_dir, img_name), os.path.join(path_split_label, img_name))
    shutil.rmtree(path_to_map)
    # NOTE(review): img_dir/img_out_dir and label_dir/label_out_dir are built
    # from identical components, so these two rmtree branches are dead code
    # as written — presumably kept for a configuration with separate out dirs.
    if img_dir != img_out_dir:
        shutil.rmtree(img_dir)
    if label_dir != label_out_dir:
        shutil.rmtree(label_dir)
if __name__ == "__main__":
    # CLI entry: optionally download and split the GTA dataset, then always
    # remove image/label pairs with mismatching sizes. The original final
    # line carried garbled extraction metadata; restored to valid Python.
    parser = argparse.ArgumentParser()
    parser.add_argument("--dataset-root", type=str, default=os.path.join(os.getcwd(), "datasets", "gta"))
    parser.add_argument("--download-data", action="store_true")
    parser.add_argument("--split-data", action="store_true")
    args = parser.parse_args()
    if args.download_data:
        download_dataset(args.dataset_root)
    if args.split_data:
        split_dataset(args.dataset_root)
    preprocess(args.dataset_root)
self-adaptive | self-adaptive-master/datasets/bdd.py | import torch
import os
from PIL import Image
from typing import Callable, Optional, Tuple, List
class BerkeleyDataset(object):
    """
    BDD100k semantic segmentation split.

    Setup:
      unzip bdd100k_images_10k.zip -d /path/to/bdd100k
      unzip bdd100k_sem_seg_labels_trainval.zip -d /path/to/bdd100k
      mv /path/to/bdd100k /path/to/bdd

    Expected layout:
    bdd/
        images/10k/{train,test,val}/*.jpg
        labels/sem_seg/masks/{train,val}/*.png
    """
    def __init__(self,
                 root,
                 split,
                 transforms: Optional[Callable] = None):
        super(BerkeleyDataset, self).__init__()
        self.split = split
        self.transforms = transforms
        self.images = []
        self.targets = []
        images_root = os.path.join(root, "images", "10k", split)
        targets_root = os.path.join(root, "labels", "sem_seg", "masks", split)
        # Masks carry the same stem as the image, but as .png files.
        for image_name in os.listdir(images_root):
            mask_name = image_name.replace(".jpg", ".png")
            self.images.append(os.path.join(images_root, image_name))
            self.targets.append(os.path.join(targets_root, mask_name))
    def __getitem__(self, index: int) -> Tuple[torch.Tensor, torch.Tensor]:
        """Return the index-th (image, label) pair with optional joint transforms."""
        image = Image.open(self.images[index]).convert('RGB')
        target = Image.open(self.targets[index])
        if self.transforms is None:
            return image, target
        return self.transforms(image, target)
    def __len__(self) -> int:
        """Number of indexed samples."""
        return len(self.images)
def bdd(root: str,
        split: str,
        transforms: List[Callable]):
    """Factory wrapper building a :class:`BerkeleyDataset`."""
    dataset = BerkeleyDataset(root=root, split=split, transforms=transforms)
    return dataset
| 2,054 | 31.619048 | 112 | py |
self-adaptive | self-adaptive-master/datasets/synthia.py | from PIL import Image
from typing import Optional, Callable, Tuple, List
import os
import torch
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True
class SynthiaDataset(object):
    """
    SYNTHIA semantic segmentation dataset (Cityscapes-style layout).

    Expected layout:
    synthia/
        leftImg8bit/{train,val}/seq_id/*.png
        gtFine/{train,val}/seq_id/*.png
    """
    def __init__(self,
                 root,
                 split,
                 transforms: Optional[Callable] = None):
        super(SynthiaDataset, self).__init__()
        self.mode = 'gtFine'
        self.split = split
        self.transforms = transforms
        self.images_dir = os.path.join(root, 'leftImg8bit', split)
        self.targets_dir = os.path.join(root, self.mode, split)
        self.images, self.targets = [], []
        # One sub-directory per sequence; derive the label file name from the
        # image file name by swapping the suffix.
        for sequence in os.listdir(self.images_dir):
            seq_images = os.path.join(self.images_dir, sequence)
            seq_targets = os.path.join(self.targets_dir, sequence)
            for image_file in os.listdir(seq_images):
                stem = image_file.split('_leftImg8bit')[0]
                label_file = stem + "_gtFine_labelIds" + ".png"
                self.images.append(os.path.join(seq_images, image_file))
                self.targets.append(os.path.join(seq_targets, label_file))
    def __getitem__(self, index: int) -> Tuple[torch.Tensor, torch.Tensor]:
        """Return the index-th (image, label) pair with optional joint transforms."""
        image = Image.open(self.images[index]).convert('RGB')
        target = Image.open(self.targets[index])
        if self.transforms is None:
            return image, target
        return self.transforms(image, target)
    def __len__(self) -> int:
        """Number of indexed samples."""
        return len(self.images)
def synthia(root: str,
            split: str,
            transforms: List[Callable]):
    """Factory returning a SynthiaDataset for the given root directory and split.

    Args:
        root: Dataset root directory (the ``synthia/`` folder).
        split: Split name ('train' or 'val').
        transforms: Joint image/target transform pipeline.
    """
    # The original final line carried garbled extraction metadata after the
    # closing parenthesis, which made the module unparsable; restored here.
    return SynthiaDataset(root=root,
                          split=split,
                          transforms=transforms)
self-adaptive | self-adaptive-master/datasets/mapillary.py | import os
from PIL import Image
from typing import Callable, Optional, Tuple, List
import torch
class MapillaryDataset(object):
    """
    Mapillary Vistas dataset (validation subset of the training images).

    Expected layout:
    mapillary/
        training/
            v1.2/labels/*.png
            images/*.jpg
    """
    def __init__(self,
                 root,
                 split,
                 transforms: Optional[Callable] = None):
        super(MapillaryDataset, self).__init__()
        self.mode = 'gtFine'
        # Use only subset of 2000 the training images for val, as inference
        # over the full set otherwise takes too long.
        self.num_images = 2000
        self.transforms = transforms
        self.images = []
        self.targets = []
        val_root = os.path.join(root, "training")
        labels_root = os.path.join(val_root, "v1.2", "labels")
        imgs_root = os.path.join(val_root, "images")
        # Keep only the first num_images samples (listing order).
        for image_name in os.listdir(imgs_root)[:self.num_images]:
            mask_name = image_name.replace(".jpg", ".png")
            self.images.append(os.path.join(imgs_root, image_name))
            self.targets.append(os.path.join(labels_root, mask_name))
    def __getitem__(self, index: int) -> Tuple[torch.Tensor, torch.Tensor]:
        """Return the index-th (image, label) pair with optional joint transforms."""
        image = Image.open(self.images[index]).convert('RGB')
        target = Image.open(self.targets[index])
        if self.transforms is None:
            return image, target
        return self.transforms(image, target)
    def __len__(self) -> int:
        """Number of indexed samples (at most ``num_images``)."""
        return len(self.images)
def mapillary(root: str,
              split: str,
              transforms: List[Callable]):
    """Factory returning a MapillaryDataset for the given root directory.

    Args:
        root: Dataset root directory (the ``mapillary/`` folder).
        split: Split name (accepted for API symmetry; the dataset always
            reads from the ``training`` directory).
        transforms: Joint image/target transform pipeline.
    """
    # The original final line carried garbled extraction metadata after the
    # closing parenthesis, which made the module unparsable; restored here.
    return MapillaryDataset(root=root,
                            split=split,
                            transforms=transforms)
self-adaptive | self-adaptive-master/loss/semantic_seg.py | import torch
from typing import Dict
class CrossEntropyLoss(torch.nn.Module):
    """Pixel-wise cross entropy averaged only over non-ignored pixels."""
    def __init__(self,
                 ignore_index: int = 255):
        super(CrossEntropyLoss, self).__init__()
        # reduction="none" keeps the per-pixel loss map so the mean below can
        # be restricted to valid (non-ignored) pixels.
        self.criterion = torch.nn.CrossEntropyLoss(ignore_index=ignore_index, reduction="none")
        self.ignore_index = ignore_index
    def forward(self,
                output: torch.Tensor,
                gt: torch.Tensor):
        """
        Args:
            output: Class logits per pixel (N, C, H, W)
            gt: Label map, (N, 1, H, W) or (N, H, W)
        Returns:
            Scalar loss averaged over pixels whose label != ignore_index.
        """
        labels = gt.long().squeeze(1)
        pixel_loss = self.criterion(output, labels)
        valid = labels != self.ignore_index
        return pixel_loss[valid].mean()
class PSPNetLoss(torch.nn.Module):
    """Main + auxiliary-head cross-entropy loss for PSPNet-style models.

    The original final line carried garbled extraction metadata after the
    return statement, which made the module unparsable; restored here.
    """
    def __init__(self,
                 ignore_index: int = 255,
                 alpha: float = 0.0):
        super(PSPNetLoss, self).__init__()
        self.seg_criterion = torch.nn.CrossEntropyLoss(ignore_index=ignore_index)
        self.cls_criterion = torch.nn.CrossEntropyLoss(ignore_index=ignore_index)
        self.ignore_index = ignore_index
        # Weight of the auxiliary head's loss term.
        self.alpha = alpha
    def forward(self,
                output_dict: Dict[str, torch.Tensor],
                gt: torch.Tensor):
        """
        Args:
            output_dict: {'final': main-head logits, 'aux': auxiliary-head logits}
            gt: Label map, (N, 1, H, W) or (N, H, W)
        Returns:
            total_loss: seg_loss + alpha * aux_loss
        """
        gt = gt.long().squeeze(1)
        seg_loss = self.seg_criterion(output_dict['final'], gt)
        cls_loss = self.cls_criterion(output_dict['aux'], gt)
        total_loss = seg_loss + self.alpha * cls_loss
        return total_loss
self-adaptive | self-adaptive-master/utils/montecarlo.py | import torch
import numpy as np
from typing import Union, List
class MonteCarloDropout(object):
    """Buffers predictions/softmax maps over multiple stochastic forward
    passes (with dropout enabled at eval time) and aggregates them into mean
    and variance estimates."""
    def __init__(self,
                 size: Union[List, int],
                 passes: int = 10,
                 classes: int = 19):
        # size is indexed as size[0], size[1] below (H, W) — so despite the
        # annotation, a bare int would fail here; a 2-element sequence is expected.
        self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
        self.vanilla_prediction = torch.zeros(size=(1, size[0], size[1]), device=self.device)
        self.vanilla_confidence = torch.zeros(size=(1, size[0], size[1]), device=self.device)
        self.mcd_predictions = torch.zeros(size=(passes, size[0], size[1]), device=self.device)
        self.mcd_confidences = torch.zeros(size=(passes, size[0], size[1]), device=self.device)
        self.softmax = torch.zeros(size=(passes, classes, size[0], size[1]), device=self.device)
        self.mean_softmax = None
        self.var_softmax = None
        self.passes = passes
        # Save Dropout layers for checking
        self.dropout_layers = []
    def enable_dropout(self,
                       model: torch.nn.Module):
        """
        Switch every Dropout-like layer to train mode so it stays stochastic
        during evaluation (the core of the MC-dropout technique).
        Args:
            model: Pytorch model
        """
        for m in model.modules():
            if m.__class__.__name__.startswith("Dropout"):
                m.train()
                self.dropout_layers.append(m)
    def save_predictions(self,
                         pass_idx: int,
                         current_prediction: torch.Tensor,
                         current_confidence: torch.Tensor):
        """Store one pass's hard prediction and confidence maps at pass_idx."""
        if type(current_prediction) == torch.Tensor:
            # Send tensors to CPU and convert to numpy
            current_prediction = current_prediction.squeeze(0).cpu().numpy()
            current_confidence = current_confidence.squeeze(0).cpu().numpy()
        # Assigning numpy arrays into a torch tensor converts them implicitly.
        self.mcd_predictions[pass_idx] = current_prediction
        self.mcd_confidences[pass_idx] = current_confidence
    def save_softmax(self,
                     pass_idx: int,
                     softmax: torch.Tensor):
        # Store the full (C, H, W) softmax map of one pass.
        self.softmax[pass_idx] = softmax
    def avg_softmax(self):
        """Return (confidence, prediction, mean_softmax) averaged over passes."""
        # Average softmax over all forward passes
        self.mean_softmax = torch.mean(self.softmax, dim=0, keepdim=True)
        self.var_softmax = torch.var(self.softmax, dim=0, keepdim=True)
        # Get mean confidence and prediction
        confidence, prediction = self.mean_softmax.max(dim=1)
        return confidence, prediction, self.mean_softmax
    def avg_predictions(self):
        """Return per-pixel mean/variance of predictions and confidences.
        NOTE(review): np.mean/np.var on torch tensors works only for CPU
        tensors; on CUDA this would raise — confirm intended usage."""
        # Calculate mean and var over multiple MCD predictions
        mean_pred = np.mean(self.mcd_predictions, axis=0)
        var_pred = np.var(self.mcd_predictions, axis=0)
        # Calculate mean and var over multiple MCD confidences
        mean_conf = np.mean(self.mcd_confidences, axis=0)
        var_conf = np.var(self.mcd_confidences, axis=0)
        return {"Mean prediction": mean_pred,
                "Variance prediction": var_pred,
                "Mean confidence": mean_conf,
                "Variance confidence": var_conf}
| 2,978 | 36.2375 | 96 | py |
self-adaptive | self-adaptive-master/utils/modeling.py | import functools
import torch
def rsetattr(obj, attr, val):
    """setattr that follows dotted attribute paths, e.g. rsetattr(o, "a.b.c", v)."""
    parent_path, _, leaf = attr.rpartition('.')
    target = rgetattr(obj, parent_path) if parent_path else obj
    return setattr(target, leaf, val)
def rgetattr(obj, attr, *args):
    """getattr that follows dotted attribute paths; an extra positional
    argument acts as the default at every step of the path."""
    node = obj
    for name in attr.split('.'):
        node = getattr(node, name, *args)
    return node
def freeze_layers(opts, model: torch.nn.Module):
    """Freeze (requires_grad=False) the stem and selected stages of a
    DeepLab/ResNet or HRNet backbone, as configured by opts.resnet_layers /
    opts.hrnet_layers. The model is expected to be wrapped (model.module),
    e.g. by DataParallel/DistributedDataParallel."""
    if len(opts.resnet_layers) != 0 and "resnet" in opts.backbone_name and "deeplab" in opts.arch_type:
        # Locate the module holding the ResNet backbone for this architecture.
        if opts.arch_type == "deeplab":
            model_name = model.module
        elif opts.arch_type == "deeplabv3plus":
            model_name = model.module.model
        else:
            raise ValueError(f"{opts.arch_type} not compatible with resnet layer freezing")
        # Freeze the first four parameter tensors (the network stem).
        for idx, p in enumerate(model_name.named_parameters()):
            if idx <= 3:
                p[1].requires_grad = False
            else:
                break
        # Freeze the requested residual stages (backbone.layer<k>).
        for layer in opts.resnet_layers:
            layer = "layer" + str(layer)
            for para in getattr(model_name.backbone, layer).named_parameters():
                para[1].requires_grad = False
    if len(opts.hrnet_layers) != 0 and "hrnet" in opts.arch_type:
        # Freeze the first four parameter tensors (the network stem).
        for idx, p in enumerate(model.module.model.named_parameters()):
            if idx <= 3:
                p[1].requires_grad = False
            else:
                break
        # For HRNet, freeze both transition<k> and the following stage<k+1>.
        for layer_idx in opts.hrnet_layers:
            layer = "transition" + str(layer_idx)
            for para in getattr(model.module.model, layer).named_parameters():
                para[1].requires_grad = False
            layer = "stage" + str(layer_idx + 1)
            for para in getattr(model.module.model, layer).named_parameters():
                para[1].requires_grad = False
| 1,782 | 40.465116 | 103 | py |
self-adaptive | self-adaptive-master/utils/calibration.py | """
Guo et al.: O Calibration of Modern Neural Networks, 2017, ICML
https://arxiv.org/abs/1706.04599
Code based on implementation of G. Pleiss: https://gist.github.com/gpleiss/0b17bc4bd118b49050056cfcd5446c71
"""
import torch
import numpy as np
import matplotlib.pyplot as plt
import pickle
import os
import pathlib
class CalibrationMeter(object):
    """Accumulates per-class reliability statistics (confidence vs. accuracy
    histograms and ECE) over a dataset and renders reliability diagrams."""
    def __init__(self,
                 device,
                 n_bins: int = 10,
                 num_images: int = 500,
                 num_classes: int = 19):
        # Initiate bins
        self.device = device
        self.num_classes = num_classes
        self.num_images = num_images
        self.num_bins = n_bins
        self.width = 1.0 / n_bins
        self.bins = torch.linspace(0, 1, n_bins + 1, device=self.device)
        self.bin_centers = np.linspace(0, 1.0 - self.width, n_bins) + self.width / 2
        self.bin_uppers = self.bins[1:]
        self.bin_lowers = self.bins[:-1]
        # Save bins per class
        self.scores_per_class = torch.zeros(size=(self.num_classes, self.num_bins), device=self.device)
        self.corrects_per_class = torch.zeros_like(self.scores_per_class, device=self.device)
        self.ece_per_class = torch.zeros(size=(self.num_classes, 1), device=self.device)
        self.class_pixels_total = torch.zeros(size=(self.num_classes, 1), device=self.device)
        # Save accuracy and confidence values per class per batch
        self.class_acc_per_batch = [torch.zeros(0, device=self.device) for _ in range(self.num_classes)]
        self.class_conf_per_batch = [torch.zeros(0, device=self.device) for _ in range(self.num_classes)]
        # For whole dataset
        self.overall_corrects = torch.from_numpy(np.zeros_like(self.bin_centers)).to(device)
        self.overall_scores = torch.from_numpy(np.zeros_like(self.bin_centers)).to(device)
        self.overall_ece = 0
    def calculate_bins(self,
                       output: torch.Tensor,
                       label: torch.Tensor,
                       mcd: bool = False):
        """
        Calculate accuracy and confidence values per class and per image. Then, partition confidences into bins.
        This results into accuracy/confidence bins for each class per image.
        """
        # Get rid of batch dimension
        label = label.squeeze(0)
        if mcd:
            # MC-dropout path: `output` is already an averaged softmax map.
            softmaxes = output
        else:
            # Logits to predictions
            softmaxes = torch.nn.functional.softmax(output, dim=1)
        for cls in range(self.num_classes):
            # Filter predictions
            confidences, predictions = softmaxes.max(dim=1)
            # Mark pixels not predicted as `cls` so they count as incorrect.
            predictions[predictions != cls] = 255
            # Compute accuracies
            class_accuracy = torch.eq(predictions[label == cls], label[label == cls])
            class_confidence = confidences[label == cls]
            class_pixels = predictions[label == cls].size()[0]
            # Partition bins
            bin_indices = [class_confidence.ge(bin_lower) * class_confidence.lt(bin_upper) for bin_lower, bin_upper in
                           zip(self.bins[:-1], self.bins[1:])]
            # Per-bin sums are weighted by the class's pixel count so that the
            # dataset-level average (below) is pixel-weighted.
            bin_corrects = class_pixels * torch.tensor([torch.mean(class_accuracy[bin_index].float())
                                                        for bin_index in bin_indices], device=self.device)
            bin_scores = class_pixels * torch.tensor([torch.mean(class_confidence[bin_index].float())
                                                      for bin_index in bin_indices], device=self.device)
            # Calculate ECE
            ece = class_pixels * self._calc_ece(class_accuracy, class_confidence,
                                                bin_lowers=self.bin_lowers, bin_uppers=self.bin_uppers)
            # Check nan (empty bins produce NaN means; treat them as zero)
            bin_corrects[torch.isnan(bin_corrects) == True] = 0
            bin_scores[torch.isnan(bin_scores) == True] = 0
            self.corrects_per_class[cls] += bin_corrects
            self.scores_per_class[cls] += bin_scores
            self.ece_per_class[cls] += ece
            self.class_pixels_total[cls] += class_pixels
    def calculate_mean_over_dataset(self):
        """Normalize the per-class accumulators by pixel counts and average
        them into dataset-level bins and ECE (equal class weighting)."""
        for cls in range(self.num_classes):
            self.overall_corrects += \
                (self.corrects_per_class[cls] / (self.class_pixels_total[cls].item() + 1e-9)) / self.num_classes
            self.overall_scores += \
                (self.scores_per_class[cls] / (self.class_pixels_total[cls].item() + 1e-9)) / self.num_classes
            self.overall_ece += \
                (self.ece_per_class[cls].item() / (self.class_pixels_total[cls].item() + 1e-9)) / self.num_classes
    def save_data(self,
                  where: str,
                  what: str):
        """
        Save entire calibration meter object instance for later use.
        """
        # Create directory for storing results
        pathlib.Path(where).mkdir(parents=True, exist_ok=True)
        # Save results
        with open(os.path.join(where, what), "wb") as output:
            pickle.dump(self, output, pickle.HIGHEST_PROTOCOL)
    @staticmethod
    def _calc_ece(accuracies, confidence, bin_lowers, bin_uppers):
        # Calculate ECE (expected calibration error, Guo et al. 2017)
        ece = 0
        for bin_lower, bin_upper in zip(bin_lowers, bin_uppers):
            # Calculated |confidence - accuracy| in each bin
            in_bin = confidence.gt(bin_lower.item()) * confidence.le(bin_upper.item())
            prop_in_bin = in_bin.float().mean()
            if prop_in_bin.item() > 0:
                accuracy_in_bin = accuracies[in_bin].float().mean()
                avg_confidence_in_bin = confidence[in_bin].mean()
                ece += torch.abs(avg_confidence_in_bin - accuracy_in_bin) * prop_in_bin
        return ece
    def plot_mean(self):
        """
        Plots reliability diagram meant over all classes.
        Returns:
            Figure
        """
        # Calculate gaps
        gap = self.overall_scores - self.overall_corrects
        # Create figure
        fig, ax = plt.subplots(figsize=(9, 9))
        plt.grid()
        fontsize = 25
        # Create bars
        confs = plt.bar(self.bin_centers, self.overall_corrects, width=self.width, ec='black')
        gaps = plt.bar(self.bin_centers, gap, bottom=self.overall_corrects, color=[1, 0.7, 0.7],
                       alpha=0.5, width=self.width, hatch='//', edgecolor='r')
        plt.plot([0, 1], [0, 1], '--', color='gray')
        plt.legend([confs, gaps], ['Outputs', 'Gap'], loc='best', fontsize='xx-large')
        # Clean up
        bbox_props = dict(boxstyle="round", fc="lightgrey", ec="brown", lw=2)
        plt.text(0.2, 0.75, f"ECE: {np.round_(self.overall_ece, decimals=3)}", ha="center",
                 va="center", size=fontsize-2, weight='bold', bbox=bbox_props)
        plt.title("Reliability Diagram", size=fontsize + 2)
        plt.ylabel("Accuracy", size=fontsize)
        plt.xlabel("Confidence", size=fontsize)
        plt.xlim(0, 1)
        plt.ylim(0, 1)
        # NOTE(review): tick.label is deprecated in recent matplotlib
        # (use tick.label1 / tick_params) — confirm the pinned version.
        for tick in ax.xaxis.get_major_ticks():
            tick.label.set_fontsize(18)
        for tick in ax.yaxis.get_major_ticks():
            tick.label.set_fontsize(18)
        return fig
    def plot_cls_diagrams(self):
        """
        Plots for each class a reliability diagram.
        Returns:
            List of Figures
        """
        list_figures = []
        for cls in range(self.num_classes):
            # Normalize the accumulated bins by the class's pixel count.
            bin_corrects = self.corrects_per_class[cls].cpu().numpy() / (self.class_pixels_total[cls].cpu().item() + 1e-9)
            bin_scores = self.scores_per_class[cls].cpu().numpy() / (self.class_pixels_total[cls].cpu().item() +1e-9)
            ece = self.ece_per_class[cls].cpu().item() / (self.class_pixels_total[cls].cpu().item() + 1e-9)
            # Calculate gaps
            gap = bin_scores - bin_corrects
            # Create figure
            figure = plt.figure(0, figsize=(8, 8))
            plt.grid()
            # Create bars
            confs = plt.bar(self.bin_centers, bin_corrects, width=self.width, ec='black')
            gaps = plt.bar(self.bin_centers, gap, bottom=bin_corrects, color=[1, 0.7, 0.7], alpha=0.5,
                           width=self.width, hatch='//', edgecolor='r')
            plt.plot([0, 1], [0, 1], '--', color='gray')
            plt.legend([confs, gaps], ['Outputs', 'Gap'], loc='best', fontsize='small')
            # Clean up
            bbox_props = dict(boxstyle="round", fc="lightgrey", ec="brown", lw=2)
            plt.text(0.2, 0.85, f"ECE: {np.round_(ece, decimals=3)}", ha="center", va="center", size=20,
                     weight='bold',
                     bbox=bbox_props)
            plt.title("Reliability Diagram", size=20)
            plt.ylabel("Accuracy", size=18)
            plt.xlabel("Confidence", size=18)
            plt.xlim(0, 1)
            plt.ylim(0, 1)
            list_figures.append(figure)
            # Clear current figure
            plt.close(figure)
        return list_figures
| 9,011 | 41.309859 | 122 | py |
self-adaptive | self-adaptive-master/utils/dropout.py | from utils.modeling import rsetattr
import torch, math
def add_dropout(model: torch.nn.Module,
                dropout_start_perc: float = 0.0,
                dropout_stop_perc: float = 1.0,
                dropout_prob: float = 0.1):
    """Wrap the ReLU/ReLU6 activations within the [start, stop] depth fraction
    of the network into Sequential(activation, Dropout(p=dropout_prob))."""
    dropout_cls = torch.nn.Dropout
    relu_types = (torch.nn.ReLU6, torch.nn.ReLU)
    # Count the eligible activations to translate percentages into positions.
    total = len([m for m in model.modules() if isinstance(m, relu_types)])
    first = math.floor(dropout_start_perc * total)
    last = math.floor(dropout_stop_perc * total)
    seen = 0
    for m_name, module in model.named_modules():
        if not isinstance(module, relu_types):
            continue
        seen += 1
        if first <= seen <= last:
            # Replace the activation by activation -> dropout in place.
            rsetattr(model, m_name, torch.nn.Sequential(module, dropout_cls(p=dropout_prob)))
| 854 | 39.714286 | 92 | py |
self-adaptive | self-adaptive-master/utils/distributed.py | import os
import torch
import torch.distributed
def init_process(opts,
                 gpu: int) -> int:
    """Initialize the NCCL process group for single-node multi-GPU training.

    Args:
        opts: Options object; reads opts.gpus and writes opts.world_size.
        gpu: Local GPU index; used directly as the process rank
            (assumes a single node).
    Returns:
        The rank of this process.
    """
    # Define world size
    opts.world_size = opts.gpus
    # Rendezvous via env:// on localhost; port is hard-coded to 8888.
    os.environ['MASTER_ADDR'] = '127.0.0.1'
    os.environ['MASTER_PORT'] = '8888'
    # Calculate rank
    rank = gpu
    # Initiate process group
    torch.distributed.init_process_group(backend='nccl',
                                         init_method='env://',
                                         world_size=opts.world_size,
                                         rank=rank)
    print(f"{rank + 1}/{opts.world_size} process initialized.\n")
    return rank
def clean_up():
    """Destroy the default process group created by init_process."""
    torch.distributed.destroy_process_group()
| 702 | 24.107143 | 68 | py |
self-adaptive | self-adaptive-master/utils/metrics.py | # Adapted from score written by wkentaro
# https://github.com/wkentaro/pytorch-fcn/blob/master/torchfcn/utils.py
import numpy as np
class runningScore():
    """Accumulates a confusion matrix over batches and derives segmentation
    metrics (overall/mean accuracy, mean IoU, frequency-weighted accuracy).

    Adapted from score written by wkentaro:
    https://github.com/wkentaro/pytorch-fcn/blob/master/torchfcn/utils.py

    The original final line carried garbled extraction metadata, which made
    the module unparsable; restored here.
    """
    def __init__(self,
                 n_classes: int):
        self.n_classes = n_classes
        self.confusion_matrix = np.zeros((n_classes, n_classes))
    def _fast_hist(self,
                   label_true: np.ndarray,
                   label_pred: np.ndarray,
                   n_class: int):
        """Confusion matrix for one flattened (gt, pred) pair; labels outside
        [0, n_class) are ignored."""
        mask = (label_true >= 0) & (label_true < n_class)
        hist = np.bincount(
            n_class * label_true[mask].astype(int) + label_pred[mask], minlength=n_class ** 2
        ).reshape(n_class, n_class)
        return hist
    def update(self,
               label_trues: np.ndarray,
               label_preds: np.ndarray):
        """Accumulate statistics for a batch of (gt, pred) label maps."""
        for lt, lp in zip(label_trues, label_preds):
            self.confusion_matrix += self._fast_hist(lt.flatten(), lp.flatten(), self.n_classes)
    def get_scores(self):
        """
        Returns accuracy score evaluation result.
            - overall accuracy
            - mean accuracy
            - mean IU
            - fwavacc
        Returns a tuple (metrics dict, per-class IoU dict, confusion matrix,
        per-class IoU array). Classes absent from both gt and prediction
        yield NaN IoUs, which np.nanmean skips.
        """
        hist = self.confusion_matrix
        acc = np.diag(hist).sum() / hist.sum()
        acc_cls = np.diag(hist) / hist.sum(axis=1)
        acc_cls = np.nanmean(acc_cls)
        iu = np.diag(hist) / (hist.sum(axis=1) + hist.sum(axis=0) - np.diag(hist))
        mean_iu = np.nanmean(iu)
        freq = hist.sum(axis=1) / hist.sum()
        fwavacc = (freq[freq > 0] * iu[freq > 0]).sum()
        cls_iu = dict(zip(range(self.n_classes), iu))
        # NOTE: the "FreqW Acc :t" key spelling is kept as-is because callers
        # may index the dict by these exact strings.
        return (
            {
                "Overall Acc:": acc,
                "Mean Acc :": acc_cls,
                "FreqW Acc :t": fwavacc,
                "Mean IoU :": mean_iu,
            },
            cls_iu,
            hist,
            iu,
        )
    def reset(self):
        """Clear the accumulated confusion matrix."""
        self.confusion_matrix = np.zeros((self.n_classes, self.n_classes))
self-adaptive | self-adaptive-master/utils/self_adapt_norm.py | import torch.nn as nn
from copy import deepcopy
from utils.modeling import *
class SelfAdaptiveNormalization(nn.Module):
    """BatchNorm2d variant that mixes source-domain running statistics with
    current batch statistics, weighted by a (possibly learnable) coefficient
    ``alpha`` (0 = running stats only, 1 = batch stats only).
    """
    def __init__(self,
                 num_features: int,
                 unweighted_stats: bool = False,
                 eps: float = 1e-5,
                 momentum: float = 0.1,
                 alpha: float = 0.5,
                 alpha_train: bool = False,
                 affine: bool = True,
                 track_running_stats: bool = True,
                 training: bool = False,
                 update_source: bool = True):
        super(SelfAdaptiveNormalization, self).__init__()
        # alpha is wrapped in a Parameter so it can optionally be learned.
        self.alpha = nn.Parameter(torch.tensor(alpha), requires_grad=alpha_train)
        self.alpha_train = alpha_train
        # NOTE(review): this shadows nn.Module's built-in ``training`` flag,
        # so Module.train()/eval() will overwrite it later — confirm intended.
        self.training = training
        self.unweighted_stats = unweighted_stats
        self.eps = eps
        self.update_source = update_source
        # Inner BatchNorm2d holds the source statistics and affine parameters.
        self.batch_norm = nn.BatchNorm2d(
            num_features,
            eps,
            momentum,
            affine,
            track_running_stats
        )
    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # Weighted-statistics path: taken at inference (unless disabled via
        # unweighted_stats) and during training when alpha itself is learned.
        if (not self.training and not self.unweighted_stats) or (self.training and self.alpha_train):
            if self.alpha_train:
                self.alpha.requires_grad_()
            # Compute statistics from batch
            x_mean = torch.mean(x, dim=(0, 2, 3))
            x_var = torch.var(x, dim=(0, 2, 3), unbiased=False)
            # Weigh batch statistics with running statistics
            alpha = torch.clamp(self.alpha, 0, 1)
            weighted_mean = (1 - alpha) * self.batch_norm.running_mean.detach() + alpha * x_mean
            weighted_var = (1 - alpha) * self.batch_norm.running_var.detach() + alpha * x_var
            # Update running statistics based on momentum
            if self.update_source and self.training:
                self.batch_norm.running_mean = (1 - self.batch_norm.momentum) * self.batch_norm.running_mean\
                                               + self.batch_norm.momentum * x_mean
                self.batch_norm.running_var = (1 - self.batch_norm.momentum) * self.batch_norm.running_var\
                                              + self.batch_norm.momentum * x_var
            # compute_bn (defined below in this module) applies the affine BN
            # transform with the blended statistics.
            return compute_bn(
                x, weighted_mean, weighted_var,
                self.batch_norm.weight, self.batch_norm.bias, self.eps
            )
        # Fallback: plain BatchNorm2d behaviour.
        return self.batch_norm(x)
def compute_bn(input: torch.Tensor, weighted_mean: torch.Tensor, weighted_var: torch.Tensor,
               weight: torch.Tensor, bias: torch.Tensor, eps: float) -> torch.Tensor:
    """Apply the affine batch-norm transform with externally supplied
    per-channel statistics.

    Args:
        input: NCHW feature map.
        weighted_mean / weighted_var: per-channel statistics of length C.
        weight / bias: per-channel affine parameters of length C.
        eps: numerical-stability constant added to the variance.
    """
    mean_b = weighted_mean[None, :, None, None]
    var_b = weighted_var[None, :, None, None]
    normalized = (input - mean_b) / torch.sqrt(var_b + eps)
    return normalized * weight[None, :, None, None] + bias[None, :, None, None]
def replace_batchnorm(m: torch.nn.Module,
                      alpha: float,
                      update_source_bn: bool = True):
    """Recursively swap every BatchNorm2d inside *m* for a
    SelfAdaptiveNormalization seeded with the original BN statistics.
    """
    alpha = 0.0 if alpha is None else alpha
    for name, child in m.named_children():
        if not isinstance(child, torch.nn.BatchNorm2d):
            # Recurse into composite children.
            replace_batchnorm(child, alpha=alpha, update_source_bn=update_source_bn)
            continue
        wbn = SelfAdaptiveNormalization(num_features=child.num_features,
                                        alpha=alpha, update_source=update_source_bn)
        # Carry the source-domain statistics and affine parameters over.
        setattr(wbn.batch_norm, "running_mean", deepcopy(child.running_mean))
        setattr(wbn.batch_norm, "running_var", deepcopy(child.running_var))
        setattr(wbn.batch_norm, "weight", deepcopy(child.weight))
        setattr(wbn.batch_norm, "bias", deepcopy(child.bias))
        wbn.to(next(m.parameters()).device.type)
        setattr(m, name, wbn)
def reinit_alpha(m: torch.nn.Module,
                 alpha: float,
                 device: torch.device,
                 alpha_train: bool = False):
    """Reset the mixing coefficient of every SelfAdaptiveNormalization
    layer in *m* to *alpha*.

    Args:
        m: model whose SAN layers should be re-initialised.
        alpha: new value for the batch/source statistics mixing weight.
        device: device the new parameter tensors are placed on.
        alpha_train: whether the new alpha parameters require gradients.
    """
    # Idiom fix: iterate matching modules directly instead of materializing
    # an intermediate list and carrying an unused enumerate index.
    for layer in m.modules():
        if isinstance(layer, SelfAdaptiveNormalization):
            layer.alpha = nn.Parameter(torch.tensor(alpha).to(device),
                                       requires_grad=alpha_train)
| 4,140 | 43.053191 | 112 | py |
self-adaptive | self-adaptive-master/utils/transforms.py | import torch, random
import torchvision.transforms.functional as F
import torchvision.transforms as tf
import numpy as np
from PIL import Image, ImageFilter
from typing import Tuple, List, Callable
from datasets.labels import convert_ids_to_trainids, convert_trainids_to_ids
class Compose:
    """Chain several paired (image, ground-truth) transforms."""

    def __init__(self,
                 transforms: List[Callable]):
        self.transforms = transforms

    def __call__(self,
                 img: Image.Image,
                 gt: Image.Image) -> Tuple[torch.Tensor, torch.Tensor]:
        # Each transform consumes and re-emits the (img, gt) pair.
        for step in self.transforms:
            img, gt = step(img, gt)
        return img, gt
class ToTensor:
    """Convert a PIL image to a float tensor and its label map to a
    1xHxW integer tensor."""

    def __call__(self,
                 img: Image.Image,
                 gt: Image.Image) -> Tuple[torch.Tensor, torch.Tensor]:
        tensor_img = F.to_tensor(np.array(img))
        # Labels keep their integer dtype; add a leading channel dimension.
        tensor_gt = torch.from_numpy(np.array(gt)).unsqueeze(0)
        return tensor_img, tensor_gt
class Resize:
    """Resize image (bilinear) and label map (nearest) to a fixed size."""

    def __init__(self,
                 resize: Tuple[int]):
        # Labels must use nearest-neighbour so class ids are not blended.
        self.gt_resize = tf.Resize(size=resize,
                                   interpolation=Image.NEAREST)
        self.img_resize = tf.Resize(size=resize,
                                    interpolation=Image.BILINEAR)

    def __call__(self,
                 img: Image.Image,
                 gt: Image.Image) -> Tuple[Image.Image, Image.Image]:
        return self.img_resize(img), self.gt_resize(gt)
class ImgResize:
    """Downscale only the image (labels untouched) when it exceeds a
    fixed pixel budget."""

    def __init__(self,
                 resize: Tuple[int, int]):
        self.resize = resize
        self.num_pixels = self.resize[0] * self.resize[1]

    def __call__(self,
                 img: torch.Tensor,
                 gt: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
        current_pixels = torch.prod(torch.tensor(img.shape[-2:]))
        if current_pixels > self.num_pixels:
            # interpolate wants a batch dimension, so add and strip one.
            img = torch.nn.functional.interpolate(
                img.unsqueeze(0), size=self.resize, mode='bilinear').squeeze(0)
        return img, gt
class ImgResizePIL:
    """PIL counterpart of ImgResize: shrink an image over the pixel budget."""

    def __init__(self,
                 resize: Tuple[int]):
        self.resize = resize
        self.num_pixels = self.resize[0]*self.resize[1]

    def __call__(self,
                 img: Image) -> Image:
        # PIL's resize takes (width, height) while self.resize is (height, width).
        if img.height * img.width > self.num_pixels:
            img = img.resize((self.resize[1], self.resize[0]), Image.BILINEAR)
        return img
class Normalize:
    """Channel-wise normalisation of the image (ImageNet statistics by
    default); the label map passes through untouched."""

    def __init__(self,
                 mean: List[float] = [0.485, 0.456, 0.406],
                 std: List[float] = [0.229, 0.224, 0.225]):
        self.norm = tf.Normalize(mean=mean,
                                 std=std)

    def __call__(self,
                 img: torch.Tensor,
                 gt: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
        return self.norm(img), gt
class RandomHFlip:
    """Horizontally flip image and labels together with given probability."""

    def __init__(self,
                 percentage: float = 0.5):
        self.percentage = percentage

    def __call__(self,
                 img: Image.Image,
                 gt: Image.Image) -> Tuple[Image.Image, Image.Image]:
        flip = random.random() < self.percentage
        if flip:
            # Flip both maps so they stay aligned.
            img, gt = F.hflip(img), F.hflip(gt)
        return img, gt
class RandomResizedCrop:
    """Take one random resized crop, applied identically to image and labels."""

    def __init__(self,
                 crop_size: List[int]):
        self.crop = tf.RandomResizedCrop(size=tuple(crop_size))

    def __call__(self,
                 img: Image.Image,
                 gt: Image.Image) -> Tuple[Image.Image, Image.Image]:
        # Draw the crop box once so both maps stay aligned.
        top, left, height, width = self.crop.get_params(img=img,
                                                        scale=self.crop.scale,
                                                        ratio=self.crop.ratio)
        img = F.resized_crop(img, top, left, height, width, self.crop.size, Image.BILINEAR)
        gt = F.resized_crop(gt, top, left, height, width, self.crop.size, Image.NEAREST)
        return img, gt
class CenterCrop:
    """Center-crop image and labels identically."""

    def __init__(self,
                 crop_size: int):
        self.crop = tf.CenterCrop(size=crop_size)

    def __call__(self,
                 img: Image.Image,
                 gt: Image.Image) -> Tuple[Image.Image, Image.Image]:
        return self.crop(img), self.crop(gt)
class IdsToTrainIds:
    """Map dataset label ids to training ids for the given domain pair."""

    def __init__(self,
                 source: str,
                 target: str):
        self.source = source
        self.target = target
        self.ids_to_trainids = convert_ids_to_trainids

    def __call__(self,
                 img: torch.Tensor,
                 gt: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
        # Only the label map is remapped; the image is untouched.
        return img, self.ids_to_trainids(gt, source=self.source, target=self.target)
class TrainIdsToIds:
    """Inverse of IdsToTrainIds: map training ids back to dataset ids."""

    def __init__(self,
                 source: str,
                 target: str):
        self.source = source
        self.target = target
        self.trainids_to_ids = convert_trainids_to_ids

    def __call__(self,
                 img: torch.Tensor,
                 gt: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
        # Only the label map is remapped; the image is untouched.
        return img, self.trainids_to_ids(gt, source=self.source, target=self.target)
class ColorJitter:
    """Randomly jitter image colours; labels are untouched."""

    def __init__(self, percentage: float = 0.5, brightness: float = 0.3,
                 contrast: float = 0.3, saturation: float = 0.3, hue: float = 0.1):
        self.percentage = percentage
        self.jitter = tf.ColorJitter(brightness=brightness,
                                     contrast=contrast,
                                     saturation=saturation,
                                     hue=hue)

    def __call__(self,
                 img: Image.Image,
                 gt: Image.Image) -> Tuple[Image.Image, Image.Image]:
        apply_jitter = random.random() < self.percentage
        return (self.jitter(img), gt) if apply_jitter else (img, gt)
class MaskGrayscale:
    """Randomly convert the image to 3-channel grayscale."""

    def __init__(self, percentage: float = 0.1):
        self.percentage = percentage

    def __call__(self,
                 img: Image.Image,
                 gt: Image.Image) -> Tuple[Image.Image, Image.Image]:
        # Same predicate as `percentage > random()`, written the usual way round.
        if random.random() < self.percentage:
            img = F.to_grayscale(img, num_output_channels=3)
        return img, gt
class RandGaussianBlur:
    """Blur the image with a Gaussian kernel of random radius."""

    def __init__(self, radius: List[float] = [.1, 2.]):
        self.radius = radius

    def __call__(self,
                 img: Image.Image,
                 gt: Image.Image) -> Tuple[Image.Image, Image.Image]:
        low, high = self.radius[0], self.radius[1]
        img = img.filter(ImageFilter.GaussianBlur(random.uniform(low, high)))
        return img, gt
| 6,474 | 26.553191 | 113 | py |
self-adaptive | self-adaptive-master/optimizer/schedulers.py | '''
Source: https://github.com/meetshah1995/pytorch-semseg
'''
from torch.optim.lr_scheduler import _LRScheduler
import torch
from typing import List
def get_scheduler(scheduler_type: str,
                  optimizer: torch.optim.Optimizer,
                  max_iter: int) -> _LRScheduler:
    """Factory for the supported LR schedulers.

    Args:
        scheduler_type: either "constant" or "poly".
        optimizer: optimizer whose learning rate will be scheduled.
        max_iter: decay horizon (only used by the "poly" schedule).

    Raises:
        ValueError: for any other scheduler_type.
    """
    if scheduler_type == "poly":
        return PolyLR(optimizer=optimizer, max_iter=max_iter)
    if scheduler_type == "constant":
        return ConstantLR(optimizer=optimizer)
    raise ValueError(f"Scheduler {scheduler_type} unknown")
class ConstantLR(_LRScheduler):
    """Scheduler that never changes the optimizer's learning rate."""

    def __init__(self,
                 optimizer: torch.optim.Optimizer,
                 last_epoch: int = -1):
        super().__init__(optimizer, last_epoch)

    def get_lr(self) -> List[float]:
        """
        Returns:
            lr: Current learning rate based on iteration
        """
        # A constant schedule simply echoes the base learning rates.
        return self.base_lrs
class PolyLR(_LRScheduler):
    """Polynomial learning-rate decay: lr = base_lr * (1 - t / max_iter) ** gamma."""

    def __init__(self,
                 optimizer: torch.optim.Optimizer,
                 max_iter: int,
                 decay_iter: int = 1,
                 gamma: float = 0.9,
                 last_epoch: int = -1):
        self.max_iter = max_iter
        self.decay_iter = decay_iter
        self.gamma = gamma
        self.factor: float  # last decay factor, set on every get_lr() call
        super().__init__(optimizer, last_epoch)

    def get_lr(self) -> List[float]:
        """
        Returns:
            lr: Current learning rate based on iteration
        """
        assert self.last_epoch < self.max_iter\
            , f"Last epoch is {self.last_epoch} but needs to be smaller than max iter {self.max_iter}"
        self.factor = (1 - self.last_epoch / float(self.max_iter)) ** self.gamma
        return [self.factor * base_lr for base_lr in self.base_lrs]
drlviz | drlviz-master/distributions.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 14 11:35:22 2018
@author: edward
"""
import torch.nn as nn
import torch.nn.functional as F
class Categorical(nn.Module):
    """Categorical (softmax) action head for a discrete policy.

    A single linear layer maps the policy features to one logit per action;
    sampling and log-probability/entropy computation operate on the softmax
    of those logits.
    """

    def __init__(self, num_inputs, num_outputs):
        super(Categorical, self).__init__()
        self.linear = nn.Linear(num_inputs, num_outputs)

    def forward(self, x):
        """Return raw (unnormalized) action logits of shape (batch, num_outputs)."""
        x = self.linear(x)
        return x

    def sample(self, x, deterministic):
        """Sample one action index per batch row; argmax when deterministic."""
        x = self(x)
        probs = F.softmax(x, dim=1)
        if deterministic is False:
            # BUGFIX: Tensor.multinomial requires an explicit num_samples
            # argument on current PyTorch; the legacy no-argument call
            # (implicit num_samples=1) raises a TypeError.
            action = probs.multinomial(num_samples=1)
        else:
            action = probs.max(1, keepdim=True)[1]
        return action

    def logprobs_and_entropy(self, x, actions):
        """Return per-sample log-probs of `actions` and the mean entropy."""
        x = self(x)
        log_probs = F.log_softmax(x, dim=1)
        probs = F.softmax(x, dim=1)
        action_log_probs = log_probs.gather(1, actions)
        dist_entropy = -(log_probs * probs).sum(-1).mean()
        return action_log_probs, dist_entropy
| 991 | 22.069767 | 58 | py |
drlviz | drlviz-master/reduce.py | import ujson
from random import randint
import numpy as np
import torch
from torch.autograd import Variable
from arguments import parse_game_args
from doom_evaluation import BaseAgent
from environments import DoomEnvironment
from models import CNNPolicy
import base64
import io
from PIL import Image
def gen_classic(selh, file):
    """Replay one episode of the health-gathering-supreme scenario with the
    hidden-state mask *selh* applied to the agent's GRU, recording per-step
    observations, saliency maps, probabilities and game stats to *file*
    (JSON) and returning the same structure.

    Args:
        selh: numpy array of length 512 — per-hidden-unit multiplier
              (see remove_all / apply_oder in this module).
        file: path of the JSON file to write.
    """
    params = parse_game_args()
    params.scenario = "health_gathering_supreme.cfg"
    env = DoomEnvironment(params)
    device = torch.device("cuda" if False else "cpu")  # CPU forced
    num_actions = env.num_actions
    network = CNNPolicy(3, num_actions, True, (3, 64, 112)).to(device)
    checkpoint = torch.load('models/' + "health_gathering_supreme" + '.pth.tar', map_location=lambda storage, loc: storage)
    network.load_state_dict(checkpoint['model'])
    agent = BaseAgent(network, params)
    ERU = {'env': env, 'agent': agent}
    selh = torch.from_numpy(selh).type(torch.FloatTensor)
    # NOTE(review): volatile=True is a legacy pre-0.4 PyTorch flag (no-op /
    # warning on modern versions) — confirm against the pinned torch version.
    selh = Variable(selh, volatile=True)
    ERU['env'].set_seed(randint(0, 999999999))
    ERU['env'].reset()
    # Per-step recording buffers.
    scores = []
    hiddens = []
    inputs = []
    saliencies = []
    actions = []
    probabilities = []
    health = []
    positions = []
    orientations = []
    velocities = []
    items = []
    fov = []
    w = 0
    while not ERU['env'].is_episode_finished():
        # Encode the current frame as JPEG for the JSON payload.
        obsvervation = io.BytesIO()
        obs = ERU['env'].get_observation()
        temp = ERU['env'].state.screen_buffer
        Image.fromarray(temp.transpose(1, 2, 0)).save(obsvervation, format="JPEG")
        # Forward pass with the hidden-unit mask; grads is a saliency image.
        action, value, action_probs, grads = ERU['agent'].get_action_value_and_probs_zeroes(obs, selh, epsilon=0.0)
        hidden = ERU['agent'].model.get_gru_h()
        # Serialise the 512-dim hidden state as a list of strings.
        h = ''
        for elem in hidden[0][0]:
            h += str(elem) + ","
        h = h[:-1]
        h = h.split(',')
        probs = ""
        for elem in action_probs[0]:
            probs += str(elem) + ","
        probs = probs[:-1]
        probs = probs.split(',')
        # Saliency map as grayscale JPEG.
        sa = io.BytesIO()
        t = Image.fromarray(grads, 'L')
        t.save(sa, format="JPEG")
        scores.append(str(round(ERU['env'].game.get_total_reward(), 2)))
        hiddens.append(h)
        inputs.append(base64.b64encode(obsvervation.getvalue()))
        saliencies.append(base64.b64encode(sa.getvalue()))
        actions.append(str(action))
        probabilities.append(probs)
        health.append(ERU['env'].get_health())
        positions.append(ERU['env'].get_pos())
        orientations.append(ERU['env'].get_ori())
        velocities.append(ERU['env'].get_velo())
        items.append(ERU['env'].get_item())
        fov.append(ERU['env'].get_fov())
        ERU['env'].make_action(int(action))
        print('Iteration', w, '/525')
        w += 1
    result = {'episode0': {
        'inputs': inputs,
        'actions': actions,
        'probabilities': probabilities,
        'saliencies': saliencies,
        'scores': scores,
        'positions': positions,
        'health': health,
        'hiddens': hiddens,
        'orientations': orientations,
        'velocities': velocities,
        'items': items,
        'fov': fov
        }
    }
    with open(file, 'w') as f:
        ujson.dump(result, f, indent=4, sort_keys=True)
    return result
def remove_all():
    """Return the baseline mask: all 512 GRU hidden units damped to 0.02
    (i.e. effectively "removed").

    BUGFIX: uses the builtin ``float`` dtype — ``np.float`` was merely an
    alias of the builtin and has been removed in NumPy >= 1.24, where the
    original code raises AttributeError.
    """
    return np.full(
        shape=512,
        fill_value=0.02,
        dtype=float)
def top(n):
    """Mask keeping the *n* most strongly activated GRU hidden units.

    The hard-coded ordering ranks the 512 units by activation strength
    (most activated first).

    BUGFIX: the mask built by apply_oder was computed and then discarded;
    it is now returned, consistent with tsne_1d_projection below.
    """
    top = [2, 13, 375, 105, 141, 203, 12, 381, 500, 496, 485, 455, 74, 315, 308, 75, 93, 223, 302, 207, 2, 108, 384, 177, 266, 129, 158, 182, 211, 85, 323, 205, 115, 421, 332, 400, 72, 21, 139, 220, 402, 499, 343, 215, 280, 194, 66, 65, 56, 284, 106, 86, 376, 161, 471, 262, 483, 312, 237, 195, 197, 335, 488, 260, 290, 146, 116, 11, 30, 477, 425, 458, 417, 379, 87, 448, 298, 79, 474, 208, 265, 213, 31, 169, 149, 219, 413, 270, 240, 256,
           468, 288, 152, 18, 100, 15, 502, 258, 176, 187, 23, 244, 359, 168, 101, 17, 247, 493, 238, 320, 268, 319, 282, 487, 325, 420, 179, 392, 511, 482, 350, 239, 142, 200, 251, 148, 170, 112, 50, 344, 173, 193, 422, 189, 291, 371, 313, 113, 463, 339, 131, 469, 120, 362, 62, 435, 224, 406, 172, 78, 484, 295, 416, 346, 49, 164, 34, 150, 70, 160, 389, 236, 409, 67, 180, 159, 441, 69, 162, 190, 361, 145, 127, 370, 155, 281, 94, 329,
           10,
           137, 272, 27, 366, 16, 309, 460, 464, 333, 204, 229, 348, 278, 226, 466, 436, 7, 503, 428, 232, 257, 32, 221, 181, 218, 283, 405, 104, 60, 230, 241, 25, 19, 84, 191, 318, 286, 431, 461, 111, 263, 310, 399, 8, 107, 299, 233, 39, 356, 143, 430, 209, 360, 307, 28, 147, 134, 217, 125, 199, 490, 340, 188, 167, 401, 119, 98, 364, 103, 377, 216, 52, 453, 296, 0, 235, 114, 253, 274, 122, 465, 462, 358, 457, 89, 198, 373, 276, 443,
           367, 354, 254, 285, 450, 345, 68, 398, 369, 41, 228, 243, 271, 365, 439, 480, 437, 479, 90, 294, 394, 6, 330, 418, 390, 37, 311, 432, 363, 178, 222, 368, 48, 407, 506, 433, 135, 20, 40, 374, 128, 51, 225, 404, 99, 410, 165, 138, 357, 470, 252, 349, 196, 509, 341, 35, 175, 46, 73, 97, 492, 316, 102, 423, 459, 227, 166, 117, 478, 391, 387, 412, 396, 395, 140, 475, 24, 314, 383, 264, 214, 382, 55, 242, 352, 334, 393, 76, 5,
           328,
           38, 255, 279, 124, 80, 126, 297, 451, 53, 110, 202, 45, 331, 505, 63, 275, 445, 419, 388, 163, 372, 206, 249, 261, 61, 118, 481, 301, 442, 136, 3, 43, 397, 324, 342, 183, 353, 336, 82, 44, 454, 501, 77, 347, 157, 305, 287, 59, 497, 438, 248, 486, 504, 472, 185, 91, 452, 22, 322, 408, 355, 133, 201, 429, 508, 132, 440, 317, 447, 449, 151, 427, 88, 415, 121, 234, 144, 351, 456, 269, 245, 434, 380, 473, 109, 337, 47, 385, 510,
           58, 491, 489, 250, 14, 498, 386, 424, 231, 476, 156, 378, 192, 171, 277, 4, 300, 54, 411, 292, 36, 306, 210, 130, 83, 338, 186, 414, 123, 321, 293, 303, 184, 495, 9, 494, 246, 153, 446, 426, 174, 95, 96, 507, 81, 327, 64, 33, 1, 29, 42, 304, 403, 154, 467, 273, 57, 326, 289, 212, 26, 71, 444, 267, 259]
    return apply_oder(n, top)
def change(n):
    """Mask keeping the *n* most strongly *changing* GRU hidden units.

    The hard-coded ordering ranks the 512 units by how much their
    activation changes over an episode (most changing first).

    BUGFIX: the mask built by apply_oder was computed and then discarded;
    it is now returned, consistent with tsne_1d_projection below.
    """
    ch = [215, 86, 290, 266, 108, 262, 106, 483, 448, 471, 417, 421, 265, 194, 502, 187, 320, 244, 176, 323, 413, 72, 169, 359, 17, 177, 100, 379, 268, 511, 500, 335, 463, 75, 30, 406, 308, 238, 161, 205, 312, 258, 219, 193, 474, 200, 240, 173, 62, 288, 208, 282, 344, 339, 31, 170, 485, 120, 224, 10, 332, 164, 291, 148, 67, 236, 409, 27, 50, 94, 101, 150, 87, 416, 487, 34, 23, 420, 56, 484, 428, 158, 260, 78, 168, 466, 272, 107, 189,
          381, 422, 455, 49, 211, 460, 493, 441, 230, 159, 172, 162, 70, 221, 425, 251, 477, 142, 366, 464, 209, 333, 84, 191, 217, 213, 348, 469, 319, 298, 129, 160, 179, 435, 195, 364, 149, 443, 296, 468, 285, 313, 283, 458, 399, 69, 377, 12, 74, 239, 28, 488, 114, 263, 39, 188, 310, 218, 52, 450, 119, 294, 369, 181, 278, 330, 190, 6, 97, 392, 346, 387, 318, 104, 457, 178, 311, 360, 233, 68, 131, 367, 90, 41, 492, 390, 46, 180, 20,
          398, 98, 365, 60, 480, 295, 357, 232, 499, 175, 165, 407, 167, 345, 430, 137, 220, 151, 418, 475, 490, 478, 243, 2, 111, 397, 43, 140, 470, 264, 152, 21, 48, 196, 439, 66, 383, 254, 166, 40, 415, 38, 404, 229, 16, 145, 204, 354, 15, 125, 394, 454, 362, 206, 432, 437, 456, 128, 506, 503, 257, 305, 25, 462, 117, 11, 325, 301, 99, 334, 393, 0, 352, 235, 297, 401, 508, 316, 479, 102, 127, 321, 228, 368, 287, 449, 274, 55, 198,
          207, 347, 18, 391, 317, 302, 144, 85, 396, 331, 138, 340, 271, 118, 5, 14, 112, 380, 459, 389, 408, 185, 234, 465, 51, 431, 261, 374, 495, 280, 434, 77, 436, 497, 139, 29, 37, 315, 385, 45, 155, 253, 395, 245, 370, 19, 225, 141, 201, 80, 210, 400, 35, 223, 73, 372, 461, 322, 275, 47, 476, 110, 355, 307, 231, 4, 373, 36, 115, 303, 197, 501, 429, 136, 24, 95, 255, 358, 237, 89, 154, 281, 338, 489, 163, 328, 226, 121, 93, 496,
          442, 445, 324, 342, 113, 183, 269, 71, 44, 382, 494, 58, 329, 453, 481, 227, 452, 314, 386, 216, 447, 88, 246, 133, 507, 505, 350, 132, 337, 504, 388, 199, 438, 124, 22, 378, 130, 286, 276, 63, 143, 53, 491, 351, 64, 343, 353, 83, 414, 509, 336, 473, 427, 419, 472, 433, 446, 411, 467, 153, 241, 412, 510, 122, 256, 57, 123, 156, 250, 192, 277, 384, 252, 202, 486, 279, 212, 3, 327, 146, 214, 424, 59, 82, 293, 134, 361, 304, 259,
          306, 109, 81, 65, 184, 440, 135, 222, 341, 247, 498, 13, 103, 363, 1, 186, 426, 289, 91, 54, 403, 157, 482, 444, 147, 410, 423, 76, 42, 267, 451, 92, 116, 61, 375, 79, 249, 284, 33, 174, 126, 273, 376, 292, 182, 105, 26, 32, 96, 349, 326, 248, 242, 356, 8, 7, 402, 405, 203, 299, 171, 371, 270, 309, 9, 300]
    return apply_oder(n, ch)
def tsne_1d_projection(n):
    """Mask keeping the first *n* GRU hidden units of a hard-coded ordering
    derived from a 1-D t-SNE projection of the units; all other units are
    damped to 0.02 (see apply_oder)."""
    proj = [381, 500, 203, 92, 141, 12, 485, 105, 375, 13, 308, 75, 455, 496, 74, 315, 93, 223, 302, 207, 2, 384, 158, 129, 211, 266, 108, 85, 182, 323, 205, 115, 400, 332, 139, 21, 220, 402, 177, 499, 343, 72, 280, 194, 215, 66, 65, 284, 56, 421, 197, 237, 195, 376, 11, 477, 30, 146, 290, 116, 312, 335, 79, 106, 260, 87, 213, 161, 458, 262, 488, 425, 86, 417, 471, 298, 31, 483, 474, 448, 265, 168, 208, 392, 288, 17, 379, 493, 18, 173,
            256, 200, 100, 176, 344, 240, 502, 282, 291, 268, 189, 149, 320, 409, 187, 120, 23, 142, 148, 162, 295, 219, 67, 258, 27, 464, 359, 170, 484, 193, 377, 236, 468, 270, 181, 150, 247, 233, 413, 251, 244, 482, 319, 350, 172, 406, 101, 169, 160, 371, 272, 420, 416, 463, 164, 339, 50, 333, 62, 145, 428, 239, 511, 487, 441, 221, 466, 457, 179, 34, 238, 348, 224, 113, 329, 460, 422, 78, 362, 469, 309, 190, 313, 278, 10, 435, 281,
            370, 131, 361, 299, 232, 241, 7, 127, 8, 399, 69, 119, 39, 436, 461, 49, 229, 159, 52, 307, 401, 318, 389, 104, 286, 230, 257, 94, 111, 112, 226, 465, 143, 134, 209, 431, 84, 366, 122, 354, 283, 254, 394, 137, 28, 46, 218, 325, 152, 15, 155, 405, 32, 16, 358, 503, 199, 346, 356, 263, 103, 147, 19, 216, 138, 98, 125, 274, 25, 490, 453, 204, 107, 135, 341, 180, 70, 242, 360, 128, 340, 367, 222, 225, 396, 369, 202, 509, 0, 432,
            480, 478, 349, 363, 276, 364, 60, 310, 37, 437, 191, 433, 398, 334, 228, 214, 68, 506, 249, 390, 217, 185, 117, 252, 188, 316, 301, 41, 35, 279, 365, 423, 61, 439, 89, 430, 53, 44, 382, 479, 175, 20, 102, 178, 126, 504, 114, 294, 393, 82, 314, 388, 462, 271, 330, 77, 505, 124, 5, 336, 296, 196, 407, 374, 198, 51, 391, 412, 368, 450, 404, 55, 261, 165, 275, 206, 373, 80, 235, 324, 6, 167, 163, 443, 136, 383, 140, 264, 459,
            40, 22, 442, 99, 372, 97, 73, 451, 447, 410, 438, 456, 91, 395, 497, 486, 380, 255, 473, 311, 76, 491, 253, 36, 342, 110, 351, 440, 508, 184, 90, 14, 243, 475, 418, 292, 38, 501, 183, 250, 59, 130, 328, 472, 434, 133, 397, 54, 285, 345, 386, 166, 492, 227, 88, 245, 331, 83, 449, 201, 297, 452, 498, 476, 454, 118, 427, 357, 355, 45, 429, 387, 510, 58, 470, 489, 121, 414, 156, 306, 385, 132, 186, 234, 305, 353, 347, 47, 300,
            210, 144, 481, 494, 338, 337, 246, 446, 151, 411, 408, 9, 403, 445, 424, 293, 495, 415, 63, 273, 95, 33, 109, 212, 1, 507, 303, 153, 304, 71, 321, 57, 154, 259, 29, 317, 231, 287, 326, 43, 327, 64, 289, 322, 81, 267, 26, 42, 171, 277, 444, 174, 467, 378, 192, 426, 4, 123, 269, 352, 419, 96, 3, 48, 157, 248, 24]
    return apply_oder(n, proj)
def apply_oder(n, order):
    """Build a 512-wide mask that keeps the first *n* units of *order*
    fully active (1.0) while every other unit stays damped at 0.02.
    """
    assert n < 512, "n must be < 512"
    mask = remove_all()
    # Promote the leading n entries of the ranking to fully active.
    for unit in order[:n]:
        mask[unit] = 1
    return mask
if __name__ == '__main__':
    # Choose which GRU hidden-unit mask to replay the episode with.
    # NOTE(review): top() and change() do not return the mask they build
    # (they discard apply_oder's result), so re-enabling those lines would
    # set mask to None — confirm before using them.
    # mask = top(20) # This line allows you to keep the top activated 20 elements
    # mask = change(20) # This line allows you to keep the top changing 20 elements
    mask = tsne_1d_projection(50) # This line allows you to keep the top tsne_1d_projection 50 elements
    # mask = remove_all() #This removes all elements.
    data = gen_classic(mask, "result.json")
| 11,617 | 64.638418 | 440 | py |
drlviz | drlviz-master/models.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 14 10:53:06 2018
@author: edward
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
from distributions import Categorical
# A temporary solution from the master branch.
# https://github.com/pytorch/pytorch/blob/7752fe5d4e50052b3b0bbc9109e599f8157febc0/torch/nn/init.py#L312
# Remove after the next version of PyTorch gets release.
def orthogonal(tensor, gain=1):
    """Fill *tensor* in place with a (semi-)orthogonal matrix, scaled by
    *gain*, and return it (same scheme as nn.init.orthogonal_).

    Raises:
        ValueError: if *tensor* has fewer than 2 dimensions.
    """
    if tensor.ndimension() < 2:
        raise ValueError("Only tensors with 2 or more dimensions are supported")
    rows = tensor.size(0)
    cols = tensor[0].numel()
    flattened = torch.Tensor(rows, cols).normal_(0, 1)
    if rows < cols:
        flattened.t_()
    # Compute the qr factorization.
    # BUGFIX: torch.qr has been removed from current PyTorch;
    # torch.linalg.qr (default 'reduced' mode) is the supported equivalent.
    q, r = torch.linalg.qr(flattened)
    # Make Q uniform according to https://arxiv.org/pdf/math-ph/0609050.pdf
    d = torch.diag(r, 0)
    ph = d.sign()
    q *= ph.expand_as(q)
    if rows < cols:
        q.t_()
    tensor.view_as(q).copy_(q)
    tensor.mul_(gain)
    return tensor
def weights_init(m):
    """Module initializer: orthogonal weights and zero bias for every
    Conv*/Linear layer; other modules are left untouched."""
    layer_name = m.__class__.__name__
    is_target = ('Conv' in layer_name) or ('Linear' in layer_name)
    if is_target:
        orthogonal(m.weight.data)
        if m.bias is not None:
            m.bias.data.fill_(0)
class FFPolicy(nn.Module):
    """Base class for actor-critic policies: wires a subclass's forward()
    to action sampling / evaluation helpers via ``self.dist``."""
    def __init__(self):
        super(FFPolicy, self).__init__()
    def forward(self, inputs, states, masks, masktry):
        # Subclasses must return (value, features, states).
        raise NotImplementedError
    def act(self, inputs, states, masks, deterministic=False):
        """Sample an action (plus value, log-prob, new states) for the input."""
        # NOTE(review): forward() above declares a fourth ``masktry``
        # argument but is called here with only three — confirm subclasses
        # give it a default or that this path is unused.
        value, x, states = self(inputs, states, masks)
        action = self.dist.sample(x, deterministic=deterministic)
        action_log_probs, dist_entropy = self.dist.logprobs_and_entropy(x, action)
        return value, action, action_log_probs, states
    def evaluate_actions(self, inputs, states, masks, actions, pred_depths=False):
        """Return value, log-probs and entropy for already-chosen actions
        (plus depth predictions when pred_depths is True)."""
        if pred_depths:
            value, x, states, depths = self(inputs, states, masks, pred_depths)
            action_log_probs, dist_entropy = self.dist.logprobs_and_entropy(x, actions)
            return value, action_log_probs, dist_entropy, states, depths
        else:
            value, x, states = self(inputs, states, masks)
            action_log_probs, dist_entropy = self.dist.logprobs_and_entropy(x, actions)
            return value, action_log_probs, dist_entropy, states, None
    def get_action_value_and_probs(self, inputs, states, masks, masktry, deterministic=False):
        """Like act(), but also returns the softmaxed action distribution
        and the raw feature vector x."""
        value, x, states = self(inputs, states, masks, masktry)
        action = self.dist.sample(x, deterministic=deterministic)
        action_log_probs, dist_entropy = self.dist.logprobs_and_entropy(x, action)
        return value, action, F.softmax(self.dist(x), dim=1), states, x
class CNNPolicy(FFPolicy):
    """Conv + (optional GRU) actor-critic policy over image observations.

    The forward pass exposes a ``masktry`` hook that element-wise masks the
    GRU hidden state (used by the hidden-unit ablation experiments), and
    the last hidden state is cached in ``self.h`` for inspection.
    """
    def __init__(self, num_inputs, num_actions, use_gru, input_shape):
        super(CNNPolicy, self).__init__()
        # self.conv1 = nn.Conv2d(num_inputs, 32, 8, stride=4)
        # self.relu1 = nn.ReLU(True)
        # self.conv2 = nn.Conv2d(32, 64, 4, stride=2)
        # self.relu2 = nn.ReLU(True)
        # self.conv3 = nn.Conv2d(64, 32, 3, stride=1)
        # self.relu3 = nn.ReLU()
        self.h = None  # last GRU hidden state, cached for get_gru_h()
        self.conv_head = nn.Sequential(nn.Conv2d(num_inputs, 32, 8, stride=4),
                                       nn.ReLU(True),
                                       nn.Conv2d(32, 64, 4, stride=2),
                                       nn.ReLU(True),
                                       nn.Conv2d(64, 32, 3, stride=1),
                                       nn.ReLU())
        # Probe the conv head with a dummy input to size the linear layer.
        conv_input = torch.autograd.Variable(torch.randn((1,) + input_shape))
        self.conv_out_size = self.conv_head(conv_input).nelement()
        self.hidden_size = 512
        self.linear1 = nn.Linear(self.conv_out_size, self.hidden_size)
        if use_gru:
            self.gru = nn.GRUCell(512, 512)
        self.critic_linear = nn.Linear(512, 1)
        self.dist = Categorical(512, num_actions)
        # NOTE(review): eval() (not train()) is called here, unlike
        # CNNDepthPolicy below — confirm this is intended for this class.
        self.eval()
        self.reset_parameters()
    @property
    def state_size(self):
        # Recurrent state width (1 acts as a placeholder when no GRU).
        if hasattr(self, 'gru'):
            return 512
        else:
            return 1
    def reset_parameters(self):
        """Orthogonal init with ReLU gain for conv/linear/GRU weights."""
        self.apply(weights_init)
        relu_gain = nn.init.calculate_gain('relu')
        # Indices 0, 2, 4 are the Conv2d layers of conv_head.
        for i in range(0, 6, 2):
            self.conv_head[i].weight.data.mul_(relu_gain)
        self.linear1.weight.data.mul_(relu_gain)
        if hasattr(self, 'gru'):
            orthogonal(self.gru.weight_ih.data)
            orthogonal(self.gru.weight_hh.data)
            self.gru.bias_ih.data.fill_(0)
            self.gru.bias_hh.data.fill_(0)
        if self.dist.__class__.__name__ == "DiagGaussian":
            self.dist.fc_mean.weight.data.mul_(0.01)
    def forward(self, inputs, states, masks, masktry, pred_depth=False):
        """Return (value, features, states); ``masktry`` (if non-empty)
        element-wise masks the GRU hidden state after the update."""
        x = self.conv_head(inputs * (1.0 / 255.0))  # scale uint8 pixels to [0, 1]
        x = x.view(-1, self.conv_out_size)
        x = self.linear1(x)
        x = F.relu(x)
        if hasattr(self, 'gru'):
            if inputs.size(0) == states.size(0):
                # Single-step path (acting): one GRU update per env.
                x = states = self.gru(x, states * masks)
                if len(masktry) > 0:
                    # Hidden-unit ablation hook: damp/keep units per masktry.
                    x = states = states * masktry
                self.h = x
            else:
                # Multi-step path (training): unroll the GRU over time.
                x = x.view(-1, states.size(0), x.size(1))
                masks = masks.view(-1, states.size(0), 1)
                outputs = []
                for i in range(x.size(0)):
                    hx = states = self.gru(x[i], states * masks[i])
                    outputs.append(hx)
                x = torch.cat(outputs, 0)
        return self.critic_linear(x), x, states
    #
    # def get_cnn_w(self):
    #     a = self.conv1.cpu().weight.data
    #     b = self.conv2.cpu().weight.data
    #     c = self.conv3.cpu().weight.data
    #
    #     self.conv1.cuda()
    #     self.conv2.cuda()
    #     self.conv3.cuda()
    #     return [a, b, c]
    #
    # def get_cnn_f(self):
    #     a = self.x1.cpu().data.numpy()
    #     b = self.x2.cpu().data.numpy()
    #     c = self.x3.cpu().data.numpy()
    #
    #     return [a, b, c]
    #
    def get_gru_h(self):
        """Return the cached GRU hidden state as a numpy array (in a list)."""
        return [self.h.cpu().data.numpy()]
class CNNDepthPolicy(FFPolicy):
    """CNNPolicy variant with an auxiliary depth-prediction head on the
    conv features (returned when forward(..., pred_depth=True))."""
    def __init__(self, num_inputs, num_actions, use_gru, input_shape):
        super(CNNDepthPolicy, self).__init__()
        self.conv_head = nn.Sequential(nn.Conv2d(num_inputs, 32, 8, stride=4),
                                       nn.ReLU(True),
                                       nn.Conv2d(32, 64, 4, stride=2),
                                       nn.ReLU(True),
                                       nn.Conv2d(64, 32, 3, stride=1),
                                       nn.ReLU())
        # 1x1 conv mapping conv features to 8 depth channels.
        self.depth_head = nn.Conv2d(32, 8, 1, 1)
        # Probe the conv head with a dummy input to size the linear layer.
        conv_input = torch.autograd.Variable(torch.randn((1,) + input_shape))
        print(conv_input.size(), self.conv_head(conv_input).size())
        self.conv_out_size = self.conv_head(conv_input).nelement()
        self.linear1 = nn.Linear(self.conv_out_size, 512)
        if use_gru:
            self.gru = nn.GRUCell(512, 512)
        self.critic_linear = nn.Linear(512, 1)
        self.dist = Categorical(512, num_actions)
        self.train()
        self.reset_parameters()
    @property
    def state_size(self):
        # Recurrent state width (1 acts as a placeholder when no GRU).
        if hasattr(self, 'gru'):
            return 512
        else:
            return 1
    def reset_parameters(self):
        """Orthogonal init with ReLU gain for conv/linear/GRU weights."""
        self.apply(weights_init)
        relu_gain = nn.init.calculate_gain('relu')
        # Indices 0, 2, 4 are the Conv2d layers of conv_head.
        for i in range(0, 6, 2):
            self.conv_head[i].weight.data.mul_(relu_gain)
        self.linear1.weight.data.mul_(relu_gain)
        if hasattr(self, 'gru'):
            orthogonal(self.gru.weight_ih.data)
            orthogonal(self.gru.weight_hh.data)
            self.gru.bias_ih.data.fill_(0)
            self.gru.bias_hh.data.fill_(0)
        if self.dist.__class__.__name__ == "DiagGaussian":
            self.dist.fc_mean.weight.data.mul_(0.01)
    def forward(self, inputs, states, masks, pred_depth=False):
        """Return (value, features, states) and, when pred_depth is True,
        additionally the depth-head output."""
        x = self.conv_head(inputs * (1.0 / 255.0))  # scale uint8 pixels to [0, 1]
        if pred_depth:
            depth = self.depth_head(x)
        x = x.view(-1, self.conv_out_size)
        x = self.linear1(x)
        x = F.relu(x)
        if hasattr(self, 'gru'):
            if inputs.size(0) == states.size(0):
                # Single-step path (acting): one GRU update per env.
                x = states = self.gru(x, states * masks)
            else:
                # Multi-step path (training): unroll the GRU over time.
                x = x.view(-1, states.size(0), x.size(1))
                masks = masks.view(-1, states.size(0), 1)
                outputs = []
                for i in range(x.size(0)):
                    hx = states = self.gru(x[i], states * masks[i])
                    outputs.append(hx)
                x = torch.cat(outputs, 0)
        if pred_depth:
            return self.critic_linear(x), x, states, depth
        else:
            return self.critic_linear(x), x, states
if __name__ == '__main__':
    # Smoke test: push a random frame through CNNDepthPolicy and print the
    # intermediate conv output shapes at each stage of the head.
    depth_model = CNNDepthPolicy(3, 8, False, (3, 64, 112))
    example_input = torch.autograd.Variable(torch.randn(1, 3, 64, 112))
    c, x, s, d = depth_model(example_input, None, torch.autograd.Variable(torch.Tensor([1])), True)
    d.size()
    conv_head = nn.Sequential(nn.Conv2d(3, 32, 8, stride=4),
                              nn.ReLU(True),
                              nn.Conv2d(32, 64, 4, stride=2),
                              nn.ReLU(True),
                              nn.Conv2d(64, 32, 3, stride=1),
                              nn.ReLU())
    step1 = nn.Conv2d(3, 32, 8, stride=4)(example_input)
    step2 = nn.Sequential(nn.Conv2d(3, 32, 8, stride=4),
                          nn.ReLU(True),
                          nn.Conv2d(32, 64, 4, stride=2))(example_input)
    step3 = nn.Sequential(nn.Conv2d(3, 32, 8, stride=4),
                          nn.ReLU(True),
                          nn.Conv2d(32, 64, 4, stride=2),
                          nn.ReLU(True),
                          nn.Conv2d(64, 32, 3, stride=1),
                          nn.ReLU())(example_input)
    print('Step1', step1.size())
    print('Step2', step2.size())
    print('Step3', step3.size())
| 10,104 | 33.370748 | 104 | py |
drlviz | drlviz-master/doom_evaluation.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 14 14:31:17 2018
@author: edward
"""
if __name__ == '__main__': # changes backend for animation tests
import matplotlib
matplotlib.use("Agg")
import numpy as np
from collections import deque
from moviepy.editor import ImageSequenceClip
from environments import DoomEnvironment
import torch
from torch import Tensor
from torch.autograd import Variable
from arguments import parse_game_args
from multi_env import MultiEnvs
from models import CNNPolicy
import matplotlib.pyplot as plt
class BaseAgent(object):
    """Inference-time wrapper around a policy model: maintains the recurrent
    state / frame-stack memory across steps and exposes action selection,
    optionally with input-gradient saliency maps."""
    def __init__(self, model, params):
        self.model = model
        self.cuda = params.cuda
        self.gradients = None
        self.step = 0
        # self.update_relus()
        if params.num_stack > 1:
            # Frame stacking: keep the last num_stack observations.
            self.exp_size = params.num_stack
            self.short_term_memory = deque()
        # NOTE(review): volatile=True is a legacy pre-0.4 PyTorch flag —
        # confirm against the pinned torch version.
        self.state = Variable(torch.zeros(1, model.state_size), volatile=True)
        self.mask = Variable(Tensor([1.0]), volatile=True)
        print(self.mask)
        if params.cuda:
            self.state = self.state.cuda()
            self.mask = self.mask.cuda()
    def get_action(self, observation, epsilon=0.0):
        """Return the model's deterministic action index for *observation*."""
        if hasattr(self, 'short_term_memory'):
            observation = self._prepare_observation(observation)
        observation = Variable(torch.from_numpy(observation), volatile=True).unsqueeze(0)
        if self.cuda:
            print('la>')
            observation = observation.cuda()
        _, action, _, self.state = self.model.act(observation, self.state, self.mask, deterministic=True)
        return action.cpu().data.numpy()[0, 0]
    def get_action_value_and_probs(self, observation, epsilon=0.0):
        """Return (action, value, probs, saliency) where saliency is a
        normalized uint8 map of input gradients w.r.t. the argmax action."""
        if hasattr(self, 'short_term_memory'):
            observation = self._prepare_observation(observation)
        observation = Variable(torch.from_numpy(observation).unsqueeze(0), requires_grad=True)
        if self.cuda:
            observation = observation.cuda()
        value, action, probs, self.state, x = self.model.get_action_value_and_probs(observation, self.state, self.mask, [], deterministic=True)
        self.model.zero_grad()
        # Backprop a one-hot of the most probable action through the features.
        te = probs.cpu().data.numpy()
        one_hot_output = torch.cuda.FloatTensor(1, x.size()[-1]).zero_()
        one_hot_output[0][te.argmax()] = 1
        probs = Variable(probs.data, requires_grad=True)
        x.backward(gradient=one_hot_output)
        x.detach_()
        # Positive input gradients -> HWC -> channel-max -> [0, 254] uint8.
        grads = observation.grad.data.clamp(min=0)
        grads.squeeze_()
        grads.transpose_(0, 1)
        grads.transpose_(1, 2)
        grads = np.amax(grads.cpu().numpy(), axis=2)
        grads -= grads.min()
        grads /= grads.max()
        grads *= 254
        grads = grads.astype(np.int8)
        return action.cpu().data.numpy()[0, 0], value.cpu().data.numpy(), probs.cpu().data.numpy(), grads
    def get_action_value_and_probs_zeroes(self, observation, mask2, epsilon=0.0):
        """Same as get_action_value_and_probs, but applies the hidden-unit
        mask *mask2* inside the model and backprops the raw features."""
        if hasattr(self, 'short_term_memory'):
            observation = self._prepare_observation(observation)
        observation = Variable(torch.from_numpy(observation).unsqueeze(0), requires_grad=True)
        if self.cuda:
            observation = observation.cuda()
        value, action, probs, self.state, x = self.model.get_action_value_and_probs(observation, self.state, self.mask, mask2, deterministic=True)
        self.model.zero_grad()
        # te = probs.cpu().data.numpy()
        # one_hot_output = torch.cuda.FloatTensor(1, x.size()[-1]).zero_()
        # one_hot_output[0][te.argmax()] = 1
        # probs = Variable(probs.data, requires_grad=True)
        x.backward(gradient=x)
        x.detach_()
        # Positive input gradients -> HWC -> channel-max -> [0, 254] uint8.
        grads = observation.grad.data.clamp(min=0)
        grads.squeeze_()
        grads.transpose_(0, 1)
        grads.transpose_(1, 2)
        grads = np.amax(grads.cpu().numpy(), axis=2)
        grads -= grads.min()
        grads /= grads.max()
        grads *= 254
        grads = grads.astype(np.int8)
        return action.cpu().data.numpy()[0, 0], value.cpu().data.numpy(), probs.cpu().data.numpy(), grads
    def reset(self):
        """
        reset the models hidden layer when starting a new rollout
        """
        if hasattr(self, 'short_term_memory'):
            self.short_term_memory = deque()
        self.state = Variable(torch.zeros(1, self.model.state_size), volatile=True)
        if self.cuda:
            self.state = self.state.cuda()
        self.step = 0
    def _prepare_observation(self, observation):
        """
        As the network expects an input of n frames, we must store a small
        short term memory of frames. At input this is completely empty so
        I pad with the first observations 4 times
        """
        if len(self.short_term_memory) == 0:
            for _ in range(self.exp_size):
                self.short_term_memory.append(observation)
        self.short_term_memory.popleft()
        self.short_term_memory.append(observation)
        return np.vstack(self.short_term_memory)
    def get_step(self):
        """Return the current step counter."""
        return self.step
def eval_model(model, params, logger, step, train_iters, num_games):
    """Run a fresh evaluation of ``model``: build an environment and an
    agent, then delegate to :func:`eval_agent`."""
    # NOTE(review): DoomEnvironment is not among this file's visible
    # imports — confirm where it is defined / imported from.
    env = DoomEnvironment(params)
    agent = BaseAgent(model, params)
    eval_agent(agent, env, logger, params, step, train_iters, num_games)
def eval_agent(agent, env, logger, params, step, train_iters, num_games=10):
    """
    Evaluate an agent's performance in an environment over ``num_games``
    episodes. Logs the mean total reward and per-game rewards/lengths,
    and writes movies of the best and worst episodes.
    """
    # TODO: Back up the enviroment so the agent can start where it left off
    best_obs = None
    worst_obs = None
    # sentinels so the first episode always becomes both best and worst
    best_reward = -10000
    worst_reward = 100000
    accumulated_rewards = 0.0
    reward_list = []
    time_list = []
    for game in range(num_games):
        env.reset()
        agent.reset()
        k = 0  # step counter for this episode
        rewards = []
        obss = []
        while not env.is_episode_finished():
            obs = env.get_observation()
            action = agent.get_action(obs, epsilon=0.0)
            reward = env.make_action(action)
            rewards.append(reward)
            if not params.norm_obs:
                # scale raw [0, 255] pixels into [0, 1] for the movie
                obs = obs * (1.0 / 255.0)
            obss.append(obs)
            k += 1
        time_list.append(k)
        reward_list.append(env.get_total_reward())
        if env.get_total_reward() > best_reward:
            best_obs = obss
            best_reward = env.get_total_reward()
        if env.get_total_reward() < worst_reward:
            worst_obs = obss
            worst_reward = env.get_total_reward()
        accumulated_rewards += env.get_total_reward()
    # best episode under `step`, worst under `step + 1`
    write_movie(params, logger, best_obs, step, best_reward)
    write_movie(params, logger, worst_obs, step + 1, worst_reward)
    logger.write('Step: {:0004}, Iter: {:000000008} Eval mean reward: {:0003.3f}'.format(step, train_iters, accumulated_rewards / num_games))
    logger.write('Step: {:0004}, Game rewards: {}, Game times: {}'.format(step, reward_list, time_list))
def write_movie(params, logger, observations, step, score):
    """Encode an episode's observations as an mp4 in the eval output dir.

    Frames are CHW in [0, 1]; converted to HWC in [0, 255] for encoding.
    """
    observations = [o.transpose(1, 2, 0) * 255.0 for o in observations]
    # NOTE(review): ImageSequenceClip looks like moviepy's; it is not
    # imported in this file's visible imports — confirm.
    clip = ImageSequenceClip(observations, fps=int(30 / params.frame_skip))
    output_dir = logger.get_eval_output()
    clip.write_videofile('{}eval{:0004}_{:00005.0f}.mp4'.format(output_dir, step, score * 100))
if __name__ == '__main__':
    # Test to improve movie with action probs, values etc
    params = parse_game_args()
    params.norm_obs = False
    params.recurrent_policy = True
    envs = MultiEnvs(params.simulator, 1, 1, params)
    obs_shape = envs.obs_shape
    # stack num_stack frames along the channel axis
    obs_shape = (obs_shape[0] * params.num_stack, *obs_shape[1:])
    model = CNNPolicy(obs_shape[0], envs.num_actions, params.recurrent_policy, obs_shape)
    # NOTE(review): DoomEnvironment is not among this file's visible
    # imports — confirm where it is defined / imported from.
    env = DoomEnvironment(params)
    agent = BaseAgent(model, params)
    env.reset()
    agent.reset()
    # rollout buffers
    rewards = []
    obss = []
    actions = []
    action_probss = []
    values = []
    while not env.is_episode_finished():
        obs = env.get_observation()
        # action = agent.get_action(obs, epsilon=0.0)
        # BUG FIX: get_action_value_and_probs returns FOUR values
        # (action, value, probs, saliency grads); unpacking three raised
        # a ValueError. The saliency map is unused here.
        action, value, action_probs, _grads = agent.get_action_value_and_probs(obs, epsilon=0.0)
        # print(action)
        reward = env.make_action(action)
        rewards.append(reward)
        obss.append(obs)
        # BUG FIX: was actions.append(actions) — appended the list to itself
        actions.append(action)
        action_probss.append(action_probs)
        values.append(value)
    # sliding windows for the value / reward plots
    value_queue = deque()
    reward_queue = deque()
    for i in range(64):
        value_queue.append(0.0)
        reward_queue.append(0.0)
    import matplotlib.animation as manimation
    FFMpegWriter = manimation.writers['ffmpeg']
    metadata = dict(title='Movie Test', artist='Edward Beeching',
                    comment='First movie with data')
    writer = FFMpegWriter(fps=7.5, metadata=metadata)
    # plt.style.use('seaborn-paper')
    fig = plt.figure(figsize=(16, 9))
    ax1 = plt.subplot2grid((6, 6), (0, 0), colspan=6, rowspan=4)
    ax2 = plt.subplot2grid((6, 6), (4, 3), colspan=3, rowspan=2)
    ax3 = plt.subplot2grid((6, 6), (4, 0), colspan=3, rowspan=1)
    ax4 = plt.subplot2grid((6, 6), (5, 0), colspan=3, rowspan=1)
    # World plot
    im = ax1.imshow(obs.transpose(1, 2, 0) / 255.0)
    ax1.axis('off')
    # Action plot
    bar_object = ax2.bar('L, R, F, B, L + F, L + B, R + F, R + B'.split(','), action_probs.tolist()[0])
    ax2.set_title('Action Probabilities', position=(0.5, 0.85))
    # plt.title('Action probabilities')
    # ax2.axis('on')
    ax2.set_ylim([-0.01, 1.01])
    # values
    values_ob, = ax3.plot(value_queue)
    ax3.set_title('State Values', position=(0.1, 0.05))
    ax3.set_ylim([np.min(np.stack(values)) - 0.2, np.max(np.stack(values)) + 0.2])
    ax3.get_xaxis().set_visible(False)
    # plt.title('State values')
    rewards_ob, = ax4.plot(reward_queue)
    ax4.set_title('Rewards', position=(0.07, 0.05))
    # plt.title('Reward values')
    ax4.set_ylim([-0.01, 1.0])
    fig.tight_layout()
    print('writing')
    with writer.saving(fig, "writer_test.mp4", 100):
        for observation, action_probs, value, reward in zip(obss, action_probss, values, rewards):
            im.set_array(observation.transpose(1, 2, 0) / 255.0)
            for b, v in zip(bar_object, action_probs.tolist()[0]):
                b.set_height(v)
            value_queue.popleft()
            value_queue.append(value[0, 0])
            reward_queue.popleft()
            reward_queue.append(reward)
            values_ob.set_ydata(value_queue)
            rewards_ob.set_ydata(reward_queue)
            writer.grab_frame()
| 10,654 | 32.296875 | 146 | py |
spyn-repr | spyn-repr-master/spn/factory.py | from spn.linked.spn import Spn as SpnLinked
from spn.linked.layers import Layer as LayerLinked
from spn.linked.layers import SumLayer as SumLayerLinked
from spn.linked.layers import ProductLayer as ProductLayerLinked
from spn.linked.layers import CategoricalInputLayer
from spn.linked.layers import CategoricalSmoothedLayer \
as CategoricalSmoothedLayerLinked
from spn.linked.layers import CategoricalIndicatorLayer \
as CategoricalIndicatorLayerLinked
from spn.linked.layers import CategoricalCLInputLayer \
as CategoricalCLInputLayerLinked
from spn.linked.nodes import Node
from spn.linked.nodes import SumNode
from spn.linked.nodes import ProductNode
from spn.linked.nodes import CategoricalSmoothedNode
from spn.linked.nodes import CategoricalIndicatorNode
from spn.linked.nodes import CLTreeNode
from spn.linked.nodes import ConstantNode
from spn.theanok.layers import SumLayer_logspace as SumLayerTheanok
from spn.theanok.layers import ProductLayer_logspace as ProductLayerTheanok
from spn.theanok.layers import TheanokLayer as LayerTheanok
from spn.theanok.layers import InputLayer_logspace as InputLayerTheanok
from spn.theanok.spn import BlockLayeredSpn
from spn.utils import pairwise
from spn import INT_TYPE
import numpy
from math import ceil
from theano import config
import scipy.sparse
import sklearn.preprocessing
import random
import itertools
from collections import deque
from collections import defaultdict
import dataset
import logging
class SpnFactory(object):
    """
    Static factory for building linked (and layered) Sum-Product
    Networks: kernel density estimators, naive factorizations, random
    top-down structures, and conversions from simple linked SPNs
    (parent->children) to layered ones.
    """
    #####################################################
    #
    #####################################################
    @classmethod
    def linked_kernel_density_estimation(cls,
                                         n_instances,
                                         features,
                                         node_dict=None,
                                         alpha=0.1
                                         # ,batch_size=1,
                                         # sparse=False
                                         ):
        """
        Build a kernel-density-estimation SPN: a root sum node over one
        product node per training instance; leaves are either sum nodes
        over categorical indicators (when node_dict is None) or
        categorical smoothed nodes built from node_dict with Laplace
        smoothing factor alpha.
        """
        n_features = len(features)
        # the top one is a sum layer with a single node
        root_node = SumNode()
        root_layer = SumLayerLinked([root_node])
        # second one is a product layer with n_instances nodes
        product_nodes = [ProductNode() for i in range(n_instances)]
        product_layer = ProductLayerLinked(product_nodes)
        # linking them to the root node
        for prod_node in product_nodes:
            root_node.add_child(prod_node, 1. / n_instances)
        # last layer can be a categorical smoothed input
        # or sum_layer + categorical indicator input
        input_layer = None
        layers = None
        n_leaf_nodes = n_features * n_instances
        if node_dict is None:
            # creating a sum_layer with n_leaf_nodes
            sum_nodes = [SumNode() for i in range(n_leaf_nodes)]
            # store them into a layer
            sum_layer = SumLayerLinked(sum_nodes)
            # linking them to the products above
            for i, prod_node in enumerate(product_nodes):
                for j in range(n_features):
                    # getting the next n_features nodes
                    prod_node.add_child(sum_nodes[i * n_features + j])
            # now creating the indicator nodes
            input_layer = \
                CategoricalIndicatorLayerLinked(vars=features)
            # linking the sum nodes to the indicator vars
            for i, sum_node in enumerate(sum_nodes):
                # getting the feature id
                j = i % n_features
                # and thus its number of values
                n_values = features[j]
                # getting the indices of indicators
                start_index = sum(features[:j])
                end_index = start_index + n_values
                indicators = [node for node
                              in input_layer.nodes()][start_index:end_index]
                for ind_node in indicators:
                    sum_node.add_child(ind_node, 1. / n_values)
            # storing levels
            layers = [sum_layer, product_layer,
                      root_layer]
        else:
            # create a categorical smoothed layer
            input_layer = \
                CategoricalSmoothedLayerLinked(vars=features,
                                               node_dicts=node_dict,
                                               alpha=alpha)
            # it shall contain n_leaf_nodes nodes
            smooth_nodes = list(input_layer.nodes())
            assert len(smooth_nodes) == n_leaf_nodes
            # linking it
            for i, prod_node in enumerate(product_nodes):
                for j in range(n_features):
                    # getting the next n_features nodes
                    prod_node.add_child(smooth_nodes[i * n_features + j])
            # setting the used levels
            layers = [product_layer, root_layer]
        # create the spn from levels
        kern_spn = SpnLinked(input_layer, layers)
        return kern_spn
    @classmethod
    def linked_naive_factorization(cls,
                                   features,
                                   node_dict=None,
                                   alpha=0.1):
        """
        Build a fully factorized SPN: a single product root with one
        child per feature (a sum over indicators when node_dict is None,
        otherwise a categorical smoothed leaf).
        """
        n_features = len(features)
        # create an input layer
        input_layer = None
        layers = None
        # first layer is a product layer with n_feature children
        root_node = ProductNode()
        root_layer = ProductLayerLinked([root_node])
        # second is a sum node on an indicator layer
        if node_dict is None:
            # creating sum nodes
            sum_nodes = [SumNode() for i in range(n_features)]
            # linking to the root
            for node in sum_nodes:
                root_node.add_child(node)
            # store into a level
            sum_layer = SumLayerLinked(sum_nodes)
            # now create an indicator layer
            input_layer = CategoricalIndicatorLayerLinked(vars=features)
            # and linking it
            # TODO make this a function
            for i, sum_node in enumerate(sum_nodes):
                # getting the feature id
                j = i % n_features
                # and thus its number of values
                n_values = features[j]
                # getting the indices of indicators
                start_index = sum(features[:j])
                end_index = start_index + n_values
                indicators = [node for node
                              in input_layer.nodes()][start_index:end_index]
                for ind_node in indicators:
                    sum_node.add_child(ind_node, 1. / n_values)
            # collecting layers
            layers = [sum_layer, root_layer]
        # or a categorical smoothed layer
        else:
            input_layer = CategoricalSmoothedLayerLinked(vars=features,
                                                         node_dicts=node_dict,
                                                         alpha=alpha)
            # it shall contain n_features nodes
            smooth_nodes = list(input_layer.nodes())
            assert len(smooth_nodes) == n_features
            for node in smooth_nodes:
                root_node.add_child(node)
            # set layers accordingly
            layers = [root_layer]
        # build the spn
        naive_fact_spn = SpnLinked(input_layer, layers)
        return naive_fact_spn
    @classmethod
    def linked_random_spn_top_down(cls,
                                   vars,
                                   n_layers,
                                   n_max_children,
                                   n_scope_children,
                                   max_scope_split,
                                   merge_prob=0.5,
                                   rand_gen=None):
        """
        Generate a random SPN structure top-down over the given vars,
        alternating sum and product layers; scopes are decomposed
        randomly (at most max_scope_split pieces), sum nodes over the
        same scope may be merged with probability merge_prob, and
        building stops once a layer produces no new nodes.
        """
        def cluster_scopes(scope_list):
            # NOTE(review): unused helper; `cluster_dict[var] += {i}` on a
            # plain dict would raise KeyError if this were ever called.
            cluster_dict = {}
            for i, var in enumerate(scope_list):
                cluster_dict[var] += {i}
            return cluster_dict
        def cluster_set_scope(scope_list):
            return {scope for scope in scope_list}
        def link_leaf_to_input_layer(sum_leaf,
                                     scope_var,
                                     input_layer,
                                     rand_gen):
            for indicator_node in input_layer.nodes():
                if indicator_node.var == scope_var:
                    rand_weight = rand_gen.random()
                    sum_leaf.add_child(indicator_node, rand_weight)
                    # print(sum_leaf, indicator_node, rand_weight)
            # normalizing
            sum_leaf.normalize()
        #
        # creating a product layer
        #
        def build_product_layer(parent_layer,
                                parent_scope_list,
                                n_max_children,
                                n_scope_children,
                                input_layer,
                                rand_gen):
            # grouping the scopes of the parents
            scope_clusters = cluster_set_scope(parent_scope_list)
            # for each scope add a fixed number of children
            children_lists = {scope: [ProductNode(var_scope=scope)
                                      for i in range(n_scope_children)]
                              for scope in scope_clusters}
            # counting which node is used
            children_counts = {scope: [0 for i in range(n_scope_children)]
                               for scope in scope_clusters}
            # now link those randomly to their parent
            for parent, scope in zip(parent_layer.nodes(), parent_scope_list):
                # only for nodes not becoming leaves
                if len(scope) > 1:
                    # sampling at most n_max_children from those in the same
                    # scope
                    children_scope_list = children_lists[scope]
                    sample_length = min(
                        len(children_scope_list), n_max_children)
                    sampled_ids = rand_gen.sample(range(n_scope_children),
                                                  sample_length)
                    sampled_children = [None for i in range(sample_length)]
                    for i, id in enumerate(sampled_ids):
                        # getting the sampled child
                        sampled_children[i] = children_scope_list[id]
                        # updating its counter
                        children_counts[scope][id] += 1
                    for child in sampled_children:
                        # parent is a sum layer, we must set a random weight
                        rand_weight = rand_gen.random()
                        parent.add_child(child, rand_weight)
                    # we can now normalize it
                    parent.normalize()
                else:
                    # binding the node to the input layer
                    (scope_var,) = scope
                    link_leaf_to_input_layer(parent,
                                             scope_var,
                                             input_layer,
                                             rand_gen)
            # pruning those children never used
            for scope in children_lists.keys():
                children_scope_list = children_lists[scope]
                scope_counts = children_counts[scope]
                used_children = [child
                                 for count, child in zip(scope_counts,
                                                         children_scope_list)
                                 if count > 0]
                children_lists[scope] = used_children
            # creating the layer and new scopelist
            # print('children list val', children_lists.values())
            children_list = [child
                             for child in
                             itertools.chain.from_iterable(
                                 children_lists.values())]
            scope_list = [key
                          for key, child_list in children_lists.items()
                          for elem in child_list]
            # print('children list', children_list)
            # print('scope list', scope_list)
            prod_layer = ProductLayerLinked(children_list)
            return prod_layer, scope_list
        def build_sum_layer(parent_layer,
                            parent_scope_list,
                            rand_gen,
                            max_scope_split=-1,
                            merge_prob=0.5):
            # keeping track of leaves
            # leaf_props = []
            scope_clusters = cluster_set_scope(parent_scope_list)
            # looping through all the parent nodes and their scopes
            # in order to decompose their scope
            dec_scope_list = []
            for scope in parent_scope_list:
                # decomposing their scope into k random pieces
                k = len(scope)
                if 1 < max_scope_split <= len(scope):
                    k = rand_gen.randint(2, max_scope_split)
                shuffled_scope = list(scope)
                rand_gen.shuffle(shuffled_scope)
                dec_scopes = [frozenset(shuffled_scope[i::k])
                              for i in range(k)]
                dec_scope_list.append(dec_scopes)
                # if a decomposed scope consists of only one var, generate a
                # leaf
                # leaves = [(parent, (dec_scope,))
                #           for dec_scope in dec_scopes if len(dec_scope) == 1]
                # leaf_props.extend(leaves)
            # generating a unique decomposition
            used_decs = {}
            children_list = []
            scope_list = []
            for parent, decs in zip(parent_layer.nodes(),
                                    dec_scope_list):
                merge_count = 0
                for scope in decs:
                    sum_node = None
                    try:
                        rand_perc = rand_gen.random()
                        if (merge_count < len(decs) - 1 and
                                rand_perc > merge_prob):
                            sum_node = used_decs[scope]
                            merge_count += 1
                        else:
                            raise Exception()
                    # NOTE(review): bare except used as control flow — the
                    # raise above (or a KeyError on used_decs) falls through
                    # to node creation
                    except:
                        # create a node for it
                        sum_node = SumNode(var_scope=scope)
                        children_list.append(sum_node)
                        scope_list.append(scope)
                        used_decs[scope] = sum_node
                    parent.add_child(sum_node)
            # unique_dec = {frozenset(dec) for dec in
            #               itertools.chain.from_iterable(dec_scope_list)}
            # print('unique dec', unique_dec)
            # building a dict scope->child
            # children_dict = {scope: SumNode() for scope in unique_dec}
            # now linking parents to their children
            # for parent, scope in zip(parent_layer.nodes(),
            #                          parent_scope_list):
            #     dec_scopes = dec_scope_list[scope]
            #     for dec in dec_scopes:
            # retrieving children
            # adding it
            #         parent.add_child(children_dict[dec])
            # we already have the nodes and their scopes
            # children_list = [child for child in children_dict.values()]
            # scope_list = [scope for scope in children_dict.keys()]
            sum_layer = SumLayerLinked(nodes=children_list)
            return sum_layer, scope_list
        # if no generator is provided, create a new one
        if rand_gen is None:
            rand_gen = random.Random()
        # create input layer
        # _vars = [2, 3, 2, 2, 4]
        input_layer = CategoricalIndicatorLayerLinked(vars=vars)
        # create root layer
        full_scope = frozenset({i for i in range(len(vars))})
        root = SumNode(var_scope=full_scope)
        root_layer = SumLayerLinked(nodes=[root])
        last_layer = root_layer
        # create top scope list
        last_scope_list = [full_scope]
        layers = [root_layer]
        layer_count = 0
        stop_building = False
        while not stop_building:
            # checking for early termination
            # this one leads to split product nodes into leaves
            if layer_count >= n_layers:
                print('Max level reached, trying to stop')
                max_scope_split = -1
            # build a new layer alternating types
            if isinstance(last_layer, SumLayerLinked):
                print('Building product layer')
                last_layer, last_scope_list = \
                    build_product_layer(last_layer,
                                        last_scope_list,
                                        n_max_children,
                                        n_scope_children,
                                        input_layer,
                                        rand_gen)
            elif isinstance(last_layer, ProductLayerLinked):
                print('Building sum layer')
                last_layer, last_scope_list = \
                    build_sum_layer(last_layer,
                                    last_scope_list,
                                    rand_gen,
                                    max_scope_split,
                                    merge_prob)
            # testing for more nodes to expand
            if last_layer.n_nodes() == 0:
                print('Stop building')
                stop_building = True
            else:
                layers.append(last_layer)
                layer_count += 1
        # checking for early termination
        # if not stop_building:
        #     if isinstance(last_layer, ProductLayerLinked):
        # building a sum layer splitting everything into one
        # length scopes
        #         last_sum_layer, last_scope_list = \
        #             build_sum_layer(last_layer,
        #                             last_scope_list,
        #                             rand_gen,
        #                             max_scope_split=-1)
        # then linking each node to the input layer
        #         for sum_leaf, scope in zip(last_sum_layer.nodes(),
        #                                    last_scope_list):
        #             (scope_var,) = scope
        #             link_leaf_to_input_layer(sum_leaf,
        #                                      scope_var,
        #                                      input_layer,
        #                                      rand_gen)
        #     elif isinstance(last_layer, SumLayerLinked):
        #         pass
        # print('LAYERS ', len(layers), '\n')
        # for i, layer in enumerate(layers):
        #     print('LAYER ', i)
        #     print(layer)
        # print('\n')
        spn = SpnLinked(input_layer=input_layer,
                        layers=layers[::-1])
        # testing
        # scope_list = [
        #     frozenset({1, 3, 4}), frozenset({2, 0}), frozenset({1, 3, 4})]
        # sum_layer = SumLayerLinked(nodes=[SumNode(), SumNode(), SumNode()])
        # prod_layer, scope_list = build_product_layer(
        #     sum_layer, scope_list, 2, 3, input_layer, rand_gen)
        # sum_layer1, scope_list_2 = build_sum_layer(prod_layer,
        #                                            scope_list,
        #                                            rand_gen,
        #                                            max_scope_split=2
        #                                            )
        # prod_layer_2, scope_list_3 = build_product_layer(sum_layer1,
        #                                                  scope_list_2,
        #                                                  2,
        #                                                  3,
        #                                                  input_layer,
        #                                                  rand_gen)
        # create spn from layers
        # spn = SpnLinked(input_layer=input_layer,
        #                 layers=[prod_layer_2, sum_layer1,
        #                         prod_layer, sum_layer, root_layer])
        return spn
    @classmethod
    def layered_linked_spn(cls, root_node):
        """
        Given a simple linked version (parent->children),
        returns a layered one (linked + layers)
        """
        layers = []
        root_layer = None
        input_nodes = []
        layer_nodes = []
        input_layer = None
        # layers.append(root_layer)
        previous_level = None
        # collecting nodes to visit
        # BFS frontier: `open` is the current level, `next_open` the next
        open = deque()
        next_open = deque()
        closed = set()
        open.append(root_node)
        while open:
            # getting a node
            current_node = open.popleft()
            current_id = current_node.id
            # has this already been seen?
            if current_id not in closed:
                closed.add(current_id)
                layer_nodes.append(current_node)
                # print('CURRENT NODE')
                # print(current_node)
                # expand it
                for child in current_node.children:
                    # only for non leaf nodes
                    if (isinstance(child, SumNode) or
                            isinstance(child, ProductNode)):
                        next_open.append(child)
                    else:
                        # it must be an input node
                        if child.id not in closed:
                            input_nodes.append(child)
                            closed.add(child.id)
            # open is now empty, but new open not
            if (not open):
                # swap them
                open = next_open
                next_open = deque()
                # and create a new level alternating type
                if previous_level is None:
                    # it is the first level
                    if isinstance(root_node, SumNode):
                        previous_level = SumLayerLinked([root_node])
                    elif isinstance(root_node, ProductNode):
                        previous_level = ProductLayerLinked([root_node])
                elif isinstance(previous_level, SumLayerLinked):
                    previous_level = ProductLayerLinked(layer_nodes)
                elif isinstance(previous_level, ProductLayerLinked):
                    previous_level = SumLayerLinked(layer_nodes)
                layer_nodes = []
                layers.append(previous_level)
        #
        # finishing layers
        #
        #
        # checking for CLTreeNodes
        cltree_leaves = False
        for node in input_nodes:
            if isinstance(node, CLTreeNode):
                cltree_leaves = True
                break
        if cltree_leaves:
            input_layer = CategoricalCLInputLayerLinked(input_nodes)
        else:
            # otherwiise assuming all input nodes are homogeneous
            if isinstance(input_nodes[0], CategoricalSmoothedNode):
                # print('SMOOTH LAYER')
                input_layer = CategoricalSmoothedLayerLinked(input_nodes)
            elif isinstance(input_nodes[0], CategoricalIndicatorNode):
                input_layer = CategoricalIndicatorLayerLinked(input_nodes)
        # layers were collected root-first; SpnLinked wants bottom-up
        spn = SpnLinked(input_layer=input_layer,
                        layers=layers[::-1])
        return spn
    @classmethod
    def pruned_spn_from_slices(cls, node_assoc, building_stack, logger=None):
        """
        Link the nodes referenced by a stack of building slices into a
        single SPN, pruning same-type parent/child chains (sum-of-sum and
        product-of-product are collapsed, multiplying weights through).
        Returns the root node.
        """
        if logger is None:
            logger = logging.getLogger(__name__)
        # traversing the building stack
        # to link and prune nodes
        for build_node in reversed(building_stack):
            # current node
            current_id = build_node.id
            # print('+ Current node: %d', current_id)
            current_children_slices = build_node.children
            # print('\tchildren: %r', current_children_slices)
            current_children_weights = build_node.weights
            # print('\tweights: %r', current_children_weights)
            # retrieving corresponding node
            node = node_assoc[current_id]
            # print('retrieved node', node)
            # discriminate by type
            if isinstance(node, SumNode):
                logging.debug('it is a sum node %d', current_id)
                # getting children
                for child_slice, child_weight in zip(current_children_slices,
                                                     current_children_weights):
                    # print(child_slice)
                    # print(child_slice.id)
                    # print(node_assoc)
                    child_id = child_slice.id
                    child_node = node_assoc[child_id]
                    # print(child_node)
                    # checking children types as well
                    if isinstance(child_node, SumNode):
                        logging.debug('++ pruning node: %d', child_node.id)
                        # this shall be pruned
                        for grand_child, grand_child_weight \
                                in zip(child_node.children,
                                       child_node.weights):
                            node.add_child(grand_child,
                                           grand_child_weight *
                                           child_weight)
                    else:
                        logging.debug('+++ Adding it as child: %d',
                                      child_node.id)
                        node.add_child(child_node, child_weight)
                        # print('children added')
            elif isinstance(node, ProductNode):
                logging.debug('it is a product node %d', current_id)
                # linking children
                for child_slice in current_children_slices:
                    child_id = child_slice.id
                    child_node = node_assoc[child_id]
                    # checking for alternating type
                    if isinstance(child_node, ProductNode):
                        logging.debug('++ pruning node: %d', child_node.id)
                        # this shall be pruned
                        for grand_child in child_node.children:
                            node.add_child(grand_child)
                    else:
                        node.add_child(child_node)
                        # print('+++ Linking child %d', child_node.id)
        # this is superfluous, returning a pointer to the root
        root_build_node = building_stack[0]
        return node_assoc[root_build_node.id]
    @classmethod
    def pruned_spn_from_scopes(cls, scope_assoc, building_stack, logger=None):
        """
        Scope-based variant of pruned_spn_from_slices.

        NOTE(review): as the FIXME below says, this is a non-working stub
        (node/node_assoc/build_node are never assigned before use).
        """
        build_node = None
        node = None
        node_assoc = None
        #
        # FIXME: this is just a stub, it does not even compile
        #
        if logger is None:
            logger = logging.getLogger(__name__)
        # traversing the building stack
        # to link and prune nodes
        for build_scope in reversed(building_stack):
            # current node
            current_id = build_scope.id
            # print('+ Current node: %d', current_id)
            current_children_slices = build_node.children
            # print('\tchildren: %r', current_children_slices)
            current_children_weights = build_node.weights
            # print('\tweights: %r', current_children_weights)
            # retrieving corresponding node
            scope_node = scope_assoc[current_id]
            # print('retrieved node', node)
            # discriminate by type
            if isinstance(node, SumNode):
                logging.debug('it is a sum node %d', current_id)
                # getting children
                for child_slice, child_weight in zip(current_children_slices,
                                                     current_children_weights):
                    # print(child_slice)
                    # print(child_slice.id)
                    # print(node_assoc)
                    child_id = child_slice.id
                    child_node = node_assoc[child_id]
                    # print(child_node)
                    # checking children types as well
                    if isinstance(child_node, SumNode):
                        logging.debug('++ pruning node: %d', child_node.id)
                        # this shall be pruned
                        for grand_child, grand_child_weight \
                                in zip(child_node.children,
                                       child_node.weights):
                            node.add_child(grand_child,
                                           grand_child_weight *
                                           child_weight)
                    else:
                        logging.debug('+++ Adding it as child: %d',
                                      child_node.id)
                        node.add_child(child_node, child_weight)
                        # print('children added')
            elif isinstance(node, ProductNode):
                logging.debug('it is a product node %d', current_id)
                # linking children
                for child_slice in current_children_slices:
                    child_id = child_slice.id
                    child_node = node_assoc[child_id]
                    # checking for alternating type
                    if isinstance(child_node, ProductNode):
                        logging.debug('++ pruning node: %d', child_node.id)
                        # this shall be pruned
                        for grand_child in child_node.children:
                            node.add_child(grand_child)
                    else:
                        node.add_child(child_node)
                        # print('+++ Linking child %d', child_node.id)
        # this is superfluous, returning a pointer to the root
        root_build_node = building_stack[0]
        return node_assoc[root_build_node.id]
    @classmethod
    def layered_pruned_linked_spn(cls, root_node):
        """
        Prune same-type parent/child chains in-place in a linked SPN
        (sum-of-sum weights are multiplied through, product-of-product
        children are hoisted), traversing bottom-up. Returns the (still
        un-layered) root node.
        """
        #
        # first traverse the spn top down to collect a bottom up traversal order
        # it could be done in a single pass I suppose, btw...
        building_queue = deque()
        traversal_stack = deque()
        building_queue.append(root_node)
        while building_queue:
            #
            # getting current node
            curr_node = building_queue.popleft()
            #
            # appending it to the stack
            traversal_stack.append(curr_node)
            #
            # considering children
            # NOTE(review): bare except skips leaf nodes with no
            # `children` attribute
            try:
                for child in curr_node.children:
                    building_queue.append(child)
            except:
                pass
        #
        # now using the inverse traversal order
        for node in reversed(traversal_stack):
            # print('retrieved node', node)
            # discriminate by type
            if isinstance(node, SumNode):
                logging.debug('it is a sum node %d', node.id)
                current_children = node.children[:]
                current_weights = node.weights[:]
                # getting children
                children_to_add = deque()
                children_weights_to_add = deque()
                for child_node, child_weight in zip(current_children,
                                                    current_weights):
                    # print(child_slice)
                    # print(child_slice.id)
                    # print(node_assoc)
                    # NOTE(review): stray debug print left in
                    print(child_node)
                    # checking children types as well
                    if isinstance(child_node, SumNode):
                        # this shall be prune
                        logging.debug('++ pruning node: %d', child_node.id)
                        # del node.children[i]
                        # del node.weights[i]
                        # adding subchildren
                        for grand_child, grand_child_weight \
                                in zip(child_node.children,
                                       child_node.weights):
                            children_to_add.append(grand_child)
                            children_weights_to_add.append(grand_child_weight *
                                                           child_weight)
                            # node.add_child(grand_child,
                            #                grand_child_weight *
                            #                child_weight)
                        # print(
                        #     'remaining children', [c.id for c in node.children])
                    else:
                        children_to_add.append(child_node)
                        children_weights_to_add.append(child_weight)
                #
                # adding all the children (ex grand children)
                node.children.clear()
                node.weights.clear()
                for child_to_add, weight_to_add in zip(children_to_add, children_weights_to_add):
                    node.add_child(child_to_add, weight_to_add)
                # else:
                #     print('+++ Adding it as child: %d', child_node.id)
                #     node.add_child(child_node, child_weight)
                #     print('children added')
            elif isinstance(node, ProductNode):
                logging.debug('it is a product node %d', node.id)
                current_children = node.children[:]
                children_to_add = deque()
                # linking children
                for i, child_node in enumerate(current_children):
                    # checking for alternating type
                    if isinstance(child_node, ProductNode):
                        # this shall be pruned
                        logging.debug('++ pruning node: %d', child_node.id)
                        # this must now be useless
                        # del node.children[i]
                        # adding children
                        for grand_child in child_node.children:
                            children_to_add.append(grand_child)
                            # node.add_child(grand_child)
                    else:
                        children_to_add.append(child_node)
                        # node.add_child(child_node)
                        # print('+++ Linking child %d', child_node.id)
                #
                # adding grand children
                node.children.clear()
                for child_to_add in children_to_add:
                    node.add_child(child_to_add)
        """
        #
        # printing
        print(\"TRAVERSAL\")
        building_queue = deque()
        building_queue.append(root_node)
        while building_queue:
            #
            # getting current node
            curr_node = building_queue.popleft()
            #
            # appending it to the stack
            print(curr_node)
            #
            # considering children
            try:
                for child in curr_node.children:
                    building_queue.append(child)
            except:
                pass
        """
        #
        # now transforming it layer wise
        # spn = SpnFactory.layered_linked_spn(root_node)
        return root_node
def merge_block_layers(layer_1, layer_2):
    """Merge two block layers of the same type into a single new layer.

    The merged layer holds both node lists (layer_1's first), the union
    of both layers' input/output layers, and is re-linked into those
    neighbours' output/input sets.
    """
    # both blocks must be of the same layer type
    assert type(layer_1) is type(layer_2)
    #
    # concatenate node lists and build a fresh layer of the same type
    combined = type(layer_1)(list(layer_1.nodes()) + list(layer_2.nodes()))
    #
    # union of i/o neighbourhoods
    combined.input_layers = layer_1.input_layers | layer_2.input_layers
    combined.output_layers = layer_1.output_layers | layer_2.output_layers
    #
    # re-link the neighbours to point at the merged block
    for in_layer in combined.input_layers:
        in_layer.add_output_layer(combined)
    for out_layer in combined.output_layers:
        out_layer.add_input_layer(combined)
    return combined
from scopes import topological_layer_sort
def compute_block_layer_depths(spn):
    """Map every block layer of ``spn`` to its depth.

    The input layer sits at depth 0; each other layer is one deeper than
    its deepest input layer. Layers are visited in topological order so
    all inputs are resolved before their consumers.
    """
    ordered = topological_layer_sort(list(spn.top_down_layers()))
    depths = {spn.input_layer(): 0}
    for block in ordered:
        depths[block] = 1 + max(depths[inp] for inp in block.input_layers)
    return depths
def edge_density_after_merge(layer_1, layer_2):
    """Edge density of the block that would result from merging the two
    layers: actual edges over the maximum possible (full bipartite)
    edges between the combined inputs and the combined nodes."""
    fan_in = sum(inp.n_nodes() for inp in layer_1.input_layers)
    fan_in += sum(inp.n_nodes() for inp in layer_2.input_layers)
    fan_out = layer_1.n_nodes() + layer_2.n_nodes()
    actual_edges = layer_1.n_edges() + layer_2.n_edges()
    return actual_edges / (fan_in * fan_out)
def merge_block_layers_spn(spn, threshold, compute_heuristics=edge_density_after_merge):
    """
    Given an alternated layer linked SPN made by many block layers, try to
    aggregate them into macro blocks.

    At each depth level (the input layer, depth 0, is never merged), the
    pair of layers with the highest heuristic score above ``threshold``
    is greedily merged, repeating until no pair qualifies. Returns a new
    SpnLinked sharing the original input layer.
    """
    #
    # lebeling each block with its depth level
    layer_depth_dict = compute_block_layer_depths(spn)
    #
    # create an inverse dict with depth level -> blocks
    depth_layer_dict = defaultdict(set)
    for layer, depth in layer_depth_dict.items():
        depth_layer_dict[depth].add(layer)
    #
    # here we are storing the new levels, we are assuming the input layer always to be alone
    mod_layers = []
    #
    # from each level, starting from the bottom, excluding the input layer
    for k in sorted(depth_layer_dict.keys())[1:]:
        print('Considering depth {}'.format(k))
        mergeable = True
        k_depth_layers = depth_layer_dict[k]
        while mergeable:
            #
            # retrieve layers at that depth
            #
            # for each possible pair compute an heuristic score
            best_score = -numpy.inf
            best_pair = None
            layer_pairs = itertools.combinations(k_depth_layers, 2)
            can_merge = False
            for layer_1, layer_2 in layer_pairs:
                print('\tConsidering layers: {0} {1}'.format(layer_1.id,
                                                             layer_2.id))
                score = compute_heuristics(layer_1, layer_2)
                # keep the best-scoring pair that beats the threshold
                if score > best_score and score > threshold:
                    can_merge = True
                    best_score = score
                    best_pair = (layer_1, layer_2)
            if can_merge:
                print('merging', best_pair[0].id, best_pair[1].id)
                #
                # merging the best pair
                merged_layer = merge_block_layers(*best_pair)
                #
                # disconnecting the previous ones
                best_pair[0].disconnect_layer()
                best_pair[1].disconnect_layer()
                #
                # storing them back
                k_depth_layers = [l for l in k_depth_layers
                                  if l != best_pair[0] and l != best_pair[1]]
                k_depth_layers.append(merged_layer)
            else:
                mergeable = False
        #
        # finally storing them
        mod_layers.extend(k_depth_layers)
    #
    # creating an SPN out of it:
    mod_spn = SpnLinked(input_layer=spn.input_layer(),
                        layers=mod_layers)
    return mod_spn
def retrieve_children_parent_assoc(spn, root=None):
    """
    Map each node of a linked spn to the set of its parent nodes.

    Traverses all nodes top-down and, for every node exposing a non-empty
    ``children`` attribute, records the inverse child -> parents links.
    """
    if root is None:
        # NOTE(review): root is resolved but not used below — the traversal
        # relies on spn.top_down_nodes(); kept for interface compatibility
        root = spn.root()
    parents_of = defaultdict(set)
    for parent in spn.top_down_nodes():
        children = getattr(parent, 'children', None)
        if children:
            for child in children:
                parents_of[child].add(parent)
    return parents_of
def linked_categorical_input_to_indicators(spn, input_layer=None):
    """
    Convert a linked spn categorical smoothed input layer into an indicator
    one, in place, and return the modified spn.

    Each categorical node is replaced by a sum node over one indicator node
    per value of its variable, weighted by the exponentiated smoothed
    log-probabilities; parents of the old categorical nodes are re-linked
    to the new sum nodes.

    NOTE(review): the input_layer parameter is currently unused — the layer
    is always taken from spn.input_layer().
    """
    #
    # get child, parent relations for node relinking
    child_assoc = retrieve_children_parent_assoc(spn)
    #
    # get input layer
    cat_input_layer = spn.input_layer()
    assert isinstance(cat_input_layer, CategoricalSmoothedLayerLinked)
    #
    # one indicator node for each var value
    vars = cat_input_layer.vars()
    if not vars:
        # fall back to the variables found on the nodes themselves
        vars = list(sorted({node.var for node in cat_input_layer.nodes()}))
    feature_values = cat_input_layer.feature_vals()
    # print('vars', vars)
    # print('feature values', feature_values)
    indicator_nodes = [CategoricalIndicatorNode(var, val)
                       for i, var in enumerate(vars) for val in range(feature_values[i])]
    # for node in indicator_nodes:
    #     print(node)
    indicator_map = defaultdict(set)
    for ind_node in indicator_nodes:
        indicator_map[ind_node.var].add(ind_node)
    sum_nodes = []
    #
    # as many sum nodes as cat nodes
    for node in cat_input_layer.nodes():
        sum_node = SumNode(var_scope=frozenset([node.var]))
        sum_nodes.append(sum_node)
        # children ordered by value; weights come from the categorical
        # distribution (node._var_probs stores log-probabilities)
        for ind_node in sorted(indicator_map[node.var], key=lambda x: x.var_val):
            sum_node.add_child(ind_node, numpy.exp(node._var_probs[ind_node.var_val]))
        #
        # removing links to parents
        parents = child_assoc[node]
        for p_node in parents:
            #
            # assume it to be a product node
            # TODO: generalize
            assert isinstance(p_node, ProductNode)
            p_node.children.remove(node)
            p_node.add_child(sum_node)
    #
    # creating layer
    sum_layer = SumLayerLinked(sum_nodes)
    indicator_layer = CategoricalIndicatorLayerLinked(indicator_nodes)
    cat_input_layer.disconnect_layer()
    spn.set_input_layer(indicator_layer)
    spn.insert_layer(sum_layer, 0)
    return spn
def make_marginalized_network_constant(spn, vars_to_marginalize):
    """
    Replace, in place, sub-networks whose scope has to be marginalized over
    with ConstantNodes.

    A leaf is replaced when its variable is in vars_to_marginalize; an inner
    node is replaced once all of its children have already become constants
    (the traversal is bottom-up, so replacements propagate upwards).  Sum
    edge weights are preserved when re-linking.
    """
    #
    # get child, parent relations for node relinking
    child_assoc = retrieve_children_parent_assoc(spn)
    const_nodes_to_add = []
    scope_to_marginalize = frozenset(vars_to_marginalize)
    #
    # bottom up traversal
    for layer in spn.bottom_up_layers():
        layer_nodes_to_remove = []
        for node in layer.nodes():
            #
            # is this a non-leaf node? or a leaf whose scope is to marginalize?
            to_remove = False
            if hasattr(node, 'children'):
                #
                # if all his children are constants, we can remove it
                if all([isinstance(child, ConstantNode) for child in node.children]):
                    to_remove = True
            else:
                if node.var in scope_to_marginalize:
                    to_remove = True
            if to_remove:
                # NOTE(review): assumes inner nodes also expose a .var
                # attribute usable by ConstantNode — confirm
                const_node = ConstantNode(node.var)
                const_nodes_to_add.append(const_node)
                parents = child_assoc[node]
                #
                # unlink it from parents and relink constant node
                for p_node in parents:
                    if isinstance(p_node, SumNode):
                        # keep the weight of the replaced edge
                        w = p_node.remove_child(node)
                        p_node.add_child(const_node, w)
                    else:
                        p_node.remove_child(node)
                        p_node.add_child(const_node)
                #
                # remove it from the layer as well
                layer_nodes_to_remove.append(node)
        for node in layer_nodes_to_remove:
            layer.remove_node(node)
        #
        # is the layer now empty?
        if not layer._nodes:
            raise ValueError('Layer is empty, unhandled case')
    #
    # adding all constant nodes to the previous nodes in a new input layer
    input_nodes = [node for node in spn.input_layer().nodes()] + const_nodes_to_add
    new_input_layer = CategoricalInputLayer(nodes=input_nodes)
    spn.set_input_layer(new_input_layer)
def split_layer_by_node_scopes(layer, node_layer_assoc, group_by=10):
    """
    Split a layer into sub-layers according to its node scopes.

    Nodes sharing the same scope end up in the same group; when group_by is
    truthy the scope groups are distributed round-robin into
    ceil(n_scopes / group_by) sub-layers, otherwise one sub-layer per scope
    is created.  Useful for indicator layers with many nodes.

    Fixes: a node with var == 0 was previously rejected with ValueError
    because of a truthiness test on the variable id; a stray debug print
    and an unused local were removed.

    Parameters
    ----------
    layer : layer whose nodes() are partitioned; sub-layers are built via
        layer.__class__(nodes=...)
    node_layer_assoc : dict updated in place with node -> new sub-layer
    group_by : int, approximate number of scopes per sub-layer

    Returns
    -------
    list of the newly created sub-layers
    """
    scopes_to_nodes = defaultdict(set)
    for node in layer.nodes():
        if hasattr(node, 'var_scope') and node.var_scope:
            scopes_to_nodes[frozenset(node.var_scope)].add(node)
        elif getattr(node, 'var', None) is not None:
            # compare against None explicitly: var == 0 is a valid
            # variable id and must not be treated as "no scope"
            scopes_to_nodes[frozenset([node.var])].add(node)
        else:
            raise ValueError('Node without scope {}'.format(node))
    #
    # aggregating together?
    sub_layers = None
    n_scopes = len(scopes_to_nodes)
    if group_by:  # and group_by < n_scopes:
        # ceil(n_scopes / group_by) bins, filled round-robin
        n_groups = n_scopes // group_by if n_scopes % group_by == 0 else n_scopes // group_by + 1
        node_groups = [[] for j in range(n_groups)]
        for i, (_scope, nodes) in enumerate(scopes_to_nodes.items()):
            node_groups[i % n_groups].extend(nodes)
        sub_layers = [layer.__class__(nodes=nodes)
                      for nodes in node_groups if nodes]
    else:
        # one sub-layer per distinct scope
        sub_layers = [layer.__class__(nodes=nodes)
                      for _output, nodes in scopes_to_nodes.items()]
    #
    # we have to update the node layer assoc map
    for s in sub_layers:
        for node in s.nodes():
            node_layer_assoc[node] = s
    print('[S] Layer: {} ({}) into {} layers {} ({})'.format(layer.id,
                                                             layer.__class__.__name__,
                                                             len(sub_layers),
                                                             [l.id for l in sub_layers],
                                                             [len(list(l.nodes()))
                                                              for l in sub_layers]))
    return sub_layers
def split_layer_by_outputs(layer,
                           child_parent_assoc,
                           node_layer_assoc,
                           max_n_nodes=None):
    """
    Split a layer into sub-layers whose nodes have outputs in the same set
    of layers; each group is further chunked to at most max_n_nodes nodes.

    Note, they cannot be directly reused in a linked spn otherwise stats are
    getting messed up
    TODO: fix this

    Improvement: the node list is materialized once per group instead of
    being rebuilt on every chunk iteration.

    Parameters
    ----------
    layer : layer to split; sub-layers are built via layer.__class__(nodes=...)
    child_parent_assoc : dict, node -> set of parent nodes
    node_layer_assoc : dict, node -> containing layer; updated in place
    max_n_nodes : int or None (None means no cap)

    Returns
    -------
    list of the newly created sub-layers
    """
    if max_n_nodes is None:
        max_n_nodes = numpy.inf
    # group nodes by the frozenset of layers their parents live in
    output_to_nodes = defaultdict(set)
    for node in layer.nodes():
        output_layers = set()
        for parent in child_parent_assoc[node]:
            output_layers.add(node_layer_assoc[parent])
        output_to_nodes[frozenset(output_layers)].add(node)
    sub_layers = []
    for _output, nodes in output_to_nodes.items():
        #
        # do we need to break the layer even more?
        if len(nodes) < max_n_nodes:
            sub_layers.append(layer.__class__(nodes=nodes))
        else:
            # hoisted out of the loop: the list is invariant across chunks
            node_list = list(nodes)
            for i in range(0, len(node_list), max_n_nodes):
                sub_layers.append(layer.__class__(nodes=node_list[i:i + max_n_nodes]))
    #
    # we have to update the node layer assoc map
    for s in sub_layers:
        for node in s.nodes():
            node_layer_assoc[node] = s
    print('[O] Layer: {} ({}) into {} layers {} ({})'.format(layer.id,
                                                             layer.__class__.__name__,
                                                             len(sub_layers),
                                                             [l.id for l in sub_layers],
                                                             [len(list(l.nodes()))
                                                              for l in sub_layers]))
    return sub_layers
def build_theanok_input_layer(input_layer, n_features, feature_vals):
    """
    Build a theanok input layer mirroring a linked indicator input layer.

    The mask maps each indicator node to its flat one-hot position: the sum
    of the cardinalities of all variables preceding node.var, plus the
    node's own value.
    """
    nodes = list(input_layer.nodes())
    mask = numpy.array([sum(feature_vals[:node.var]) + node.var_val
                        for node in nodes])
    return InputLayerTheanok(n_features, len(nodes), mask,
                             layer_id=input_layer.id)
def build_theanok_layer(output_layer, input_layers, theano_inputs, dtype=float):
    """
    Creating a theanok layer representing the linked output_layer
    and considering its (linked) input layers already built.

    The (input_dim x output_dim) weight matrix W is filled from the linked
    children links: sum layers copy the child weights, product layers get
    binary 0/1 connections.  The new theano layer is double-linked to the
    already-built theano_inputs and returned.
    """
    output_nodes = list(output_layer.nodes())
    input_nodes = []
    # sorted() gives a deterministic input node ordering (layers sort by id)
    for l in sorted(input_layers):
        input_nodes.extend(list(l.nodes()))
    # print('input nodes {}'.format([n.id for n in input_nodes]))
    output_dim = len(output_nodes)
    input_dim = len(input_nodes)
    output_nodes_assoc = {node: i for i, node in enumerate(output_nodes)}
    input_nodes_assoc = {node: i for i, node in enumerate(input_nodes)}
    #
    # creating the weight matrix
    W = numpy.zeros((input_dim, output_dim), dtype=dtype)
    if isinstance(output_layer, SumLayerLinked):
        for node in output_nodes:
            for j, child in enumerate(node.children):
                # print('{}->{} ({}, {})'.format(node.id,
                #                                child.id,
                #                                input_nodes_assoc[child],
                #                                output_nodes_assoc[node]))
                W[input_nodes_assoc[child], output_nodes_assoc[node]] = node.weights[j]
    elif isinstance(output_layer, ProductLayerLinked):
        for node in output_nodes:
            for child in node.children:
                # print('{}->{} ({}, {})'.format(node.id,
                #                                child.id,
                #                                input_nodes_assoc[child],
                #                                output_nodes_assoc[node]))
                W[input_nodes_assoc[child], output_nodes_assoc[node]] = 1
    else:
        raise ValueError('Unrecognized layer type: {}'.format(output_layer.__class__.__name__))
    #
    # creating scope matrix
    # TODO: creating the scope matrix (scope is currently always None)
    scope = None
    #
    # creating layer
    layer = None
    if isinstance(output_layer, SumLayerLinked):
        layer = SumLayerTheanok(input_dim=input_dim,
                                output_dim=output_dim,
                                layer_id=output_layer.id,
                                weights=W)
    elif isinstance(output_layer, ProductLayerLinked):
        layer = ProductLayerTheanok(input_dim=input_dim,
                                    output_dim=output_dim,
                                    layer_id=output_layer.id,
                                    weights=W)
    else:
        raise ValueError('Unrecognized layer type: {}'.format(output_layer.__class__.__name__))
    #
    # double linking it
    for input_layer in theano_inputs:
        layer.add_input_layer(input_layer)
        input_layer.add_output_layer(layer)
    return layer
def build_theanok_spn_from_block_linked(spn,
                                        n_features,
                                        feature_vals,
                                        group_by=0,
                                        max_n_nodes_layer=None):
    """
    Translating a block linked spn into a block theano-keras-like spn.

    Steps: (1) bump the global node/layer id counters past the current ids,
    (2) turn a categorical input layer into indicators, (3) split every
    layer by output destinations (capped at max_n_nodes_layer nodes),
    (4) build a theanok layer per split layer bottom-up, (5) topologically
    sort, assemble a BlockLayeredSpn and compile it.

    NOTE(review): group_by is currently unused — it was only consumed by the
    commented-out scope-splitting branch below.
    """
    #
    # setting counter to the current max
    max_node_count = max([node.id for node in spn.top_down_nodes()]) + 1
    max_layer_count = max([layer.id for layer in spn.top_down_layers()]) + 1
    Node.set_id_counter(max_node_count)
    LayerLinked.set_id_counter(max_layer_count)
    #
    # transforming the categorical input layer into a layer of indicator nodes
    if isinstance(spn.input_layer(), CategoricalSmoothedLayerLinked):
        logging.info('Transforming input layer from categorical to indicators...')
        spn = linked_categorical_input_to_indicators(spn)
    node_layer_map = {node: layer for layer in spn.bottom_up_layers() for node in layer.nodes()}
    child_parent_map = retrieve_children_parent_assoc(spn)
    #
    # top down layers traversal, discarding input layer and splitting
    top_down_layers = []
    for l in list(spn.top_down_layers()):
        split_layers = split_layer_by_outputs(l, child_parent_map, node_layer_map,
                                              max_n_nodes=max_n_nodes_layer)
        # #
        # # we can split input layers even further
        # if isinstance(l, CategoricalIndicatorLayerLinked):
        #     for s in split_layers:
        #         top_down_layers.extend(split_layer_by_node_scopes(s, group_by))
        # else:
        #     if l.id == max_layer_count + 1:
        #         pass
        #         for s in split_layers:
        #             top_down_layers.extend(split_layer_by_node_scopes(s,
        #                                                               node_layer_map,
        #                                                               group_by))
        #     else:
        #         top_down_layers.extend(split_layers)
        top_down_layers.extend(split_layers)
    #
    # recomputing the node layer map (splitting changed the layers)
    node_layer_map = {node: layer for layer in top_down_layers for node in layer.nodes()}
    # for node in spn.input_layer().nodes():
    #     node_layer_map[node] = spn.input_layer()
    #
    # linked layer -> theano layer
    layer_to_layer_map = {}
    #
    # ordering input layer nodes
    #
    # proceeding bottom up
    for layer in reversed(top_down_layers):
        logging.debug('{}'.format(layer))
        #
        # retrieve the input layers
        input_layers = set()
        #
        # indicator node
        if isinstance(layer, CategoricalIndicatorLayerLinked):
            theano_layer = build_theanok_input_layer(layer, n_features, feature_vals)
        else:
            for node in layer.nodes():
                if hasattr(node, 'children'):
                    for child in node.children:
                        # if child in node_layer_map:
                        input_layers.add(node_layer_map[child])
            theano_layers = [layer_to_layer_map[l]
                             for l in sorted(input_layers) if l in layer_to_layer_map]
            #
            # building a theano layer; sum weights are real-valued,
            # product connections are binary
            dtype = None
            if isinstance(layer, SumLayerLinked):
                dtype = float
            elif isinstance(layer, ProductLayerLinked):
                dtype = int
            theano_layer = build_theanok_layer(layer, input_layers, theano_layers, dtype=dtype)
        #
        # adding it into the mapping
        layer_to_layer_map[layer] = theano_layer
    #
    # ordering the nodes
    theano_layers = [layer for layer in layer_to_layer_map.values()]
    ordered_theano_layers = topological_layer_sort(theano_layers)
    theano_layers_seq = [(layer, layer.input_layers) for layer in ordered_theano_layers]
    #
    # build and compile
    theano_spn = BlockLayeredSpn(layers=theano_layers_seq)
    #
    # printing layer stats
    logging.info(theano_spn.layer_stats())
    #
    # compiling theano functions
    theano_spn.compile()
    return theano_spn
#
# node class -> linked layer class able to host nodes of that type
NODE_LAYER_TYPE_ASSOC = {
    SumNode: SumLayerLinked,
    ProductNode: ProductLayerLinked,
    CategoricalIndicatorNode: CategoricalIndicatorLayerLinked}
def build_linked_layer_from_nodes(nodes):
    """
    Wrap a homogeneous node sequence into the matching linked layer type,
    dispatching on the class of the first node.
    """
    layer_cls = NODE_LAYER_TYPE_ASSOC[nodes[0].__class__]
    return layer_cls(nodes)
| 56,522 | 36.358229 | 97 | py |
spyn-repr | spyn-repr-master/spn/theanok/layers.py | import numpy
import theano
import theano.tensor as T
from spn import LOG_ZERO
from .initializations import Initialization, sharedX, ndim_tensor
import os
#
# inspired by Keras
#
def exp_activation(x):
    """Elementwise exponential: maps log-space values back to linear space."""
    return T.exp(x)
def log_activation(x):
    """Elementwise log, clipped to [LOG_ZERO, 0] so zero inputs do not yield -inf."""
    return T.log(x).clip(LOG_ZERO, 0.)
def log_sum_exp_activation(x, axis=1):
    """Numerically stable log-sum-exp along `axis` (max-shift trick)."""
    x_max = T.max(x, axis=axis, keepdims=True)
    return T.log(T.sum(T.exp(x - x_max), axis=axis, keepdims=True)) + x_max
class TheanokLayer(object):
    """
    Base class for theano-backed SPN layers (Keras-inspired).

    A layer holds an (input_dim x output_dim) weight matrix as a theano
    shared variable, sets of input/output layers used to assemble a block
    network, and a symbolic input placeholder; subclasses define the actual
    computation in get_output().
    """
    __module__ = os.path.splitext(os.path.basename(__file__))[0]
    # class-level counter used to auto-assign layer ids
    _counter_id = 0
    def __init__(self,
                 output_dim,
                 weights,
                 input_dim=None,
                 input_layers=None,
                 layer_id=None,
                 init='uniform',
                 activation=None,
                 constraints=None,
                 scope=None,
                 batch_size=None):
        """
        Set up dimensions, links and shared variables (build() is triggered
        via set_input_shape()).  weights may be None, in which case no W is
        created; when input_layers is given, input_dim is derived from the
        sum of their output dims.
        """
        #
        # set id
        # NOTE(review): a layer_id of 0 is falsy and would be replaced by an
        # auto-generated id — presumably ids start from nonzero; confirm
        if layer_id:
            self.id = layer_id
        else:
            self.id = TheanokLayer._counter_id
            TheanokLayer._counter_id += 1
        #
        # storing input/output layers refs
        self.input_layers = set()
        if input_layers:
            # this layer consumes the concatenated outputs of its inputs
            input_dim = sum([l.output_dim for l in input_layers])
            for i in input_layers:
                self.input_layers.add(i)
        elif not input_dim:
            raise ValueError('Input dim not specified')
        self.output_layers = set()
        if weights is not None:
            # weight matrix must be (input_dim x output_dim)
            assert weights.shape[0] == input_dim
            assert weights.shape[1] == output_dim
            self.initial_weights = weights
        else:
            self.initial_weights = None
        self.batch_size = batch_size
        #
        # setting scope matrix
        self.scope = scope
        #
        # setting dimensions (set_input_shape also calls build())
        self.input_dim = input_dim
        self.output_dim = output_dim
        self.set_input_shape((self.input_dim,))
        #
        # setting activation
        self.activation = activation
        #
        # setting optimization constraints
        # NOTE(review): attribute name 'constrains' (sic) kept as-is for
        # compatibility with any existing readers
        self.constrains = constraints
        #
        # parameters initialization
        self.init = Initialization.get(init)
    def build(self):
        """
        Allocate the symbolic input and the shared variables: W when initial
        weights were given, C when a scope matrix is present.
        """
        #
        # nb_samples X n_input_units
        self.input = T.matrix()
        #
        # n_input_units X n_output_units
        if self.initial_weights is not None:
            self.W = sharedX(self.initial_weights, name='W_{}'.format(self.id))
            self.weights = [self.W]
        #
        # n_output_units X n_vars
        if self.scope:
            self.C = sharedX(self.scope, name='C_{}'.format(self.id))
        #
        # TODO: clean the superfluous parts here from keras
    def set_input_shape(self, input_shape):
        """
        Store the (None, input_dim) shape, allocate a matching symbolic
        tensor and (re)build the layer.
        """
        if type(input_shape) not in [tuple, list]:
            raise Exception('Invalid input shape - input_shape should be a tuple of int.')
        # leading None stands for the (variable) batch dimension
        input_shape = (None,) + tuple(input_shape)
        if hasattr(self, 'input_ndim') and self.input_ndim:
            if self.input_ndim != len(input_shape):
                raise Exception('Invalid input shape - Layer expects input ndim=' +
                                str(self.input_ndim) + ', was provided with input shape '
                                + str(input_shape))
        self._input_shape = input_shape
        self.input = ndim_tensor(len(self._input_shape))
        self.build()
    # double-linking helpers used while assembling the block network
    def add_input_layer(self, layer):
        self.input_layers.add(layer)
    def add_output_layer(self, layer):
        self.output_layers.add(layer)
    def set_previous(self, previous_layers):
        """
        Replace the set of input layers (their total output dim must match
        this layer's input dim) and rebuild.
        """
        previous_output_dim = sum([l.output_dim for l in previous_layers])
        assert self.input_dim == previous_output_dim
        # self.previous = previous_layer
        self.input_layers = previous_layers
        self.build()
    def get_output(self, train=False):
        # default behavior: affine map followed by the configured activation
        X = self.get_input(train)
        output = self.activation(T.dot(X, self.W))
        return output
    def get_input(self, train=False):
        """
        Symbolic input: concatenation of the input layers' outputs, or this
        layer's own placeholder when it has no input layers.
        """
        # if hasattr(self, 'previous'):
        #     return self.previous.get_output(train=train)
        if hasattr(self, 'input_layers') and self.input_layers:
            # sorted by layer id (see __lt__) for a deterministic ordering
            previous_outputs = [l.get_output(train=train) for l in sorted(self.input_layers)]
            return theano.tensor.concatenate(previous_outputs, axis=1)
        elif hasattr(self, 'input'):
            return self.input
        else:
            raise Exception('Layer is not connected\
            and is not an input layer.')
    def get_weights(self):
        """Return the current numpy values of all trainable parameters."""
        weights = []
        for p in self.params:
            weights.append(p.get_value())
        return weights
    def n_nodes(self):
        return self.output_dim
    def n_edges(self):
        # NOTE(review): counts strictly positive entries of W — presumably
        # weights are non-negative here; confirm if negative weights appear
        if hasattr(self, 'W'):
            return numpy.sum(self.W.get_value() > 0.0)
        else:
            return 0
    # identity, ordering and hashing are all based on the layer id
    def __eq__(self, layer):
        return self.id == layer.id
    def __lt__(self, layer):
        return self.id < layer.id
    def __hash__(self):
        # print('has')
        # from pprint import pprint
        # pprint(vars(self))
        return hash(self.id)
    def compile(self,):
        """
        Creating a theano function to retrieve the layer output
        """
        self.evaluate_layer_func = theano.function([self.input], self.get_output())
        # output = self.get_output()
        # self.evaluate_layer_func = theano.function([self.get_input()], output)
    def evaluate(self, input_signal, flatten=False):
        """Run the compiled layer function on a numpy input."""
        res = self.evaluate_layer_func(input_signal)
        if flatten:
            res = res.flatten()
        return res
    def stats(self):
        # "in x out, edges (density)" summary string
        n_edges = self.n_edges()
        stats_str = '{1}\tx\t{0},\t{2}\t({3})'.format(self.n_nodes(),
                                                      self.input_dim,
                                                      n_edges,
                                                      n_edges / (self.n_nodes() * self.input_dim))
        return stats_str
    def __repr__(self):
        layer_str = 'id:{0} [{1}]->[{2}]\n'.format(self.id,
                                                   ','.join([str(l.id)
                                                             for l in sorted(self.input_layers)]),
                                                   ','.join([str(l.id)
                                                             for l in sorted(self.output_layers)]))
        weights_str = ""
        if hasattr(self, 'W'):
            weights_str = '\n{}\n'.format(self.W.get_value())
        div = '\n**********************************************************\n'
        stats_str = self.stats()
        return layer_str + weights_str + stats_str + div
class SumLayer(TheanokLayer):
    """Linear-space sum layer: weighted sum of inputs, then a clipped log."""
    def __init__(self,
                 input_dim,
                 output_dim,
                 weights,
                 init='uniform'):
        """
        Properly calling basic layer; the activation is fixed to
        log_activation.
        """
        super().__init__(input_dim=input_dim,
                         output_dim=output_dim,
                         weights=weights,
                         init=init,
                         activation=log_activation,
                         constraints=None)
    def build(self):
        #
        # building the base layer
        super().build()
        #
        # then storing the weights as the trainable parameters
        self.params = [self.W]
    def __repr__(self):
        return '[sum layer:]\n' + TheanokLayer.__repr__(self)
class InputLayer_logspace(TheanokLayer):
    """
    Log-space input layer: selects one flat input entry per output unit via
    an integer mask and returns its clipped log.
    """
    def __init__(self,
                 input_dim,
                 output_dim,
                 mask,
                 layer_id=None):
        """
        mask[i] is the flat input index feeding output unit i; there must be
        exactly one entry per output unit.
        """
        assert len(mask) == output_dim
        self.mask = mask
        super().__init__(input_dim=input_dim,
                         output_dim=output_dim,
                         weights=None,
                         layer_id=layer_id,
                         # activation=log_sum_exp_activation,
                         constraints=None)
    def build(self):
        #
        # building the base layer
        super().build()
        # the mask is stored as an int-typed shared variable; no trainables
        self.M = sharedX(self.mask, name='mask_{}'.format(self.id), dtype=int)
        self.params = []
    def get_output(self, train=False):
        # gather the masked columns and take their log, clipped into
        # [LOG_ZERO, 0] to avoid -inf on zero inputs
        X = self.get_input(train)
        return T.clip(T.log(X[:, self.M]), LOG_ZERO, 0)
    def __repr__(self):
        return '[input layer log:]\n' + TheanokLayer.__repr__(self)
class SumLayer_logspace(TheanokLayer):
    """
    Log-space sum layer: computes log(sum_j w_ij * exp(x_j)) per output unit
    with the numerically stable max-shift log-sum-exp.
    """
    # __module__ = os.path.splitext(os.path.basename(__file__))[0]
    def __init__(self,
                 input_dim,
                 output_dim,
                 weights,
                 layer_id=None,
                 init='uniform'):
        """
        The activation function is the logsumexp,
        for numerical stability here we are assuming the product layer to be linear layer
        """
        super().__init__(input_dim=input_dim,
                         output_dim=output_dim,
                         weights=weights,
                         init=init,
                         layer_id=layer_id,
                         # activation=log_sum_exp_activation,
                         constraints=None)
    def build(self):
        #
        # building the base layer
        super().build()
        #
        # then storing the weights as parameters
        self.params = [self.W]
    def get_output(self, train=False):
        X = self.get_input(train)
        # broadcast X (batch, in) against log W (in, out):
        # X -> (batch, in, 1) so the sum runs over the input axis
        X = T.log(self.W) + X.dimshuffle(0, 1, 'x')
        x_max = T.max(X, axis=1, keepdims=True)
        return (T.log(T.sum(T.exp(X - x_max), axis=1, keepdims=True)) + x_max).reshape((X.shape[0],
                                                                                        self.W.shape[1]))
    def __repr__(self):
        return '[sum layer log:]\n' + TheanokLayer.__repr__(self)
class MaxLayer_logspace(TheanokLayer):
    """
    Log-space max layer: like SumLayer_logspace but takes the max over
    weighted children instead of summing (used for MPE-style evaluation).
    """
    def __init__(self,
                 input_dim,
                 output_dim,
                 weights,
                 init='uniform',
                 layer_id=None,
                 batch_size=None):
        """
        The activation function is a max (still in the log space)
        """
        super().__init__(input_dim=input_dim,
                         output_dim=output_dim,
                         weights=weights,
                         init=init,
                         layer_id=layer_id,
                         # activation=log_sum_exp_activation,
                         constraints=None,
                         batch_size=batch_size)
    def build(self):
        #
        # building the base layer
        super().build()
        # # FIXME: this shall cope with a variable batch size
        # # storing a tensor for the max position values
        # weight_shape = self.W.shape.eval()
        # m_values = numpy.zeros((self.batch_size, weight_shape[0], weight_shape[1]))
        # self.M = sharedX(m_values, name='M_{}'.format(self.id))
        #
        # then storing the weights as parameters
        self.params = [self.W]
    def get_output(self, train=False):
        X = self.get_input(train)
        # same broadcast as the log-space sum layer, but reduced with a max
        X = T.log(self.W) + X.dimshuffle(0, 1, 'x')
        X_max = T.max(X, axis=1)
        return X_max
    def compile(self,):
        #
        # and adding a function to retrieve the max map:
        # a binary mask that has 1 where the max connection was taken
        # (note: ties mark more than one entry — see TODO below)
        X = self.input
        X = T.log(self.W) + X.dimshuffle(0, 1, 'x')
        #
        # TODO: mask only one value (argmax)
        M = T.switch(T.eq(T.max(X, axis=1, keepdims=True), X), 1, 0)
        self.evaluate_layer_func = theano.function([self.input], [self.get_output(), M])
    def __repr__(self):
        return '[max layer log:]\n' + TheanokLayer.__repr__(self)
class ProductLayer(TheanokLayer):
    """
    Linear-space product layer: a dot product with the connection matrix
    followed by exp (inputs are presumably log-values — summing logs and
    exponentiating yields the product; confirm against the builder).
    """
    def __init__(self,
                 input_dim,
                 output_dim,
                 weights,
                 layer_id=None,
                 batch_size=None):
        """
        Properly calling basic layer; the activation is fixed to
        exp_activation.
        """
        super().__init__(input_dim=input_dim,
                         output_dim=output_dim,
                         weights=weights,
                         activation=exp_activation,
                         layer_id=layer_id,
                         batch_size=batch_size)
    def build(self):
        #
        # building the base layer
        super().build()
        #
        # Shall we have to store the parameters for product layers?
        self.params = [self.W]
    def __repr__(self):
        return '[prod layer:]\n' + TheanokLayer.__repr__(self)
class ProductLayer_logspace(TheanokLayer):
    """
    Log-space product layer: the plain dot product with the connection
    matrix sums the log-inputs selected by W (presumably 0/1), i.e. it
    computes products in the log domain — no activation is applied.
    """
    # __module__ = os.path.splitext(os.path.basename(__file__))[0]
    def __init__(self,
                 input_dim,
                 output_dim,
                 weights,
                 layer_id=None,
                 batch_size=None):
        """
        No activation function, the output is in the log domain
        """
        super().__init__(input_dim=input_dim,
                         output_dim=output_dim,
                         weights=weights,
                         layer_id=layer_id,
                         batch_size=batch_size)
    def get_output(self, train=False):
        X = self.get_input(train)
        output = T.dot(X, self.W)
        return output
    def build(self):
        #
        # building the base layer
        super().build()
        #
        # Shall we have to store the parameters for product layers?
        self.params = [self.W]
    def __repr__(self):
        return '[prod layer log:]\n' + TheanokLayer.__repr__(self)
| 13,956 | 28.259958 | 105 | py |
normalizing_flows | normalizing_flows-master/test.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.distributions as D
from torch.utils.data import DataLoader, Dataset
import unittest
from unittest.mock import MagicMock
from maf import MADE, MADEMOG, MAF, MAFMOG, RealNVP, BatchNorm, LinearMaskedCoupling, train
from glow import Actnorm, Invertible1x1Conv, AffineCoupling, Squeeze, Split, FlowStep, FlowLevel, Glow, train_epoch
from data import fetch_dataloaders
# shared test configuration; MagicMock mimics the argparse namespace the
# training code expects, so any unset attribute resolves to a mock
args = MagicMock()
args.input_size = 1000
args.batch_size = 100
args.device = torch.device('cpu')
NORM_TOL = 1e-4  # tolerance for difference in vector norms
# fixed seed so layer/flow invertibility checks are reproducible
torch.manual_seed(1)
# --------------------
# Test invertibility and log dets of individual layers
# --------------------
def test_layer(l, input_dims, cond_label_size=None, norm_tol=NORM_TOL):
    """
    Check a flow layer's invertibility: l.inverse(l(x)) must reconstruct x
    and the forward/inverse log-determinants must cancel, both up to
    norm_tol (measured as vector norms of the differences).
    """
    x = torch.randn(input_dims)
    batch_size = input_dims[0]
    labels = None
    if cond_label_size is not None:
        # one-hot labels cycling through the classes to fill the batch
        labels = torch.eye(cond_label_size).repeat(batch_size // cond_label_size + 1, 1)[:batch_size]
    cond = () if labels is None else (labels,)
    u, logd = l(x, *cond)
    recon_x, inv_logd = l.inverse(u, *cond)
    d_data, d_logd = (recon_x - x).norm(), (logd + inv_logd).norm()
    assert d_data < norm_tol, 'Data reconstruction fail - norm of difference = {}.'.format(d_data)
    assert d_logd < norm_tol, 'Log determinant inversion fail. - norm of difference = {}'.format(d_logd)
class TestFlowLayers(unittest.TestCase):
    """Invertibility / log-det consistency tests for individual flow layers."""
    def test_batch_norm(self):
        l = BatchNorm(args.input_size)
        # train mode repeatedly, so the layer sees several batches before
        # the eval-mode checks
        l.train()
        for i in range(3):
            with self.subTest(train_loop_iter=i):
                test_layer(l, input_dims=(args.batch_size, args.input_size))
        l.eval()
        for i in range(2):
            with self.subTest(eval_loop_iter=i):
                test_layer(l, input_dims=(args.batch_size, args.input_size))
    def test_linear_coupling(self):
        # alternating binary mask over the input dimensions
        mask = torch.arange(args.input_size).float() % 2
        # unconditional
        test_layer(LinearMaskedCoupling(args.input_size, hidden_size=10, n_hidden=1, mask=mask), input_dims=(args.batch_size, args.input_size))
        test_layer(LinearMaskedCoupling(args.input_size, hidden_size=10, n_hidden=2, mask=mask), input_dims=(args.batch_size, args.input_size))
        # conditional
        cond_label_size = 10
        test_layer(LinearMaskedCoupling(args.input_size, hidden_size=10, n_hidden=1, mask=mask, cond_label_size=cond_label_size),
                   input_dims=(args.batch_size, args.input_size), cond_label_size=cond_label_size)
        test_layer(LinearMaskedCoupling(args.input_size, hidden_size=10, n_hidden=2, mask=mask, cond_label_size=cond_label_size),
                   input_dims=(args.batch_size, args.input_size), cond_label_size=cond_label_size)
    def test_made(self):
        test_layer(MADE(args.input_size, hidden_size=10, n_hidden=3), input_dims=(args.batch_size, args.input_size))
    def test_actnorm(self):
        test_layer(Actnorm(param_dim=(1,3,1,1)), input_dims=(args.batch_size, 3, 50, 50))
    def test_invertible1x1conv(self):
        # looser tolerance: matrix inversion error accumulates
        test_layer(Invertible1x1Conv(n_channels=24), input_dims=(args.batch_size, 24, 50, 50), norm_tol=1e-3)
        test_layer(Invertible1x1Conv(n_channels=12, lu_factorize=True), input_dims=(args.batch_size, 12, 50, 50), norm_tol=1e-3)
    def test_affinecoupling(self):
        test_layer(AffineCoupling(n_channels=4, width=12), input_dims=(args.batch_size, 4, 50, 50), norm_tol=5e-4)
    def test_squeeze(self):
        # squeeze has no log-det: just check forward/inverse composition
        net = Squeeze()
        x = torch.rand(args.batch_size, 12, 20, 30)
        recon_x = net.inverse(net(x))
        y = net(net.inverse(x))
        assert torch.allclose(x, recon_x), 'Data reconstruction failed.'
        assert torch.allclose(x, y)
    def test_split(self):
        net = Split(n_channels=10)
        x = torch.randn(args.batch_size, 10, 20, 30)
        x1, z2, logd = net(x)
        recon_x, inv_logd = net.inverse(x1, z2)
        d_data, d_logd = (recon_x - x).norm(), (logd + inv_logd).norm()
        assert d_data < 1e-4, 'Data reconstruction fail - norm of difference = {}.'.format(d_data)
        assert d_logd < 1e-4, 'Log determinant inversion fail. - norm of difference = {}'.format(d_logd)
    def test_flowstep(self):
        test_layer(FlowStep(n_channels=4, width=12), input_dims=(args.batch_size, 4, 50, 50), norm_tol=1e-3)
    def test_flowlevel(self):
        net = FlowLevel(n_channels=3, width=12, depth=2)
        x = torch.randn(args.batch_size, 3, 32, 32)
        x1, z2, logd = net(x)
        recon_x, inv_logd = net.inverse(x1, z2)
        d_data, d_logd = (recon_x - x).norm(), (logd + inv_logd).norm()
        assert d_data < 5e-4, 'Data reconstruction fail - norm of difference = {}.'.format(d_data)
        assert d_logd < 5e-4, 'Log determinant inversion fail. - norm of difference = {}'.format(d_logd)
    def test_glow(self):
        net = Glow(width=12, depth=3, n_levels=3)
        x = torch.randn(args.batch_size, 3, 32, 32)
        zs, logd = net(x)
        recon_x, inv_logd = net.inverse(zs)
        # also draw fresh samples through the inverse path
        y, _ = net.inverse(batch_size=args.batch_size)
        d_data, d_data_y, d_logd = (recon_x - x).norm(), (x - y).norm(), (logd + inv_logd).norm()
        assert d_data < 1e-3, 'Data reconstruction fail - norm of difference = {}.'.format(d_data)
        # assert d_data_y < 1e-3, 'Data reconstruction (inv > base > inv) fail - norm of difference = {}.'.format(d_data_y)
        assert d_logd < 1e-3, 'Log determinant inversion fail. - norm of difference = {}'.format(d_logd)
# --------------------
# Test MAF
# --------------------
# Test flow invertibility (KL=0) at initalization
@torch.no_grad()
def test_untrained_model(model, cond_label_size=None):
# 1. sample Gaussian data;
# 2. run model forward and reverse;
# 3. roconstruct data;
# 4. measure KL between Gaussian fitted to the data and the base distribution
n_samples = 1000
data = model.base_dist.sample((n_samples,))
labels = None
if cond_label_size is not None: # make one hot labels
labels = torch.eye(cond_label_size).repeat(n_samples // cond_label_size + 1, 1)[:n_samples]
u, logd = model(data, labels)
recon_data, _ = model.inverse(u, labels)
recon_dist = D.Normal(recon_data.mean(0), recon_data.var(0).sqrt())
kl = D.kl.kl_divergence(recon_dist, model.base_dist).sum(-1)
print('KL (q || p) = {:.4f}'.format(kl))
# Test flow can train to random numbers in N(0,1) ie KL(random numbers driving flow || base distribution) = 0
@torch.no_grad()
def test_trained_model(model, dl, cond_label_size=None):
# 1. sample toy data;
# 2. run model forward and generate random numbers driving the model;
# 3. measure KL between Gaussian fitted to random numbers driving the model and the base distribution
data, labels = next(iter(dl))
labels = None
if cond_label_size is not None: # make one hot labels
labels = torch.eye(cond_label_size).repeat(n_samples // cond_label_size + 1, 1)[:n_samples]
u, logd = model(data, labels)
u_dist = D.Normal(u.mean(0), u.std(0))
kl = D.kl.kl_divergence(u_dist, model.base_dist).sum()
print('KL (u || p) = {:.4f}'.format(kl))
class TestMAFUntrained(unittest.TestCase):
    """
    Run test_untrained_model over MADE/MAF/RealNVP/MOG variants: at
    initialization the flow should be invertible, so the printed KL between
    the reconstruction distribution and the base distribution is ~0.
    """
    def setUp(self):
        # label size used by all the conditional variants below
        self.cond_label_size = 2
    def test_made_1_hidden(self):
        test_untrained_model(MADE(input_size=2, hidden_size=10, n_hidden=1, cond_label_size=None, activation='relu', input_order='sequential'))
    def test_made_1_hidden_conditional(self):
        test_untrained_model(MADE(input_size=2, hidden_size=10, n_hidden=1, cond_label_size=self.cond_label_size, activation='relu',
                                  input_order='sequential'), self.cond_label_size)
    def test_made_2_hidden(self):
        test_untrained_model(MADE(input_size=2, hidden_size=10, n_hidden=2, cond_label_size=None, activation='relu', input_order='sequential'))
    def test_made_2_hidden_conditional(self):
        test_untrained_model(MADE(input_size=2, hidden_size=10, n_hidden=2, cond_label_size=self.cond_label_size, activation='relu',
                                  input_order='sequential'), self.cond_label_size)
    def test_made_200_inputs_random_mask(self):
        test_untrained_model(MADE(input_size=200, hidden_size=10, n_hidden=2, cond_label_size=None, activation='relu', input_order='random'))
    def test_maf_1_blocks_no_bn(self):
        test_untrained_model(MAF(n_blocks=1, input_size=2, hidden_size=10, n_hidden=1, cond_label_size=None,
                                 activation='relu', input_order='sequential', batch_norm=False))
    def test_maf_1_blocks_bn(self):
        test_untrained_model(MAF(n_blocks=1, input_size=2, hidden_size=10, n_hidden=1, cond_label_size=None,
                                 activation='relu', input_order='sequential', batch_norm=True))
    def test_maf_2_blocks(self):
        test_untrained_model(MAF(n_blocks=2, input_size=2, hidden_size=10, n_hidden=1, cond_label_size=None,
                                 activation='relu', input_order='sequential', batch_norm=True))
    def test_maf_1_blocks_conditional(self):
        test_untrained_model(MAF(n_blocks=1, input_size=2, hidden_size=10, n_hidden=1, cond_label_size=self.cond_label_size,
                                 activation='relu', input_order='sequential', batch_norm=True), self.cond_label_size)
    def test_maf_2_blocks_conditional(self):
        test_untrained_model(MAF(n_blocks=2, input_size=2, hidden_size=10, n_hidden=1, cond_label_size=self.cond_label_size,
                                 activation='relu', input_order='sequential', batch_norm=True), self.cond_label_size)
    def test_realnvp_1_block_200_inputs(self):
        test_untrained_model(RealNVP(n_blocks=1, input_size=200, hidden_size=10, n_hidden=2, cond_label_size=None))
    def test_realnvp_2_block_200_inputs(self):
        test_untrained_model(RealNVP(n_blocks=2, input_size=200, hidden_size=10, n_hidden=2, cond_label_size=None))
    def test_realnvp_2_blocks_conditional(self):
        test_untrained_model(RealNVP(n_blocks=2, input_size=200, hidden_size=10, n_hidden=2, cond_label_size=self.cond_label_size),
                             self.cond_label_size)
    def test_mademog_1_comp(self):
        test_untrained_model(MADEMOG(n_components=1, input_size=10, hidden_size=10, n_hidden=2, cond_label_size=None, activation='relu',
                                     input_order='sequential'))
    def test_mademog_10_comp(self):
        test_untrained_model(MADEMOG(n_components=10, input_size=200, hidden_size=10, n_hidden=2, cond_label_size=None, activation='relu',
                                     input_order='sequential'))
    def test_mafmog_1_block_1_comp(self):
        test_untrained_model(MAFMOG(n_blocks=1, n_components=1, input_size=2, hidden_size=10, n_hidden=1, cond_label_size=self.cond_label_size,
                                    activation='relu', input_order='sequential', batch_norm=True), self.cond_label_size)
    def test_mafmog_2_blocks_10_comp_conditional(self):
        test_untrained_model(MAFMOG(n_blocks=2, n_components=10, input_size=2, hidden_size=10, n_hidden=1, cond_label_size=self.cond_label_size,
                                    activation='relu', input_order='sequential', batch_norm=True), self.cond_label_size)
class TestMAFTrained(unittest.TestCase):
    """Smoke-train each MAF-family density model on the TOY dataset for a few epochs
    and print before/after test metrics."""
    def setUp(self):
        args = MagicMock()  # stands in for the argparse namespace expected by train()
        args.cond_label_size = None
        args.batch_size = 100
        args.device = torch.device('cpu')
        dl, _ = fetch_dataloaders('TOY', args.batch_size, args.device, flip_toy_var_order=False)

        def _train(model, n_steps):
            # report test metrics before and after n_steps epochs of training
            optimizer = torch.optim.Adam(model.parameters(), lr=1e-3, weight_decay=1e-6)
            print('Untrained: ')
            test_trained_model(model, dl)
            for _ in range(n_steps):
                train(model, dl, optimizer, 0, args)
            print('Trained: ')
            test_trained_model(model, dl)

        # exposed as an instance attribute so each test method can reuse the harness
        self._train = _train

    def test_made(self):
        model = MADE(input_size=2, hidden_size=100, n_hidden=1, cond_label_size=None, activation='relu', input_order='sequential')
        self._train(model, 10)

    def test_mademog(self):
        model = MADEMOG(n_components=10, input_size=2, hidden_size=100, n_hidden=1, cond_label_size=None, activation='relu', input_order='sequential')
        self._train(model, 5)

    def test_maf_5(self):
        model = MAF(n_blocks=5, input_size=2, hidden_size=100, n_hidden=1, cond_label_size=None,
                    activation='relu', input_order='sequential', batch_norm=True)
        self._train(model, 1)

    def test_mafmog_5_comp_1(self):
        model = MAFMOG(n_blocks=5, n_components=1, input_size=2, hidden_size=100, n_hidden=1,
                       cond_label_size=None, activation='relu', input_order='sequential', batch_norm=True)
        self._train(model, 5)

    def test_mafmog_5_comp_10(self):
        model = MAFMOG(n_blocks=5, n_components=10, input_size=2, hidden_size=100, n_hidden=1,
                       cond_label_size=None, activation='relu', input_order='sequential', batch_norm=True)
        self._train(model, 10)

    def test_train_realnvp_5(self):
        model = RealNVP(n_blocks=5, input_size=2, hidden_size=100, n_hidden=1, cond_label_size=None, batch_norm=True)
        self._train(model, 5)
# --------------------
# Test Glow
# --------------------
# Generate a dataset from a 2-dim Gaussian distribution and expand to `image size` of (3,32,32)
class ToyDistribution(D.Distribution):
    """2-dim toy density p(x1, x2) = N(x1; x2^2/4, 1) * N(x2; 0, 4), with samples
    tiled/expanded so they can stand in for image-shaped inputs in the Glow tests."""
    def __init__(self, flip_var_order):
        super().__init__()
        self.flip_var_order = flip_var_order
        self.p_x2 = D.Normal(0, 4)
        # conditional p(x1 | x2)
        self.p_x1 = lambda x2: D.Normal(0.25 * x2**2, 1)

    def rsample(self, sample_shape=torch.Size()):
        x2 = self.p_x2.sample(sample_shape)
        x1 = self.p_x1(x2).sample()
        # NOTE(review): the two branches tile to different layouts (stack on the
        # last dim vs stack on dim 0 + repeat(16,1)); this looks tailored to the
        # (3, 32, 32) input shape used by TOY.__getitem__ -- confirm before changing.
        if self.flip_var_order:
            return torch.stack((x2, x1), dim=-1).expand(3,-1,-1)
        else:
            return torch.stack((x1, x2), dim=0).repeat(16,1).expand(3,-1,-1)

    def log_prob(self, value):
        # density factorizes as p(x1 | x2) p(x2); un-flip first if variables were swapped
        if self.flip_var_order:
            value = value.flip(1)
        return self.p_x1(value[:,1]).log_prob(value[:,0]) + self.p_x2.log_prob(value[:,1])
class TOY(Dataset):
    """Toy dataset wrapping ToyDistribution; yields image-shaped samples with dummy labels."""
    def __init__(self, dataset_size=2500, flip_var_order=False):
        self.base_dist = ToyDistribution(flip_var_order)
        self.dataset_size = dataset_size
        self.input_size = 32
        self.label_size = 1

    def __len__(self):
        return self.dataset_size

    def __getitem__(self, i):
        sample = self.base_dist.sample((32,))
        label = torch.zeros(self.label_size)
        return sample, label
class TestGlowUntrained(unittest.TestCase):
    """Sanity-check untrained Glow models: a forward/inverse round-trip should
    approximately reproduce samples from the base distribution."""
    def setUp(self):
        def test_kl(model):
            n_samples = 1000
            data = model.base_dist.sample((n_samples,3,32,32)).squeeze()
            zs, logd = model(data)
            recon_data, _ = model.inverse(zs)
            # fit a diagonal Gaussian to the reconstruction and compare to the base dist
            recon_dist = D.Normal(recon_data.mean(0), recon_data.var(0).sqrt())
            kl = D.kl.kl_divergence(recon_dist, model.base_dist).mean()
            print('Model: depth {}, levels {}; Avg per pixel KL (q||p) = {:.4f}'.format(
                  len(model.flowstep), len(model.flowlevels), kl))
        self.test_kl = test_kl

    def test_glow_depth_1_levels_1(self):
        # 1. sample data; 2. run model forward and reverse; 3. reconstruct data; 4. measure KL between Gaussian fitted to the data and the base distribution
        self.test_kl(Glow(width=12, depth=1, n_levels=1))

    def test_glow_depth_2_levels_2(self):
        # 1. sample data; 2. run model forward and reverse; 3. reconstruct data; 4. measure KL between Gaussian fitted to the data and the base distribution
        self.test_kl(Glow(width=12, depth=2, n_levels=2))
class TestGlowTrained(unittest.TestCase):
    """Smoke-train small Glow models on TOY data and report latent KL / bits-per-pixel."""
    def setUp(self):
        args = MagicMock()  # stands in for the argparse namespace train_epoch expects
        args.device = torch.device('cpu')
        dl = DataLoader(TOY(), batch_size=100)

        @torch.no_grad()
        def _test_trained_model(model, dl, cond_label_size=None):
            data, _ = next(iter(dl))
            zs, logd = model(data)
            zs = torch.cat([z.flatten(1) for z in zs], dim=1)  # flatten the z's and concat
            # KL between a Gaussian fit to the latents and the N(0,1) base dist
            zs_dist = D.Normal(zs.mean(0), zs.std(0))
            kl = D.kl.kl_divergence(zs_dist, model.base_dist).mean()
            print('Mean per pixel KL (zs || N(0,1)) = {:.4f}'.format(kl))
            print('Mean data bits per pixel: {:.4f}'.format(-model.log_prob(data, bits_per_pixel=True).mean(0)))

        def _train(model, n_steps):
            optimizer = torch.optim.Adam(model.parameters(), lr=1e-3, weight_decay=1e-6)
            print('Model: depth {}, levels {}. Untrained:'.format(len(model.flowstep), len(model.flowlevels)))
            _test_trained_model(model, dl)
            for _ in range(n_steps):
                # NOTE(review): glow.train_epoch is defined with 6 positional params
                # (model, dataloader, optimizer, writer, epoch, args) but is called
                # here with 5 -- confirm which train_epoch is imported by this module.
                train_epoch(model, dl, optimizer, 0, args)
            print('Trained: ')
            _test_trained_model(model, dl)

        self._train = _train

    def test_glow_1_1(self):
        model = Glow(width=12, depth=1, n_levels=1)
        self._train(model, 3)

    def test_glow_3_3(self):
        model = Glow(width=24, depth=3, n_levels=3)
        self._train(model, 3)
if __name__ == '__main__':
    # discover and run all TestCase classes in this module
    unittest.main()
| 17,209 | 45.016043 | 156 | py |
normalizing_flows | normalizing_flows-master/data.py | from functools import partial
import numpy as np
import torch
import torchvision.transforms as T
from torch.utils.data import DataLoader, TensorDataset
import datasets
# --------------------
# Helper functions
# --------------------
def logit(x, eps=1e-5):
x.clamp_(eps, 1 - eps)
return x.log() - (1 - x).log()
def one_hot(x, label_size):
    """One-hot encode integer labels x into a float tensor of width label_size."""
    n = len(x)
    encoded = torch.zeros(n, label_size, device=x.device)
    encoded[torch.arange(n), x] = 1
    return encoded
def load_dataset(name):
    """Return the dataset class ``name`` from the ``datasets.<name.lower()>`` module.

    Replaces the previous ``exec('from ... import ...')`` + ``locals()`` hack,
    which only worked by accident of CPython's locals handling, with the
    idiomatic (and statically analyzable) importlib lookup.
    """
    import importlib
    module = importlib.import_module('datasets.{}'.format(name.lower()))
    return getattr(module, name)
# --------------------
# Dataloaders
# --------------------
def fetch_dataloaders(dataset_name, batch_size, device, flip_toy_var_order=False, toy_train_size=25000, toy_test_size=5000):
    """Construct (train_loader, test_loader) for the named dataset.

    Args:
        dataset_name: one of GAS/POWER/HEPMASS/MINIBOONE/BSDS300/MNIST/TOY/MOONS/CIFAR10.
        batch_size: batch size for both loaders.
        device: torch.device; cuda enables pinned memory and a worker process.
        flip_toy_var_order: passed through to the TOY/MOONS constructors.
        toy_train_size, toy_test_size: dataset sizes for TOY/MOONS.

    Returns:
        (train_loader, test_loader); input_dims/input_size/label_size/lam are
        attached to each underlying dataset object.

    Raises:
        ValueError: for an unrecognized dataset name.
    """
    # grab datasets
    if dataset_name in ['GAS', 'POWER', 'HEPMASS', 'MINIBOONE', 'BSDS300']:  # use the constructors by MAF authors
        dataset = load_dataset(dataset_name)()

        # join train and val data again
        train_data = np.concatenate((dataset.trn.x, dataset.val.x), axis=0)
        # construct datasets
        train_dataset = TensorDataset(torch.from_numpy(train_data.astype(np.float32)))
        test_dataset = TensorDataset(torch.from_numpy(dataset.tst.x.astype(np.float32)))

        input_dims = dataset.n_dims
        label_size = None
        lam = None

    elif dataset_name in ['MNIST']:
        dataset = load_dataset(dataset_name)()

        # join train and val data again
        train_x = np.concatenate((dataset.trn.x, dataset.val.x), axis=0).astype(np.float32)
        train_y = np.concatenate((dataset.trn.y, dataset.val.y), axis=0).astype(np.float32)
        # construct datasets
        train_dataset = TensorDataset(torch.from_numpy(train_x), torch.from_numpy(train_y))
        test_dataset = TensorDataset(torch.from_numpy(dataset.tst.x.astype(np.float32)),
                                     torch.from_numpy(dataset.tst.y.astype(np.float32)))

        input_dims = dataset.n_dims
        label_size = 10
        lam = dataset.alpha

    elif dataset_name in ['TOY', 'MOONS']:  # use own constructors
        train_dataset = load_dataset(dataset_name)(toy_train_size, flip_toy_var_order)
        test_dataset = load_dataset(dataset_name)(toy_test_size, flip_toy_var_order)

        input_dims = train_dataset.input_size
        label_size = train_dataset.label_size
        lam = None

    # imaging dataset pulled from torchvision
    elif dataset_name in ['CIFAR10']:
        label_size = 10
        # MAF logit transform parameter (cf. MAF paper 4.3). MNIST (1e-6) is handled
        # by its own branch above, so this branch always uses the CIFAR value; the
        # old `dataset_name == 'mnist'` comparison here could never be true.
        lam = 5e-2

        # MAF paper converts image data to logit space via transform described in section 4.3
        image_transforms = T.Compose([T.ToTensor(),
                                      T.Lambda(lambda x: x + torch.rand(*x.shape) / 256.),  # dequantize (cf MAF paper)
                                      T.Lambda(lambda x: logit(lam + (1 - 2 * lam) * x))])  # to logit space (cf MAF paper)
        target_transforms = T.Lambda(lambda x: partial(one_hot, label_size=label_size)(x))

        train_dataset = load_dataset(dataset_name)(root=datasets.root, train=True, transform=image_transforms, target_transform=target_transforms)
        # bug fix: the test split was previously constructed with train=True,
        # so the model was evaluated on its own training data
        test_dataset = load_dataset(dataset_name)(root=datasets.root, train=False, transform=image_transforms, target_transform=target_transforms)

        input_dims = train_dataset[0][0].shape

    else:
        raise ValueError('Unrecognized dataset.')

    # keep input dims, input size and label size
    train_dataset.input_dims = input_dims
    train_dataset.input_size = int(np.prod(input_dims))
    train_dataset.label_size = label_size
    train_dataset.lam = lam

    test_dataset.input_dims = input_dims
    test_dataset.input_size = int(np.prod(input_dims))
    test_dataset.label_size = label_size
    test_dataset.lam = lam

    # construct dataloaders; '==' not 'is' -- identity of string literals is undefined
    kwargs = {'num_workers': 1, 'pin_memory': True} if device.type == 'cuda' else {}

    train_loader = DataLoader(train_dataset, batch_size, shuffle=True, **kwargs)
    test_loader = DataLoader(test_dataset, batch_size, shuffle=False, **kwargs)

    return train_loader, test_loader
| 4,218 | 36.336283 | 146 | py |
normalizing_flows | normalizing_flows-master/glow.py | """
Glow: Generative Flow with Invertible 1x1 Convolutions
arXiv:1807.03039v2
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.distributions as D
import torchvision.transforms as T
from torchvision.utils import save_image, make_grid
from torch.utils.data import DataLoader
from torch.utils.checkpoint import checkpoint
from torchvision.datasets import MNIST
from datasets.celeba import CelebA
import numpy as np
from tensorboardX import SummaryWriter
import os
import time
import math
import argparse
import pprint
parser = argparse.ArgumentParser()
# action
parser.add_argument('--train', action='store_true', help='Train a flow.')
parser.add_argument('--evaluate', action='store_true', help='Evaluate a flow.')
parser.add_argument('--generate', action='store_true', help='Generate samples from a model.')
parser.add_argument('--visualize', action='store_true', help='Visualize manipulated attribures.')
parser.add_argument('--restore_file', type=str, help='Path to model to restore.')
parser.add_argument('--seed', type=int, help='Random seed to use.')
# paths and reporting
parser.add_argument('--data_dir', default='/mnt/disks/data/', help='Location of datasets.')
parser.add_argument('--output_dir', default='./results/{}'.format(os.path.splitext(__file__)[0]))
parser.add_argument('--results_file', default='results.txt', help='Filename where to store settings and test results.')
parser.add_argument('--log_interval', type=int, default=2, help='How often to show loss statistics and save samples.')
parser.add_argument('--save_interval', type=int, default=50, help='How often to save during training.')
parser.add_argument('--eval_interval', type=int, default=1, help='Number of epochs to eval model and save model checkpoint.')
# data
parser.add_argument('--dataset', type=str, help='Which dataset to use.')
# model parameters
parser.add_argument('--depth', type=int, default=32, help='Depth of the network (cf Glow figure 2).')
parser.add_argument('--n_levels', type=int, default=3, help='Number of levels of of the network (cf Glow figure 2).')
parser.add_argument('--width', type=int, default=512, help='Dimension of the hidden layers.')
parser.add_argument('--z_std', type=float, help='Pass specific standard devition during generation/sampling.')
# training params
parser.add_argument('--batch_size', type=int, default=16, help='Training batch size.')
parser.add_argument('--batch_size_init', type=int, default=256, help='Batch size for the data dependent initialization.')
parser.add_argument('--n_epochs', type=int, default=10, help='Number of epochs to train.')
parser.add_argument('--n_epochs_warmup', type=int, default=2, help='Number of warmup epochs for linear learning rate annealing.')
parser.add_argument('--start_epoch', default=0, help='Starting epoch (for logging; to be overwritten when restoring file.')
parser.add_argument('--lr', type=float, default=1e-3, help='Learning rate.')
parser.add_argument('--mini_data_size', type=int, default=None, help='Train only on this number of datapoints.')
parser.add_argument('--grad_norm_clip', default=50, type=float, help='Clip gradients during training.')
parser.add_argument('--checkpoint_grads', action='store_true', default=False, help='Whether to use gradient checkpointing in forward pass.')
parser.add_argument('--n_bits', default=5, type=int, help='Number of bits for input images.')
# distributed training params
parser.add_argument('--distributed', action='store_true', default=False, help='Whether to use DistributedDataParallels on multiple machines and GPUs.')
parser.add_argument('--world_size', type=int, default=1, help='Number of nodes for distributed training.')
parser.add_argument('--local_rank', type=int, help='When provided, run model on this cuda device. When None, used by torch.distributed.launch utility to manage multi-GPU training.')
# visualize
parser.add_argument('--vis_img', type=str, help='Path to image file to manipulate attributes and visualize.')
parser.add_argument('--vis_attrs', nargs='+', type=int, help='Which attribute to manipulate.')
parser.add_argument('--vis_alphas', nargs='+', type=float, help='Step size on the manipulation direction.')
best_eval_logprob = float('-inf')
# --------------------
# Data
# --------------------
def fetch_dataloader(args, train=True, data_dependent_init=False):
    """Build a DataLoader for args.dataset ('mnist' or 'celeba').

    When data_dependent_init=True, returns a single large-batch loader used to
    initialize Actnorm parameters; distributed sampling is skipped for it.
    Side effect: sets args.input_dims for the chosen dataset.
    """
    args.input_dims = {'mnist': (3,32,32), 'celeba': (3,64,64)}[args.dataset]

    transforms = {'mnist': T.Compose([T.Pad(2),                                         # image to 32x32 same as CIFAR
                                      T.RandomAffine(degrees=0, translate=(0.1, 0.1)),  # random shifts to fill the padded pixels
                                      T.ToTensor(),
                                      T.Lambda(lambda t: t + torch.rand_like(t)/2**8),  # dequantize
                                      T.Lambda(lambda t: t.expand(3,-1,-1))]),          # expand to 3 channels

                  'celeba': T.Compose([T.CenterCrop(148),  # RealNVP preprocessing
                                       T.Resize(64),
                                       T.Lambda(lambda im: np.array(im, dtype=np.float32)),                      # to numpy
                                       T.Lambda(lambda x: np.floor(x / 2**(8 - args.n_bits)) / 2**args.n_bits),  # lower bits
                                       T.ToTensor(),  # note: if input to this transform is uint8, it divides by 255 and returns float
                                       T.Lambda(lambda t: t + torch.rand_like(t) / 2**args.n_bits)])             # dequantize
                  }[args.dataset]

    dataset = {'mnist': MNIST, 'celeba': CelebA}[args.dataset]

    # load the specific dataset
    dataset = dataset(root=args.data_dir, train=train, transform=transforms)

    if args.mini_data_size:
        dataset.data = dataset.data[:args.mini_data_size]

    # load sampler and dataloader
    if args.distributed and train is True and not data_dependent_init:  # distributed training; but exclude initialization
        sampler = torch.utils.data.distributed.DistributedSampler(dataset)
    else:
        sampler = None

    batch_size = args.batch_size_init if data_dependent_init else args.batch_size  # if data dependent init use init batch size
    # bug fix: compare strings with '==' rather than 'is' (identity of string
    # literals is interning-dependent and raises SyntaxWarning on modern Python)
    kwargs = {'num_workers': 1, 'pin_memory': True} if args.device.type == 'cuda' else {}
    return DataLoader(dataset, batch_size=batch_size, shuffle=(sampler is None), drop_last=True, sampler=sampler, **kwargs)
# --------------------
# Model component layers
# --------------------
class Actnorm(nn.Module):
    """ Actnorm layer; cf Glow section 3.1.

    scale/bias are data-dependent-initialized on the first forward pass so that
    post-actnorm activations have per-channel zero mean and unit variance.
    """
    def __init__(self, param_dim=(1,3,1,1)):
        super().__init__()
        self.scale = nn.Parameter(torch.ones(param_dim))
        self.bias = nn.Parameter(torch.zeros(param_dim))
        # buffer so the "already initialized" flag is stored in checkpoints
        self.register_buffer('initialized', torch.tensor(0).byte())

    def forward(self, x):
        if not self.initialized:
            # per channel mean and std where x.shape = (B, C, H, W); copy_ writes
            # through the squeezed view into the parameter storage. (The trailing
            # .view_as(...) calls of the original were discarded no-ops; removed.)
            self.bias.squeeze().data.copy_(x.transpose(0,1).flatten(1).mean(1))
            self.scale.squeeze().data.copy_(x.transpose(0,1).flatten(1).std(1, False) + 1e-6)
            self.initialized += 1

        z = (x - self.bias) / self.scale
        # per-pixel scaling contributes -sum(log|scale|) for each of H*W positions
        logdet = - self.scale.abs().log().sum() * x.shape[2] * x.shape[3]
        return z, logdet

    def inverse(self, z):
        return z * self.scale + self.bias, self.scale.abs().log().sum() * z.shape[2] * z.shape[3]
class Invertible1x1Conv(nn.Module):
    """ Invertible 1x1 convolution layer; cf Glow section 3.2 """
    def __init__(self, n_channels=3, lu_factorize=False):
        super().__init__()
        self.lu_factorize = lu_factorize

        # initialize a 1x1 convolution weight matrix as a random orthogonal matrix
        w = torch.randn(n_channels, n_channels)
        w = torch.qr(w)[0]  # note: nn.init.orthogonal_ returns orth matrices with dets +/- 1 which complicates the inverse call below
        # NOTE(review): torch.qr / btrifact / btriunpack are deprecated in newer
        # PyTorch (torch.linalg.qr / torch.lu / torch.lu_unpack) -- confirm the
        # pinned torch version before upgrading.

        if lu_factorize:
            # compute LU factorization
            p, l, u = torch.btriunpack(*w.unsqueeze(0).btrifact())
            # initialize model parameters
            self.p, self.l, self.u = nn.Parameter(p.squeeze()), nn.Parameter(l.squeeze()), nn.Parameter(u.squeeze())
            s = self.u.diag()
            self.log_s = nn.Parameter(s.abs().log())
            self.register_buffer('sign_s', s.sign())  # note: not optimizing the sign; det W remains the same sign
            self.register_buffer('l_mask', torch.tril(torch.ones_like(self.l), -1))  # store mask to compute LU in forward/inverse pass
        else:
            self.w = nn.Parameter(w)

    def forward(self, x):
        B,C,H,W = x.shape
        if self.lu_factorize:
            l = self.l * self.l_mask + torch.eye(C).to(self.l.device)
            u = self.u * self.l_mask.t() + torch.diag(self.sign_s * self.log_s.exp())
            # NOTE(review): caches the composed weight as a plain tensor attribute
            # (recomputed each forward); in the non-LU branch self.w is a Parameter.
            self.w = self.p @ l @ u
            logdet = self.log_s.sum() * H * W  # log|det W| read off the LU diagonal
        else:
            logdet = torch.slogdet(self.w)[-1] * H * W
        return F.conv2d(x, self.w.view(C,C,1,1)), logdet

    def inverse(self, z):
        B,C,H,W = z.shape
        if self.lu_factorize:
            l = torch.inverse(self.l * self.l_mask + torch.eye(C).to(self.l.device))
            u = torch.inverse(self.u * self.l_mask.t() + torch.diag(self.sign_s * self.log_s.exp()))
            w_inv = u @ l @ self.p.inverse()
            logdet = - self.log_s.sum() * H * W
        else:
            w_inv = self.w.inverse()
            logdet = - torch.slogdet(self.w)[-1] * H * W
        return F.conv2d(z, w_inv.view(C,C,1,1)), logdet
class AffineCoupling(nn.Module):
    """ Affine coupling layer; cf Glow section 3.3; RealNVP figure 2 """
    def __init__(self, n_channels, width):
        super().__init__()

        # network layers;
        # per realnvp, network splits input, operates on half of it, and returns shift and scale of dim = half the input channels
        self.conv1 = nn.Conv2d(n_channels//2, width, kernel_size=3, padding=1, bias=False)  # input is split along channel dim
        self.actnorm1 = Actnorm(param_dim=(1, width, 1, 1))
        # NOTE(review): a 1x1 conv with padding=1 grows H,W by 2; the unpadded 3x3
        # conv3 below shrinks them back, so spatial dims are preserved overall.
        # Unusual (official Glow pads the 3x3 convs instead) but self-consistent;
        # kept as-is to preserve checkpoint compatibility.
        self.conv2 = nn.Conv2d(width, width, kernel_size=1, padding=1, bias=False)
        self.actnorm2 = Actnorm(param_dim=(1, width, 1, 1))
        self.conv3 = nn.Conv2d(width, n_channels, kernel_size=3)           # output is split into scale and shift components
        self.log_scale_factor = nn.Parameter(torch.zeros(n_channels,1,1))  # learned scale (cf RealNVP sec 4.1 / Glow official code)

        # initialize last convolution with zeros, such that each affine coupling layer performs an identity function
        self.conv3.weight.data.zero_()
        self.conv3.bias.data.zero_()

    def forward(self, x):
        x_a, x_b = x.chunk(2, 1)  # split along channel dim

        h = F.relu(self.actnorm1(self.conv1(x_b))[0])
        h = F.relu(self.actnorm2(self.conv2(h))[0])
        h = self.conv3(h) * self.log_scale_factor.exp()
        t = h[:,0::2,:,:]  # shift; take even channels
        s = h[:,1::2,:,:]  # scale; take odd channels
        s = torch.sigmoid(s + 2.)  # at initialization, s is 0 and sigmoid(2) is near identity

        z_a = s * x_a + t
        z_b = x_b
        z = torch.cat([z_a, z_b], dim=1)  # concat along channel dim

        logdet = s.log().sum([1, 2, 3])
        return z, logdet

    def inverse(self, z):
        z_a, z_b = z.chunk(2, 1)  # split along channel dim

        h = F.relu(self.actnorm1(self.conv1(z_b))[0])
        h = F.relu(self.actnorm2(self.conv2(h))[0])
        h = self.conv3(h) * self.log_scale_factor.exp()
        t = h[:,0::2,:,:]  # shift; take even channels
        s = h[:,1::2,:,:]  # scale; take odd channels
        s = torch.sigmoid(s + 2.)

        x_a = (z_a - t) / s
        x_b = z_b
        x = torch.cat([x_a, x_b], dim=1)  # concat along channel dim

        logdet = - s.log().sum([1, 2, 3])
        return x, logdet
class Squeeze(nn.Module):
    """ RealNVP squeezing operation layer (cf RealNVP section 3.6; Glow figure 2b):
    trades spatial resolution for channels -- each 2x2xC spatial patch is folded
    into a 1x1x4C block, turning an s x s x c tensor into s/2 x s/2 x 4c. """
    def __init__(self):
        super().__init__()

    def forward(self, x):
        n, c, h, w = x.shape
        patches = x.reshape(n, c, h // 2, 2, w // 2, 2)   # expose the 2x2 patch factors
        patches = patches.permute(0, 1, 3, 5, 2, 4)       # -> (n, c, 2, 2, h//2, w//2)
        return patches.reshape(n, 4 * c, h // 2, w // 2)  # fold patch factors into channels

    def inverse(self, x):
        n, c, h, w = x.shape
        planes = x.reshape(n, c // 4, 2, 2, h, w)         # expose the channel factors
        planes = planes.permute(0, 1, 4, 2, 5, 3)         # -> (n, c//4, h, 2, w, 2)
        return planes.reshape(n, c // 4, 2 * h, 2 * w)    # unfold back into spatial dims
class Split(nn.Module):
    """ Split layer; cf Glow figure 2 / RealNVP figure 4b.
    Halves the channels: one half continues through the flow, the other half is
    gaussianized conditioned on the first (RealNVP multi-scale architecture). """
    def __init__(self, n_channels):
        super().__init__()
        self.gaussianize = Gaussianize(n_channels//2)

    def forward(self, x):
        # first half keeps flowing; second half is modeled as a Gaussian given the first
        x1, x2 = x.chunk(2, dim=1)
        z2, logdet = self.gaussianize(x1, x2)
        return x1, z2, logdet

    def inverse(self, x1, z2):
        x2, logdet = self.gaussianize.inverse(x1, z2)
        merged = torch.cat([x1, x2], dim=1)  # re-join along channel dim
        return merged, logdet
class Gaussianize(nn.Module):
    """ Gaussianization per RealNVP sec 3.6 / fig 4b -- half the variables are modeled
    directly as Gaussians whose parameters are predicted from the other half:
        x2 = z2 * exp(logs) + mu  with  (mu, logs) = f(x1),
    so the driving noise is recovered as z2 = (x2 - mu) * exp(-logs).
    f is a conv layer zero-initialized so the layer starts as the identity. """
    def __init__(self, n_channels):
        super().__init__()
        # predicts 2*n_channels maps: interleaved means (even) and log-scales (odd)
        self.net = nn.Conv2d(n_channels, 2*n_channels, kernel_size=3, padding=1)
        # learned output scale (cf RealNVP sec 4.1 / Glow official code)
        self.log_scale_factor = nn.Parameter(torch.zeros(2*n_channels,1,1))
        # zero init => m = logs = 0 => identity transform at the start of training
        self.net.weight.data.zero_()
        self.net.bias.data.zero_()

    def forward(self, x1, x2):
        params = self.net(x1) * self.log_scale_factor.exp()
        m, logs = params[:,0::2,:,:], params[:,1::2,:,:]
        z2 = (x2 - m) * torch.exp(-logs)  # center and scale; log prob is computed at the model forward
        return z2, - logs.sum([1,2,3])

    def inverse(self, x1, z2):
        params = self.net(x1) * self.log_scale_factor.exp()
        m, logs = params[:,0::2,:,:], params[:,1::2,:,:]
        x2 = m + z2 * torch.exp(logs)
        return x2, logs.sum([1,2,3])
class Preprocess(nn.Module):
    """ Shifts images from [0, 1] to be centered at 0, and accounts in the logdet
    for the [0, 255] -> [0, 1] dequantization scaling (cf RealNVP sec 4.1). """
    def __init__(self):
        super().__init__()

    def forward(self, x):
        # scaling each of the per-image numel pixels by 1/256 contributes -numel*log(256)
        scale_logdet = - math.log(256) * x[0].numel()
        return x - 0.5, scale_logdet

    def inverse(self, x):
        scale_logdet = math.log(256) * x[0].numel()
        return x + 0.5, scale_logdet
# --------------------
# Container layers
# --------------------
class FlowSequential(nn.Sequential):
    """ Container chaining normalizing-flow layers and accumulating their logdets. """
    def __init__(self, *args, **kwargs):
        # optional gradient checkpointing of each sub-layer's forward pass
        self.checkpoint_grads = kwargs.pop('checkpoint_grads', None)
        super().__init__(*args, **kwargs)

    def forward(self, x):
        total_logdet = 0.
        for layer in self:
            if self.checkpoint_grads:
                x, logdet = checkpoint(layer, x)
            else:
                x, logdet = layer(x)
            total_logdet = total_logdet + logdet
        return x, total_logdet

    def inverse(self, z):
        total_logdet = 0.
        for layer in reversed(self):
            z, logdet = layer.inverse(z)
            total_logdet = total_logdet + logdet
        return z, total_logdet
class FlowStep(FlowSequential):
    """ One step of Glow flow (Actnorm -> Invertible 1x1 conv -> Affine coupling); cf Glow Figure 2a """
    def __init__(self, n_channels, width, lu_factorize=False):
        # forward/inverse and logdet accumulation are inherited from FlowSequential
        super().__init__(Actnorm(param_dim=(1,n_channels,1,1)),
                         Invertible1x1Conv(n_channels, lu_factorize),
                         AffineCoupling(n_channels, width))
class FlowLevel(nn.Module):
    """ One depth level of Glow flow (Squeeze -> FlowStep x depth -> Split); cf Glow figure 2b """
    def __init__(self, n_channels, width, depth, checkpoint_grads=False, lu_factorize=False):
        super().__init__()
        # network layers; Squeeze quadruples the channel count, hence 4*n_channels below
        self.squeeze = Squeeze()
        self.flowsteps = FlowSequential(*[FlowStep(4*n_channels, width, lu_factorize) for _ in range(depth)], checkpoint_grads=checkpoint_grads)
        self.split = Split(4*n_channels)

    def forward(self, x):
        x = self.squeeze(x)
        x, logdet_flowsteps = self.flowsteps(x)
        # x1 continues through the flow; z2 is factored out at this level
        x1, z2, logdet_split = self.split(x)
        logdet = logdet_flowsteps + logdet_split
        return x1, z2, logdet

    def inverse(self, x1, z2):
        x, logdet_split = self.split.inverse(x1, z2)
        x, logdet_flowsteps = self.flowsteps.inverse(x)
        x = self.squeeze.inverse(x)
        logdet = logdet_flowsteps + logdet_split
        return x, logdet
# --------------------
# Model
# --------------------
class Glow(nn.Module):
    """ Glow multi-scale architecture with depth of flow K and number of levels L; cf Glow figure 2; section 3"""
    def __init__(self, width, depth, n_levels, input_dims=(3,32,32), checkpoint_grads=False, lu_factorize=False):
        """
        Args:
            width: hidden width of the affine-coupling convnets.
            depth: K -- number of FlowSteps per level.
            n_levels: L -- number of (Squeeze -> FlowStep x K -> Split) levels.
            input_dims: (C, H, W) of the input images.
            checkpoint_grads: use gradient checkpointing inside FlowSequential.
            lu_factorize: LU-parameterize the invertible 1x1 convolutions.
        """
        super().__init__()
        # calculate output dims
        in_channels, H, W = input_dims
        out_channels = int(in_channels * 4**(n_levels+1) / 2**n_levels)  # each Squeeze results in 4x in_channels (cf RealNVP section 3.6); each Split in 1/2x in_channels
        out_HW = int(H / 2**(n_levels+1))                                # each Squeeze is 1/2x HW dim (cf RealNVP section 3.6)
        self.output_dims = out_channels, out_HW, out_HW

        # preprocess images
        self.preprocess = Preprocess()

        # network layers cf Glow figure 2b: (Squeeze -> FlowStep x depth -> Split) x n_levels -> Squeeze -> FlowStep x depth
        self.flowlevels = nn.ModuleList([FlowLevel(in_channels * 2**i, width, depth, checkpoint_grads, lu_factorize) for i in range(n_levels)])
        self.squeeze = Squeeze()
        self.flowstep = FlowSequential(*[FlowStep(out_channels, width, lu_factorize) for _ in range(depth)], checkpoint_grads=checkpoint_grads)

        # gaussianize the final z output; initialize to identity
        self.gaussianize = Gaussianize(out_channels)

        # base distribution of the flow (buffers so it follows the module's device)
        self.register_buffer('base_dist_mean', torch.zeros(1))
        self.register_buffer('base_dist_var', torch.ones(1))

    def forward(self, x):
        """Encode images x into a list of per-level latents; returns (zs, sum_logdets)."""
        x, sum_logdets = self.preprocess(x)
        # pass through flow; each level factors out one z
        zs = []
        for m in self.flowlevels:
            x, z, logdet = m(x)
            sum_logdets = sum_logdets + logdet
            zs.append(z)
        x = self.squeeze(x)
        z, logdet = self.flowstep(x)
        sum_logdets = sum_logdets + logdet

        # gaussianize the final z
        z, logdet = self.gaussianize(torch.zeros_like(z), z)
        sum_logdets = sum_logdets + logdet
        zs.append(z)
        return zs, sum_logdets

    def inverse(self, zs=None, batch_size=None, z_std=1.):
        """Decode latents back to image space; samples new latents when zs is None.
        z_std scales base-distribution samples (sampling temperature)."""
        if zs is None:  # if no random numbers are passed, generate new from the base distribution
            assert batch_size is not None, 'Must either specify batch_size or pass a batch of z random numbers.'
            zs = [z_std * self.base_dist.sample((batch_size, *self.output_dims)).squeeze()]
        # pass through inverse flow
        z, sum_logdets = self.gaussianize.inverse(torch.zeros_like(zs[-1]), zs[-1])
        x, logdet = self.flowstep.inverse(z)
        sum_logdets = sum_logdets + logdet
        x = self.squeeze.inverse(x)
        for i, m in enumerate(reversed(self.flowlevels)):
            z = z_std * (self.base_dist.sample(x.shape).squeeze() if len(zs)==1 else zs[-i-2])  # if no z's are passed, generate new random numbers from the base dist
            x, logdet = m.inverse(x, z)
            sum_logdets = sum_logdets + logdet
        # postprocess
        x, logdet = self.preprocess.inverse(x)
        sum_logdets = sum_logdets + logdet
        return x, sum_logdets

    @property
    def base_dist(self):
        return D.Normal(self.base_dist_mean, self.base_dist_var)

    def log_prob(self, x, bits_per_pixel=False):
        """Exact log-likelihood of x under the flow; optionally converted to bits/pixel."""
        zs, logdet = self.forward(x)
        log_prob = sum(self.base_dist.log_prob(z).sum([1,2,3]) for z in zs) + logdet
        if bits_per_pixel:
            log_prob /= (math.log(2) * x[0].numel())
        return log_prob
# --------------------
# Train and evaluate
# --------------------
@torch.no_grad()
def data_dependent_init(model, args):
    """Run one large batch through the model so Actnorm layers initialize their
    scale/bias from the data statistics (cf Glow section 3.1)."""
    # set up an iterator with batch size = batch_size_init and run through model
    dataloader = fetch_dataloader(args, train=True, data_dependent_init=True)
    model(next(iter(dataloader))[0].requires_grad_(True if args.checkpoint_grads else False).to(args.device))
    del dataloader
    return True
def train_epoch(model, dataloader, optimizer, writer, epoch, args):
    """Train the model for one epoch, logging stats and periodically saving
    samples and checkpoints.

    Side effects: increments args.step; writes to `writer`, stdout and
    args.output_dir (only on the main process for file/TensorBoard output).
    """
    model.train()

    tic = time.time()
    for i, (x,y) in enumerate(dataloader):
        args.step += args.world_size
        # warmup learning rate: linear ramp over the first n_epochs_warmup epochs
        if epoch <= args.n_epochs_warmup:
            optimizer.param_groups[0]['lr'] = args.lr * min(1, args.step / (len(dataloader) * args.world_size * args.n_epochs_warmup))

        x = x.requires_grad_(True if args.checkpoint_grads else False).to(args.device)  # requires_grad needed for checkpointing

        # negative log-likelihood in bits/pixel, averaged over the batch
        loss = - model.log_prob(x, bits_per_pixel=True).mean(0)

        optimizer.zero_grad()
        loss.backward()

        nn.utils.clip_grad_norm_(model.parameters(), args.grad_norm_clip)

        optimizer.step()

        # report stats
        if i % args.log_interval == 0:
            # compute KL divergence between base and each of the z's that the model produces
            with torch.no_grad():
                zs, _ = model(x)
                kls = [D.kl.kl_divergence(D.Normal(z.mean(), z.std()), model.base_dist) for z in zs]

            # write stats
            if args.on_main_process:
                et = time.time() - tic              # elapsed time
                tt = len(dataloader) * et / (i+1)   # total time per epoch
                print('Epoch: [{}/{}][{}/{}]\tStep: {}\tTime: elapsed {:.0f}m{:02.0f}s / total {:.0f}m{:02.0f}s\tLoss {:.4f}\t'.format(
                      epoch, args.start_epoch + args.n_epochs, i+1, len(dataloader), args.step, et//60, et%60, tt//60, tt%60, loss.item()))

                # update writer
                for j, kl in enumerate(kls):
                    writer.add_scalar('kl_level_{}'.format(j), kl.item(), args.step)
                writer.add_scalar('train_bits_x', loss.item(), args.step)

        # save and generate
        if i % args.save_interval == 0:
            # generate samples at several temperatures
            samples = generate(model, n_samples=4, z_stds=[0., 0.25, 0.7, 1.0])
            images = make_grid(samples.cpu(), nrow=4, pad_value=1)

            # write stats and save checkpoints
            if args.on_main_process:
                save_image(images, os.path.join(args.output_dir, 'generated_sample_{}.png'.format(args.step)))

                # save training checkpoint
                torch.save({'epoch': epoch,
                            'global_step': args.step,
                            'state_dict': model.state_dict()},
                           os.path.join(args.output_dir, 'checkpoint.pt'))
                torch.save(optimizer.state_dict(), os.path.join(args.output_dir, 'optim_checkpoint.pt'))
@torch.no_grad()
def evaluate(model, dataloader, args):
    """Average bits-per-pixel log-likelihood over a dataloader.
    Returns (mean, ~95% confidence half-width) of the per-example log-probs."""
    model.eval()
    print('Evaluating ...', end='\r')

    batch_logprobs = [model.log_prob(x.to(args.device), bits_per_pixel=True) for x, y in dataloader]
    logprobs = torch.cat(batch_logprobs, dim=0).to(args.device)

    n_examples = len(dataloader.dataset)
    logprob_mean = logprobs.mean(0)
    logprob_std = 2 * logprobs.std(0) / math.sqrt(n_examples)
    return logprob_mean, logprob_std
@torch.no_grad()
def generate(model, n_samples, z_stds):
    """Sample images from the model at several base-dist temperatures (z_stds).
    Returns a (len(z_stds)*n_samples, ...) batch; within each temperature group
    samples are sorted from highest to lowest log-prob."""
    model.eval()
    print('Generating ...', end='\r')

    samples = []
    for z_std in z_stds:
        sample, _ = model.inverse(batch_size=n_samples, z_std=z_std)
        log_probs = model.log_prob(sample, bits_per_pixel=True)
        samples.append(sample[log_probs.argsort().flip(0)])  # sort by log_prob; flip high (left) to low (right)
    return torch.cat(samples,0)
def train_and_evaluate(model, train_dataloader, test_dataloader, optimizer, writer, args):
    """Run the full training loop: one train_epoch per epoch, periodic evaluation,
    and checkpointing of the best model by eval log-prob.

    Fix: the evaluation branch had been disabled with a leftover ``if False:#...``
    debug guard, so evaluation and best-model checkpointing never ran; the
    intended ``eval_interval`` schedule is restored.
    """
    global best_eval_logprob

    for epoch in range(args.start_epoch, args.start_epoch + args.n_epochs):
        if args.distributed:
            # reshuffle shards deterministically per epoch
            train_dataloader.sampler.set_epoch(epoch)
        train_epoch(model, train_dataloader, optimizer, writer, epoch, args)

        # evaluate on the configured schedule
        if epoch % args.eval_interval == 0:
            eval_logprob_mean, eval_logprob_std = evaluate(model, test_dataloader, args)
            print('Evaluate at epoch {}: bits_x = {:.3f} +/- {:.3f}'.format(epoch, eval_logprob_mean, eval_logprob_std))

            # save best state
            if args.on_main_process and eval_logprob_mean > best_eval_logprob:
                best_eval_logprob = eval_logprob_mean
                torch.save({'epoch': epoch,
                            'global_step': args.step,
                            'state_dict': model.state_dict()},
                           os.path.join(args.output_dir, 'best_model_checkpoint.pt'))
# --------------------
# Visualizations
# --------------------
def encode_dataset(model, dataloader):
    """Encode every batch into flattened latent vectors.

    Returns (zs, attrs): zs is (N, flattened latent dim), attrs stacks the labels.
    NOTE(review): relies on the module-level ``args`` global for the device --
    consider passing the device explicitly.
    """
    model.eval()

    zs = []
    attrs = []
    for i, (x,y) in enumerate(dataloader):
        print('Encoding [{}/{}]'.format(i+1, len(dataloader)), end='\r')
        x = x.to(args.device)  # `args` is a global here, not a parameter
        zs_i, _ = model(x)
        # flatten each level's z and concat into one vector per example
        zs.append(torch.cat([z.flatten(1) for z in zs_i], dim=1))
        attrs.append(y)

    zs = torch.cat(zs, dim=0)
    attrs = torch.cat(attrs, dim=0)
    print('Encoding completed.')
    return zs, attrs
def compute_dz(zs, attrs, idx):
    """For attribute `idx`, compute dz = mean(z | attr present) - mean(z | attr absent).

    Args:
        zs: tensor of shape (n_examples, z_dim) of encoded datapoints.
        attrs: tensor of shape (n_examples, n_attributes) with +1 / -1 entries.
        idx: attribute column index.

    Returns:
        tensor of shape (z_dim,): difference of the positive- and
        negative-attribute means.
    """
    # vectorized boolean-mask selection replaces the original per-row python
    # loop + torch.stack (one pass over the tensor instead of n_examples indexings)
    pos_mask = attrs[:, idx] == +1
    neg_mask = attrs[:, idx] == -1
    # dz = z_pos - z_neg; z_pos is the mean of all encodings where attr is present
    return zs[pos_mask].mean(0) - zs[neg_mask].mean(0)
def get_manipulators(zs, attrs):
    """Compute a manipulation direction dz (= z_pos - z_neg) for every attribute.

    Returns a tensor of shape (n_attributes, flattened z dim). The 1.6 scale
    factor follows the official Glow code.
    """
    print('Extracting manipulators...', end=' ')
    n_attributes = attrs.shape[1]
    directions = [compute_dz(zs, attrs, attr_idx) for attr_idx in range(n_attributes)]
    dzs = torch.stack(directions, dim=0) * 1.6
    print('Completed.')
    return dzs
def manipulate(model, z, dz, z_std, alpha):
    """Decode an image after moving its latents along direction dz by each alpha.

    Args:
        z: sequence of per-level latent tensors for one encoded example
           (iterated and flattened below).
        dz: manipulation direction; presumably shape (1, flattened zs dim) --
            flattening below must match encode_dataset's layout, verify.
        z_std: base-distribution std passed through to model.inverse.
        alpha: sequence of scalar step sizes; one decoded image per alpha.
    """
    # 1. record incoming shapes
    z_dims = [z_.squeeze().shape for z_ in z]
    z_numels = [z_.numel() for z_ in z]
    # 2. flatten z into a vector and manipulate by alpha in the direction of dz
    z = torch.cat([z_.flatten(1) for z_ in z], dim=1).to(dz.device)
    z = z + dz * torch.tensor(alpha).float().view(-1,1).to(dz.device) # out (n_alphas, flattened zs dim)
    # 3. reshape back to z shapes from each level of the model
    zs = [z_.view((len(alpha), *dim)) for z_, dim in zip(z.split(z_numels, dim=1), z_dims)]
    # 4. decode
    return model.inverse(zs, z_std=z_std)[0]
def load_manipulators(model, args):
    """Return per-attribute z-space manipulation directions, caching them on disk.

    If args.output_dir already holds z_manipulate.pt it is loaded; otherwise the
    training set is encoded, manipulators are computed, and zs/attrs/dzs are all
    saved to args.output_dir.
    """
    # limit the dataloader to a subset of images for encoding
    args.mini_data_size = 30000
    manipulate_path = os.path.join(args.output_dir, 'z_manipulate.pt')
    if os.path.exists(manipulate_path):
        z_manipulate = torch.load(manipulate_path, map_location=args.device)
    else:
        # encode the dataset, derive the manipulators, and cache everything
        dataloader = fetch_dataloader(args, train=True)
        zs, attrs = encode_dataset(model, dataloader)
        z_manipulate = get_manipulators(zs, attrs)
        torch.save(zs, os.path.join(args.output_dir, 'zs.pt'))
        torch.save(attrs, os.path.join(args.output_dir, 'attrs.pt'))
        torch.save(z_manipulate, manipulate_path)
    return z_manipulate
@torch.no_grad()
def visualize(model, args, attrs=None, alphas=None, img_path=None, n_examples=1):
    """ manipulate an input image along a given attribute """
    # pull the dataset to access its transforms and attribute names
    dataset = fetch_dataloader(args, train=False).dataset

    # default to manipulating every attribute when none are specified
    if not attrs:
        attrs = list(range(len(dataset.attr_names)))

    # encode either the user-supplied image or the first dataset example
    if img_path:
        from PIL import Image
        img = Image.open(img_path)
        x = dataset.transform(img)
    else:
        x, _ = dataset[0]
    z, _ = model(x.unsqueeze(0).to(args.device))

    # fetch the per-attribute manipulation directions
    z_manipulate = load_manipulators(model, args)

    # decode the image varied along each requested attribute
    decoded = [manipulate(model, z, z_manipulate[attr_idx].unsqueeze(0), args.z_std, alphas)
               for attr_idx in attrs]
    return torch.stack(decoded).cpu()
# --------------------
# Main
# --------------------
if __name__ == '__main__':
    # script entry point: parse args, set up device/distributed state, build the
    # model and dataloaders, then dispatch on --train/--evaluate/--generate/--visualize
    args = parser.parse_args()
    args.step = 0 # global step
    args.output_dir = os.path.dirname(args.restore_file) if args.restore_file else os.path.join(args.output_dir, time.strftime('%Y-%m-%d_%H-%M-%S', time.gmtime()))
    writer = None # init as None in case of multiprocessing; only main process performs write ops

    # setup device and distributed training
    if args.distributed:
        torch.cuda.set_device(args.local_rank)
        args.device = torch.device('cuda:{}'.format(args.local_rank))
        # initialize
        torch.distributed.init_process_group(backend='nccl', init_method='env://')
        # compute total world size (used to keep track of global step)
        args.world_size = int(os.environ['WORLD_SIZE']) # torch.distributed.launch sets this to nproc_per_node * nnodes
    else:
        if torch.cuda.is_available(): args.local_rank = 0
        args.device = torch.device('cuda:{}'.format(args.local_rank) if args.local_rank is not None else 'cpu')

    # write ops only when on_main_process
    # NOTE: local_rank unique only to the machine; only 1 process on each node is on_main_process;
    # if shared file system, args.local_rank below should be replaced by global rank e.g. torch.distributed.get_rank()
    args.on_main_process = (args.distributed and args.local_rank == 0) or not args.distributed

    # setup seed
    if args.seed:
        torch.manual_seed(args.seed)
        if args.device.type == 'cuda': torch.cuda.manual_seed(args.seed)

    # load data; sets args.input_dims needed for setting up the model
    train_dataloader = fetch_dataloader(args, train=True)
    test_dataloader = fetch_dataloader(args, train=False)

    # load model
    model = Glow(args.width, args.depth, args.n_levels, args.input_dims, args.checkpoint_grads).to(args.device)
    if args.distributed:
        # NOTE: DistributedDataParallel will divide and allocate batch_size to all available GPUs if device_ids are not set
        model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank], output_device=args.local_rank)
    else:
        # for compatibility of saving/loading models, wrap non-distributed cpu/gpu model as well;
        # ie state dict is based on model.module.layer keys, which now match between training distributed and running then locally
        model = torch.nn.parallel.DataParallel(model)
    # DataParalle and DistributedDataParallel are wrappers around the model; expose functions of the model directly
    model.base_dist = model.module.base_dist
    model.log_prob = model.module.log_prob
    model.inverse = model.module.inverse

    # load optimizers
    optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)

    # load checkpoint if provided
    if args.restore_file:
        model_checkpoint = torch.load(args.restore_file, map_location=args.device)
        model.load_state_dict(model_checkpoint['state_dict'])
        optimizer.load_state_dict(torch.load(os.path.dirname(args.restore_file) + '/optim_checkpoint.pt', map_location=args.device))
        args.start_epoch = model_checkpoint['epoch']
        args.step = model_checkpoint['global_step']

    # setup writer and outputs
    if args.on_main_process:
        writer = SummaryWriter(log_dir = args.output_dir)
        # save settings
        config = 'Parsed args:\n{}\n\n'.format(pprint.pformat(args.__dict__)) + \
                 'Num trainable params: {:,.0f}\n\n'.format(sum(p.numel() for p in model.parameters())) + \
                 'Model:\n{}'.format(model)
        config_path = os.path.join(args.output_dir, 'config.txt')
        writer.add_text('model_config', config)
        if not os.path.exists(config_path):
            with open(config_path, 'a') as f:
                print(config, file=f)

    if args.train:
        # run data dependent init and train
        data_dependent_init(model, args)
        train_and_evaluate(model, train_dataloader, test_dataloader, optimizer, writer, args)

    if args.evaluate:
        logprob_mean, logprob_std = evaluate(model, test_dataloader, args)
        print('Evaluate: bits_x = {:.3f} +/- {:.3f}'.format(logprob_mean, logprob_std))

    if args.generate:
        n_samples = 4
        z_std = [0., 0.25, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0] if not args.z_std else n_samples * [args.z_std]
        samples = generate(model, n_samples, z_std)
        images = make_grid(samples.cpu(), nrow=n_samples, pad_value=1)
        save_image(images, os.path.join(args.output_dir,
                                        'generated_samples_at_z_std_{}.png'.format('range' if args.z_std is None else args.z_std)))

    if args.visualize:
        if not args.z_std: args.z_std = 0.6
        if not args.vis_alphas: args.vis_alphas = [-2,-1,0,1,2]
        dec_x = visualize(model, args, args.vis_attrs, args.vis_alphas, args.vis_img) # output (n_attr, n_alpha, 3, H, W)
        filename = 'manipulated_sample' if not args.vis_img else \
                   'manipulated_img_{}'.format(os.path.basename(args.vis_img).split('.')[0])
        if args.vis_attrs:
            filename += '_attr_' + ','.join(map(str, args.vis_attrs))
        save_image(dec_x.view(-1, *args.input_dims), os.path.join(args.output_dir, filename + '.png'), nrow=dec_x.shape[1])

    if args.on_main_process:
        writer.close()
| 35,698 | 45.302205 | 181 | py |
normalizing_flows | normalizing_flows-master/bnaf.py | """
Implementation of Block Neural Autoregressive Flow
http://arxiv.org/abs/1904.04676
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.distributions as D
from torch.utils.data import DataLoader, TensorDataset
import math
import os
import time
import argparse
import pprint
from functools import partial
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from tqdm import tqdm
# command-line interface for the BNAF toy-density experiments
parser = argparse.ArgumentParser()
# action
parser.add_argument('--train', action='store_true', help='Train a flow.')
parser.add_argument('--plot', action='store_true', help='Plot a flow and target density.')
parser.add_argument('--restore_file', type=str, help='Path to model to restore.')
parser.add_argument('--output_dir', default='./results/{}'.format(os.path.splitext(__file__)[0]))
parser.add_argument('--cuda', type=int, help='Which GPU to run on.')
parser.add_argument('--seed', type=int, default=0, help='Random seed.')
# target density
parser.add_argument('--dataset', type=str, help='Which potential function to approximate.')
# model parameters
parser.add_argument('--data_dim', type=int, default=2, help='Dimension of the data.')
parser.add_argument('--hidden_dim', type=int, default=100, help='Dimensions of hidden layers.')
parser.add_argument('--n_hidden', type=int, default=3, help='Number of hidden layers.')
# training parameters
parser.add_argument('--step', type=int, default=0, help='Current step of training (number of minibatches processed).')
parser.add_argument('--n_steps', type=int, default=1, help='Number of steps to train.')
parser.add_argument('--batch_size', type=int, default=200, help='Training batch size.')
parser.add_argument('--lr', type=float, default=1e-1, help='Initial learning rate.')
parser.add_argument('--lr_decay', type=float, default=0.5, help='Learning rate decay.')
parser.add_argument('--lr_patience', type=float, default=2000, help='Number of steps before decaying learning rate.')
parser.add_argument('--log_interval', type=int, default=50, help='How often to save model and samples.')
# --------------------
# Data
# --------------------
def potential_fn(dataset):
    """Return the energy potential U(z) for the named 2d target density.

    The potentials u1-u4 are the test energy functions from Table 1 of the
    normalizing-flows paper; the target density is p(z) proportional to exp(-U(z)).

    Raises:
        RuntimeError: if `dataset` is not one of 'u1'..'u4'.
    """
    # auxiliary terms shared by the potentials
    w1 = lambda z: torch.sin(2 * math.pi * z[:,0] / 4)
    w2 = lambda z: 3 * torch.exp(-0.5 * ((z[:,0] - 1)/0.6)**2)
    w3 = lambda z: 3 * torch.sigmoid((z[:,0] - 1) / 0.3)

    if dataset == 'u1':
        def u(z):
            radial = 0.5 * ((torch.norm(z, p=2, dim=1) - 2) / 0.4)**2
            modes = torch.exp(-0.5*((z[:,0] - 2) / 0.6)**2) + \
                    torch.exp(-0.5*((z[:,0] + 2) / 0.6)**2)
            return radial - torch.log(modes + 1e-10)
    elif dataset == 'u2':
        def u(z):
            return 0.5 * ((z[:,1] - w1(z)) / 0.4)**2
    elif dataset == 'u3':
        def u(z):
            branch_a = torch.exp(-0.5*((z[:,1] - w1(z))/0.35)**2)
            branch_b = torch.exp(-0.5*((z[:,1] - w1(z) + w2(z))/0.35)**2)
            return - torch.log(branch_a + branch_b + 1e-10)
    elif dataset == 'u4':
        def u(z):
            branch_a = torch.exp(-0.5*((z[:,1] - w1(z))/0.4)**2)
            branch_b = torch.exp(-0.5*((z[:,1] - w1(z) + w3(z))/0.35)**2)
            return - torch.log(branch_a + branch_b + 1e-10)
    else:
        raise RuntimeError('Invalid potential name to sample from.')
    return u
def sample_2d_data(dataset, n_samples):
    """Draw n_samples points from a named 2d toy distribution.

    Supported datasets: '8gaussians', '2spirals', 'checkerboard', 'rings'.
    Returns a tensor of shape (n_samples, 2).

    Raises:
        RuntimeError: for an unrecognized dataset name.
    """
    z = torch.randn(n_samples, 2)

    if dataset == '8gaussians':
        scale = 4
        sq2 = 1/math.sqrt(2)
        centers = [(1,0), (-1,0), (0,1), (0,-1), (sq2,sq2), (-sq2,sq2), (sq2,-sq2), (-sq2,-sq2)]
        centers = torch.tensor([(scale * x, scale * y) for x,y in centers])
        return sq2 * (0.5 * z + centers[torch.randint(len(centers), size=(n_samples,))])

    elif dataset == '2spirals':
        n = torch.sqrt(torch.rand(n_samples // 2)) * 540 * (2 * math.pi) / 360
        d1x = - torch.cos(n) * n + torch.rand(n_samples // 2) * 0.5
        d1y =   torch.sin(n) * n + torch.rand(n_samples // 2) * 0.5
        x = torch.cat([torch.stack([ d1x,  d1y], dim=1),
                       torch.stack([-d1x, -d1y], dim=1)], dim=0) / 3
        return x + 0.1*z

    elif dataset == 'checkerboard':
        x1 = torch.rand(n_samples) * 4 - 2
        x2_ = torch.rand(n_samples) - torch.randint(0, 2, (n_samples,), dtype=torch.float) * 2
        x2 = x2_ + x1.floor() % 2
        return torch.stack([x1, x2], dim=1) * 2

    elif dataset == 'rings':
        n_samples4 = n_samples3 = n_samples2 = n_samples // 4
        n_samples1 = n_samples - n_samples4 - n_samples3 - n_samples2

        # so as not to have the first point = last point, set endpoint=False in np; here shifted by one
        linspace4 = torch.linspace(0, 2 * math.pi, n_samples4 + 1)[:-1]
        linspace3 = torch.linspace(0, 2 * math.pi, n_samples3 + 1)[:-1]
        linspace2 = torch.linspace(0, 2 * math.pi, n_samples2 + 1)[:-1]
        linspace1 = torch.linspace(0, 2 * math.pi, n_samples1 + 1)[:-1]

        circ4_x = torch.cos(linspace4)
        circ4_y = torch.sin(linspace4)
        # bug fix: circ3_x previously used linspace4 (copy-paste typo). It was
        # harmless only because n_samples3 == n_samples4 makes the two linspaces
        # identical; use linspace3 so the pairing is self-consistent.
        circ3_x = torch.cos(linspace3) * 0.75
        circ3_y = torch.sin(linspace3) * 0.75
        circ2_x = torch.cos(linspace2) * 0.5
        circ2_y = torch.sin(linspace2) * 0.5
        circ1_x = torch.cos(linspace1) * 0.25
        circ1_y = torch.sin(linspace1) * 0.25

        x = torch.stack([torch.cat([circ4_x, circ3_x, circ2_x, circ1_x]),
                         torch.cat([circ4_y, circ3_y, circ2_y, circ1_y])], dim=1) * 3.0

        # random sample
        x = x[torch.randint(0, n_samples, size=(n_samples,))]

        # Add noise
        return x + torch.normal(mean=torch.zeros_like(x), std=0.08*torch.ones_like(x))

    else:
        raise RuntimeError('Invalid `dataset` to sample from.')
# --------------------
# Model components
# --------------------
class MaskedLinear(nn.Module):
    """BNAF masked linear layer: block lower-triangular and weight-normalized.

    The block structure over `data_dim` keeps the transform autoregressive;
    forward() also propagates the running log-determinant of the flow.
    """
    def __init__(self, in_features, out_features, data_dim):
        """Build the block-triangular weight, its masks, and weight-norm params.

        Args:
            in_features, out_features: layer widths; both are assumed to be
                divisible by data_dim (the block slicing below relies on it).
            data_dim: dimensionality of the data / number of blocks.
        """
        super().__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.data_dim = data_dim

        # Notation:
        # BNAF weight calculation for (eq 8): W = g(W) * M_d + W * M_o
        # where W is block lower triangular so model is autoregressive,
        # g = exp function; M_d is block diagonal mask; M_o is block off-diagonal mask.
        # Weight Normalization (Salimans & Kingma, eq 2): w = g * v / ||v||
        # where g is scalar, v is k-dim vector, ||v|| is Euclidean norm
        # ------
        # Here: pre-weight norm matrix is v; then: v = exp(weight) * mask_d + weight * mask_o
        # weight-norm scalar is g: out_features dimensional vector (here logg is used instead to avoid taking logs in the logdet calc.
        # then weight-normed weight matrix is w = g * v / ||v||
        #
        # log det jacobian of block lower triangular is taking block diagonal mask of
        # log(g*v/||v||) = log(g) + log(v) - log(||v||)
        # = log(g) + weight - log(||v||) since v = exp(weight) * mask_d + weight * mask_o
        weight = torch.zeros(out_features, in_features)
        mask_d = torch.zeros_like(weight)
        mask_o = torch.zeros_like(weight)
        for i in range(data_dim):
            # select block slices
            h = slice(i * out_features // data_dim, (i+1) * out_features // data_dim)
            w = slice(i * in_features // data_dim, (i+1) * in_features // data_dim)
            w_row = slice(0, (i+1) * in_features // data_dim)
            # initialize block-lower-triangular weight and construct block diagonal mask_d and lower triangular mask_o
            nn.init.kaiming_uniform_(weight[h,w_row], a=math.sqrt(5)) # default nn.Linear weight init only block-wise
            mask_d[h,w] = 1
            mask_o[h,w_row] = 1
        mask_o = mask_o - mask_d # remove diagonal so mask_o is lower triangular 1-off the diagonal

        self.weight = nn.Parameter(weight) # pre-mask, pre-weight-norm
        self.logg = nn.Parameter(torch.rand(out_features, 1).log()) # weight-norm parameter
        self.bias = nn.Parameter(nn.init.uniform_(torch.rand(out_features), -1/math.sqrt(in_features), 1/math.sqrt(in_features))) # default nn.Linear bias init
        self.register_buffer('mask_d', mask_d)
        self.register_buffer('mask_o', mask_o)

    def forward(self, x, sum_logdets):
        """Apply the masked, weight-normalized linear map and update the logdet.

        Returns:
            (out, sum_logdets): layer output of shape (B, out_features) and the
            accumulated log-determinant of shape (B, data_dim, out_dim//data_dim, 1).
        """
        # 1. compute BNAF masked weight eq 8
        v = self.weight.exp() * self.mask_d + self.weight * self.mask_o
        # 2. weight normalization
        v_norm = v.norm(p=2, dim=1, keepdim=True)
        w = self.logg.exp() * v / v_norm
        # 3. compute output and logdet of the layer
        out = F.linear(x, w, self.bias)
        logdet = self.logg + self.weight - 0.5 * v_norm.pow(2).log()
        # NOTE(review): indexing with a uint8 (.byte()) mask is deprecated in
        # newer torch; .bool() is the modern equivalent -- confirm torch version.
        logdet = logdet[self.mask_d.byte()]
        logdet = logdet.view(1, self.data_dim, out.shape[1]//self.data_dim, x.shape[1]//self.data_dim) \
                       .expand(x.shape[0],-1,-1,-1) # output (B, data_dim, out_dim // data_dim, in_dim // data_dim)
        # 4. sum with sum_logdets from layers before (BNAF section 3.3)
        # Compute log det jacobian of the flow (eq 9, 10, 11) using log-matrix multiplication of the different layers.
        # Specifically for two successive MaskedLinear layers A -> B with logdets A and B of shapes
        # logdet A is (B, data_dim, outA_dim, inA_dim)
        # logdet B is (B, data_dim, outB_dim, inB_dim) where outA_dim = inB_dim
        #
        # Note -- in the first layer, inA_dim = in_features//data_dim = 1 since in_features == data_dim.
        # thus logdet A is (B, data_dim, outA_dim, 1)
        #
        # Then:
        # logsumexp(A.transpose(2,3) + B) = logsumexp( (B, data_dim, 1, outA_dim) + (B, data_dim, outB_dim, inB_dim) , dim=-1)
        # = logsumexp( (B, data_dim, 1, outA_dim) + (B, data_dim, outB_dim, outA_dim), dim=-1)
        # = logsumexp( (B, data_dim, outB_dim, outA_dim), dim=-1) where dim2 of tensor1 is broadcasted
        # = (B, data_dim, outB_dim, 1)
        sum_logdets = torch.logsumexp(sum_logdets.transpose(2,3) + logdet, dim=-1, keepdim=True)
        return out, sum_logdets

    def extra_repr(self):
        return 'in_features={}, out_features={}, bias={}'.format(
            self.in_features, self.out_features, self.bias is not None
        )
class Tanh(nn.Module):
    """Elementwise tanh flow layer that tracks the running log-determinant."""
    def __init__(self):
        super().__init__()

    def forward(self, x, sum_logdets):
        # log |d tanh(x)/dx| = log(1 / cosh^2 x) = -2 log cosh x
        # with cosh = (1 + exp(-2x)) / (2*exp(-x)), this is
        # -2 * (x - log 2 + log(1 + exp(-2x)))
        log_deriv = -2 * (x - math.log(2) + F.softplus(-2 * x))
        updated_logdets = sum_logdets + log_deriv.view_as(sum_logdets)
        return torch.tanh(x), updated_logdets
class FlowSequential(nn.Sequential):
    """Container chaining flow layers while accumulating their log-determinants."""
    def forward(self, x):
        # accumulator shaped for the log-matrix-product in the masked linear layers
        running_logdet = torch.zeros(1, x.shape[1], 1, 1, device=x.device)
        out = x
        for layer in self:
            out, running_logdet = layer(out, running_logdet)
        return out, running_logdet.squeeze()
# --------------------
# Model
# --------------------
class BNAF(nn.Module):
    """Block Neural Autoregressive Flow: masked linear blocks interleaved with tanh."""
    def __init__(self, data_dim, n_hidden, hidden_dim):
        super().__init__()
        # base distribution parameters for log-prob computation; registered as
        # buffers so they follow the module across devices
        self.register_buffer('base_dist_mean', torch.zeros(data_dim))
        self.register_buffer('base_dist_var', torch.ones(data_dim))

        # input block, n_hidden hidden blocks, then the output projection
        layers = [MaskedLinear(data_dim, hidden_dim, data_dim), Tanh()]
        for _ in range(n_hidden):
            layers.append(MaskedLinear(hidden_dim, hidden_dim, data_dim))
            layers.append(Tanh())
        layers.append(MaskedLinear(hidden_dim, data_dim, data_dim))
        self.net = FlowSequential(*layers)

        # TODO -- add permutation
        #         add residual gate
        #         add stack of flows

    @property
    def base_dist(self):
        return D.Normal(self.base_dist_mean, self.base_dist_var)

    def forward(self, x):
        return self.net(x)
def compute_kl_qp_loss(model, target_potential_fn, batch_size):
    """Density-matching objective, BNAF eq 3 & 20: KL(q_inv || p).

    q_inv is the inverse flow transform (log_q_inv = log_q_base - logdet) and
    p is the target distribution given as an energy potential, p = exp(-U).
    Returns the per-sample minimization objective, shape (batch_size,).
    """
    base_samples = model.base_dist.sample((batch_size,))
    log_q_base = model.base_dist.log_prob(base_samples)
    flowed, logdet = model(base_samples)
    # p = exp(-potential)  =>  log p = -potential
    log_p = - target_potential_fn(flowed)
    return log_q_base.sum(1) - logdet.sum(1) - log_p  # BNAF eq 20
def compute_kl_pq_loss(model, sample_2d_data_fn, batch_size):
    """Density-estimation objective, BNAF eq 2 & 16: KL(p || q_fwd).

    q_fwd is the forward flow transform (log_q_fwd = log_q_base + logdet) and p
    is the target distribution available only through samples. Since the target
    entropy is fixed wrt the optimization, this reduces to the NLL of the data
    under the flow. Returns the per-sample objective, shape (batch_size,).
    """
    data = sample_2d_data_fn(batch_size).to(model.base_dist.loc.device)
    latent, logdet = model(data)
    log_q_fwd = model.base_dist.log_prob(latent) + logdet
    return - log_q_fwd.sum(dim=1)
# --------------------
# Training
# --------------------
def train_flow(model, potential_or_sampling_fn, loss_fn, optimizer, scheduler, args):
    """Optimize the flow for args.n_steps minibatches.

    Each step evaluates `loss_fn` on a fresh batch, backprops, and steps the
    plateau scheduler on the loss. Every args.log_interval steps the model and
    optimizer/scheduler states are checkpointed to args.output_dir and a
    visualization is saved via plot().
    """
    model.train()
    with tqdm(total=args.n_steps, desc='Start step {}; Training for {} steps'.format(args.step, args.n_steps)) as pbar:
        for _ in range(args.n_steps):
            args.step += 1

            loss = loss_fn(model, potential_or_sampling_fn, args.batch_size).mean(0)

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            # ReduceLROnPlateau monitors the (per-step) training loss
            scheduler.step(loss)

            pbar.set_postfix(loss = '{:.3f}'.format(loss.item()))
            pbar.update()

            if args.step % args.log_interval == 0:
                # save model
                torch.save({'step': args.step,
                            'state_dict': model.state_dict()},
                           os.path.join(args.output_dir, 'checkpoint.pt'))
                torch.save({'optimizer': optimizer.state_dict(),
                            'scheduler': scheduler.state_dict()},
                           os.path.join(args.output_dir, 'optim_checkpoint.pt'))

                # plot and save results
                plot(model, potential_or_sampling_fn, args)
# --------------------
# Plotting
# --------------------
@torch.no_grad()
def plot(model, potential_or_sampling_fn, args):
    """Visualize the target next to the learned flow and save a figure to args.output_dir."""
    n_pts = 1000
    range_lim = 4

    # construct test points
    test_grid = setup_grid(range_lim, n_pts, args)

    if args.samples:
        # density estimation: target samples next to the forward-flow density
        fig, axes = plt.subplots(1, 2, figsize=(8,4), subplot_kw={'aspect': 'equal'})
        plot_samples(potential_or_sampling_fn, axes[0], range_lim, n_pts)
        plot_fwd_flow_density(model, axes[1], test_grid, n_pts, args.batch_size)
    else:
        # density matching: potential, inverse-flow density, and flow samples
        fig, axes = plt.subplots(1, 3, figsize=(12,4.3), subplot_kw={'aspect': 'equal'})
        plot_potential(potential_or_sampling_fn, axes[0], test_grid, n_pts)
        plot_inv_flow_density(model, axes[1], test_grid, n_pts, args.batch_size)
        plot_flow_samples(model, axes[2], n_pts, args.batch_size)

    # apply the shared formatting to every axis
    for ax in plt.gcf().axes:
        format_ax(ax, range_lim)
    plt.tight_layout()

    # save the figure for the current training step and release it
    plt.savefig(os.path.join(args.output_dir, 'vis_step_{}.png'.format(args.step)))
    plt.close()
def setup_grid(range_lim, n_pts, args):
    """Build an evaluation grid over [-range_lim, range_lim]^2 with n_pts per side.

    Returns (xx, yy, zz): the two meshgrid coordinate matrices and the flattened
    (n_pts**2, 2) points moved to args.device.
    """
    ticks = torch.linspace(-range_lim, range_lim, n_pts)
    xx, yy = torch.meshgrid((ticks, ticks))
    flat_pts = torch.stack((xx.flatten(), yy.flatten()), dim=1)
    return xx, yy, flat_pts.to(args.device)
def format_ax(ax, range_lim):
    """Apply shared limits, hide both axes, and flip y to image convention."""
    ax.set_xlim(-range_lim, range_lim)
    ax.set_ylim(-range_lim, range_lim)
    for axis in (ax.get_xaxis(), ax.get_yaxis()):
        axis.set_visible(False)
    ax.invert_yaxis()
def plot_potential(potential_fn, ax, test_grid, n_pts):
    """Render the target density exp(-U(z)) over the test grid."""
    xx, yy, zz = test_grid
    density = torch.exp(- potential_fn(zz)).view(n_pts, n_pts)
    ax.pcolormesh(xx, yy, density.cpu().data, cmap=plt.cm.jet)
    ax.set_title('Target density')
def plot_samples(samples_fn, ax, range_lim, n_pts):
    """Draw n_pts**2 target samples and display them as a 2d histogram."""
    pts = samples_fn(n_pts**2).numpy()
    bounds = [[-range_lim, range_lim], [-range_lim, range_lim]]
    ax.hist2d(pts[:,0], pts[:,1], range=bounds, bins=n_pts, cmap=plt.cm.jet)
    ax.set_title('Target samples')
def plot_flow_samples(model, ax, n_pts, batch_size):
    """Push base-distribution samples through the flow and histogram the result."""
    base = model.base_dist.sample((n_pts**2,))
    pieces = [model(chunk)[0] for chunk in base.split(batch_size, dim=0)]
    flowed = torch.cat(pieces, 0).cpu().numpy()
    # plot
    ax.hist2d(flowed[:,0], flowed[:,1], bins=n_pts, cmap=plt.cm.jet)
    ax.set_facecolor(plt.cm.jet(0.))
    ax.set_title('Flow samples')
def plot_fwd_flow_density(model, ax, test_grid, n_pts, batch_size):
    """ plots square grid and flow density; where density under the flow is exp(log_flow_base_dist + logdet) """
    xx, yy, zz = test_grid

    # run the grid through the flow in batches
    outs, dets = [], []
    for chunk in zz.split(batch_size, dim=0):
        out_i, det_i = model(chunk)
        outs.append(out_i)
        dets.append(det_i)
    flowed = torch.cat(outs, 0)
    logdets = torch.cat(dets, 0)

    # density of each grid point under the flow
    log_prob = model.base_dist.log_prob(flowed) + logdets
    prob = log_prob.sum(1).exp().cpu()

    # plot
    ax.pcolormesh(xx, yy, prob.view(n_pts, n_pts), cmap=plt.cm.jet)
    ax.set_facecolor(plt.cm.jet(0.))
    ax.set_title('Flow density')
def plot_inv_flow_density(model, ax, test_grid, n_pts, batch_size):
    """ plots transformed grid and density; where density is exp(loq_flow_base_dist - logdet) """
    xx, yy, zz = test_grid

    # transform the grid through the flow in batches
    outs, dets = [], []
    for chunk in zz.split(batch_size, dim=0):
        out_i, det_i = model(chunk)
        outs.append(out_i)
        dets.append(det_i)
    flowed = torch.cat(outs, 0)
    logdets = torch.cat(dets, 0)

    # density carried onto the transformed points: base density minus logdet
    log_qk = model.base_dist.log_prob(zz) - logdets
    qk = log_qk.sum(1).exp().cpu()
    flowed = flowed.cpu()

    # plot on the transformed grid coordinates
    ax.pcolormesh(flowed[:,0].view(n_pts,n_pts), flowed[:,1].view(n_pts,n_pts), qk.view(n_pts,n_pts), cmap=plt.cm.jet)
    ax.set_facecolor(plt.cm.jet(0.))
    ax.set_title('Flow density')
if __name__ == '__main__':
    # script entry point: set up run directory, device, seed and model, then
    # dispatch on --train / --plot
    args = parser.parse_args()
    # restore into the checkpoint's directory, else create a fresh timestamped run dir
    args.output_dir = os.path.dirname(args.restore_file) if args.restore_file else os.path.join(args.output_dir, time.strftime('%Y-%m-%d_%H-%M-%S', time.gmtime()))
    if not os.path.isdir(args.output_dir): os.makedirs(args.output_dir)
    args.device = torch.device('cuda:{}'.format(args.cuda) if args.cuda is not None and torch.cuda.is_available() else 'cpu')
    torch.manual_seed(args.seed)
    if args.device.type == 'cuda': torch.cuda.manual_seed(args.seed)

    model = BNAF(args.data_dim, args.n_hidden, args.hidden_dim).to(args.device)
    if args.restore_file:
        model_checkpoint = torch.load(args.restore_file, map_location=args.device)
        model.load_state_dict(model_checkpoint['state_dict'])
        args.step = model_checkpoint['step']

    # save settings
    config = 'Parsed args:\n{}\n\n'.format(pprint.pformat(args.__dict__)) + \
             'Num trainable params: {:,.0f}\n\n'.format(sum(p.numel() for p in model.parameters())) + \
             'Model:\n{}'.format(model)
    config_path = os.path.join(args.output_dir, 'config.txt')
    if not os.path.exists(config_path):
        with open(config_path, 'a') as f:
            print(config, file=f)

    # setup data -- density to estimate/match
    # names 'u1'..'u4' denote energy potentials; any other name is a sampling dataset
    args.samples = not (args.dataset.startswith('u') and len(args.dataset) == 2)
    if args.samples:
        # target is density to estimate
        potential_or_sampling_fn = partial(sample_2d_data, args.dataset)
        loss_fn = compute_kl_pq_loss
    else:
        # target is energy potential to match
        potential_or_sampling_fn = potential_fn(args.dataset)
        loss_fn = compute_kl_qp_loss

    if args.train:
        optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
        scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, factor=args.lr_decay, patience=args.lr_patience, verbose=True)
        if args.restore_file:
            optim_checkpoint = torch.load(os.path.dirname(args.restore_file) + '/optim_checkpoint.pt', map_location=args.device)
            optimizer.load_state_dict(optim_checkpoint['optimizer'])
            scheduler.load_state_dict(optim_checkpoint['scheduler'])
        train_flow(model, potential_or_sampling_fn, loss_fn, optimizer, scheduler, args)

    if args.plot:
        plot(model, potential_or_sampling_fn, args)
| 20,690 | 42.836864 | 163 | py |
normalizing_flows | normalizing_flows-master/maf.py | """
Masked Autoregressive Flow for Density Estimation
arXiv:1705.07057v4
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.distributions as D
import torchvision.transforms as T
from torchvision.utils import save_image
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import os
import math
import argparse
import pprint
import copy
from data import fetch_dataloaders
# command-line interface for the MAF / MADE density-estimation experiments
parser = argparse.ArgumentParser()
# action
parser.add_argument('--train', action='store_true', help='Train a flow.')
parser.add_argument('--evaluate', action='store_true', help='Evaluate a flow.')
parser.add_argument('--restore_file', type=str, help='Path to model to restore.')
parser.add_argument('--generate', action='store_true', help='Generate samples from a model.')
parser.add_argument('--data_dir', default='./data/', help='Location of datasets.')
parser.add_argument('--output_dir', default='./results/{}'.format(os.path.splitext(__file__)[0]))
parser.add_argument('--results_file', default='results.txt', help='Filename where to store settings and test results.')
parser.add_argument('--no_cuda', action='store_true', help='Do not use cuda.')
# data
parser.add_argument('--dataset', default='toy', help='Which dataset to use.')
parser.add_argument('--flip_toy_var_order', action='store_true', help='Whether to flip the toy dataset variable order to (x2, x1).')
parser.add_argument('--seed', type=int, default=1, help='Random seed to use.')
# model
parser.add_argument('--model', default='maf', help='Which model to use: made, maf.')
# made parameters
parser.add_argument('--n_blocks', type=int, default=5, help='Number of blocks to stack in a model (MADE in MAF; Coupling+BN in RealNVP).')
parser.add_argument('--n_components', type=int, default=1, help='Number of Gaussian clusters for mixture of gaussians models.')
parser.add_argument('--hidden_size', type=int, default=100, help='Hidden layer size for MADE (and each MADE block in an MAF).')
parser.add_argument('--n_hidden', type=int, default=1, help='Number of hidden layers in each MADE.')
parser.add_argument('--activation_fn', type=str, default='relu', help='What activation function to use in the MADEs.')
parser.add_argument('--input_order', type=str, default='sequential', help='What input order to use (sequential | random).')
parser.add_argument('--conditional', default=False, action='store_true', help='Whether to use a conditional model.')
# disables the BatchNorm layers between coupling/MADE blocks
parser.add_argument('--no_batch_norm', action='store_true')
# training params
parser.add_argument('--batch_size', type=int, default=100)
parser.add_argument('--n_epochs', type=int, default=50)
parser.add_argument('--start_epoch', default=0, help='Starting epoch (for logging; to be overwritten when restoring file.')
parser.add_argument('--lr', type=float, default=1e-4, help='Learning rate.')
parser.add_argument('--log_interval', type=int, default=1000, help='How often to show loss statistics and save samples.')
# --------------------
# Model layers and helpers
# --------------------
def create_masks(input_size, hidden_size, n_hidden, input_order='sequential', input_degrees=None):
    """Build the autoregressive connectivity masks for a MADE (MADE paper sec 4).

    A degree is assigned to every unit; a connection is kept only where the
    downstream degree is >= the upstream degree, so each output depends on at
    most the preceding inputs. `input_degrees` lets a stacked MAF reuse (e.g.
    flip) the ordering of the previous MADE.

    Returns:
        (masks, input_degrees): one float mask per linear layer, plus the
        degree assignment of the input layer.
    """
    layer_degrees = []

    if input_order == 'sequential':
        # fixed ordering 0..D-1 unless an explicit ordering is supplied
        layer_degrees.append(torch.arange(input_size) if input_degrees is None else input_degrees)
        for _ in range(n_hidden + 1):
            layer_degrees.append(torch.arange(hidden_size) % (input_size - 1))
        out_degrees = torch.arange(input_size) if input_degrees is None else input_degrees
        layer_degrees.append(out_degrees % input_size - 1)

    elif input_order == 'random':
        # random permutation for the input; hidden degrees drawn uniformly at random
        layer_degrees.append(torch.randperm(input_size) if input_degrees is None else input_degrees)
        for _ in range(n_hidden + 1):
            min_prev_degree = min(layer_degrees[-1].min().item(), input_size - 1)
            layer_degrees.append(torch.randint(min_prev_degree, input_size, (hidden_size,)))
        min_prev_degree = min(layer_degrees[-1].min().item(), input_size - 1)
        if input_degrees is None:
            layer_degrees.append(torch.randint(min_prev_degree, input_size, (input_size,)) - 1)
        else:
            layer_degrees.append(input_degrees - 1)

    # one mask per adjacent layer pair: keep edge iff downstream degree >= upstream degree
    masks = [(d_out.unsqueeze(-1) >= d_in.unsqueeze(0)).float()
             for d_in, d_out in zip(layer_degrees[:-1], layer_degrees[1:])]

    return masks, layer_degrees[0]
class MaskedLinear(nn.Linear):
    """Linear layer with an elementwise weight mask (the MADE building block).

    The mask zeroes connections to enforce the autoregressive property; an
    optional conditioning input y is mixed in through a separate dense weight.
    """
    def __init__(self, input_size, n_outputs, mask, cond_label_size=None):
        super().__init__(input_size, n_outputs)
        self.register_buffer('mask', mask)
        self.cond_label_size = cond_label_size
        if cond_label_size is not None:
            # unmasked weight for the conditioning features, scaled by fan-in
            init = torch.rand(n_outputs, cond_label_size) / math.sqrt(cond_label_size)
            self.cond_weight = nn.Parameter(init)

    def forward(self, x, y=None):
        masked_out = F.linear(x, self.weight * self.mask, self.bias)
        if y is None:
            return masked_out
        return masked_out + F.linear(y, self.cond_weight)

    def extra_repr(self):
        desc = 'in_features={}, out_features={}, bias={}'.format(
            self.in_features, self.out_features, self.bias is not None)
        if self.cond_label_size is not None:
            desc += ', cond_features={}'.format(self.cond_label_size)
        return desc
class LinearMaskedCoupling(nn.Module):
    """ Modified RealNVP Coupling Layers per the MAF paper """
    def __init__(self, input_size, hidden_size, n_hidden, mask, cond_label_size=None):
        """Build scale (s) and translation (t) MLPs over the masked half.

        `mask` selects the pass-through coordinates; the conditioning label (if
        any) is concatenated to the MLP input, hence the widened first Linear.
        """
        super().__init__()

        self.register_buffer('mask', mask)

        # scale function
        s_net = [nn.Linear(input_size + (cond_label_size if cond_label_size is not None else 0), hidden_size)]
        for _ in range(n_hidden):
            s_net += [nn.Tanh(), nn.Linear(hidden_size, hidden_size)]
        s_net += [nn.Tanh(), nn.Linear(hidden_size, input_size)]
        self.s_net = nn.Sequential(*s_net)

        # translation function
        self.t_net = copy.deepcopy(self.s_net)
        # replace Tanh with ReLU's per MAF paper
        # (deepcopy gives t_net the same architecture; every non-Linear module
        # -- i.e. each Tanh -- is then swapped for a fresh ReLU)
        for i in range(len(self.t_net)):
            if not isinstance(self.t_net[i], nn.Linear): self.t_net[i] = nn.ReLU()

    def forward(self, x, y=None):
        """Map data x -> latent u; returns (u, log|du/dx|) per RealNVP eq 8/6."""
        # apply mask
        mx = x * self.mask

        # run through model
        s = self.s_net(mx if y is None else torch.cat([y, mx], dim=1))
        t = self.t_net(mx if y is None else torch.cat([y, mx], dim=1))
        u = mx + (1 - self.mask) * (x - t) * torch.exp(-s) # cf RealNVP eq 8 where u corresponds to x (here we're modeling u)

        log_abs_det_jacobian = - (1 - self.mask) * s # log det du/dx; cf RealNVP 8 and 6; note, sum over input_size done at model log_prob

        return u, log_abs_det_jacobian

    def inverse(self, u, y=None):
        """Map latent u -> data x; returns (x, log|dx/du|) per RealNVP eq 7."""
        # apply mask
        mu = u * self.mask

        # run through model
        s = self.s_net(mu if y is None else torch.cat([y, mu], dim=1))
        t = self.t_net(mu if y is None else torch.cat([y, mu], dim=1))
        x = mu + (1 - self.mask) * (u * s.exp() + t) # cf RealNVP eq 7

        log_abs_det_jacobian = (1 - self.mask) * s # log det dx/du

        return x, log_abs_det_jacobian
class BatchNorm(nn.Module):
    """ RealNVP BatchNorm layer.

    An invertible batch-normalization bijection: normalizes with batch
    statistics during training (while tracking running statistics for eval)
    and applies a learned affine transform exp(log_gamma) * x_hat + beta.
    """
    def __init__(self, input_size, momentum=0.9, eps=1e-5):
        super().__init__()
        self.momentum = momentum  # decay rate for the running statistics
        self.eps = eps            # numerical floor inside sqrt/log
        # learned affine parameters; gamma is kept in log-space so the scale stays positive
        self.log_gamma = nn.Parameter(torch.zeros(input_size))
        self.beta = nn.Parameter(torch.zeros(input_size))
        # running statistics used in eval mode; buffers so they persist in state_dict
        self.register_buffer('running_mean', torch.zeros(input_size))
        self.register_buffer('running_var', torch.ones(input_size))
    def forward(self, x, cond_y=None):
        # cond_y is accepted for interface uniformity with the other flow layers but unused
        if self.training:
            # batch stats are cached on self for use by inverse() in train mode
            self.batch_mean = x.mean(0)
            self.batch_var = x.var(0) # note MAF paper uses biased variance estimate; ie x.var(0, unbiased=False)
            # update running mean
            self.running_mean.mul_(self.momentum).add_(self.batch_mean.data * (1 - self.momentum))
            self.running_var.mul_(self.momentum).add_(self.batch_var.data * (1 - self.momentum))
            mean = self.batch_mean
            var = self.batch_var
        else:
            mean = self.running_mean
            var = self.running_var
        # compute normalized input (cf original batch norm paper algo 1)
        x_hat = (x - mean) / torch.sqrt(var + self.eps)
        y = self.log_gamma.exp() * x_hat + self.beta
        # compute log_abs_det_jacobian (cf RealNVP paper); per-dimension and identical for
        # every row, hence the expand_as below
        log_abs_det_jacobian = self.log_gamma - 0.5 * torch.log(var + self.eps)
        # (a commented-out debug print of variances/log-dets was removed here)
        return y, log_abs_det_jacobian.expand_as(x)
    def inverse(self, y, cond_y=None):
        # NOTE(review): in train mode this reuses batch stats cached by the most recent
        # forward(); calling inverse() in train mode before any forward raises AttributeError
        if self.training:
            mean = self.batch_mean
            var = self.batch_var
        else:
            mean = self.running_mean
            var = self.running_var
        x_hat = (y - self.beta) * torch.exp(-self.log_gamma)
        x = x_hat * torch.sqrt(var + self.eps) + mean
        log_abs_det_jacobian = 0.5 * torch.log(var + self.eps) - self.log_gamma
        return x, log_abs_det_jacobian.expand_as(x)
class FlowSequential(nn.Sequential):
    """ Container for the layers of a normalizing flow.

    Unlike nn.Sequential's forward, every sub-layer here returns a pair
    (output, log_abs_det_jacobian); this container threads the tensor through
    the layers while accumulating the per-layer log-determinants.
    """
    def forward(self, x, y):
        # run layers first-to-last, summing their log|det J| contributions
        total_log_det = 0
        for layer in self:
            x, layer_log_det = layer(x, y)
            total_log_det = total_log_det + layer_log_det
        return x, total_log_det

    def inverse(self, u, y):
        # run layers last-to-first, inverting each one
        total_log_det = 0
        for layer in reversed(self):
            u, layer_log_det = layer.inverse(u, y)
            total_log_det = total_log_det + layer_log_det
        return u, total_log_det
# --------------------
# Models
# --------------------
class MADE(nn.Module):
    """ MADE (masked autoencoder) used as one autoregressive flow block: a masked
    MLP outputs per-dimension mean m and log-scale loga for the affine transform
    u = (x - m) * exp(-loga). """
    def __init__(self, input_size, hidden_size, n_hidden, cond_label_size=None, activation='relu', input_order='sequential', input_degrees=None):
        """
        Args:
            input_size -- scalar; dim of inputs
            hidden_size -- scalar; dim of hidden layers
            n_hidden -- scalar; number of hidden layers
            cond_label_size -- scalar or None; dim of the conditioning label, if conditional
            activation -- str; activation function to use ('relu' or 'tanh')
            input_order -- str or tensor; variable order for creating the autoregressive masks (sequential|random)
                            or the order flipped from the previous layer in a stack of mades
            input_degrees -- tensor or None; degrees handed down from the previous MADE in a stack
        """
        super().__init__()
        # base distribution for calculation of log prob under the model
        self.register_buffer('base_dist_mean', torch.zeros(input_size))
        self.register_buffer('base_dist_var', torch.ones(input_size))
        # create masks (create_masks is defined earlier in this file)
        masks, self.input_degrees = create_masks(input_size, hidden_size, n_hidden, input_order, input_degrees)
        # setup activation
        if activation == 'relu':
            activation_fn = nn.ReLU()
        elif activation == 'tanh':
            activation_fn = nn.Tanh()
        else:
            raise ValueError('Check activation function.')
        # construct model
        self.net_input = MaskedLinear(input_size, hidden_size, masks[0], cond_label_size)
        self.net = []
        for m in masks[1:-1]:
            self.net += [activation_fn, MaskedLinear(hidden_size, hidden_size, m)]
        # final layer emits 2 * input_size units ([m | loga]); repeating the last
        # mask keeps both halves autoregressive
        self.net += [activation_fn, MaskedLinear(hidden_size, 2 * input_size, masks[-1].repeat(2,1))]
        self.net = nn.Sequential(*self.net)
    @property
    def base_dist(self):
        # standard Normal base density; built from buffers so it follows the module's device
        return D.Normal(self.base_dist_mean, self.base_dist_var)
    def forward(self, x, y=None):
        # MAF eq 4 -- return mean and log std
        m, loga = self.net(self.net_input(x, y)).chunk(chunks=2, dim=1)
        u = (x - m) * torch.exp(-loga)
        # MAF eq 5
        log_abs_det_jacobian = - loga
        return u, log_abs_det_jacobian
    def inverse(self, u, y=None, sum_log_abs_det_jacobians=None):
        # MAF eq 3: inversion is sequential -- one network pass per input dimension
        D = u.shape[1]  # NOTE(review): unused, and shadows the torch.distributions alias `D` in this scope
        x = torch.zeros_like(u)
        # run through reverse model, filling dimensions in autoregressive order
        for i in self.input_degrees:
            m, loga = self.net(self.net_input(x, y)).chunk(chunks=2, dim=1)
            x[:,i] = u[:,i] * torch.exp(loga[:,i]) + m[:,i]
        log_abs_det_jacobian = loga
        return x, log_abs_det_jacobian
    def log_prob(self, x, y=None):
        # change of variables: log p(x) = log N(u) + log|det du/dx|, summed over dims
        u, log_abs_det_jacobian = self.forward(x, y)
        return torch.sum(self.base_dist.log_prob(u) + log_abs_det_jacobian, dim=1)
class MADEMOG(nn.Module):
    """ Mixture of Gaussians MADE: like MADE, but each input dimension is modeled
    by a C-component Gaussian mixture with per-component mean, log-scale and
    (unnormalized) log mixing weight. """
    def __init__(self, n_components, input_size, hidden_size, n_hidden, cond_label_size=None, activation='relu', input_order='sequential', input_degrees=None):
        """
        Args:
            n_components -- scalar; number of gauassian components in the mixture
            input_size -- scalar; dim of inputs
            hidden_size -- scalar; dim of hidden layers
            n_hidden -- scalar; number of hidden layers
            cond_label_size -- scalar or None; dim of the conditioning label, if conditional
            activation -- str; activation function to use ('relu' or 'tanh')
            input_order -- str or tensor; variable order for creating the autoregressive masks (sequential|random)
                            or the order flipped from the previous layer in a stack of mades
            input_degrees -- tensor or None; degrees handed down from the previous block in a stack
        """
        super().__init__()
        self.n_components = n_components
        # base distribution for calculation of log prob under the model
        self.register_buffer('base_dist_mean', torch.zeros(input_size))
        self.register_buffer('base_dist_var', torch.ones(input_size))
        # create masks (create_masks is defined earlier in this file)
        masks, self.input_degrees = create_masks(input_size, hidden_size, n_hidden, input_order, input_degrees)
        # setup activation
        if activation == 'relu':
            activation_fn = nn.ReLU()
        elif activation == 'tanh':
            activation_fn = nn.Tanh()
        else:
            raise ValueError('Check activation function.')
        # construct model
        self.net_input = MaskedLinear(input_size, hidden_size, masks[0], cond_label_size)
        self.net = []
        for m in masks[1:-1]:
            self.net += [activation_fn, MaskedLinear(hidden_size, hidden_size, m)]
        # final layer emits (mean, log-scale, log-weight) per component and per input dim
        self.net += [activation_fn, MaskedLinear(hidden_size, n_components * 3 * input_size, masks[-1].repeat(n_components * 3,1))]
        self.net = nn.Sequential(*self.net)
    @property
    def base_dist(self):
        return D.Normal(self.base_dist_mean, self.base_dist_var)
    def forward(self, x, y=None):
        # shapes: N = batch, L = input dim, C = mixture components
        N, L = x.shape
        C = self.n_components
        # MAF eq 2 -- parameters of Gaussians - mean, logsigma, log unnormalized cluster probabilities
        m, loga, logr = self.net(self.net_input(x, y)).view(N, C, 3 * L).chunk(chunks=3, dim=-1) # out 3 x (N, C, L)
        # MAF eq 4 -- normalize x under every component
        x = x.repeat(1, C).view(N, C, L) # out (N, C, L)
        u = (x - m) * torch.exp(-loga) # out (N, C, L)
        # MAF eq 5
        log_abs_det_jacobian = - loga # out (N, C, L)
        # normalize cluster responsibilities; cached on self for use by log_prob (which calls forward first)
        self.logr = logr - logr.logsumexp(1, keepdim=True) # out (N, C, L)
        return u, log_abs_det_jacobian
    def inverse(self, u, y=None, sum_log_abs_det_jacobians=None):
        # shapes
        N, C, L = u.shape
        # init output
        x = torch.zeros(N, L).to(u.device)
        # MAF eq 3
        # run through reverse model along each L; one network pass per dimension
        for i in self.input_degrees:
            m, loga, logr = self.net(self.net_input(x, y)).view(N, C, 3 * L).chunk(chunks=3, dim=-1) # out 3 x (N, C, L)
            # normalize cluster responsibilities and sample cluster assignments from a categorical dist
            logr = logr - logr.logsumexp(1, keepdim=True) # out (N, C, L)
            z = D.Categorical(logits=logr[:,:,i]).sample().unsqueeze(-1) # out (N, 1)
            u_z = torch.gather(u[:,:,i], 1, z).squeeze() # out (N,); NOTE(review): .squeeze() collapses to a 0-d scalar when N == 1
            m_z = torch.gather(m[:,:,i], 1, z).squeeze() # out (N,)
            loga_z = torch.gather(loga[:,:,i], 1, z).squeeze()
            x[:,i] = u_z * torch.exp(loga_z) + m_z
        log_abs_det_jacobian = loga
        return x, log_abs_det_jacobian
    def log_prob(self, x, y=None):
        u, log_abs_det_jacobian = self.forward(x, y) # u = (N,C,L); log_abs_det_jacobian = (N,C,L)
        # marginalize cluster probs: logsumexp over the component dim C
        log_probs = torch.logsumexp(self.logr + self.base_dist.log_prob(u) + log_abs_det_jacobian, dim=1) # sum over C; out (N, L)
        return log_probs.sum(1) # sum over L; out (N,)
class MAF(nn.Module):
    """ Masked Autoregressive Flow: a stack of MADE blocks, optionally interleaved
    with BatchNorm bijections; the variable ordering is flipped between
    consecutive blocks. """
    def __init__(self, n_blocks, input_size, hidden_size, n_hidden, cond_label_size=None, activation='relu', input_order='sequential', batch_norm=True):
        super().__init__()
        # base distribution for calculation of log prob under the model
        self.register_buffer('base_dist_mean', torch.zeros(input_size))
        self.register_buffer('base_dist_var', torch.ones(input_size))
        # construct model
        modules = []
        self.input_degrees = None  # first MADE derives its own degrees from input_order
        for i in range(n_blocks):
            modules += [MADE(input_size, hidden_size, n_hidden, cond_label_size, activation, input_order, self.input_degrees)]
            # hand the next block the flipped ordering (alternating orders across blocks)
            self.input_degrees = modules[-1].input_degrees.flip(0)
            # bool * [module] idiom: appends a BatchNorm layer only when batch_norm is True
            modules += batch_norm * [BatchNorm(input_size)]
        self.net = FlowSequential(*modules)
    @property
    def base_dist(self):
        return D.Normal(self.base_dist_mean, self.base_dist_var)
    def forward(self, x, y=None):
        return self.net(x, y)
    def inverse(self, u, y=None):
        return self.net.inverse(u, y)
    def log_prob(self, x, y=None):
        u, sum_log_abs_det_jacobians = self.forward(x, y)
        # change of variables: log p(x) = log N(u) + sum log|det du/dx|, summed over dims
        return torch.sum(self.base_dist.log_prob(u) + sum_log_abs_det_jacobians, dim=1)
class MAFMOG(nn.Module):
    """ MAF on mixture of gaussian MADE: a MAF stack followed by a final MADEMOG
    block, so each dimension's base density is a Gaussian mixture. """
    def __init__(self, n_blocks, n_components, input_size, hidden_size, n_hidden, cond_label_size=None, activation='relu',
                 input_order='sequential', batch_norm=True):
        super().__init__()
        # base distribution for calculation of log prob under the model
        self.register_buffer('base_dist_mean', torch.zeros(input_size))
        self.register_buffer('base_dist_var', torch.ones(input_size))
        self.maf = MAF(n_blocks, input_size, hidden_size, n_hidden, cond_label_size, activation, input_order, batch_norm)
        # get reversed input order from the last layer (note in maf model, input_degrees are already flipped in for-loop model constructor
        input_degrees = self.maf.input_degrees#.flip(0)
        self.mademog = MADEMOG(n_components, input_size, hidden_size, n_hidden, cond_label_size, activation, input_order, input_degrees)
    @property
    def base_dist(self):
        return D.Normal(self.base_dist_mean, self.base_dist_var)
    def forward(self, x, y=None):
        u, maf_log_abs_dets = self.maf(x, y)
        u, made_log_abs_dets = self.mademog(u, y)
        # broadcast the MAF (N, L) log-dets across the MADEMOG component dim C
        sum_log_abs_det_jacobians = maf_log_abs_dets.unsqueeze(1) + made_log_abs_dets
        return u, sum_log_abs_det_jacobians
    def inverse(self, u, y=None):
        # invert in reverse composition order: MADEMOG first, then the MAF stack
        x, made_log_abs_dets = self.mademog.inverse(u, y)
        x, maf_log_abs_dets = self.maf.inverse(x, y)
        sum_log_abs_det_jacobians = maf_log_abs_dets.unsqueeze(1) + made_log_abs_dets
        return x, sum_log_abs_det_jacobians
    def log_prob(self, x, y=None):
        u, log_abs_det_jacobian = self.forward(x, y)  # u = (N,C,L); log_abs_det_jacobian = (N,C,L)
        # marginalize cluster probs (uses self.mademog.logr, cached by the forward call above)
        log_probs = torch.logsumexp(self.mademog.logr + self.base_dist.log_prob(u) + log_abs_det_jacobian, dim=1)  # out (N, L)
        return log_probs.sum(1)  # out (N,)
class RealNVP(nn.Module):
    """ RealNVP: a stack of affine coupling layers whose binary masks alternate
    between blocks, optionally interleaved with flow BatchNorm layers. """
    def __init__(self, n_blocks, input_size, hidden_size, n_hidden, cond_label_size=None, batch_norm=True):
        super().__init__()
        # base distribution for calculation of log prob under the model
        self.register_buffer('base_dist_mean', torch.zeros(input_size))
        self.register_buffer('base_dist_var', torch.ones(input_size))
        # build the flow: coupling layers with the checkerboard mask flipped each block
        modules = []
        mask = torch.arange(input_size).float() % 2
        for _ in range(n_blocks):
            modules.append(LinearMaskedCoupling(input_size, hidden_size, n_hidden, mask, cond_label_size))
            mask = 1 - mask
            if batch_norm:
                modules.append(BatchNorm(input_size))
        self.net = FlowSequential(*modules)

    @property
    def base_dist(self):
        # standard Normal base density; buffers keep it on the module's device
        return D.Normal(self.base_dist_mean, self.base_dist_var)

    def forward(self, x, y=None):
        return self.net(x, y)

    def inverse(self, u, y=None):
        return self.net.inverse(u, y)

    def log_prob(self, x, y=None):
        # change of variables: log p(x) = log N(u) + sum log|det du/dx|, summed over dims
        u, log_dets = self.forward(x, y)
        return torch.sum(self.base_dist.log_prob(u) + log_dets, dim=1)
# --------------------
# Train and evaluate
# --------------------
def train(model, dataloader, optimizer, epoch, args):
    """ Run one epoch of maximum-likelihood training.

    Args:
        model -- module exposing log_prob(x, y) -> (N,) log-likelihoods
        dataloader -- iterable of batches; 1-tuples (x,) for unlabeled data,
                      (x, y) pairs for labeled data
        optimizer -- torch optimizer over model's parameters
        epoch -- int; current epoch index (for logging only)
        args -- namespace with device, cond_label_size, log_interval,
                start_epoch, n_epochs
    """
    # train mode is invariant over the loop; set it once (was re-set every batch)
    model.train()
    for i, data in enumerate(dataloader):
        # check if labeled dataset (unlabeled loaders yield 1-tuples)
        if len(data) == 1:
            x, y = data[0], None
        else:
            x, y = data
            y = y.to(args.device)
        # flatten each sample to a vector and move to the training device
        x = x.view(x.shape[0], -1).to(args.device)
        # NLL loss: maximize mean log-likelihood under the flow
        loss = - model.log_prob(x, y if args.cond_label_size else None).mean(0)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        if i % args.log_interval == 0:
            print('epoch {:3d} / {}, step {:4d} / {}; loss {:.4f}'.format(
                epoch, args.start_epoch + args.n_epochs, i, len(dataloader), loss.item()))
@torch.no_grad()
def evaluate(model, dataloader, epoch, args):
    """ Compute the mean test log-likelihood (with a 2-standard-error bar),
    print it, append it to args.results_file, and return (mean, std).

    For conditional models, log p(x) is computed by marginalizing over a
    uniform label prior: log p(x) = log(1/K) + logsumexp_y log p(x|y).
    """
    model.eval()
    # conditional model
    if args.cond_label_size is not None:
        logprior = torch.tensor(1 / args.cond_label_size).log().to(args.device)
        loglike = [[] for _ in range(args.cond_label_size)]
        for i in range(args.cond_label_size):
            for x, y in dataloader:
                x = x.view(x.shape[0], -1).to(args.device)
                # one-hot labels sized to the actual batch; the original built
                # (args.batch_size, K) labels once, which breaks on a smaller final batch
                labels = torch.zeros(x.shape[0], args.cond_label_size).to(args.device)
                labels[:, i] = 1
                loglike[i].append(model.log_prob(x, labels))
            loglike[i] = torch.cat(loglike[i], dim=0)   # cat along data dim under this label
        loglike = torch.stack(loglike, dim=1)           # cat all data along label dim
        # log p(x) = log ∑_y p(x,y) = log ∑_y p(x|y)p(y)
        # assume uniform prior: log p(x) = log p(y) + log ∑_y p(x|y)
        logprobs = logprior + loglike.logsumexp(dim=1)
        # TODO -- measure accuracy as argmax of the loglike
    # unconditional model
    else:
        logprobs = []
        for data in dataloader:
            x = data[0].view(data[0].shape[0], -1).to(args.device)
            logprobs.append(model.log_prob(x))
        logprobs = torch.cat(logprobs, dim=0).to(args.device)
    # mean and ~95% (2 standard errors) half-width over the test set
    logprob_mean, logprob_std = logprobs.mean(0), 2 * logprobs.var(0).sqrt() / math.sqrt(len(dataloader.dataset))
    output = 'Evaluate ' + (epoch is not None) * '(epoch {}) -- '.format(epoch) + 'logp(x) = {:.3f} +/- {:.3f}'.format(logprob_mean, logprob_std)
    print(output)
    # context manager closes the handle promptly (original leaked open() handles)
    with open(args.results_file, 'a') as f:
        print(output, file=f)
    return logprob_mean, logprob_std
@torch.no_grad()
def generate(model, dataset_lam, args, step=None, n_row=10):
    """ Sample images from the flow and save them as a png grid.

    Draws base-distribution noise, maps it through model.inverse, sorts the
    samples by model log-prob (highest first), undoes the dataset's `lam`
    preprocessing, and writes the grid via save_image.
    NOTE(review): save_image (torchvision) and os are module-level names
    defined outside this excerpt.
    """
    model.eval()
    # conditional model: n_row samples per class label
    if args.cond_label_size:
        samples = []
        labels = torch.eye(args.cond_label_size).to(args.device)
        for i in range(args.cond_label_size):
            # sample model base distribution and run through inverse model to sample data space
            u = model.base_dist.sample((n_row, args.n_components)).squeeze()
            labels_i = labels[i].expand(n_row, -1)
            sample, _ = model.inverse(u, labels_i)
            log_probs = model.log_prob(sample, labels_i).sort(0)[1].flip(0) # sort by log_prob; take argsort idxs; flip high to low
            samples.append(sample[log_probs])
        samples = torch.cat(samples, dim=0)
    # unconditional model: an n_row x n_row grid
    else:
        u = model.base_dist.sample((n_row**2, args.n_components)).squeeze()
        samples, _ = model.inverse(u)
        log_probs = model.log_prob(samples).sort(0)[1].flip(0) # sort by log_prob; take argsort idxs; flip high to low
        samples = samples[log_probs]
    # convert and save images: reshape flat vectors back to image dims, then
    # invert the preprocessing x = (sigmoid(z) - lam) / (1 - 2*lam)
    # (presumably undoing a logit(lam + (1-2*lam)*x) dequantization -- confirm against the dataset code)
    samples = samples.view(samples.shape[0], *args.input_dims)
    samples = (torch.sigmoid(samples) - dataset_lam) / (1 - 2 * dataset_lam)
    filename = 'generated_samples' + (step != None)*'_epoch_{}'.format(step) + '.png'
    save_image(samples, os.path.join(args.output_dir, filename), nrow=n_row, normalize=True)
def train_and_evaluate(model, train_loader, test_loader, optimizer, args):
    """ Full training loop: one train + evaluate pass per epoch, with
    checkpointing (latest and best-by-test-logprob) and per-epoch sample
    plots for the TOY / MNIST datasets. """
    best_eval_logprob = float('-inf')
    for i in range(args.start_epoch, args.start_epoch + args.n_epochs):
        train(model, train_loader, optimizer, i, args)
        eval_logprob, _ = evaluate(model, test_loader, i, args)
        # save training checkpoint (overwritten every epoch; allows resuming)
        torch.save({'epoch': i,
                    'model_state': model.state_dict(),
                    'optimizer_state': optimizer.state_dict()},
                    os.path.join(args.output_dir, 'model_checkpoint.pt'))
        # save model only (weights without the optimizer state)
        torch.save(model.state_dict(), os.path.join(args.output_dir, 'model_state.pt'))
        # save best state by test-set log-likelihood
        if eval_logprob > best_eval_logprob:
            best_eval_logprob = eval_logprob
            torch.save({'epoch': i,
                        'model_state': model.state_dict(),
                        'optimizer_state': optimizer.state_dict()},
                        os.path.join(args.output_dir, 'best_model_checkpoint.pt'))
        # plot sample for the datasets that support it
        if args.dataset == 'TOY':
            plot_sample_and_density(model, train_loader.dataset.base_dist, args, step=i)
        if args.dataset == 'MNIST':
            generate(model, train_loader.dataset.lam, args, step=i)
# --------------------
# Plot
# --------------------
def plot_density(dist, ax, ranges, flip_var_order=False):
    """ Draw contours of dist's density over the 2D window given by ranges.

    ranges is ((xmin, xmax), (ymin, ymax)); flip_var_order swaps the two
    coordinates before evaluating dist.log_prob.
    """
    (xmin, xmax), (ymin, ymax) = ranges
    # sample a uniform n x n grid over the plotting window
    n = 200
    grid_x = torch.linspace(xmin, xmax, n)
    grid_y = torch.linspace(ymin, ymax, n)
    xx, yy = torch.meshgrid(grid_x, grid_y)
    xy = torch.stack((xx.flatten(), yy.flatten()), dim=-1).squeeze()
    if flip_var_order:
        xy = xy.flip(1)
    # evaluate the model density on the grid and draw its contours
    ax.contour(xx, yy, dist.log_prob(xy).exp().view(n, n).data.numpy())
    # format the axis to the plotting window
    ax.set_xlim(xmin, xmax)
    ax.set_ylim(ymin, ymax)
    ax.set_xticks([xmin, xmax])
    ax.set_yticks([ymin, ymax])
def plot_dist_sample(data, ax, ranges):
    """ Scatter-plot a 2D sample and clamp the axis to the given ranges. """
    (xmin, xmax), (ymin, ymax) = ranges
    ax.scatter(data[:, 0].data.numpy(), data[:, 1].data.numpy(), s=10, alpha=0.4)
    # format the axis to the plotting window
    ax.set_xlim(xmin, xmax)
    ax.set_ylim(ymin, ymax)
    ax.set_xticks([xmin, xmax])
    ax.set_yticks([ymin, ymax])
def plot_sample_and_density(model, target_dist, args, ranges_density=[[-5,20],[-10,10]], ranges_sample=[[-4,4],[-4,4]], step=None):
    """ Side-by-side figure: the model's density contours (left) and a target
    sample mapped through the model into the base space (right); saved to
    args.output_dir.
    NOTE(review): mutable (list) default arguments -- harmless here since they
    are never mutated, but fragile if that changes.
    """
    model.eval()
    fig, axs = plt.subplots(1, 2, figsize=(6,3))
    # sample target distribution and pass through model (forward maps data -> base space)
    data = target_dist.sample((2000,))
    u, _ = model(data)
    # plot density and sample; requires args.flip_var_order to be set
    plot_density(model, axs[0], ranges_density, args.flip_var_order)
    plot_dist_sample(u, axs[1], ranges_sample)
    # format and save (rcParams update is global to matplotlib)
    matplotlib.rcParams.update({'xtick.labelsize': 'xx-small', 'ytick.labelsize': 'xx-small'})
    plt.tight_layout()
    plt.savefig(os.path.join(args.output_dir, 'sample' + (step != None)*'_epoch_{}'.format(step) + '.png'))
    plt.close()
# --------------------
# Run
# --------------------
# Script entry point: parse CLI args, build dataloaders and the selected flow
# model, then train / evaluate / generate according to the action flags.
# (parser, fetch_dataloaders, os and pprint are module-level names defined
# earlier in this file / its imports, outside this excerpt.)
if __name__ == '__main__':
    args = parser.parse_args()
    # setup file ops
    if not os.path.isdir(args.output_dir):
        os.makedirs(args.output_dir)
    # setup device and seed RNGs for reproducibility
    args.device = torch.device('cuda:0' if torch.cuda.is_available() and not args.no_cuda else 'cpu')
    torch.manual_seed(args.seed)
    if args.device.type == 'cuda': torch.cuda.manual_seed(args.seed)
    # load data; conditional models require a labeled dataset
    if args.conditional: assert args.dataset in ['MNIST', 'CIFAR10'], 'Conditional inputs only available for labeled datasets MNIST and CIFAR10.'
    train_dataloader, test_dataloader = fetch_dataloaders(args.dataset, args.batch_size, args.device, args.flip_toy_var_order)
    args.input_size = train_dataloader.dataset.input_size
    args.input_dims = train_dataloader.dataset.input_dims
    args.cond_label_size = train_dataloader.dataset.label_size if args.conditional else None
    # model selection
    if args.model == 'made':
        model = MADE(args.input_size, args.hidden_size, args.n_hidden, args.cond_label_size,
                     args.activation_fn, args.input_order)
    elif args.model == 'mademog':
        assert args.n_components > 1, 'Specify more than 1 component for mixture of gaussians models.'
        model = MADEMOG(args.n_components, args.input_size, args.hidden_size, args.n_hidden, args.cond_label_size,
                        args.activation_fn, args.input_order)
    elif args.model == 'maf':
        model = MAF(args.n_blocks, args.input_size, args.hidden_size, args.n_hidden, args.cond_label_size,
                    args.activation_fn, args.input_order, batch_norm=not args.no_batch_norm)
    elif args.model == 'mafmog':
        assert args.n_components > 1, 'Specify more than 1 component for mixture of gaussians models.'
        model = MAFMOG(args.n_blocks, args.n_components, args.input_size, args.hidden_size, args.n_hidden, args.cond_label_size,
                       args.activation_fn, args.input_order, batch_norm=not args.no_batch_norm)
    elif args.model =='realnvp':
        model = RealNVP(args.n_blocks, args.input_size, args.hidden_size, args.n_hidden, args.cond_label_size,
                        batch_norm=not args.no_batch_norm)
    else:
        raise ValueError('Unrecognized model.')
    model = model.to(args.device)
    optimizer = torch.optim.Adam(model.parameters(), lr=args.lr, weight_decay=1e-6)
    if args.restore_file:
        # load model and optimizer states, resuming from the checkpoint's epoch
        state = torch.load(args.restore_file, map_location=args.device)
        model.load_state_dict(state['model_state'])
        optimizer.load_state_dict(state['optimizer_state'])
        args.start_epoch = state['epoch'] + 1
        # set up paths so outputs land next to the restored checkpoint
        args.output_dir = os.path.dirname(args.restore_file)
    args.results_file = os.path.join(args.output_dir, args.results_file)
    print('Loaded settings and model:')
    print(pprint.pformat(args.__dict__))
    print(model)
    # NOTE(review): these open() handles are never closed explicitly
    print(pprint.pformat(args.__dict__), file=open(args.results_file, 'a'))
    print(model, file=open(args.results_file, 'a'))
    if args.train:
        train_and_evaluate(model, train_dataloader, test_dataloader, optimizer, args)
    if args.evaluate:
        evaluate(model, test_dataloader, None, args)
    if args.generate:
        if args.dataset == 'TOY':
            base_dist = train_dataloader.dataset.base_dist
            plot_sample_and_density(model, base_dist, args, ranges_density=[[-15,4],[-3,3]], ranges_sample=[[-1.5,1.5],[-3,3]])
        elif args.dataset == 'MNIST':
            generate(model, train_dataloader.dataset.lam, args)
| 31,985 | 41.762032 | 169 | py |
normalizing_flows | normalizing_flows-master/planar_flow.py | """
Variational Inference with Normalizing Flows
arXiv:1505.05770v6
"""
import torch
import torch.nn as nn
import torch.distributions as D
import math
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import os
import argparse
# Command-line interface; flags are grouped by purpose below.
# NOTE(review): choices 'u_z0' and 'u_z5' have no corresponding potential
# lambdas defined in this file -- vars() lookup in __main__ would raise KeyError.
parser = argparse.ArgumentParser()
# action
parser.add_argument('--train', action='store_true', help='Train a flow.')
parser.add_argument('--evaluate', action='store_true', help='Evaluate a flow.')
parser.add_argument('--plot', action='store_true', help='Plot a flow and target density.')
parser.add_argument('--restore_file', type=str, help='Path to model to restore.')
parser.add_argument('--output_dir', default='.', help='Path to output folder.')
parser.add_argument('--no_cuda', action='store_true', help='Do not use cuda.')
# target potential
parser.add_argument('--target_potential', choices=['u_z0', 'u_z5', 'u_z1', 'u_z2', 'u_z3', 'u_z4'], help='Which potential function to approximate.')
# flow params
parser.add_argument('--base_sigma', type=float, default=4, help='Std of the base isotropic 0-mean Gaussian distribution.')
parser.add_argument('--learn_base', default=False, action='store_true', help='Whether to learn a mu-sigma affine transform of the base distribution.')
parser.add_argument('--flow_length', type=int, default=2, help='Length of the flow.')
# training params
parser.add_argument('--init_sigma', type=float, default=1, help='Initialization std for the trainable flow parameters.')
parser.add_argument('--batch_size', type=int, default=100)
parser.add_argument('--start_step', type=int, default=0, help='Starting step (if resuming training will be overwrite from filename).')
parser.add_argument('--n_steps', type=int, default=1000000, help='Optimization steps.')
parser.add_argument('--lr', type=float, default=1e-5, help='Learning rate.')
parser.add_argument('--weight_decay', type=float, default=1e-3, help='Weight decay.')
parser.add_argument('--beta', type=float, default=1, help='Multiplier for the target potential loss.')
parser.add_argument('--seed', type=int, default=2, help='Random seed.')
# --------------------
# Flow
# --------------------
class PlanarTransform(nn.Module):
    """ Planar flow transform f(z) = z + u * tanh(w @ z + b) (NF paper eq 10),
    for 2D z, with u re-parameterized so the map is invertible. """
    def __init__(self, init_sigma=0.01):
        super().__init__()
        # u, w in R^{1x2}, scalar b; small random init keeps the transform near identity
        self.u = nn.Parameter(torch.randn(1, 2).normal_(0, init_sigma))
        self.w = nn.Parameter(torch.randn(1, 2).normal_(0, init_sigma))
        self.b = nn.Parameter(torch.randn(1).fill_(0))
    def forward(self, x, normalize_u=True):
        # allow for a single forward pass over all the transforms in the flows with a Sequential container:
        # a (z, running log-det) tuple from the previous layer is unpacked, a raw tensor starts the sum at 0
        if isinstance(x, tuple):
            z, sum_log_abs_det_jacobians = x
        else:
            z, sum_log_abs_det_jacobians = x, 0
        # normalize u s.t. w @ u >= -1; sufficient condition for invertibility (NF paper appendix)
        u_hat = self.u
        if normalize_u:
            wtu = (self.w @ self.u.t()).squeeze()
            m_wtu = - 1 + torch.log1p(wtu.exp())   # m(a) = -1 + softplus(a) >= -1
            u_hat = self.u + (m_wtu - wtu) * self.w / (self.w @ self.w.t())
        # compute transform f(z)
        f_z = z + u_hat * torch.tanh(z @ self.w.t() + self.b)
        # compute log_abs_det_jacobian: psi = tanh'(w@z+b) * w, det = 1 + psi @ u_hat (NF paper eqs 11-12)
        psi = (1 - torch.tanh(z @ self.w.t() + self.b)**2) @ self.w
        det = 1 + psi @ u_hat.t()
        log_abs_det_jacobian = torch.log(torch.abs(det) + 1e-6).squeeze()   # 1e-6 guards log(0)
        sum_log_abs_det_jacobians = sum_log_abs_det_jacobians + log_abs_det_jacobian
        return f_z, sum_log_abs_det_jacobians
class AffineTransform(nn.Module):
    """ Elementwise affine map z = mu + exp(logsigma) * x over 2D inputs.

    Both parameters are zero-initialized (identity transform) and are only
    trainable when `learnable` is True.
    """
    def __init__(self, learnable=False):
        super().__init__()
        self.mu = nn.Parameter(torch.zeros(2)).requires_grad_(learnable)
        self.logsigma = nn.Parameter(torch.zeros(2)).requires_grad_(learnable)

    def forward(self, x):
        out = self.mu + self.logsigma.exp() * x
        # log|det J| of an elementwise affine map is the sum of the log scales
        return out, self.logsigma.sum()
# --------------------
# Test energy functions -- NF paper table 1
# --------------------
# Helper shape functions used by the potentials below (NF paper table 1)
w1 = lambda z: torch.sin(2 * math.pi * z[:,0] / 4)
w2 = lambda z: 3 * torch.exp(-0.5 * ((z[:,0] - 1)/0.6)**2)
w3 = lambda z: 3 * torch.sigmoid((z[:,0] - 1) / 0.3)
# Unnormalized energies U(z); the target density is p(z) ∝ exp(-U(z)).
# The 1e-10 terms guard log(0) when both mixture components underflow.
u_z1 = lambda z: 0.5 * ((torch.norm(z, p=2, dim=1) - 2) / 0.4)**2 - \
                torch.log(torch.exp(-0.5*((z[:,0] - 2) / 0.6)**2) + torch.exp(-0.5*((z[:,0] + 2) / 0.6)**2) + 1e-10)
u_z2 = lambda z: 0.5 * ((z[:,1] - w1(z)) / 0.4)**2
u_z3 = lambda z: - torch.log(torch.exp(-0.5*((z[:,1] - w1(z))/0.35)**2) + torch.exp(-0.5*((z[:,1] - w1(z) + w2(z))/0.35)**2) + 1e-10)
u_z4 = lambda z: - torch.log(torch.exp(-0.5*((z[:,1] - w1(z))/0.4)**2) + torch.exp(-0.5*((z[:,1] - w1(z) + w3(z))/0.35)**2) + 1e-10)
# --------------------
# Training
# --------------------
def optimize_flow(base_dist, flow, target_energy_potential, optimizer, args):
    """ Fit the flow by stochastic minimization of the (annealed) free energy
    E_q0 [ log q0(z) - sum log|det J| - beta * temp(i) * log p(z_K) ],
    where p = exp(-target_energy_potential). Logs progress, checkpoints the
    flow, and saves a diagnostic plot every 10000 steps.
    """
    # anneal rate for free energy: temperature ramps 0.01 -> 1 over the first ~10k steps
    temp = lambda i: min(1, 0.01 + i/10000)
    for i in range(args.start_step, args.n_steps):
        # sample base dist
        z = base_dist.sample((args.batch_size, )).to(args.device)
        # pass through flow:
        # 1. compute expected log_prob of data under base dist -- nothing tied to parameters here so irrelevant to grads
        base_log_prob = base_dist.log_prob(z)
        # 2. compute sum of log_abs_det_jacobian through the flow
        zk, sum_log_abs_det_jacobians = flow(z)
        # 3. compute expected log_prob of z_k the target_energy potential
        p_log_prob = - temp(i) * target_energy_potential(zk) # p = exp(-potential) ==> p_log_prob = - potential
        # free energy bound, averaged over the batch
        loss = base_log_prob - sum_log_abs_det_jacobians - args.beta * p_log_prob
        loss = loss.mean(0)
        # compute loss and optimize
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        if i % 10000 == 0:
            # display loss and diagnostics (q_k is the flow-transformed density)
            log_qk = base_dist.log_prob(z) - sum_log_abs_det_jacobians
            print('{}: step {:5d} / {}; loss {:.3f}; base_log_prob {:.3f}, sum log dets {:.3f}, p_log_prob {:.3f}, max base = {:.3f}; max qk = {:.3f} \
                    zk_mean {}, zk_sigma {}; base_mu {}, base_log_sigma {}'.format(
                args.target_potential, i, args.n_steps, loss.item(), base_log_prob.mean(0).item(), sum_log_abs_det_jacobians.mean(0).item(),
                p_log_prob.mean(0).item(), base_log_prob.exp().max().item(), log_qk.exp().max().item(),
                zk.mean(0).cpu().data.numpy(), zk.var(0).sqrt().cpu().data.numpy(),
                base_dist.loc.cpu().data.numpy() if not args.learn_base else flow[0].mu.cpu().data.numpy(),
                base_dist.covariance_matrix.cpu().diag().data.numpy() if not args.learn_base else flow[0].logsigma.cpu().data.numpy()))
            # save model (filename encodes flow length so restore can recover it)
            torch.save({'step': i,
                        'flow_state': flow.state_dict(),
                        'optimizer_state': optimizer.state_dict()},
                        os.path.join(args.output_dir, 'model_state_flow_length_{}.pt'.format(args.flow_length)))
            # plot and save results
            with torch.no_grad():
                plot_flow(base_dist, flow, os.path.join(args.output_dir, 'approximating_flow_step{}.png'.format(i)), args)
# --------------------
# Plotting
# --------------------
def plot_flow(base_dist, flow, filename, args):
    """ 2x2 diagnostic figure saved to `filename`: target density, flow density,
    and the flow-transformed base sample as scatter + 2d histogram.
    (u_z and the plot helpers are module-level names.)
    """
    n = 200
    lim = 4
    fig, axs = plt.subplots(2, 2, subplot_kw={'aspect': 'equal'})
    # plot target density we're trying to approx
    plot_target_density(u_z, axs[0,0], lim, n)
    # plot posterior approx density
    plot_flow_density(base_dist, flow, axs[0,1], lim, n)
    # plot flow-transformed base dist sample and histogram
    z = base_dist.sample((10000,))
    zk, _ = flow(z)
    zk = zk.cpu().data.numpy()
    axs[1,0].scatter(zk[:,0], zk[:,1], s=10, alpha=0.4)
    axs[1,1].hist2d(zk[:,0], zk[:,1], bins=lim*50, cmap=plt.cm.jet)
    # common formatting applied to every axis in the figure
    for ax in plt.gcf().axes:
        ax.get_xaxis().set_visible(True)
        ax.get_yaxis().set_visible(True)
        ax.invert_yaxis()
    plt.tight_layout()
    plt.savefig(filename)
    plt.close()
def plot_target_density(u_z, ax, range_lim=4, n=200, output_dir=None):
    """ Render exp(-u_z) on an n x n grid over [-range_lim, range_lim]^2;
    optionally saves the figure when output_dir is given.
    NOTE(review): reads the module-level `args` for the device (not a
    parameter), so the script's __main__ setup must have run first.
    """
    x = torch.linspace(-range_lim, range_lim, n)
    xx, yy = torch.meshgrid((x, x))
    zz = torch.stack((xx.flatten(), yy.flatten()), dim=-1).squeeze().to(args.device)
    ax.pcolormesh(xx, yy, torch.exp(-u_z(zz)).view(n,n).data, cmap=plt.cm.jet)
    # format every axis in the current figure
    # NOTE(review): the loop variable shadows the `ax` parameter
    for ax in plt.gcf().axes:
        ax.set_xlim(-range_lim, range_lim)
        ax.set_ylim(-range_lim, range_lim)
        ax.get_xaxis().set_visible(False)
        ax.get_yaxis().set_visible(False)
        ax.invert_yaxis()
    if output_dir:
        plt.tight_layout()
        plt.savefig(os.path.join(output_dir, 'target_potential_density.png'))
        plt.close()
def plot_flow_density(base_dist, flow, ax, range_lim=4, n=200, output_dir=None):
    """ Render the flow's implied density q_K on an n x n grid using the change
    of variables log q_K(z_K) = log q_0(z_0) - sum log|det J|; optionally saves
    the figure when output_dir is given.
    NOTE(review): reads the module-level `args` for the device (not a parameter).
    """
    x = torch.linspace(-range_lim, range_lim, n)
    xx, yy = torch.meshgrid((x, x))
    zz = torch.stack((xx.flatten(), yy.flatten()), dim=-1).squeeze().to(args.device)
    # plot posterior approx density: grid points are pushed through the flow,
    # so the mesh is drawn at the transformed coordinates zzk
    zzk, sum_log_abs_det_jacobians = flow(zz)
    log_q0 = base_dist.log_prob(zz)
    log_qk = log_q0 - sum_log_abs_det_jacobians
    qk = log_qk.exp().cpu()
    zzk = zzk.cpu()
    ax.pcolormesh(zzk[:,0].view(n,n).data, zzk[:,1].view(n,n).data, qk.view(n,n).data, cmap=plt.cm.jet)
    ax.set_facecolor(plt.cm.jet(0.))
    # format every axis in the current figure (loop variable shadows the `ax` parameter)
    for ax in plt.gcf().axes:
        ax.set_xlim(-range_lim, range_lim)
        ax.set_ylim(-range_lim, range_lim)
        ax.get_xaxis().set_visible(False)
        ax.get_yaxis().set_visible(False)
        ax.invert_yaxis()
    if output_dir:
        plt.tight_layout()
        plt.savefig(os.path.join(output_dir, 'flow_k{}_density.png'.format(len(flow)-1)))
        plt.close()
# --------------------
# Run
# --------------------
# Script entry point: build the flow (affine base transform + planar layers),
# optionally restore a checkpoint, then train / evaluate / plot per the flags.
if __name__ == '__main__':
    args = parser.parse_args()
    args.device = torch.device('cuda:0' if torch.cuda.is_available() and not args.no_cuda else 'cpu')
    torch.manual_seed(args.seed)
    if args.device.type == 'cuda': torch.cuda.manual_seed(args.seed)
    # setup flow
    # NOTE(review): PlanarTransform() uses its default init_sigma=0.01 here; the
    # --init_sigma flag is only honored in the checkpoint-compat branch below -- confirm intended
    flow = nn.Sequential(AffineTransform(args.learn_base), *[PlanarTransform() for _ in range(args.flow_length)]).to(args.device)
    # setup target potential to approx (looked up by name among the u_z* lambdas above)
    u_z = vars()[args.target_potential]
    # setup base distribution (base_sigma multiplies the covariance matrix)
    base_dist = D.MultivariateNormal(torch.zeros(2).to(args.device), args.base_sigma * torch.eye(2).to(args.device))
    if args.restore_file:
        # get filename
        filename = os.path.basename(args.restore_file)
        # recover flow length from the checkpoint filename ('..._length_{K}.pt')
        args.flow_length = int(filename.partition('length_')[-1].rpartition('.')[0])
        # reset output dir so outputs land next to the restored checkpoint
        args.output_dir = os.path.dirname(args.restore_file)
        # load state
        state = torch.load(args.restore_file, map_location=args.device)
        # compatibility code;
        # 1/ earlier models did not include step and optimizer checkpoints;
        try:
            flow_state = state['flow_state']
            optimizer_state = state['optimizer_state']
            args.start_step = state['step']
        except KeyError:
            # if state is not a dict, load just the model state
            flow_state = state
            optimizer_state = None
        # 2/ some saved checkpoints may not have a first affine layer
        try:
            flow_state['0.mu']
        except KeyError:
            # if no first affine layer, reload a flow model without one
            flow = nn.Sequential(*[PlanarTransform(args.init_sigma) for _ in range(args.flow_length)])
        flow.load_state_dict(flow_state)
    if not os.path.isdir(args.output_dir):
        os.makedirs(args.output_dir)
    if args.train:
        optimizer = torch.optim.RMSprop(flow.parameters(), lr=args.lr, momentum=0.9, alpha=0.90, eps=1e-6, weight_decay=args.weight_decay)
        if args.restore_file and optimizer_state:
            optimizer.load_state_dict(optimizer_state)
        # continue counting steps from the restored start_step
        args.n_steps = args.start_step + args.n_steps
        optimize_flow(base_dist, flow, u_z, optimizer, args)
    if args.evaluate:
        plot_flow(base_dist, flow, os.path.join(args.output_dir, 'approximating_flow.png'), args)
    if args.plot:
        plot_target_density(u_z, plt.gca(), output_dir=args.output_dir)
        plot_flow_density(base_dist, flow, plt.gca(), output_dir=args.output_dir)
| 12,324 | 39.811258 | 151 | py |
normalizing_flows | normalizing_flows-master/datasets/moons.py | import torch
import torch.distributions as D
from torch.utils.data import Dataset
from sklearn.datasets import make_moons
class MOONS(Dataset):
    """Two-moons toy dataset backed by sklearn's ``make_moons`` generator.

    All samples are drawn once at construction time with fixed noise 0.05;
    ``__getitem__`` just indexes the pre-generated arrays.
    """

    def __init__(self, dataset_size=25000, **kwargs):
        # Draw the whole noisy two-moons point cloud up front.
        self.x, self.y = make_moons(n_samples=dataset_size, shuffle=True, noise=0.05)
        self.input_size = 2   # 2-D points
        self.label_size = 2   # two moon classes
        self.dataset_size = dataset_size

    def __len__(self):
        return self.dataset_size

    def __getitem__(self, i):
        sample = self.x[i]
        target = self.y[i]
        return sample, target
| 512 | 20.375 | 85 | py |
normalizing_flows | normalizing_flows-master/datasets/toy.py | import torch
import torch.distributions as D
from torch.utils.data import Dataset
class ToyDistribution(D.Distribution):
    """Banana-shaped 2-D toy density: x2 ~ N(0, 4) and x1 | x2 ~ N(0.25 * x2**2, 1).

    With ``flip_var_order=True`` the coordinates are stored as (x2, x1)
    instead of (x1, x2), both when sampling and when evaluating log_prob.
    """

    def __init__(self, flip_var_order):
        super().__init__()
        self.flip_var_order = flip_var_order
        # Marginal over x2 and the conditional over x1 given x2.
        self.p_x2 = D.Normal(0, 4)
        self.p_x1 = lambda x2: D.Normal(0.25 * x2**2, 1)

    def rsample(self, sample_shape=torch.Size()):
        # Ancestral sampling: draw x2 first, then x1 conditioned on it.
        x2 = self.p_x2.sample(sample_shape)
        x1 = self.p_x1(x2).sample()
        ordered = (x2, x1) if self.flip_var_order else (x1, x2)
        return torch.stack(ordered, dim=-1).squeeze()

    def log_prob(self, value):
        # Undo the flipped storage order so columns are (x1, x2) again.
        if self.flip_var_order:
            value = value.flip(1)
        cond = self.p_x1(value[:, 1]).log_prob(value[:, 0])
        marg = self.p_x2.log_prob(value[:, 1])
        return cond + marg
class TOY(Dataset):
    """Dataset wrapper around :class:`ToyDistribution`; labels are dummy zeros."""

    def __init__(self, dataset_size=25000, flip_var_order=False):
        self.input_size = 2
        self.label_size = 1
        self.dataset_size = dataset_size
        self.base_dist = ToyDistribution(flip_var_order)

    def __len__(self):
        return self.dataset_size

    def __getitem__(self, i):
        # A fresh sample is drawn on every access; the label is a placeholder.
        sample = self.base_dist.sample()
        dummy_label = torch.zeros(self.label_size)
        return sample, dummy_label
| 1,214 | 27.255814 | 90 | py |
normalizing_flows | normalizing_flows-master/datasets/__init__.py | root = 'data/'
#from .power import POWER
#from .gas import GAS
#from .hepmass import HEPMASS
#from .miniboone import MINIBOONE
#from .bsds300 import BSDS300
#from .toy import TOY
#from .moons import MOONS
#from .mnist import MNIST
#from torchvision.datasets import MNIST, CIFAR10
| 283 | 19.285714 | 48 | py |
normalizing_flows | normalizing_flows-master/datasets/celeba.py | import os
from PIL import Image
import numpy as np
import torch
from torch.utils.data import Dataset
class CelebA(Dataset):
    """CelebA face-attributes dataset (40 binary attributes per image).

    On first use, parses the official attribute/partition annotation files,
    builds per-split ``[filename, attribute-vector]`` lists and caches them to
    ``processed.pt``; subsequent constructions load the cache directly.
    __getitem__ returns ``(PIL image or transformed image, attr tensor)``.
    """
    processed_file = 'processed.pt'
    partition_file = 'Eval/list_eval_partition.txt'
    attr_file = 'Anno/list_attr_celeba.txt'
    img_folder = 'Img/img_align_celeba'
    # The 40 official CelebA attribute names, in annotation-file column order.
    attr_names = '5_o_Clock_Shadow Arched_Eyebrows Attractive Bags_Under_Eyes Bald Bangs Big_Lips Big_Nose Black_Hair Blond_Hair Blurry Brown_Hair Bushy_Eyebrows Chubby Double_Chin Eyeglasses Goatee Gray_Hair Heavy_Makeup High_Cheekbones Male Mouth_Slightly_Open Mustache Narrow_Eyes No_Beard Oval_Face Pale_Skin Pointy_Nose Receding_Hairline Rosy_Cheeks Sideburns Smiling Straight_Hair Wavy_Hair Wearing_Earrings Wearing_Hat Wearing_Lipstick Wearing_Necklace Wearing_Necktie Young'.split()
    def __init__(self, root, train=True, transform=None, mini_data_size=None):
        """root: dataset parent dir; train: select train vs val split;
        transform: optional transform applied to the PIL image;
        mini_data_size: truncate the split (useful for debugging)."""
        self.root = os.path.join(os.path.expanduser(root), self.__class__.__name__)
        self.transform = transform
        # Build the processed cache on first use.
        if not os.path.exists(os.path.join(self.root, self.processed_file)):
            self._process_and_save()
        data = torch.load(os.path.join(self.root, self.processed_file))
        if train:
            self.data = data['train']
        else:
            self.data = data['val']
        if mini_data_size is not None:
            self.data = self.data[:mini_data_size]
    def __getitem__(self, idx):
        filename, attr = self.data[idx]
        img = Image.open(os.path.join(self.root, self.img_folder, filename)) # loads in RGB mode
        if self.transform is not None:
            img = self.transform(img)
        attr = torch.from_numpy(attr)
        return img, attr
    def __len__(self):
        return len(self.data)
    def _process_and_save(self):
        """Parse annotation files; cache per-split [filename, attrs] lists to disk.

        Raises RuntimeError if any of the three expected inputs is missing.
        """
        if not os.path.exists(os.path.join(self.root, self.attr_file)):
            raise RuntimeError('Dataset attributes file not found at {}.'.format(os.path.join(self.root, self.attr_file)))
        if not os.path.exists(os.path.join(self.root, self.partition_file)):
            raise RuntimeError('Dataset evaluation partitions file not found at {}.'.format(os.path.join(self.root, self.partition_file)))
        if not os.path.isdir(os.path.join(self.root, self.img_folder)):
            raise RuntimeError('Dataset image folder not found at {}.'.format(os.path.join(self.root, self.img_folder)))
        # attributes file layout: row 1 = number of images,
        # row 2 = attribute names, remaining rows = <image_id> <attribute_labels>
        with open(os.path.join(self.root, self.attr_file), 'r') as f:
            lines = f.readlines()
            n_files = int(lines[0])
            attr = [[l.split()[0], l.split()[1:]] for l in lines[2:]] # [image_id.jpg, <attr_labels>]
            assert len(attr) == n_files, \
                'Mismatch b/n num entries in attributes file {} and reported num files {}'.format(len(attr), n_files)
        # partition file layout: <image_id> <evaluation_status>
        # "0" = training, "1" = validation, "2" = testing
        data = [[], [], []] # train, val, test
        unmatched = 0
        with open(os.path.join(self.root, self.partition_file), 'r') as f:
            lines = f.readlines()
            for i, line in enumerate(lines):
                fname, split = line.split()
                if attr[i][0] != fname:
                    unmatched += 1
                    continue
                data[int(split)].append([fname, np.array(attr[i][1], dtype=np.float32)]) # [image_id.jpg, <attr_labels>] by train/val/test
        if unmatched > 0: print('Unmatched partition filenames to attribute filenames: ', unmatched)
        # BUG FIX: the original assertion message formatted the undefined name
        # `filenames` (only bound further below), so a count mismatch raised
        # NameError instead of the intended diagnostic.
        total_entries = sum(len(s) for s in data)
        assert total_entries == n_files, \
            'Mismatch b/n num entries in partition {} and reported num files {}'.format(total_entries, n_files)
        # check image folder
        filenames = os.listdir(os.path.join(self.root, self.img_folder))
        assert len(filenames) == n_files, \
            'Mismatch b/n num files in image folder {} and report num files {}'.format(len(filenames), n_files)
        # save one dict keyed by split name
        data = {'train': data[0], 'val': data[1], 'test': data[2]}
        with open(os.path.join(self.root, self.processed_file), 'wb') as f:
            torch.save(data, f)
if __name__ == '__main__':
    # Smoke test: load the train split and print basic info for one sample.
    d = CelebA('~/Data/')
    print('Length: ', len(d))
    print('Image: ', d[0][0])
    print('Attr: ', d[0][1])
    import timeit
    # Time 1000 random retrievals to gauge per-item loading cost.
    t = timeit.timeit('d[np.random.randint(0,len(d))]', number=1000, globals=globals())
    print('Retrieval time: ', t)
    import torchvision.transforms as T
    import matplotlib.pyplot as plt
    # Visual check of the RealNVP-style preprocessing pipeline:
    # crop/resize, reduce to n_bits of color depth, then dequantize
    # with uniform noise in the last quantization bin.
    n_bits = 5
    t = T.Compose([T.CenterCrop(148), # RealNVP preprocessing
                   T.Resize(64),
                   T.Lambda(lambda im: np.array(im, dtype=np.float32)), # to numpy
                   T.Lambda(lambda x: np.floor(x / 2**(8 - n_bits)) / 2**n_bits), # lower bits
                   T.ToTensor(),
                   T.Lambda(lambda t: t + torch.rand(t.shape)/ 2**n_bits)]) # dequantize
    d_ = CelebA('~/Data/', transform=t)
    # Side-by-side: raw PIL image vs preprocessed tensor (CHW -> HWC for imshow).
    fig, axs = plt.subplots(1,2)
    axs[0].imshow(np.array(d[0][0]))
    axs[1].imshow(d_[0][0].numpy().transpose(1,2,0))
    plt.show()
SpinalNet | SpinalNet-master/Regression/Regression_NN_and_SpinalNet.py | # -*- coding: utf-8 -*-
"""
This script performs regression on toy datasets.
There exist several relations between inputs and output.
We investigate both of the traditional feed-forward and SpinalNet
for all of these input-output relations.
----------
Multiplication:
y = x1*x2*x3*x4*x5*x6*x7*x8 + 0.2*torch.rand(x1.size())
Spinal
Epoch [100/200], Loss: 0.0573, Minimum Loss 0.003966
Epoch [200/200], Loss: 0.0170, Minimum Loss 0.002217
Normal
Epoch [100/200], Loss: 0.0212, Minimum Loss 0.003875
Epoch [200/200], Loss: 0.0373, Minimum Loss 0.003875
Sine multiplication:
y = torch.sin(x1*x2*x3*x4*x5*x6*x7*x8) + 0.2*torch.rand(x1.size())
Spinal
Epoch [100/200], Loss: 0.0013, Minimum Loss 0.000910
Epoch [200/200], Loss: 0.0023, Minimum Loss 0.000910
Normal
Epoch [100/200], Loss: 0.0090, Minimum Loss 0.003403
Epoch [200/200], Loss: 0.0041, Minimum Loss 0.001554
Addition:
y = (x1+x2+x3+x4+x5+x6+x7+x8) + 0.2*torch.rand(x1.size())
Spinal
Epoch [100/200], Loss: 0.0038, Minimum Loss 0.001007
Epoch [200/200], Loss: 0.0022, Minimum Loss 0.000855
Normal
Epoch [100/200], Loss: 0.0024, Minimum Loss 0.001178
Epoch [200/200], Loss: 0.0021, Minimum Loss 0.000887
Sine Addition:
y = torch.sin(x1+x2+x3+x4+x5+x6+x7+x8) + 0.2*torch.rand(x1.size())
Spinal
Epoch [100/200], Loss: 0.0254, Minimum Loss 0.001912
Epoch [200/200], Loss: 0.0029, Minimum Loss 0.001219
Normal
Epoch [100/200], Loss: 0.0019, Minimum Loss 0.001918
Epoch [200/200], Loss: 0.0038, Minimum Loss 0.001086
@author: Dipu
"""
import torch
from torch.autograd import Variable
import torch.nn.functional as F
import torch.utils.data as Data
import matplotlib.pyplot as plt
import numpy as np
#import imageio
torch.manual_seed(0)
size_x=1000
# Eight independent standard-normal input features, each shaped (size_x, 1).
x1 = torch.unsqueeze(torch.randn(size_x), dim=1)
x2 = torch.unsqueeze(torch.randn(size_x), dim=1)
x3 = torch.unsqueeze(torch.randn(size_x), dim=1)
x4 = torch.unsqueeze(torch.randn(size_x), dim=1)
x5 = torch.unsqueeze(torch.randn(size_x), dim=1)
x6 = torch.unsqueeze(torch.randn(size_x), dim=1)
x7 = torch.unsqueeze(torch.randn(size_x), dim=1)
x8 = torch.unsqueeze(torch.randn(size_x), dim=1)
# Each spinal layer below consumes this many of the 8 input columns.
half_in_size=4
# Target: product of all eight features plus uniform noise.
# NOTE(review): the product is (size_x, 1) but torch.rand(size_x) is (size_x,),
# so broadcasting makes y (size_x, size_x); the module docstring shows
# 0.2*torch.rand(x1.size()) which yields the intended (size_x, 1) -- confirm.
y = (x1*x2*x3*x4*x5*x6*x7*x8) + 0.2*torch.rand(size_x)
# noisy y data (tensor), shape=(100, 1)
x=torch.cat([x1,x2,x3,x4,x5,x6,x7,x8], dim=1)
x, y = Variable(x), Variable(y)
# another way to define a network
# Baseline plain feed-forward regressor (replaced by SpinalNet further below).
net = torch.nn.Sequential(
    torch.nn.Linear(half_in_size*2, 200),
    torch.nn.LeakyReLU(),
    torch.nn.Linear(200, 100),
    torch.nn.LeakyReLU(),
    torch.nn.Linear(100, 1),
)
import torch.nn as nn
first_HL = 50
class SpinalNet(nn.Module):
    """SpinalNet regressor with six gradually-connected hidden layers.

    The 8-column input is split into two halves of ``half_in_size`` features.
    Each hidden layer receives one half (alternating) concatenated with the
    previous hidden layer's activation; all six hidden activations feed the
    final linear output layer (scalar regression output).
    """
    def __init__(self):
        super(SpinalNet, self).__init__()
        self.lru = nn.LeakyReLU()
        self.fc1 = nn.Linear(half_in_size, first_HL)
        self.fc2 = nn.Linear(half_in_size+first_HL, first_HL)
        self.fc3 = nn.Linear(half_in_size+first_HL, first_HL)
        self.fc4 = nn.Linear(half_in_size+first_HL, first_HL)
        self.fc5 = nn.Linear(half_in_size+first_HL, first_HL)
        self.fc6 = nn.Linear(half_in_size+first_HL, first_HL)
        self.fcx = nn.Linear(first_HL*6, 1)
    def forward(self, x):
        first_half = x[:, 0:half_in_size]
        second_half = x[:, half_in_size:half_in_size*2]
        x1 = self.lru(self.fc1(first_half))
        x2 = self.lru(self.fc2(torch.cat([second_half, x1], dim=1)))
        x3 = self.lru(self.fc3(torch.cat([first_half, x2], dim=1)))
        x4 = self.lru(self.fc4(torch.cat([second_half, x3], dim=1)))
        # BUG FIX: the original forward reused fc3/fc4 for layers 5 and 6,
        # leaving fc5/fc6 as dead parameters and tying layers 3&5 / 4&6.
        x5 = self.lru(self.fc5(torch.cat([first_half, x4], dim=1)))
        x6 = self.lru(self.fc6(torch.cat([second_half, x5], dim=1)))
        # Concatenate all six hidden activations for the output layer.
        x = torch.cat([x1, x2, x3, x4, x5, x6], dim=1)
        x = self.fcx(x)
        return x
#------------------------------------------------------------------------------
"""
Comment these two lines for traditional NN training.
"""
net = SpinalNet()
print('SpinalNet')
#------------------------------------------------------------------------------
optimizer = torch.optim.Adam(net.parameters(), lr=0.01)
loss_func = torch.nn.MSELoss() # this is for regression mean squared loss
BATCH_SIZE = 64
EPOCH = 200
# Wrap the generated tensors in a shuffling mini-batch loader.
torch_dataset = Data.TensorDataset(x, y)
loader = Data.DataLoader(
    dataset=torch_dataset,
    batch_size=BATCH_SIZE,
    shuffle=True, num_workers=0,)
min_loss =100
# start training; track the lowest batch loss seen across all epochs
for epoch in range(EPOCH):
    for step, (batch_x, batch_y) in enumerate(loader): # for each training step
        b_x = Variable(batch_x)
        b_y = Variable(batch_y)
        prediction = net(b_x)     # input x and predict based on x
        loss = loss_func(prediction, b_y)     # must be (1. nn output, 2. target)
        optimizer.zero_grad()   # clear gradients for next train
        loss.backward()         # backpropagation, compute gradients
        optimizer.step()        # apply gradients
        loss = loss.item()
        if loss<min_loss:
            min_loss = loss
            # NOTE(review): this aliases the live model rather than copying it,
            # so net_opt tracks later updates too -- confirm intent.
            net_opt = net
    # Report progress every 100 epochs.
    if epoch%100 == 99:
        print ("Epoch [{}/{}], Loss: {:.4f}, Minimum Loss {:.6f}" .format(epoch+1, EPOCH, loss, min_loss))
| 5,420 | 28.302703 | 105 | py |
SpinalNet | SpinalNet-master/Transfer Learning/Transfer_Learning_hymenoptera.py | '''
Most part of the code and dataset is copied from PyTorch demo:
https://pytorch.org/tutorials/beginner/transfer_learning_tutorial.html
'''
from __future__ import print_function, division
import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim import lr_scheduler
import numpy as np
import torchvision
from torchvision import datasets, models, transforms
import matplotlib.pyplot as plt
import time
import os
import copy
plt.ion() # interactive mode
# Data augmentation and normalization for training
# Just normalization for validation
data_transforms = {
'train': transforms.Compose([
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
]),
'val': transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
]),
}
data_dir = 'data/hymenoptera_data'
image_datasets = {x: datasets.ImageFolder(os.path.join(data_dir, x),
data_transforms[x])
for x in ['train', 'val']}
dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=4,
shuffle=True, num_workers=0)
for x in ['train', 'val']}
dataset_sizes = {x: len(image_datasets[x]) for x in ['train', 'val']}
class_names = image_datasets['train'].classes
device = 'cuda'#torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
def imshow(inp, title=None):
    """Display a normalized CHW image tensor with matplotlib."""
    # Tensor layout is CHW; matplotlib expects HWC.
    img = inp.numpy().transpose((1, 2, 0))
    mean = np.array([0.485, 0.456, 0.406])
    std = np.array([0.229, 0.224, 0.225])
    # Undo the dataloader normalization and clamp into displayable range.
    img = np.clip(std * img + mean, 0, 1)
    plt.imshow(img)
    if title is not None:
        plt.title(title)
    plt.pause(0.001)  # pause a bit so that plots are updated
# Get a batch of training data
inputs, classes = next(iter(dataloaders['train']))
# Make a grid from batch
out = torchvision.utils.make_grid(inputs)
imshow(out, title=[class_names[x] for x in classes])
#%%
half_in_size = 256
first_HL = 5
class SpinalNet(nn.Module):
    """SpinalNet classification head for the 2-class hymenoptera task.

    The backbone feature is split into two halves of ``half_in_size`` inputs;
    each of six hidden layers sees one half (alternating) plus the previous
    layer's activation, and all six hidden outputs are concatenated for the
    final 2-way linear classifier.
    """
    def __init__(self):
        super(SpinalNet, self).__init__()
        self.lru = nn.LeakyReLU()
        self.fc1 = nn.Linear(half_in_size, first_HL)
        self.fc2 = nn.Linear(half_in_size+first_HL, first_HL)
        self.fc3 = nn.Linear(half_in_size+first_HL, first_HL)
        self.fc4 = nn.Linear(half_in_size+first_HL, first_HL)
        self.fc5 = nn.Linear(half_in_size+first_HL, first_HL)
        self.fc6 = nn.Linear(half_in_size+first_HL, first_HL)
        self.fcx = nn.Linear(first_HL*6, 2)
    def forward(self, x):
        first_half = x[:, 0:half_in_size]
        second_half = x[:, half_in_size:half_in_size*2]
        x1 = self.lru(self.fc1(first_half))
        x2 = self.lru(self.fc2(torch.cat([second_half, x1], dim=1)))
        x3 = self.lru(self.fc3(torch.cat([first_half, x2], dim=1)))
        x4 = self.lru(self.fc4(torch.cat([second_half, x3], dim=1)))
        # BUG FIX: the original forward reused fc3/fc4 for layers 5 and 6,
        # leaving fc5/fc6 as dead parameters and tying layers 3&5 / 4&6.
        x5 = self.lru(self.fc5(torch.cat([first_half, x4], dim=1)))
        x6 = self.lru(self.fc6(torch.cat([second_half, x5], dim=1)))
        # Concatenate all six hidden activations for the classifier.
        x = torch.cat([x1, x2, x3, x4, x5, x6], dim=1)
        x = self.fcx(x)
        return x
#%%
def train_model(model, criterion, optimizer, scheduler, num_epochs=25):
    """Train `model` with per-epoch train/val phases; return the weights
    that achieved the best validation accuracy.

    Relies on module-level globals: `dataloaders`, `dataset_sizes`, `device`.
    """
    since = time.time()
    # Snapshot of the best-so-far weights (restored before returning).
    best_model_wts = copy.deepcopy(model.state_dict())
    best_acc = 0.0
    for epoch in range(num_epochs):
        print('Epoch {}/{}'.format(epoch, num_epochs - 1))
        print('-' * 10)
        # Each epoch has a training and validation phase
        for phase in ['train', 'val']:
            if phase == 'train':
                model.train()  # Set model to training mode
            else:
                model.eval()   # Set model to evaluate mode
            running_loss = 0.0
            running_corrects = 0
            # Iterate over data.
            for inputs, labels in dataloaders[phase]:
                inputs = inputs.to(device)
                labels = labels.to(device)
                # zero the parameter gradients
                optimizer.zero_grad()
                # forward; track gradient history only in the train phase
                with torch.set_grad_enabled(phase == 'train'):
                    outputs = model(inputs)
                    _, preds = torch.max(outputs, 1)
                    loss = criterion(outputs, labels)
                    # backward + optimize only if in training phase
                    if phase == 'train':
                        loss.backward()
                        optimizer.step()
                # statistics: sum of per-sample losses and correct predictions
                running_loss += loss.item() * inputs.size(0)
                running_corrects += torch.sum(preds == labels.data)
            if phase == 'train':
                scheduler.step()
            epoch_loss = running_loss / dataset_sizes[phase]
            epoch_acc = running_corrects.double() / dataset_sizes[phase]
            print('{} Loss: {:.4f} Acc: {:.4f}'.format(
                phase, epoch_loss, epoch_acc))
            # deep copy the model whenever validation accuracy improves
            if phase == 'val' and epoch_acc > best_acc:
                best_acc = epoch_acc
                best_model_wts = copy.deepcopy(model.state_dict())
        print()
    time_elapsed = time.time() - since
    print('Training complete in {:.0f}m {:.0f}s'.format(
        time_elapsed // 60, time_elapsed % 60))
    # NOTE(review): '{:4f}' is a width-4 spec, not 4 decimal places
    # ('{:.4f}' was probably intended).
    print('Best val Acc: {:4f}'.format(best_acc))
    # load best model weights
    model.load_state_dict(best_model_wts)
    return model
def visualize_model(model, num_images=6):
    """Plot up to `num_images` validation images with predicted class titles.

    Relies on module-level globals: `dataloaders`, `device`, `class_names`,
    `imshow`. Restores the model's original train/eval mode before returning.
    """
    was_training = model.training
    model.eval()
    images_so_far = 0
    fig = plt.figure()
    with torch.no_grad():
        for i, (inputs, labels) in enumerate(dataloaders['val']):
            inputs = inputs.to(device)
            labels = labels.to(device)
            outputs = model(inputs)
            _, preds = torch.max(outputs, 1)
            for j in range(inputs.size()[0]):
                images_so_far += 1
                # Arrange the images in a (num_images//2) x 2 grid.
                ax = plt.subplot(num_images//2, 2, images_so_far)
                ax.axis('off')
                ax.set_title('predicted: {}'.format(class_names[preds[j]]))
                imshow(inputs.cpu().data[j])
                if images_so_far == num_images:
                    # Restore the original mode before the early return.
                    model.train(mode=was_training)
                    return
        model.train(mode=was_training)
model_ft = models.resnet18(pretrained=True)
num_ftrs = model_ft.fc.in_features
'''
Changing the fully connected layer to SpinalNet
'''
#model_ft.fc = nn.Linear(num_ftrs, 2)
model_ft.fc = SpinalNet()
model_ft = model_ft.to(device)
criterion = nn.CrossEntropyLoss()
# Observe that all parameters are being optimized
optimizer_ft = optim.SGD(model_ft.parameters(), lr=0.001, momentum=0.9)
# Decay LR by a factor of 0.1 every 7 epochs
exp_lr_scheduler = lr_scheduler.StepLR(optimizer_ft, step_size=7, gamma=0.1)
model_ft = train_model(model_ft, criterion, optimizer_ft, exp_lr_scheduler,
num_epochs=25)
| 7,504 | 29.384615 | 78 | py |
SpinalNet | SpinalNet-master/Transfer Learning/Transfer_Learning_STL10.py | '''
We write this code with the help of PyTorch demo:
https://pytorch.org/tutorials/beginner/transfer_learning_tutorial.html
Data is downloaded from pytorch and divided into folders
using script 'Pytorch_data_to_folders.py'
Effects:
transforms.Resize((272,272)),
transforms.RandomRotation(15,),
transforms.RandomCrop(256),
transforms.RandomHorizontalFlip(),
wide_resnet101_2 Spinal FC gives 98.23% test accuracy
'''
from __future__ import print_function, division
import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim import lr_scheduler
import numpy as np
import torchvision
from torchvision import datasets, models, transforms
import matplotlib.pyplot as plt
import time
import os
import copy
plt.ion() # interactive mode
# Data augmentation and normalization for training
# Just normalization for validation
data_transforms = {
'train': transforms.Compose([
transforms.Resize((272,272)),
transforms.RandomRotation(15,),
transforms.RandomCrop(256),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(mean=[0.507, 0.487, 0.441], std=[0.267, 0.256, 0.276])
]),
'val': transforms.Compose([
transforms.Resize(256),
transforms.ToTensor(),
transforms.Normalize(mean=[0.507, 0.487, 0.441], std=[0.267, 0.256, 0.276])
]),
}
data_dir = 'data/stl10'
image_datasets = {x: datasets.ImageFolder(os.path.join(data_dir, x),
data_transforms[x])
for x in ['train', 'val']}
dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=24,
shuffle=True, num_workers=0)
for x in ['train', 'val']}
dataset_sizes = {x: len(image_datasets[x]) for x in ['train', 'val']}
class_names = image_datasets['train'].classes
device = 'cuda'#torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
def imshow(inp, title=None):
    """Display a normalized CHW image tensor with matplotlib."""
    # CHW tensor -> HWC numpy array for matplotlib.
    inp = inp.numpy().transpose((1, 2, 0))
    # NOTE(review): these are ImageNet un-normalization stats, but the
    # dataloader above normalizes with mean/std (0.507.../0.267...) --
    # displayed colors will be slightly off; confirm intended.
    mean = np.array([0.485, 0.456, 0.406])
    std = np.array([0.229, 0.224, 0.225])
    inp = std * inp + mean
    inp = np.clip(inp, 0, 1)
    plt.imshow(inp)
    if title is not None:
        plt.title(title)
    plt.pause(0.001)  # pause a bit so that plots are updated
# Get a batch of training data
inputs, classes = next(iter(dataloaders['train']))
# Make a grid from batch
out = torchvision.utils.make_grid(inputs)
imshow(out)#, title=[class_names[x] for x in classes])
#%%
# model_ft = models.vgg19_bn(pretrained=True)
# num_ftrs = model_ft.classifier[0].in_features
model_ft = models.wide_resnet101_2(pretrained=True)
num_ftrs = model_ft.fc.in_features
half_in_size = round(num_ftrs/2)
layer_width = 512
Num_class=10
class SpinalNet(nn.Module):
    """Spinal fully-connected head: four hidden layers, each fed one half of
    the backbone feature plus the previous layer's activation; all four
    activations are concatenated for the output classifier."""

    def __init__(self):
        super(SpinalNet, self).__init__()

        def hidden(in_features):
            # Dropout -> Linear -> BatchNorm -> ReLU hidden block.
            return nn.Sequential(
                nn.Dropout(p = 0.5), nn.Linear(in_features, layer_width),
                nn.BatchNorm1d(layer_width), nn.ReLU(inplace=True),)

        self.fc_spinal_layer1 = hidden(half_in_size)
        self.fc_spinal_layer2 = hidden(half_in_size+layer_width)
        self.fc_spinal_layer3 = hidden(half_in_size+layer_width)
        self.fc_spinal_layer4 = hidden(half_in_size+layer_width)
        self.fc_out = nn.Sequential(
            nn.Dropout(p = 0.5), nn.Linear(layer_width*4, Num_class),)

    def forward(self, x):
        # Alternate the two input halves through the spinal layers.
        first = x[:, 0:half_in_size]
        second = x[:, half_in_size:2*half_in_size]
        x1 = self.fc_spinal_layer1(first)
        x2 = self.fc_spinal_layer2(torch.cat([second, x1], dim=1))
        x3 = self.fc_spinal_layer3(torch.cat([first, x2], dim=1))
        x4 = self.fc_spinal_layer4(torch.cat([second, x3], dim=1))
        return self.fc_out(torch.cat([x1, x2, x3, x4], dim=1))
net_fc = nn.Sequential(
nn.Linear(512, 4096),
nn.ReLU(inplace=True),
nn.Dropout(),
nn.Linear(4096, 4096),
nn.ReLU(inplace=True),
nn.Dropout(),
nn.Linear(4096, Num_class)
)
#%%
def train_model(model, criterion, optimizer, scheduler, num_epochs=25):
    """Train `model` with per-epoch train/val phases; return the weights
    that achieved the best validation accuracy.

    Relies on module-level globals: `dataloaders`, `dataset_sizes`, `device`.
    """
    since = time.time()
    # Snapshot of the best-so-far weights (restored before returning).
    best_model_wts = copy.deepcopy(model.state_dict())
    best_acc = 0.0
    for epoch in range(num_epochs):
        print('Epoch {}/{}'.format(epoch, num_epochs - 1))
        # Each epoch has a training and validation phase
        for phase in ['train', 'val']:
            if phase == 'train':
                model.train()  # Set model to training mode
            else:
                model.eval()   # Set model to evaluate mode
            running_loss = 0.0
            running_corrects = 0
            # Iterate over data.
            for inputs, labels in dataloaders[phase]:
                inputs = inputs.to(device)
                labels = labels.to(device)
                # zero the parameter gradients
                optimizer.zero_grad()
                # forward; track gradient history only in the train phase
                with torch.set_grad_enabled(phase == 'train'):
                    outputs = model(inputs)
                    _, preds = torch.max(outputs, 1)
                    loss = criterion(outputs, labels)
                    # backward + optimize only if in training phase
                    if phase == 'train':
                        loss.backward()
                        optimizer.step()
                # statistics: sum of per-sample losses and correct predictions
                running_loss += loss.item() * inputs.size(0)
                running_corrects += torch.sum(preds == labels.data)
            if phase == 'train':
                scheduler.step()
            epoch_loss = running_loss / dataset_sizes[phase]
            epoch_acc = running_corrects.double() / dataset_sizes[phase]
            print('{} Loss: {:.4f} Acc: {:.4f}'.format(
                phase, epoch_loss, epoch_acc))
            # deep copy the model whenever validation accuracy improves
            if phase == 'val' and epoch_acc > best_acc:
                best_acc = epoch_acc
                best_model_wts = copy.deepcopy(model.state_dict())
        # Report wall-clock time elapsed after every epoch.
        time_elapsed = time.time() - since
        print('Time from Start {:.0f}m {:.0f}s'.format(
            time_elapsed // 60, time_elapsed % 60))
        print()
    time_elapsed = time.time() - since
    print('Training complete in {:.0f}m {:.0f}s'.format(
        time_elapsed // 60, time_elapsed % 60))
    # NOTE(review): '{:4f}' is a width-4 spec, not 4 decimal places.
    print('Best val Acc: {:4f}'.format(best_acc))
    # load best model weights
    model.load_state_dict(best_model_wts)
    return model
'''
Changing the fully connected layer to SpinalNet
'''
#model_ft.fc = nn.Linear(num_ftrs, 10)
model_ft.fc = SpinalNet()
model_ft = model_ft.to(device)
criterion = nn.CrossEntropyLoss()
# Observe that all parameters are being optimized
optimizer_ft = optim.SGD(model_ft.parameters(), lr=0.001, momentum=0.9)
# Decay LR by a factor of 0.1 every 7 epochs
exp_lr_scheduler = lr_scheduler.StepLR(optimizer_ft, step_size=7, gamma=0.1)
model_ft = train_model(model_ft, criterion, optimizer_ft, exp_lr_scheduler,
num_epochs=20)
| 7,704 | 30.068548 | 93 | py |
SpinalNet | SpinalNet-master/Transfer Learning/Transfer_Learning_CIFAR100.py | '''
We write this code with the help of PyTorch demo:
https://pytorch.org/tutorials/beginner/transfer_learning_tutorial.html
Dataset is distributed in folders with following script:
https://au.mathworks.com/matlabcentral/answers/329597-save-cifar-100-images
Performances:
Data augmentation:
transforms.Resize((136,136)),
transforms.RandomRotation(10,),
transforms.RandomCrop(128),
Cifar-10
resnet101 Spinal FC (1024*4 neurons) gives 97.03% Accuracy
Cifar-100
resnet101 Spinal FC (512*4 neurons) gives 84.04% Accuracy
Data augmentation:
transforms.Resize((272,272)),
transforms.RandomRotation(15,),
transforms.RandomCrop(256),
transforms.RandomHorizontalFlip(),
Cifar-10
wide_resnet101_2 Spinal FC (512*4 neurons) gives 98.12% Accuracy
Cifar-100
wide_resnet101_2 Spinal FC (512*4 neurons) gives 88.34% Accuracy
'''
from __future__ import print_function, division
import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim import lr_scheduler
import numpy as np
import torchvision
from torchvision import datasets, models, transforms
import matplotlib.pyplot as plt
import time
import os
import copy
plt.ion() # interactive mode
# Data augmentation and normalization for training
# Just normalization for validation
data_transforms = {
'train': transforms.Compose([
transforms.Resize((272,272)),
transforms.RandomRotation(15,),
transforms.RandomCrop(256),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(mean=[0.507, 0.487, 0.441], std=[0.267, 0.256, 0.276])
]),
'val': transforms.Compose([
transforms.Resize(256),
transforms.ToTensor(),
transforms.Normalize(mean=[0.507, 0.487, 0.441], std=[0.267, 0.256, 0.276])
]),
}
data_dir = 'data/cifar10'
image_datasets = {x: datasets.ImageFolder(os.path.join(data_dir, x),
data_transforms[x])
for x in ['train', 'val']}
dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=24,
shuffle=True, num_workers=0)
for x in ['train', 'val']}
dataset_sizes = {x: len(image_datasets[x]) for x in ['train', 'val']}
class_names = image_datasets['train'].classes
device = 'cuda'#torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
def imshow(inp, title=None):
    """Display a normalized CHW image tensor with matplotlib."""
    # CHW tensor -> HWC numpy array for matplotlib.
    inp = inp.numpy().transpose((1, 2, 0))
    # NOTE(review): ImageNet un-normalization stats, while the dataloader
    # normalized with (0.507.../0.267...) -- display colors slightly off.
    mean = np.array([0.485, 0.456, 0.406])
    std = np.array([0.229, 0.224, 0.225])
    inp = std * inp + mean
    inp = np.clip(inp, 0, 1)
    plt.imshow(inp)
    if title is not None:
        plt.title(title)
    plt.pause(0.001)  # pause a bit so that plots are updated
# Get a batch of training data
inputs, classes = next(iter(dataloaders['train']))
# Make a grid from batch
out = torchvision.utils.make_grid(inputs)
imshow(out)#, title=[class_names[x] for x in classes])
#%%
# model_ft = models.vgg19_bn(pretrained=True)
# num_ftrs = model_ft.classifier[0].in_features
model_ft = models.wide_resnet101_2(pretrained=True)
num_ftrs = model_ft.fc.in_features
half_in_size = round(num_ftrs/2)
layer_width = 512
Num_class=100
class SpinalNet(nn.Module):
    """Spinal fully-connected head: four hidden blocks (Dropout-Linear-BN-ReLU),
    each fed one half (``half_in_size``) of the backbone feature concatenated
    with the previous block's activation; all four activations are concatenated
    for the final ``Num_class``-way classifier."""
    def __init__(self):
        super(SpinalNet, self).__init__()
        self.fc_spinal_layer1 = nn.Sequential(
            nn.Dropout(p = 0.5), nn.Linear(half_in_size, layer_width),
            nn.BatchNorm1d(layer_width), nn.ReLU(inplace=True),)
        self.fc_spinal_layer2 = nn.Sequential(
            nn.Dropout(p = 0.5), nn.Linear(half_in_size+layer_width, layer_width),
            nn.BatchNorm1d(layer_width), nn.ReLU(inplace=True),)
        self.fc_spinal_layer3 = nn.Sequential(
            nn.Dropout(p = 0.5), nn.Linear(half_in_size+layer_width, layer_width),
            nn.BatchNorm1d(layer_width), nn.ReLU(inplace=True),)
        self.fc_spinal_layer4 = nn.Sequential(
            nn.Dropout(p = 0.5), nn.Linear(half_in_size+layer_width, layer_width),
            nn.BatchNorm1d(layer_width), nn.ReLU(inplace=True),)
        self.fc_out = nn.Sequential(
            nn.Dropout(p = 0.5), nn.Linear(layer_width*4, Num_class),)
    def forward(self, x):
        # Alternate the two input halves through the four spinal blocks.
        x1 = self.fc_spinal_layer1(x[:, 0:half_in_size])
        x2 = self.fc_spinal_layer2(torch.cat([ x[:,half_in_size:2*half_in_size], x1], dim=1))
        x3 = self.fc_spinal_layer3(torch.cat([ x[:,0:half_in_size], x2], dim=1))
        x4 = self.fc_spinal_layer4(torch.cat([ x[:,half_in_size:2*half_in_size], x3], dim=1))
        # Concatenate all four activations for the classifier.
        x = torch.cat([x1, x2], dim=1)
        x = torch.cat([x, x3], dim=1)
        x = torch.cat([x, x4], dim=1)
        x = self.fc_out(x)
        return x
net_fc = nn.Sequential(
nn.Linear(512, 4096),
nn.ReLU(inplace=True),
nn.Dropout(),
nn.Linear(4096, 4096),
nn.ReLU(inplace=True),
nn.Dropout(),
nn.Linear(4096, Num_class)
)
#%%
def train_model(model, criterion, optimizer, scheduler, num_epochs=25):
    """Train `model` with per-epoch train/val phases; return the weights
    that achieved the best validation accuracy.

    Relies on module-level globals: `dataloaders`, `dataset_sizes`, `device`.
    """
    since = time.time()
    # Snapshot of the best-so-far weights (restored before returning).
    best_model_wts = copy.deepcopy(model.state_dict())
    best_acc = 0.0
    for epoch in range(num_epochs):
        print('Epoch {}/{}'.format(epoch, num_epochs - 1))
        # Each epoch has a training and validation phase
        for phase in ['train', 'val']:
            if phase == 'train':
                model.train()  # Set model to training mode
            else:
                model.eval()   # Set model to evaluate mode
            running_loss = 0.0
            running_corrects = 0
            # Iterate over data.
            for inputs, labels in dataloaders[phase]:
                inputs = inputs.to(device)
                labels = labels.to(device)
                # zero the parameter gradients
                optimizer.zero_grad()
                # forward; track gradient history only in the train phase
                with torch.set_grad_enabled(phase == 'train'):
                    outputs = model(inputs)
                    _, preds = torch.max(outputs, 1)
                    loss = criterion(outputs, labels)
                    # backward + optimize only if in training phase
                    if phase == 'train':
                        loss.backward()
                        optimizer.step()
                # statistics: sum of per-sample losses and correct predictions
                running_loss += loss.item() * inputs.size(0)
                running_corrects += torch.sum(preds == labels.data)
            if phase == 'train':
                scheduler.step()
            epoch_loss = running_loss / dataset_sizes[phase]
            epoch_acc = running_corrects.double() / dataset_sizes[phase]
            print('{} Loss: {:.4f} Acc: {:.4f}'.format(
                phase, epoch_loss, epoch_acc))
            # deep copy the model whenever validation accuracy improves
            if phase == 'val' and epoch_acc > best_acc:
                best_acc = epoch_acc
                best_model_wts = copy.deepcopy(model.state_dict())
        # Report wall-clock time elapsed after every epoch.
        time_elapsed = time.time() - since
        print('Time from Start {:.0f}m {:.0f}s'.format(
            time_elapsed // 60, time_elapsed % 60))
        print()
    time_elapsed = time.time() - since
    print('Training complete in {:.0f}m {:.0f}s'.format(
        time_elapsed // 60, time_elapsed % 60))
    # NOTE(review): '{:4f}' is a width-4 spec, not 4 decimal places.
    print('Best val Acc: {:4f}'.format(best_acc))
    # load best model weights
    model.load_state_dict(best_model_wts)
    return model
'''
Changing the fully connected layer to SpinalNet
'''
#model_ft.fc = nn.Linear(num_ftrs, 10)
model_ft.fc = SpinalNet()
model_ft = model_ft.to(device)
criterion = nn.CrossEntropyLoss()
# Observe that all parameters are being optimized
optimizer_ft = optim.SGD(model_ft.parameters(), lr=0.001, momentum=0.9)
# Decay LR by a factor of 0.1 every 7 epochs
exp_lr_scheduler = lr_scheduler.StepLR(optimizer_ft, step_size=7, gamma=0.1)
model_ft = train_model(model_ft, criterion, optimizer_ft, exp_lr_scheduler,
num_epochs=50)
| 8,199 | 28.818182 | 93 | py |
SpinalNet | SpinalNet-master/Transfer Learning/Transfer_Learning_CIFAR10.py | '''
We write this code with the help of PyTorch demo:
https://pytorch.org/tutorials/beginner/transfer_learning_tutorial.html
The dataset is downloaded from https://www.kaggle.com/swaroopkml/cifar10-pngs-in-folders
Performances:
Data augmentation:
transforms.Resize((272,272)),
transforms.RandomRotation(15,),
transforms.RandomCrop(256),
transforms.RandomHorizontalFlip(),
Cifar-10
wide_resnet101_2 gives 98.22% Accuracy
wide_resnet101_2 Spinal FC (20*4 neurons dropout_bn) gives 98.12% Accuracy
'''
from __future__ import print_function, division
import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim import lr_scheduler
import numpy as np
import torchvision
from torchvision import datasets, models, transforms
import matplotlib.pyplot as plt
import time
import os
import copy
plt.ion() # interactive mode
# Data augmentation and normalization for training
# Just normalization for validation
data_transforms = {
'train': transforms.Compose([
transforms.Resize((272,272)),
transforms.RandomRotation(15,),
transforms.RandomCrop(256),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(mean=[0.507, 0.487, 0.441], std=[0.267, 0.256, 0.276])
]),
'val': transforms.Compose([
transforms.Resize(256),
transforms.ToTensor(),
transforms.Normalize(mean=[0.507, 0.487, 0.441], std=[0.267, 0.256, 0.276])
]),
}
data_dir = 'data/cifar10'
image_datasets = {x: datasets.ImageFolder(os.path.join(data_dir, x),
data_transforms[x])
for x in ['train', 'val']}
dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=28,
shuffle=True, num_workers=0)
for x in ['train', 'val']}
dataset_sizes = {x: len(image_datasets[x]) for x in ['train', 'val']}
class_names = image_datasets['train'].classes
device = 'cuda'#torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
def imshow(inp, title=None):
    """Display a normalized CHW image tensor with matplotlib.

    BUG FIX: the original un-normalized with ImageNet statistics
    ([0.485, 0.456, 0.406] / [0.229, 0.224, 0.225]) although this file's
    ``data_transforms`` normalizes with the CIFAR-10 statistics below, so
    the preview colors were off.  Use the same statistics to invert
    ``transforms.Normalize`` correctly.
    """
    inp = inp.numpy().transpose((1, 2, 0))  # CHW -> HWC for matplotlib
    mean = np.array([0.507, 0.487, 0.441])
    std = np.array([0.267, 0.256, 0.276])
    inp = std * inp + mean  # invert Normalize: x*std + mean
    inp = np.clip(inp, 0, 1)
    plt.imshow(inp)
    if title is not None:
        plt.title(title)
    plt.pause(0.001)  # pause a bit so that plots are updated
# Get a batch of training data
inputs, classes = next(iter(dataloaders['train']))
# Make a grid from batch
out = torchvision.utils.make_grid(inputs)
imshow(out)#, title=[class_names[x] for x in classes])
#%%
model_ft = models.vgg19_bn(pretrained=True)
num_ftrs = model_ft.classifier[0].in_features
# model_ft = models.wide_resnet101_2(pretrained=True)
# num_ftrs = model_ft.fc.in_features
half_in_size = round(num_ftrs/2)
layer_width = 20 #Small for Resnet, large for VGG
Num_class=10
class SpinalNet_ResNet(nn.Module):
    """Spinal fully-connected head (ResNet variant: no dropout/batch-norm).

    The incoming feature vector is split into two halves; each of four
    narrow hidden layers sees one half plus the previous layer's output,
    and the concatenation of all four outputs feeds the classifier.
    Reads the module-level ``half_in_size``, ``layer_width`` and
    ``Num_class``.
    """

    def __init__(self):
        super(SpinalNet_ResNet, self).__init__()

        def hidden(in_features):
            # One spinal segment: a narrow Linear + ReLU.
            return nn.Sequential(
                nn.Linear(in_features, layer_width),
                nn.ReLU(inplace=True),
            )

        self.fc_spinal_layer1 = hidden(half_in_size)
        self.fc_spinal_layer2 = hidden(half_in_size + layer_width)
        self.fc_spinal_layer3 = hidden(half_in_size + layer_width)
        self.fc_spinal_layer4 = hidden(half_in_size + layer_width)
        self.fc_out = nn.Sequential(nn.Linear(layer_width * 4, Num_class))

    def forward(self, x):
        first_half = x[:, 0:half_in_size]
        second_half = x[:, half_in_size:2 * half_in_size]
        # Alternate halves through the spine, feeding each segment the
        # previous segment's output.
        out1 = self.fc_spinal_layer1(first_half)
        out2 = self.fc_spinal_layer2(torch.cat([second_half, out1], dim=1))
        out3 = self.fc_spinal_layer3(torch.cat([first_half, out2], dim=1))
        out4 = self.fc_spinal_layer4(torch.cat([second_half, out3], dim=1))
        return self.fc_out(torch.cat([out1, out2, out3, out4], dim=1))
class SpinalNet_VGG(nn.Module):
    """Spinal fully-connected head (VGG variant: dropout + batch-norm on).

    Same spinal wiring as ``SpinalNet_ResNet``: the input is split in two
    halves and four hidden layers each consume one half plus the previous
    layer's output.  Reads the module-level ``half_in_size``,
    ``layer_width`` and ``Num_class``.
    """

    def __init__(self):
        super(SpinalNet_VGG, self).__init__()

        def hidden(in_features):
            # One spinal segment: Dropout -> Linear -> BatchNorm -> ReLU.
            return nn.Sequential(
                nn.Dropout(p=0.5),
                nn.Linear(in_features, layer_width),
                nn.BatchNorm1d(layer_width),
                nn.ReLU(inplace=True),
            )

        self.fc_spinal_layer1 = hidden(half_in_size)
        self.fc_spinal_layer2 = hidden(half_in_size + layer_width)
        self.fc_spinal_layer3 = hidden(half_in_size + layer_width)
        self.fc_spinal_layer4 = hidden(half_in_size + layer_width)
        self.fc_out = nn.Sequential(
            nn.Dropout(p=0.5),
            nn.Linear(layer_width * 4, Num_class),
        )

    def forward(self, x):
        first_half = x[:, 0:half_in_size]
        second_half = x[:, half_in_size:2 * half_in_size]
        out1 = self.fc_spinal_layer1(first_half)
        out2 = self.fc_spinal_layer2(torch.cat([second_half, out1], dim=1))
        out3 = self.fc_spinal_layer3(torch.cat([first_half, out2], dim=1))
        out4 = self.fc_spinal_layer4(torch.cat([second_half, out3], dim=1))
        return self.fc_out(torch.cat([out1, out2, out3, out4], dim=1))
# Classic VGG-style fully-connected head, kept for comparison with the
# SpinalNet heads.  NOTE(review): it is never assigned to the model in
# this script, and its 512-unit input would need to match `num_ftrs` —
# verify before enabling.
VGG_fc = nn.Sequential(
    nn.Linear(512, 4096),
    nn.ReLU(inplace=True),
    nn.Dropout(),
    nn.Linear(4096, 4096),
    nn.ReLU(inplace=True),
    nn.Dropout(),
    nn.Linear(4096, Num_class)
)
'''
Changing the fully connected layer to SpinalNet or VGG or ResNet
'''
#model_ft.fc = nn.Linear(num_ftrs, 10)
model_ft.fc = SpinalNet_VGG() #SpinalNet_VGG
#%%
def train_model(model, criterion, optimizer, scheduler, num_epochs=25):
    """Train `model` and keep the weights with the best validation accuracy.

    Runs a 'train' and a 'val' phase every epoch using the module-level
    ``dataloaders``, ``dataset_sizes`` and ``device``.  Returns the model
    after re-loading the best-validation-accuracy weights.

    Args:
        model: network to optimize (already on ``device`` or movable to it).
        criterion: loss function (batch-mean reduction assumed below).
        optimizer: optimizer over ``model`` parameters.
        scheduler: LR scheduler, stepped once per training phase.
        num_epochs: number of epochs to run.
    """
    since = time.time()
    best_model_wts = copy.deepcopy(model.state_dict())
    best_acc = 0.0
    for epoch in range(num_epochs):
        print('Epoch {}/{}'.format(epoch, num_epochs - 1))
        # Each epoch has a training and validation phase
        for phase in ['train', 'val']:
            if phase == 'train':
                model.train()  # Set model to training mode
            else:
                model.eval()   # Set model to evaluate mode
            running_loss = 0.0
            running_corrects = 0
            # Iterate over data.
            for inputs, labels in dataloaders[phase]:
                inputs = inputs.to(device)
                labels = labels.to(device)
                # zero the parameter gradients
                optimizer.zero_grad()
                # forward
                # track history if only in train
                with torch.set_grad_enabled(phase == 'train'):
                    outputs = model(inputs)
                    _, preds = torch.max(outputs, 1)
                    loss = criterion(outputs, labels)
                    # backward + optimize only if in training phase
                    if phase == 'train':
                        loss.backward()
                        optimizer.step()
                # statistics: loss is a batch mean, so weight by batch size
                running_loss += loss.item() * inputs.size(0)
                running_corrects += torch.sum(preds == labels.data)
            if phase == 'train':
                scheduler.step()
            epoch_loss = running_loss / dataset_sizes[phase]
            epoch_acc = running_corrects.double() / dataset_sizes[phase]
            print('{} Loss: {:.4f} Acc: {:.4f}'.format(
                phase, epoch_loss, epoch_acc))
            # deep copy the model whenever validation accuracy improves
            if phase == 'val' and epoch_acc > best_acc:
                best_acc = epoch_acc
                best_model_wts = copy.deepcopy(model.state_dict())
        time_elapsed = time.time() - since
        print('Time from Start {:.0f}m {:.0f}s'.format(
            time_elapsed // 60, time_elapsed % 60))
        print()
    time_elapsed = time.time() - since
    print('Training complete in {:.0f}m {:.0f}s'.format(
        time_elapsed // 60, time_elapsed % 60))
    print('Best val Acc: {:4f}'.format(best_acc))
    # load best model weights
    model.load_state_dict(best_model_wts)
    return model
model_ft = model_ft.to(device)
criterion = nn.CrossEntropyLoss()
# Observe that all parameters are being optimized
optimizer_ft = optim.SGD(model_ft.parameters(), lr=0.001, momentum=0.9)
# Decay LR by a factor of 0.1 every 7 epochs
exp_lr_scheduler = lr_scheduler.StepLR(optimizer_ft, step_size=7, gamma=0.1)
model_ft = train_model(model_ft, criterion, optimizer_ft, exp_lr_scheduler,
num_epochs=50) | 9,644 | 30.314935 | 93 | py |
SpinalNet | SpinalNet-master/Transfer Learning/Transfer_Learning_SVHN.py | '''
Data is downloaded from pytorch and divided into folders
using script 'Pytorch_data_to_folders.py'
Effects:
transforms.Resize((272,320)),
transforms.RandomRotation(15,),
transforms.CenterCrop(272),
transforms.RandomCrop(256),
transforms.ToTensor(),
wide_resnet101_2 Spinal ResNet FC gives 97.87% test accuracy
'''
from __future__ import print_function, division
import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim import lr_scheduler
import numpy as np
import torchvision
from torchvision import datasets, models, transforms
import matplotlib.pyplot as plt
import time
import os
import copy
plt.ion() # interactive mode
# Data augmentation and normalization for training
# Just normalization for validation
# Training uses jittered resize/rotate/crop augmentation; validation uses a
# deterministic resize + center crop.  Both normalize with the same stats.
data_transforms = {
    'train': transforms.Compose([
        transforms.Resize((272,320)),
        transforms.RandomRotation(15,),
        transforms.CenterCrop(272),
        transforms.RandomCrop(256),
        # BUG FIX: transforms.ToTensor() was listed twice here; the second
        # call would receive a tensor rather than a PIL image.  Keep one.
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.507, 0.487, 0.441], std=[0.267, 0.256, 0.276])
    ]),
    'val': transforms.Compose([
        transforms.Resize((272,320)),
        transforms.CenterCrop((256,256)),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.507, 0.487, 0.441], std=[0.267, 0.256, 0.276])
    ]),
}
data_dir = 'data/SVHN'
image_datasets = {x: datasets.ImageFolder(os.path.join(data_dir, x),
data_transforms[x])
for x in ['train', 'val']}
dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=28,
shuffle=True, num_workers=0)
for x in ['train', 'val']}
dataset_sizes = {x: len(image_datasets[x]) for x in ['train', 'val']}
class_names = image_datasets['train'].classes
device = 'cuda'#torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
def imshow(inp, title=None):
    """Display a normalized CHW image tensor with matplotlib.

    BUG FIX: the original un-normalized with ImageNet statistics although
    this file's ``data_transforms`` normalizes with [0.507, 0.487, 0.441] /
    [0.267, 0.256, 0.276]; the preview colors were therefore off.  Use the
    matching statistics to invert ``transforms.Normalize``.
    """
    inp = inp.numpy().transpose((1, 2, 0))  # CHW -> HWC for matplotlib
    mean = np.array([0.507, 0.487, 0.441])
    std = np.array([0.267, 0.256, 0.276])
    inp = std * inp + mean  # invert Normalize: x*std + mean
    inp = np.clip(inp, 0, 1)
    plt.imshow(inp)
    if title is not None:
        plt.title(title)
    plt.pause(0.001)  # pause a bit so that plots are updated
# Get a batch of training data
inputs, classes = next(iter(dataloaders['train']))
# Make a grid from batch
out = torchvision.utils.make_grid(inputs)
imshow(out)#, title=[class_names[x] for x in classes])
#%%
model_ft = models.vgg19_bn(pretrained=True)
num_ftrs = model_ft.classifier[0].in_features
# model_ft = models.wide_resnet101_2(pretrained=True)
# num_ftrs = model_ft.fc.in_features
half_in_size = round(num_ftrs/2)
layer_width = 20 #Small for Resnet, large for VGG
Num_class=10
class SpinalNet_ResNet(nn.Module):
    """Spinal fully-connected head for ResNet-style features.

    The input feature vector is split into two halves; each of four narrow
    hidden layers receives one half plus the previous layer's output, and
    the concatenated four outputs feed the final linear classifier.
    Dropout/BatchNorm are present but commented out in this configuration.
    Reads the module-level ``half_in_size``, ``layer_width``, ``Num_class``.
    """
    def __init__(self):
        super(SpinalNet_ResNet, self).__init__()
        # Segment 1: sees only the first half of the input features.
        self.fc_spinal_layer1 = nn.Sequential(
            #nn.Dropout(p = 0.5),
            nn.Linear(half_in_size, layer_width),
            #nn.BatchNorm1d(layer_width),
            nn.ReLU(inplace=True),)
        # Segments 2-4: see one input half plus the previous segment's output.
        self.fc_spinal_layer2 = nn.Sequential(
            #nn.Dropout(p = 0.5),
            nn.Linear(half_in_size+layer_width, layer_width),
            #nn.BatchNorm1d(layer_width),
            nn.ReLU(inplace=True),)
        self.fc_spinal_layer3 = nn.Sequential(
            #nn.Dropout(p = 0.5),
            nn.Linear(half_in_size+layer_width, layer_width),
            #nn.BatchNorm1d(layer_width),
            nn.ReLU(inplace=True),)
        self.fc_spinal_layer4 = nn.Sequential(
            #nn.Dropout(p = 0.5),
            nn.Linear(half_in_size+layer_width, layer_width),
            #nn.BatchNorm1d(layer_width),
            nn.ReLU(inplace=True),)
        # Classifier over the concatenation of all four segment outputs.
        self.fc_out = nn.Sequential(
            #nn.Dropout(p = 0.5),
            nn.Linear(layer_width*4, Num_class),)
    def forward(self, x):
        """Run the spinal cascade; `x` is (batch, 2*half_in_size) features."""
        x1 = self.fc_spinal_layer1(x[:, 0:half_in_size])
        x2 = self.fc_spinal_layer2(torch.cat([ x[:,half_in_size:2*half_in_size], x1], dim=1))
        x3 = self.fc_spinal_layer3(torch.cat([ x[:,0:half_in_size], x2], dim=1))
        x4 = self.fc_spinal_layer4(torch.cat([ x[:,half_in_size:2*half_in_size], x3], dim=1))
        # Concatenate all segment outputs and classify.
        x = torch.cat([x1, x2], dim=1)
        x = torch.cat([x, x3], dim=1)
        x = torch.cat([x, x4], dim=1)
        x = self.fc_out(x)
        return x
class SpinalNet_VGG(nn.Module):
    """Spinal fully-connected head for VGG-style features.

    Same spinal wiring as ``SpinalNet_ResNet`` but with Dropout and
    BatchNorm enabled in every segment.  Reads the module-level
    ``half_in_size``, ``layer_width`` and ``Num_class``.
    """
    def __init__(self):
        super(SpinalNet_VGG, self).__init__()
        # Segment 1: Dropout -> Linear -> BatchNorm -> ReLU on the first half.
        self.fc_spinal_layer1 = nn.Sequential(
            nn.Dropout(p = 0.5), nn.Linear(half_in_size, layer_width),
            nn.BatchNorm1d(layer_width), nn.ReLU(inplace=True),)
        # Segments 2-4: one input half plus the previous segment's output.
        self.fc_spinal_layer2 = nn.Sequential(
            nn.Dropout(p = 0.5),
            nn.Linear(half_in_size+layer_width, layer_width),
            nn.BatchNorm1d(layer_width),
            nn.ReLU(inplace=True),)
        self.fc_spinal_layer3 = nn.Sequential(
            nn.Dropout(p = 0.5),
            nn.Linear(half_in_size+layer_width, layer_width),
            nn.BatchNorm1d(layer_width),
            nn.ReLU(inplace=True),)
        self.fc_spinal_layer4 = nn.Sequential(
            nn.Dropout(p = 0.5),
            nn.Linear(half_in_size+layer_width, layer_width),
            nn.BatchNorm1d(layer_width),
            nn.ReLU(inplace=True),)
        # Classifier over the concatenation of all four segment outputs.
        self.fc_out = nn.Sequential(
            nn.Dropout(p = 0.5),
            nn.Linear(layer_width*4, Num_class),)
    def forward(self, x):
        """Run the spinal cascade; `x` is (batch, 2*half_in_size) features."""
        x1 = self.fc_spinal_layer1(x[:, 0:half_in_size])
        x2 = self.fc_spinal_layer2(torch.cat([ x[:,half_in_size:2*half_in_size], x1], dim=1))
        x3 = self.fc_spinal_layer3(torch.cat([ x[:,0:half_in_size], x2], dim=1))
        x4 = self.fc_spinal_layer4(torch.cat([ x[:,half_in_size:2*half_in_size], x3], dim=1))
        # Concatenate all segment outputs and classify.
        x = torch.cat([x1, x2], dim=1)
        x = torch.cat([x, x3], dim=1)
        x = torch.cat([x, x4], dim=1)
        x = self.fc_out(x)
        return x
# Classic VGG-style fully-connected head, kept for comparison with the
# SpinalNet heads.  NOTE(review): it is never assigned to the model in
# this script, and its 512-unit input would need to match `num_ftrs` —
# verify before enabling.
VGG_fc = nn.Sequential(
    nn.Linear(512, 4096),
    nn.ReLU(inplace=True),
    nn.Dropout(),
    nn.Linear(4096, 4096),
    nn.ReLU(inplace=True),
    nn.Dropout(),
    nn.Linear(4096, Num_class)
)
'''
Changing the fully connected layer to SpinalNet or VGG or ResNet
'''
#model_ft.fc = nn.Linear(num_ftrs, 10)
model_ft.fc = SpinalNet_VGG() #SpinalNet_VGG
#%%
def train_model(model, criterion, optimizer, scheduler, num_epochs=25):
    """Train `model` and keep the weights with the best validation accuracy.

    Runs a 'train' and a 'val' phase every epoch using the module-level
    ``dataloaders``, ``dataset_sizes`` and ``device``.  Returns the model
    after re-loading the best-validation-accuracy weights.

    Args:
        model: network to optimize (already on ``device`` or movable to it).
        criterion: loss function (batch-mean reduction assumed below).
        optimizer: optimizer over ``model`` parameters.
        scheduler: LR scheduler, stepped once per training phase.
        num_epochs: number of epochs to run.
    """
    since = time.time()
    best_model_wts = copy.deepcopy(model.state_dict())
    best_acc = 0.0
    for epoch in range(num_epochs):
        print('Epoch {}/{}'.format(epoch, num_epochs - 1))
        # Each epoch has a training and validation phase
        for phase in ['train', 'val']:
            if phase == 'train':
                model.train()  # Set model to training mode
            else:
                model.eval()   # Set model to evaluate mode
            running_loss = 0.0
            running_corrects = 0
            # Iterate over data.
            for inputs, labels in dataloaders[phase]:
                inputs = inputs.to(device)
                labels = labels.to(device)
                # zero the parameter gradients
                optimizer.zero_grad()
                # forward
                # track history if only in train
                with torch.set_grad_enabled(phase == 'train'):
                    outputs = model(inputs)
                    _, preds = torch.max(outputs, 1)
                    loss = criterion(outputs, labels)
                    # backward + optimize only if in training phase
                    if phase == 'train':
                        loss.backward()
                        optimizer.step()
                # statistics: loss is a batch mean, so weight by batch size
                running_loss += loss.item() * inputs.size(0)
                running_corrects += torch.sum(preds == labels.data)
            if phase == 'train':
                scheduler.step()
            epoch_loss = running_loss / dataset_sizes[phase]
            epoch_acc = running_corrects.double() / dataset_sizes[phase]
            print('{} Loss: {:.4f} Acc: {:.4f}'.format(
                phase, epoch_loss, epoch_acc))
            # deep copy the model whenever validation accuracy improves
            if phase == 'val' and epoch_acc > best_acc:
                best_acc = epoch_acc
                best_model_wts = copy.deepcopy(model.state_dict())
        time_elapsed = time.time() - since
        print('Time from Start {:.0f}m {:.0f}s'.format(
            time_elapsed // 60, time_elapsed % 60))
        print()
    time_elapsed = time.time() - since
    print('Training complete in {:.0f}m {:.0f}s'.format(
        time_elapsed // 60, time_elapsed % 60))
    print('Best val Acc: {:4f}'.format(best_acc))
    # load best model weights
    model.load_state_dict(best_model_wts)
    return model
model_ft = model_ft.to(device)
criterion = nn.CrossEntropyLoss()
# Observe that all parameters are being optimized
optimizer_ft = optim.SGD(model_ft.parameters(), lr=0.001, momentum=0.9)
# Decay LR by a factor of 0.1 every 7 epochs
exp_lr_scheduler = lr_scheduler.StepLR(optimizer_ft, step_size=7, gamma=0.1)
model_ft = train_model(model_ft, criterion, optimizer_ft, exp_lr_scheduler,
num_epochs=50)
| 9,514 | 30.611296 | 93 | py |
SpinalNet | SpinalNet-master/Transfer Learning/Transfer_Learning_CINIC10.py | '''
We write this code with the help of PyTorch demo:
https://pytorch.org/tutorials/beginner/transfer_learning_tutorial.html
The Dataset is downloaded from https://www.kaggle.com/mengcius/cinic10
Effects:
transforms.Resize((272,272)),
transforms.RandomRotation(15,),
transforms.RandomCrop(256),
transforms.RandomHorizontalFlip(),
wide_resnet101_2 Spinal FC gives 93.60% test accuracy
'''
from __future__ import print_function, division
import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim import lr_scheduler
import numpy as np
import torchvision
from torchvision import datasets, models, transforms
import matplotlib.pyplot as plt
import time
import os
import copy
plt.ion() # interactive mode
# Data augmentation and normalization for training
# Just normalization for validation
data_transforms = {
'train': transforms.Compose([
transforms.Resize((456,456)),
transforms.RandomRotation(15,),
transforms.RandomCrop(448),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(mean=[0.47889522, 0.47227842, 0.43047404], std=[0.24205776, 0.23828046, 0.25874835])
]),
'val': transforms.Compose([
transforms.Resize((448,448)),
transforms.ToTensor(),
transforms.Normalize(mean=[0.47889522, 0.47227842, 0.43047404], std=[0.24205776, 0.23828046, 0.25874835])
]),
'test': transforms.Compose([
transforms.Resize((448,448)),
transforms.ToTensor(),
transforms.Normalize(mean=[0.47889522, 0.47227842, 0.43047404], std=[0.24205776, 0.23828046, 0.25874835])
]),
}
data_dir = 'data/cinic10'
image_datasets = {x: datasets.ImageFolder(os.path.join(data_dir, x),
data_transforms[x])
for x in ['train', 'val', 'test']}
dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=9,
shuffle=True, num_workers=0)
for x in ['train', 'val', 'test']}
dataset_sizes = {x: len(image_datasets[x]) for x in ['train', 'val', 'test']}
class_names = image_datasets['train'].classes
device = 'cuda'#torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
def imshow(inp, title=None):
    """Display a normalized CHW image tensor with matplotlib.

    BUG FIX: the original un-normalized with ImageNet statistics although
    this file's ``data_transforms`` normalizes with the CINIC-10 statistics
    below, so the preview colors were off.  Use the matching statistics to
    invert ``transforms.Normalize``.
    """
    inp = inp.numpy().transpose((1, 2, 0))  # CHW -> HWC for matplotlib
    mean = np.array([0.47889522, 0.47227842, 0.43047404])
    std = np.array([0.24205776, 0.23828046, 0.25874835])
    inp = std * inp + mean  # invert Normalize: x*std + mean
    inp = np.clip(inp, 0, 1)
    plt.imshow(inp)
    if title is not None:
        plt.title(title)
    plt.pause(0.001)  # pause a bit so that plots are updated
# Get a batch of training data
inputs, classes = next(iter(dataloaders['train']))
# Make a grid from batch
out = torchvision.utils.make_grid(inputs)
imshow(out)#, title=[class_names[x] for x in classes])
#%%
# model_ft = models.vgg19_bn(pretrained=True)
# num_ftrs = model_ft.classifier[0].in_features
model_ft = models.wide_resnet101_2(pretrained=True)
num_ftrs = model_ft.fc.in_features
half_in_size = round(num_ftrs/2)
layer_width_vgg = 1024 #Small for Resnet, large for VGG
layer_width_res = 20 #Small for Resnet, large for VGG
Num_class=10
class SpinalNet_ResNet(nn.Module):
    """Spinal fully-connected head (ResNet variant: no dropout/batch-norm).

    The incoming feature vector is split into two halves; each of four
    narrow hidden layers sees one half plus the previous layer's output,
    and the concatenation of all four outputs feeds the classifier.
    Reads the module-level ``half_in_size``, ``layer_width_res`` and
    ``Num_class``.
    """

    def __init__(self):
        super(SpinalNet_ResNet, self).__init__()

        def segment(in_features):
            # One spinal segment: a narrow Linear + ReLU.
            return nn.Sequential(
                nn.Linear(in_features, layer_width_res),
                nn.ReLU(inplace=True),
            )

        self.fc_spinal_layer1 = segment(half_in_size)
        self.fc_spinal_layer2 = segment(half_in_size + layer_width_res)
        self.fc_spinal_layer3 = segment(half_in_size + layer_width_res)
        self.fc_spinal_layer4 = segment(half_in_size + layer_width_res)
        self.fc_out = nn.Sequential(nn.Linear(layer_width_res * 4, Num_class))

    def forward(self, x):
        first_half = x[:, 0:half_in_size]
        second_half = x[:, half_in_size:2 * half_in_size]
        # Alternate halves through the spine, feeding each segment the
        # previous segment's output.
        out1 = self.fc_spinal_layer1(first_half)
        out2 = self.fc_spinal_layer2(torch.cat([second_half, out1], dim=1))
        out3 = self.fc_spinal_layer3(torch.cat([first_half, out2], dim=1))
        out4 = self.fc_spinal_layer4(torch.cat([second_half, out3], dim=1))
        return self.fc_out(torch.cat([out1, out2, out3, out4], dim=1))
class SpinalNet_VGG(nn.Module):
    """Spinal fully-connected head for VGG-style features.

    Same spinal wiring as ``SpinalNet_ResNet`` but with Dropout and
    BatchNorm enabled in every segment.  Reads the module-level
    ``half_in_size``, ``layer_width_vgg`` and ``Num_class``.
    """
    def __init__(self):
        super(SpinalNet_VGG, self).__init__()
        # Segment 1: Dropout -> Linear -> BatchNorm -> ReLU on the first half.
        self.fc_spinal_layer1 = nn.Sequential(
            nn.Dropout(p = 0.5), nn.Linear(half_in_size, layer_width_vgg),
            nn.BatchNorm1d(layer_width_vgg), nn.ReLU(inplace=True),)
        # Segments 2-4: one input half plus the previous segment's output.
        self.fc_spinal_layer2 = nn.Sequential(
            nn.Dropout(p = 0.5),
            nn.Linear(half_in_size+layer_width_vgg, layer_width_vgg),
            nn.BatchNorm1d(layer_width_vgg),
            nn.ReLU(inplace=True),)
        self.fc_spinal_layer3 = nn.Sequential(
            nn.Dropout(p = 0.5),
            nn.Linear(half_in_size+layer_width_vgg, layer_width_vgg),
            nn.BatchNorm1d(layer_width_vgg),
            nn.ReLU(inplace=True),)
        self.fc_spinal_layer4 = nn.Sequential(
            nn.Dropout(p = 0.5),
            nn.Linear(half_in_size+layer_width_vgg, layer_width_vgg),
            nn.BatchNorm1d(layer_width_vgg),
            nn.ReLU(inplace=True),)
        # Classifier over the concatenation of all four segment outputs.
        self.fc_out = nn.Sequential(
            nn.Dropout(p = 0.5),
            nn.Linear(layer_width_vgg*4, Num_class),)
    def forward(self, x):
        """Run the spinal cascade; `x` is (batch, 2*half_in_size) features."""
        x1 = self.fc_spinal_layer1(x[:, 0:half_in_size])
        x2 = self.fc_spinal_layer2(torch.cat([ x[:,half_in_size:2*half_in_size], x1], dim=1))
        x3 = self.fc_spinal_layer3(torch.cat([ x[:,0:half_in_size], x2], dim=1))
        x4 = self.fc_spinal_layer4(torch.cat([ x[:,half_in_size:2*half_in_size], x3], dim=1))
        # Concatenate all segment outputs and classify.
        x = torch.cat([x1, x2], dim=1)
        x = torch.cat([x, x3], dim=1)
        x = torch.cat([x, x4], dim=1)
        x = self.fc_out(x)
        return x
# Classic VGG-style fully-connected head, kept for comparison with the
# SpinalNet heads.  NOTE(review): it is never assigned to the model in
# this script, and its 512-unit input would need to match `num_ftrs` —
# verify before enabling.
VGG_fc = nn.Sequential(
    nn.Linear(512, 4096),
    nn.ReLU(inplace=True),
    nn.Dropout(),
    nn.Linear(4096, 4096),
    nn.ReLU(inplace=True),
    nn.Dropout(),
    nn.Linear(4096, Num_class)
)
'''
Changing the fully connected layer to SpinalNet or VGG or ResNet
'''
#model_ft.fc = nn.Linear(num_ftrs, Num_class)
#model_ft.fc = SpinalNet_VGG #SpinalNet_ResNet() #SpinalNet_VGG
model_ft.fc = SpinalNet_ResNet()
#%%
def train_model(model, criterion, optimizer, scheduler, num_epochs=25):
    """Train `model`; validate every epoch and test selectively.

    The 'test' phase always runs during the first nine epochs (epoch <= 8);
    afterwards it runs only in epochs where validation accuracy improved
    (tracked via ``test_token``).  Uses the module-level ``dataloaders``,
    ``dataset_sizes`` and ``device``.  Returns the model re-loaded with the
    best-validation-accuracy weights.
    """
    since = time.time()
    best_model_wts = copy.deepcopy(model.state_dict())
    best_acc = 0.0
    # 1 while the latest epoch improved validation accuracy, else 0.
    test_token=0
    for epoch in range(num_epochs):
        print('Epoch {}/{}'.format(epoch, num_epochs - 1))
        print('-' * 10)
        # Each epoch has a training and validation phase
        for phase in ['train', 'val', 'test']:
            '''
            Test when a better validation result is found
            '''
            # Skip 'test' after epoch 8 unless validation just improved.
            if test_token ==0 and phase == 'test' and epoch>8:
                continue
            test_token =0
            if phase == 'train':
                model.train()  # Set model to training mode
            else:
                model.eval()   # Set model to evaluate mode
            running_loss = 0.0
            running_corrects = 0
            # Iterate over data.
            for inputs, labels in dataloaders[phase]:
                inputs = inputs.to(device)
                labels = labels.to(device)
                # zero the parameter gradients
                optimizer.zero_grad()
                # forward
                # track history if only in train
                with torch.set_grad_enabled(phase == 'train'):
                    outputs = model(inputs)
                    _, preds = torch.max(outputs, 1)
                    loss = criterion(outputs, labels)
                    # backward + optimize only if in training phase
                    if phase == 'train':
                        loss.backward()
                        optimizer.step()
                # statistics: loss is a batch mean, so weight by batch size
                running_loss += loss.item() * inputs.size(0)
                running_corrects += torch.sum(preds == labels.data)
            if phase == 'train':
                scheduler.step()
            epoch_loss = running_loss / dataset_sizes[phase]
            epoch_acc = running_corrects.double() / dataset_sizes[phase]
            print('{} Loss: {:.4f} Acc: {:.4f}'.format(
                phase, epoch_loss, epoch_acc))
            # deep copy the model whenever validation accuracy improves
            if phase == 'val' and epoch_acc > best_acc:
                best_acc = epoch_acc
                best_model_wts = copy.deepcopy(model.state_dict())
                test_token =1
        time_elapsed = time.time() - since
        print('Time from start {:.0f}m {:.0f}s'.format(
            time_elapsed // 60, time_elapsed % 60))
        print()
    time_elapsed = time.time() - since
    print('Training complete in {:.0f}m {:.0f}s'.format(
        time_elapsed // 60, time_elapsed % 60))
    print('Best val Acc: {:4f}'.format(best_acc))
    # load best model weights
    model.load_state_dict(best_model_wts)
    return model
model_ft = model_ft.to(device)
criterion = nn.CrossEntropyLoss()
# Observe that all parameters are being optimized
optimizer_ft = optim.SGD(model_ft.parameters(), lr=0.01, momentum=0.9)
# Decay LR by a factor of 0.1 every 7 epochs
exp_lr_scheduler = lr_scheduler.StepLR(optimizer_ft, step_size=7, gamma=0.1)
model_ft = train_model(model_ft, criterion, optimizer_ft, exp_lr_scheduler,
num_epochs=20)
# Observe that all parameters are being optimized
optimizer_ft = optim.SGD(model_ft.parameters(), lr=0.001, momentum=0.9)
# Decay LR by a factor of 0.1 every 7 epochs
exp_lr_scheduler = lr_scheduler.StepLR(optimizer_ft, step_size=7, gamma=0.1)
model_ft = train_model(model_ft, criterion, optimizer_ft, exp_lr_scheduler,
num_epochs=20)
| 10,712 | 31.761468 | 113 | py |
SpinalNet | SpinalNet-master/Transfer Learning/Transfer_Learning_Caltech101.py | '''
We write this code with the help of PyTorch demo:
https://pytorch.org/tutorials/beginner/transfer_learning_tutorial.html
Dataset is Downloaded from https://www.kaggle.com/huangruichu/caltech101/version/2
Effects:
transforms.Resize((230,230)),
transforms.RandomRotation(15,),
transforms.RandomCrop(224),
transforms.RandomHorizontalFlip(),
wide_resnet101_2 provides 96.11% test accuracy
wide_resnet101_2 SpinalNet_SpinalNet provides 96.40% test accuracy
wide_resnet101_2 SpinalNet_VGG provides 96.87% test accuracy
'''
from __future__ import print_function, division
import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim import lr_scheduler
import numpy as np
import torchvision
from torchvision import datasets, models, transforms
import matplotlib.pyplot as plt
import time
import os
import copy
plt.ion() # interactive mode
# Data augmentation and normalization for training
# Just normalization for validation
data_transforms = {
'train': transforms.Compose([
transforms.Resize((230,230)),
transforms.RandomRotation(15,),
transforms.RandomCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(mean=[0.507, 0.487, 0.441], std=[0.267, 0.256, 0.276])
]),
'val': transforms.Compose([
transforms.Resize((224,224)),
transforms.ToTensor(),
transforms.Normalize(mean=[0.507, 0.487, 0.441], std=[0.267, 0.256, 0.276])
]),
'test': transforms.Compose([
transforms.Resize((224,224)),
transforms.ToTensor(),
transforms.Normalize(mean=[0.507, 0.487, 0.441], std=[0.267, 0.256, 0.276])
]),
}
data_dir = 'data/Caltech101'
image_datasets = {x: datasets.ImageFolder(os.path.join(data_dir, x),
data_transforms[x])
for x in ['train', 'val', 'test']}
dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=30,
shuffle=True, num_workers=0)
for x in ['train', 'val', 'test']}
dataset_sizes = {x: len(image_datasets[x]) for x in ['train', 'val', 'test']}
class_names = image_datasets['train'].classes
device = 'cuda'#torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
def imshow(inp, title=None):
    """Display a normalized CHW image tensor with matplotlib.

    BUG FIX: the original un-normalized with ImageNet statistics although
    this file's ``data_transforms`` normalizes with [0.507, 0.487, 0.441] /
    [0.267, 0.256, 0.276]; the preview colors were therefore off.  Use the
    matching statistics to invert ``transforms.Normalize``.
    """
    inp = inp.numpy().transpose((1, 2, 0))  # CHW -> HWC for matplotlib
    mean = np.array([0.507, 0.487, 0.441])
    std = np.array([0.267, 0.256, 0.276])
    inp = std * inp + mean  # invert Normalize: x*std + mean
    inp = np.clip(inp, 0, 1)
    plt.imshow(inp)
    if title is not None:
        plt.title(title)
    plt.pause(0.001)  # pause a bit so that plots are updated
# Get a batch of training data
inputs, classes = next(iter(dataloaders['train']))
# Make a grid from batch
out = torchvision.utils.make_grid(inputs)
imshow(out)#, title=[class_names[x] for x in classes])
#%%
# model_ft = models.vgg19_bn(pretrained=True)
# num_ftrs = model_ft.classifier[0].in_features
model_ft = models.wide_resnet101_2(pretrained=True)
num_ftrs = model_ft.fc.in_features
half_in_size = round(num_ftrs/2)
layer_width = 1024 #Small for Resnet, large for VGG
Num_class=101
class SpinalNet_ResNet(nn.Module):
    """Spinal fully-connected head for ResNet-style features.

    The input feature vector is split into two halves; each of four narrow
    hidden layers receives one half plus the previous layer's output, and
    the concatenated four outputs feed the final linear classifier.
    Dropout/BatchNorm are present but commented out in this configuration.
    Reads the module-level ``half_in_size``, ``layer_width``, ``Num_class``.
    """
    def __init__(self):
        super(SpinalNet_ResNet, self).__init__()
        # Segment 1: sees only the first half of the input features.
        self.fc_spinal_layer1 = nn.Sequential(
            #nn.Dropout(p = 0.5),
            nn.Linear(half_in_size, layer_width),
            #nn.BatchNorm1d(layer_width),
            nn.ReLU(inplace=True),)
        # Segments 2-4: see one input half plus the previous segment's output.
        self.fc_spinal_layer2 = nn.Sequential(
            #nn.Dropout(p = 0.5),
            nn.Linear(half_in_size+layer_width, layer_width),
            #nn.BatchNorm1d(layer_width),
            nn.ReLU(inplace=True),)
        self.fc_spinal_layer3 = nn.Sequential(
            #nn.Dropout(p = 0.5),
            nn.Linear(half_in_size+layer_width, layer_width),
            #nn.BatchNorm1d(layer_width),
            nn.ReLU(inplace=True),)
        self.fc_spinal_layer4 = nn.Sequential(
            #nn.Dropout(p = 0.5),
            nn.Linear(half_in_size+layer_width, layer_width),
            #nn.BatchNorm1d(layer_width),
            nn.ReLU(inplace=True),)
        # Classifier over the concatenation of all four segment outputs.
        self.fc_out = nn.Sequential(
            #nn.Dropout(p = 0.5),
            nn.Linear(layer_width*4, Num_class),)
    def forward(self, x):
        """Run the spinal cascade; `x` is (batch, 2*half_in_size) features."""
        x1 = self.fc_spinal_layer1(x[:, 0:half_in_size])
        x2 = self.fc_spinal_layer2(torch.cat([ x[:,half_in_size:2*half_in_size], x1], dim=1))
        x3 = self.fc_spinal_layer3(torch.cat([ x[:,0:half_in_size], x2], dim=1))
        x4 = self.fc_spinal_layer4(torch.cat([ x[:,half_in_size:2*half_in_size], x3], dim=1))
        # Concatenate all segment outputs and classify.
        x = torch.cat([x1, x2], dim=1)
        x = torch.cat([x, x3], dim=1)
        x = torch.cat([x, x4], dim=1)
        x = self.fc_out(x)
        return x
class SpinalNet_VGG(nn.Module):
    """Spinal fully-connected head for VGG-style features.

    Same spinal wiring as ``SpinalNet_ResNet`` but with Dropout and
    BatchNorm enabled in every segment.  Reads the module-level
    ``half_in_size``, ``layer_width`` and ``Num_class``.
    """
    def __init__(self):
        super(SpinalNet_VGG, self).__init__()
        # Segment 1: Dropout -> Linear -> BatchNorm -> ReLU on the first half.
        self.fc_spinal_layer1 = nn.Sequential(
            nn.Dropout(p = 0.5), nn.Linear(half_in_size, layer_width),
            nn.BatchNorm1d(layer_width), nn.ReLU(inplace=True),)
        # Segments 2-4: one input half plus the previous segment's output.
        self.fc_spinal_layer2 = nn.Sequential(
            nn.Dropout(p = 0.5),
            nn.Linear(half_in_size+layer_width, layer_width),
            nn.BatchNorm1d(layer_width),
            nn.ReLU(inplace=True),)
        self.fc_spinal_layer3 = nn.Sequential(
            nn.Dropout(p = 0.5),
            nn.Linear(half_in_size+layer_width, layer_width),
            nn.BatchNorm1d(layer_width),
            nn.ReLU(inplace=True),)
        self.fc_spinal_layer4 = nn.Sequential(
            nn.Dropout(p = 0.5),
            nn.Linear(half_in_size+layer_width, layer_width),
            nn.BatchNorm1d(layer_width),
            nn.ReLU(inplace=True),)
        # Classifier over the concatenation of all four segment outputs.
        self.fc_out = nn.Sequential(
            nn.Dropout(p = 0.5),
            nn.Linear(layer_width*4, Num_class),)
    def forward(self, x):
        """Run the spinal cascade; `x` is (batch, 2*half_in_size) features."""
        x1 = self.fc_spinal_layer1(x[:, 0:half_in_size])
        x2 = self.fc_spinal_layer2(torch.cat([ x[:,half_in_size:2*half_in_size], x1], dim=1))
        x3 = self.fc_spinal_layer3(torch.cat([ x[:,0:half_in_size], x2], dim=1))
        x4 = self.fc_spinal_layer4(torch.cat([ x[:,half_in_size:2*half_in_size], x3], dim=1))
        # Concatenate all segment outputs and classify.
        x = torch.cat([x1, x2], dim=1)
        x = torch.cat([x, x3], dim=1)
        x = torch.cat([x, x4], dim=1)
        x = self.fc_out(x)
        return x
VGG_fc = nn.Sequential(
nn.Linear(512, 4096),
nn.ReLU(inplace=True),
nn.Dropout(),
nn.Linear(4096, 4096),
nn.ReLU(inplace=True),
nn.Dropout(),
nn.Linear(4096, Num_class)
)
'''
Changing the fully connected layer to SpinalNet or VGG or ResNet
'''
model_ft.fc = nn.Linear(num_ftrs, 101)
#model_ft.fc = SpinalNet_ResNet() #SpinalNet_VGG
#%%
def train_model(model, criterion, optimizer, scheduler, num_epochs=25):
    """Train `model`; validate every epoch, test only when validation improves.

    Uses the module-level ``dataloaders``, ``dataset_sizes`` and ``device``.
    The 'test' phase runs only in epochs where validation accuracy reached a
    new best (tracked via ``test_token``).  Returns the model re-loaded with
    the best-validation-accuracy weights.

    Args:
        model: network to optimize (already on ``device`` or movable to it).
        criterion: loss function (batch-mean reduction assumed below).
        optimizer: optimizer over ``model`` parameters.
        scheduler: LR scheduler, stepped once per training phase.
        num_epochs: number of epochs to run.
    """
    since = time.time()
    best_model_wts = copy.deepcopy(model.state_dict())
    best_acc = 0.0
    test_token = 0  # 1 while the latest epoch improved validation accuracy
    for epoch in range(num_epochs):
        print('Epoch {}/{}'.format(epoch, num_epochs - 1))
        print('-' * 10)
        # Each epoch has a training and validation phase
        for phase in ['train', 'val', 'test']:
            # Run the test phase only when a better validation result was found.
            if test_token == 0 and phase == 'test':
                continue
            test_token = 0
            if phase == 'train':
                model.train()  # Set model to training mode
            else:
                model.eval()   # Set model to evaluate mode
            running_loss = 0.0
            running_corrects = 0
            # Iterate over data.
            for inputs, labels in dataloaders[phase]:
                inputs = inputs.to(device)
                labels = labels.to(device)
                # zero the parameter gradients
                optimizer.zero_grad()
                # forward; track gradient history only while training
                with torch.set_grad_enabled(phase == 'train'):
                    outputs = model(inputs)
                    _, preds = torch.max(outputs, 1)
                    loss = criterion(outputs, labels)
                    # backward + optimize only if in training phase
                    if phase == 'train':
                        loss.backward()
                        optimizer.step()
                # statistics: loss is a batch mean, so weight by batch size
                running_loss += loss.item() * inputs.size(0)
                running_corrects += torch.sum(preds == labels.data)
            if phase == 'train':
                scheduler.step()
            epoch_loss = running_loss / dataset_sizes[phase]
            epoch_acc = running_corrects.double() / dataset_sizes[phase]
            print('{} Loss: {:.4f} Acc: {:.4f}'.format(
                phase, epoch_loss, epoch_acc))
            # deep copy the model whenever validation accuracy improves
            if phase == 'val' and epoch_acc > best_acc:
                best_acc = epoch_acc
                best_model_wts = copy.deepcopy(model.state_dict())
                test_token = 1
        time_elapsed = time.time() - since
        # BUG FIX: this per-epoch message previously said "Training complete",
        # a copy-paste of the final message; it reports elapsed time so far
        # (matching the sibling scripts).
        print('Time from start {:.0f}m {:.0f}s'.format(
            time_elapsed // 60, time_elapsed % 60))
        print()
    time_elapsed = time.time() - since
    print('Training complete in {:.0f}m {:.0f}s'.format(
        time_elapsed // 60, time_elapsed % 60))
    print('Best val Acc: {:4f}'.format(best_acc))
    # load best model weights
    model.load_state_dict(best_model_wts)
    return model
model_ft = model_ft.to(device)
criterion = nn.CrossEntropyLoss()
# Observe that all parameters are being optimized
optimizer_ft = optim.SGD(model_ft.parameters(), lr=0.001, momentum=0.9)
# Decay LR by a factor of 0.1 every 7 epochs
exp_lr_scheduler = lr_scheduler.StepLR(optimizer_ft, step_size=7, gamma=0.1)
model_ft = train_model(model_ft, criterion, optimizer_ft, exp_lr_scheduler,
num_epochs=20)
| 10,203 | 30.788162 | 93 | py |
SpinalNet | SpinalNet-master/Transfer Learning/Pytorch_data_to_folders.py | # -*- coding: utf-8 -*-
"""
We need to create train and val folders manually before running the script
@author: Dipu
"""
import torchvision
import matplotlib
import matplotlib.pyplot as plt
import numpy
import imageio
import os
# Download the SVHN train split; no tensor transform is applied because the
# PIL images are written straight back to disk below.
data_train = torchvision.datasets.SVHN('./data', split='train', download=True,
                             transform=torchvision.transforms.Compose([
                             ]))
folderlocation = './data/SVHN/'
# Create one sub-folder per class for both splits.  Fix: os.makedirs also
# builds the missing parents (./data/SVHN, train/, val/), so the folders no
# longer have to be created manually beforehand (os.mkdir raised
# FileNotFoundError when the parents did not exist).
for iter1 in range(10): # 10 = number of classes
    os.makedirs(folderlocation + 'train/' + str(iter1), exist_ok=True)
    os.makedirs(folderlocation + 'val/' + str(iter1), exist_ok=True)
# Write every training image into the folder named after its label, so the
# tree can later be read back with torchvision's ImageFolder.
for iter1 in range(len(data_train)):
    x, a = data_train[iter1]
    imageio.imwrite(folderlocation + 'train/'+str(a)+'/train'+str(iter1)+'.png', x)
data_test = torchvision.datasets.SVHN('./data', split='test', download=True,
                             transform=torchvision.transforms.Compose([
                             ]))
# The official test split is exported as the 'val' folder.
for iter1 in range(len(data_test)):
    x, a = data_test[iter1]
    imageio.imwrite(folderlocation + 'val/'+str(a)+'/test'+str(iter1)+'.png', x)
| 1,195 | 28.170732 | 83 | py |
SpinalNet | SpinalNet-master/Transfer Learning/Transfer_Learning_Fruits360.py | '''
We write this code with the help of PyTorch demo:
https://pytorch.org/tutorials/beginner/transfer_learning_tutorial.html
Dataset is Downloaded from https://www.kaggle.com/moltean/fruits
Effects:
transforms.Resize((140,140)),
transforms.RandomRotation(15,),
transforms.RandomResizedCrop(128),
transforms.RandomHorizontalFlip(),
transforms.RandomVerticalFlip(),
Example output in Kaggle:
https://www.kaggle.com/dipuk0506/spinalnet-fruit360-99-99-accuracy
In that example, we got 99.99% Accuracy.
In one training session, we got 100% accuracy.
'''
from __future__ import print_function, division
import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim import lr_scheduler
import numpy as np
import torchvision
from torchvision import datasets, models, transforms
import matplotlib.pyplot as plt
import time
import os
import copy
plt.ion() # interactive mode
# Data augmentation and normalization for training
# Just normalization for validation
# Per-phase preprocessing: heavy augmentation (resize, rotation, random
# resized crop, horizontal+vertical flips) for 'Training'; a deterministic
# resize only for 'Test'.  Both phases share the same normalisation stats.
data_transforms = {
    'Training': transforms.Compose([
        transforms.Resize((140,140)),
        transforms.RandomRotation(15,),
        transforms.RandomResizedCrop(128),
        transforms.RandomHorizontalFlip(),
        transforms.RandomVerticalFlip(),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.507, 0.487, 0.441], std=[0.267, 0.256, 0.276])
    ]),
    'Test': transforms.Compose([
        transforms.Resize(128),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.507, 0.487, 0.441], std=[0.267, 0.256, 0.276])
    ]),
}
# Kaggle layout of Fruits-360: one sub-folder per class under 'Training'
# and 'Test', readable by ImageFolder.
data_dir = '../input/fruits/fruits-360'
image_datasets = {x: datasets.ImageFolder(os.path.join(data_dir, x),
                                          data_transforms[x])
                  for x in ['Training', 'Test']}
dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=224,
                                             shuffle=True, num_workers=0)
              for x in ['Training', 'Test']}
dataset_sizes = {x: len(image_datasets[x]) for x in ['Training', 'Test']}
class_names = image_datasets['Training'].classes
# Prefer the first GPU, falling back to CPU when CUDA is unavailable.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
def imshow(inp, title=None):
    """Display a (C, H, W) image tensor after undoing the normalisation.

    Args:
        inp: torch tensor holding one image (or an image grid) in CHW order.
        title: optional string drawn above the image.
    """
    # CHW tensor -> HWC numpy array, then invert Normalize(mean, std).
    # NOTE(review): these are the ImageNet stats, while the pipeline above
    # normalises with different values -- colours may be slightly off;
    # confirm intent.
    image = inp.numpy().transpose((1, 2, 0))
    channel_mean = np.array([0.485, 0.456, 0.406])
    channel_std = np.array([0.229, 0.224, 0.225])
    image = np.clip(image * channel_std + channel_mean, 0, 1)
    plt.imshow(image)
    if title is not None:
        plt.title(title)
    plt.pause(0.001)  # give the GUI backend a moment to redraw
# Get a batch of training data (sanity-check that the pipeline works).
inputs, classes = next(iter(dataloaders['Training']))
# Make a grid from the batch and display it.
out = torchvision.utils.make_grid(inputs)
imshow(out)#, title=[class_names[x] for x in classes])
# Backbone: Wide-ResNet-101-2 pretrained on ImageNet.  The commented lines
# show the alternative VGG-19-bn backbone.
# model_ft = models.vgg19_bn(pretrained=True)
# num_ftrs = model_ft.classifier[0].in_features
model_ft = models.wide_resnet101_2(pretrained=True)
num_ftrs = model_ft.fc.in_features
# Globals consumed by the SpinalNet heads below: the backbone feature
# vector is split into two halves of half_in_size features each.
half_in_size = round(num_ftrs/2)
layer_width = 131 #Small for Resnet, large for VGG
Num_class=131  # Fruits-360 has 131 classes
class SpinalNet_ResNet(nn.Module):
    """SpinalNet classifier head used to replace a ResNet's final fc layer.

    The incoming feature vector (length 2 * half_in_size) is split into two
    halves.  Each of four narrow hidden layers receives one half of the
    input concatenated with the previous layer's activation, and the output
    layer classifies the concatenation of all four hidden activations.
    Relies on the module-level globals half_in_size, layer_width and
    Num_class being defined before instantiation.
    """
    def __init__(self):
        super(SpinalNet_ResNet, self).__init__()
        # Hidden layer 1 sees the first half of the input features.
        # Dropout/BatchNorm are kept commented out: the ResNet variant of
        # the head uses plain Linear+ReLU.
        self.fc_spinal_layer1 = nn.Sequential(
            #nn.Dropout(p = 0.5),
            nn.Linear(half_in_size, layer_width),
            #nn.BatchNorm1d(layer_width),
            nn.ReLU(inplace=True),)
        # Hidden layers 2-4 consume half of the input plus the previous
        # layer's activation (the "spinal" connection).
        self.fc_spinal_layer2 = nn.Sequential(
            #nn.Dropout(p = 0.5),
            nn.Linear(half_in_size+layer_width, layer_width),
            #nn.BatchNorm1d(layer_width),
            nn.ReLU(inplace=True),)
        self.fc_spinal_layer3 = nn.Sequential(
            #nn.Dropout(p = 0.5),
            nn.Linear(half_in_size+layer_width, layer_width),
            #nn.BatchNorm1d(layer_width),
            nn.ReLU(inplace=True),)
        self.fc_spinal_layer4 = nn.Sequential(
            #nn.Dropout(p = 0.5),
            nn.Linear(half_in_size+layer_width, layer_width),
            #nn.BatchNorm1d(layer_width),
            nn.ReLU(inplace=True),)
        # Output layer reads the concatenated activations of all 4 layers.
        self.fc_out = nn.Sequential(
            #nn.Dropout(p = 0.5),
            nn.Linear(layer_width*4, Num_class),)
    def forward(self, x):
        """Return class scores; x is expected to be (batch, 2*half_in_size)."""
        x1 = self.fc_spinal_layer1(x[:, 0:half_in_size])
        # Alternate which input half feeds each layer; every layer also
        # receives the previous layer's output.
        x2 = self.fc_spinal_layer2(torch.cat([ x[:,half_in_size:2*half_in_size], x1], dim=1))
        x3 = self.fc_spinal_layer3(torch.cat([ x[:,0:half_in_size], x2], dim=1))
        x4 = self.fc_spinal_layer4(torch.cat([ x[:,half_in_size:2*half_in_size], x3], dim=1))
        x = torch.cat([x1, x2], dim=1)
        x = torch.cat([x, x3], dim=1)
        x = torch.cat([x, x4], dim=1)
        x = self.fc_out(x)
        return x
class SpinalNet_VGG(nn.Module):
    """SpinalNet classifier head in the VGG style (dropout + batch-norm).

    Same spinal wiring as SpinalNet_ResNet, but each hidden layer is
    Dropout -> Linear -> BatchNorm1d -> ReLU, matching the regularised
    VGG classifier.  Relies on the module-level globals half_in_size,
    layer_width and Num_class being defined before instantiation.
    """
    def __init__(self):
        super(SpinalNet_VGG, self).__init__()
        # Hidden layer 1: first half of the input features.
        self.fc_spinal_layer1 = nn.Sequential(
            nn.Dropout(p = 0.5), nn.Linear(half_in_size, layer_width),
            nn.BatchNorm1d(layer_width), nn.ReLU(inplace=True),)
        # Hidden layers 2-4: half of the input plus the previous activation.
        self.fc_spinal_layer2 = nn.Sequential(
            nn.Dropout(p = 0.5),
            nn.Linear(half_in_size+layer_width, layer_width),
            nn.BatchNorm1d(layer_width),
            nn.ReLU(inplace=True),)
        self.fc_spinal_layer3 = nn.Sequential(
            nn.Dropout(p = 0.5),
            nn.Linear(half_in_size+layer_width, layer_width),
            nn.BatchNorm1d(layer_width),
            nn.ReLU(inplace=True),)
        self.fc_spinal_layer4 = nn.Sequential(
            nn.Dropout(p = 0.5),
            nn.Linear(half_in_size+layer_width, layer_width),
            nn.BatchNorm1d(layer_width),
            nn.ReLU(inplace=True),)
        # Output layer classifies the concatenation of all 4 activations.
        self.fc_out = nn.Sequential(
            nn.Dropout(p = 0.5),
            nn.Linear(layer_width*4, Num_class),)
    def forward(self, x):
        """Return class scores; x is expected to be (batch, 2*half_in_size)."""
        x1 = self.fc_spinal_layer1(x[:, 0:half_in_size])
        # Alternate input halves; each layer also sees the previous output.
        x2 = self.fc_spinal_layer2(torch.cat([ x[:,half_in_size:2*half_in_size], x1], dim=1))
        x3 = self.fc_spinal_layer3(torch.cat([ x[:,0:half_in_size], x2], dim=1))
        x4 = self.fc_spinal_layer4(torch.cat([ x[:,half_in_size:2*half_in_size], x3], dim=1))
        x = torch.cat([x1, x2], dim=1)
        x = torch.cat([x, x3], dim=1)
        x = torch.cat([x, x4], dim=1)
        x = self.fc_out(x)
        return x
# Stand-alone VGG-style fully connected head (kept as an alternative;
# not attached to the backbone by default).
VGG_fc = nn.Sequential(
    nn.Linear(512, 4096),
    nn.ReLU(inplace=True),
    nn.Dropout(),
    nn.Linear(4096, 4096),
    nn.ReLU(inplace=True),
    nn.Dropout(),
    nn.Linear(4096, Num_class)
)
'''
Changing the fully connected layer to SpinalNet or VGG or ResNet
'''
# Replace the backbone's classifier with the SpinalNet head defined above.
#model_ft.fc = nn.Linear(num_ftrs, 10)
model_ft.fc = SpinalNet_ResNet() #SpinalNet_VGG
def train_model(model, criterion, optimizer, scheduler, num_epochs=25):
    """Fine-tune `model`, keeping the weights of the best 'Test' epoch.

    Args:
        model: network to optimise (already on `device`).
        criterion: loss function applied to the logits.
        optimizer: optimiser updating the model's parameters.
        scheduler: LR scheduler, stepped once per training phase.
        num_epochs: number of passes over both phases.

    Returns:
        The model with the weights that scored the highest 'Test' accuracy
        loaded back in.
    """
    start_time = time.time()
    best_weights = copy.deepcopy(model.state_dict())
    best_accuracy = 0.0
    for epoch in range(num_epochs):
        print(f'Epoch {epoch}/{num_epochs - 1}')
        # One optimisation pass followed by one evaluation pass.
        for phase in ['Training', 'Test']:
            is_training = phase == 'Training'
            model.train() if is_training else model.eval()
            loss_sum = 0.0
            correct_sum = 0
            for inputs, labels in dataloaders[phase]:
                inputs, labels = inputs.to(device), labels.to(device)
                optimizer.zero_grad()
                # Gradients are only tracked and applied while training.
                with torch.set_grad_enabled(is_training):
                    outputs = model(inputs)
                    _, preds = torch.max(outputs, 1)
                    loss = criterion(outputs, labels)
                    if is_training:
                        loss.backward()
                        optimizer.step()
                # Accumulate per-batch statistics (loss is a batch mean).
                loss_sum += loss.item() * inputs.size(0)
                correct_sum += torch.sum(preds == labels.data)
            if is_training:
                scheduler.step()
            epoch_loss = loss_sum / dataset_sizes[phase]
            epoch_acc = correct_sum.double() / dataset_sizes[phase]
            print(f'{phase} Loss: {epoch_loss:.4f} Acc: {epoch_acc:.4f}')
            # Snapshot the weights whenever held-out accuracy improves.
            if phase == 'Test' and epoch_acc > best_accuracy:
                best_accuracy = epoch_acc
                best_weights = copy.deepcopy(model.state_dict())
            elapsed = time.time() - start_time
            print(f'Time from Start {elapsed // 60:.0f}m {elapsed % 60:.0f}s')
        print()
    elapsed = time.time() - start_time
    print(f'Training complete in {elapsed // 60:.0f}m {elapsed % 60:.0f}s')
    print(f'Best val Acc: {best_accuracy:4f}')
    # Restore the best snapshot before returning.
    model.load_state_dict(best_weights)
    return model
# Move the model (with its new SpinalNet head) to the device and set up
# full fine-tuning with SGD + momentum and a step learning-rate schedule.
model_ft = model_ft.to(device)
criterion = nn.CrossEntropyLoss()
# Observe that all parameters are being optimized
optimizer_ft = optim.SGD(model_ft.parameters(), lr=0.01, momentum=0.9)
# Decay LR by a factor of 0.1 every 7 epochs
exp_lr_scheduler = lr_scheduler.StepLR(optimizer_ft, step_size=7, gamma=0.1)
model_ft = train_model(model_ft, criterion, optimizer_ft, exp_lr_scheduler,
num_epochs=20) | 9,786 | 32.064189 | 93 | py |
SpinalNet | SpinalNet-master/Transfer Learning/Transfer_Learning_Stanford_Cars.py |
'''
Stanford Cars
We write this code with the help of PyTorch demo:
https://pytorch.org/tutorials/beginner/transfer_learning_tutorial.html
Dataset is downloaded from https://www.kaggle.com/jutrera/stanford-car-dataset-by-classes-folder?
Effect:
transforms.Resize((456,456)),
transforms.RandomRotation(15,),
transforms.RandomCrop(448),
transforms.RandomHorizontalFlip(),
196 classes
wide_resnet101_2 Spinal ResNet FC gives 93.35% test accuracy
'''
from __future__ import print_function, division
import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim import lr_scheduler
import numpy as np
import torchvision
from torchvision import datasets, models, transforms
import matplotlib.pyplot as plt
import time
import os
import copy
plt.ion() # interactive mode
# Data augmentation and normalization for training
# Just normalization for validation
data_transforms = {
'train': transforms.Compose([
transforms.Resize((456,456)),
transforms.RandomRotation(15,),
transforms.RandomCrop(448),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(mean=[0.507, 0.487, 0.441], std=[0.267, 0.256, 0.276])
]),
'val': transforms.Compose([
transforms.Resize((448,448)),
transforms.ToTensor(),
transforms.Normalize(mean=[0.507, 0.487, 0.441], std=[0.267, 0.256, 0.276])
]),
}
data_dir = 'data/car_data'
# Build train/val datasets from the ImageFolder layout (one sub-folder per
# class), applying the phase-specific transforms defined above.
image_datasets = {x: datasets.ImageFolder(os.path.join(data_dir, x),
                                          data_transforms[x])
                  for x in ['train', 'val']}
dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=9,
                                             shuffle=True, num_workers=0)
              for x in ['train', 'val']}
dataset_sizes = {x: len(image_datasets[x]) for x in ['train', 'val']}
class_names = image_datasets['train'].classes
# Fix: fall back to CPU when CUDA is unavailable instead of the hard-coded
# 'cuda' string, which raised a RuntimeError on CPU-only machines.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
def imshow(inp, title=None):
    """Display a (C, H, W) image tensor after undoing the normalisation.

    Args:
        inp: torch tensor holding one image (or an image grid) in CHW order.
        title: optional string drawn above the image.
    """
    # CHW tensor -> HWC numpy array, then invert Normalize(mean, std).
    # NOTE(review): these are the ImageNet stats, while the pipeline above
    # normalises with different values -- colours may be slightly off;
    # confirm intent.
    image = inp.numpy().transpose((1, 2, 0))
    channel_mean = np.array([0.485, 0.456, 0.406])
    channel_std = np.array([0.229, 0.224, 0.225])
    image = np.clip(image * channel_std + channel_mean, 0, 1)
    plt.imshow(image)
    if title is not None:
        plt.title(title)
    plt.pause(0.001)  # give the GUI backend a moment to redraw
# Get a batch of training data
inputs, classes = next(iter(dataloaders['train']))
# Make a grid from batch
out = torchvision.utils.make_grid(inputs)
imshow(out)#, title=[class_names[x] for x in classes])
#%%
# model_ft = models.vgg19_bn(pretrained=True)
# num_ftrs = model_ft.classifier[0].in_features
model_ft = models.wide_resnet101_2(pretrained=True)
num_ftrs = model_ft.fc.in_features
half_in_size = round(num_ftrs/2)
layer_width = 196 #Small for Resnet, large for VGG
Num_class=196
class SpinalNet_ResNet(nn.Module):
    """Spinal fully-connected head for ResNet features.

    The input feature vector (length 2 * half_in_size) is split into two
    halves; each of the four hidden layers sees one half of the input plus
    the previous layer's activation, and the classifier reads the
    concatenation of all four hidden activations.  Requires the
    module-level globals half_in_size, layer_width and Num_class.
    """

    def __init__(self):
        super(SpinalNet_ResNet, self).__init__()
        # Four hidden layers; each consumes half of the input features
        # together with the output of the preceding layer.
        self.fc_spinal_layer1 = nn.Sequential(
            nn.Linear(half_in_size, layer_width),
            nn.ReLU(inplace=True),)
        self.fc_spinal_layer2 = nn.Sequential(
            nn.Linear(half_in_size + layer_width, layer_width),
            nn.ReLU(inplace=True),)
        self.fc_spinal_layer3 = nn.Sequential(
            nn.Linear(half_in_size + layer_width, layer_width),
            nn.ReLU(inplace=True),)
        self.fc_spinal_layer4 = nn.Sequential(
            nn.Linear(half_in_size + layer_width, layer_width),
            nn.ReLU(inplace=True),)
        # Classifier over the concatenated hidden activations.
        self.fc_out = nn.Sequential(
            nn.Linear(layer_width * 4, Num_class),)

    def forward(self, x):
        """Return class scores for x of shape (batch, 2*half_in_size)."""
        first_half = x[:, 0:half_in_size]
        second_half = x[:, half_in_size:2 * half_in_size]
        x1 = self.fc_spinal_layer1(first_half)
        x2 = self.fc_spinal_layer2(torch.cat([second_half, x1], dim=1))
        x3 = self.fc_spinal_layer3(torch.cat([first_half, x2], dim=1))
        x4 = self.fc_spinal_layer4(torch.cat([second_half, x3], dim=1))
        return self.fc_out(torch.cat([x1, x2, x3, x4], dim=1))
class SpinalNet_VGG(nn.Module):
def __init__(self):
super(SpinalNet_VGG, self).__init__()
self.fc_spinal_layer1 = nn.Sequential(
nn.Dropout(p = 0.5), nn.Linear(half_in_size, layer_width),
nn.BatchNorm1d(layer_width), nn.ReLU(inplace=True),)
self.fc_spinal_layer2 = nn.Sequential(
nn.Dropout(p = 0.5),
nn.Linear(half_in_size+layer_width, layer_width),
nn.BatchNorm1d(layer_width),
nn.ReLU(inplace=True),)
self.fc_spinal_layer3 = nn.Sequential(
nn.Dropout(p = 0.5),
nn.Linear(half_in_size+layer_width, layer_width),
nn.BatchNorm1d(layer_width),
nn.ReLU(inplace=True),)
self.fc_spinal_layer4 = nn.Sequential(
nn.Dropout(p = 0.5),
nn.Linear(half_in_size+layer_width, layer_width),
nn.BatchNorm1d(layer_width),
nn.ReLU(inplace=True),)
self.fc_out = nn.Sequential(
nn.Dropout(p = 0.5),
nn.Linear(layer_width*4, Num_class),)
def forward(self, x):
x1 = self.fc_spinal_layer1(x[:, 0:half_in_size])
x2 = self.fc_spinal_layer2(torch.cat([ x[:,half_in_size:2*half_in_size], x1], dim=1))
x3 = self.fc_spinal_layer3(torch.cat([ x[:,0:half_in_size], x2], dim=1))
x4 = self.fc_spinal_layer4(torch.cat([ x[:,half_in_size:2*half_in_size], x3], dim=1))
x = torch.cat([x1, x2], dim=1)
x = torch.cat([x, x3], dim=1)
x = torch.cat([x, x4], dim=1)
x = self.fc_out(x)
return x
VGG_fc = nn.Sequential(
nn.Linear(512, 4096),
nn.ReLU(inplace=True),
nn.Dropout(),
nn.Linear(4096, 4096),
nn.ReLU(inplace=True),
nn.Dropout(),
nn.Linear(4096, Num_class)
)
'''
Changing the fully connected layer to SpinalNet or VGG or ResNet
'''
#model_ft.fc = nn.Linear(num_ftrs, Num_class)
model_ft.fc = SpinalNet_ResNet() #SpinalNet_VGG
#%%
def train_model(model, criterion, optimizer, scheduler, num_epochs=25):
    """Fine-tune `model` on the 'train' split and track the best 'val' epoch.

    Args:
        model: network to optimise (already on `device`).
        criterion: loss function applied to the logits.
        optimizer: optimiser updating the model's parameters.
        scheduler: LR scheduler, stepped once per training phase.
        num_epochs: number of train+val passes.

    Returns:
        The model with the weights of the best validation epoch restored.
    """
    since = time.time()
    best_model_wts = copy.deepcopy(model.state_dict())
    best_acc = 0.0
    for epoch in range(num_epochs):
        print('Epoch {}/{}'.format(epoch, num_epochs - 1))
        print('-' * 10)
        # Each epoch has a training and validation phase
        for phase in ['train', 'val']:
            if phase == 'train':
                model.train()  # Set model to training mode
            else:
                model.eval()   # Set model to evaluate mode
            running_loss = 0.0
            running_corrects = 0
            # Iterate over data.
            for inputs, labels in dataloaders[phase]:
                inputs = inputs.to(device)
                labels = labels.to(device)
                # zero the parameter gradients
                optimizer.zero_grad()
                # forward
                # track history if only in train
                with torch.set_grad_enabled(phase == 'train'):
                    outputs = model(inputs)
                    _, preds = torch.max(outputs, 1)
                    loss = criterion(outputs, labels)
                    # backward + optimize only if in training phase
                    if phase == 'train':
                        loss.backward()
                        optimizer.step()
                # statistics (criterion returns a batch mean, so re-weight
                # by the batch size before averaging over the dataset)
                running_loss += loss.item() * inputs.size(0)
                running_corrects += torch.sum(preds == labels.data)
            if phase == 'train':
                scheduler.step()
            epoch_loss = running_loss / dataset_sizes[phase]
            epoch_acc = running_corrects.double() / dataset_sizes[phase]
            print('{} Loss: {:.4f} Acc: {:.4f}'.format(
                phase, epoch_loss, epoch_acc))
            # deep copy the model whenever validation accuracy improves
            if phase == 'val' and epoch_acc > best_acc:
                best_acc = epoch_acc
                best_model_wts = copy.deepcopy(model.state_dict())
            time_elapsed = time.time() - since
            print('Time from Start {:.0f}m {:.0f}s'.format(
                time_elapsed // 60, time_elapsed % 60))
        print()
    time_elapsed = time.time() - since
    print('Training complete in {:.0f}m {:.0f}s'.format(
        time_elapsed // 60, time_elapsed % 60))
    print('Best val Acc: {:4f}'.format(best_acc))
    # load best model weights
    model.load_state_dict(best_model_wts)
    return model
model_ft = model_ft.to(device)
criterion = nn.CrossEntropyLoss()
# Observe that all parameters are being optimized
optimizer_ft = optim.SGD(model_ft.parameters(), lr=0.001, momentum=0.9)
# Decay LR by a factor of 0.1 every 7 epochs
exp_lr_scheduler = lr_scheduler.StepLR(optimizer_ft, step_size=7, gamma=0.1)
model_ft = train_model(model_ft, criterion, optimizer_ft, exp_lr_scheduler,
num_epochs=25)
optimizer_ft = optim.SGD(model_ft.parameters(), lr=0.0001, momentum=0.9)
# Decay LR by a factor of 0.1 every 7 epochs
exp_lr_scheduler = lr_scheduler.StepLR(optimizer_ft, step_size=7, gamma=0.1)
model_ft = train_model(model_ft, criterion, optimizer_ft, exp_lr_scheduler,
num_epochs=25)
| 9,929 | 30.52381 | 97 | py |
SpinalNet | SpinalNet-master/Transfer Learning/Transfer_Learning_Oxford102flower.py | '''
We write this code with the help of PyTorch demo:
https://pytorch.org/tutorials/beginner/transfer_learning_tutorial.html
The dataset is downloaded from https://www.kaggle.com/c/oxford-102-flower-pytorch/data
Effects:
transforms.Resize((464,464)),
transforms.RandomRotation(15,),
transforms.RandomCrop(448),
transforms.RandomHorizontalFlip(),
wide_resnet101_2 gives 99.39% validation accuracy
wide_resnet101_2 SpinalNet_VGG FC gives 99.14% validation accuracy
wide_resnet101_2 SpinalNet_ResNet FC gives 99.30% validation accuracy
'''
from __future__ import print_function, division
import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim import lr_scheduler
import numpy as np
import torchvision
from torchvision import datasets, models, transforms
import matplotlib.pyplot as plt
import time
import os
import copy
plt.ion() # interactive mode
# Data augmentation and normalization for training
# Just normalization for validation
data_transforms = {
'train': transforms.Compose([
transforms.Resize((464,464)),
transforms.RandomRotation(15,),
transforms.RandomCrop(448),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(mean=[0.507, 0.487, 0.441], std=[0.267, 0.256, 0.276])
]),
'val': transforms.Compose([
transforms.Resize((448,448)),
transforms.ToTensor(),
transforms.Normalize(mean=[0.507, 0.487, 0.441], std=[0.267, 0.256, 0.276])
]),
}
data_dir = 'data/Oxford_flower102'
# Build train/val datasets from the ImageFolder layout (one sub-folder per
# class), applying the phase-specific transforms defined above.
image_datasets = {x: datasets.ImageFolder(os.path.join(data_dir, x),
                                          data_transforms[x])
                  for x in ['train', 'val']}
dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=8,
                                             shuffle=True, num_workers=0)
              for x in ['train', 'val']}
dataset_sizes = {x: len(image_datasets[x]) for x in ['train', 'val']}
class_names = image_datasets['train'].classes
# Fix: fall back to CPU when CUDA is unavailable instead of the hard-coded
# 'cuda' string, which raised a RuntimeError on CPU-only machines.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
def imshow(inp, title=None):
    """Display a (C, H, W) image tensor after undoing the normalisation.

    Args:
        inp: torch tensor holding one image (or an image grid) in CHW order.
        title: optional string drawn above the image.
    """
    # CHW tensor -> HWC numpy array, then invert Normalize(mean, std).
    # NOTE(review): these are the ImageNet stats, while the pipeline above
    # normalises with different values -- colours may be slightly off;
    # confirm intent.
    image = inp.numpy().transpose((1, 2, 0))
    channel_mean = np.array([0.485, 0.456, 0.406])
    channel_std = np.array([0.229, 0.224, 0.225])
    image = np.clip(image * channel_std + channel_mean, 0, 1)
    plt.imshow(image)
    if title is not None:
        plt.title(title)
    plt.pause(0.001)  # give the GUI backend a moment to redraw
# Get a batch of training data
inputs, classes = next(iter(dataloaders['train']))
# Make a grid from batch
out = torchvision.utils.make_grid(inputs)
imshow(out)#, title=[class_names[x] for x in classes])
#%%
# model_ft = models.vgg19_bn(pretrained=True)
# num_ftrs = model_ft.classifier[0].in_features
model_ft = models.wide_resnet101_2(pretrained=True)
num_ftrs = model_ft.fc.in_features
half_in_size = round(num_ftrs/2)
layer_width = 102 #Small for Resnet, large for VGG
Num_class=102
class SpinalNet_ResNet(nn.Module):
def __init__(self):
super(SpinalNet_ResNet, self).__init__()
self.fc_spinal_layer1 = nn.Sequential(
#nn.Dropout(p = 0.5),
nn.Linear(half_in_size, layer_width),
#nn.BatchNorm1d(layer_width),
nn.ReLU(inplace=True),)
self.fc_spinal_layer2 = nn.Sequential(
#nn.Dropout(p = 0.5),
nn.Linear(half_in_size+layer_width, layer_width),
#nn.BatchNorm1d(layer_width),
nn.ReLU(inplace=True),)
self.fc_spinal_layer3 = nn.Sequential(
#nn.Dropout(p = 0.5),
nn.Linear(half_in_size+layer_width, layer_width),
#nn.BatchNorm1d(layer_width),
nn.ReLU(inplace=True),)
self.fc_spinal_layer4 = nn.Sequential(
#nn.Dropout(p = 0.5),
nn.Linear(half_in_size+layer_width, layer_width),
#nn.BatchNorm1d(layer_width),
nn.ReLU(inplace=True),)
self.fc_out = nn.Sequential(
#nn.Dropout(p = 0.5),
nn.Linear(layer_width*4, Num_class),)
def forward(self, x):
x1 = self.fc_spinal_layer1(x[:, 0:half_in_size])
x2 = self.fc_spinal_layer2(torch.cat([ x[:,half_in_size:2*half_in_size], x1], dim=1))
x3 = self.fc_spinal_layer3(torch.cat([ x[:,0:half_in_size], x2], dim=1))
x4 = self.fc_spinal_layer4(torch.cat([ x[:,half_in_size:2*half_in_size], x3], dim=1))
x = torch.cat([x1, x2], dim=1)
x = torch.cat([x, x3], dim=1)
x = torch.cat([x, x4], dim=1)
x = self.fc_out(x)
return x
class SpinalNet_VGG(nn.Module):
    """Spinal fully-connected head in the VGG style (dropout + batch-norm).

    Each of the four hidden layers receives one half of the input feature
    vector concatenated with the previous hidden activation; the output
    layer classifies the concatenation of all four activations.  Requires
    the module-level globals half_in_size, layer_width and Num_class.
    """

    @staticmethod
    def _hidden(in_features):
        # Dropout -> Linear -> BatchNorm -> ReLU, as in the VGG classifier.
        return nn.Sequential(
            nn.Dropout(p=0.5),
            nn.Linear(in_features, layer_width),
            nn.BatchNorm1d(layer_width),
            nn.ReLU(inplace=True),
        )

    def __init__(self):
        super(SpinalNet_VGG, self).__init__()
        self.fc_spinal_layer1 = self._hidden(half_in_size)
        self.fc_spinal_layer2 = self._hidden(half_in_size + layer_width)
        self.fc_spinal_layer3 = self._hidden(half_in_size + layer_width)
        self.fc_spinal_layer4 = self._hidden(half_in_size + layer_width)
        # Classifier over the concatenated hidden activations.
        self.fc_out = nn.Sequential(
            nn.Dropout(p=0.5),
            nn.Linear(layer_width * 4, Num_class),
        )

    def forward(self, x):
        """Return class scores for x of shape (batch, 2*half_in_size)."""
        lower = x[:, 0:half_in_size]
        upper = x[:, half_in_size:2 * half_in_size]
        x1 = self.fc_spinal_layer1(lower)
        x2 = self.fc_spinal_layer2(torch.cat([upper, x1], dim=1))
        x3 = self.fc_spinal_layer3(torch.cat([lower, x2], dim=1))
        x4 = self.fc_spinal_layer4(torch.cat([upper, x3], dim=1))
        return self.fc_out(torch.cat([x1, x2, x3, x4], dim=1))
VGG_fc = nn.Sequential(
nn.Linear(512, 4096),
nn.ReLU(inplace=True),
nn.Dropout(),
nn.Linear(4096, 4096),
nn.ReLU(inplace=True),
nn.Dropout(),
nn.Linear(4096, Num_class)
)
'''
Changing the fully connected layer to SpinalNet or VGG or ResNet
'''
#model_ft.fc = nn.Linear(num_ftrs, Num_class)
model_ft.fc = SpinalNet_ResNet() #SpinalNet_VGG
#%%
def train_model(model, criterion, optimizer, scheduler, num_epochs=25):
since = time.time()
best_model_wts = copy.deepcopy(model.state_dict())
best_acc = 0.0
for epoch in range(num_epochs):
print('Epoch {}/{}'.format(epoch, num_epochs - 1))
# Each epoch has a training and validation phase
for phase in ['train', 'val']:
if phase == 'train':
model.train() # Set model to training mode
else:
model.eval() # Set model to evaluate mode
running_loss = 0.0
running_corrects = 0
# Iterate over data.
for inputs, labels in dataloaders[phase]:
inputs = inputs.to(device)
labels = labels.to(device)
# zero the parameter gradients
optimizer.zero_grad()
# forward
# track history if only in train
with torch.set_grad_enabled(phase == 'train'):
outputs = model(inputs)
_, preds = torch.max(outputs, 1)
loss = criterion(outputs, labels)
# backward + optimize only if in training phase
if phase == 'train':
loss.backward()
optimizer.step()
# statistics
running_loss += loss.item() * inputs.size(0)
running_corrects += torch.sum(preds == labels.data)
if phase == 'train':
scheduler.step()
epoch_loss = running_loss / dataset_sizes[phase]
epoch_acc = running_corrects.double() / dataset_sizes[phase]
print('{} Loss: {:.4f} Acc: {:.4f}'.format(
phase, epoch_loss, epoch_acc))
# deep copy the model
if phase == 'val' and epoch_acc > best_acc:
best_acc = epoch_acc
best_model_wts = copy.deepcopy(model.state_dict())
time_elapsed = time.time() - since
print('Time from Start {:.0f}m {:.0f}s'.format(
time_elapsed // 60, time_elapsed % 60))
print()
time_elapsed = time.time() - since
print('Training complete in {:.0f}m {:.0f}s'.format(
time_elapsed // 60, time_elapsed % 60))
print('Best val Acc: {:4f}'.format(best_acc))
# load best model weights
model.load_state_dict(best_model_wts)
return model
model_ft = model_ft.to(device)
criterion = nn.CrossEntropyLoss()
# Observe that all parameters are being optimized
optimizer_ft = optim.SGD(model_ft.parameters(), lr=0.001, momentum=0.9)
# Decay LR by a factor of 0.1 every 7 epochs
exp_lr_scheduler = lr_scheduler.StepLR(optimizer_ft, step_size=7, gamma=0.1)
model_ft = train_model(model_ft, criterion, optimizer_ft, exp_lr_scheduler,
num_epochs=50)
| 9,691 | 30.986799 | 93 | py |
SpinalNet | SpinalNet-master/Transfer Learning/Transfer_Learning_Bird225.py | '''
We write this code with the help of PyTorch demo:
https://pytorch.org/tutorials/beginner/transfer_learning_tutorial.html
Data Link:
https://www.kaggle.com/gpiosenka/100-bird-species
Version 30
Downloaded on 20/08/2020
Performances:
Data augmentation:
transforms.Resize((230,230)),
transforms.RandomRotation(15,),
transforms.RandomCrop(224),
transforms.RandomHorizontalFlip(),
vgg19_bn Spinal FC (1024*4 neurons) gives 98.49% Validation Accuracy 99.02% Corresponding Test Accuracy
vgg19_bn Fc of VGG 4096-two layers gives 98.49% Validation Accuracy 98.67% Corresponding Test Accuracy
wide_resnet101_2 Spinal FC (1024*4 neurons) gives 98.84% Validation Accuracy 99.56% Corresponding Test Accuracy
'''
from __future__ import print_function, division
import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim import lr_scheduler
import numpy as np
import torchvision
from torchvision import datasets, models, transforms
import matplotlib.pyplot as plt
import time
import os
import copy
plt.ion() # interactive mode
# Data augmentation and normalization for training
# Just normalization for validation
data_transforms = {
'train': transforms.Compose([
transforms.Resize((230,230)),
transforms.RandomRotation(15,),
transforms.RandomCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(mean=[0.507, 0.487, 0.441], std=[0.267, 0.256, 0.276])
]),
'val': transforms.Compose([
#transforms.Resize(224),
transforms.ToTensor(),
transforms.Normalize(mean=[0.507, 0.487, 0.441], std=[0.267, 0.256, 0.276])
]),
'test': transforms.Compose([
#transforms.Resize(224),
transforms.ToTensor(),
transforms.Normalize(mean=[0.507, 0.487, 0.441], std=[0.267, 0.256, 0.276])
]),
}
data_dir = 'data/bird225'
# Build train/val/test datasets from the ImageFolder layout (one
# sub-folder per class), applying the phase-specific transforms above.
image_datasets = {x: datasets.ImageFolder(os.path.join(data_dir, x),
                                          data_transforms[x])
                  for x in ['train', 'val', 'test']}
dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=24,
                                             shuffle=True, num_workers=0)
              for x in ['train', 'val', 'test']}
dataset_sizes = {x: len(image_datasets[x]) for x in ['train', 'val', 'test']}
class_names = image_datasets['train'].classes
# Fix: fall back to CPU when CUDA is unavailable instead of the hard-coded
# 'cuda' string, which raised a RuntimeError on CPU-only machines.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
def imshow(inp, title=None):
    """Display a (C, H, W) image tensor after undoing the normalisation.

    Args:
        inp: torch tensor holding one image (or an image grid) in CHW order.
        title: optional string drawn above the image.
    """
    # CHW tensor -> HWC numpy array, then invert Normalize(mean, std).
    # NOTE(review): these are the ImageNet stats, while the pipeline above
    # normalises with different values -- colours may be slightly off;
    # confirm intent.
    image = inp.numpy().transpose((1, 2, 0))
    channel_mean = np.array([0.485, 0.456, 0.406])
    channel_std = np.array([0.229, 0.224, 0.225])
    image = np.clip(image * channel_std + channel_mean, 0, 1)
    plt.imshow(image)
    if title is not None:
        plt.title(title)
    plt.pause(0.001)  # give the GUI backend a moment to redraw
# Get a batch of training data
inputs, classes = next(iter(dataloaders['train']))
# Make a grid from batch
out = torchvision.utils.make_grid(inputs)
imshow(out)#, title=[class_names[x] for x in classes])
#%%
# model_ft = models.vgg19_bn(pretrained=True)
# num_ftrs = model_ft.classifier[0].in_features
model_ft = models.wide_resnet101_2(pretrained=True)
num_ftrs = model_ft.fc.in_features
half_in_size = round(num_ftrs/2)
layer_width = 1024
Num_class=225
class SpinalNet(nn.Module):
    """Spinal fully-connected head (dropout + batch-norm variant).

    The backbone feature vector is split into two halves; each of the four
    hidden layers consumes one half of the input together with the previous
    hidden activation, and the classifier reads all four activations
    concatenated.  Requires the module-level globals half_in_size,
    layer_width and Num_class.
    """

    @staticmethod
    def _hidden(in_features):
        # Dropout -> Linear -> BatchNorm -> ReLU building block.
        return nn.Sequential(
            nn.Dropout(p=0.5),
            nn.Linear(in_features, layer_width),
            nn.BatchNorm1d(layer_width),
            nn.ReLU(inplace=True),
        )

    def __init__(self):
        super(SpinalNet, self).__init__()
        self.fc_spinal_layer1 = self._hidden(half_in_size)
        self.fc_spinal_layer2 = self._hidden(half_in_size + layer_width)
        self.fc_spinal_layer3 = self._hidden(half_in_size + layer_width)
        self.fc_spinal_layer4 = self._hidden(half_in_size + layer_width)
        # Classifier over the concatenated hidden activations.
        self.fc_out = nn.Sequential(
            nn.Dropout(p=0.5),
            nn.Linear(layer_width * 4, Num_class),
        )

    def forward(self, x):
        """Return class scores for x of shape (batch, 2*half_in_size)."""
        lower = x[:, 0:half_in_size]
        upper = x[:, half_in_size:2 * half_in_size]
        x1 = self.fc_spinal_layer1(lower)
        x2 = self.fc_spinal_layer2(torch.cat([upper, x1], dim=1))
        x3 = self.fc_spinal_layer3(torch.cat([lower, x2], dim=1))
        x4 = self.fc_spinal_layer4(torch.cat([upper, x3], dim=1))
        return self.fc_out(torch.cat([x1, x2, x3, x4], dim=1))
net_fc = nn.Sequential(
nn.Linear(512, 4096),
nn.ReLU(inplace=True),
nn.Dropout(),
nn.Linear(4096, 4096),
nn.ReLU(inplace=True),
nn.Dropout(),
nn.Linear(4096, Num_class)
)
#%%
def train_model(model, criterion, optimizer, scheduler, num_epochs=25):
    """Fine-tune `model`; evaluate on 'test' only after a new best 'val'.

    The flag `test_token` implements that policy: it is set to 1 when the
    'val' phase improves on `best_acc`, which allows the following 'test'
    phase to run; otherwise the 'test' phase is skipped entirely.

    Args:
        model: network to optimise (already on `device`).
        criterion: loss function applied to the logits.
        optimizer: optimiser updating the model's parameters.
        scheduler: LR scheduler, stepped once per training phase.
        num_epochs: number of passes over the phases.

    Returns:
        The model with the weights of the best validation epoch restored.
    """
    since = time.time()
    best_model_wts = copy.deepcopy(model.state_dict())
    best_acc = 0.0
    test_token=0  # 1 => validation just improved, so run the 'test' phase
    for epoch in range(num_epochs):
        print('Epoch {}/{}'.format(epoch, num_epochs - 1))
        print('-' * 10)
        # Each epoch has a training and validation phase
        for phase in ['train', 'val', 'test']:
            '''
            Test when a better validation result is found
            '''
            if test_token ==0 and phase == 'test':
                continue
            # Reset the flag so at most one 'test' pass follows each
            # validation improvement.
            test_token =0
            if phase == 'train':
                model.train()  # Set model to training mode
            else:
                model.eval()   # Set model to evaluate mode
            running_loss = 0.0
            running_corrects = 0
            # Iterate over data.
            for inputs, labels in dataloaders[phase]:
                inputs = inputs.to(device)
                labels = labels.to(device)
                # zero the parameter gradients
                optimizer.zero_grad()
                # forward
                # track history if only in train
                with torch.set_grad_enabled(phase == 'train'):
                    outputs = model(inputs)
                    _, preds = torch.max(outputs, 1)
                    loss = criterion(outputs, labels)
                    # backward + optimize only if in training phase
                    if phase == 'train':
                        loss.backward()
                        optimizer.step()
                # statistics (criterion returns a batch mean, so re-weight
                # by the batch size before averaging over the dataset)
                running_loss += loss.item() * inputs.size(0)
                running_corrects += torch.sum(preds == labels.data)
            if phase == 'train':
                scheduler.step()
            epoch_loss = running_loss / dataset_sizes[phase]
            epoch_acc = running_corrects.double() / dataset_sizes[phase]
            print('{} Loss: {:.4f} Acc: {:.4f}'.format(
                phase, epoch_loss, epoch_acc))
            # deep copy the model and arm the test phase when validation
            # accuracy improves
            if phase == 'val' and epoch_acc > best_acc:
                best_acc = epoch_acc
                best_model_wts = copy.deepcopy(model.state_dict())
                test_token =1
        print()
    time_elapsed = time.time() - since
    print('Training complete in {:.0f}m {:.0f}s'.format(
        time_elapsed // 60, time_elapsed % 60))
    print('Best val Acc: {:4f}'.format(best_acc))
    # load best model weights
    model.load_state_dict(best_model_wts)
    return model
'''
Changing the fully connected layer to SpinalNet
'''
#model_ft.fc = nn.Linear(num_ftrs, 10)
model_ft.fc = SpinalNet()
model_ft = model_ft.to(device)
criterion = nn.CrossEntropyLoss()
# Observe that all parameters are being optimized
optimizer_ft = optim.SGD(model_ft.parameters(), lr=0.001, momentum=0.9)
# Decay LR by a factor of 0.1 every 7 epochs
exp_lr_scheduler = lr_scheduler.StepLR(optimizer_ft, step_size=7, gamma=0.1)
model_ft = train_model(model_ft, criterion, optimizer_ft, exp_lr_scheduler,
num_epochs=25)
| 8,387 | 28.850534 | 120 | py |
SpinalNet | SpinalNet-master/Transfer Learning/Transfer_Learning_MNIST.py | # Execution info: https://www.kaggle.com/dipuk0506/transfer-learning-on-mnist
from __future__ import print_function, division
import matplotlib
import imageio
import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim import lr_scheduler
import numpy as np
import torchvision
from torchvision import datasets, models, transforms
import matplotlib.pyplot as plt
import time
import os
import copy
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
folderlocation = './Data/'
path = folderlocation
if not os.path.exists(path):
os.mkdir(path)
path = folderlocation + 'train/'
if not os.path.exists(path):
os.mkdir(path)
path = folderlocation + 'valid/'
if not os.path.exists(path):
os.mkdir(path)
path = folderlocation + 'test/'
if not os.path.exists(path):
os.mkdir(path)
for iter1 in range(10): # 10 = number of classes
path = folderlocation + 'train/'+str(iter1)
if not os.path.exists(path):
os.mkdir(path)
path = folderlocation + 'valid/'+str(iter1)
if not os.path.exists(path):
os.mkdir(path)
path = folderlocation + 'test/'+str(iter1)
if not os.path.exists(path):
os.mkdir(path)
data_train = torchvision.datasets.MNIST('/files/', train=True, download=True,
transform=torchvision.transforms.Compose([
]))
for iter1 in range(len(data_train)):
x, a = data_train[iter1]
if iter1%10 ==0:
imageio.imwrite(folderlocation + 'valid/'+str(a)+'/valid'+str(iter1)+'.png', x)
else:
imageio.imwrite(folderlocation + 'train/'+str(a)+'/train'+str(iter1)+'.png', x)
data_test = torchvision.datasets.MNIST('/files/', train=False, download=True,
transform=torchvision.transforms.Compose([
]))
for iter1 in range(len(data_test)):
x, a = data_test[iter1]
imageio.imwrite(folderlocation + 'test/'+str(a)+'/test'+str(iter1)+'.png', x)
model_ft = models.vgg19_bn(pretrained=True)
num_ftrs = model_ft.classifier[0].in_features
#model_ft = models.wide_resnet101_2(pretrained=True)
#num_ftrs = model_ft.fc.in_features
plt.ion() # interactive mode
# Data augmentation and normalization for training
# Just normalization for validation
data_transforms = {
'train': transforms.Compose([
transforms.Resize((120,120)),
transforms.RandomRotation(10,),
transforms.RandomCrop(112),
transforms.RandomPerspective(),
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
]),
'valid': transforms.Compose([
transforms.Resize(112),
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
]),
'test': transforms.Compose([
transforms.Resize(112),
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
]),
}
data_dir = folderlocation
image_datasets = {x: datasets.ImageFolder(os.path.join(data_dir, x),
data_transforms[x])
for x in ['train', 'valid', 'test']}
dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=128,
shuffle=True, num_workers=0)
for x in ['train', 'valid', 'test']}
dataset_sizes = {x: len(image_datasets[x]) for x in ['train', 'valid', 'test']}
class_names = image_datasets['train'].classes
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
def imshow(inp, title=None):
    """Display a normalized image tensor with matplotlib.

    Args:
        inp: CHW float tensor as produced by the dataloaders above,
            i.e. normalized with Normalize((0.1307,), (0.3081,)).
        title: optional plot title.
    """
    inp = inp.numpy().transpose((1, 2, 0))  # CHW -> HWC for matplotlib
    # Bug fix: un-normalize with the MNIST statistics actually used by
    # `data_transforms`; the previous ImageNet mean/std did not match the
    # data and distorted the displayed images.
    mean = np.array([0.1307])
    std = np.array([0.3081])
    inp = std * inp + mean
    inp = np.clip(inp, 0, 1)
    plt.imshow(inp)
    if title is not None:
        plt.title(title)
    plt.pause(0.001)  # pause a bit so that plots are updated
# Get a batch of training data
inputs, classes = next(iter(dataloaders['train']))
# Make a grid from batch
out = torchvision.utils.make_grid(inputs)
imshow(out)#, title=[class_names[x] for x in classes])
half_in_size = round(num_ftrs/2)
layer_width = 1024
Num_class=10
class SpinalNet(nn.Module):
    """SpinalNet classifier head with four gradually-connected layers.

    Each spinal layer receives one half of the input feature vector
    (``half_in_size`` features) together with the previous layer's
    output; the four hidden activations are concatenated and projected
    to ``Num_class`` logits.
    """

    def __init__(self):
        super(SpinalNet, self).__init__()

        def _segment(n_in):
            # One spinal layer: Dropout -> Linear -> BatchNorm -> ReLU.
            return nn.Sequential(
                nn.Dropout(p=0.5),
                nn.Linear(n_in, layer_width),
                nn.BatchNorm1d(layer_width),
                nn.ReLU(inplace=True),
            )

        self.fc_spinal_layer1 = _segment(half_in_size)
        self.fc_spinal_layer2 = _segment(half_in_size + layer_width)
        self.fc_spinal_layer3 = _segment(half_in_size + layer_width)
        self.fc_spinal_layer4 = _segment(half_in_size + layer_width)
        self.fc_out = nn.Sequential(
            nn.Dropout(p=0.5),
            nn.Linear(layer_width * 4, Num_class),
        )

    def forward(self, x):
        lower = x[:, 0:half_in_size]
        upper = x[:, half_in_size:2 * half_in_size]
        h1 = self.fc_spinal_layer1(lower)
        h2 = self.fc_spinal_layer2(torch.cat([upper, h1], dim=1))
        h3 = self.fc_spinal_layer3(torch.cat([lower, h2], dim=1))
        h4 = self.fc_spinal_layer4(torch.cat([upper, h3], dim=1))
        return self.fc_out(torch.cat([h1, h2, h3, h4], dim=1))
class SpinalNet_Resnet(nn.Module):
    """Plain-linear SpinalNet head (no dropout/norm/activation) sized
    for ResNet features; defined here as an alternative classifier."""

    def __init__(self):
        super(SpinalNet_Resnet, self).__init__()
        self.layer1 = nn.Linear(half_in_size, layer_width)
        self.layer2 = nn.Linear(half_in_size + layer_width, layer_width)
        self.layer3 = nn.Linear(half_in_size + layer_width, layer_width)
        self.layer4 = nn.Linear(half_in_size + layer_width, layer_width)
        self._out = nn.Linear(layer_width * 4, Num_class)

    def forward(self, x):
        lower = x[:, 0:half_in_size]
        upper = x[:, half_in_size:2 * half_in_size]
        h1 = self.layer1(lower)
        h2 = self.layer2(torch.cat([upper, h1], dim=1))
        h3 = self.layer3(torch.cat([lower, h2], dim=1))
        h4 = self.layer4(torch.cat([upper, h3], dim=1))
        spine = torch.cat([h1, h2, h3, h4], dim=1)
        return self._out(spine)
# Alternative plain VGG-style classifier head. Not referenced anywhere
# below in this file (the SpinalNet head is installed instead).
# NOTE(review): its 512-dim input would not match vgg19_bn's 25088-dim
# `num_ftrs` — confirm before ever enabling it.
net_fc = nn.Sequential(
    nn.Linear(512, 4096),
    nn.ReLU(inplace=True),
    nn.Dropout(),
    nn.Linear(4096, 4096),
    nn.ReLU(inplace=True),
    nn.Dropout(),
    nn.Linear(4096, Num_class)
)
def train_model(model, criterion, optimizer, scheduler, num_epochs=25):
    """Train `model` and return it loaded with its best-validation weights.

    Uses the module-level globals `dataloaders`, `dataset_sizes` and
    `device`. Each epoch runs a 'train' and a 'valid' phase; the 'test'
    phase is only executed on the pass right after validation accuracy
    improved (gated by `test_token`).

    Args:
        model: network to optimize (already moved to `device` by caller).
        criterion: loss function, e.g. nn.CrossEntropyLoss().
        optimizer: optimizer over `model.parameters()`.
        scheduler: LR scheduler, stepped once per epoch after training.
        num_epochs: number of epochs to run.

    Returns:
        `model` with the state_dict of its best validation epoch loaded.
    """
    since = time.time()
    best_model_wts = copy.deepcopy(model.state_dict())
    best_acc = 0.0
    # 1 when the most recent 'valid' phase set a new best accuracy,
    # which unlocks the 'test' phase on the next phase-loop pass.
    test_token=0
    for epoch in range(num_epochs):
        print('Epoch {}/{}'.format(epoch, num_epochs - 1))
        print('-' * 10)
        # Each epoch has a training and validation phase
        for phase in ['train', 'valid', 'test']:
            '''
            Test when a better validation result is found
            '''
            if test_token ==0 and phase == 'test':
                continue
            test_token =0
            if phase == 'train':
                model.train()  # Set model to training mode
            else:
                model.eval()   # Set model to evaluate mode
            running_loss = 0.0
            running_corrects = 0
            # Iterate over data.
            for inputs, labels in dataloaders[phase]:
                inputs = inputs.to(device)
                labels = labels.to(device)
                # zero the parameter gradients
                optimizer.zero_grad()
                # forward
                # track history if only in train
                with torch.set_grad_enabled(phase == 'train'):
                    outputs = model(inputs)
                    _, preds = torch.max(outputs, 1)
                    loss = criterion(outputs, labels)
                    # backward + optimize only if in training phase
                    if phase == 'train':
                        loss.backward()
                        optimizer.step()
                # statistics (loss is a per-batch mean, so re-weight by batch size)
                running_loss += loss.item() * inputs.size(0)
                running_corrects += torch.sum(preds == labels.data)
            if phase == 'train':
                scheduler.step()
            epoch_loss = running_loss / dataset_sizes[phase]
            epoch_acc = running_corrects.double() / dataset_sizes[phase]
            print('{} Loss: {:.4f} Acc: {:.4f}'.format(
                phase, epoch_loss, epoch_acc))
            # deep copy the model whenever validation accuracy improves
            if phase == 'valid' and epoch_acc > best_acc:
                best_acc = epoch_acc
                best_model_wts = copy.deepcopy(model.state_dict())
                test_token =1
        print()
    time_elapsed = time.time() - since
    print('Training complete in {:.0f}m {:.0f}s'.format(
        time_elapsed // 60, time_elapsed % 60))
    print('Best val Acc: {:4f}'.format(best_acc))
    # load best model weights
    model.load_state_dict(best_model_wts)
    return model
'''
Changing the fully connected layer to SpinalNet
'''
#model_ft.fc = nn.Linear(num_ftrs, 10)
# Replace vgg19_bn's classifier with the SpinalNet head defined above.
model_ft.classifier = SpinalNet()
model_ft = model_ft.to(device)
criterion = nn.CrossEntropyLoss()
# Stage 1: 10 epochs at lr=0.01.
# Observe that all parameters are being optimized
optimizer_ft = optim.SGD(model_ft.parameters(), lr=0.01, momentum=0.9)
# Decay LR by a factor of 0.1 every 7 epochs
exp_lr_scheduler = lr_scheduler.StepLR(optimizer_ft, step_size=7, gamma=0.1)
model_ft = train_model(model_ft, criterion, optimizer_ft, exp_lr_scheduler,
                       num_epochs=10)
# Stage 2: 10 more epochs at the lower lr=0.001 (fresh optimizer/scheduler).
# Observe that all parameters are being optimized
optimizer_ft = optim.SGD(model_ft.parameters(), lr=0.001, momentum=0.9)
# Decay LR by a factor of 0.1 every 7 epochs
exp_lr_scheduler = lr_scheduler.StepLR(optimizer_ft, step_size=7, gamma=0.1)
model_ft = train_model(model_ft, criterion, optimizer_ft, exp_lr_scheduler,
                       num_epochs=10)
#Save model
# Specify a path
PATH = "state_dict_model.pt"
# Save
torch.save(model_ft.state_dict(), PATH)
## Load
#model = Net()
#model.load_state_dict(torch.load(PATH))
#model.eval()
# Clean-up: deletes the PNG dataset written to `folderlocation` earlier.
import shutil
shutil.rmtree(folderlocation)
| 10,753 | 29.725714 | 93 | py |
SpinalNet | SpinalNet-master/CIFAR-10/ResNet_default_and_SpinalFC_CIFAR10.py | # -*- coding: utf-8 -*-
"""
This Script contains the default and Spinal ResNet code for CIFAR-10.
This code trains both NNs as two different models.
There is option of choosing ResNet18(), ResNet34(), SpinalResNet18(), or
SpinalResNet34().
This code randomly changes the learning rate to get a good result.
@author: Dipu
"""
import torch
import torch.nn as nn
import torchvision
import torchvision.transforms as transforms
import numpy as np
import random
# Device configuration
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
#device = 'cpu'
# Hyper-parameters
num_epochs = 160
learning_rate = 0.001
torch.manual_seed(0)
random.seed(0)
first_HL = 256
# Image preprocessing modules
# Normalize training set together with augmentation
transform_train = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(mean=[0.507, 0.487, 0.441], std=[0.267, 0.256, 0.276])
])
# Normalize test set same as training set without augmentation
transform_test = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize(mean=[0.507, 0.487, 0.441], std=[0.267, 0.256, 0.276])
])
# CIFAR-100 dataset
trainset = torchvision.datasets.CIFAR10(root='./data',
train=True,
download=True,
transform=transform_train)
train_loader = torch.utils.data.DataLoader(
trainset, batch_size=200, shuffle=True, num_workers=0)
testset = torchvision.datasets.CIFAR10(root='./data',
train=False,
download=True,
transform=transform_test)
test_loader = torch.utils.data.DataLoader(
testset, batch_size=200, shuffle=False, num_workers=0)
def conv3x3(in_channels, out_channels, stride=1):
    """Build the bias-free 3x3 padded convolution used by ResNet blocks.

    Args:
        in_channels: number of input feature maps.
        out_channels: number of output feature maps.
        stride: convolution stride (default 1).

    Returns:
        nn.Conv2d with kernel size 3, padding 1 and no bias term.
    """
    conv_kwargs = {
        "kernel_size": 3,
        "stride": stride,
        "padding": 1,
        "bias": False,
    }
    return nn.Conv2d(in_channels, out_channels, **conv_kwargs)
class BasicBlock(nn.Module):
    """Basic residual block of ResNet: two 3x3 convs plus a skip connection.

    Args:
        in_channels: channels of the block input.
        out_channels: channels produced by both convolutions.
        stride: stride of the first convolution (spatial downsampling).
        downsample: optional module projecting the input so it can be
            added to the conv branch when shape/stride differ.
    """
    def __init__(self, in_channels, out_channels, stride=1, downsample=None):
        """Basic Block of ResNet Builder."""
        super(BasicBlock, self).__init__()

        # First conv3x3 layer (may downsample via `stride`)
        self.conv1 = conv3x3(in_channels, out_channels, stride)
        # Batch Normalization
        self.bn1 = nn.BatchNorm2d(num_features=out_channels)
        # ReLU Activation Function
        self.relu = nn.ReLU(inplace=True)
        # Second conv3x3 layer (always stride 1)
        self.conv2 = conv3x3(out_channels, out_channels)
        # Batch Normalization
        self.bn2 = nn.BatchNorm2d(num_features=out_channels)
        # downsample for `residual`
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        """Forward pass: conv-bn-relu, conv-bn, add skip, final ReLU."""
        residual = x

        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)

        out = self.conv2(out)
        out = self.bn2(out)

        if self.downsample is not None:
            residual = self.downsample(x)
        out += residual
        # Bug fix: the canonical ResNet basic block applies ReLU after the
        # residual addition; it was missing here.
        out = self.relu(out)

        return out
class SpinalResNet(nn.Module):
    """ResNet trunk with a SpinalNet fully-connected head.

    The convolutional trunk matches ``ResNet`` below; the head splits the
    final pooled feature map into four 256-dim corner vectors and feeds
    them through gradually-connected spinal layers.
    """

    def __init__(self, block, duplicates, num_classes=10):
        """Build trunk + spinal head.

        Args:
            block: residual block class, e.g. ``BasicBlock``.
            duplicates: per-stage block counts (4 entries).
            num_classes: number of output logits.
        """
        super(SpinalResNet, self).__init__()

        self.in_channels = 32
        self.conv1 = conv3x3(in_channels=3, out_channels=32)
        self.bn = nn.BatchNorm2d(num_features=32)
        self.relu = nn.ReLU(inplace=True)
        self.dropout = nn.Dropout2d(p=0.02)

        # block of Basic Blocks
        self.conv2_x = self._make_block(block, duplicates[0], out_channels=32)
        self.conv3_x = self._make_block(block, duplicates[1], out_channels=64, stride=2)
        self.conv4_x = self._make_block(block, duplicates[2], out_channels=128, stride=2)
        self.conv5_x = self._make_block(block, duplicates[3], out_channels=256, stride=2)

        self.maxpool = nn.MaxPool2d(kernel_size=4, stride=1)  # unused in forward; kept for compatibility
        self.maxpool2 = nn.MaxPool2d(kernel_size=2, stride=2)

        # Spinal head over the four corner vectors of the pooled map.
        self.fc1 = nn.Linear(256, first_HL)
        self.fc1_1 = nn.Linear(256 + first_HL, first_HL)
        self.fc1_2 = nn.Linear(256 + first_HL, first_HL)
        self.fc1_3 = nn.Linear(256 + first_HL, first_HL)
        self.fc_layer = nn.Linear(first_HL*4, num_classes)

        # initialize weights
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                # Fix: `kaiming_normal` (no underscore) is deprecated and
                # removed in modern PyTorch; use the in-place variant.
                nn.init.kaiming_normal_(m.weight.data, mode='fan_out')
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                m.bias.data.zero_()

    def _make_block(self, block, duplicates, out_channels, stride=1):
        """
        Create one stage of residual blocks.

        Args:
            block: BasicBlock
            duplicates: number of BasicBlock
            out_channels: out channels of the block

        Returns:
            nn.Sequential(*layers)
        """
        downsample = None
        if (stride != 1) or (self.in_channels != out_channels):
            # Project the skip connection when shape or stride changes.
            downsample = nn.Sequential(
                conv3x3(self.in_channels, out_channels, stride=stride),
                nn.BatchNorm2d(num_features=out_channels)
            )
        layers = []
        layers.append(
            block(self.in_channels, out_channels, stride, downsample))
        self.in_channels = out_channels
        for _ in range(1, duplicates):
            layers.append(block(out_channels, out_channels))
        return nn.Sequential(*layers)

    def forward(self, x):
        """Forward pass: ResNet trunk, then spinal head over a 2x2 map."""
        out = self.conv1(x)
        out = self.bn(out)
        out = self.relu(out)
        out = self.dropout(out)

        # Stacked Basic Blocks
        out = self.conv2_x(out)
        out = self.conv3_x(out)
        out = self.conv4_x(out)
        out = self.conv5_x(out)

        # For 32x32 inputs the trunk yields 4x4 maps; this pool gives 2x2.
        out1 = self.maxpool2(out)

        # Spinal head: each layer sees one 256-dim corner vector plus the
        # previous layer's output. (A dead, unused `out2` flatten was removed.)
        x1 = self.relu(self.fc1(out1[:, :, 0, 0]))
        x2 = self.relu(self.fc1_1(torch.cat([out1[:, :, 0, 1], x1], dim=1)))
        x3 = self.relu(self.fc1_2(torch.cat([out1[:, :, 1, 0], x2], dim=1)))
        x4 = self.relu(self.fc1_3(torch.cat([out1[:, :, 1, 1], x3], dim=1)))

        out = torch.cat([x1, x2, x3, x4], dim=1)
        out = self.fc_layer(out)

        return out
class ResNet(nn.Module):
    """Residual Neural Network with a plain linear classifier head.

    Four stages of residual blocks (32/64/128/256 channels) followed by
    a 4x4 max-pool and a single Linear layer to `num_classes` logits.
    """

    def __init__(self, block, duplicates, num_classes=10):
        """Residual Neural Network Builder.

        Args:
            block: residual block class, e.g. ``BasicBlock``.
            duplicates: per-stage block counts (4 entries).
            num_classes: number of output logits.
        """
        super(ResNet, self).__init__()

        self.in_channels = 32
        self.conv1 = conv3x3(in_channels=3, out_channels=32)
        self.bn = nn.BatchNorm2d(num_features=32)
        self.relu = nn.ReLU(inplace=True)
        self.dropout = nn.Dropout2d(p=0.02)

        # block of Basic Blocks
        self.conv2_x = self._make_block(block, duplicates[0], out_channels=32)
        self.conv3_x = self._make_block(block, duplicates[1], out_channels=64, stride=2)
        self.conv4_x = self._make_block(block, duplicates[2], out_channels=128, stride=2)
        self.conv5_x = self._make_block(block, duplicates[3], out_channels=256, stride=2)

        self.maxpool = nn.MaxPool2d(kernel_size=4, stride=1)
        self.fc_layer = nn.Linear(256, num_classes)

        # initialize weights
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                # Fix: `kaiming_normal` (no underscore) is deprecated and
                # removed in modern PyTorch; use the in-place variant.
                nn.init.kaiming_normal_(m.weight.data, mode='fan_out')
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                m.bias.data.zero_()

    def _make_block(self, block, duplicates, out_channels, stride=1):
        """
        Create one stage of residual blocks.

        Args:
            block: BasicBlock
            duplicates: number of BasicBlock
            out_channels: out channels of the block

        Returns:
            nn.Sequential(*layers)
        """
        downsample = None
        if (stride != 1) or (self.in_channels != out_channels):
            # Project the skip connection when shape or stride changes.
            downsample = nn.Sequential(
                conv3x3(self.in_channels, out_channels, stride=stride),
                nn.BatchNorm2d(num_features=out_channels)
            )
        layers = []
        layers.append(
            block(self.in_channels, out_channels, stride, downsample))
        self.in_channels = out_channels
        for _ in range(1, duplicates):
            layers.append(block(out_channels, out_channels))
        return nn.Sequential(*layers)

    def forward(self, x):
        """Forward pass of ResNet."""
        out = self.conv1(x)
        out = self.bn(out)
        out = self.relu(out)
        out = self.dropout(out)

        # Stacked Basic Blocks
        out = self.conv2_x(out)
        out = self.conv3_x(out)
        out = self.conv4_x(out)
        out = self.conv5_x(out)

        out = self.maxpool(out)
        # Flatten once (a duplicated, redundant flatten was removed).
        out = out.view(out.size(0), -1)
        out = self.fc_layer(out)

        return out
# NOTE(review): this instance is never referenced below (model1/model2
# are trained instead) — it only spends memory; confirm and remove.
model = ResNet(BasicBlock, [1,1,1,1]).to(device)

def ResNet18():
    """ResNet with ResNet-18 stage depths (2 blocks per stage), on `device`."""
    return ResNet(BasicBlock, [2,2,2,2]).to(device)

def SpinalResNet18():
    """SpinalResNet with ResNet-18 stage depths, on `device`."""
    return SpinalResNet(BasicBlock, [2,2,2,2]).to(device)

def ResNet34():
    """ResNet with ResNet-34 stage depths (3/4/6/3 blocks), on `device`."""
    return ResNet(BasicBlock, [3, 4, 6, 3]).to(device)

def SpinalResNet34():
    """SpinalResNet with ResNet-34 stage depths, on `device`."""
    return SpinalResNet(BasicBlock, [3, 4, 6, 3]).to(device)
# For updating learning rate
def update_lr(optimizer, lr):
    """Overwrite the learning rate of every param group in `optimizer`."""
    for group_cfg in optimizer.param_groups:
        group_cfg.update(lr=lr)
# Train the model: the plain ResNet-18 (model1) and the Spinal variant
# (model2) are optimized side by side on the same batches so their
# accuracies can be compared epoch by epoch.
total_step = len(train_loader)
curr_lr1 = learning_rate
curr_lr2 = learning_rate

model1 = ResNet18().to(device)
model2 = SpinalResNet18().to(device)

# Loss and optimizer
criterion = nn.CrossEntropyLoss()
optimizer1 = torch.optim.Adam(model1.parameters(), lr=learning_rate)
optimizer2 = torch.optim.Adam(model2.parameters(), lr=learning_rate)

# Train the model
total_step = len(train_loader)
best_accuracy1 = 0
best_accuracy2 = 0
#%%
for epoch in range(num_epochs):
    for i, (images, labels) in enumerate(train_loader):
        images = images.to(device)
        labels = labels.to(device)

        # Forward pass
        outputs = model1(images)
        loss1 = criterion(outputs, labels)

        # Backward and optimize
        optimizer1.zero_grad()
        loss1.backward()
        optimizer1.step()

        outputs = model2(images)
        loss2 = criterion(outputs, labels)

        # Backward and optimize
        optimizer2.zero_grad()
        loss2.backward()
        optimizer2.step()

    # Evaluate both models on the test set once per epoch.
    model1.eval()
    model2.eval()
    with torch.no_grad():
        correct1 = 0
        total1 = 0
        correct2 = 0
        total2 = 0
        for images, labels in test_loader:
            images = images.to(device)
            labels = labels.to(device)

            outputs = model1(images)
            _, predicted = torch.max(outputs.data, 1)
            total1 += labels.size(0)
            correct1 += (predicted == labels).sum().item()

            outputs = model2(images)
            _, predicted = torch.max(outputs.data, 1)
            total2 += labels.size(0)
            correct2 += (predicted == labels).sum().item()

        if best_accuracy1 > correct1 / total1:
            # No improvement: jump to a random smaller learning rate.
            # Fix: np.asscalar() was removed in NumPy 1.23; a scalar draw
            # (np.random.rand() with no size) consumes the same single
            # random number and already returns a plain float.
            curr_lr1 = learning_rate * (np.random.rand() ** 5)
            update_lr(optimizer1, curr_lr1)
            print('Epoch :{} Accuracy NN: ({:.2f}%), Maximum Accuracy: {:.2f}%'.format(epoch,
                  100 * correct1 / total1, 100*best_accuracy1))
        else:
            best_accuracy1 = correct1 / total1
            print('Test Accuracy of NN: {} % (improvement)'.format(100 * correct1 / total1))

        if best_accuracy2 > correct2 / total2:
            curr_lr2 = learning_rate * (np.random.rand() ** 5)
            update_lr(optimizer2, curr_lr2)
            print('Epoch :{} Accuracy SpinalNet: ({:.2f}%), Maximum Accuracy: {:.2f}%'.format(epoch,
                  100 * correct2 / total2, 100*best_accuracy2))
        else:
            best_accuracy2 = correct2 / total2
            print('Test Accuracy of SpinalNet: {} % (improvement)'.format(100 * correct2 / total2))

    model1.train()
    model2.train()
| 13,289 | 29.906977 | 101 | py |
SpinalNet | SpinalNet-master/CIFAR-10/VGG_default_and_SpinalFC_CIFAR10.py | # -*- coding: utf-8 -*-
"""
This Script contains the default and Spinal VGG code for CIFAR-10.
This code trains both NNs as two different models.
There is option of choosing NN among:
vgg11_bn(), vgg13_bn(), vgg16_bn(), vgg19_bn() and
Spinalvgg11_bn(), Spinalvgg13_bn(), Spinalvgg16_bn(), Spinalvgg19_bn()
This code randomly changes the learning rate to get a good result.
@author: Dipu
"""
import torch
import torch.nn as nn
import torchvision
import torchvision.transforms as transforms
import numpy as np
import random
# Device configuration
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
#device = 'cpu'
# Hyper-parameters
num_epochs = 200
learning_rate = 0.0001
Half_width =256
layer_width=512
torch.manual_seed(0)
random.seed(0)
# Image preprocessing modules
# Normalize training set together with augmentation
transform = transforms.Compose([
transforms.Pad(4),
transforms.RandomHorizontalFlip(),
transforms.RandomCrop(32),
transforms.ToTensor()])
# CIFAR-10 dataset
trainset = torchvision.datasets.CIFAR10(root='./data', train=True,
download=True, transform=transform)
testset = torchvision.datasets.CIFAR10(root='./data', train=False,
download=True, transform=transforms.ToTensor())
# Data loader
train_loader = torch.utils.data.DataLoader(dataset=trainset,
batch_size=100,
shuffle=True)
test_loader = torch.utils.data.DataLoader(dataset=testset,
batch_size=100,
shuffle=False)
def conv3x3(in_channels, out_channels, stride=1):
    """Return a 3x3, padding-1, bias-free convolution layer.

    This is the standard convolution shape used inside ResNet-style
    basic blocks; `stride` controls optional spatial downsampling.
    """
    return nn.Conv2d(in_channels, out_channels, kernel_size=3,
                     stride=stride, padding=1, bias=False)
# VGG trunk configurations: ints are conv output channels, 'M' inserts a
# 2x2 max-pool. 'A' -> VGG11, 'B' -> VGG13, 'D' -> VGG16, 'E' -> VGG19.
cfg = {
    'A' : [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
    'B' : [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
    'D' : [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],
    'E' : [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M']
}
class VGG(nn.Module):
    """VGG-style network: a convolutional feature extractor followed by
    the classic 512 -> 4096 -> 4096 -> num_class fully-connected head."""

    def __init__(self, features, num_class=10):
        super().__init__()
        self.features = features
        head = [
            nn.Linear(512, 4096),
            nn.ReLU(inplace=True),
            nn.Dropout(),
            nn.Linear(4096, 4096),
            nn.ReLU(inplace=True),
            nn.Dropout(),
            nn.Linear(4096, num_class),
        ]
        self.classifier = nn.Sequential(*head)

    def forward(self, x):
        feats = self.features(x)
        flat = feats.view(feats.size(0), -1)
        return self.classifier(flat)
class SpinalVGG(nn.Module):
    """VGG feature extractor with a SpinalNet fully-connected head.

    Each spinal layer consumes one half of the flattened feature vector
    (``Half_width`` features) together with the previous layer's output;
    the four hidden activations are concatenated for the output layer.
    """

    def __init__(self, features, num_class=10):
        super().__init__()
        self.features = features

        def spinal_block(n_in):
            # One gradual layer of the spine: Dropout -> Linear -> ReLU.
            return nn.Sequential(
                nn.Dropout(), nn.Linear(n_in, layer_width),
                nn.ReLU(inplace=True),
            )

        self.fc_spinal_layer1 = spinal_block(Half_width)
        self.fc_spinal_layer2 = spinal_block(Half_width + layer_width)
        self.fc_spinal_layer3 = spinal_block(Half_width + layer_width)
        self.fc_spinal_layer4 = spinal_block(Half_width + layer_width)
        self.fc_out = nn.Sequential(
            nn.Dropout(), nn.Linear(layer_width * 4, num_class)
        )

    def forward(self, x):
        flat = self.features(x)
        flat = flat.view(flat.size(0), -1)
        lower = flat[:, 0:Half_width]
        upper = flat[:, Half_width:2 * Half_width]
        h1 = self.fc_spinal_layer1(lower)
        h2 = self.fc_spinal_layer2(torch.cat([upper, h1], dim=1))
        h3 = self.fc_spinal_layer3(torch.cat([lower, h2], dim=1))
        h4 = self.fc_spinal_layer4(torch.cat([upper, h3], dim=1))
        return self.fc_out(torch.cat([h1, h2, h3, h4], dim=1))
def make_layers(cfg, batch_norm=False):
    """Build a VGG convolutional trunk from a layer configuration.

    Args:
        cfg: sequence of channel counts and 'M' markers; 'M' inserts a
            2x2 max-pool, an int inserts a 3x3 conv (+ optional BN) and
            a ReLU with that many output channels.
        batch_norm: insert BatchNorm2d after every convolution.

    Returns:
        nn.Sequential of the assembled layers (expects 3 input channels).
    """
    modules = []
    channels = 3
    for spec in cfg:
        if spec == 'M':
            modules.append(nn.MaxPool2d(kernel_size=2, stride=2))
        else:
            modules.append(nn.Conv2d(channels, spec, kernel_size=3, padding=1))
            if batch_norm:
                modules.append(nn.BatchNorm2d(spec))
            modules.append(nn.ReLU(inplace=True))
            channels = spec
    return nn.Sequential(*modules)
def vgg11_bn():
    """VGG11 (cfg 'A') with batch norm and the plain FC head."""
    return VGG(make_layers(cfg['A'], batch_norm=True))

def vgg13_bn():
    """VGG13 (cfg 'B') with batch norm and the plain FC head."""
    return VGG(make_layers(cfg['B'], batch_norm=True))

def vgg16_bn():
    """VGG16 (cfg 'D') with batch norm and the plain FC head."""
    return VGG(make_layers(cfg['D'], batch_norm=True))

def vgg19_bn():
    """VGG19 (cfg 'E') with batch norm and the plain FC head."""
    return VGG(make_layers(cfg['E'], batch_norm=True))

def Spinalvgg11_bn():
    """VGG11 trunk with the SpinalNet FC head."""
    return SpinalVGG(make_layers(cfg['A'], batch_norm=True))

def Spinalvgg13_bn():
    """VGG13 trunk with the SpinalNet FC head."""
    return SpinalVGG(make_layers(cfg['B'], batch_norm=True))

def Spinalvgg16_bn():
    """VGG16 trunk with the SpinalNet FC head."""
    return SpinalVGG(make_layers(cfg['D'], batch_norm=True))

def Spinalvgg19_bn():
    """VGG19 trunk with the SpinalNet FC head."""
    return SpinalVGG(make_layers(cfg['E'], batch_norm=True))
# For updating learning rate
def update_lr(optimizer, lr):
    """Set every parameter group of `optimizer` to learning rate `lr`."""
    for group in optimizer.param_groups:
        group["lr"] = lr
# Train the model: plain VGG19-bn (model1) and Spinal VGG19-bn (model2)
# are optimized side by side on the same batches for comparison.
total_step = len(train_loader)
curr_lr1 = learning_rate
curr_lr2 = learning_rate

model1 = vgg19_bn().to(device)
model2 = Spinalvgg19_bn().to(device)

# Loss and optimizer
criterion = nn.CrossEntropyLoss()
optimizer1 = torch.optim.Adam(model1.parameters(), lr=learning_rate)
optimizer2 = torch.optim.Adam(model2.parameters(), lr=learning_rate)

# Train the model
total_step = len(train_loader)
best_accuracy1 = 0.0
best_accuracy2 = 0.0

for epoch in range(num_epochs):
    for i, (images, labels) in enumerate(train_loader):
        images = images.to(device)
        labels = labels.to(device)

        # Forward pass
        outputs = model1(images)
        loss1 = criterion(outputs, labels)

        # Backward and optimize
        optimizer1.zero_grad()
        loss1.backward()
        optimizer1.step()

        outputs = model2(images)
        loss2 = criterion(outputs, labels)

        # Backward and optimize
        optimizer2.zero_grad()
        loss2.backward()
        optimizer2.step()

        # Log losses once per epoch, on the 250th batch.
        if i == 249:
            print ("Ordinary Epoch [{}/{}], Step [{}/{}] Loss: {:.4f}"
                   .format(epoch+1, num_epochs, i+1, total_step, loss1.item()))
            print ("Spinal Epoch [{}/{}], Step [{}/{}] Loss: {:.4f}"
                   .format(epoch+1, num_epochs, i+1, total_step, loss2.item()))

    # Evaluate both models on the test set once per epoch.
    model1.eval()
    model2.eval()
    with torch.no_grad():
        correct1 = 0
        total1 = 0
        correct2 = 0
        total2 = 0
        for images, labels in test_loader:
            images = images.to(device)
            labels = labels.to(device)

            outputs = model1(images)
            _, predicted = torch.max(outputs.data, 1)
            total1 += labels.size(0)
            correct1 += (predicted == labels).sum().item()

            outputs = model2(images)
            _, predicted = torch.max(outputs.data, 1)
            total2 += labels.size(0)
            correct2 += (predicted == labels).sum().item()

        if best_accuracy1 > correct1 / total1:
            # No improvement: retry with a random smaller learning rate.
            # Fix: np.asscalar() was removed in NumPy 1.23; a size-less
            # np.random.rand() consumes the same single draw and already
            # returns a plain Python float.
            curr_lr1 = learning_rate * (np.random.rand() ** 3)
            update_lr(optimizer1, curr_lr1)
            print('Test Accuracy of NN: {} % Best: {} %'.format(100 * correct1 / total1, 100*best_accuracy1))
        else:
            best_accuracy1 = correct1 / total1
            net_opt1 = model1
            print('Test Accuracy of NN: {} % (improvement)'.format(100 * correct1 / total1))

        if best_accuracy2 > correct2 / total2:
            curr_lr2 = learning_rate * (np.random.rand() ** 3)
            update_lr(optimizer2, curr_lr2)
            print('Test Accuracy of SpinalNet: {} % Best: {} %'.format(100 * correct2 / total2, 100*best_accuracy2))
        else:
            best_accuracy2 = correct2 / total2
            net_opt2 = model2
            print('Test Accuracy of SpinalNet: {} % (improvement)'.format(100 * correct2 / total2))

    model1.train()
    model2.train()
| 8,991 | 28.578947 | 116 | py |
SpinalNet | SpinalNet-master/CIFAR-10/CNN_dropout_CIFAR10.py | # -*- coding: utf-8 -*-
"""
This Script contains the default CNN dropout code for comparison.
The code is collected and changed from:
https://zhenye-na.github.io/2018/09/28/pytorch-cnn-cifar10.html
@author: Dipu
"""
import torch
import torch.nn as nn
import torchvision
import torchvision.transforms as transforms
import random
# Device configuration
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# Hyper-parameters
num_epochs = 160
learning_rate = 0.001
torch.manual_seed(0)
random.seed(0)
# Image preprocessing modules
transform = transforms.Compose([
transforms.Pad(4),
transforms.RandomHorizontalFlip(),
transforms.RandomCrop(32),
transforms.ToTensor()])
# CIFAR-10 dataset
trainset = torchvision.datasets.CIFAR10(root='./data', train=True,
download=True, transform=transform)
testset = torchvision.datasets.CIFAR10(root='./data', train=False,
download=True, transform=transforms.ToTensor())
# Data loader
train_loader = torch.utils.data.DataLoader(dataset=trainset,
batch_size=100,
shuffle=True)
test_loader = torch.utils.data.DataLoader(dataset=testset,
batch_size=100,
shuffle=False)
# 3x3 convolution
class CNN(nn.Module):
    """Plain convolutional classifier for 32x32 RGB images (10 classes)."""

    def __init__(self):
        """Assemble the three conv stages and the dropout MLP head."""
        super(CNN, self).__init__()

        self.conv_layer = nn.Sequential(
            # Stage 1: 3 -> 32 -> 64 channels, spatial 32 -> 16
            nn.Conv2d(in_channels=3, out_channels=32, kernel_size=3, padding=1),
            nn.BatchNorm2d(32),
            nn.ReLU(inplace=True),
            nn.Conv2d(in_channels=32, out_channels=64, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2),

            # Stage 2: 64 -> 128 channels, spatial 16 -> 8
            nn.Conv2d(in_channels=64, out_channels=128, kernel_size=3, padding=1),
            nn.BatchNorm2d(128),
            nn.ReLU(inplace=True),
            nn.Conv2d(in_channels=128, out_channels=128, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2),
            nn.Dropout2d(p=0.05),

            # Stage 3: 128 -> 256 channels, spatial 8 -> 4
            nn.Conv2d(in_channels=128, out_channels=256, kernel_size=3, padding=1),
            nn.BatchNorm2d(256),
            nn.ReLU(inplace=True),
            nn.Conv2d(in_channels=256, out_channels=256, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2),
        )

        self.fc_layer = nn.Sequential(
            nn.Dropout(p=0.1),
            nn.Linear(4096, 1024),  # 4096 = 256 channels * 4 * 4 spatial
            nn.ReLU(inplace=True),
            nn.Linear(1024, 512),
            nn.ReLU(inplace=True),
            nn.Dropout(p=0.1),
            nn.Linear(512, 10)
        )

    def forward(self, x):
        """Extract features, flatten, classify."""
        features = self.conv_layer(x)
        flattened = features.view(features.size(0), -1)
        return self.fc_layer(flattened)
model = CNN().to(device)
# Loss and optimizer
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
# For updating learning rate
def update_lr(optimizer, lr):
    """Apply learning rate `lr` to all parameter groups of `optimizer`."""
    for param_group in optimizer.param_groups:
        param_group.update({'lr': lr})
# Train the model: single CNN, with a fixed divide-by-3 LR decay schedule
# and a full test-set evaluation after every epoch.
total_step = len(train_loader)
curr_lr = learning_rate
for epoch in range(num_epochs):
    for i, (images, labels) in enumerate(train_loader):
        images = images.to(device)
        labels = labels.to(device)
        # Forward pass
        outputs = model(images)
        loss = criterion(outputs, labels)
        # Backward and optimize
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        if (i+1) % 500 == 0:
            print ("Epoch [{}/{}], Step [{}/{}] Loss: {:.4f}"
                   .format(epoch+1, num_epochs, i+1, total_step, loss.item()))
    # Decay learning rate
    # NOTE(review): lr is divided by 3 on epoch 1 and then on EVERY epoch
    # after 20, driving it toward zero quickly — confirm this aggressive
    # schedule is intended.
    if (epoch) == 1 or epoch>20:
        curr_lr /= 3
        update_lr(optimizer, curr_lr)
    # Test the model on the held-out set after each epoch.
    model.eval()
    with torch.no_grad():
        correct = 0
        total = 0
        for images, labels in test_loader:
            images = images.to(device)
            labels = labels.to(device)
            outputs = model(images)
            _, predicted = torch.max(outputs.data, 1)
            total += labels.size(0)
            correct += (predicted == labels).sum().item()
        print('Accuracy of the model on the test images: {} %'.format(100 * correct / total))
    model.train()
| 4,878 | 27.04023 | 97 | py |
SpinalNet | SpinalNet-master/CIFAR-10/CNN_dropout_SpinalFC_CIFAR10.py | # -*- coding: utf-8 -*-
"""
This Script contains the CNN dropout with Spinal fully-connected layer.
@author: Dipu
"""
import torch
import torch.nn as nn
import torchvision
import torchvision.transforms as transforms
import random
# Device configuration
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# Hyper-parameters
num_epochs = 160
learning_rate = 0.001
torch.manual_seed(1)
random.seed(1)
Half_width =2048
layer_width = 128
# Image preprocessing modules
transform = transforms.Compose([
transforms.Pad(4),
transforms.RandomHorizontalFlip(),
transforms.RandomCrop(32),
transforms.ToTensor()])
# CIFAR-10 dataset
trainset = torchvision.datasets.CIFAR10(root='./data', train=True,
download=True, transform=transform)
testset = torchvision.datasets.CIFAR10(root='./data', train=False,
download=True, transform=transforms.ToTensor())
# Data loader
train_loader = torch.utils.data.DataLoader(dataset=trainset,
batch_size=100,
shuffle=True)
test_loader = torch.utils.data.DataLoader(dataset=testset,
batch_size=100,
shuffle=False)
# 3x3 convolution
class SpinalCNN(nn.Module):
    """CNN with a SpinalNet-style fully connected head.

    The 4096-dim flattened conv output is split into two halves of
    ``Half_width`` features; four small "spinal" layers alternately consume
    one half plus the previous spinal layer's output, and their concatenated
    outputs feed the final classifier.
    """

    def __init__(self):
        """Build the feature extractor and the spinal classifier head."""
        super(SpinalCNN, self).__init__()
        # Feature extractor: 3x32x32 -> 256x4x4 (4096 features flattened).
        self.conv_layer = nn.Sequential(
            # Stage 1: 3 -> 32 -> 64 channels
            nn.Conv2d(3, 32, kernel_size=3, padding=1),
            nn.BatchNorm2d(32),
            nn.ReLU(inplace=True),
            nn.Conv2d(32, 64, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2),
            # Stage 2: 64 -> 128 -> 128 channels
            nn.Conv2d(64, 128, kernel_size=3, padding=1),
            nn.BatchNorm2d(128),
            nn.ReLU(inplace=True),
            nn.Conv2d(128, 128, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2),
            nn.Dropout2d(p=0.05),
            # Stage 3: 128 -> 256 -> 256 channels
            nn.Conv2d(128, 256, kernel_size=3, padding=1),
            nn.BatchNorm2d(256),
            nn.ReLU(inplace=True),
            nn.Conv2d(256, 256, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2),
        )
        # Spinal layers: layer 1 sees Half_width inputs, layers 2-4 also see
        # the previous spinal layer's layer_width-dim output.
        self.fc_spinal_layer1 = self._spinal_layer(Half_width)
        self.fc_spinal_layer2 = self._spinal_layer(Half_width + layer_width)
        self.fc_spinal_layer3 = self._spinal_layer(Half_width + layer_width)
        self.fc_spinal_layer4 = self._spinal_layer(Half_width + layer_width)
        self.fc_out = nn.Sequential(
            nn.Dropout(p=0.1), nn.Linear(layer_width * 4, 10)
        )

    @staticmethod
    def _spinal_layer(in_features):
        # Dropout -> Linear -> ReLU: the shared shape of every spinal segment.
        return nn.Sequential(
            nn.Dropout(p=0.1), nn.Linear(in_features, layer_width),
            nn.ReLU(inplace=True),
        )

    def forward(self, x):
        """Map a (N, 3, 32, 32) batch to (N, 10) class scores."""
        feats = self.conv_layer(x)
        feats = feats.view(feats.size(0), -1)
        left = feats[:, 0:Half_width]
        right = feats[:, Half_width:2 * Half_width]
        # Layers 1/3 read the left half, layers 2/4 the right half.
        x1 = self.fc_spinal_layer1(left)
        x2 = self.fc_spinal_layer2(torch.cat([right, x1], dim=1))
        x3 = self.fc_spinal_layer3(torch.cat([left, x2], dim=1))
        x4 = self.fc_spinal_layer4(torch.cat([right, x3], dim=1))
        return self.fc_out(torch.cat([x1, x2, x3, x4], dim=1))
class CNN(nn.Module):
    """Baseline CNN: three conv stages followed by a conventional MLP head.

    Defined for comparison with SpinalCNN; it is not instantiated below.
    """

    def __init__(self):
        """Build the conv feature extractor and the plain FC classifier."""
        super(CNN, self).__init__()
        # Each stage halves the spatial size via 2x2 max-pool, so a
        # 3x32x32 input becomes 256x4x4 = 4096 flattened features.
        self.conv_layer = nn.Sequential(
            # Stage 1: 3 -> 32 -> 64 channels
            nn.Conv2d(3, 32, kernel_size=3, padding=1),
            nn.BatchNorm2d(32),
            nn.ReLU(inplace=True),
            nn.Conv2d(32, 64, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2),
            # Stage 2: 64 -> 128 -> 128 channels
            nn.Conv2d(64, 128, kernel_size=3, padding=1),
            nn.BatchNorm2d(128),
            nn.ReLU(inplace=True),
            nn.Conv2d(128, 128, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2),
            nn.Dropout2d(p=0.05),
            # Stage 3: 128 -> 256 -> 256 channels
            nn.Conv2d(128, 256, kernel_size=3, padding=1),
            nn.BatchNorm2d(256),
            nn.ReLU(inplace=True),
            nn.Conv2d(256, 256, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2),
        )
        # Conventional fully connected classifier head.
        self.fc_layer = nn.Sequential(
            nn.Dropout(p=0.1),
            nn.Linear(4096, 1024),
            nn.ReLU(inplace=True),
            nn.Linear(1024, 512),
            nn.ReLU(inplace=True),
            nn.Dropout(p=0.1),
            nn.Linear(512, 10),
        )

    def forward(self, x):
        """Map a (N, 3, 32, 32) batch to (N, 10) class scores."""
        features = self.conv_layer(x)
        flat = features.view(features.size(0), -1)
        return self.fc_layer(flat)
# Instantiate the SpinalNet-headed model on the selected device.
model = SpinalCNN().to(device)
# Loss and optimizer
criterion = nn.CrossEntropyLoss()
# Adam starts at learning_rate; the training loop later shrinks it via update_lr().
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
# Helper: overwrite the learning rate of every parameter group in place.
def update_lr(optimizer, lr):
    """Set the learning rate of all of *optimizer*'s param groups to *lr*."""
    for group in optimizer.param_groups:
        group['lr'] = lr
# Train the model, evaluating on the test set after every epoch.
total_step = len(train_loader)
curr_lr = learning_rate
best_accuracy = 0
for epoch in range(num_epochs):
    for i, (images, labels) in enumerate(train_loader):
        images = images.to(device)
        labels = labels.to(device)

        # Forward pass
        outputs = model(images)
        loss = criterion(outputs, labels)

        # Backward and optimize
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        if (i + 1) % 500 == 0:
            print("Epoch [{}/{}], Step [{}/{}] Loss: {:.4f}"
                  .format(epoch + 1, num_epochs, i + 1, total_step, loss.item()))

    # Per-epoch evaluation (dropout/BN switched to eval mode).
    model.eval()
    with torch.no_grad():
        correct = 0
        total = 0
        for images, labels in test_loader:
            images = images.to(device)
            labels = labels.to(device)
            outputs = model(images)
            _, predicted = torch.max(outputs.data, 1)
            total += labels.size(0)
            correct += (predicted == labels).sum().item()

        accuracy = correct / total
        print('Accuracy of the model on the test images: {} %'.format(100 * accuracy))
        # BUG FIX: best_accuracy was never updated, so the original
        # `if best_accuracy > correct / total` test could never be true and
        # the learning rate was never decayed. Record the best accuracy and
        # decay the LR by 3x whenever an epoch fails to improve on it (the
        # same plateau scheme used by the sibling VGG scripts in this repo).
        if accuracy > best_accuracy:
            best_accuracy = accuracy
        else:
            curr_lr = curr_lr / 3
            update_lr(optimizer, curr_lr)
    model.train()
| 7,532 | 29.746939 | 93 | py |
SpinalNet | SpinalNet-master/MNIST_VGG/EMNIST_digits_VGG_and _SpinalVGG.py | # -*- coding: utf-8 -*-
"""
This Script contains the default and Spinal VGG code for EMNIST(Digits).
This code trains both NNs as two different models.
This code randomly changes the learning rate to get a good result.
@author: Dipu
"""
import torch
import torchvision
import torch.nn as nn
import math
import torch.nn.functional as F
import numpy as np
# Hyper-parameters.
num_epochs = 200
batch_size_train = 100
batch_size_test = 1000
learning_rate = 0.005
# NOTE(review): momentum and log_interval are not referenced below in this
# script — apparently kept for parity with sibling scripts; verify.
momentum = 0.5
log_interval = 500
# EMNIST "digits" split; training set gets perspective/rotation augmentation,
# the test set only tensor conversion + the same normalization constants.
train_loader = torch.utils.data.DataLoader(
  torchvision.datasets.EMNIST('/files/', split='digits', train=True, download=True,
                              transform=torchvision.transforms.Compose([
                                torchvision.transforms.RandomPerspective(),
                                torchvision.transforms.RandomRotation(10, fill=(0,)),
                                torchvision.transforms.ToTensor(),
                                torchvision.transforms.Normalize(
                                  (0.1307,), (0.3081,))
                              ])),
  batch_size=batch_size_train, shuffle=True)
test_loader = torch.utils.data.DataLoader(
  torchvision.datasets.EMNIST('/files/', split='digits', train=False, download=True,
                              transform=torchvision.transforms.Compose([
                                torchvision.transforms.ToTensor(),
                                torchvision.transforms.Normalize(
                                  (0.1307,), (0.3081,))
                              ])),
  batch_size=batch_size_test, shuffle=True)
# Sanity-check the data pipeline: grab one test batch and plot six samples
# with their ground-truth labels.
examples = enumerate(test_loader)
batch_idx, (example_data, example_targets) = next(examples)
print(example_data.shape)
import matplotlib.pyplot as plt
fig = plt.figure()
for i in range(6):
    plt.subplot(2,3,i+1)
    plt.tight_layout()
    plt.imshow(example_data[i][0], cmap='gray', interpolation='none')
    plt.title("Ground Truth: {}".format(example_targets[i]))
    plt.xticks([])
    plt.yticks([])
fig
class VGG(nn.Module):
    """Plain VGG-style classifier for 28x28 single-channel images.

    Four conv stages (each ending in a 2x2 max-pool) reduce a 1x28x28 input
    to a 256-dim feature vector, followed by a dropout MLP head.

    Based on - https://github.com/kkweon/mnist-competition
    from: https://github.com/ranihorev/Kuzushiji_MNIST/blob/master/KujuMNIST.ipynb
    """

    @staticmethod
    def _init_weights(block):
        # He-style init for convolutions, identity init for batch norms.
        for m in block.children():
            if isinstance(m, nn.Conv2d):
                fan = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / fan))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()

    def two_conv_pool(self, in_channels, f1, f2):
        """Two conv+BN+ReLU layers followed by a 2x2 max-pool."""
        block = nn.Sequential(
            nn.Conv2d(in_channels, f1, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(f1),
            nn.ReLU(inplace=True),
            nn.Conv2d(f1, f2, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(f2),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2),
        )
        self._init_weights(block)
        return block

    def three_conv_pool(self, in_channels, f1, f2, f3):
        """Three conv+BN+ReLU layers followed by a 2x2 max-pool."""
        block = nn.Sequential(
            nn.Conv2d(in_channels, f1, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(f1),
            nn.ReLU(inplace=True),
            nn.Conv2d(f1, f2, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(f2),
            nn.ReLU(inplace=True),
            nn.Conv2d(f2, f3, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(f3),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2),
        )
        self._init_weights(block)
        return block

    def __init__(self, num_classes=10):
        """Build the four conv stages and the MLP classifier."""
        super(VGG, self).__init__()
        self.l1 = self.two_conv_pool(1, 64, 64)
        self.l2 = self.two_conv_pool(64, 128, 128)
        self.l3 = self.three_conv_pool(128, 256, 256, 256)
        self.l4 = self.three_conv_pool(256, 256, 256, 256)
        # 256 input features: 256 channels x 1 x 1 after four pools of a 28x28 input.
        self.classifier = nn.Sequential(
            nn.Dropout(p=0.5),
            nn.Linear(256, 512),
            nn.BatchNorm1d(512),
            nn.ReLU(inplace=True),
            nn.Dropout(p=0.5),
            nn.Linear(512, num_classes),
        )

    def forward(self, x):
        """Return per-class log-probabilities for a (N, 1, 28, 28) batch."""
        for stage in (self.l1, self.l2, self.l3, self.l4):
            x = stage(x)
        x = x.view(x.size(0), -1)
        return F.log_softmax(self.classifier(x), dim=1)
# Each spinal layer consumes half of the 256-dim conv feature vector plus the
# previous spinal layer's output; every spinal hidden layer is 128 wide.
Half_width = 128
layer_width = 128


class SpinalVGG(nn.Module):
    """VGG feature extractor with a SpinalNet fully connected head.

    Based on - https://github.com/kkweon/mnist-competition
    from: https://github.com/ranihorev/Kuzushiji_MNIST/blob/master/KujuMNIST.ipynb
    """

    @staticmethod
    def _init_weights(block):
        # He-style init for convolutions, identity init for batch norms.
        for m in block.children():
            if isinstance(m, nn.Conv2d):
                fan = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / fan))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()

    def two_conv_pool(self, in_channels, f1, f2):
        """Two conv+BN+ReLU layers followed by a 2x2 max-pool."""
        block = nn.Sequential(
            nn.Conv2d(in_channels, f1, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(f1),
            nn.ReLU(inplace=True),
            nn.Conv2d(f1, f2, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(f2),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2),
        )
        self._init_weights(block)
        return block

    def three_conv_pool(self, in_channels, f1, f2, f3):
        """Three conv+BN+ReLU layers followed by a 2x2 max-pool."""
        block = nn.Sequential(
            nn.Conv2d(in_channels, f1, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(f1),
            nn.ReLU(inplace=True),
            nn.Conv2d(f1, f2, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(f2),
            nn.ReLU(inplace=True),
            nn.Conv2d(f2, f3, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(f3),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2),
        )
        self._init_weights(block)
        return block

    def __init__(self, num_classes=10):
        """Build the conv stages, the four spinal layers and the output head."""
        super(SpinalVGG, self).__init__()
        self.l1 = self.two_conv_pool(1, 64, 64)
        self.l2 = self.two_conv_pool(64, 128, 128)
        self.l3 = self.three_conv_pool(128, 256, 256, 256)
        self.l4 = self.three_conv_pool(256, 256, 256, 256)
        self.fc_spinal_layer1 = self._spinal_layer(Half_width)
        self.fc_spinal_layer2 = self._spinal_layer(Half_width + layer_width)
        self.fc_spinal_layer3 = self._spinal_layer(Half_width + layer_width)
        self.fc_spinal_layer4 = self._spinal_layer(Half_width + layer_width)
        self.fc_out = nn.Sequential(
            nn.Dropout(p=0.5), nn.Linear(layer_width * 4, num_classes),)

    @staticmethod
    def _spinal_layer(in_features):
        # Dropout -> Linear -> BN -> ReLU: the shared shape of every spinal segment.
        return nn.Sequential(
            nn.Dropout(p=0.5), nn.Linear(in_features, layer_width),
            nn.BatchNorm1d(layer_width), nn.ReLU(inplace=True),)

    def forward(self, x):
        """Return per-class log-probabilities for a (N, 1, 28, 28) batch."""
        for stage in (self.l1, self.l2, self.l3, self.l4):
            x = stage(x)
        x = x.view(x.size(0), -1)
        left = x[:, 0:Half_width]
        right = x[:, Half_width:2 * Half_width]
        # Layers 1/3 read the left half, layers 2/4 the right half.
        x1 = self.fc_spinal_layer1(left)
        x2 = self.fc_spinal_layer2(torch.cat([right, x1], dim=1))
        x3 = self.fc_spinal_layer3(torch.cat([left, x2], dim=1))
        x4 = self.fc_spinal_layer4(torch.cat([right, x3], dim=1))
        out = self.fc_out(torch.cat([x1, x2, x3, x4], dim=1))
        return F.log_softmax(out, dim=1)
# NOTE(review): device is hard-coded to 'cuda'; the script will fail without a GPU.
device = 'cuda'


# Helper: overwrite the learning rate of every parameter group in place.
def update_lr(optimizer, lr):
    """Set the learning rate of all of *optimizer*'s param groups to *lr*."""
    for group in optimizer.param_groups:
        group['lr'] = lr
# Train the model
# Two models are trained side by side: model1 is the plain VGG baseline,
# model2 the SpinalNet variant, each with its own Adam optimizer and LR.
total_step = len(train_loader)
curr_lr1 = learning_rate
curr_lr2 = learning_rate
model1 = VGG().to(device)
model2 = SpinalVGG().to(device)
# Loss and optimizer
criterion = nn.CrossEntropyLoss()
optimizer1 = torch.optim.Adam(model1.parameters(), lr=learning_rate)
optimizer2 = torch.optim.Adam(model2.parameters(), lr=learning_rate)
# Train the model
# NOTE(review): total_step is recomputed here; the assignment is redundant.
total_step = len(train_loader)
best_accuracy1 = 0
best_accuracy2 =0
# Train both models on the same batches; after each epoch, evaluate on the
# test set and, when a model fails to improve on its best accuracy, restart
# it from a random smaller learning rate.
for epoch in range(num_epochs):
    for i, (images, labels) in enumerate(train_loader):
        images = images.to(device)
        labels = labels.to(device)

        # Forward/backward pass for the plain VGG baseline.
        outputs = model1(images)
        loss1 = criterion(outputs, labels)
        optimizer1.zero_grad()
        loss1.backward()
        optimizer1.step()

        # Forward/backward pass for the SpinalNet variant on the same batch.
        outputs = model2(images)
        loss2 = criterion(outputs, labels)
        optimizer2.zero_grad()
        loss2.backward()
        optimizer2.step()

        if i == 499:
            print("Ordinary Epoch [{}/{}], Step [{}/{}] Loss: {:.4f}"
                  .format(epoch+1, num_epochs, i+1, total_step, loss1.item()))
            print("Spinal Epoch [{}/{}], Step [{}/{}] Loss: {:.4f}"
                  .format(epoch+1, num_epochs, i+1, total_step, loss2.item()))

    # Test the model (dropout/BN in eval mode, no gradients).
    model1.eval()
    model2.eval()
    with torch.no_grad():
        correct1 = 0
        total1 = 0
        correct2 = 0
        total2 = 0
        for images, labels in test_loader:
            images = images.to(device)
            labels = labels.to(device)

            outputs = model1(images)
            _, predicted = torch.max(outputs.data, 1)
            total1 += labels.size(0)
            correct1 += (predicted == labels).sum().item()

            outputs = model2(images)
            _, predicted = torch.max(outputs.data, 1)
            total2 += labels.size(0)
            correct2 += (predicted == labels).sum().item()

        if best_accuracy1 >= correct1 / total1:
            # No improvement: jump to a random smaller LR (cubed uniform draw).
            # BUG FIX: np.asscalar() was removed in NumPy 1.23; convert via
            # float() instead (same single draw from the RNG).
            curr_lr1 = learning_rate * float(np.random.rand() ** 3)
            update_lr(optimizer1, curr_lr1)
            print('Test Accuracy of NN: {} % Best: {} %'.format(100 * correct1 / total1, 100*best_accuracy1))
        else:
            best_accuracy1 = correct1 / total1
            net_opt1 = model1
            print('Test Accuracy of NN: {} % (improvement)'.format(100 * correct1 / total1))

        if best_accuracy2 >= correct2 / total2:
            curr_lr2 = learning_rate * float(np.random.rand() ** 3)
            update_lr(optimizer2, curr_lr2)
            print('Test Accuracy of SpinalNet: {} % Best: {} %'.format(100 * correct2 / total2, 100*best_accuracy2))
        else:
            best_accuracy2 = correct2 / total2
            net_opt2 = model2
            print('Test Accuracy of SpinalNet: {} % (improvement)'.format(100 * correct2 / total2))
    model1.train()
    model2.train()
| 11,675 | 32.551724 | 116 | py |
SpinalNet | SpinalNet-master/MNIST_VGG/KMNIST_VGG_and_SpinalVGG.py | # -*- coding: utf-8 -*-
"""
This Script contains the default and Spinal VGG code for kMNIST.
This code trains both NNs as two different models.
This code randomly changes the learning rate to get a good result.
@author: Dipu
"""
import torch
import torchvision
import torch.nn as nn
import math
import torch.nn.functional as F
import numpy as np
# Hyper-parameters.
num_epochs = 200
batch_size_train = 100
batch_size_test = 1000
learning_rate = 0.005
# NOTE(review): momentum and log_interval are not referenced below in this
# script — apparently kept for parity with sibling scripts; verify.
momentum = 0.5
log_interval = 500
random_seed = 1
# Disable cuDNN and fix the seed for reproducibility.
torch.backends.cudnn.enabled = False
torch.manual_seed(random_seed)
# KMNIST; training set gets perspective/rotation augmentation, the test set
# only tensor conversion + the same normalization constants.
train_loader = torch.utils.data.DataLoader(
  torchvision.datasets.KMNIST('/files/', train=True, download=True,
                              transform=torchvision.transforms.Compose([
                                torchvision.transforms.RandomPerspective(),
                                torchvision.transforms.RandomRotation(10, fill=(0,)),
                                torchvision.transforms.ToTensor(),
                                torchvision.transforms.Normalize(
                                  (0.1307,), (0.3081,))
                              ])),
  batch_size=batch_size_train, shuffle=True)
test_loader = torch.utils.data.DataLoader(
  torchvision.datasets.KMNIST('/files/', train=False, download=True,
                              transform=torchvision.transforms.Compose([
                                torchvision.transforms.ToTensor(),
                                torchvision.transforms.Normalize(
                                  (0.1307,), (0.3081,))
                              ])),
  batch_size=batch_size_test, shuffle=True)
# Sanity-check the data pipeline: grab one (augmented) training batch and
# plot six samples with their ground-truth labels.
examples = enumerate(train_loader)
batch_idx, (example_data, example_targets) = next(examples)
print(example_data.shape)
import matplotlib.pyplot as plt
fig = plt.figure()
for i in range(6):
    plt.subplot(2,3,i+1)
    plt.tight_layout()
    plt.imshow(example_data[i][0], cmap='gray', interpolation='none')
    plt.title("Ground Truth: {}".format(example_targets[i]))
    plt.xticks([])
    plt.yticks([])
fig
class VGG(nn.Module):
    """Plain VGG-style classifier for 28x28 single-channel images.

    Four conv stages (each ending in a 2x2 max-pool) reduce a 1x28x28 input
    to a 256-dim feature vector, followed by a dropout MLP head.

    Based on - https://github.com/kkweon/mnist-competition
    from: https://github.com/ranihorev/Kuzushiji_MNIST/blob/master/KujuMNIST.ipynb
    """

    @staticmethod
    def _init_weights(block):
        # He-style init for convolutions, identity init for batch norms.
        for m in block.children():
            if isinstance(m, nn.Conv2d):
                fan = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / fan))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()

    def two_conv_pool(self, in_channels, f1, f2):
        """Two conv+BN+ReLU layers followed by a 2x2 max-pool."""
        block = nn.Sequential(
            nn.Conv2d(in_channels, f1, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(f1),
            nn.ReLU(inplace=True),
            nn.Conv2d(f1, f2, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(f2),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2),
        )
        self._init_weights(block)
        return block

    def three_conv_pool(self, in_channels, f1, f2, f3):
        """Three conv+BN+ReLU layers followed by a 2x2 max-pool."""
        block = nn.Sequential(
            nn.Conv2d(in_channels, f1, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(f1),
            nn.ReLU(inplace=True),
            nn.Conv2d(f1, f2, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(f2),
            nn.ReLU(inplace=True),
            nn.Conv2d(f2, f3, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(f3),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2),
        )
        self._init_weights(block)
        return block

    def __init__(self, num_classes=10):
        """Build the four conv stages and the MLP classifier."""
        super(VGG, self).__init__()
        self.l1 = self.two_conv_pool(1, 64, 64)
        self.l2 = self.two_conv_pool(64, 128, 128)
        self.l3 = self.three_conv_pool(128, 256, 256, 256)
        self.l4 = self.three_conv_pool(256, 256, 256, 256)
        # 256 input features: 256 channels x 1 x 1 after four pools of a 28x28 input.
        self.classifier = nn.Sequential(
            nn.Dropout(p=0.5),
            nn.Linear(256, 512),
            nn.BatchNorm1d(512),
            nn.ReLU(inplace=True),
            nn.Dropout(p=0.5),
            nn.Linear(512, num_classes),
        )

    def forward(self, x):
        """Return per-class log-probabilities for a (N, 1, 28, 28) batch."""
        for stage in (self.l1, self.l2, self.l3, self.l4):
            x = stage(x)
        x = x.view(x.size(0), -1)
        return F.log_softmax(self.classifier(x), dim=1)
# Each spinal layer consumes half of the 256-dim conv feature vector plus the
# previous spinal layer's output; every spinal hidden layer is 128 wide.
Half_width = 128
layer_width = 128


class SpinalVGG(nn.Module):
    """VGG feature extractor with a SpinalNet fully connected head.

    Based on - https://github.com/kkweon/mnist-competition
    from: https://github.com/ranihorev/Kuzushiji_MNIST/blob/master/KujuMNIST.ipynb
    """

    @staticmethod
    def _init_weights(block):
        # He-style init for convolutions, identity init for batch norms.
        for m in block.children():
            if isinstance(m, nn.Conv2d):
                fan = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / fan))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()

    def two_conv_pool(self, in_channels, f1, f2):
        """Two conv+BN+ReLU layers followed by a 2x2 max-pool."""
        block = nn.Sequential(
            nn.Conv2d(in_channels, f1, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(f1),
            nn.ReLU(inplace=True),
            nn.Conv2d(f1, f2, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(f2),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2),
        )
        self._init_weights(block)
        return block

    def three_conv_pool(self, in_channels, f1, f2, f3):
        """Three conv+BN+ReLU layers followed by a 2x2 max-pool."""
        block = nn.Sequential(
            nn.Conv2d(in_channels, f1, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(f1),
            nn.ReLU(inplace=True),
            nn.Conv2d(f1, f2, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(f2),
            nn.ReLU(inplace=True),
            nn.Conv2d(f2, f3, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(f3),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2),
        )
        self._init_weights(block)
        return block

    def __init__(self, num_classes=10):
        """Build the conv stages, the four spinal layers and the output head."""
        super(SpinalVGG, self).__init__()
        self.l1 = self.two_conv_pool(1, 64, 64)
        self.l2 = self.two_conv_pool(64, 128, 128)
        self.l3 = self.three_conv_pool(128, 256, 256, 256)
        self.l4 = self.three_conv_pool(256, 256, 256, 256)
        self.fc_spinal_layer1 = self._spinal_layer(Half_width)
        self.fc_spinal_layer2 = self._spinal_layer(Half_width + layer_width)
        self.fc_spinal_layer3 = self._spinal_layer(Half_width + layer_width)
        self.fc_spinal_layer4 = self._spinal_layer(Half_width + layer_width)
        self.fc_out = nn.Sequential(
            nn.Dropout(p=0.5), nn.Linear(layer_width * 4, num_classes),)

    @staticmethod
    def _spinal_layer(in_features):
        # Dropout -> Linear -> BN -> ReLU: the shared shape of every spinal segment.
        return nn.Sequential(
            nn.Dropout(p=0.5), nn.Linear(in_features, layer_width),
            nn.BatchNorm1d(layer_width), nn.ReLU(inplace=True),)

    def forward(self, x):
        """Return per-class log-probabilities for a (N, 1, 28, 28) batch."""
        for stage in (self.l1, self.l2, self.l3, self.l4):
            x = stage(x)
        x = x.view(x.size(0), -1)
        left = x[:, 0:Half_width]
        right = x[:, Half_width:2 * Half_width]
        # Layers 1/3 read the left half, layers 2/4 the right half.
        x1 = self.fc_spinal_layer1(left)
        x2 = self.fc_spinal_layer2(torch.cat([right, x1], dim=1))
        x3 = self.fc_spinal_layer3(torch.cat([left, x2], dim=1))
        x4 = self.fc_spinal_layer4(torch.cat([right, x3], dim=1))
        out = self.fc_out(torch.cat([x1, x2, x3, x4], dim=1))
        return F.log_softmax(out, dim=1)
# NOTE(review): device is hard-coded to 'cuda'; the script will fail without a GPU.
device = 'cuda'


# Helper: overwrite the learning rate of every parameter group in place.
def update_lr(optimizer, lr):
    """Set the learning rate of all of *optimizer*'s param groups to *lr*."""
    for group in optimizer.param_groups:
        group['lr'] = lr
# Train the model
# Two models are trained side by side: model1 is the plain VGG baseline,
# model2 the SpinalNet variant, each with its own Adam optimizer and LR.
total_step = len(train_loader)
curr_lr1 = learning_rate
curr_lr2 = learning_rate
model1 = VGG().to(device)
model2 = SpinalVGG().to(device)
# Loss and optimizer
criterion = nn.CrossEntropyLoss()
optimizer1 = torch.optim.Adam(model1.parameters(), lr=learning_rate)
optimizer2 = torch.optim.Adam(model2.parameters(), lr=learning_rate)
# Train the model
# NOTE(review): total_step is recomputed here; the assignment is redundant.
total_step = len(train_loader)
best_accuracy1 = 0
best_accuracy2 =0
# Train both models on the same batches; after each epoch, evaluate on the
# test set and, when a model fails to improve on its best accuracy, restart
# it from a random smaller learning rate.
for epoch in range(num_epochs):
    for i, (images, labels) in enumerate(train_loader):
        images = images.to(device)
        labels = labels.to(device)

        # Forward/backward pass for the plain VGG baseline.
        outputs = model1(images)
        loss1 = criterion(outputs, labels)
        optimizer1.zero_grad()
        loss1.backward()
        optimizer1.step()

        # Forward/backward pass for the SpinalNet variant on the same batch.
        outputs = model2(images)
        loss2 = criterion(outputs, labels)
        optimizer2.zero_grad()
        loss2.backward()
        optimizer2.step()

        if i == 499:
            print("Ordinary Epoch [{}/{}], Step [{}/{}] Loss: {:.4f}"
                  .format(epoch+1, num_epochs, i+1, total_step, loss1.item()))
            print("Spinal Epoch [{}/{}], Step [{}/{}] Loss: {:.4f}"
                  .format(epoch+1, num_epochs, i+1, total_step, loss2.item()))

    # Test the model (dropout/BN in eval mode, no gradients).
    model1.eval()
    model2.eval()
    with torch.no_grad():
        correct1 = 0
        total1 = 0
        correct2 = 0
        total2 = 0
        for images, labels in test_loader:
            images = images.to(device)
            labels = labels.to(device)

            outputs = model1(images)
            _, predicted = torch.max(outputs.data, 1)
            total1 += labels.size(0)
            correct1 += (predicted == labels).sum().item()

            outputs = model2(images)
            _, predicted = torch.max(outputs.data, 1)
            total2 += labels.size(0)
            correct2 += (predicted == labels).sum().item()

        if best_accuracy1 >= correct1 / total1:
            # No improvement: jump to a random smaller LR (cubed uniform draw).
            # BUG FIX: np.asscalar() was removed in NumPy 1.23; convert via
            # float() instead (same single draw from the RNG).
            curr_lr1 = learning_rate * float(np.random.rand() ** 3)
            update_lr(optimizer1, curr_lr1)
            print('Test Accuracy of NN: {} % Best: {} %'.format(100 * correct1 / total1, 100*best_accuracy1))
        else:
            best_accuracy1 = correct1 / total1
            net_opt1 = model1
            print('Test Accuracy of NN: {} % (improvement)'.format(100 * correct1 / total1))

        if best_accuracy2 >= correct2 / total2:
            curr_lr2 = learning_rate * float(np.random.rand() ** 3)
            update_lr(optimizer2, curr_lr2)
            print('Test Accuracy of SpinalNet: {} % Best: {} %'.format(100 * correct2 / total2, 100*best_accuracy2))
        else:
            best_accuracy2 = correct2 / total2
            net_opt2 = model2
            print('Test Accuracy of SpinalNet: {} % (improvement)'.format(100 * correct2 / total2))
    model1.train()
    model2.train()
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.