repo stringlengths 1 99 | file stringlengths 13 215 | code stringlengths 12 59.2M | file_length int64 12 59.2M | avg_line_length float64 3.82 1.48M | max_line_length int64 12 2.51M | extension_type stringclasses 1
value |
|---|---|---|---|---|---|---|
deficient-efficient | deficient-efficient-master/count.py | '''Count parameters or mult-adds in models.'''
from __future__ import print_function
import math
import torch
import argparse
from torch.autograd import Variable
from models.wide_resnet import WideResNet, WRN_50_2
from models.darts import DARTS
from models.MobileNetV2 import MobileNetV2
from funcs import what_conv_block
# Layer type names that measure_layer did not recognise; reported at the end
# of the script so missing cost models are visible.
ignored_modules = []
def get_layer_info(layer):
    """Return the layer's class name as it appears in its repr.

    nn.Module reprs look like ``Conv2d(3, 64, ...)``; everything before the
    first '(' is the type name.
    """
    rep = str(layer)
    return rep[:rep.find('(')].strip()
def get_layer_param(model):
    """Total number of parameter elements held by ``model``."""
    total = 0
    for param in model.parameters():
        total += param.numel()
    return total
class OpCounter(object):
    """Accumulates mult-add ('ops') and parameter counts over a forward pass.

    ``measure_layer`` is invoked from the monkey-patched ``forward`` methods
    installed by ``measure_model``; it dispatches on the layer's class name
    and adds the estimated cost of that layer to the running totals.
    """

    def __init__(self):
        self.count_ops = 0      # running mult-add estimate
        self.count_params = 0   # running parameter count

    def measure_layer(self, layer, x):
        delta_ops = 0
        delta_params = 0
        multi_add = 1  # multiplies and adds counted as one op
        type_name = get_layer_info(layer)
        # x is the forward() argument tuple; the input tensor is first
        x = x[0]
        ### ops_conv
        if type_name in ['Conv2d']:
            # run the saved original forward to learn the output spatial size
            out = layer.old_forward(x)
            out_h = out.size(2)
            out_w = out.size(3)
            delta_ops = layer.in_channels * layer.out_channels * layer.kernel_size[0] * \
                layer.kernel_size[1] * out_h * out_w / layer.groups * multi_add
            delta_params = get_layer_param(layer)
        ### ops_nonlinearity
        elif type_name in ['ReLU']:
            # one comparison per element
            delta_ops = x.numel()
            delta_params = get_layer_param(layer)
        ### ops_pooling
        elif type_name in ['AvgPool2d','MaxPool2d']:
            in_w = x.size()[2]
            if type(layer.kernel_size) is tuple:
                k = layer.kernel_size[0]
            else:
                k = layer.kernel_size
            kernel_ops = k * k
            # NOTE(review): both output dims are derived from size(2), i.e.
            # square spatial input is assumed — checked by the asserts below
            out_w = int((in_w + 2 * layer.padding - k) / layer.stride + 1)
            out_h = int((in_w + 2 * layer.padding - k) / layer.stride + 1)
            out = layer.old_forward(x)
            assert out_h == out.size(2)
            assert out_w == out.size(3)
            delta_ops = x.size()[0] * x.size()[1] * out_w * out_h * kernel_ops
            delta_params = get_layer_param(layer)
        ### ops_linear
        elif type_name in ['Linear']:
            # NOTE(review): assumes layer.bias is not None — confirm callers
            weight_ops = layer.weight.numel() * multi_add
            bias_ops = layer.bias.numel()
            delta_ops = x.size()[0] * (weight_ops + bias_ops)
            delta_params = get_layer_param(layer)
        ### ops_nothing
        elif type_name in ['BatchNorm2d', 'Dropout2d', 'DropChannel', 'Dropout']:
            # parameters counted, runtime cost treated as negligible
            delta_params = get_layer_param(layer)
        ### sequential takes no extra time
        elif type_name in ['Sequential']:
            pass
        ### riffle shuffle
        elif type_name in ['Riffle']:
            # technically no floating point operations
            pass
        ### channel expansion
        elif type_name in ['ChannelExpand']:
            # assume concatentation doesn't take extra FLOPs
            pass
        ### channel contraction
        elif type_name in ['ChannelCollapse']:
            # do as many additions as we have channels
            delta_ops += x.size(1)
        ### ACDC Convolution
        elif type_name in ['FastStackedConvACDC']:
            out_h = int((x.size()[2] + 2 * layer.padding[0] - layer.kernel_size[0]) /
                        layer.stride[0] + 1)
            out_w = int((x.size()[3] + 2 * layer.padding[1] - layer.kernel_size[1]) /
                        layer.stride[1] + 1)
            assert layer.groups == 1
            # pretend we're actually passing through the ACDC layers within
            N = max(layer.out_channels, layer.in_channels) # size of ACDC layers
            acdc_ops = 0
            for l in layer.layers:
                # per ACDC layer: 4N elementwise ops + 5N log2(N) for the DCTs
                acdc_ops += 4*N + 5*N*math.log(N,2)
                delta_params += 2*N
            delta_ops += acdc_ops*out_h*out_w
        ### Grouped ACDC Convolution
        elif type_name in ['GroupedConvACDC']:
            # NOTE(review): dead branch — the assert makes it unreachable
            assert False
            out_h = int((x.size()[2] + 2 * layer.padding[0] - layer.kernel_size[0]) /
                        layer.stride[0] + 1)
            out_w = int((x.size()[3] + 2 * layer.padding[1] - layer.kernel_size[1]) /
                        layer.stride[1] + 1)
            # pretend we're actually passing through the ACDC layers within
            N = layer.kernel_size[0]
            acdc_ops = layer.groups*(4*N + 5*N*math.log(N,2))
            conv_ops = layer.in_channels * layer.out_channels * layer.kernel_size[0] * \
                layer.kernel_size[1] / layer.groups
            ops = min(acdc_ops, conv_ops)
            delta_ops += ops*out_h*out_w
            delta_params += 2*N
        ### HashedNet Convolution
        elif type_name in ['HashedConv2d']:
            # same number of ops as convolution
            out_h = int((x.size()[2] + 2 * layer.padding[0] - layer.kernel_size[0]) /
                        layer.stride[0] + 1)
            out_w = int((x.size()[3] + 2 * layer.padding[1] - layer.kernel_size[1]) /
                        layer.stride[1] + 1)
            delta_ops = layer.in_channels * layer.out_channels * layer.kernel_size[0] * \
                layer.kernel_size[1] * out_h * out_w / layer.groups * multi_add
            delta_params = get_layer_param(layer)
        elif type_name in ['DepthwiseSep']:
            # wrapper for Conv2Ds, that are counted above
            pass
        elif type_name in ['TensorTrain', 'Tucker']:
            # ops counted as if this were a dense convolution; params counted
            # from this module's direct parameters only (children handled by
            # their own measure_layer calls)
            if hasattr(layer, 'grouped'):
                out = layer.grouped.old_forward(x)
                out_h = out.size(2)
                out_w = out.size(3)
            else:
                out = layer.old_forward(x)
                out_h = out.size(2)
                out_w = out.size(3)
            delta_ops = layer.in_channels * layer.out_channels * layer.kernel_size[0] * \
                layer.kernel_size[1] * out_h * out_w / layer.groups * multi_add
            delta_params = sum([p.numel() for k,p in layer._parameters.items() if p is not None])
        elif type_name in ['LinearShuffleNet', 'ShuffleBlock']:
            # all operations implemented by internal conv2d, so this can be ignored
            pass
        elif type_name in ['GenericLowRank']:
            # all operations implemented by internal conv2d
            pass
        elif type_name in ['LowRank']:
            if hasattr(layer, 'grouped'):
                x = layer.grouped.old_forward(x)
                if layer.upsample > 1:
                    x = x.repeat(1,layer.upsample,1,1)
            out = layer.lowrank.old_forward(x)
            out_h = out.size(2)
            out_w = out.size(3)
            # factor of 2: the low-rank layer is two convolutions
            delta_ops = 2 * layer.lowrank.in_channels * \
                layer.lowrank.out_channels * layer.lowrank.kernel_size[0] * \
                layer.lowrank.kernel_size[1] * out_h * out_w / layer.lowrank.groups \
                * multi_add
            delta_params = get_layer_param(layer.lowrank)
        #elif type_name in ['TensorTrain']:
        elif False:
            # NOTE(review): dead experimental branch kept for reference only
            # number of cores
            d = 0
            while hasattr(layer, 'weight_core_%i'%d):
                core = getattr(layer, 'weight_core_%i'%d)
                d += 1
            d += 1
            # max dimension
            m = max(layer.tn_weight.torch().size())
            # maximal rank
            r = max(layer.tn_weight.ranks_tt)
            # max dim of kernel matrix
            maxMN = max(layer.in_channels, layer.out_channels)
            # complexity
            c = d*r*r*m*maxMN + d*r*r*r*m
            print(layer.in_channels*layer.out_channels, c)
            import ipdb
            ipdb.set_trace()
            # number of Us
            n_us = 0
            while hasattr(layer, 'weight_u_%i'%n_us):
                u = getattr(layer, 'weight_u_%i'%n_us)
                n_us += 1
            if type_name == 'TensorTrain':
                # From "Tensorizing Neural Networks"
                # For the case of the TT-matrix-by-explicit-vector product c = Wb,
                # the computational complexity is O(d r^2 m max(M,N)), where d is
                # the number of cores of the TT-matrix W, m is the max_k m_k, r is
                # the maximal rank and N = \prod_k=1^d n_k is the length of the
                # vector b.
                #
                # Seems like, naively, the mult-adds can be estimated as those used
                # by an independent matrix multiply for each core, with the result
                # then summed. Reading this from Section 4.
                d = n_cores
                r = layer.rank
                N = x.size(1)
            # plus the ops of the grouped convolution? or does that get caught anyway?
            #assert False
            # this would double count the grouped
            #delta_params = get_layer_param(layer)
        ### unknown layer type
        else:
            # record unknown types instead of failing; reported at exit
            if type_name not in ignored_modules:
                ignored_modules.append(type_name)
            #raise TypeError('unknown layer type: %s' % type_name)
        self.count_ops += delta_ops
        self.count_params += delta_params
        return None
def measure_model(model, H, W):
    """Push a zero input of shape (1, 3, H, W) through ``model`` and return
    ``(mult_adds, n_params)`` as estimated by :class:`OpCounter`.

    Every submodule's ``forward`` is replaced with a wrapper that first calls
    ``opcount.measure_layer``; the original is stashed as ``old_forward`` so
    the wrappers (and the cost models above) can still run the real layer.
    """
    opcount = OpCounter()
    data = Variable(torch.zeros(1, 3, H, W))
    def modify_forward(model):
        for child in model.modules():
            # factory so the closure binds this particular module
            def new_forward(m):
                def lambda_forward(*x):
                    opcount.measure_layer(m, x)
                    try:
                        return m.old_forward(*x)
                    except NotImplementedError as e:
                        # identify which module has no forward implemented
                        print(m)
                        raise e
                return lambda_forward
            child.old_forward = child.forward
            child.forward = new_forward(child)
    # recursive function
    def restore_forward(model):
        # NOTE(review): relies on an ``is_leaf`` helper that is not defined in
        # this file; currently unused — the call below is commented out
        for child in model.children():
            # leaf node
            if is_leaf(child) and hasattr(child, 'old_forward'):
                child.forward = child.old_forward
                child.old_forward = None
            else:
                restore_forward(child)
    modify_forward(model)
    model.forward(data)
    #restore_forward(model)
    return opcount.count_ops, opcount.count_params
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='WRN parameter/flop usage')
    parser.add_argument('dataset', type=str, choices=['cifar10', 'cifar100', 'imagenet'], help='Choose between Cifar10/100/imagenet.')
    #network stuff
    parser.add_argument('--network', default='WideResNet', type=str, help='network to use')
    parser.add_argument('--wrn_depth', default=40, type=int, help='depth for WRN')
    parser.add_argument('--wrn_width', default=2, type=float, help='width for WRN')
    parser.add_argument('--module', default=None, type=str, help='path to file containing custom Conv and maybe Block module definitions')
    parser.add_argument('--blocktype', default='Basic',type=str, help='blocktype used if specify a --conv')
    parser.add_argument('--conv', default=None, type=str, help='Conv type')
    args = parser.parse_args()
    # Stuff happens from here:
    Conv, Block = what_conv_block(args.conv, args.blocktype, args.module)
    # input resolution and class count depend on the dataset
    if args.dataset == 'cifar10':
        h,w = 32,32
        num_classes = 10
    elif args.dataset == 'cifar100':
        h,w = 32, 32
        num_classes = 100
    elif args.dataset == 'imagenet':
        h,w = 224, 224
        num_classes = 1000
    else:
        raise ValueError(args.dataset)
    # instance the model
    def build_network(Conv, Block):
        # NOTE(review): falls through (returns None) for unknown --network
        if args.network == 'WideResNet':
            return WideResNet(args.wrn_depth, args.wrn_width, Conv, Block,
                              num_classes=num_classes, dropRate=0)
        elif args.network == 'WRN_50_2':
            return WRN_50_2(Conv)
        elif args.network == 'DARTS':
            assert not args.conv == 'Conv', 'The base network here used' \
                ' separable convolutions, so you probably did not mean to set this' \
                ' option.'
            return DARTS(Conv, num_classes=num_classes, drop_path_prob=0., auxiliary=False)
        elif args.network == 'MobileNetV2':
            return MobileNetV2(Conv)
    model = build_network(Conv, Block)
    # count how many parameters are in it
    flops, params = measure_model(model, h, w)
    print("Mult-Adds: %.5E"%flops)
    print("Params: %.5E"%params)
    # cross-check against a direct count; a mismatch means some layer type
    # was not recognised (those end up in ignored_modules)
    sanity = sum([p.numel() for p in model.parameters()])
    assert sanity == params, "Sanity check, parameters: %.5E =/= %.5E \n %s"%(sanity, params, str(ignored_modules))
    print(ignored_modules)
    #import time
    #for m in model.modules():
    #    time.sleep(0.2)
    #    print(get_layer_info(m), sum([p.numel() for p in m.parameters()]))
| 12,725 | 38.156923 | 138 | py |
deficient-efficient | deficient-efficient-master/funcs.py | import torch
import torch.nn.functional as F
from models import *
from models.wide_resnet import parse_options
def distillation(y, teacher_scores, labels, T, alpha):
return F.kl_div(F.log_softmax(y/T, dim=1), F.softmax(teacher_scores/T, dim=1)) * (T*T * 2. * alpha)\
+ F.cross_entropy(y, labels) * (1. - alpha)
def at(x):
    """Attention map of an activation tensor: mean of squared values over the
    channel dim, flattened per example and L2-normalised."""
    energy = x.pow(2).mean(1)
    flat = energy.view(x.size(0), -1)
    return F.normalize(flat)

def at_loss(x, y):
    """MSE between the attention maps of two activation tensors."""
    return F.mse_loss(at(x), at(y))
def accuracy(output, target, topk=(1,)):
    """Computes the precision@k for the specified values of k.

    Args:
        output: (batch, classes) tensor of class scores.
        target: (batch,) tensor of ground-truth class indices.
        topk: tuple of k values to report.

    Returns:
        List of 1-element tensors, one per k, each holding the percentage of
        examples whose true label appears in the top-k predictions.
    """
    maxk = max(topk)
    batch_size = target.size(0)
    _, pred = output.topk(maxk, 1, True, True)
    pred = pred.t()
    correct = pred.eq(target.view(1, -1).expand_as(pred))
    res = []
    for k in topk:
        # reshape, not view: correct[:k] is a slice of a transposed tensor,
        # so it is non-contiguous and .view(-1) raises on recent PyTorch
        correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
        res.append(correct_k.mul_(100.0 / batch_size))
    return res
def get_no_params(net, verbose=True):
    """Count parameters from ``net.state_dict()`` entries.

    Also totals entries whose key contains 'conv'; entries with 'bn' in the
    key are skipped for printing/conv-counting. Returns the grand total.
    """
    state = net.state_dict()
    tot = 0
    conv_tot = 0
    for name in state:
        count = state[name].numel()
        tot += count
        if 'bn' not in name:
            if verbose:
                print('%s has %d params' % (name, count))
            if 'conv' in name:
                conv_tot += count
    if verbose:
        print('Net has %d conv params' % conv_tot)
        print('Net has %d params in total' % tot)
    return tot
class AverageMeter(object):
    """Tracks the most recent value and a running average of a series."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Zero all statistics."""
        self.val = self.avg = self.sum = self.count = 0

    def update(self, val, n=1):
        """Record ``val`` observed ``n`` times and refresh the running mean."""
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count
def what_conv_block(conv, blocktype, module):
    """Resolve the (Conv, Block) class pair used to build a network.

    Args:
        conv: name of a built-in conv option (handed to ``parse_options``),
            or None to load definitions from ``module`` instead.
        blocktype: block option name used alongside ``conv``.
        module: path to a python file defining ``Conv`` (and optionally
            ``Block``).

    Returns:
        (Conv, Block) classes.

    Raises:
        ValueError: if both ``conv`` and ``module`` are None.
    """
    if conv is not None:
        Conv, Block = parse_options(conv, blocktype)
    elif module is not None:
        # bug fix: this branch used the undefined ``imp`` module before
        import types
        conv_module = types.ModuleType('conv')
        with open(module, 'r') as f:
            exec(f.read(), conv_module.__dict__)
        Conv = conv_module.Conv
        try:
            Block = conv_module.Block
        except AttributeError:
            # if the module doesn't implement a custom block, use the default
            # (bug fix: previously referenced the undefined ``args.blocktype``)
            _, Block = parse_options('Conv', blocktype)
    else:
        raise ValueError("You must specify either an existing conv option, or supply your own module to import")
    return Conv, Block
| 2,477 | 26.533333 | 112 | py |
deficient-efficient | deficient-efficient-master/load_wrn50_2.py | import re
import torch
import torch.nn.functional as F
from torch.utils import model_zoo
from models.blocks import Conv
from models.wide_resnet import WRN_50_2
from collections import OrderedDict
def all_equal(iterable_1, iterable_2):
    """True iff the two iterables have equal length and equal elements.

    Bug fix: the original zip-based comparison silently ignored trailing
    elements, so shapes of different rank (e.g. (2,) vs (2, 3)) could compare
    equal; materialising both and checking lengths closes that hole.
    """
    seq_1, seq_2 = tuple(iterable_1), tuple(iterable_2)
    return len(seq_1) == len(seq_2) and all(x == y for x, y in zip(seq_1, seq_2))
# functional model definition from functional zoo: https://github.com/szagoruyko/functional-zoo/blob/master/imagenet-validation.py#L27-L47
def define_model(params):
    """Build a functional WRN-50-2 forward pass ``f(input, params)`` from a
    flat dict of parameter tensors keyed like 'group0.block1.conv2.weight'."""
    def conv2d(input, params, base, stride=1, pad=0):
        # convolution using weights/bias looked up from the params dict
        return F.conv2d(input, params[base + '.weight'],
                        params[base + '.bias'], stride, pad)
    def group(input, params, base, stride, n):
        # a stage of n bottleneck blocks; only block 0 strides and projects
        o = input
        for i in range(0,n):
            b_base = ('%s.block%d.conv') % (base, i)
            x = o
            o = conv2d(x, params, b_base + '0')
            o = F.relu(o)
            o = conv2d(o, params, b_base + '1', stride=i==0 and stride or 1, pad=1)
            o = F.relu(o)
            o = conv2d(o, params, b_base + '2')
            if i == 0:
                # projection shortcut on the first block of the group
                o += conv2d(x, params, b_base + '_dim', stride=stride)
            else:
                o += x
            o = F.relu(o)
        return o
    # determine network size by parameters
    blocks = [sum([re.match('group%d.block\d+.conv0.weight'%j, k) is not None
                   for k in params.keys()]) for j in range(4)]
    def f(input, params):
        # stem: 7x7 stride-2 conv, relu, 3x3 stride-2 max pool
        o = F.conv2d(input, params['conv0.weight'], params['conv0.bias'], 2, 3)
        o = F.relu(o)
        o = F.max_pool2d(o, 3, 2, 1)
        o_g0 = group(o, params, 'group0', 1, blocks[0])
        o_g1 = group(o_g0, params, 'group1', 2, blocks[1])
        o_g2 = group(o_g1, params, 'group2', 2, blocks[2])
        o_g3 = group(o_g2, params, 'group3', 2, blocks[3])
        o = F.avg_pool2d(o_g3, 7, 1, 0)
        o = o.view(o.size(0), -1)
        o = F.linear(o, params['z.fc.weight'], params['z.fc.bias'])
        return o
    return f
if __name__ == '__main__':
    # our model definition
    net = WRN_50_2(Conv)
    # load parameters from model zoo
    params = model_zoo.load_url('https://s3.amazonaws.com/modelzoo-networks/wide-resnet-50-2-export-5ae25d50.pth')
    # otherwise the ordering will be messed up
    params['z.fc.weight'] = params.pop('fc.weight')
    params['z.fc.bias'] = params.pop('fc.bias')
    params = sorted(params.items()) # list of tuples, in order
    # make state_dict from model_zoo parameters by matching weights to
    # weights and biases to biases positionally (both lists are sorted)
    state_dict = OrderedDict()
    w_i, b_i = 0, 0
    for n,p in net.state_dict().items():
        if 'weight' in n and 'bn' not in n:
            # advance the cursor to the next zoo weight tensor
            while 'weight' not in params[w_i][0]:
                w_i += 1
            k, v = params[w_i]
            print(k, " == ", n)
            assert all_equal(v.shape, p.size()), f"{v.shape} =/= {p.size()}"
            state_dict[n] = v
            w_i += 1
        elif 'bias' in n:
            # same, for biases
            while 'bias' not in params[b_i][0]:
                b_i += 1
            k, v = params[b_i]
            print(k, " == ", n)
            assert all_equal(v.shape, p.size()), f"{v.shape} =/= {p.size()}"
            state_dict[n] = v
            b_i += 1
        else:
            # batch-norm statistics etc.: keep our freshly initialised values
            state_dict[n] = p
    assert max(w_i, b_i) == len(params) # all params are matched
    # test if this is the same as the functional implementation
    params = OrderedDict(params)
    f = define_model(params)
    net.load_state_dict(state_dict)
    net.eval()
    X = torch.randn(2,3,224,224)
    func_out, net_out = f(X, params), net(X)[0]
    error = torch.abs(func_out - net_out)
    assert error.max() < 1e-3, "%f"%error.max()
    print("Output given random input is equal within %f"%error.max())
    # now save a new checkpoint file, with correct saved terms
    save_dict = {}
    save_dict['net'] = state_dict
    save_dict['epoch'] = 100
    save_dict['conv'] = 'Conv'
    save_dict['blocktype'] = None
    save_dict['module'] = None
    torch.save(save_dict, 'checkpoints/wrn_50_2.imagenet.modelzoo.t7')
| 4,000 | 36.046296 | 138 | py |
deficient-efficient | deficient-efficient-master/collate_results.py | # open schedule json, then search for which machines the longest progressed job
# has run on
import json
import sys
import os
import torch
import subprocess
from subprocess import PIPE
from collections import OrderedDict
from funcs import what_conv_block
from models.wide_resnet import WideResNet, WRN_50_2
from models.darts import DARTS
from count import measure_model
from tqdm import tqdm
# NOTE: runs at import time — requires machine_list.json in the working dir
with open('machine_list.json', 'r') as f:
    # list of strings with "hostname:path" where the deficient-efficient repos
    # can be found
    machines = json.load(f)
def ckpt_name(experiment):
    """Pull the checkpoint path out of an experiment's argument list.

    Returns the token following '-s' (student checkpoint) when present,
    otherwise the token following '-t' (teacher checkpoint).
    """
    flag = '-s' if '-s' in experiment else '-t'
    flag_positions = [i for i, token in enumerate(experiment) if token == flag]
    return experiment[flag_positions[0] + 1]
def parse_name(path):
    """Decode a checkpoint filename into ``(is_student, method, setting)``.

    Filenames look like '<net>.<method>_<setting>.<date-or-student>...'; the
    segment(s) between the first dot and the date/'student' token hold the
    method name and its setting, joined by an underscore.
    """
    pieces = path.split('.')
    monthday = pieces[-2]
    tail = pieces[1:]  # drop the leading network/host element
    cut = next(i for i, tok in enumerate(tail) if tok == monthday or tok == 'student')
    method, setting = '.'.join(tail[:cut]).split('_')
    return 'student' in tail, method, setting
def parse_checkpoint(ckpt_name, ckpt_contents):
    """Summarise a loaded checkpoint dict into an OrderedDict of results:
    epoch, error curves, parameter/FLOP counts and the parsed settings."""
    results = OrderedDict()
    results['epoch'] = ckpt_contents['epoch']
    results['val_errors'] = [float(x) for x in ckpt_contents['val_errors']]
    results['train_errors'] = [float(x) for x in ckpt_contents['train_errors']]
    # hard part: count parameters by making an instance of the network
    network = {'wrn_28_10': 'WideResNet', 'darts': 'DARTS', 'wrn_50_2': 'WRN_50_2'}[ckpt_name.split(".")[0]]
    h,w = {'WideResNet': (32,32), 'DARTS': (32,32), 'WRN_50_2': (224,224)}[network]
    SavedConv, SavedBlock = what_conv_block(ckpt_contents['conv'],
            ckpt_contents['blocktype'], ckpt_contents['module'])
    model = build_network(SavedConv, SavedBlock, network)
    flops, params = measure_model(model, h, w)
    # measure_model must agree with a direct count, or a layer type was missed
    assert params == sum([p.numel() for p in model.parameters()])
    results['no_params'] = params
    results['flops'] = flops
    results['settings'] = parse_name(ckpt_name)
    # tuple consumed directly by downstream scatter plots
    results['scatter'] = (params, results['val_errors'][-1], results['train_errors'][-1], results['epoch'], flops)
    return results
# instance the model
def build_network(Conv, Block, network):
    """Instantiate the named architecture with the given Conv/Block classes.

    NOTE(review): silently returns None for an unrecognised ``network``.
    """
    if network == 'WideResNet':
        return WideResNet(28, 10, Conv, Block,
                          num_classes=10, dropRate=0)
    elif network == 'WRN_50_2':
        return WRN_50_2(Conv)
    elif network == 'DARTS':
        return DARTS(Conv, num_classes=10, drop_path_prob=0., auxiliary=False)
def keep_oldest(collated, ckpt_name, ckpt_contents):
    """Parse ``ckpt_contents`` and return whichever record — the freshly
    parsed one or the existing entry in ``collated`` — has trained for more
    epochs."""
    parsed = parse_checkpoint(ckpt_name, ckpt_contents)
    try:
        existing = collated[ckpt_name]
        existing_epochs = existing['epoch']
    except KeyError:
        # nothing recorded under this name yet
        return parsed
    return parsed if int(existing_epochs) < int(parsed['epoch']) else existing
def main():
    """Copy the checkpoints named in a schedule json from remote machines
    into ./collate, then merge them into collated.json, keeping — per
    checkpoint name — the record with the most completed epochs."""
    try:
        # read the schedule from json
        json_path = sys.argv[1]
        with open(json_path, "r") as f:
            schedule = json.load(f)
        # prepare directory
        if not os.path.exists("collate"):
            os.mkdir("collate")
        else:
            # clean up directory
            old_ckpts = os.listdir("collate")
            for c in old_ckpts:
                os.remove(os.path.join("collate", c))
        # make a list of all the checkpoint files we need to check
        checkpoints = []
        for e in schedule:
            checkpoints.append(ckpt_name(e)+".t7")
        # look for these checkpoints on every machine we know about
        collated = []
        for m in tqdm(machines, desc='machine'):
            # connect to the remote machine
            hostname, directory = m.split(":")
            checkpoint_dir = os.path.join(directory, "checkpoints")
            completed = subprocess.run(f"ssh {hostname} ls {checkpoint_dir}".split(" "), stdout=PIPE, stderr=PIPE)
            checkpoints_on_remote = completed.stdout.decode().split("\n")
            # look for overlap between that and the checkpoints we care about
            overlap = list(set(checkpoints_on_remote) & set(checkpoints))
            for checkpoint in tqdm(overlap, desc="copying"):
                checkpoint_loc = os.path.join(checkpoint_dir, checkpoint)
                checkpoint_dest = f"collate/{hostname}.{checkpoint}"
                if not os.path.exists(checkpoint_dest):
                    subprocess.run(f"scp {hostname}:{checkpoint_loc} {checkpoint_dest}".split(" "), stdout=PIPE, stderr=PIPE)
    except IndexError:
        # no schedule argument given: skip the remote-copy stage and just
        # collate whatever is already present in ./collate
        pass
    # iterate over copied files; filenames are "<hostname>.<original name>"
    collated = OrderedDict()
    copied = os.listdir("collate")
    for checkpoint in tqdm(copied, desc='Opening checkpoints'):
        checkpoint_loc = os.path.join("collate", checkpoint)
        hostname = checkpoint.split(".")[0]
        checkpoint_name = ".".join(checkpoint.split(".")[1:])
        checkpoint_contents = torch.load(checkpoint_loc)
        collated[checkpoint_name] = keep_oldest(collated, checkpoint_name, checkpoint_contents)
    for k in collated:
        print(k, collated[k]['epoch'], collated[k]['val_errors'][-1])
    with open("collated.json", "w") as f:
        f.write(json.dumps(collated, indent=2))

if __name__ == "__main__":
    main()
| 5,460 | 37.730496 | 125 | py |
deficient-efficient | deficient-efficient-master/history.py | # opens checkpoints and prints the commands used to run each
import torch
import os
import argparse
parser = argparse.ArgumentParser(description='Inspect saved checkpoints')
parser.add_argument('--match', type=str, default=None, help='Filter checkpoints by keyword.')

if __name__ == '__main__':
    args = parser.parse_args()
    ckpt_paths = os.listdir("checkpoints")
    # filter for search term
    if args.match is not None:
        ckpt_paths = [c for c in ckpt_paths if args.match in c]
    # print, for each checkpoint that recorded its argv, the epoch reached,
    # latest validation error and the command used to run it
    for p in ckpt_paths:
        try:
            ckpt = torch.load("checkpoints/"+p)
            if 'args' in ckpt.keys():
                print(p + " (%i-%.2f) "%(ckpt['epoch'], ckpt['val_errors'][-1]) + ": " + " ".join(ckpt['args']))
        except Exception:
            # best-effort: skip unreadable/legacy checkpoints, but (bug fix)
            # don't swallow KeyboardInterrupt/SystemExit like the old bare
            # ``except:`` did
            continue
deficient-efficient | deficient-efficient-master/models/resnet.py | '''This is a rewriting of the native resnet definition that comes with Pytorch, to allow it to use our blocks and
convolutions for imagenet experiments. Annoyingly, the pre-trained models don't use pre-activation blocks.'''
import torch
import torch.nn as nn
import math
import torchvision.models.resnet
import torch.utils.model_zoo as model_zoo
from .blocks import *
__all__ = ['ResNet', 'resnet18', 'resnet34']#, 'resnet50', 'resnet101','resnet152']
# Download locations of the torchvision pre-trained checkpoints.
model_urls = {
    'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
    'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
    'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
    'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
    'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
}
class ResNet(nn.Module):
    """ImageNet ResNet built from this repo's NetworkBlock abstraction so
    that alternative conv/block types can be swapped in.

    Args:
        conv: convolution class handed to each NetworkBlock.
        block: block class (e.g. OldBlock) used inside each NetworkBlock.
        n: list of block counts per stage, e.g. [2, 2, 2, 2] for ResNet-18.
        num_classes: size of the final classifier.
        s: number of NetworkBlock segments each stage is split into.
    """

    def __init__(self, conv, block, n, num_classes=1000, s=1):
        super(ResNet, self).__init__()
        # standard ImageNet stem: 7x7/2 conv, BN, ReLU, 3x3/2 max pool
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        nChannels =[64, 64, 128, 256, 512]
        # each stage is a ModuleList of s NetworkBlocks; only the first
        # segment of a stage changes the channel count
        self.layer1 = torch.nn.ModuleList()
        for i in range(s):
            self.layer1.append(NetworkBlock(int(n[0] // s), nChannels[0] if i == 0 else nChannels[1],
                nChannels[1], block, 1, conv=conv))
        self.layer2 = torch.nn.ModuleList()
        for i in range(s):
            self.layer2.append(NetworkBlock(int(n[1] // s), nChannels[1] if i == 0 else nChannels[2],
                nChannels[2], block, 2, conv=conv))
        self.layer3 = torch.nn.ModuleList()
        for i in range(s):
            self.layer3.append(NetworkBlock(int(n[2] // s), nChannels[2] if i == 0 else nChannels[3],
                nChannels[3], block, 2, conv=conv))
        self.layer4 = torch.nn.ModuleList()
        for i in range(s):
            self.layer4.append(NetworkBlock(int(n[3] // s), nChannels[3] if i == 0 else nChannels[4],
                nChannels[4], block, 2, conv=conv))
        # self.layer1 = self._make_layer(block, 64, layers[0])
        # self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        # self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        # self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
        self.avgpool = nn.AvgPool2d(7, stride=1)
        self.fc = nn.Linear(512, num_classes)
        # He-style initialisation for convs, unit/zero for batch norms
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()

    def forward(self, x):
        # returns (logits, activations): per-segment outputs are collected
        # for attention-transfer style distillation
        activations = []
        out = self.maxpool(self.relu(self.bn1(self.conv1(x))))
        for sub_block in self.layer1:
            out = sub_block(out)
            activations.append(out)
        for sub_block in self.layer2:
            out = sub_block(out)
            activations.append(out)
        for sub_block in self.layer3:
            out = sub_block(out)
            activations.append(out)
        for sub_block in self.layer4:
            out = sub_block(out)
            activations.append(out)
        out = self.avgpool(out)
        out = out.view(out.size(0), -1)
        out = self.fc(out)
        return out, activations
def resnet18(pretrained=False, conv=nnConv, block=OldBlock):
    """Constructs a ResNet-18 model.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
    """
    net = ResNet(conv, block, [2, 2, 2, 2])
    if not pretrained:
        return net
    net.load_state_dict(model_zoo.load_url(model_urls['resnet18']))
    return net
def resnet34(pretrained=False, conv=nnConv, block=OldBlock):
    """Constructs a ResNet-34 model.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
    """
    model = ResNet(conv,block, [3, 4, 6, 3])
    if pretrained:
        old_model = torchvision.models.resnet.resnet34(pretrained=False)
        old_model.load_state_dict(model_zoo.load_url(model_urls['resnet34']))
        new_state_dict = model.state_dict()
        old_state_dict = old_model.state_dict()
        # This assumes the sequence of each module in the network is the same in both cases.
        # Ridiculously, batch norm params are stored in a different sequence in the downloaded state dict, so we have to
        # load the old model definition, load in its downloaded state dict to change the order back, then transfer this!
        # NOTE(review): old_model is constructed and the checkpoint fetched
        # twice; the second pass below looks redundant — confirm before
        # removing (load_url caches, so the cost is mostly re-parsing)
        old_model = torchvision.models.resnet.resnet34(pretrained=False)
        old_model.load_state_dict(model_zoo.load_url(model_urls['resnet34']))
        old_names = [v for v in old_state_dict]
        new_names = [v for v in new_state_dict]
        # positional transfer: i-th old tensor fills the i-th new key
        for i,j in enumerate(old_names):
            new_state_dict[new_names[i]] = old_state_dict[j]
        model.load_state_dict(new_state_dict)
    return model
def test2():
    """Smoke test: push a random ImageNet-sized batch through resnet34."""
    net = resnet34()
    x = torch.randn(1, 3, 224, 224)
    # NOTE(review): ``Variable`` is not imported in this file — presumably
    # re-exported by ``from .blocks import *``; confirm before running
    y, _ = net(Variable(x))
    print(y.size())

if __name__ == '__main__':
    test2()
# Haven't written the old bottleneck yet.
#
# def resnet50(pretrained=False, **kwargs):
# """Constructs a ResNet-50 model.
# Args:
# pretrained (bool): If True, returns a model pre-trained on ImageNet
# """
# model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)
# if pretrained:
# model.load_state_dict(model_zoo.load_url(model_urls['resnet50']))
# return model
#
#
# def resnet101(pretrained=False, **kwargs):
# """Constructs a ResNet-101 model.
# Args:
# pretrained (bool): If True, returns a model pre-trained on ImageNet
# """
# model = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)
# if pretrained:
# model.load_state_dict(model_zoo.load_url(model_urls['resnet101']))
# return model
#
#
# def resnet152(pretrained=False, **kwargs):
# """Constructs a ResNet-152 model.
# Args:
# pretrained (bool): If True, returns a model pre-trained on ImageNet
# """
# model = ResNet(Bottleneck, [3, 8, 36, 3], **kwargs)
# if pretrained:
# model.load_state_dict(model_zoo.load_url(model_urls['resnet152']))
# return model
| 6,623 | 34.047619 | 120 | py |
deficient-efficient | deficient-efficient-master/models/hashed.py | # HashedNet Convolutional Layer: https://arxiv.org/abs/1504.04788
from functools import reduce
import torch
import torch.nn as nn
import torch.nn.functional as F
class HashedConv2d(nn.Conv2d):
    """Conv2d with the weights of the convolutional filters parameterised
    using a budgeted subset of parameters and random indexes to place those
    parameters in the weight tensor (HashedNet, arXiv:1504.04788).

    Args:
        budget: number of free parameters to share across the full-size
            weight tensor; must not exceed the dense weight count.
        remaining args: as in nn.Conv2d (note: a bias is always created,
            matching the original implementation, regardless of ``bias``).
    """
    def __init__(self, in_channels, out_channels, kernel_size, budget,
                 stride=1, padding=0, dilation=1, groups=1, bias=True):
        super(HashedConv2d, self).__init__(in_channels, out_channels,
              kernel_size, stride=stride, padding=padding, dilation=dilation,
              groups=groups, bias=True)
        # grab budgeted subset of the (initialised) dense weights
        assert self.weight.numel() >= budget, \
            f"budget {budget} higher than {self.weight.numel()}"
        self.weight_size = self.weight.size()
        budgeted = self.weight.data.view(-1)[:budget]
        del self.weight
        # register the budgeted weights as the only trainable weight param
        self.register_parameter('hashed_weight', nn.Parameter(budgeted))
        # precompute the random index map; ``high`` is exclusive, so use
        # ``budget`` (bug fix: ``budget-1`` made the last budgeted weight
        # unreachable)
        idxs = torch.randint(high=budget, size=self.weight_size).long()
        idxs = idxs.view(-1)
        # buffer so the indexes follow .to()/.cuda() and get checkpointed
        self.register_buffer('idxs', idxs)

    def forward(self, x):
        # gather shared parameters into a full-size weight tensor
        # (debugging ipdb try/except removed)
        W = self.hashed_weight.index_select(0, self.idxs).view(self.weight_size)
        # complete forward pass as normal
        return F.conv2d(x, W, self.bias, self.stride, self.padding,
                        self.dilation, self.groups)
class HalfHashedSeparable(nn.Module):
    """A depthwise grouped convolution followed by a HashedNet 1x1 convolution.
    Grouped convolution could also be hashed, but it's not."""
    def __init__(self, in_channels, out_channels, kernel_size, budget,
                 stride=1, padding=0, dilation=1, groups=1, bias=True):
        super(HalfHashedSeparable, self).__init__()
        # has to have hashed in the name to get caught by alternative weight
        # decay setting, it is not actually hashed
        if kernel_size > 1:
            # full-cost depthwise convolution handles the spatial extent
            self.grouped = nn.Conv2d(in_channels, in_channels, kernel_size,
                    stride=stride, padding=padding, dilation=dilation,
                    groups=in_channels, bias=False)
            # we spent some of the budget on that grouped convolution
            assert self.grouped.weight.numel() == reduce(lambda x,y: x*y, self.grouped.weight.size())
            budget = budget - self.grouped.weight.numel()
            assert budget > 0, \
                "budget exceeded by grouped convolution: %i too many"%(-budget)
            # the hashed pointwise conv consumes the remaining budget
            self.hashed = HashedConv2d(in_channels, out_channels, 1, budget,
                    bias=bias)
        else:
            # 1x1 case: no depthwise step; the hashed conv carries stride etc.
            self.grouped = None
            self.hashed = HashedConv2d(in_channels, out_channels, 1, budget,
                    stride=stride, padding=padding, dilation=dilation,
                    bias=bias)

    def forward(self, x):
        if self.grouped is not None:
            x = self.grouped(x)
        return self.hashed(x)
class HashedSeparable(nn.Module):
    """Separabled, where grouped and pointwise are both Hashed.."""
    def __init__(self, in_channels, out_channels, kernel_size, budget,
                 stride=1, padding=0, dilation=1, groups=1, bias=True):
        # NOTE(review): the ``groups`` argument is accepted but never used
        super(HashedSeparable, self).__init__()
        # has to have hashed in the name to get caught by alternative weight
        # decay setting, it is not actually hashed
        # split the budget between depthwise and pointwise convs in
        # proportion to their dense parameter counts
        grouped_params = float(in_channels * kernel_size * kernel_size)
        pointwise_params = float(in_channels * out_channels)
        total_params = grouped_params + pointwise_params
        grouped_budget = int(budget*grouped_params/total_params)
        pointwise_budget = int(budget*pointwise_params/total_params)
        #print(total_params, grouped_budget, pointwise_budget)
        if kernel_size > 1:
            self.grouped = HashedConv2d(in_channels, in_channels, kernel_size,
                    grouped_budget, stride=stride, padding=padding,
                    dilation=dilation, groups=in_channels, bias=False)
            # stride already applied by the grouped conv
            stride = 1
        else:
            self.grouped = None
            # 1x1 case: the pointwise conv gets the whole budget
            pointwise_budget = budget
        # NOTE(review): the message negates ``budget`` — prints the wrong
        # magnitude when it fires; looks copied from HalfHashedSeparable
        assert budget > 0, "budget must be greater than 0, was %i"%(-budget)
        self.hashed = HashedConv2d(in_channels, out_channels, 1,
                pointwise_budget, stride=stride, bias=bias)

    def forward(self, x):
        if self.grouped is not None:
            x = self.grouped(x)
        return self.hashed(x)
if __name__ == '__main__':
    # micro-benchmarks comparing hashed variants against plain Conv2d
    # (requires a CUDA device: each setup string calls .cuda())
    from timeit import timeit
    setup = "from __main__ import HashedConv2d; import torch; X = torch.randn(128, 256, 28, 28).cuda(); conv = HashedConv2d(256, 512, 3, 1000, bias=False).cuda()"
    print("HashedConv2d: ", timeit("_ = conv(X)", setup=setup, number=100))
    setup = "import torch.nn as nn; import torch; X = torch.randn(128, 256, 28, 28).cuda(); conv = nn.Conv2d(256, 512, 3, bias=False).cuda()"
    print("Conv2d: ", timeit("_ = conv(X)", setup=setup, number=100))
    setup = "from __main__ import HalfHashedSeparable; import torch; X = torch.randn(128, 256, 28, 28).cuda(); conv = HalfHashedSeparable(256, 512, 3, 5000, bias=False).cuda()"
    print("HalfHashedSeparable: ", timeit("_ = conv(X)", setup=setup, number=100))
    setup = "from __main__ import HashedSeparable; import torch; X = torch.randn(128, 256, 28, 28).cuda(); conv = HashedSeparable(256, 512, 3, 5000, bias=False).cuda()"
    print("HashedSeparable: ", timeit("_ = conv(X)", setup=setup, number=100))
| 5,827 | 48.811966 | 176 | py |
deficient-efficient | deficient-efficient-master/models/darts.py | # DARTS network definition
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.transforms as transforms
from torch.utils.checkpoint import checkpoint
from collections import namedtuple
from .blocks import DepthwiseSep
from .wide_resnet import group_lowrank, compression
#############################
# Training utils start here #
#############################
class Cutout(object):
    """Cutout augmentation: zero out one randomly-placed square patch of a
    CHW image tensor, in place."""
    def __init__(self, length):
        # Side length of the (unclipped) square patch to erase.
        self.length = length
    def __call__(self, img):
        height, width = img.size(1), img.size(2)
        # Draw the patch centre uniformly at random (same RNG order as before:
        # row first, then column), then clip the square to the image bounds.
        cy, cx = np.random.randint(height), np.random.randint(width)
        half = self.length // 2
        top, bottom = np.clip(cy - half, 0, height), np.clip(cy + half, 0, height)
        left, right = np.clip(cx - half, 0, width), np.clip(cx + half, 0, width)
        mask = np.ones((height, width), np.float32)
        mask[top: bottom, left: right] = 0.
        img *= torch.from_numpy(mask).expand_as(img)
        return img
def _data_transforms_cifar10():
    """Build the (train, valid) torchvision transform pipelines for CIFAR-10.

    Train: padded random crop, horizontal flip, normalisation, then Cutout;
    valid: tensor conversion and normalisation only.
    """
    # Per-channel mean/std of the CIFAR-10 training set.
    CIFAR_MEAN = [0.49139968, 0.48215827, 0.44653124]
    CIFAR_STD = [0.24703233, 0.24348505, 0.26158768]
    train_transform = transforms.Compose([
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize(CIFAR_MEAN, CIFAR_STD),
    ])
    # Cutout must run on the tensor, so it is appended after ToTensor/Normalize.
    if True: # always use cutout
        train_transform.transforms.append(Cutout(16))
    valid_transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(CIFAR_MEAN, CIFAR_STD),
    ])
    return train_transform, valid_transform
#####################################
# End of training utils #
#####################################
# Model definition code starts here #
#####################################
# A genotype lists, for the normal and the reduction cell, the
# (operation name, input-state index) pairs for each node, plus which
# intermediate states are concatenated to form the cell output.
Genotype = namedtuple('Genotype', 'normal normal_concat reduce reduce_concat')
# The published DARTS "V2" architecture found by the original search.
DARTS_V2 = Genotype(normal=[('sep_conv_3x3', 0), ('sep_conv_3x3', 1), ('sep_conv_3x3', 0), ('sep_conv_3x3', 1), ('sep_conv_3x3', 1), ('skip_connect', 0), ('skip_connect', 0), ('dil_conv_3x3', 2)], normal_concat=[2, 3, 4, 5], reduce=[('max_pool_3x3', 0), ('max_pool_3x3', 1), ('skip_connect', 2), ('max_pool_3x3', 1), ('max_pool_3x3', 0), ('skip_connect', 2), ('skip_connect', 2), ('max_pool_3x3', 1)], reduce_concat=[2, 3, 4, 5])
# Candidate operations, keyed by genotype name. Each factory takes
# (channels C, stride, BN affine flag, substitute conv class) and returns a module.
OPS = {
  'none' : lambda C, stride, affine, conv: Zero(stride),
  'avg_pool_3x3' : lambda C, stride, affine, conv: nn.AvgPool2d(3, stride=stride, padding=1, count_include_pad=False),
  'max_pool_3x3' : lambda C, stride, affine, conv: nn.MaxPool2d(3, stride=stride, padding=1),
  'skip_connect' : lambda C, stride, affine, conv: Identity() if stride == 1 else FactorizedReduce(C, C, conv, affine=affine),
  'sep_conv_3x3' : lambda C, stride, affine, conv: SepConv(C, C, 3, stride, 1, Conv=conv, affine=affine),
  'sep_conv_5x5' : lambda C, stride, affine, conv: SepConv(C, C, 5, stride, 2, Conv=conv, affine=affine),
  'sep_conv_7x7' : lambda C, stride, affine, conv: SepConv(C, C, 7, stride, 3, Conv=conv, affine=affine),
  'dil_conv_3x3' : lambda C, stride, affine, conv: DilConv(C, C, 3, stride, 2, 2, Conv=conv, affine=affine),
  'dil_conv_5x5' : lambda C, stride, affine, conv: DilConv(C, C, 5, stride, 4, 2, Conv=conv, affine=affine),
  # this is never used so you can remove it without hitting any errors
  # 'conv_7x1_1x7' : lambda C, stride, affine, conv: nn.Sequential(
  #   nn.ReLU(inplace=False),
  #   nn.Conv2d(C, C, (1,7), stride=(1, stride), padding=(0, 3), bias=False),
  #   nn.Conv2d(C, C, (7,1), stride=(stride, 1), padding=(3, 0), bias=False),
  #   nn.BatchNorm2d(C, affine=affine)
  #   ),
}
class ReLUConvBN(nn.Module):
    """ReLU -> convolution -> BatchNorm, run under gradient checkpointing."""
    def __init__(self, C_in, C_out, ConvClass, kernel_size, stride, padding, affine=True):
        super(ReLUConvBN, self).__init__()
        stages = [
            nn.ReLU(inplace=False),
            ConvClass(C_in, C_out, kernel_size, stride=stride,
                      padding=padding, bias=False),
            nn.BatchNorm2d(C_out, affine=affine),
        ]
        self.op = nn.Sequential(*stages)
    def forward(self, x):
        # Recompute activations during backward instead of storing them.
        return checkpoint(self.op, x)
class DilConv(nn.Module):
    """ReLU -> dilated (separable) convolution -> BatchNorm."""
    def __init__(self, C_in, C_out, kernel_size, stride, padding, dilation, Conv=DepthwiseSep, affine=True):
        super(DilConv, self).__init__()
        stages = [
            nn.ReLU(inplace=False),
            Conv(C_in, C_out, kernel_size=kernel_size, stride=stride,
                 padding=padding, dilation=dilation, bias=False),
            nn.BatchNorm2d(C_out, affine=affine),
        ]
        self.op = nn.Sequential(*stages)
    def forward(self, x):
        return self.op(x)
class SepConv(nn.Module):
    """Two stacked ReLU -> separable-conv -> BatchNorm stages; only the first
    stage applies the requested stride."""
    def __init__(self, C_in, C_out, kernel_size, stride, padding, Conv=DepthwiseSep, affine=True):
        super(SepConv, self).__init__()
        first = [
            nn.ReLU(inplace=False),
            Conv(C_in, C_in, kernel_size=kernel_size, stride=stride,
                 padding=padding, bias=False),
            nn.BatchNorm2d(C_in, affine=affine),
        ]
        second = [
            nn.ReLU(inplace=False),
            Conv(C_in, C_out, kernel_size=kernel_size, stride=1,
                 padding=padding, bias=False),
            nn.BatchNorm2d(C_out, affine=affine),
        ]
        self.op = nn.Sequential(*(first + second))
    def forward(self, x):
        return self.op(x)
class Identity(nn.Module):
    """No-op module: returns its input unchanged."""
    def __init__(self):
        super(Identity, self).__init__()
    def forward(self, x):
        return x
class Zero(nn.Module):
    """Produce a zero tensor shaped like the (optionally strided) input."""
    def __init__(self, stride):
        super(Zero, self).__init__()
        self.stride = stride
    def forward(self, x):
        # Subsample spatially first when striding so the output matches the
        # shape a strided convolution would produce.
        out = x if self.stride == 1 else x[:, :, ::self.stride, ::self.stride]
        return out.mul(0.)
class FactorizedReduce(nn.Module):
    """Halve spatial resolution by concatenating two stride-2 1x1 convs, one
    on the input and one on the input shifted by a pixel, then BatchNorm."""
    def __init__(self, C_in, C_out, ConvClass, affine=True):
        super(FactorizedReduce, self).__init__()
        assert C_out % 2 == 0
        self.relu = nn.ReLU(inplace=False)
        self.conv_1 = ConvClass(C_in, C_out // 2, 1, stride=2, padding=0, bias=False)
        self.conv_2 = ConvClass(C_in, C_out // 2, 1, stride=2, padding=0, bias=False)
        self.bn = nn.BatchNorm2d(C_out, affine=affine)
    def forward(self, x):
        def reduce_fn(inp):
            inp = self.relu(inp)
            halves = [self.conv_1(inp), self.conv_2(inp[:, :, 1:, 1:])]
            return self.bn(torch.cat(halves, dim=1))
        # Checkpointed: trade recomputation for activation memory.
        return checkpoint(reduce_fn, x)
class Cell(nn.Module):
    """One DARTS cell: a small DAG of candidate operations.

    Both cell inputs are first projected to C channels; each intermediate node
    sums the outputs of two operations applied to earlier states, and the
    states named in the genotype's concat list are concatenated channel-wise
    to form the cell output (so the cell emits multiplier*C channels).
    """
    def __init__(self, genotype, C_prev_prev, C_prev, C, reduction, reduction_prev, Conv):
        super(Cell, self).__init__()
        self.Conv = Conv
        # If the previous cell reduced resolution, s0 must be downsampled to match s1.
        if reduction_prev:
            self.preprocess0 = FactorizedReduce(C_prev_prev, C, Conv)
        else:
            self.preprocess0 = ReLUConvBN(C_prev_prev, C, Conv, 1, 1, 0)
        self.preprocess1 = ReLUConvBN(C_prev, C, Conv, 1, 1, 0)
        # Reduction and normal cells use separate genotype fields.
        if reduction:
            op_names, indices = zip(*genotype.reduce)
            concat = genotype.reduce_concat
        else:
            op_names, indices = zip(*genotype.normal)
            concat = genotype.normal_concat
        self._compile(C, op_names, indices, concat, reduction)
    def _compile(self, C, op_names, indices, concat, reduction):
        # Instantiate the genotype's ops; two consecutive ops feed each node.
        assert len(op_names) == len(indices)
        self._steps = len(op_names) // 2
        self._concat = concat
        self.multiplier = len(concat)
        self._ops = nn.ModuleList()
        for name, index in zip(op_names, indices):
            # In a reduction cell, only ops reading the two cell inputs stride.
            stride = 2 if reduction and index < 2 else 1
            op = OPS[name](C, stride, True, self.Conv)
            self._ops += [op]
        self._indices = indices
    def forward(self, s0, s1, drop_prob):
        s0 = self.preprocess0(s0)
        s1 = self.preprocess1(s1)
        states = [s0, s1]
        for i in range(self._steps):
            h1 = states[self._indices[2*i]]
            h2 = states[self._indices[2*i+1]]
            op1 = self._ops[2*i]
            op2 = self._ops[2*i+1]
            h1 = op1(h1)
            h2 = op2(h2)
            # Stochastic depth: randomly drop (and rescale) op outputs while
            # training; identity connections are never dropped.
            if self.training and drop_prob > 0.:
                if not isinstance(op1, Identity):
                    h1 = drop_path(h1, drop_prob)
                if not isinstance(op2, Identity):
                    h2 = drop_path(h2, drop_prob)
            s = h1 + h2
            states += [s]
        return torch.cat([states[i] for i in self._concat], dim=1)
        #return checkpoint(cat_1, *[states[i] for i in self._concat])
class AuxiliaryHeadCIFAR(nn.Module):
    """Auxiliary classifier attached two-thirds of the way through the
    network; assumes an 8x8 spatial input (CIFAR)."""
    def __init__(self, C, num_classes):
        super(AuxiliaryHeadCIFAR, self).__init__()
        head = [
            nn.ReLU(inplace=True),
            nn.AvgPool2d(5, stride=3, padding=0, count_include_pad=False), # image size = 2 x 2
            nn.Conv2d(C, 128, 1, bias=False),
            nn.BatchNorm2d(128),
            nn.ReLU(inplace=True),
            nn.Conv2d(128, 768, 2, bias=False),
            nn.BatchNorm2d(768),
            nn.ReLU(inplace=True),
        ]
        self.features = nn.Sequential(*head)
        self.classifier = nn.Linear(768, num_classes)
    def forward(self, x):
        feats = self.features(x)
        return self.classifier(feats.view(feats.size(0), -1))
def drop_path(x, drop_prob):
    """Stochastic depth: with probability `drop_prob`, zero an entire sample's
    activations (per batch element), rescaling survivors by 1/keep_prob so the
    expectation is unchanged. Mutates `x` in place and returns it."""
    if drop_prob > 0.:
        keep_prob = 1. - drop_prob
        # One Bernoulli draw per batch element, broadcast over C/H/W.
        mask = torch.FloatTensor(x.size(0), 1, 1, 1).bernoulli_(keep_prob).to(x.device)
        x.div_(keep_prob)
        x.mul_(mask)
    return x
class DARTS(nn.Module):
    """CIFAR-scale DARTS network: a conv stem followed by `layers` cells
    (reduction cells at 1/3 and 2/3 depth), global pooling and a linear
    classifier. `forward` returns (logits, attention_maps, aux_logits); the
    auxiliary logits are only produced while training with `auxiliary` on."""
    def __init__(self, ConvClass=DepthwiseSep, C=36, num_classes=10, layers=20, auxiliary=True,
            genotype=DARTS_V2, drop_path_prob=0.2):
        # Saved so compression() can rebuild this exact architecture.
        self.kwargs = dict(ConvClass=ConvClass, C=C, num_classes=num_classes,
                layers=layers, auxiliary=auxiliary, genotype=genotype,
                drop_path_prob=drop_path_prob)
        super(DARTS, self).__init__()
        self.drop_path_prob = drop_path_prob
        self._layers = layers
        self._auxiliary = auxiliary
        stem_multiplier = 3
        C_curr = stem_multiplier*C
        self.stem = nn.Sequential(
            nn.Conv2d(3, C_curr, 3, padding=1, bias=False),
            nn.BatchNorm2d(C_curr)
        )
        C_prev_prev, C_prev, C_curr = C_curr, C_curr, C
        self.cells = nn.ModuleList()
        reduction_prev = False
        for i in range(layers):
            # Double the channel count and reduce resolution at 1/3 and 2/3 depth.
            if i in [layers//3, 2*layers//3]:
                C_curr *= 2
                reduction = True
            else:
                reduction = False
            cell = Cell(genotype, C_prev_prev, C_prev, C_curr, reduction, reduction_prev, ConvClass)
            reduction_prev = reduction
            self.cells += [cell]
            C_prev_prev, C_prev = C_prev, cell.multiplier*C_curr
            if i == 2*layers//3:
                # Record the width feeding the auxiliary head (attached at 2/3 depth).
                C_to_auxiliary = C_prev
        if auxiliary:
            self.auxiliary_head = AuxiliaryHeadCIFAR(C_to_auxiliary, num_classes)
        self.global_pooling = nn.AdaptiveAvgPool2d(1)
        self.classifier = nn.Linear(C_prev, num_classes)
    def compression_ratio(self):
        # Parameter count relative to the same net built with DepthwiseSep convs.
        return compression(self.__class__, self.kwargs)
    def grouped_parameters(self, weight_decay):
        # Compressed-layer parameters get weight decay scaled by the
        # compression ratio; see group_lowrank.
        return group_lowrank(self.named_parameters(), weight_decay,
                self.compression_ratio())
    def forward(self, input):
        logits_aux = None
        s0 = s1 = self.stem(input)
        cell_AMs = []
        # Normalised spatial attention map (for attention-transfer distillation).
        attention = lambda x: F.normalize(x.pow(2).mean(1).view(x.size(0), -1))
        layers = len(self.cells)
        for i, cell in enumerate(self.cells):
            s0, s1 = s1, cell(s0, s1, self.drop_path_prob)
            if i in [layers//3, 2*layers//3]:
                # NOTE(review): the attention map is taken from s0 (the state
                # *before* the reduction cell's output) — confirm intended.
                cell_AMs.append(attention(s0))
            if i == 2*self._layers//3:
                if self._auxiliary and self.training:
                    logits_aux = self.auxiliary_head(s1)
        out = self.global_pooling(s1)
        logits = self.classifier(out.view(out.size(0),-1))
        return logits, cell_AMs, logits_aux
if __name__ == '__main__':
    # Smoke test: build the default DARTS network and run one CIFAR-sized batch.
    darts = DARTS()
    X = torch.randn(10,3,32,32)
    print(darts(X))
| 11,450 | 32.979228 | 429 | py |
deficient-efficient | deficient-efficient-master/models/wide_resnet.py | # network definition
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from collections import OrderedDict
# wildcard import for legacy reasons
if __name__ == '__main__':
from blocks import *
else:
from .blocks import *
def parse_options(convtype, blocktype):
    """Map legacy command-line strings to a conv factory and a block class.

    Unknown block type strings yield block=None."""
    # legacy cmdline argument parsing
    if not isinstance(convtype, str):
        raise NotImplementedError("Tuple convolution specification no longer supported.")
    conv = conv_function(convtype)
    block_choices = {'Basic': BasicBlock, 'Bottle': BottleBlock, 'Old': OldBlock}
    block = block_choices.get(blocktype)
    return conv, block
def group_lowrank(named_parameters, weight_decay, compression_ratio):
    """Split parameters into two optimiser groups.

    Parameters whose names mark them as belonging to low-rank/compressed
    substitute layers get their weight decay scaled by the model's compression
    ratio; all other parameters keep the optimiser's default decay."""
    # Name fragments identifying compressed-layer parameters (ACDC A/D
    # matrices, shuffle, hashed, decomposition cores/factors, generic lowrank).
    lowrank_keys = ('A', 'D', 'shuffle', 'hashed', 'weight_core', 'weight_u',
                    'lowrank')
    lowrank_params, other_params = [], []
    for name, param in named_parameters:
        if any(key in name for key in lowrank_keys):
            lowrank_params.append(param)
        else:
            other_params.append(param)
    return [{'params': lowrank_params,
             'weight_decay': compression_ratio*weight_decay},
            {'params': other_params}]
def compression(model_class, kwargs):
    """Compression ratio of the configured model vs. its uncompressed baseline.

    Builds `model_class(**kwargs)` (the compressed model), then rebuilds the
    same architecture with the standard convolution and returns
    compressed_params / uncompressed_params (also printed).
    """
    # assume there is a kwarg "ConvClass", which is the convolution we've chosen
    compressed_params = sum([p.numel() for p in
        model_class(**kwargs).parameters()])
    # BUGFIX: work on a copy — callers pass their stored `self.kwargs`, and
    # mutating it here would permanently overwrite ConvClass, making every
    # later call compare the baseline against itself (ratio 1.0).
    kwargs = dict(kwargs)
    if 'genotype' in kwargs:
        # standard conv with DARTS is DepthwiseSep
        kwargs['ConvClass'] = DepthwiseSep
    else:
        # everything else it's Conv
        kwargs['ConvClass'] = Conv
    uncompressed_params = sum([p.numel() for p in
        model_class(**kwargs).parameters()])
    ratio = float(compressed_params)/float(uncompressed_params)
    print("Compression: %i to %i, ratio %.2f"%(uncompressed_params,
        compressed_params, ratio))
    return ratio
class WideResNet(nn.Module):
    """Wide ResNet with substitutable convolutions.

    `depth` must satisfy depth = 6n + 4: three groups of n blocks, each block
    holding two 3x3 convs, plus 4 fixed layers. `s` splits every group into s
    sub-blocks so attention maps can be collected between them. `forward`
    returns (logits, attention_maps)."""
    def __init__(self, depth, widen_factor, ConvClass, block, num_classes=10, dropRate=0.0, s = 1):
        super(WideResNet, self).__init__()
        # Saved so compression_ratio() can rebuild this exact architecture.
        self.kwargs = dict(depth=depth, widen_factor=widen_factor, ConvClass=ConvClass,
                block=block, num_classes=num_classes, dropRate=dropRate, s=s)
        nChannels = [16, 16 * widen_factor, 32 * widen_factor, 64 * widen_factor]
        nChannels = [int(a) for a in nChannels]
        assert ((depth - 4) % 6 == 0) # depth = 6n+4: 3 groups x n blocks x 2 convs + 4 fixed layers
        n = (depth - 4) // 6
        assert n % s == 0, 'n mod s must be zero'
        # 1st conv before any network block
        self.conv1 = nn.Conv2d(3, nChannels[0], kernel_size=3, stride=1,
                               padding=1, bias=False)
        # 1st block
        self.block1 = torch.nn.ModuleList()
        for i in range(s):
            self.block1.append(NetworkBlock(int(n//s), nChannels[0] if i == 0 else nChannels[1],
                nChannels[1], block, 1, dropRate, ConvClass))
        # 2nd block (first sub-block downsamples with stride 2)
        self.block2 = torch.nn.ModuleList()
        for i in range(s):
            self.block2.append(NetworkBlock(int(n//s), nChannels[1] if i == 0 else nChannels[2],
                nChannels[2], block, 2 if i == 0 else 1, dropRate, ConvClass))
        # 3rd block (first sub-block downsamples with stride 2)
        self.block3 = torch.nn.ModuleList()
        for i in range(s):
            self.block3.append(NetworkBlock(int(n//s), nChannels[2] if i == 0 else nChannels[3],
                nChannels[3], block, 2 if i == 0 else 1, dropRate, ConvClass))
        # global average pooling and classifier
        self.bn1 = nn.BatchNorm2d(nChannels[3])
        self.relu = nn.ReLU(inplace=True)
        self.fc = nn.Linear(nChannels[3], num_classes)
        self.nChannels = nChannels[3]
        # normal is better than uniform initialisation
        # this should really be in `self.reset_parameters`
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                try:
                    # substitute conv layers may not expose a dense .weight
                    m.weight.data.normal_(0, math.sqrt(2. / n))
                except AttributeError:
                    pass
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                m.bias.data.zero_()
    def compression_ratio(self):
        return compression(self.__class__, self.kwargs)
    def grouped_parameters(self, weight_decay):
        # iterate over parameters and separate those in ACDC layers
        return group_lowrank(self.named_parameters(), weight_decay,
                self.compression_ratio())
    def forward(self, x):
        activation_maps = []
        out = self.conv1(x)
        #activations.append(out)
        # Normalised spatial attention map (for attention-transfer distillation).
        attention = lambda x: F.normalize(x.pow(2).mean(1).view(x.size(0), -1))
        for sub_block in self.block1:
            out = sub_block(out)
            activation_maps.append(attention(out))
        for sub_block in self.block2:
            out = sub_block(out)
            activation_maps.append(attention(out))
        for sub_block in self.block3:
            out = sub_block(out)
            activation_maps.append(attention(out))
        out = self.relu(self.bn1(out))
        out = F.avg_pool2d(out, 8)
        out = out.view(-1, self.nChannels)
        return self.fc(out), activation_maps
class ResNet(nn.Module):
    """ImageNet-style ResNet with a substitutable convolution class.

    `ConvClass` replaces the convs inside residual blocks and downsample
    projections; the stem conv, BN and pooling stay standard. `forward`
    returns (logits, attention_maps) with one normalised spatial attention
    map per stage, for attention-transfer distillation.
    """
    def __init__(self, ConvClass, layers, block=Bottleneck, widen=1,
                 num_classes=1000, expansion=4):
        # Saved so compression_ratio() can rebuild this exact architecture.
        self.kwargs = dict(layers=layers, expansion=expansion,
                ConvClass=ConvClass, widen=widen, num_classes=num_classes,
                block=block)
        self.expansion = expansion
        super(ResNet, self).__init__()
        self.Conv = ConvClass
        self.inplanes = 64
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64*widen, layers[0])
        self.layer2 = self._make_layer(block, 128*widen, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256*widen, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 512*widen, layers[3], stride=2)
        self.avgpool = nn.AvgPool2d((7, 7), 1, 0)
        self.fc = nn.Linear(512*widen * self.expansion, num_classes)
        # Kaiming-normal init for convs (when they expose a dense weight),
        # constant init for BatchNorm.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                if hasattr(m, 'weight'):
                    w = m.weight
                    nn.init.kaiming_normal_(w, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)

    def _make_layer(self, block, planes, blocks, stride=1):
        # 1x1 projection when the residual branch changes resolution or width.
        downsample = None
        if stride != 1 or self.inplanes != planes * self.expansion:
            downsample = nn.Sequential(OrderedDict([
                ('conv', self.Conv(self.inplanes, planes * self.expansion,
                          kernel_size=1, stride=stride, padding=0, bias=False)),
                ('bn', nn.BatchNorm2d(planes * self.expansion))
            ]))
        layers = []
        layers.append(block(self.inplanes, planes, self.Conv, stride, downsample, self.expansion))
        self.inplanes = planes * self.expansion
        for _ in range(1, blocks):
            layers.append(block(self.inplanes, planes, self.Conv, expansion=self.expansion))
        return nn.Sequential(*layers)

    def compression_ratio(self):
        return compression(self.__class__, self.kwargs)

    def grouped_parameters(self, weight_decay):
        # iterate over parameters and separate those in other layer types
        return group_lowrank(self.named_parameters(), weight_decay,
                self.compression_ratio())

    def forward(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        attention_maps = []
        attention = lambda x: F.normalize(x.pow(2).mean(1).view(x.size(0), -1))
        # BUGFIX: this used to branch on `if self.train:` — `train` is a bound
        # method, so the test was always truthy; both branches ran the same
        # non-checkpointed code anyway, so the dead branches are removed.
        x = self.layer1(x)
        attention_maps.append(attention(x))
        x = self.layer2(x)
        attention_maps.append(attention(x))
        x = self.layer3(x)
        attention_maps.append(attention(x))
        x = self.layer4(x)
        attention_maps.append(attention(x))
        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        x = self.fc(x)
        return x, attention_maps
def WRN_50_2(Conv, Block=None):
    """WideResNet-50-2 (ImageNet): ResNet-50 layout with doubled width and
    bottleneck expansion 2. `Block` is accepted for API symmetry but unused."""
    assert Block is None
    return ResNet(Conv, layers=[3, 4, 6, 3], widen=2, expansion=2)
def test():
    """Smoke test: report parameter-group sizes for two architectures, then
    run one ImageNet-sized forward pass."""
    def report(net):
        groups = [g['params'] for g in net.grouped_parameters(5e-4)]
        print("Low-rank: ", sum([p.numel() for p in groups[0]]))
        print("Full-rank: ", sum([p.numel() for p in groups[1]]))
        print("FC: ", sum([p.numel() for p in net.fc.parameters()]))
    report(WideResNet(28, 10, conv_function("Shuffle_7"), BasicBlock))
    net = WRN_50_2(conv_function("Shuffle_7"))
    report(net)
    x = torch.randn(1,3,224,224).float()
    y, _ = net(Variable(x))
    print(y.size())

if __name__ == '__main__':
    test()
| 10,717 | 36.872792 | 106 | py |
deficient-efficient | deficient-efficient-master/models/decomposed.py | # Substitute layer explicitly decomposing the tensors in convolutional layers
# All implemented using tntorch: https://github.com/rballester/tntorch
# All also use a separable design: the low-rank approximate pointwise
# convolution is preceded by a grouped convolution
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
import tntorch as tn
# Pin the process-wide default dtype to float32 for the tntorch decompositions.
torch.set_default_dtype(torch.float32)
def dimensionize(t, d, rank_scale):
    """Reshape `t` to `d` dimensions of roughly equal size and propose ranks.

    Returns (reshaped_tensor, ranks) where ranks holds 'ranks_tt',
    'ranks_tucker' and 'ranks_cp' entries, each scaled by `rank_scale`
    (minimum rank 1)."""
    total = t.numel()
    # Ideal dimension size: the d-th root of the element count, via logs.
    root = math.exp((1./d)*math.log(total))
    if abs(round(root) - root) < 1e-6:
        # Exact integer d-th root: use d equal dimensions.
        dims = [int(round(root))]*d
    elif t.ndimension() == d:
        # Already the right number of dimensions: keep them.
        dims = list(t.size())
    else:
        # Otherwise greedily peel off divisors close to the ideal root.
        dims = []
        remaining = total
        for _ in range(d-1):
            divisor = closest_divisor(remaining, int(round(root)))
            dims.append(divisor)
            remaining //= divisor
        dims.append(remaining)
    ranks = {
        'ranks_tt': [max(1, int(round(rank_scale*min(b, a))))
                     for b, a in zip(dims, dims[1:])],
        'ranks_tucker': [max(1, int(round(rank_scale*dim))) for dim in dims],
        'ranks_cp': max(1, int(round(rank_scale*min(dims)))),
    }
    return t.view(*dims), ranks
def closest_divisor(N, d):
    """Smallest divisor of N that is >= d (or N itself when N < d)."""
    if N < d:
        return N
    candidate = d
    while N % candidate != 0:
        candidate += 1
    return candidate
class TnTorchConv2d(nn.Conv2d):
    """Conv2d whose pointwise weight is stored as a tntorch decomposition.

    For kernel_size == 1 this is a plain pointwise conv; for larger kernels a
    depthwise (grouped) spatial conv is prepended and the decomposed weight
    covers only the pointwise part. `TnConstructor` maps a dense weight
    tensor to a `tn.Tensor` at the requested rank scale. The dense `weight`
    parameter is deleted and replaced by the decomposition's cores/factors,
    registered as parameters named weight_core_* / weight_u_*.
    """
    def __init__(self, in_channels, out_channels, kernel_size, rank_scale,
            TnConstructor, stride=1, padding=0, dilation=1, groups=1,
            bias=True):
        self.TnConstructor = TnConstructor
        assert groups == 1
        if kernel_size == 1:
            super(TnTorchConv2d, self).__init__(in_channels, out_channels, 1,
                    stride=stride, padding=padding, dilation=dilation, bias=bias)
        elif kernel_size > 1:
            # The 1x1 parent holds the pointwise weight; spatial filtering
            # (and the stride) is handled by a separate depthwise conv.
            super(TnTorchConv2d, self).__init__(in_channels, out_channels, 1, bias=bias)
            self.grouped = nn.Conv2d(in_channels, in_channels,
                    kernel_size, stride=stride, padding=padding, dilation=dilation,
                    groups=in_channels, bias=False)
        self.rank_scale = rank_scale
        # Decompose the freshly-initialised (squeezed n x d) pointwise weight.
        self.tn_weight = self.TnConstructor(self.weight.data.squeeze(), rank_scale=self.rank_scale)
        # store the correct size for this weight
        self.weight_size = self.weight.size()
        # check the fit to the weight initialisation
        self.store_metrics(self.weight)
        # delete the original weight
        del self.weight
        # then register the cores of the Tensor Train as parameters
        self.register_tnparams(self.tn_weight.cores, self.tn_weight.Us)
    def register_tnparams(self, cores, Us):
        """Register decomposition cores and Tucker factors as parameters,
        replacing any previously registered ones, and point the tn.Tensor at
        the registered Parameter objects so autograd sees a single storage."""
        cores = [] if all([c is None for c in cores]) else cores
        Us = [] if all([u is None for u in Us]) else Us
        # tensor train or cp cores
        for i,core in enumerate(cores):
            core_name = 'weight_core_%i'%i
            if hasattr(self, core_name):
                delattr(self, core_name)
            core.requires_grad = True
            self.register_parameter(core_name, nn.Parameter(core))
            # replace Parameter in tn.Tensor object
            self.tn_weight.cores[i] = getattr(self, core_name)
        for i, u in enumerate(Us):
            u_name = 'weight_u_%i'%i
            if hasattr(self, u_name):
                delattr(self, u_name)
            u.requires_grad = True
            self.register_parameter(u_name, nn.Parameter(u))
            # replace Parameter in tn.Tensor object
            self.tn_weight.Us[i] = getattr(self, u_name)
    def conv_weight(self):
        # Reassemble the dense pointwise weight (n, d, 1, 1) from the decomposition.
        weight = self.tn_weight.torch()
        n,d,_,_ = self.weight_size
        return weight.view(n,d,1,1)
    def reset_parameters(self):
        """Re-initialise with the standard Conv2d uniform scheme, then
        re-decompose so the stored cores match the new dense weight.

        NOTE: nn.Conv2d.__init__ also calls this before tn_weight exists,
        hence the hasattr guards."""
        if hasattr(self, 'tn_weight'):
            # full rank weight tensor
            weight = self.conv_weight()
        else:
            weight = self.weight.data
        n = self.in_channels
        for k in self.kernel_size:
            n *= k
        stdv = 1. / math.sqrt(n)
        weight.data.uniform_(-stdv, stdv)
        if hasattr(self, 'tn_weight'):
            self.tn_weight = self.TnConstructor(weight.data.squeeze(), rank_scale=self.rank_scale)
            # update cores
            self.register_tnparams(self.tn_weight.cores, self.tn_weight.Us)
        else:
            self.weight.data = weight
        if self.bias is not None:
            self.bias.data.uniform_(-stdv, stdv)
    def forward(self, x):
        # Depthwise spatial conv first (when kernel_size > 1), then the
        # reassembled pointwise conv.
        if hasattr(self, 'grouped'):
            out = self.grouped(x)
        else:
            out = x
        weight = self.conv_weight()
        return F.conv2d(out, weight, self.bias, self.stride, self.padding,
                self.dilation, self.groups)
    def store_metrics(self, full):
        # Record how well the decomposition fits the dense weight `full`.
        t = self.tn_weight
        full = full.view(t.torch().size())
        self.compression = (full.numel(), t.numel(), full.numel() / t.numel())
        self.relative_error = tn.relative_error(full, t)
        self.rmse = tn.rmse(full, t)
        self.r_squared = tn.r_squared(full, t)
    def extra_repr(self):
        # Shown by print(module): decomposition, compression and fit metrics.
        extra = []
        extra.append(self.tn_weight.__repr__())
        extra.append('Compression ratio: {}/{} = {:g}'.format(*self.compression))
        extra.append('Relative error: %f'%self.relative_error)
        extra.append('RMSE: %f'%self.rmse)
        extra.append('R^2: %f'%self.r_squared)
        return "\n".join(extra)
class TensorTrain(TnTorchConv2d):
    """Separable conv whose pointwise weight is stored as a Tensor-Train."""
    def __init__(self, in_channels, out_channels, kernel_size, rank_scale,
            dimensions, stride=1, padding=0, dilation=1, groups=1, bias=True):
        def build_tt(tensor, rank_scale):
            reshaped, ranks = dimensionize(tensor, dimensions, rank_scale)
            return tn.Tensor(reshaped, ranks_tt=ranks['ranks_tt'])
        super(TensorTrain, self).__init__(in_channels, out_channels,
                kernel_size, rank_scale, build_tt, stride=stride, padding=padding,
                dilation=dilation, groups=groups, bias=bias)
class Tucker(TnTorchConv2d):
    """Separable conv whose pointwise weight is stored as a Tucker decomposition."""
    def __init__(self, in_channels, out_channels, kernel_size, rank_scale,
            dimensions, stride=1, padding=0, dilation=1, groups=1, bias=True):
        def build_tucker(tensor, rank_scale):
            reshaped, ranks = dimensionize(tensor, dimensions, rank_scale)
            return tn.Tensor(reshaped, ranks_tucker=ranks['ranks_tucker'])
        super(Tucker, self).__init__(in_channels, out_channels, kernel_size,
                rank_scale, build_tucker, stride=stride, padding=padding,
                dilation=dilation, groups=groups, bias=bias)
class CP(TnTorchConv2d):
    """Separable conv whose pointwise weight is stored as a CP decomposition."""
    def __init__(self, in_channels, out_channels, kernel_size, rank_scale,
            dimensions, stride=1, padding=0, dilation=1, groups=1, bias=True):
        def build_cp(tensor, rank_scale):
            reshaped, ranks = dimensionize(tensor, dimensions, rank_scale)
            return tn.Tensor(reshaped, ranks_cp=ranks['ranks_cp'])
        super(CP, self).__init__(in_channels, out_channels, kernel_size,
                rank_scale, build_cp, stride=stride, padding=padding,
                dilation=dilation, groups=groups, bias=bias)
if __name__ == '__main__':
    # Smoke tests: for each decomposition, check gradients flow to the
    # registered cores, the registered parameter shares grads with the
    # tn.Tensor core, and CPU/GPU outputs agree.
    for ConvClass in [TensorTrain, Tucker, CP]:
        X = torch.randn(5,16,32,32)
        tnlayer = ConvClass(16,16,3,0.5,2,bias=False)
        tnlayer.reset_parameters()
        print(tnlayer)
        tnlayer.zero_grad()
        y = tnlayer(X)
        l = y.sum()
        l.backward()
        for n,p in tnlayer.named_parameters():
            assert p.requires_grad, n
        # registered parameter and tn.Tensor core must share the same grad
        assert torch.abs(tnlayer.weight_core_0.grad - tnlayer.tn_weight.cores[0].grad).max() < 1e-5
        # same output on the GPU
        tnlayer, X = tnlayer.cuda(), X.cuda()
        assert torch.abs(tnlayer(X).cpu() - y).max() < 1e-5
    # Repeat construction with 4 reshaped dimensions.
    for ConvClass in [TensorTrain, Tucker, CP]:
        X = torch.randn(5,16,32,32)
        tnlayer = ConvClass(16,16,3,0.5,4,bias=False)
        tnlayer.reset_parameters()
        print(tnlayer)
| 8,252 | 39.856436 | 99 | py |
deficient-efficient | deficient-efficient-master/models/MobileNetV2.py | import torch
import torch.nn as nn
import math
# wildcard import for legacy reasons
if __name__ == '__main__':
import sys
sys.path.append("..")
from models.blocks import *
from models.wide_resnet import compression, group_lowrank
# only used in the first convolution, which we do not substitute by convention
def conv_bn(inp, oup, stride):
    """Stem block: 3x3 conv (stride `stride`, padding 1) -> BatchNorm -> ReLU6."""
    stages = [
        nn.Conv2d(inp, oup, 3, stride, 1, bias=False),
        nn.BatchNorm2d(oup),
        nn.ReLU6(inplace=True),
    ]
    return nn.Sequential(*stages)
# only used for final fully connectec layers
def conv_1x1_bn(inp, oup, ConvClass):
    """Pointwise block: 1x1 `ConvClass` conv -> BatchNorm -> ReLU6."""
    stages = [
        ConvClass(inp, oup, 1, 1, 0, bias=False),
        nn.BatchNorm2d(oup),
        nn.ReLU6(inplace=True),
    ]
    return nn.Sequential(*stages)
class InvertedResidual(nn.Module):
    """MobileNetV2 inverted-residual block: optional pointwise expansion,
    depthwise 3x3, then linear pointwise projection, with a residual skip when
    stride == 1 and the channel counts match. Pointwise convs use `ConvClass`;
    the depthwise conv stays a standard grouped nn.Conv2d."""
    def __init__(self, inp, oup, stride, expand_ratio, ConvClass):
        super(InvertedResidual, self).__init__()
        assert stride in [1, 2]
        self.stride = stride
        self.Conv = ConvClass
        hidden_dim = round(inp * expand_ratio)
        self.use_res_connect = self.stride == 1 and inp == oup
        stages = []
        if expand_ratio != 1:
            # pointwise expansion (skipped when expand_ratio == 1)
            stages += [
                self.Conv(inp, hidden_dim, 1, 1, 0, bias=False),
                nn.BatchNorm2d(hidden_dim),
                nn.ReLU6(inplace=True),
            ]
        stages += [
            # depthwise
            nn.Conv2d(hidden_dim, hidden_dim, 3, stride, 1, groups=hidden_dim, bias=False),
            nn.BatchNorm2d(hidden_dim),
            nn.ReLU6(inplace=True),
            # pointwise linear projection (no activation)
            self.Conv(hidden_dim, oup, 1, 1, 0, bias=False),
            nn.BatchNorm2d(oup),
        ]
        self.conv = nn.Sequential(*stages)
    def forward(self, x):
        out = self.conv(x)
        return x + out if self.use_res_connect else out
class MobileNetV2(nn.Module):
    """MobileNetV2 with substitutable pointwise convolutions.

    `forward` returns (logits, attention_maps) — one normalised spatial
    attention map after every strided InvertedResidual block. The `block`
    parameter is accepted for API symmetry but ignored: InvertedResidual is
    always used (see the overwrite below).
    """
    def __init__(self, ConvClass, block=None, n_class=1000,
            input_size=224, width_mult=1.):
        super(MobileNetV2, self).__init__()
        # Saved so compression_ratio() can rebuild this exact architecture.
        self.kwargs = dict(ConvClass=ConvClass, block=block, n_class=n_class,
                input_size=input_size, width_mult=width_mult)
        # NOTE: the `block` argument is deliberately overwritten here.
        block = InvertedResidual
        self.Conv = ConvClass
        input_channel = 32
        last_channel = 1280
        # (expansion t, output channels c, repeats n, first-block stride s)
        interverted_residual_setting = [
            # t, c, n, s
            [1, 16, 1, 1],
            [6, 24, 2, 2],
            [6, 32, 3, 2],
            [6, 64, 4, 2],
            [6, 96, 3, 1],
            [6, 160, 3, 2],
            [6, 320, 1, 1],
        ]
        # building first layer
        assert input_size % 32 == 0
        input_channel = int(input_channel * width_mult)
        self.last_channel = int(last_channel * width_mult) if width_mult > 1.0 else last_channel
        self.features = [conv_bn(3, input_channel, 2)]
        # building inverted residual blocks
        for t, c, n, s in interverted_residual_setting:
            output_channel = int(c * width_mult)
            for i in range(n):
                # only the first block of each group applies the stride
                if i == 0:
                    self.features.append(block(input_channel, output_channel, s, expand_ratio=t, ConvClass=self.Conv))
                else:
                    self.features.append(block(input_channel, output_channel, 1, expand_ratio=t, ConvClass=self.Conv))
                input_channel = output_channel
        # building last several layers
        self.features.append(conv_1x1_bn(input_channel, self.last_channel, self.Conv))
        # make it nn.Sequential
        self.features = nn.Sequential(*self.features)
        # building classifier: a 1x1 conv standing in for the usual Linear,
        # so it too can be substituted/compressed
        self.classifier_conv = self.Conv(self.last_channel, n_class, 1, 1, 0, bias=True)
        #self.classifier = \
            #nn.Dropout(0.2), remove dropout for training according to github
        #    nn.(self.last_channel, n_class),
        #)
        self._initialize_weights()
    def classifier(self, x):
        # Apply the 1x1-conv classifier to pooled (N, C) features.
        n, c = x.size()
        x = self.classifier_conv(x.view(n,c,1,1))
        n, c, _, _ = x.size()
        return x.view(n,c)
    def forward(self, x):
        #y_orig = self.features(x)
        attention_maps = []
        # F comes from the wildcard `models.blocks` import at the top of the file.
        attention = lambda x: F.normalize(x.pow(2).mean(1).view(x.size(0), -1))
        y = x
        for block in self.features:
            y = block(y)
            if isinstance(block, InvertedResidual):
                if block.stride > 1:
                    attention_maps.append(attention(y))
        #error = torch.abs(y-y_orig).max()
        #assert error < 1e-2, f"Error {error} above 0.01"
        x = y
        # global average pooling over the spatial dimensions
        x = x.mean(3).mean(2)
        x = self.classifier(x)
        return x, attention_maps
    def compression_ratio(self):
        return compression(self.__class__, self.kwargs)
    def grouped_parameters(self, weight_decay):
        # Compressed-layer parameters get weight decay scaled by the
        # compression ratio; see group_lowrank.
        return group_lowrank(self.named_parameters(), weight_decay,
                self.compression_ratio())
    def _initialize_weights(self):
        # He-style init for convs, constant init for BN, normal init for Linear.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                if hasattr(m, 'weight'):
                    m.weight.data.normal_(0, math.sqrt(2. / n))
                if m.bias is not None:
                    m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                n = m.weight.size(1)
                m.weight.data.normal_(0, 0.01)
                m.bias.data.zero_()
def save_reference():
    """Generate and save a reference (input, output, state_dict) triple for
    regression-testing MobileNetV2 (consumed by `test()`)."""
    # BUGFIX: MobileNetV2 requires a ConvClass positional argument — calling
    # MobileNetV2() raised TypeError. Use the standard Conv so the reference
    # corresponds to the uncompressed network.
    net = MobileNetV2(Conv)
    net.eval()
    x = torch.randn(1,3,224,224).float()
    # BUGFIX: forward returns (logits, attention_maps); only the logits are
    # the reference output — the old code called .size() on the tuple.
    y, _ = net(x)
    print(y.size())
    torch.save(x, "reference_input_mobilenet.torch")
    torch.save(y, "reference_output_mobilenet.torch")
    torch.save(net.state_dict(), "reference_state_mobilenet.torch")
def match_keys(net, state):
    """Copy a legacy checkpoint's parameters into `net`'s state_dict by
    position, reshaping the classifier weight from (n, m) to (n, m, 1, 1)
    since the old Linear classifier was replaced by a 1x1 conv."""
    nstate = net.state_dict()
    old_keys = list(state)
    classifier_weight_idx = len(old_keys) - 2
    for i, key in enumerate(nstate):
        value = state[old_keys[i]]
        if i == classifier_weight_idx:
            n, m = value.size()
            value = value.view(n, m, 1, 1)
        nstate[key] = value
    return nstate
def test():
    """Regression test: load the saved reference weights (if present), run the
    reference input through the network, report the deviation from the saved
    reference output, and re-save the state in this repo's checkpoint format."""
    import os
    net = MobileNetV2(Conv)
    if os.path.exists("reference_state_mobilenet.torch"):
        # Load legacy weights, remapping keys/shapes via match_keys.
        state = torch.load("reference_state_mobilenet.torch")
        state = match_keys(net, state)
        net.load_state_dict(state)
        net.eval()
        x = torch.load("reference_input_mobilenet.torch")
    else:
        x = torch.randn(1,3,224,224).float()
    y, _ = net(Variable(x))
    print(y.size())
    # check if these match the test weights
    if os.path.exists("reference_output_mobilenet.torch"):
        ref_output = torch.load("reference_output_mobilenet.torch")
        error = torch.abs(ref_output - y).max()
        print(f"Error: {error}, Max logit: {y.max()}/{ref_output.max()}, Min logit: {y.min()}/{ref_output.min()}")
    # Save in this repository's checkpoint format (metadata fields unused here).
    state = {
        'net': net.state_dict(),
        'epoch': 150,
        'args': None,
        'width': None,
        'depth': None,
        'conv': 'Conv',
        'blocktype': None,
        'module': None,
        'train_losses': None,
        'train_errors': None,
        'val_losses': None,
        'val_errors': [28.2],
    }
    torch.save(state, "mobilenetv2.tonylins.t7")
def test_compression():
    """Debug helper: print per-block (and per-layer) parameter counts for the
    baseline network, then compression ratios for several substitute convs."""
    net = MobileNetV2(Conv)
    #net = MobileNetV2(conv_function('Hashed_0.1'))
    nparams = lambda x: sum([p.numel() for p in x.parameters()])
    for block in net.features:
        print(nparams(block))
        # BUGFIX: `for x in block` fails for InvertedResidual (nn.Module is
        # not iterable); .children() works for Sequential and plain Modules
        # alike and yields the same layers for Sequential.
        for x in block.children():
            print(x)
            print(nparams(x))
    #CompressedConv = conv_function("Hashed_0.1")
    for conv in ['Shuffle_%i'%i for i in [4,8,16,32]]+['Hashed_0.01']:
        print(conv)
        CompressedConv = conv_function(conv)
        net = MobileNetV2(CompressedConv)
        print(" ", net.compression_ratio())
# Entry point: run the reference-checkpoint comparison by default.
if __name__ == '__main__':
    test()
    #test_compression()
| 8,316 | 33.086066 | 118 | py |
deficient-efficient | deficient-efficient-master/models/blocks.py | # blocks and convolution definitions
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from torch.utils.checkpoint import checkpoint, checkpoint_sequential
if __name__ == 'blocks' or __name__ == '__main__':
from hashed import HashedConv2d, HalfHashedSeparable, HashedSeparable
from decomposed import TensorTrain, Tucker, CP
else:
from .hashed import HashedConv2d, HalfHashedSeparable, HashedSeparable
from .decomposed import TensorTrain, Tucker, CP
def HashedDecimate(in_channels, out_channels, kernel_size, stride=1,
        padding=0, dilation=1, groups=1, bias=False):
    """HashedConv2d whose weight budget is a tenth of the dense layer's."""
    dense_params = (out_channels * in_channels * kernel_size * kernel_size) // groups
    return HashedConv2d(in_channels, out_channels, kernel_size,
            dense_params // 10, stride=stride, padding=padding,
            dilation=dilation, groups=groups, bias=bias)
def SepHashedDecimate(in_channels, out_channels, kernel_size, stride=1,
        padding=0, dilation=1, groups=1, bias=False):
    """Half-hashed separable convolution with a tenth of the dense budget.

    A sanity check confirms the instantiated layer does not exceed the
    budget (plus the bias of the hashed pointwise component).
    """
    assert groups == 1
    budget = (out_channels * in_channels * kernel_size * kernel_size) // 10
    conv = HalfHashedSeparable(in_channels, out_channels, kernel_size,
            budget, stride=stride, padding=padding, dilation=dilation,
            groups=groups, bias=bias)
    n_params = sum(p.numel() for p in conv.parameters())
    budget += conv.hashed.bias.numel()
    assert n_params <= budget, f"{n_params} > {budget}"
    return conv
from pytorch_acdc.layers import FastStackedConvACDC
def ACDC(in_channels, out_channels, kernel_size, stride=1,
        padding=0, dilation=1, groups=1, bias=False):
    """Stack of 12 fast ACDC layers standing in for a dense convolution."""
    return FastStackedConvACDC(
            in_channels, out_channels, kernel_size, 12, stride=stride,
            padding=padding, dilation=dilation, groups=groups, bias=bias)
def OriginalACDC(in_channels, out_channels, kernel_size, stride=1,
        padding=0, dilation=1, groups=1, bias=False):
    """Stack of 12 ACDC layers using the original (non-fast) formulation."""
    return FastStackedConvACDC(
            in_channels, out_channels, kernel_size, 12, stride=stride,
            padding=padding, dilation=dilation, groups=groups, bias=bias,
            original=True)
class GenericLowRank(nn.Module):
    """Generic low-rank layer built from an explicit linear bottleneck.

    A depthwise convolution (created only when ``kernel_size > 1``) handles
    spatial filtering; two 1x1 convolutions then contract the channels to
    ``rank`` and expand back out, factorising the channel mixing.
    """
    def __init__(self, in_channels, out_channels, kernel_size, rank, stride=1,
            padding=0, dilation=1, groups=1, bias=False):
        assert groups == 1
        super(GenericLowRank, self).__init__()
        spatial = kernel_size > 1
        if spatial:
            self.grouped = nn.Conv2d(in_channels, in_channels, kernel_size,
                    stride=stride, padding=padding, dilation=dilation,
                    groups=in_channels, bias=False)
            # stride/dilation already applied by the depthwise stage
            self.lowrank_contract = nn.Conv2d(in_channels, rank, 1, bias=False)
        else:
            self.grouped = None
            self.lowrank_contract = nn.Conv2d(in_channels, rank, 1,
                    stride=stride, dilation=dilation, bias=False)
        self.lowrank_expand = nn.Conv2d(rank, out_channels, 1, bias=bias)

    def forward(self, x):
        if self.grouped is not None:
            x = self.grouped(x)
        return self.lowrank_expand(self.lowrank_contract(x))
class LowRank(nn.Module):
    """Low-rank layer whose expansion reuses the transposed contraction
    weight, so both 1x1 stages share one parameter tensor.

    A depthwise convolution (created only when ``kernel_size > 1``) handles
    spatial filtering.  Requires ``out_channels`` to be a multiple of
    ``in_channels``; the input is channel-tiled by that ratio before the
    shared-weight bottleneck.
    """
    def __init__(self, in_channels, out_channels, kernel_size, rank, stride=1,
            padding=0, dilation=1, groups=1, bias=False):
        assert groups == 1
        assert out_channels % in_channels == 0
        self.upsample = out_channels // in_channels
        super(LowRank, self).__init__()
        wide = self.upsample * in_channels
        if kernel_size > 1:
            self.grouped = nn.Conv2d(in_channels, in_channels, kernel_size,
                    stride=stride, padding=padding, dilation=dilation,
                    groups=in_channels, bias=False)
            self.lowrank = nn.Conv2d(wide, rank, 1, bias=bias)
        else:
            self.grouped = None
            self.lowrank = nn.Conv2d(wide, rank, 1, stride=stride,
                    dilation=dilation, bias=bias)

    def forward(self, x):
        if self.grouped is not None:
            x = self.grouped(x)
        if self.upsample > 1:
            x = x.repeat(1, self.upsample, 1, 1)
        lr = self.lowrank
        # contract with the weight, then expand with its transpose
        x = F.conv2d(x, lr.weight, None, lr.stride, lr.padding,
                lr.dilation, lr.groups)
        return F.conv2d(x, lr.weight.permute(1, 0, 2, 3), lr.bias)
# from: https://github.com/kuangliu/pytorch-cifar/blob/master/models/shufflenet.py#L10-L19
class ShuffleBlock(nn.Module):
    """Interleave channels across ``groups`` groups (ShuffleNet shuffle)."""
    def __init__(self, groups):
        super(ShuffleBlock, self).__init__()
        self.groups = groups

    def forward(self, x):
        # [N,C,H,W] -> [N,g,C/g,H,W] -> [N,C/g,g,H,W] -> [N,C,H,W]
        batch, channels, height, width = x.size()
        per_group = channels // self.groups
        shuffled = x.view(batch, self.groups, per_group, height, width)
        shuffled = shuffled.transpose(1, 2).contiguous()
        return shuffled.view(batch, channels, height, width)


class LinearShuffleNet(nn.Module):
    """Linear ShuffleNet block: grouped 1x1, channel shuffle, depthwise,
    grouped 1x1 — with no nonlinearities and no shortcut (the networks
    substituting this layer already provide shortcuts).  Being linear, it
    can be viewed as a low-rank tensor decomposition."""
    def __init__(self, in_channels, out_channels, kernel_size, shuffle_groups,
            stride=1, padding=0, dilation=1, groups=1, bias=False):
        assert groups == 1
        super(LinearShuffleNet, self).__init__()
        # why 4? https://github.com/jaxony/ShuffleNet/blob/master/model.py#L67
        hidden = out_channels // 4
        self.shuffle_gconv1 = nn.Conv2d(in_channels, hidden, 1,
                groups=shuffle_groups, bias=False)
        self.shuffle = ShuffleBlock(shuffle_groups)
        self.shuffle_dwconv = nn.Conv2d(hidden, hidden, kernel_size,
                stride=stride, padding=padding, dilation=dilation,
                groups=hidden, bias=False)
        self.shuffle_gconv2 = nn.Conv2d(hidden, out_channels, 1,
                groups=shuffle_groups, bias=bias)

    def forward(self, x):
        out = self.shuffle_gconv1(x)
        out = self.shuffle(out)
        out = self.shuffle_dwconv(out)
        return self.shuffle_gconv2(out)
def cant_be_shuffled(shuffle_groups, in_channels, out_channels):
    """True if a LinearShuffleNet cannot be built with this group count:
    the groups must divide the input, output and bottleneck (out/4) widths."""
    widths = (in_channels, out_channels, out_channels // 4)
    return any(w % shuffle_groups != 0 for w in widths)
class DepthwiseSep(nn.Module):
    """Depthwise-separable convolution: a per-channel spatial filter followed
    by a 1x1 channel-mixing convolution.  For 1x1 kernels the spatial stage
    is skipped entirely (no ``grouped`` attribute is created, which callers
    probe with hasattr/getattr)."""
    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
            padding=0, dilation=1, groups=1, bias=True):
        super(DepthwiseSep, self).__init__()
        assert groups == 1
        if kernel_size > 1:
            self.grouped = nn.Conv2d(in_channels, in_channels, kernel_size,
                    stride=stride, padding=padding, dilation=dilation,
                    groups=in_channels, bias=False)
            self.pointwise = nn.Conv2d(in_channels, out_channels, 1, bias=bias)
        else:
            self.pointwise = nn.Conv2d(in_channels, out_channels, 1,
                    stride=stride, padding=padding, dilation=dilation,
                    bias=bias)

    def forward(self, x):
        spatial = getattr(self, 'grouped', None)
        out = x if spatial is None else spatial(x)
        return self.pointwise(out)
class Conv(nn.Module):
    """Plain nn.Conv2d wrapped to share the constructor interface of the
    compressed substitutes in this module.  Note the default padding is 1
    (suited to 3x3 kernels), unlike nn.Conv2d's default of 0."""
    def __init__(self, in_planes, out_planes, kernel_size, stride=1, padding=1,
            dilation=1, bias=False):
        super(Conv, self).__init__()
        self.conv = nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size,
                stride=stride, padding=padding, dilation=dilation, bias=bias)

    def forward(self, x):
        return self.conv(x)
def conv_function(convtype):
    """Map a convolution spec string to a conv factory.

    The returned factory has the nn.Conv2d-style signature
    ``(in_channels, out_channels, kernel_size, stride, padding, dilation,
    groups, bias)``.  A spec is either a bare name ('Conv', 'ACDC',
    'OriginalACDC', 'HashedDecimate', 'SepHashedDecimate', 'Sep') or a name
    and hyperparameter joined by an underscore, e.g. 'Hashed_0.1' or
    'Shuffle_8'; the hyperparameter's meaning depends on the name (layer
    count, parameter-budget scale, rank scale or shuffle group count).

    Raises ValueError for an unrecognised bare name.
    """
    # if convtype contains an underscore, it must have a hyperparam in it
    if "_" in convtype:
        convtype, hyperparam = convtype.split("_")
        if convtype == 'ACDC':
            # then hyperparam controls how many layers in each conv
            n_layers = int(round(float(hyperparam)))
            def conv(in_channels, out_channels, kernel_size, stride=1,
                    padding=0, dilation=1, groups=1, bias=False):
                return FastStackedConvACDC(in_channels, out_channels,
                        kernel_size, n_layers, stride=stride,
                        padding=padding, dilation=dilation, groups=groups,
                        bias=bias)
        elif convtype == 'Hashed':
            # then hyperparam controls relative budget for each layer
            budget_scale = float(hyperparam)
            def conv(in_channels, out_channels, kernel_size, stride=1,
                    padding=0, dilation=1, groups=1, bias=False):
                # budget is budget_scale times the dense layer's parameters
                original_params = out_channels*in_channels*kernel_size*kernel_size // groups
                budget = int(original_params*budget_scale)
                return HashedConv2d(in_channels, out_channels, kernel_size,
                        budget, stride=stride, padding=padding,
                        dilation=dilation, groups=groups, bias=bias)
        elif convtype == 'SepHashed':
            # then hyperparam controls relative budget for each layer
            budget_scale = float(hyperparam)
            def conv(in_channels, out_channels, kernel_size, stride=1,
                    padding=0, dilation=1, groups=1, bias=False):
                original_params = out_channels*in_channels // groups
                budget = int(original_params*budget_scale)
                if kernel_size > 1: # budget for a grouped convolution
                    budget += in_channels*kernel_size*kernel_size
                return HalfHashedSeparable(in_channels, out_channels, kernel_size,
                        budget, stride=stride, padding=padding,
                        dilation=dilation, groups=groups, bias=bias)
        elif convtype == 'Generic':
            # hyperparam scales the bottleneck rank
            rank_scale = float(hyperparam)
            def conv(in_channels, out_channels, kernel_size, stride=1,
                    padding=0, dilation=1, groups=1, bias=False):
                full_rank = max(in_channels,out_channels)
                rank = int(rank_scale*full_rank)
                return GenericLowRank(in_channels, out_channels, kernel_size,
                        rank, stride=stride, padding=padding,
                        dilation=dilation, groups=groups, bias=bias)
        elif convtype == 'LR':
            # hyperparam scales the shared-weight bottleneck rank
            rank_scale = float(hyperparam)
            def conv(in_channels, out_channels, kernel_size, stride=1,
                    padding=0, dilation=1, groups=1, bias=False):
                full_rank = max(in_channels,out_channels)
                rank = int(rank_scale*full_rank)
                return LowRank(in_channels, out_channels, kernel_size,
                        rank, stride=stride, padding=padding,
                        dilation=dilation, groups=groups, bias=bias)
        elif convtype == 'TensorTrain':
            rank_scale = float(hyperparam)
            def conv(in_channels, out_channels, kernel_size, stride=1,
                    padding=0, dilation=1, groups=1, bias=False):
                return TensorTrain(in_channels, out_channels, kernel_size,
                        rank_scale, 3, stride=stride, padding=padding,
                        dilation=dilation, groups=groups, bias=bias)
        elif convtype == 'Tucker':
            rank_scale = float(hyperparam)
            def conv(in_channels, out_channels, kernel_size, stride=1,
                    padding=0, dilation=1, groups=1, bias=False):
                return Tucker(in_channels, out_channels, kernel_size,
                        rank_scale, 3, stride=stride, padding=padding,
                        dilation=dilation, groups=groups, bias=bias)
        elif convtype == 'CP':
            # kept for reference only; the assert below makes it unusable
            assert False, "Deprecated"
            rank_scale = float(hyperparam)
            def conv(in_channels, out_channels, kernel_size, stride=1,
                    padding=0, dilation=1, groups=1, bias=False):
                return CP(in_channels, out_channels, kernel_size,
                        rank_scale, stride=stride, padding=padding,
                        dilation=dilation, groups=groups, bias=bias)
        elif convtype == 'Shuffle':
            def conv(in_channels, out_channels, kernel_size, stride=1,
                    padding=0, dilation=1, groups=1, bias=False):
                # decrement the group count until it divides all widths
                # (terminates: every width is divisible by 1)
                shuffle_groups = int(hyperparam)
                while cant_be_shuffled(shuffle_groups, in_channels, out_channels):
                    shuffle_groups += -1
                return LinearShuffleNet(in_channels, out_channels, kernel_size,
                        shuffle_groups, stride=stride, padding=padding,
                        dilation=dilation, groups=groups, bias=bias)
    else:
        if convtype == 'Conv':
            conv = Conv
        elif convtype =='ACDC':
            conv = ACDC
        elif convtype =='OriginalACDC':
            conv = OriginalACDC
        elif convtype == 'HashedDecimate':
            conv = HashedDecimate
        elif convtype == 'SepHashedDecimate':
            conv = SepHashedDecimate
        elif convtype == 'Sep':
            conv = DepthwiseSep
        else:
            raise ValueError('Conv "%s" not recognised'%convtype)
    return conv
class BasicBlock(nn.Module):
    """Pre-activation wide-resnet basic block: BN-ReLU-conv twice, with a
    1x1 convolution on the shortcut only when the channel count changes."""
    def __init__(self, in_planes, out_planes, stride, dropRate=0.0, conv=Conv):
        super(BasicBlock, self).__init__()
        self.bn1 = nn.BatchNorm2d(in_planes)
        self.relu1 = nn.ReLU(inplace=True)
        self.conv1 = conv(in_planes, out_planes, kernel_size=3, stride=stride,
                          padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(out_planes)
        self.relu2 = nn.ReLU(inplace=True)
        self.conv2 = conv(out_planes, out_planes, kernel_size=3, stride=1,
                          padding=1, bias=False)
        self.droprate = dropRate
        self.equalInOut = in_planes == out_planes
        if self.equalInOut:
            self.convShortcut = None
        else:
            self.convShortcut = nn.Conv2d(in_planes, out_planes, kernel_size=1,
                                          stride=stride, padding=0, bias=False)

    def forward(self, x):
        preact = self.relu1(self.bn1(x))
        if not self.equalInOut:
            # the shortcut projection also consumes the pre-activation
            x = preact
        out = self.relu2(self.bn2(self.conv1(preact)))
        if self.droprate > 0:
            out = F.dropout(out, p=self.droprate, training=self.training)
        out = self.conv2(out)
        shortcut = x if self.equalInOut else self.convShortcut(x)
        return torch.add(shortcut, out)
# modified from torchvision
class Bottleneck(nn.Module):
    """Bottleneck ResNet block: 1x1 reduce, 3x3, 1x1 expand by ``expansion``,
    each followed by BatchNorm, with ReLUs and a residual connection.  The
    3x3 stage uses the injected ``ConvClass`` so compressed substitutes can
    be dropped in."""
    def __init__(self, inplanes, planes, ConvClass, stride=1, downsample=None, expansion=4):
        super(Bottleneck, self).__init__()
        self.expansion = expansion
        def pointwise(n_in, n_out):
            return ConvClass(n_in, n_out, kernel_size=1, padding=0, bias=False)
        self.conv1 = pointwise(inplanes, planes)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = ConvClass(planes, planes, kernel_size=3, stride=stride,
                padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = pointwise(planes, planes * self.expansion)
        self.bn3 = nn.BatchNorm2d(planes * self.expansion)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def add_residual(self, x, out):
        # shortcut path, projected by ``downsample`` when shapes differ
        residual = x if self.downsample is None else self.downsample(x)
        return out + residual

    def forward(self, x):
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        out = self.add_residual(x, out)
        return self.relu(out)
class NetworkBlock(nn.Module):
    """A stage of ``nb_layers`` blocks; only the first block applies
    ``stride`` and widens from ``in_planes`` to ``out_planes``."""
    def __init__(self, nb_layers, in_planes, out_planes, block, stride, dropRate=0.0, conv = Conv):
        super(NetworkBlock, self).__init__()
        self.layer = self._make_layer(block, in_planes, out_planes, nb_layers, stride, dropRate, conv)

    def _make_layer(self, block, in_planes, out_planes, nb_layers, stride, dropRate, conv):
        layers = [
            block(in_planes if i == 0 else out_planes, out_planes,
                  stride if i == 0 else 1, dropRate, conv)
            for i in range(nb_layers)
        ]
        return nn.Sequential(*layers)

    def forward(self, x):
        return self.layer(x)
if __name__ == '__main__':
    # smoke tests for the layers defined above
    X = torch.randn(5,16,32,32)
    # sanity of generic low-rank layer
    generic = GenericLowRank(16, 32, 3, 2)
    for n,p in generic.named_parameters():
        print(n, p.size(), p.numel())
    out = generic(X)
    print(out.size())
    low = LowRank(16, 32, 3, 2)
    for n, p in low.named_parameters():
        print(n, p.size(), p.numel())
    out = low(X)
    print(out.size())
    # NOTE: execution stops here; the checks below are currently disabled
    assert False
    # check we don't initialise a grouped conv when not required
    layers_to_test = [LowRank(3,32,1,1), GenericLowRank(3,32,1,1),
            HalfHashedSeparable(3,32,1,10), TensorTrain(3,32,1,0.5,3),
            Tucker(3,32,1,0.5,3), CP(3,32,1,0.5,3), ACDC(3,32,1)]
    for layer in layers_to_test:
        assert getattr(layer, 'grouped', None) is None
    # and we *do* when it is required
    layers_to_test = [LowRank(3,32,3,1), GenericLowRank(3,32,3,1),
            HalfHashedSeparable(3,32,3,100), TensorTrain(3,32,3,0.5,3),
            Tucker(3,32,3,0.5,3), CP(3,32,3,0.5,3), ACDC(3,32,3)]
    for layer in layers_to_test:
        assert getattr(layer, 'grouped', None) is not None, layer
    # sanity of LinearShuffleNet
    X = torch.randn(5,16,32,32)
    shuffle = LinearShuffleNet(16,32,3,4)
    print(shuffle(X).size())
| 18,941 | 43.992874 | 120 | py |
multimodal-vae-public | multimodal-vae-public-master/vision/sample.py | from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import numpy as np
from PIL import Image
import torch
import torch.nn.functional as F
from torch.autograd import Variable
from torchvision import transforms
from torchvision.utils import save_image
from train import load_checkpoint
from datasets import obscure_image
from datasets import add_watermark
# this is the same loader used in datasets.py
# resize the shortest side to 64 then center-crop to a 64x64 tensor
image_transform = transforms.Compose([transforms.Resize(64),
                                      transforms.CenterCrop(64),
                                      transforms.ToTensor()])
if __name__ == "__main__":
    import os
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('model_path', type=str, help='path to trained model file')
    parser.add_argument('--condition-file', type=str,
                        help='if specified, condition on this image.')
    parser.add_argument('--condition-type', type=str,
                        help='image|gray|edge|mask|obscured|watermark')
    parser.add_argument('--n-samples', type=int, default=1,
                        help='Number of images and texts to sample [default: 1]')
    parser.add_argument('--cuda', action='store_true', default=False,
                        help='enables CUDA training')
    args = parser.parse_args()
    args.cuda = args.cuda and torch.cuda.is_available()
    if args.condition_type:
        assert args.condition_type in ['image', 'gray', 'edge', 'mask', 'obscured', 'watermark']

    if not os.path.isdir('./samples'):
        os.makedirs('./samples')

    model = load_checkpoint(args.model_path, use_cuda=args.cuda)
    model.eval()
    if args.cuda:
        model.cuda()

    if args.condition_file and args.condition_type:
        # condition the latent distribution on a single input modality.
        # BUGFIX: MVAE.get_params takes the modalities as keyword arguments
        # only; the stray positional `1` the old code passed collided with
        # the first keyword and raised TypeError.
        image = Image.open(args.condition_file)
        if args.condition_type == 'image':
            image = image.convert('RGB')
            image = image_transform(image).unsqueeze(0)
            save_image(image, './samples/sample_image.png')
            if args.cuda:
                image = image.cuda()
            image = Variable(image, volatile=True)
            mu, logvar = model.get_params(image=image)
        elif args.condition_type == 'gray':
            image = image.convert('L')
            image = image_transform(image).unsqueeze(0)
            save_image(image, './samples/sample_gray.png')
            if args.cuda:
                image = image.cuda()
            image = Variable(image, volatile=True)
            mu, logvar = model.get_params(gray=image)
        elif args.condition_type == 'edge':
            image = image.convert('L')
            image = image_transform(image).unsqueeze(0)
            save_image(image, './samples/sample_edge.png')
            if args.cuda:
                image = image.cuda()
            image = Variable(image, volatile=True)
            mu, logvar = model.get_params(edge=image)
        elif args.condition_type == 'mask':
            # masks are inverted to match the dataset convention
            image = image.convert('L')
            image = 1 - image_transform(image).unsqueeze(0)
            save_image(image, './samples/sample_mask.png')
            if args.cuda:
                image = image.cuda()
            image = Variable(image, volatile=True)
            mu, logvar = model.get_params(mask=image)
        elif args.condition_type == 'obscured':
            image = image.convert('RGB')
            image = obscure_image(image)
            image = image_transform(image).unsqueeze(0)
            save_image(image, './samples/sample_obscured.png')
            if args.cuda:
                image = image.cuda()
            image = Variable(image, volatile=True)
            mu, logvar = model.get_params(obscured=image)
        elif args.condition_type == 'watermark':
            image = image.convert('RGB')
            image = add_watermark(image)
            image = image_transform(image).unsqueeze(0)
            save_image(image, './samples/sample_watermark.png')
            if args.cuda:
                image = image.cuda()
            image = Variable(image, volatile=True)
            mu, logvar = model.get_params(watermark=image)
        std = logvar.mul(0.5).exp_()
    else:  # sample from uniform Gaussian prior
        mu = Variable(torch.Tensor([0]))
        std = Variable(torch.Tensor([1]))
        if args.cuda:
            mu = mu.cuda()
            std = std.cuda()

    # draw standard-normal noise, then shift/scale into N(mu, std^2)
    sample = Variable(torch.randn(args.n_samples, model.n_latents))
    if args.cuda:
        sample = sample.cuda()
    mu = mu.expand_as(sample)
    std = std.expand_as(sample)
    sample = sample.mul(std).add_(mu)

    # decode the latent samples into every modality (decoders emit logits)
    image_recon = F.sigmoid(model.image_decoder(sample)).cpu().data
    gray_recon = F.sigmoid(model.gray_decoder(sample)).cpu().data
    edge_recon = F.sigmoid(model.edge_decoder(sample)).cpu().data
    mask_recon = F.sigmoid(model.mask_decoder(sample)).cpu().data
    obscured_recon = F.sigmoid(model.obscured_decoder(sample)).cpu().data
    watermark_recon = F.sigmoid(model.watermark_decoder(sample)).cpu().data

    # save image samples to filesystem.  BUGFIX: removed the save of an
    # undefined `rotated_recon` -- the model has no rotated modality.
    save_image(image_recon, './samples/sample_image.png')
    save_image(gray_recon, './samples/sample_gray.png')
    save_image(edge_recon, './samples/sample_edge.png')
    save_image(mask_recon, './samples/sample_mask.png')
    save_image(obscured_recon, './samples/sample_obscured.png')
    save_image(watermark_recon, './samples/sample_watermark.png')
| 5,676 | 40.437956 | 96 | py |
multimodal-vae-public | multimodal-vae-public-master/vision/model.py | from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import sys
import torch
import torch.nn as nn
from torch.autograd import Variable
from torch.nn import functional as F
class MVAE(nn.Module):
    """Multimodal VAE over six aligned image modalities (color image,
    grayscale, edge map, mask, obscured, watermarked), combined with a
    product-of-experts posterior.

    @param n_latents: integer
                      number of latent dimensions
    @param use_cuda: boolean [default: False]
                     cast CUDA on variables
    """
    def __init__(self, n_latents=250, use_cuda=False):
        super(MVAE, self).__init__()
        # define q(z|x_i) for i = 1...6
        self.image_encoder = ImageEncoder(n_latents, 3)
        self.gray_encoder = ImageEncoder(n_latents, 1)
        self.edge_encoder = ImageEncoder(n_latents, 1)
        self.mask_encoder = ImageEncoder(n_latents, 1)
        self.obscured_encoder = ImageEncoder(n_latents, 3)
        self.watermark_encoder = ImageEncoder(n_latents, 3)
        # define p(x_i|z) for i = 1...6
        self.image_decoder = ImageDecoder(n_latents, 3)
        self.gray_decoder = ImageDecoder(n_latents, 1)
        self.edge_decoder = ImageDecoder(n_latents, 1)
        self.mask_decoder = ImageDecoder(n_latents, 1)
        self.obscured_decoder = ImageDecoder(n_latents, 3)
        self.watermark_decoder = ImageDecoder(n_latents, 3)
        # define q(z|x) = q(z|x_1)...q(z|x_6)
        self.experts = ProductOfExperts()
        self.n_latents = n_latents
        self.use_cuda = use_cuda  # BUGFIX: removed stray backtick (SyntaxError)

    def reparametrize(self, mu, logvar):
        """Sample z ~ N(mu, exp(logvar)) via the reparametrization trick
        during training; return the mean at inference time."""
        if self.training:
            std = logvar.mul(0.5).exp_()
            eps = Variable(std.data.new(std.size()).normal_())
            return eps.mul(std).add_(mu)
        else:  # return mean during inference
            return mu

    def forward(self, image=None, gray=None, edge=None, mask=None,
                obscured=None, watermark=None):
        """Encode whichever modalities are supplied, sample z, and
        reconstruct all six modalities.

        Returns the six reconstructions (image, gray, edge, mask, obscured,
        watermark) followed by mu and logvar.
        """
        mu, logvar = self.get_params(image=image, gray=gray, edge=edge, mask=mask,
                                     obscured=obscured, watermark=watermark)
        # reparametrization trick to sample
        z = self.reparametrize(mu, logvar)
        # reconstruct inputs based on sample
        image_recon = self.image_decoder(z)
        gray_recon = self.gray_decoder(z)
        edge_recon = self.edge_decoder(z)
        mask_recon = self.mask_decoder(z)
        obscured_recon = self.obscured_decoder(z)
        watermark_recon = self.watermark_decoder(z)
        # BUGFIX: the old return listed an undefined `rotated_recon` and
        # dropped `watermark_recon`
        return (image_recon, gray_recon, edge_recon, mask_recon,
                obscured_recon, watermark_recon, mu, logvar)

    def get_params(self, image=None, gray=None, edge=None,
                   mask=None, obscured=None, watermark=None):
        """Combine the universal prior expert with one encoder per supplied
        modality and return the product-of-experts posterior parameters."""
        # define universal expert
        batch_size = get_batch_size(image, gray, edge, mask, obscured, watermark)
        use_cuda = next(self.parameters()).is_cuda  # check if CUDA
        # initialize the universal prior expert
        mu, logvar = prior_expert((1, batch_size, self.n_latents),
                                  use_cuda=use_cuda)
        if image is not None:
            image_mu, image_logvar = self.image_encoder(image)
            mu = torch.cat((mu, image_mu.unsqueeze(0)), dim=0)
            logvar = torch.cat((logvar, image_logvar.unsqueeze(0)), dim=0)
        if gray is not None:
            gray_mu, gray_logvar = self.gray_encoder(gray)
            mu = torch.cat((mu, gray_mu.unsqueeze(0)), dim=0)
            logvar = torch.cat((logvar, gray_logvar.unsqueeze(0)), dim=0)
        if edge is not None:
            edge_mu, edge_logvar = self.edge_encoder(edge)
            mu = torch.cat((mu, edge_mu.unsqueeze(0)), dim=0)
            logvar = torch.cat((logvar, edge_logvar.unsqueeze(0)), dim=0)
        if mask is not None:
            mask_mu, mask_logvar = self.mask_encoder(mask)
            mu = torch.cat((mu, mask_mu.unsqueeze(0)), dim=0)
            logvar = torch.cat((logvar, mask_logvar.unsqueeze(0)), dim=0)
        if obscured is not None:
            obscured_mu, obscured_logvar = self.obscured_encoder(obscured)
            mu = torch.cat((mu, obscured_mu.unsqueeze(0)), dim=0)
            logvar = torch.cat((logvar, obscured_logvar.unsqueeze(0)), dim=0)
        if watermark is not None:
            watermark_mu, watermark_logvar = self.watermark_encoder(watermark)
            mu = torch.cat((mu, watermark_mu.unsqueeze(0)), dim=0)
            logvar = torch.cat((logvar, watermark_logvar.unsqueeze(0)), dim=0)
        # product of experts to combine gaussians
        mu, logvar = self.experts(mu, logvar)
        return mu, logvar
def get_batch_size(*modalities):
    """Return the batch size (dim 0) of the first non-None modality, or
    None when every argument is None."""
    present = (m for m in modalities if m is not None)
    first = next(present, None)
    return None if first is None else first.size(0)
class ImageEncoder(nn.Module):
    """Parametrizes q(z|x) for one image modality.

    The convolutional stack produces 256 feature maps that flatten to
    256*5*5; a single linear head emits both halves of the posterior,
    mu in the first ``n_latents`` columns and logvar in the second.

    @param n_latents: integer
        number of latent dimensions
    @param n_channels: integer [default: 3]
        number of input channels
    """
    def __init__(self, n_latents, n_channels=3):
        super(ImageEncoder, self).__init__()
        self.features = nn.Sequential(
            nn.Conv2d(n_channels, 32, 4, 2, 1, bias=False),
            Swish(),
            nn.Conv2d(32, 64, 4, 2, 1, bias=False),
            nn.BatchNorm2d(64),
            Swish(),
            nn.Conv2d(64, 128, 4, 2, 1, bias=False),
            nn.BatchNorm2d(128),
            Swish(),
            nn.Conv2d(128, 256, 4, 1, 0, bias=False),
            nn.BatchNorm2d(256),
            Swish())
        self.classifier = nn.Sequential(
            nn.Linear(256 * 5 * 5, 512),
            Swish(),
            nn.Dropout(p=0.1),
            nn.Linear(512, n_latents * 2))
        self.n_latents = n_latents

    def forward(self, x):
        flat = self.features(x).view(-1, 256 * 5 * 5)
        stats = self.classifier(flat)
        # split the joint head into (mu, logvar)
        return stats[:, :self.n_latents], stats[:, self.n_latents:]
class ImageDecoder(nn.Module):
    """Parametrizes p(x|z) for one image modality.

    Returns raw logits (no sigmoid) so the loss can use the numerically
    stable BCE-with-logits form.

    @param n_latents: integer
        number of latent dimensions
    @param n_channels: integer [default: 3]
        number of output channels
    """
    def __init__(self, n_latents, n_channels=3):
        super(ImageDecoder, self).__init__()
        # project z up to a 256 x 5 x 5 feature volume
        self.upsample = nn.Sequential(
            nn.Linear(n_latents, 256 * 5 * 5),
            Swish())
        # transposed convolutions back up to an n_channels x 64 x 64 image
        self.hallucinate = nn.Sequential(
            nn.ConvTranspose2d(256, 128, 4, 1, 0, bias=False),
            nn.BatchNorm2d(128),
            Swish(),
            nn.ConvTranspose2d(128, 64, 4, 2, 1, bias=False),
            nn.BatchNorm2d(64),
            Swish(),
            nn.ConvTranspose2d(64, 32, 4, 2, 1, bias=False),
            nn.BatchNorm2d(32),
            Swish(),
            nn.ConvTranspose2d(32, n_channels, 4, 2, 1, bias=False))

    def forward(self, z):
        features = self.upsample(z).view(-1, 256, 5, 5)
        return self.hallucinate(features)  # logits; no sigmoid!
class ProductOfExperts(nn.Module):
    """Return parameters for a product of independent Gaussian experts.
    See https://arxiv.org/pdf/1410.7827.pdf for equations.

    @param mu: M x D for M experts
    @param logvar: M x D for M experts
    """
    def forward(self, mu, logvar, eps=1e-8):
        variance = logvar.exp() + eps
        # precision of the i-th Gaussian expert at point x
        precision = 1. / (variance + eps)
        total_precision = torch.sum(precision, dim=0)
        pd_mu = torch.sum(mu * precision, dim=0) / total_precision
        pd_var = 1. / total_precision
        return pd_mu, torch.log(pd_var + eps)
class Swish(nn.Module):
    """Swish activation, x * sigmoid(x) (https://arxiv.org/abs/1710.05941)."""
    def forward(self, x):
        return torch.sigmoid(x) * x
def prior_expert(size, use_cuda=False):
    """Universal prior expert: a spherical Gaussian N(0, 1), i.e. zero mean
    and zero log-variance.

    @param size: integer
        dimensionality of Gaussian
    @param use_cuda: boolean [default: False]
        cast CUDA on variables
    """
    mu = Variable(torch.zeros(size))
    logvar = Variable(torch.zeros(size))  # log(1) == 0
    if use_cuda:
        mu, logvar = mu.cuda(), logvar.cuda()
    return mu, logvar
| 8,131 | 36.13242 | 82 | py |
multimodal-vae-public | multimodal-vae-public-master/vision/datasets.py | from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import os
import random
import numpy as np
from copy import deepcopy
from PIL import Image
import torch
from torch.utils.data.dataset import Dataset
from torchvision import transforms
# number of aligned modalities returned per example by CelebVision
N_MODALITIES = 6
# dataset split names mapped to their integer partition codes
VALID_PARTITIONS = {'train': 0, 'val': 1, 'test': 2}
class CelebVision(Dataset):
    """Dataset of celebrity face images with six aligned modalities:
    color image, grayscale, edge map, mask, obscured and watermarked.

    The user needs to have pre-defined the Anno and Eval folder from
    http://mmlab.ie.cuhk.edu.hk/projects/CelebA.html

    @param partition: string
        train|val|test [default: train]
        See VALID_PARTITIONS global variable.
    @param data_dir: string
        path to root of dataset images [default: ./data]
    """
    def __init__(self, partition='train', data_dir='./data'):
        super(CelebVision, self).__init__()
        self.partition = partition
        self.data_dir = data_dir
        assert partition in VALID_PARTITIONS.keys()
        # load a list of images for the user-chosen partition
        self.image_paths = load_eval_partition(partition, data_dir=data_dir)
        self.size = int(len(self.image_paths))
        # resize image to 64 x 64
        self.image_transform = transforms.Compose([transforms.Resize(64),
                                                   transforms.CenterCrop(64),
                                                   transforms.ToTensor()])

    def __getitem__(self, index):
        """Return the six modality tensors for the example at ``index``.

        Returns:
            tuple: (image, gray_image, edge_image, mask_image,
                    obscured_image, watermark_image), each a FloatTensor of
                    shape (C, 64, 64).
        """
        name = self.image_paths[index]
        image_path = os.path.join(self.data_dir, 'img_align_celeba', name)
        gray_path = os.path.join(self.data_dir, 'img_align_celeba_grayscale', name)
        edge_path = os.path.join(self.data_dir, 'img_align_celeba_edge', name)
        mask_path = os.path.join(self.data_dir, 'img_align_celeba_mask', name)
        # open PIL Image -- these are fixed versions of image that we save
        image = Image.open(image_path).convert('RGB')
        gray_image = Image.open(gray_path).convert('L')
        edge_image = Image.open(edge_path).convert('L')
        mask_image = Image.open(mask_path).convert('L')
        # derived modalities are computed on the fly from the color image
        obscured_image = obscure_image(Image.open(image_path).convert('RGB'))
        # BUGFIX: watermark a fresh color copy -- previously the obscured
        # copy was watermarked and the fresh copy silently discarded
        watermark_image = add_watermark(Image.open(image_path).convert('RGB'),
                                        watermark_path='./watermark.png')
        image = self.image_transform(image)
        # BUGFIX: was `self.image_transform(grayscale_image)` -- undefined
        # name that raised NameError on every item
        gray_image = self.image_transform(gray_image)
        edge_image = self.image_transform(edge_image)
        mask_image = self.image_transform(mask_image)
        obscured_image = self.image_transform(obscured_image)
        watermark_image = self.image_transform(watermark_image)
        # masks are normally white with black lines but we want to
        # be consistent with edges and MNIST-stuff, we so make the background
        # black and the lines white.
        mask_image = 1 - mask_image
        # return everything as a bundle
        return (image, gray_image, edge_image,
                mask_image, obscured_image, watermark_image)

    def __len__(self):
        return self.size
def obscure_image(image):
    """Return a copy of ``image`` with the second half along axis 1 of its
    pixel array blacked out.

    We obscure half the face this way because completing a face given a
    vertical half should be easier than given a horizontal half.

    @param image: PIL.Image
        color image
    @return: PIL.Image
        color image with half its pixels zeroed
    """
    pixels = deepcopy(np.asarray(image))
    midpoint = pixels.shape[1] // 2
    pixels[:, midpoint + 1:, :] = 0
    return Image.fromarray(pixels)
def add_watermark(image, watermark_path='./watermark.png'):
    """Overlay the watermark file, resized to match, onto ``image``.

    NOTE: ``image`` is modified in place (PIL paste) and also returned.

    @param image: PIL.Image
        color image
    @param watermark_path: string
        path to fixed watermark image [default: ./watermark.png]
    @return: PIL.Image
        color image with overlayed watermark
    """
    watermark = Image.open(watermark_path)
    width, height = image.size[0], image.size[1]
    # resize to cover the target, using the watermark as its own alpha mask
    watermark = watermark.resize((width, height), Image.BICUBIC)
    image.paste(watermark, (0, 0), watermark)
    return image
| 4,896 | 36.669231 | 78 | py |
multimodal-vae-public | multimodal-vae-public-master/vision/train.py | from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import os
import sys
import shutil
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.autograd import Variable
from torchvision.utils import save_image
from model import MVAE
from datasets import N_MODALITIES
def elbo_loss(recon_image, image, recon_gray, gray, recon_edge, edge, recon_mask, mask,
              recon_obscured, obscured, recon_watermark, watermark, mu, logvar, annealing_factor=1.):
    """Multimodal ELBO: summed per-modality reconstruction BCEs plus an
    (annealed) KL divergence to the standard-normal prior.

    Any (reconstruction, target) pair may be None, in which case that
    modality is skipped -- this supports the unimodal passes in train().

    Fixed: parameters 9-12 were named ``recon_rotated, rotated,
    recon_obscured, obscured`` while the body referenced the undefined names
    ``recon_watermark``/``watermark`` (a guaranteed NameError). Every call
    site passes the (obscured, watermark) pairs in these positions, so the
    parameters are renamed to match both the body and the callers.

    @param mu: torch.Tensor
        mean of the latent distribution
    @param logvar: torch.Tensor
        log-variance of the latent distribution
    @param annealing_factor: float [default: 1.]
        multiplier for the KL divergence term
    @return ELBO: torch.Tensor (scalar)
    """
    BCE = 0
    if recon_image is not None and image is not None:
        recon_image, image = recon_image.view(-1, 3 * 64 * 64), image.view(-1, 3 * 64 * 64)
        image_BCE = torch.sum(binary_cross_entropy_with_logits(recon_image, image), dim=1)
        BCE += image_BCE
    if recon_gray is not None and gray is not None:
        recon_gray, gray = recon_gray.view(-1, 1 * 64 * 64), gray.view(-1, 1 * 64 * 64)
        gray_BCE = torch.sum(binary_cross_entropy_with_logits(recon_gray, gray), dim=1)
        BCE += gray_BCE
    if recon_edge is not None and edge is not None:
        recon_edge, edge = recon_edge.view(-1, 1 * 64 * 64), edge.view(-1, 1 * 64 * 64)
        edge_BCE = torch.sum(binary_cross_entropy_with_logits(recon_edge, edge), dim=1)
        BCE += edge_BCE
    if recon_mask is not None and mask is not None:
        recon_mask, mask = recon_mask.view(-1, 1 * 64 * 64), mask.view(-1, 1 * 64 * 64)
        mask_BCE = torch.sum(binary_cross_entropy_with_logits(recon_mask, mask), dim=1)
        BCE += mask_BCE
    if recon_obscured is not None and obscured is not None:
        recon_obscured, obscured = recon_obscured.view(-1, 3 * 64 * 64), obscured.view(-1, 3 * 64 * 64)
        obscured_BCE = torch.sum(binary_cross_entropy_with_logits(recon_obscured, obscured), dim=1)
        BCE += obscured_BCE
    if recon_watermark is not None and watermark is not None:
        recon_watermark, watermark = recon_watermark.view(-1, 3 * 64 * 64), watermark.view(-1, 3 * 64 * 64)
        watermark_BCE = torch.sum(binary_cross_entropy_with_logits(recon_watermark, watermark), dim=1)
        BCE += watermark_BCE
    # Kingma and Welling. Auto-Encoding Variational Bayes. ICLR, 2014
    # https://arxiv.org/abs/1312.6114
    KLD = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp(), dim=1)
    # NOTE: we use lambda_i = 1 for all i since each modality is roughly equal
    ELBO = torch.mean(BCE / float(N_MODALITIES) + annealing_factor * KLD)
    return ELBO
def binary_cross_entropy_with_logits(input, target):
    """Numerically stable sigmoid activation + binary cross entropy
    (element-wise, no reduction).

    @param input: torch.Tensor (size N)
    @param target: torch.Tensor (size N)
    @return loss: torch.Tensor (size N)
    @raises ValueError: when the two tensors differ in shape
    """
    if target.size() != input.size():
        raise ValueError("Target size ({}) must be the same as input size ({})".format(
            target.size(), input.size()))
    # standard stable form: max(x, 0) - x * t + log(1 + exp(-|x|))
    positive_part = torch.clamp(input, 0)
    softplus = torch.log(1 + torch.exp(-torch.abs(input)))
    return positive_part - input * target + softplus
class AverageMeter(object):
    """Tracks the most recent value and a running (count-weighted) average."""
    def __init__(self):
        self.reset()

    def reset(self):
        """Zero out all statistics."""
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        """Record *val* observed *n* times and refresh the running average."""
        self.sum += val * n
        self.count += n
        self.val = val
        self.avg = self.sum / self.count
def save_checkpoint(state, is_best, folder='./', filename='checkpoint.pth.tar'):
    """Serialize *state* to ``folder/filename``; when *is_best* is True,
    additionally copy it to ``model_best.pth.tar`` in the same folder.

    Fixed: ``os.mkdir`` fails when *folder* is a nested path whose parent
    directories do not exist yet; ``os.makedirs`` creates intermediates too.

    @param state: dict
        picklable checkpoint contents (state_dict, metadata, ...)
    @param is_best: boolean
        whether this checkpoint has the best validation loss so far
    """
    if not os.path.isdir(folder):
        os.makedirs(folder)
    torch.save(state, os.path.join(folder, filename))
    if is_best:
        shutil.copyfile(os.path.join(folder, filename),
                        os.path.join(folder, 'model_best.pth.tar'))
def load_checkpoint(file_path, use_cuda=False):
    """Rebuild an MVAE from a checkpoint written by save_checkpoint.

    @param file_path: string
        path to the ``.pth.tar`` checkpoint
    @param use_cuda: boolean [default: False]
        when False, remap any CUDA tensors onto the CPU while loading
    @return: MVAE
        model with the stored weights loaded
    """
    if use_cuda:
        checkpoint = torch.load(file_path)
    else:
        checkpoint = torch.load(
            file_path, map_location=lambda storage, location: storage)
    model = MVAE(checkpoint['n_latents'])
    model.load_state_dict(checkpoint['state_dict'])
    return model
if __name__ == "__main__":
    import argparse
    # Fixed: the data loaders below reference ``datasets.CelebVision`` but
    # this file only did ``from datasets import N_MODALITIES`` -- the module
    # itself was never bound, which is a NameError at loader construction.
    import datasets
    parser = argparse.ArgumentParser()
    parser.add_argument('--n-latents', type=int, default=250,
                        help='size of the latent embedding (default: 250)')
    parser.add_argument('--batch-size', type=int, default=50, metavar='N',
                        help='input batch size for training (default: 50)')
    parser.add_argument('--epochs', type=int, default=100, metavar='N',
                        help='number of epochs to train (default: 100)')
    parser.add_argument('--annealing-epochs', type=int, default=20, metavar='N',
                        help='number of epochs to anneal KL for [default: 20]')
    parser.add_argument('--lr', type=float, default=1e-4, metavar='LR',
                        help='learning rate (default: 1e-4)')
    parser.add_argument('--log-interval', type=int, default=10, metavar='N',
                        help='how many batches to wait before logging training status')
    parser.add_argument('--cuda', action='store_true', default=False,
                        help='enables CUDA training')
    args = parser.parse_args()
    # only use CUDA when requested AND actually available
    args.cuda = args.cuda and torch.cuda.is_available()
    # one results directory per modality for test()'s reconstruction dumps
    new_directories = ['./results', './results/image', './results/gray', './results/edge',
                       './results/mask', './results/obscured', './results/watermark']
    for new_dir in new_directories:
        if not os.path.isdir(new_dir):
            os.makedirs(new_dir)
    train_loader = torch.utils.data.DataLoader(
        datasets.CelebVision(partition='train', data_dir='./data'),
        batch_size=args.batch_size, shuffle=True)
    # batches per epoch; used by train() for KL annealing
    N_mini_batches = len(train_loader)
    test_loader = torch.utils.data.DataLoader(
        datasets.CelebVision(partition='val', data_dir='./data'),
        batch_size=args.batch_size, shuffle=False)
    model = MVAE(args.n_latents, use_cuda=args.cuda)
    optimizer = optim.Adam(model.parameters(), lr=args.lr)
    if args.cuda:
        model.cuda()
def train(epoch):
model.train()
train_loss_meter = AverageMeter()
for batch_idx, (image, gray_image, edge_image, mask_image,
rotated_image, obscured_image) in enumerate(train_loader):
if epoch < args.annealing_epochs:
# compute the KL annealing factor for the current mini-batch in the current epoch
annealing_factor = (float(batch_idx + (epoch - 1) * N_mini_batches + 1) /
float(args.annealing_epochs * N_mini_batches))
else:
# by default the KL annealing factor is unity
annealing_factor = 1.0
if args.cuda:
image = image.cuda()
gray_image = gray_image.cuda()
edge_image = edge_image.cuda()
mask_image = mask_image.cuda()
obscured_image = obscured_image.cuda()
watermark_image = watermark_image.cuda()
image = Variable(image)
gray_image = Variable(gray_image)
edge_image = Variable(edge_image)
mask_image = Variable(mask_image)
obscured_image = Variable(obscured_image)
watermark_image = Variable(watermark_image)
batch_size = image.size(0)
# refresh the optimizer
optimizer.zero_grad()
# compute reconstructions using all the modalities
(joint_recon_image, joint_recon_gray, joint_recon_edge,
joint_recon_mask, joint_recon_obscured, joint_recon_watermark,
joint_mu, joint_logvar) = model(image, gray_image, edge_image,
mask_image, obscured_image, watermark_image)
# compute reconstructions using each of the individual modalities
(image_recon_image, image_recon_gray, image_recon_edge,
image_recon_mask, image_recon_obscured, image_recon_watermark,
image_mu, image_logvar) = model(image=image)
(gray_recon_image, gray_recon_gray, gray_recon_edge,
gray_recon_mask, gray_recon_obscured, gray_recon_watermark,
gray_mu, gray_logvar) = model(gray=gray_image)
(edge_recon_image, edge_recon_gray, edge_recon_edge,
edge_recon_mask, edge_recon_obscured, edge_recon_watermark,
edge_mu, edge_logvar) = model(edge=edge_image)
(mask_recon_image, mask_recon_gray, mask_recon_edge,
mask_recon_mask, mask_recon_obscured, mask_recon_watermark,
mask_mu, mask_logvar) = model(mask=mask_image)
(obscured_recon_image, obscured_recon_gray, obscured_recon_edge,
obscured_recon_mask, obscured_recon_obscured, obscured_recon_watermark,
obscured_mu, obscured_logvar) = model(obscured=obscured_image)
(watermark_recon_image, watermark_recon_gray, watermark_recon_edge,
watermark_recon_mask, watermark_recon_obscured, watermark_recon_watermark,
watermark_mu, watermark_logvar) = model(watermark=watermark_image)
# compute joint loss
joint_train_loss = elbo_loss(joint_recon_image, image,
joint_recon_gray, gray_image,
joint_recon_edge, edge_image,
joint_recon_mask, mask_image,
joint_recon_obscured, obscured_image,
joint_recon_watermark, watermark_image,
joint_mu, joint_logvar,
annealing_factor=annealing_factor)
# compute loss with unimodal inputs
image_train_loss = elbo_loss(image_recon_image, image,
image_recon_gray, gray_image,
image_recon_edge, edge_image,
image_recon_mask, mask_image,
image_recon_obscured, obscured_image,
image_recon_watermark, watermark_image,
image_mu, image_logvar,
annealing_factor=annealing_factor)
gray_train_loss = elbo_loss(gray_recon_image, image,
gray_recon_gray, gray_image,
gray_recon_edge, edge_image,
gray_recon_mask, mask_image,
gray_recon_obscured, obscured_image,
gray_recon_watermark, watermark_image,
gray_mu, joint_logvar,
annealing_factor=annealing_factor)
edge_train_loss = elbo_loss(edge_recon_image, image,
edge_recon_gray, gray_image,
edge_recon_edge, edge_image,
edge_recon_mask, mask_image,
edge_recon_obscured, obscured_image,
edge_recon_watermark, watermark_image,
edge_mu, edge_logvar,
annealing_factor=annealing_factor)
mask_train_loss = elbo_loss(mask_recon_image, image,
mask_recon_gray, gray_image,
mask_recon_edge, edge_image,
mask_recon_mask, mask_image,
mask_recon_obscured, obscured_image,
mask_recon_watermark, watermark_image,
mask_mu, mask_logvar,
annealing_factor=annealing_factor)
obscured_train_loss = elbo_loss(obscured_recon_image, image,
obscured_recon_gray, gray_image,
obscured_recon_edge, edge_image,
obscured_recon_mask, mask_image,
obscured_recon_obscured, obscured_image,
obscured_recon_watermark, watermark_image,
obscured_mu, obscured_logvar,
annealing_factor=annealing_factor)
watermark_train_loss = elbo_loss(watermark_recon_image, image,
watermark_recon_gray, gray_image,
watermark_recon_edge, edge_image,
watermark_recon_mask, mask_image,
watermark_recon_obscured, obscured_image,
watermark_recon_watermark, watermark_image,
watermark_mu, watermark_logvar,
annealing_factor=annealing_factor)
train_loss = joint_train_loss + image_train_loss + gray_train_loss \
+ edge_train_loss + mask_train_loss + obscured_train_loss \
+ watermark_train_loss
train_loss_meter.update(train_loss.data[0], len(image))
# compute and take gradient step
train_loss.backward()
optimizer.step()
if batch_idx % args.log_interval == 0:
print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}\tAnnealing Factor: {:.3f}'.format(
epoch, batch_idx * len(image), len(train_loader.dataset),
100. * batch_idx / len(train_loader), train_loss_meter.avg, annealing_factor))
print('====> Epoch: {}\tLoss: {:.4f}'.format(epoch, train_loss_meter.avg))
def test(epoch):
model.eval()
test_loss = 0
pbar = tqdm(total=len(test_loader))
for batch_idx, (image, gray_image, edge_image, mask_image,
obscured_image, watermark_image) in enumerate(test_loader):
if args.cuda:
image = image.cuda()
gray_image = gray_image.cuda()
edge_image = edge_image.cuda()
mask_image = mask_image.cuda()
obscured_image = obscured_image.cuda()
watermark_image = watermark_image.cuda()
image = Variable(image)
gray_image = Variable(gray_image)
edge_image = Variable(edge_image)
mask_image = Variable(mask_image)
obscured_image = Variable(obscured_image)
watermark_image = Variable(watermark_image)
batch_size = image.size(0)
# for ease, only compute the joint loss
(joint_recon_image, joint_recon_gray, joint_recon_edge,
joint_recon_mask, joint_recon_obscured, joint_recon_watermark,
joint_mu, joint_logvar) = model(batch_size, image, gray_image, edge_image,
mask_image, obscured_image, watermark_image)
test_loss += loss_function(joint_recon_image, image,
joint_recon_gray, gray_image,
joint_recon_edge, edge_image,
joint_recon_mask, mask_image,
joint_recon_obscured, obscured_image,
joint_recon_watermark, watermark_image,
joint_mu, joint_logvar).data[0]
if batch_idx == 0:
# from time to time, plot the reconstructions to see how well the model is learning
n = min(batch_size, 8)
image_comparison = torch.cat(
[image[:n],
F.sigmoid(joint_recon_image).view(args.batch_size, 3, 64, 64)[:n]])
gray_comparison = torch.cat([
gray_image[:n],
F.sigmoid(joint_recon_gray).view(args.batch_size, 1, 64, 64)[:n]])
edge_comparison = torch.cat([
edge_image[:n],
F.sigmoid(joint_recon_edge).view(args.batch_size, 1, 64, 64)[:n]])
mask_comparison = torch.cat([
mask_image[:n],
F.sigmoid(joint_recon_mask).view(args.batch_size, 1, 64, 64)[:n]])
obscured_comparison = torch.cat([
obscured_image[:n],
F.sigmoid(joint_recon_obscured).view(args.batch_size, 3, 64, 64)[:n]])
watermark_comparison = torch.cat([
watermark_image[:n],
F.sigmoid(joint_recon_watermark).view(args.batch_size, 3, 64, 64)[:n]])
# save these reconstructions
save_image(image_comparison.data.cpu(),
'./results/image/reconstruction_%d.png' % epoch, nrow=n)
save_image(gray_comparison.data.cpu(),
'./results/gray/reconstruction_%d.png' % epoch, nrow=n)
save_image(edge_comparison.data.cpu(),
'./results/edge/reconstruction_%d.png' % epoch, nrow=n)
save_image(mask_comparison.data.cpu(),
'./results/mask/reconstruction_%d.png' % epoch, nrow=n)
save_image(obscured_comparison.data.cpu(),
'./results/obscured/reconstruction_%d.png' % epoch, nrow=n)
save_image(watermark_comparison.data.cpu(),
'./results/watermark/reconstruction_%d.png' % epoch, nrow=n)
pbar.update()
pbar.close()
test_loss /= len(test_loader)
print('====> Test Loss: {:.4f}'.format(test_loss))
return test_loss
best_loss = sys.maxint
for epoch in range(1, args.epochs + 1):
train(epoch)
loss = test(epoch)
is_best = loss < best_loss
best_loss = min(loss, best_loss)
# save the best model and current model
save_checkpoint({
'state_dict': model.state_dict(),
'best_loss': best_loss,
'n_latents': args.n_latents,
'optimizer' : optimizer.state_dict(),
}, is_best, folder='./trained_models')
| 19,025 | 47.659847 | 107 | py |
multimodal-vae-public | multimodal-vae-public-master/mnist/sample.py | from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import sys
import numpy as np
import torch
import torch.nn.functional as F
from torch.autograd import Variable
from torchvision import datasets, transforms
from torchvision.utils import save_image
from train import load_checkpoint
def fetch_mnist_image(label):
    """Return a random image from the MNIST dataset with label.

    @param label: integer
        a integer from 0 to 9
    @return: torch.autograd.Variable
        MNIST image
    """
    test_set = datasets.MNIST('./data', train=False, download=True,
                              transform=transforms.ToTensor())
    all_images = test_set.test_data.numpy()
    all_labels = test_set.test_labels.numpy()
    # keep only the images carrying the requested digit, then pick one
    matching = all_images[all_labels == label]
    chosen = matching[np.random.choice(np.arange(matching.shape[0]))]
    image = torch.from_numpy(chosen).float().unsqueeze(0)
    return Variable(image, volatile=True)
def fetch_mnist_text(label):
    """Randomly generate a number from 0 to 9.

    @param label: integer
        a integer from 0 to 9
    @return: torch.autograd.Variable
        Variable wrapped around an integer.
    """
    wrapped = torch.LongTensor([label])
    return Variable(wrapped, volatile=True)
if __name__ == "__main__":
    import os
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('model_path', type=str, help='path to trained model file')
    parser.add_argument('--n-samples', type=int, default=64,
                        help='Number of images and texts to sample [default: 64]')
    # condition sampling on a particular images
    parser.add_argument('--condition-on-image', type=int, default=None,
                        help='If True, generate text conditioned on an image.')
    # condition sampling on a particular text
    parser.add_argument('--condition-on-text', type=int, default=None,
                        help='If True, generate images conditioned on a text.')
    parser.add_argument('--cuda', action='store_true', default=False,
                        help='enables CUDA training')
    args = parser.parse_args()
    args.cuda = args.cuda and torch.cuda.is_available()
    model = load_checkpoint(args.model_path, use_cuda=args.cuda)
    model.eval()
    if args.cuda:
        model.cuda()
    # Fixed: mode dispatch used truthiness (``not args.condition_on_image``),
    # so conditioning on the digit 0 was indistinguishable from "flag not
    # given" and silently fell through to unconditional sampling. Compare
    # against None explicitly instead.
    # mode 1: unconditional generation
    if args.condition_on_image is None and args.condition_on_text is None:
        mu = Variable(torch.Tensor([0]))
        std = Variable(torch.Tensor([1]))
        if args.cuda:
            mu = mu.cuda()
            std = std.cuda()
    # mode 2: generate conditioned on image
    elif args.condition_on_image is not None and args.condition_on_text is None:
        image = fetch_mnist_image(args.condition_on_image)
        if args.cuda:
            image = image.cuda()
        mu, logvar = model.infer(image=image)
        std = logvar.mul(0.5).exp_()
    # mode 3: generate conditioned on text
    elif args.condition_on_text is not None and args.condition_on_image is None:
        text = fetch_mnist_text(args.condition_on_text)
        if args.cuda:
            text = text.cuda()
        mu, logvar = model.infer(text=text)
        std = logvar.mul(0.5).exp_()
    # mode 4: generate conditioned on image and text
    else:
        image = fetch_mnist_image(args.condition_on_image)
        text = fetch_mnist_text(args.condition_on_text)
        if args.cuda:
            image = image.cuda()
            text = text.cuda()
        mu, logvar = model.infer(image=image, text=text)
        std = logvar.mul(0.5).exp_()
    # sample from uniform gaussian
    sample = Variable(torch.randn(args.n_samples, model.n_latents))
    if args.cuda:
        sample = sample.cuda()
    # sample from particular gaussian by multiplying + adding
    mu = mu.expand_as(sample)
    std = std.expand_as(sample)
    sample = sample.mul(std).add_(mu)
    # generate image and text
    img_recon = F.sigmoid(model.image_decoder(sample)).cpu().data
    txt_recon = F.log_softmax(model.text_decoder(sample), dim=1).cpu().data
    # save image samples to filesystem
    save_image(img_recon.view(args.n_samples, 1, 28, 28),
               './sample_image.png')
    # save text samples to filesystem
    with open('./sample_text.txt', 'w') as fp:
        txt_recon_np = txt_recon.numpy()
        txt_recon_np = np.argmax(txt_recon_np, axis=1).tolist()
        for i, item in enumerate(txt_recon_np):
            fp.write('Text (%d): %s\n' % (i, item))
| 4,692 | 37.154472 | 82 | py |
multimodal-vae-public | multimodal-vae-public-master/mnist/model.py | from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import numpy as np
import torch
import torch.nn as nn
from torch.autograd import Variable
from torch.nn import functional as F
from torch.nn.parameter import Parameter
class MVAE(nn.Module):
    """Multimodal Variational Autoencoder.

    @param n_latents: integer
        number of latent dimensions
    """
    def __init__(self, n_latents):
        super(MVAE, self).__init__()
        self.image_encoder = ImageEncoder(n_latents)
        self.image_decoder = ImageDecoder(n_latents)
        self.text_encoder = TextEncoder(n_latents)
        self.text_decoder = TextDecoder(n_latents)
        self.experts = ProductOfExperts()
        self.n_latents = n_latents

    def reparametrize(self, mu, logvar):
        """Sample z ~ N(mu, sigma^2) via the reparameterization trick while
        training; return the posterior mean in eval mode."""
        if not self.training:
            return mu
        sigma = logvar.mul(0.5).exp_()
        noise = Variable(sigma.data.new(sigma.size()).normal_())
        return noise.mul(sigma).add_(mu)

    def forward(self, image=None, text=None):
        """Encode whichever modalities are present, draw a latent sample,
        and decode both modalities from it."""
        mu, logvar = self.infer(image, text)
        z = self.reparametrize(mu, logvar)
        img_recon = self.image_decoder(z)
        txt_recon = self.text_decoder(z)
        return img_recon, txt_recon, mu, logvar

    def infer(self, image=None, text=None):
        """Fuse the posteriors of the available modalities (plus the
        universal N(0, 1) prior expert) with a product of experts."""
        batch_size = image.size(0) if image is not None else text.size(0)
        use_cuda = next(self.parameters()).is_cuda  # check if CUDA
        # initialize the universal prior expert
        mu, logvar = prior_expert((1, batch_size, self.n_latents),
                                  use_cuda=use_cuda)
        if image is not None:
            img_mu, img_logvar = self.image_encoder(image)
            mu = torch.cat((mu, img_mu.unsqueeze(0)), dim=0)
            logvar = torch.cat((logvar, img_logvar.unsqueeze(0)), dim=0)
        if text is not None:
            txt_mu, txt_logvar = self.text_encoder(text)
            mu = torch.cat((mu, txt_mu.unsqueeze(0)), dim=0)
            logvar = torch.cat((logvar, txt_logvar.unsqueeze(0)), dim=0)
        # product of experts to combine gaussians
        return self.experts(mu, logvar)
class ImageEncoder(nn.Module):
    """Parametrizes q(z|x).

    @param n_latents: integer
        number of latent dimensions
    """
    def __init__(self, n_latents):
        super(ImageEncoder, self).__init__()
        self.fc1 = nn.Linear(784, 512)
        self.fc2 = nn.Linear(512, 512)
        self.fc31 = nn.Linear(512, n_latents)  # posterior mean head
        self.fc32 = nn.Linear(512, n_latents)  # posterior log-variance head
        self.swish = Swish()

    def forward(self, x):
        # flatten 28x28 images, then two Swish-activated hidden layers
        hidden = self.swish(self.fc2(self.swish(self.fc1(x.view(-1, 784)))))
        return self.fc31(hidden), self.fc32(hidden)
class ImageDecoder(nn.Module):
    """Parametrizes p(x|z).

    @param n_latents: integer
        number of latent dimensions
    """
    def __init__(self, n_latents):
        super(ImageDecoder, self).__init__()
        self.fc1 = nn.Linear(n_latents, 512)
        self.fc2 = nn.Linear(512, 512)
        self.fc3 = nn.Linear(512, 512)
        self.fc4 = nn.Linear(512, 784)  # one logit per pixel of a 28x28 image
        self.swish = Swish()

    def forward(self, z):
        out = self.swish(self.fc1(z))
        out = self.swish(self.fc2(out))
        out = self.swish(self.fc3(out))
        return self.fc4(out)  # NOTE: no sigmoid here. See train.py
class TextEncoder(nn.Module):
    """Parametrizes q(z|y).

    @param n_latents: integer
        number of latent dimensions
    """
    def __init__(self, n_latents):
        super(TextEncoder, self).__init__()
        self.fc1 = nn.Embedding(10, 512)  # label lookup: 10 digit classes
        self.fc2 = nn.Linear(512, 512)
        self.fc31 = nn.Linear(512, n_latents)  # posterior mean head
        self.fc32 = nn.Linear(512, n_latents)  # posterior log-variance head
        self.swish = Swish()

    def forward(self, x):
        hidden = self.swish(self.fc2(self.swish(self.fc1(x))))
        return self.fc31(hidden), self.fc32(hidden)
class TextDecoder(nn.Module):
    """Parametrizes p(y|z).

    @param n_latents: integer
        number of latent dimensions
    """
    def __init__(self, n_latents):
        super(TextDecoder, self).__init__()
        self.fc1 = nn.Linear(n_latents, 512)
        self.fc2 = nn.Linear(512, 512)
        self.fc3 = nn.Linear(512, 512)
        self.fc4 = nn.Linear(512, 10)  # one logit per digit class
        self.swish = Swish()

    def forward(self, z):
        out = self.swish(self.fc1(z))
        out = self.swish(self.fc2(out))
        out = self.swish(self.fc3(out))
        return self.fc4(out)  # NOTE: no softmax here. See train.py
class ProductOfExperts(nn.Module):
    """Return parameters for product of independent experts.
    See https://arxiv.org/pdf/1410.7827.pdf for equations.

    @param mu: M x D for M experts
    @param logvar: M x D for M experts
    """
    def forward(self, mu, logvar, eps=1e-8):
        var = torch.exp(logvar) + eps
        # precision of i-th Gaussian expert at point x
        precision = 1. / (var + eps)
        total_precision = torch.sum(precision, dim=0)
        pd_mu = torch.sum(mu * precision, dim=0) / total_precision
        pd_var = 1. / total_precision
        return pd_mu, torch.log(pd_var + eps)
class Swish(nn.Module):
    """Swish activation, x * sigmoid(x). https://arxiv.org/abs/1710.05941"""
    def forward(self, x):
        return F.sigmoid(x) * x
def prior_expert(size, use_cuda=False):
    """Universal prior expert. Here we use a spherical
    Gaussian: N(0, 1).

    @param size: integer
        dimensionality of Gaussian
    @param use_cuda: boolean [default: False]
        cast CUDA on variables
    """
    mu = Variable(torch.zeros(size))
    logvar = Variable(torch.zeros(size))
    if use_cuda:
        return mu.cuda(), logvar.cuda()
    return mu, logvar
| 5,973 | 31.11828 | 73 | py |
multimodal-vae-public | multimodal-vae-public-master/mnist/train.py | from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import os
import sys
import shutil
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.autograd import Variable
from torchvision import transforms
from torchvision.datasets import MNIST
from model import MVAE
def elbo_loss(recon_image, image, recon_text, text, mu, logvar,
              lambda_image=1.0, lambda_text=1.0, annealing_factor=1):
    """Bimodal ELBO loss function.

    Either modality pair may be None (skipped), which supports the
    unimodal passes during training.

    @param recon_image: torch.Tensor
        reconstructed image
    @param image: torch.Tensor
        input image
    @param recon_text: torch.Tensor
        reconstructed text probabilities
    @param text: torch.Tensor
        input text (one-hot)
    @param mu: torch.Tensor
        mean of latent distribution
    @param logvar: torch.Tensor
        log-variance of latent distribution
    @param lambda_image: float [default: 1.0]
        weight for image BCE
    @param lambda_text: float [default: 1.0]
        weight for text BCE
    @param annealing_factor: integer [default: 1]
        multiplier for KL divergence term
    @return ELBO: torch.Tensor
        evidence lower bound
    """
    image_bce = 0
    text_bce = 0
    if recon_image is not None and image is not None:
        flat_recon = recon_image.view(-1, 1 * 28 * 28)
        flat_image = image.view(-1, 1 * 28 * 28)
        image_bce = torch.sum(
            binary_cross_entropy_with_logits(flat_recon, flat_image), dim=1)
    if recon_text is not None and text is not None:
        text_bce = torch.sum(cross_entropy(recon_text, text), dim=1)
    # Kingma and Welling. Auto-Encoding Variational Bayes. ICLR, 2014
    # https://arxiv.org/abs/1312.6114
    KLD = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp(), dim=1)
    return torch.mean(lambda_image * image_bce + lambda_text * text_bce
                      + annealing_factor * KLD)
def binary_cross_entropy_with_logits(input, target):
    """Sigmoid Activation + Binary Cross Entropy, numerically stable and
    element-wise (no reduction).

    @param input: torch.Tensor (size N)
    @param target: torch.Tensor (size N)
    @return loss: torch.Tensor (size N)
    @raises ValueError: when the two tensors differ in shape
    """
    if input.size() != target.size():
        raise ValueError("Target size ({}) must be the same as input size ({})".format(
            target.size(), input.size()))
    # stable formulation: max(x, 0) - x * t + log(1 + exp(-|x|))
    return (torch.clamp(input, 0)
            + torch.log(1 + torch.exp(-torch.abs(input)))
            - input * target)
def cross_entropy(input, target, eps=1e-6):
    """k-Class Cross Entropy (Log Softmax + Log Loss), returned per-element
    (N x K); summing over dim 1 yields the usual per-example NLL.

    @param input: torch.Tensor (size N x K)
    @param target: torch.Tensor (size N) of integer class indices
    @param eps: error to add (default: 1e-6)
    @return loss: torch.Tensor (size N x K)
    @raises ValueError: when the batch dimensions disagree
    """
    if input.size(0) != target.size(0):
        raise ValueError(
            "Target size ({}) must be the same as input size ({})".format(
                target.size(0), input.size(0)))
    log_probs = F.log_softmax(input + eps, dim=1)
    # scatter the integer labels into a one-hot mask of the same shape
    onehot = Variable(log_probs.data.new(log_probs.size()).zero_())
    onehot = onehot.scatter(1, target.unsqueeze(1), 1)
    return -(onehot * log_probs)
class AverageMeter(object):
    """Keeps the latest value plus a count-weighted running mean."""
    def __init__(self):
        self.reset()

    def reset(self):
        """Return every statistic to zero."""
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        """Fold in *val*, observed *n* times, and recompute the mean."""
        self.val = val
        self.count += n
        self.sum += n * val
        self.avg = self.sum / self.count
def save_checkpoint(state, is_best, folder='./', filename='checkpoint.pth.tar'):
    """Serialize *state* to ``folder/filename``; when *is_best* is True,
    additionally copy it to ``model_best.pth.tar`` in the same folder.

    Fixed: ``os.mkdir`` fails when *folder* is a nested path whose parent
    directories do not exist yet; ``os.makedirs`` creates intermediates too.

    @param state: dict
        picklable checkpoint contents (state_dict, metadata, ...)
    @param is_best: boolean
        whether this checkpoint has the best validation loss so far
    """
    if not os.path.isdir(folder):
        os.makedirs(folder)
    torch.save(state, os.path.join(folder, filename))
    if is_best:
        shutil.copyfile(os.path.join(folder, filename),
                        os.path.join(folder, 'model_best.pth.tar'))
def load_checkpoint(file_path, use_cuda=False):
    """Reconstruct an MVAE from a checkpoint written by save_checkpoint.

    @param file_path: string
        path to the ``.pth.tar`` checkpoint
    @param use_cuda: boolean [default: False]
        when False, remap any CUDA tensors onto the CPU while loading
    @return: MVAE
        model carrying the stored weights
    """
    if use_cuda:
        checkpoint = torch.load(file_path)
    else:
        checkpoint = torch.load(
            file_path, map_location=lambda storage, location: storage)
    model = MVAE(checkpoint['n_latents'])
    model.load_state_dict(checkpoint['state_dict'])
    return model
if __name__ == "__main__":
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('--n-latents', type=int, default=64,
                        help='size of the latent embedding [default: 64]')
    parser.add_argument('--batch-size', type=int, default=100, metavar='N',
                        help='input batch size for training [default: 100]')
    parser.add_argument('--epochs', type=int, default=500, metavar='N',
                        help='number of epochs to train [default: 500]')
    parser.add_argument('--annealing-epochs', type=int, default=200, metavar='N',
                        help='number of epochs to anneal KL for [default: 200]')
    parser.add_argument('--lr', type=float, default=1e-3, metavar='LR',
                        help='learning rate [default: 1e-3]')
    parser.add_argument('--log-interval', type=int, default=10, metavar='N',
                        help='how many batches to wait before logging training status [default: 10]')
    parser.add_argument('--lambda-image', type=float, default=1.,
                        help='multipler for image reconstruction [default: 1]')
    parser.add_argument('--lambda-text', type=float, default=10.,
                        help='multipler for text reconstruction [default: 10]')
    parser.add_argument('--cuda', action='store_true', default=False,
                        help='enables CUDA training [default: False]')
    args = parser.parse_args()
    # only use CUDA when requested AND actually available
    args.cuda = args.cuda and torch.cuda.is_available()
    # checkpoints written by save_checkpoint() land here
    if not os.path.isdir('./trained_models'):
        os.makedirs('./trained_models')
    train_loader = torch.utils.data.DataLoader(
        MNIST('./data', train=True, download=True, transform=transforms.ToTensor()),
        batch_size=args.batch_size, shuffle=True)
    # batches per epoch; used by train() below for the KL annealing schedule
    N_mini_batches = len(train_loader)
    test_loader = torch.utils.data.DataLoader(
        MNIST('./data', train=False, download=True, transform=transforms.ToTensor()),
        batch_size=args.batch_size, shuffle=False)
    model = MVAE(args.n_latents)
    optimizer = optim.Adam(model.parameters(), lr=args.lr)
    if args.cuda:
        model.cuda()
    def train(epoch):
        """Run one training epoch: for each mini-batch, sum the joint,
        image-only, and text-only ELBO losses and take one optimizer step."""
        model.train()
        train_loss_meter = AverageMeter()
        # NOTE: is_paired is 1 if the example is paired
        for batch_idx, (image, text) in enumerate(train_loader):
            if epoch < args.annealing_epochs:
                # compute the KL annealing factor for the current mini-batch in the current epoch
                annealing_factor = (float(batch_idx + (epoch - 1) * N_mini_batches + 1) /
                                    float(args.annealing_epochs * N_mini_batches))
            else:
                # by default the KL annealing factor is unity
                annealing_factor = 1.0
            if args.cuda:
                image = image.cuda()
                text = text.cuda()
            # legacy (pre-0.4 PyTorch) API: tensors are wrapped in Variables
            # before participating in autograd
            image = Variable(image)
            text = Variable(text)
            batch_size = len(image)
            # refresh the optimizer
            optimizer.zero_grad()
            # pass data through model
            recon_image_1, recon_text_1, mu_1, logvar_1 = model(image, text)
            recon_image_2, recon_text_2, mu_2, logvar_2 = model(image)
            recon_image_3, recon_text_3, mu_3, logvar_3 = model(text=text)
            # compute ELBO for each data combo
            joint_loss = elbo_loss(recon_image_1, image, recon_text_1, text, mu_1, logvar_1,
                                   lambda_image=args.lambda_image, lambda_text=args.lambda_text,
                                   annealing_factor=annealing_factor)
            image_loss = elbo_loss(recon_image_2, image, None, None, mu_2, logvar_2,
                                   lambda_image=args.lambda_image, lambda_text=args.lambda_text,
                                   annealing_factor=annealing_factor)
            text_loss = elbo_loss(None, None, recon_text_3, text, mu_3, logvar_3,
                                  lambda_image=args.lambda_image, lambda_text=args.lambda_text,
                                  annealing_factor=annealing_factor)
            train_loss = joint_loss + image_loss + text_loss
            # NOTE(review): `.data[0]` is the pre-0.4 scalar read; modern
            # PyTorch would use `.item()` -- confirm the targeted version.
            train_loss_meter.update(train_loss.data[0], batch_size)
            # compute gradients and take step
            train_loss.backward()
            optimizer.step()
            if batch_idx % args.log_interval == 0:
                print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}\tAnnealing-Factor: {:.3f}'.format(
                    epoch, batch_idx * len(image), len(train_loader.dataset),
                    100. * batch_idx / len(train_loader), train_loss_meter.avg, annealing_factor))
        print('====> Epoch: {}\tLoss: {:.4f}'.format(epoch, train_loss_meter.avg))
    def test(epoch):
        """Evaluate the summed joint/image/text ELBO losses on the test
        split and return the running mean."""
        model.eval()
        test_loss_meter = AverageMeter()
        for batch_idx, (image, text) in enumerate(test_loader):
            if args.cuda:
                image = image.cuda()
                text = text.cuda()
            # legacy API: volatile=True disables autograd tracking at eval
            # time (the pre-0.4 equivalent of torch.no_grad())
            image = Variable(image, volatile=True)
            text = Variable(text, volatile=True)
            batch_size = len(image)
            recon_image_1, recon_text_1, mu_1, logvar_1 = model(image, text)
            recon_image_2, recon_text_2, mu_2, logvar_2 = model(image)
            recon_image_3, recon_text_3, mu_3, logvar_3 = model(text=text)
            # no KL annealing at evaluation time (annealing_factor defaults to 1)
            joint_loss = elbo_loss(recon_image_1, image, recon_text_1, text, mu_1, logvar_1)
            image_loss = elbo_loss(recon_image_2, image, None, None, mu_2, logvar_2)
            text_loss = elbo_loss(None, None, recon_text_3, text, mu_3, logvar_3)
            test_loss = joint_loss + image_loss + text_loss
            test_loss_meter.update(test_loss.data[0], batch_size)
        print('====> Test Loss: {:.4f}'.format(test_loss_meter.avg))
        return test_loss_meter.avg
best_loss = sys.maxint
for epoch in range(1, args.epochs + 1):
train(epoch)
test_loss = test(epoch)
is_best = test_loss < best_loss
best_loss = min(test_loss, best_loss)
# save the best model and current model
save_checkpoint({
'state_dict': model.state_dict(),
'best_loss': best_loss,
'n_latents': args.n_latents,
'optimizer' : optimizer.state_dict(),
}, is_best, folder='./trained_models')
| 10,817 | 39.215613 | 105 | py |
multimodal-vae-public | multimodal-vae-public-master/fashionmnist/sample.py | from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import sys
import numpy as np
import torch
import torch.nn.functional as F
from torch.autograd import Variable
from torchvision import transforms
from torchvision.utils import save_image
from train import load_checkpoint
from datasets import FashionMNIST
from model import LABEL_IX_TO_STRING
def fetch_fashionmnist_image(label):
    """Return a random image from the FashionMNIST dataset with label.

    @param label: integer
        a integer from 0 to 9
    @return: torch.autograd.Variable
        FashionMNIST image
    """
    test_set = FashionMNIST('./data', train=False, download=True,
                            transform=transforms.ToTensor())
    all_images = test_set.test_data.numpy()
    all_labels = test_set.test_labels.numpy()
    # keep only the images carrying the requested class, then pick one
    matching = all_images[all_labels == label]
    chosen = matching[np.random.choice(np.arange(matching.shape[0]))]
    image = torch.from_numpy(chosen).float().unsqueeze(0)
    return Variable(image, volatile=True)
def fetch_fashionmnist_text(label):
    """Randomly generate a number from 0 to 9.

    @param label: integer
        a integer from 0 to 9
    @return: torch.autograd.Variable
        Variable wrapped around an integer.
    """
    wrapped = torch.LongTensor([label])
    return Variable(wrapped, volatile=True)
if __name__ == "__main__":
    import os
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('model_path', type=str, help='path to trained model file')
    parser.add_argument('--n-samples', type=int, default=64,
                        help='Number of images and texts to sample [default: 64]')
    # condition sampling on a particular images
    parser.add_argument('--condition-on-image', type=int, default=None,
                        help='If True, generate text conditioned on an image.')
    # condition sampling on a particular text
    parser.add_argument('--condition-on-text', type=int, default=None,
                        help='If True, generate images conditioned on a text.')
    parser.add_argument('--cuda', action='store_true', default=False,
                        help='enables CUDA training [default: False]')
    args = parser.parse_args()
    args.cuda = args.cuda and torch.cuda.is_available()
    model = load_checkpoint(args.model_path, use_cuda=args.cuda)
    model.eval()
    if args.cuda:
        model.cuda()
    # NOTE: compare against None explicitly -- label 0 (T-shirt/top) is
    # falsy, so truthiness tests would silently pick the wrong mode.
    # mode 1: unconditional generation
    if args.condition_on_image is None and args.condition_on_text is None:
        mu = Variable(torch.Tensor([0]))
        std = Variable(torch.Tensor([1]))
        if args.cuda:
            mu = mu.cuda()
            std = std.cuda()
    # mode 2: generate conditioned on image
    elif args.condition_on_image is not None and args.condition_on_text is None:
        image = fetch_fashionmnist_image(args.condition_on_image)
        if args.cuda:
            image = image.cuda()
        # model.infer accepts only (image, text); the old call passed an
        # extra positional 1 that collided with the image keyword.
        mu, logvar = model.infer(image=image)
        std = logvar.mul(0.5).exp_()
    # mode 3: generate conditioned on text
    elif args.condition_on_text is not None and args.condition_on_image is None:
        text = fetch_fashionmnist_text(args.condition_on_text)
        if args.cuda:
            text = text.cuda()
        mu, logvar = model.infer(text=text)
        std = logvar.mul(0.5).exp_()
    # mode 4: generate conditioned on image and text
    else:
        image = fetch_fashionmnist_image(args.condition_on_image)
        text = fetch_fashionmnist_text(args.condition_on_text)
        if args.cuda:
            image = image.cuda()
            text = text.cuda()
        mu, logvar = model.infer(image=image, text=text)
        std = logvar.mul(0.5).exp_()
    # sample from uniform gaussian
    sample = Variable(torch.randn(args.n_samples, model.n_latents))
    if args.cuda:
        sample = sample.cuda()
    # sample from particular gaussian by multiplying + adding
    mu = mu.expand_as(sample)
    std = std.expand_as(sample)
    sample = sample.mul(std).add_(mu)
    # generate image and text
    img_recon = F.sigmoid(model.image_decoder(sample)).cpu().data
    txt_recon = F.log_softmax(model.text_decoder(sample), dim=1).cpu().data
    # save image samples to filesystem
    save_image(img_recon.view(args.n_samples, 1, 28, 28),
               './sample_image.png')
    # save text samples to filesystem
    with open('./sample_text.txt', 'w') as fp:
        txt_recon_np = txt_recon.numpy()
        txt_recon_np = np.argmax(txt_recon_np, axis=1).tolist()
        for i, item in enumerate(txt_recon_np):
            fp.write('Text (%d): %s\n' % (i, LABEL_IX_TO_STRING[item]))
| 4,827 | 37.624 | 82 | py |
multimodal-vae-public | multimodal-vae-public-master/fashionmnist/model.py | from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import numpy as np
import torch
import torch.nn as nn
from torch.autograd import Variable
from torch.nn import functional as F
# Map from integer class index (the "text" modality) to the
# human-readable FashionMNIST label.
LABEL_IX_TO_STRING = {0: 'T-shirt/top', 1: 'Trouser', 2: 'Pullover', 3: 'Dress',
                      4: 'Coat', 5: 'Sandal', 6: 'Shirt', 7: 'Sneaker', 8: 'Bag',
                      9: 'Ankle boot'}
class MVAE(nn.Module):
    """Multimodal Variational Autoencoder over images and class labels.

    Holds one encoder/decoder pair per modality and fuses the
    per-modality Gaussian posteriors with a product of experts.

    @param n_latents: integer
                      number of latent dimensions
    """
    def __init__(self, n_latents):
        super(MVAE, self).__init__()
        self.image_encoder = ImageEncoder(n_latents)
        self.image_decoder = ImageDecoder(n_latents)
        self.text_encoder  = TextEncoder(n_latents)
        self.text_decoder  = TextDecoder(n_latents)
        self.experts = ProductOfExperts()
        self.n_latents = n_latents
    def reparametrize(self, mu, logvar):
        """Sample z ~ N(mu, exp(logvar)) via the reparametrization trick.

        At eval time this is deterministic and simply returns the mean.
        """
        if self.training:
            std = logvar.mul(0.5).exp_()
            eps = Variable(std.data.new(std.size()).normal_())
            return eps.mul(std).add_(mu)
        else:
            return mu
    def forward(self, image=None, text=None):
        """Encode whichever modalities are present, sample z, decode both.

        @return: (image logits, text logits, posterior mu, posterior logvar)
        """
        mu, logvar = self.infer(image, text)
        # reparametrization trick to sample
        z = self.reparametrize(mu, logvar)
        # reconstruct inputs based on that gaussian
        img_recon = self.image_decoder(z)
        txt_recon = self.text_decoder(z)
        return img_recon, txt_recon, mu, logvar
    def infer(self, image=None, text=None):
        """Combine the universal prior expert with the experts of the
        modalities that were actually provided."""
        batch_size = image.size(0) if image is not None else text.size(0)
        use_cuda   = next(self.parameters()).is_cuda  # check if CUDA
        mu, logvar = prior_expert((1, batch_size, self.n_latents),
                                  use_cuda=use_cuda)
        if image is not None:
            img_mu, img_logvar = self.image_encoder(image)
            mu     = torch.cat((mu, img_mu.unsqueeze(0)), dim=0)
            logvar = torch.cat((logvar, img_logvar.unsqueeze(0)), dim=0)
        if text is not None:
            txt_mu, txt_logvar = self.text_encoder(text)
            mu     = torch.cat((mu, txt_mu.unsqueeze(0)), dim=0)
            logvar = torch.cat((logvar, txt_logvar.unsqueeze(0)), dim=0)
        # product of experts to combine gaussians
        mu, logvar = self.experts(mu, logvar)
        return mu, logvar
class ImageEncoder(nn.Module):
    """Parametrizes q(z|x).

    Two stride-2 convolutions (28x28 -> 7x7 feature maps) followed by an
    MLP that emits the mean and log-variance of the image posterior.

    @param n_latents: integer
                      number of latent dimensions
    """
    def __init__(self, n_latents):
        super(ImageEncoder, self).__init__()
        self.features = nn.Sequential(
            nn.Conv2d(1, 64, 4, 2, 1, bias=False),
            Swish(),
            nn.Conv2d(64, 128, 4, 2, 1, bias=False),
            Swish())
        self.classifier = nn.Sequential(
            nn.Linear(128 * 7 * 7, 512),
            Swish(),
            nn.Linear(512, n_latents * 2))
        self.n_latents = n_latents
    def forward(self, x):
        """Return (mu, logvar), each of shape (batch, n_latents)."""
        n_latents = self.n_latents
        x = self.features(x)
        x = x.view(x.size(0), -1)
        x = self.classifier(x)
        # first half of the 2*n_latents outputs is mu, second half logvar
        return x[:, :n_latents], x[:, n_latents:]
class ImageDecoder(nn.Module):
    """Parametrizes p(x|z).

    Mirror of ImageEncoder: an MLP upsamples z to a 128x7x7 map, then two
    transposed convolutions expand it back to a 1x28x28 logit image.

    @param n_latents: integer
                      number of latent dimensions
    """
    def __init__(self, n_latents):
        super(ImageDecoder, self).__init__()
        self.n_latents = n_latents
        self.upsampler = nn.Sequential(
            nn.Linear(n_latents, 512),
            Swish(),
            nn.Linear(512, 128 * 7 * 7),
            Swish())
        self.hallucinate = nn.Sequential(
            nn.ConvTranspose2d(128, 64, 4, 2, 1, bias=False),
            Swish(),
            nn.ConvTranspose2d(64, 1, 4, 2, 1, bias=False))
    def forward(self, z):
        """Map latent vectors z to image logits."""
        # the input will be a vector of size |n_latents|
        z = self.upsampler(z)
        z = z.view(-1, 128, 7, 7)
        z = self.hallucinate(z)
        return z  # NOTE: no sigmoid here. See train.py
class TextEncoder(nn.Module):
    """Parametrizes q(z|y).

    Embeds the integer class label (0-9) and pushes it through an MLP to
    produce the text posterior's mean and log-variance.

    @param n_latents: integer
                      number of latent dimensions
    """
    def __init__(self, n_latents):
        super(TextEncoder, self).__init__()
        self.net = nn.Sequential(
            nn.Embedding(10, 512),
            Swish(),
            nn.Linear(512, 512),
            Swish(),
            nn.Linear(512, n_latents * 2))
        self.n_latents = n_latents
    def forward(self, x):
        """Return (mu, logvar) for a batch of integer labels x."""
        n_latents = self.n_latents
        x = self.net(x)
        # first half of the 2*n_latents outputs is mu, second half logvar
        return x[:, :n_latents], x[:, n_latents:]
class TextDecoder(nn.Module):
    """Parametrizes p(y|z).

    A plain MLP from the latent vector to 10-way class logits.

    @param n_latents: integer
                      number of latent dimensions
    """
    def __init__(self, n_latents):
        super(TextDecoder, self).__init__()
        self.net = nn.Sequential(
            nn.Linear(n_latents, 512),
            Swish(),
            nn.Linear(512, 512),
            Swish(),
            nn.Linear(512, 512),
            Swish(),
            nn.Linear(512, 10))
    def forward(self, z):
        """Return unnormalized class logits for latents z."""
        z = self.net(z)
        return z  # NOTE: no softmax here. See train.py
class ProductOfExperts(nn.Module):
    """Combine M independent Gaussian experts into a single Gaussian.

    See https://arxiv.org/pdf/1410.7827.pdf for equations.

    @param mu: M x D for M experts
    @param logvar: M x D for M experts
    """
    def forward(self, mu, logvar, eps=1e-8):
        var = logvar.exp() + eps
        # precision of the i-th Gaussian expert at point x (guarded
        # against division by zero)
        T = 1. / (var + eps)
        total_T = T.sum(dim=0)
        # precision-weighted mean and pooled variance over the M experts
        pd_mu = (mu * T).sum(dim=0) / total_T
        pd_var = 1. / total_T
        pd_logvar = (pd_var + eps).log()
        return pd_mu, pd_logvar
class Swish(nn.Module):
    """Swish activation: x * sigmoid(x)."""
    def forward(self, x):
        return F.sigmoid(x) * x
def prior_expert(size, use_cuda=False):
    """Universal prior expert: a spherical Gaussian N(0, 1).

    @param size: integer
                 dimensionality of Gaussian
    @param use_cuda: boolean [default: False]
                     cast CUDA on variables
    """
    mu = Variable(torch.zeros(size))
    # log-variance of a unit Gaussian is log(1) = 0
    logvar = Variable(torch.zeros(size))
    if use_cuda:
        mu = mu.cuda()
        logvar = logvar.cuda()
    return mu, logvar
| 6,482 | 30.779412 | 82 | py |
multimodal-vae-public | multimodal-vae-public-master/fashionmnist/datasets.py | from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
from torchvision.datasets import MNIST
class FashionMNIST(MNIST):
    """`Fashion-MNIST <https://github.com/zalandoresearch/fashion-mnist>`_ Dataset.

    Subclasses torchvision's MNIST loader and only overrides the download
    URLs; presumably the on-disk file format matches MNIST's IDX layout
    (TODO confirm against the torchvision version in use).

    Args:
        root (string): Root directory of dataset where ``processed/training.pt``
            and ``processed/test.pt`` exist.
        train (bool, optional): If True, creates dataset from ``training.pt``,
            otherwise from ``test.pt``.
        download (bool, optional): If true, downloads the dataset from the internet and
            puts it in root directory. If dataset is already downloaded, it is not
            downloaded again.
        transform (callable, optional): A function/transform that takes in an PIL image
            and returns a transformed version. E.g, ``transforms.RandomCrop``
        target_transform (callable, optional): A function/transform that takes in the
            target and transforms it.
    """
    # gzipped train/test image and label archives, mirroring MNIST's naming
    urls = [
        'http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/train-images-idx3-ubyte.gz',
        'http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/train-labels-idx1-ubyte.gz',
        'http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/t10k-images-idx3-ubyte.gz',
        'http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/t10k-labels-idx1-ubyte.gz',
    ]
| 1,428 | 46.633333 | 96 | py |
multimodal-vae-public | multimodal-vae-public-master/fashionmnist/train.py | from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import os
import sys
import shutil
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.autograd import Variable
from torchvision import transforms
from model import MVAE
from datasets import FashionMNIST
def elbo_loss(recon_image, image, recon_text, text, mu, logvar,
              lambda_image=1.0, lambda_text=1.0, annealing_factor=1):
    """Bimodal ELBO loss function.

    Reconstruction terms are only computed for modalities whose
    prediction AND target are both present; absent modalities
    contribute zero.

    @param recon_image: torch.Tensor
                        reconstructed image (logits)
    @param image: torch.Tensor
                  input image
    @param recon_text: torch.Tensor
                       reconstructed text probabilities
    @param text: torch.Tensor
                 input text (one-hot)
    @param mu: torch.Tensor
               mean of latent distribution
    @param logvar: torch.Tensor
                   log-variance of latent distribution
    @param lambda_image: float [default: 1.0]
                         weight for image BCE
    @param lambda_text: float [default: 1.0]
                        weight for text BCE
    @param annealing_factor: integer [default: 1]
                             multiplier for KL divergence term
    @return ELBO: torch.Tensor
                  evidence lower bound
    """
    image_bce = 0  # absent modalities contribute zero
    text_bce = 0
    if recon_image is not None and image is not None:
        flat_recon = recon_image.view(-1, 1 * 28 * 28)
        flat_image = image.view(-1, 1 * 28 * 28)
        image_bce = binary_cross_entropy_with_logits(
            flat_recon, flat_image).sum(dim=1)
    if recon_text is not None and text is not None:
        text_bce = cross_entropy(recon_text, text).sum(dim=1)
    # Kingma and Welling. Auto-Encoding Variational Bayes. ICLR, 2014
    # https://arxiv.org/abs/1312.6114
    KLD = -0.5 * (1 + logvar - mu.pow(2) - logvar.exp()).sum(dim=1)
    return torch.mean(lambda_image * image_bce
                      + lambda_text * text_bce
                      + annealing_factor * KLD)
def binary_cross_entropy_with_logits(input, target):
    """Sigmoid Activation + Binary Cross Entropy, numerically stable form.

    @param input: torch.Tensor (size N)  unnormalized logits
    @param target: torch.Tensor (size N) targets in [0, 1]
    @return loss: torch.Tensor (size N)  elementwise loss
    """
    if input.size() != target.size():
        raise ValueError("Target size ({}) must be the same as input size ({})".format(
            target.size(), input.size()))
    # max(x, 0) - x*t + log(1 + exp(-|x|)) avoids overflow for large |x|
    neg_abs = -input.abs()
    return input.clamp(min=0) - input * target + (1 + neg_abs.exp()).log()
def cross_entropy(input, target, eps=1e-6):
    """k-Class Cross Entropy (Log Softmax + Log Loss).

    @param input: torch.Tensor (size N x K)  unnormalized logits
    @param target: torch.LongTensor (size N) integer class indexes
    @param eps: error to add (default: 1e-6)
    @return loss: torch.Tensor (size N x K); nonzero only at target index
    """
    if input.size(0) != target.size(0):
        raise ValueError(
            "Target size ({}) must be the same as input size ({})".format(
                target.size(0), input.size(0)))
    log_probs = F.log_softmax(input + eps, dim=1)
    # scatter a 1 at each row's target index to build a one-hot mask
    one_hot = Variable(log_probs.data.new(log_probs.size()).zero_())
    one_hot = one_hot.scatter(1, target.unsqueeze(1), 1)
    return -(one_hot * log_probs)
class AverageMeter(object):
    """Tracks the latest value plus a running sum, count, and mean."""
    def __init__(self):
        self.reset()
    def reset(self):
        """Zero out all running statistics."""
        self.val, self.avg = 0, 0
        self.sum, self.count = 0, 0
    def update(self, val, n=1):
        """Fold in a new observation |val| seen |n| times."""
        self.val = val
        self.count += n
        self.sum += val * n
        self.avg = self.sum / self.count
def save_checkpoint(state, is_best, folder='./', filename='checkpoint.pth.tar'):
    """Serialize |state| to folder/filename; mirror it to
    model_best.pth.tar when |is_best| is set."""
    if not os.path.isdir(folder):
        os.mkdir(folder)
    target = os.path.join(folder, filename)
    torch.save(state, target)
    if is_best:
        shutil.copyfile(target, os.path.join(folder, 'model_best.pth.tar'))
def load_checkpoint(file_path, use_cuda=False):
    """Rebuild an MVAE from a checkpoint file (CPU-mapped unless use_cuda)."""
    if use_cuda:
        checkpoint = torch.load(file_path)
    else:
        # remap GPU-saved storages onto the CPU
        checkpoint = torch.load(
            file_path, map_location=lambda storage, location: storage)
    model = MVAE(checkpoint['n_latents'])
    model.load_state_dict(checkpoint['state_dict'])
    return model
if __name__ == "__main__":
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('--n-latents', type=int, default=64,
                        help='size of the latent embedding [default: 64]')
    parser.add_argument('--batch-size', type=int, default=100, metavar='N',
                        help='input batch size for training [default: 100]')
    parser.add_argument('--epochs', type=int, default=500, metavar='N',
                        help='number of epochs to train [default: 500]')
    parser.add_argument('--annealing-epochs', type=int, default=200, metavar='N',
                        help='number of epochs to anneal KL for [default: 200]')
    parser.add_argument('--lr', type=float, default=1e-3, metavar='LR',
                        help='learning rate [default: 1e-3]')
    parser.add_argument('--log-interval', type=int, default=10, metavar='N',
                        help='how many batches to wait before logging training status [default: 10]')
    parser.add_argument('--lambda-image', type=float, default=1.,
                        help='multipler for image reconstruction [default: 1]')
    parser.add_argument('--lambda-text', type=float, default=10.,
                        help='multipler for text reconstruction [default: 10]')
    parser.add_argument('--cuda', action='store_true', default=False,
                        help='enables CUDA training [default: False]')
    args = parser.parse_args()
    args.cuda = args.cuda and torch.cuda.is_available()
    if not os.path.isdir('./trained_models'):
        os.makedirs('./trained_models')
    train_loader = torch.utils.data.DataLoader(
        FashionMNIST('./data', train=True, download=True, transform=transforms.ToTensor()),
        batch_size=args.batch_size, shuffle=True)
    N_mini_batches = len(train_loader)
    test_loader = torch.utils.data.DataLoader(
        FashionMNIST('./data', train=False, download=True, transform=transforms.ToTensor()),
        batch_size=args.batch_size, shuffle=False)
    model = MVAE(args.n_latents)
    optimizer = optim.Adam(model.parameters(), lr=args.lr)
    if args.cuda:
        model.cuda()
    def train(epoch):
        """One training epoch over joint, image-only, and text-only ELBOs."""
        model.train()
        train_loss_meter = AverageMeter()
        for batch_idx, (image, text) in enumerate(train_loader):
            if epoch < args.annealing_epochs:
                # compute the KL annealing factor for the current mini-batch in the current epoch
                annealing_factor = (float(batch_idx + epoch * N_mini_batches + 1) /
                                    float(args.annealing_epochs * N_mini_batches))
            else:
                # by default the KL annealing factor is unity
                annealing_factor = 1.0
            if args.cuda:
                image = image.cuda()
                text = text.cuda()
            image = Variable(image)
            text = Variable(text)
            batch_size = len(image)
            # refresh the optimizer
            optimizer.zero_grad()
            # pass data through model: joint, image-only, and text-only
            recon_image_1, recon_text_1, mu_1, logvar_1 = model(image, text)
            recon_image_2, recon_text_2, mu_2, logvar_2 = model(image)
            recon_image_3, recon_text_3, mu_3, logvar_3 = model(text=text)
            # compute ELBO for each data combo
            joint_loss = elbo_loss(recon_image_1, image, recon_text_1, text, mu_1, logvar_1,
                                   lambda_image=args.lambda_image, lambda_text=args.lambda_text,
                                   annealing_factor=annealing_factor)
            image_loss = elbo_loss(recon_image_2, image, None, None, mu_2, logvar_2,
                                   lambda_image=args.lambda_image, lambda_text=args.lambda_text,
                                   annealing_factor=annealing_factor)
            text_loss = elbo_loss(None, None, recon_text_3, text, mu_3, logvar_3,
                                  lambda_image=args.lambda_image, lambda_text=args.lambda_text,
                                  annealing_factor=annealing_factor)
            train_loss = joint_loss + image_loss + text_loss
            # NOTE: .data[0] is the pre-0.4 PyTorch scalar access used
            # throughout this file
            train_loss_meter.update(train_loss.data[0], batch_size)
            # compute gradients and take step
            train_loss.backward()
            optimizer.step()
            if batch_idx % args.log_interval == 0:
                print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}\tAnnealing-Factor: {:.3f}'.format(
                    epoch, batch_idx * len(image), len(train_loader.dataset),
                    100. * batch_idx / len(train_loader), train_loss_meter.avg, annealing_factor))
        print('====> Epoch: {}\tLoss: {:.4f}'.format(epoch, train_loss_meter.avg))
    def test(epoch):
        """Evaluate the un-annealed ELBO on the test split."""
        model.eval()
        test_loss_meter = AverageMeter()
        for batch_idx, (image, text) in enumerate(test_loader):
            if args.cuda:
                image = image.cuda()
                text = text.cuda()
            image = Variable(image, volatile=True)
            text = Variable(text, volatile=True)
            batch_size = len(image)
            recon_image_1, recon_text_1, mu_1, logvar_1 = model(image, text)
            recon_image_2, recon_text_2, mu_2, logvar_2 = model(image)
            recon_image_3, recon_text_3, mu_3, logvar_3 = model(text=text)
            joint_loss = elbo_loss(recon_image_1, image, recon_text_1, text, mu_1, logvar_1)
            image_loss = elbo_loss(recon_image_2, image, None, None, mu_2, logvar_2)
            text_loss = elbo_loss(None, None, recon_text_3, text, mu_3, logvar_3)
            test_loss = joint_loss + image_loss + text_loss
            test_loss_meter.update(test_loss.data[0], batch_size)
        print('====> Test Loss: {:.4f}'.format(test_loss_meter.avg))
        return test_loss_meter.avg
    # sys.maxint does not exist on Python 3; float('inf') is a portable
    # "worse than any real loss" starting value.
    best_loss = float('inf')
    for epoch in range(1, args.epochs + 1):
        train(epoch)
        test_loss = test(epoch)
        is_best = test_loss < best_loss
        best_loss = min(test_loss, best_loss)
        # save the best model and current model
        save_checkpoint({
            'state_dict': model.state_dict(),
            'best_loss': best_loss,
            'n_latents': args.n_latents,
            'optimizer' : optimizer.state_dict(),
        }, is_best, folder='./trained_models')
| 10,820 | 39.226766 | 105 | py |
multimodal-vae-public | multimodal-vae-public-master/multimnist/sample.py | from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import sys
import numpy as np
import torch
import torch.nn.functional as F
from torch.autograd import Variable
from torchvision import transforms
from torchvision.utils import save_image
from datasets import MultiMNIST
from train import load_checkpoint
from utils import char_tensor, charlist_tensor
from utils import tensor_to_string
def fetch_multimnist_image(label):
    """Return a random image from the MultiMNIST dataset with label.

    @param label: string
                  a string of up to 4 digits
    @return: torch.autograd.Variable
             MultiMNIST image
    """
    dataset = MultiMNIST('./data', train=False, download=True,
                         transform=transforms.ToTensor(),
                         target_transform=charlist_tensor)
    images = dataset.test_data
    labels = dataset.test_labels
    n_rows = len(images)
    # BUGFIX: the original rebound |images| to [] before the loop and then
    # indexed into it, which always raised; keep matches in their own list.
    matches = []
    for i in range(n_rows):
        image = images[i]
        text = labels[i]
        if tensor_to_string(text.squeeze(0)) == label:
            matches.append(image)
    if len(matches) == 0:
        sys.exit('No images with label (%s) found.' % label)
    # stack (not cat) so that axis 0 indexes whole images and the random
    # choice below picks one complete image
    matches = torch.stack(matches).cpu().numpy()
    ix = np.random.choice(np.arange(matches.shape[0]))
    image = torch.from_numpy(matches[ix]).float()
    image = image.unsqueeze(0)
    return Variable(image, volatile=True)
def fetch_multimnist_text(label):
    """Encode a digit-string label for the text modality.

    @param label: string
                  a string of up to 4 digits
    @return: torch.autograd.Variable
             Variable wrapped around the encoded characters.
    """
    encoded = char_tensor(label).unsqueeze(0)
    return Variable(encoded, volatile=True)
if __name__ == "__main__":
    import os
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('model_path', type=str, help='path to trained model file')
    parser.add_argument('--n-samples', type=int, default=64,
                        help='Number of images and texts to sample [default: 64]')
    # condition sampling on a particular images
    parser.add_argument('--condition-on-image', type=int, default=None,
                        help='If True, generate text conditioned on an image.')
    # condition sampling on a particular text
    parser.add_argument('--condition-on-text', type=int, default=None,
                        help='If True, generate images conditioned on a text.')
    parser.add_argument('--cuda', action='store_true', default=False,
                        help='enables CUDA training')
    args = parser.parse_args()
    args.cuda = args.cuda and torch.cuda.is_available()
    model = load_checkpoint(args.model_path, use_cuda=args.cuda)
    model.eval()
    if args.cuda:
        model.cuda()
    # NOTE: compare against None explicitly -- a label of 0 is falsy, so
    # truthiness tests would silently pick the wrong mode.
    # mode 1: unconditional generation
    if args.condition_on_image is None and args.condition_on_text is None:
        mu = Variable(torch.Tensor([0]))
        std = Variable(torch.Tensor([1]))
        if args.cuda:
            mu = mu.cuda()
            std = std.cuda()
    # mode 2: generate conditioned on image
    elif args.condition_on_image is not None and args.condition_on_text is None:
        # the fetch helpers expect the label as a digit string
        # (char_tensor / tensor_to_string operate on str), so convert
        image = fetch_multimnist_image(str(args.condition_on_image))
        if args.cuda:
            image = image.cuda()
        # model.infer accepts only (image, text); the old call passed an
        # extra positional 1 that collided with the image keyword.
        mu, logvar = model.infer(image=image)
        std = logvar.mul(0.5).exp_()
    # mode 3: generate conditioned on text
    elif args.condition_on_text is not None and args.condition_on_image is None:
        text = fetch_multimnist_text(str(args.condition_on_text))
        if args.cuda:
            text = text.cuda()
        mu, logvar = model.infer(text=text)
        std = logvar.mul(0.5).exp_()
    # mode 4: generate conditioned on image and text
    else:
        image = fetch_multimnist_image(str(args.condition_on_image))
        text = fetch_multimnist_text(str(args.condition_on_text))
        if args.cuda:
            image = image.cuda()
            text = text.cuda()
        mu, logvar = model.infer(image=image, text=text)
        std = logvar.mul(0.5).exp_()
    # sample from uniform gaussian
    sample = Variable(torch.randn(args.n_samples, model.n_latents))
    if args.cuda:
        sample = sample.cuda()
    # sample from particular gaussian by multiplying + adding
    mu = mu.expand_as(sample)
    std = std.expand_as(sample)
    sample = sample.mul(std).add_(mu)
    # generate image and text
    img_recon = F.sigmoid(model.image_decoder(sample)).cpu().data
    # normalize over the character dimension (dim=2): the decoder output is
    # (batch, seq_len, n_characters), so dim=1 would mix sequence positions
    txt_recon = F.log_softmax(model.text_decoder(sample), dim=2).cpu().data
    txt_recon = torch.max(txt_recon, dim=2)[1]
    # save image samples to filesystem
    save_image(img_recon.view(args.n_samples, 1, 50, 50),
               './sample_image.png')
    # save text samples to filesystem (fixed NameError: was `text_recon`)
    with open('./sample_text.txt', 'w') as fp:
        for i in range(txt_recon.size(0)):
            txt_recon_str = tensor_to_string(txt_recon[i])
            fp.write('Text (%d): %s\n' % (i, txt_recon_str))
| 5,196 | 36.121429 | 82 | py |
multimodal-vae-public | multimodal-vae-public-master/multimnist/utils.py | from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import string
import random
import time
import math
import torch
from torch.autograd import Variable
max_length = 4  # max of 4 characters in an image
all_characters = '0123456789'
n_characters = len(all_characters)
# add 2 special characters; b/c we always generate a fixed number
# of characters, we do not need an EOS token
SOS = n_characters        # start-of-sequence token
FILL = n_characters + 1   # placeholder for nothing
n_characters += 2
def char_tensor(string):
    """Turn a string into a (max_length,) LongTensor of character indexes.

    Positions past the end of |string| are padded with FILL.

    @param string: str object
    @return tensor: torch.LongTensor. Not a Variable.
    """
    tensor = torch.ones(max_length).long() * FILL
    # range (not the Python 2-only xrange) so this also runs on Python 3
    for c in range(len(string)):
        tensor[c] = all_characters.index(string[c])
    return tensor
def charlist_tensor(charlist):
    """Turn a list of digit indexes into a padded character tensor."""
    string = ''.join([str(i) for i in charlist])
    return char_tensor(string)
def tensor_to_string(tensor):
    """Inverse of char_tensor: decode a LongTensor of indexes to a string."""
    string = ''
    for i in range(tensor.size(0)):
        top_i = tensor[i]
        string += index_to_char(top_i)
    return string
def index_to_char(top_i):
    """Map one character index back to its printable form."""
    if top_i == SOS:
        return '^'
    # FILL is the default character and renders as nothing
    elif top_i == FILL:
        return ''
    else:
        return all_characters[top_i]
| 1,417 | 23.877193 | 60 | py |
multimodal-vae-public | multimodal-vae-public-master/multimnist/model.py | """This model will be quite similar to mnist/model.py
except we will need to be slightly fancier in the
encoder/decoders for each modality. Likely, we will need
convolutions/deconvolutions and RNNs.
"""
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import torch
import torch.nn as nn
from torch.autograd import Variable
from torch.nn import functional as F
from torch.nn.parameter import Parameter
from utils import n_characters, max_length
from utils import SOS, FILL
class MVAE(nn.Module):
    """Multimodal Variational Autoencoder over images and character strings.

    Holds one encoder/decoder pair per modality and fuses the
    per-modality Gaussian posteriors with a product of experts.

    @param n_latents: integer
                      number of latent dimensions
    """
    def __init__(self, n_latents):
        super(MVAE, self).__init__()
        self.image_encoder = ImageEncoder(n_latents)
        self.image_decoder = ImageDecoder(n_latents)
        self.text_encoder = TextEncoder(n_latents, n_characters, n_hiddens=200,
                                        bidirectional=True)
        self.text_decoder = TextDecoder(n_latents, n_characters, n_hiddens=200)
        self.experts = ProductOfExperts()
        self.n_latents = n_latents
    def reparametrize(self, mu, logvar):
        """Sample z ~ N(mu, exp(logvar)) via the reparametrization trick.

        At eval time this is deterministic and simply returns the mean.
        """
        if self.training:
            std = logvar.mul(0.5).exp_()
            eps = Variable(std.data.new(std.size()).normal_())
            return eps.mul(std).add_(mu)
        else:
            return mu
    def forward(self, image=None, text=None):
        """Encode whichever modalities are present, sample z, decode both.

        @return: (image logits, text logits, posterior mu, posterior logvar)
        """
        mu, logvar = self.infer(image, text)
        # reparametrization trick to sample
        z = self.reparametrize(mu, logvar)
        # reconstruct inputs based on that gaussian
        img_recon = self.image_decoder(z)
        txt_recon = self.text_decoder(z)
        return img_recon, txt_recon, mu, logvar
    def infer(self, image=None, text=None):
        """Combine the universal prior expert with the experts of the
        modalities that were actually provided."""
        batch_size = image.size(0) if image is not None else text.size(0)
        use_cuda = next(self.parameters()).is_cuda  # check if CUDA
        # initialize the universal prior expert
        mu, logvar = prior_expert((1, batch_size, self.n_latents),
                                  use_cuda=use_cuda)
        if image is not None:
            img_mu, img_logvar = self.image_encoder(image)
            mu = torch.cat((mu, img_mu.unsqueeze(0)), dim=0)
            logvar = torch.cat((logvar, img_logvar.unsqueeze(0)), dim=0)
        if text is not None:
            txt_mu, txt_logvar = self.text_encoder(text)
            mu = torch.cat((mu, txt_mu.unsqueeze(0)), dim=0)
            logvar = torch.cat((logvar, txt_logvar.unsqueeze(0)), dim=0)
        # product of experts to combine gaussians
        mu, logvar = self.experts(mu, logvar)
        return mu, logvar
class ImageEncoder(nn.Module):
    """Parametrizes q(z|x).

    This task is quite a bit harder than MNIST so we probably need
    to use an CNN of some form. This will be good to get us ready for
    natural images.

    Four stride-2 convolutions; with 50x50 inputs the spatial size falls
    50 -> 25 -> 12 -> 6 -> 2, matching the 256 * 2 * 2 classifier input.

    @param n_latents: integer
                      size of latent vector
    """
    def __init__(self, n_latents):
        super(ImageEncoder, self).__init__()
        self.features = nn.Sequential(
            nn.Conv2d(1, 32, 4, 2, 1, bias=False),
            Swish(),
            nn.Conv2d(32, 64, 4, 2, 1, bias=False),
            nn.BatchNorm2d(64),
            Swish(),
            nn.Conv2d(64, 128, 4, 2, 1, bias=False),
            nn.BatchNorm2d(128),
            Swish(),
            nn.Conv2d(128, 256, 4, 2, 0, bias=False),
            nn.BatchNorm2d(256),
            Swish())
        self.classifier = nn.Sequential(
            nn.Linear(256 * 2 * 2, 512),
            Swish(),
            nn.Dropout(p=0.1),
            nn.Linear(512, n_latents * 2))
        self.n_latents = n_latents
    def forward(self, x):
        """Return (mu, logvar), each of shape (batch, n_latents)."""
        n_latents = self.n_latents
        x = self.features(x)
        x = x.view(-1, 256 * 2 * 2)
        x = self.classifier(x)
        # first half of the 2*n_latents outputs is mu, second half logvar
        return x[:, :n_latents], x[:, n_latents:]
class ImageDecoder(nn.Module):
    """Parametrizes p(x|z).

    Mirror of ImageEncoder: upsample z to a 256x2x2 map, then four
    transposed convolutions expand it 2 -> 6 -> 12 -> 25 -> 50; the odd
    5-kernel layer is what makes the sizes land exactly on 50x50.

    @param n_latents: integer
                      number of latent dimensions
    """
    def __init__(self, n_latents):
        super(ImageDecoder, self).__init__()
        self.upsample = nn.Sequential(
            nn.Linear(n_latents, 256 * 2 * 2),
            Swish())
        self.hallucinate = nn.Sequential(
            nn.ConvTranspose2d(256, 128, 4, 2, 0, bias=False),
            nn.BatchNorm2d(128),
            Swish(),
            nn.ConvTranspose2d(128, 64, 4, 2, 1, bias=False),
            nn.BatchNorm2d(64),
            Swish(),
            nn.ConvTranspose2d(64, 32, 5, 2, 1, bias=False),
            nn.BatchNorm2d(32),
            Swish(),
            nn.ConvTranspose2d(32, 1, 4, 2, 1, bias=False))
    def forward(self, z):
        """Map latent vectors z to 1x50x50 image logits."""
        # the input will be a vector of size |n_latents|
        z = self.upsample(z)
        z = z.view(-1, 256, 2, 2)
        z = self.hallucinate(z)
        return z  # NOTE: no sigmoid here. See train.py
class TextEncoder(nn.Module):
    """Parametrizes q(z|y).

    We train an embedding layer from the 10 digit space
    to move to a continuous domain. The GRU is optionally
    bidirectional.

    @param n_latents: integer
                      size of latent vector
    @param n_characters: integer
                         number of possible characters (10 for MNIST)
    @param n_hiddens: integer [default: 200]
                      number of hidden units in GRU
    @param bidirectional: boolean [default: True]
                          hyperparameter for GRU.
    """
    def __init__(self, n_latents, n_characters, n_hiddens=200, bidirectional=True):
        super(TextEncoder, self).__init__()
        self.embed = nn.Embedding(n_characters, n_hiddens)
        self.gru = nn.GRU(n_hiddens, n_hiddens, 1, dropout=0.1,
                          bidirectional=bidirectional)
        self.h2p = nn.Linear(n_hiddens, n_latents * 2)  # hiddens to parameters
        self.n_latents = n_latents
        self.n_hiddens = n_hiddens
        self.bidirectional = bidirectional
    def forward(self, x):
        """Return (mu, logvar) for a batch of character-index sequences x."""
        n_hiddens = self.n_hiddens
        n_latents = self.n_latents
        x = self.embed(x)
        x = x.transpose(0, 1)  # GRU expects (seq_len, batch, ...)
        x, h = self.gru(x, None)
        x = x[-1]  # take only the last value
        if self.bidirectional:
            x = x[:, :n_hiddens] + x[:, n_hiddens:]  # sum bidirectional outputs
        x = self.h2p(x)
        # first half of the 2*n_latents outputs is mu, second half logvar
        return x[:, :n_latents], x[:, n_latents:]
class TextDecoder(nn.Module):
    """Parametrizes p(y|z).

    GRU for text decoding. Given a start token, sample a character
    via an RNN and repeat for a fixed length.

    @param n_latents: integer
                      size of latent vector
    @param n_characters: integer
                         size of characters (10 for MNIST)
    @param n_hiddens: integer [default: 200]
                      number of hidden units in GRU
    """
    def __init__(self, n_latents, n_characters, n_hiddens=200):
        super(TextDecoder, self).__init__()
        self.embed = nn.Embedding(n_characters, n_hiddens)
        self.z2h = nn.Linear(n_latents, n_hiddens)
        # the latent z is concatenated to both the GRU input and its output
        self.gru = nn.GRU(n_hiddens + n_latents, n_hiddens, 2, dropout=0.1)
        self.h2o = nn.Linear(n_hiddens + n_latents, n_characters)
        self.n_latents = n_latents
        self.n_characters = n_characters
    def forward(self, z):
        """Greedily decode max_length steps of character logits from z.

        @return: (batch_size, max_length, n_characters) unnormalized logits.
        """
        n_latents = self.n_latents
        n_characters = self.n_characters
        batch_size = z.size(0)
        # first input character is SOS
        c_in = Variable(torch.LongTensor([SOS]).repeat(batch_size))
        # store output word here
        words = Variable(torch.zeros(batch_size, max_length, n_characters))
        if z.is_cuda:
            c_in = c_in.cuda()
            words = words.cuda()
        # get hiddens from latents (one copy per GRU layer)
        h = self.z2h(z).unsqueeze(0).repeat(2, 1, 1)
        # loop through n_steps and generate characters
        # NOTE: range (not the Python 2-only xrange) so this also runs on Python 3
        for i in range(max_length):
            c_out, h = self.step(i, z, c_in, h)
            # feed the greedy argmax back in as the next input character
            sample = torch.max(F.log_softmax(c_out, dim=1), dim=1)[1]
            words[:, i] = c_out
            c_in = sample
        return words  # (batch_size, seq_len, ...)
    def step(self, ix, z, c_in, h):
        """Run one GRU step: embed c_in, append z, emit character logits."""
        c_in = swish(self.embed(c_in))
        c_in = torch.cat((c_in, z), dim=1)
        c_in = c_in.unsqueeze(0)
        c_out, h = self.gru(c_in, h)
        c_out = c_out.squeeze(0)
        c_out = torch.cat((c_out, z), dim=1)
        c_out = self.h2o(c_out)
        return c_out, h  # NOTE: no softmax here. See train.py
class ProductOfExperts(nn.Module):
    """Combine M independent Gaussian experts into a single Gaussian.

    See https://arxiv.org/pdf/1410.7827.pdf for equations.

    @param mu: M x D for M experts
    @param logvar: M x D for M experts
    """
    def forward(self, mu, logvar, eps=1e-8):
        var = logvar.exp() + eps
        # precision of the i-th Gaussian expert at point x
        T = 1. / var
        total_T = T.sum(dim=0)
        # precision-weighted mean and pooled variance over the M experts
        pd_mu = (mu * T).sum(dim=0) / total_T
        pd_var = 1. / total_T
        pd_logvar = pd_var.log()
        return pd_mu, pd_logvar
class Swish(nn.Module):
    """Swish activation: x * sigmoid(x)."""
    def forward(self, x):
        return F.sigmoid(x) * x
def swish(x):
    """Functional form of the Swish activation: x * sigmoid(x)."""
    return F.sigmoid(x) * x
def prior_expert(size, use_cuda=False):
    """Universal prior expert: a spherical Gaussian N(0, 1).

    @param size: integer
                 dimensionality of Gaussian
    @param use_cuda: boolean [default: False]
                     cast CUDA on variables
    """
    mu = Variable(torch.zeros(size))
    # log-variance of a unit Gaussian is log(1) = 0
    logvar = Variable(torch.zeros(size))
    if use_cuda:
        mu = mu.cuda()
        logvar = logvar.cuda()
    return mu, logvar
| 9,790 | 34.219424 | 83 | py |
multimodal-vae-public | multimodal-vae-public-master/multimnist/datasets.py | """
This script generates a dataset similar to the MultiMNIST dataset
described in [1]. However, we remove any translation.
[1] Eslami, SM Ali, et al. "Attend, infer, repeat: Fast scene
understanding with generative models." Advances in Neural Information
Processing Systems. 2016.
"""
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import os
import sys
import random
import numpy as np
import numpy.random as npr
from PIL import Image
from random import shuffle
from scipy.misc import imresize
import torch
import torchvision.datasets as dset
from torch.utils.data.dataset import Dataset
class MultiMNIST(Dataset):
    """Images with 0 to 4 digits of non-overlapping MNIST numbers.

    @param root: string
        path to dataset root
    @param train: boolean [default: True]
        whether to return training examples or testing examples
    @param transform: ?torchvision.Transforms
        optional function to apply to training inputs
    @param target_transform: ?torchvision.Transforms
        optional function to apply to training outputs
    """
    # filenames of the pre-generated torch archives (written by make_dataset)
    processed_folder = 'multimnist'
    training_file = 'training.pt'
    test_file = 'test.pt'

    def __init__(self, root, train=True, transform=None, target_transform=None, download=False):
        self.root = os.path.expanduser(root)
        self.transform = transform
        self.target_transform = target_transform
        self.train = train  # training set or test set
        if download:
            self.download()
        if not self._check_exists():
            raise RuntimeError('Dataset not found.' +
                               ' You can use download=True to download it')
        # eagerly load the whole chosen split as (images, labels)
        if self.train:
            self.train_data, self.train_labels = torch.load(
                os.path.join(self.root, self.processed_folder, self.training_file))
        else:
            self.test_data, self.test_labels = torch.load(
                os.path.join(self.root, self.processed_folder, self.test_file))

    def __getitem__(self, index):
        """
        Args:
            index (int): Index
        Returns:
            tuple: (image, target) where target is index of the target class.
        """
        if self.train:
            img, target = self.train_data[index], self.train_labels[index]
        else:
            img, target = self.test_data[index], self.test_labels[index]
        # doing this so that it is consistent with all other datasets
        # to return a PIL Image
        img = Image.fromarray(img.numpy(), mode='L')
        if self.transform is not None:
            img = self.transform(img)
        if self.target_transform is not None:
            target = self.target_transform(target)
        return img, target

    def __len__(self):
        if self.train:
            return len(self.train_data)
        else:
            return len(self.test_data)

    def _check_exists(self):
        # both split archives must be present for the dataset to count as built
        return os.path.exists(os.path.join(self.root, self.processed_folder, self.training_file)) and \
            os.path.exists(os.path.join(self.root, self.processed_folder, self.test_file))

    def download(self):
        # generates (rather than downloads) the MultiMNIST archives if missing
        if self._check_exists():
            return
        make_dataset(self.root, self.processed_folder,
                     self.training_file, self.test_file)
# -- code for generating MultiMNIST torch objects. --
# INSTRUCTIONS: run this file.
def sample_one(canvas_size, mnist, resize=True, translate=True):
    """Draw one random MNIST digit, optionally rescale it, and place it on a
    blank canvas_size x canvas_size canvas.

    @param canvas_size: integer, side length of the square canvas
    @param mnist: dict with 'digits' (N x 28 x 28 array) and 'labels'
    @param resize: bool, randomly rescale the digit if True
    @param translate: bool, place the digit at a random offset if True
    @return: (positioned digit image, label)
    """
    idx = np.random.randint(mnist['digits'].shape[0])
    digit = mnist['digits'][idx]
    label = mnist['labels'][idx]
    if resize:  # resize only if user specified
        # random scale factor centered near 1.3
        scale = 0.1 * np.random.randn() + 1.3
        glyph = imresize(digit, 1. / scale)
    else:
        glyph = digit
    side = glyph.shape[0]
    assert side == glyph.shape[1]
    slack = canvas_size - side
    if translate:  # translate only if user specified
        top = np.random.randint(0, slack)
        left = np.random.randint(0, slack)
    else:
        # center the digit on the canvas
        top = slack // 2
        left = slack // 2
    margins = ((top, slack - top), (left, slack - left))
    return np.pad(glyph, margins, 'constant', constant_values=0), label
def sample_multi(num_digits, canvas_size, mnist, resize=True, translate=True):
    """Compose num_digits randomly placed digits onto one canvas.

    Retries from scratch whenever two digits overlap; overlap is detected
    crudely by any pixel sum exceeding 255 (digits are uint8 images).
    """
    canvas = np.zeros((canvas_size, canvas_size))
    labels = []
    for _ in range(num_digits):
        digit_img, digit_label = sample_one(canvas_size, mnist, resize=resize,
                                            translate=translate)
        canvas += digit_img
        labels.append(digit_label)
    if np.max(canvas) > 255:
        # overlap detected: throw the whole canvas away and resample
        return sample_multi(num_digits, canvas_size, mnist,
                            resize=resize, translate=translate)
    return canvas, labels
def mk_dataset(n, mnist, min_digits, max_digits, canvas_size,
               resize=True, translate=True):
    """Build n multi-digit canvases.

    @return: (uint8 array of images, list of per-image label lists)
    """
    images, label_lists = [], []
    for _ in range(n):
        # digits-per-image drawn uniformly from [min_digits, max_digits]
        count = np.random.randint(min_digits, max_digits + 1)
        canvas, labels = sample_multi(count, canvas_size, mnist,
                                      resize=resize, translate=translate)
        images.append(canvas)
        label_lists.append(labels)
    return np.array(images, dtype=np.uint8), label_lists
def load_mnist():
    """Fetch raw MNIST digits and labels for both splits.

    Uses torchvision's MNIST downloader; the DataLoaders are only handles
    to the underlying dataset tensors. Digits are returned as numpy arrays,
    labels as the torchvision label tensors.
    """
    train_loader = torch.utils.data.DataLoader(
        dset.MNIST(root='./data', train=True, download=True))
    test_loader = torch.utils.data.DataLoader(
        dset.MNIST(root='./data', train=False, download=True))
    train_data = {
        'digits': train_loader.dataset.train_data.numpy(),
        'labels': train_loader.dataset.train_labels
    }
    test_data = {
        'digits': test_loader.dataset.test_data.numpy(),
        'labels': test_loader.dataset.test_labels
    }
    return train_data, test_data
def make_dataset(root, folder, training_file, test_file, min_digits=0, max_digits=2,
                 resize=True, translate=True):
    """Generate the MultiMNIST dataset and save both splits as torch archives.

    @param root/folder: output directory components
    @param training_file/test_file: archive filenames
    @param min_digits/max_digits: inclusive range of digits per image
    @param resize/translate: passed through to sample_one
    """
    if not os.path.isdir(os.path.join(root, folder)):
        os.makedirs(os.path.join(root, folder))
    # fixed seed so the generated dataset is reproducible across runs
    np.random.seed(681307)
    train_mnist, test_mnist = load_mnist()
    train_x, train_y = mk_dataset(60000, train_mnist, min_digits, max_digits, 50,
                                  resize=resize, translate=translate)
    test_x, test_y = mk_dataset(10000, test_mnist, min_digits, max_digits, 50,
                                resize=resize, translate=translate)
    train_x = torch.from_numpy(train_x).byte()
    test_x = torch.from_numpy(test_x).byte()
    training_set = (train_x, train_y)
    test_set = (test_x, test_y)
    with open(os.path.join(root, folder, training_file), 'wb') as f:
        torch.save(training_set, f)
    with open(os.path.join(root, folder, test_file), 'wb') as f:
        torch.save(test_set, f)
def sample_one_fixed(canvas_size, mnist, pad_l, pad_r, scale=1.3):
    """Like sample_one, but with a fixed scale and a caller-supplied
    (pad_l, pad_r) position instead of random resizing/placement."""
    idx = np.random.randint(mnist['digits'].shape[0])
    glyph = imresize(mnist['digits'][idx], 1. / scale)
    label = mnist['labels'][idx]
    side = glyph.shape[0]
    assert side == glyph.shape[1]
    slack = canvas_size - side
    margins = ((pad_l, slack - pad_l), (pad_r, slack - pad_r))
    return np.pad(glyph, margins, 'constant', constant_values=0), label
def sample_multi_fixed(num_digits, canvas_size, mnist, reverse=False,
                       scramble=False, no_repeat=False):
    """Compose digits at fixed corner positions (no random placement).

    @param reverse: with probability 0.5, reverse the label order
    @param scramble: randomly shuffle the label order
    @param no_repeat: resample digits until all labels are distinct
    """
    canvas = np.zeros((canvas_size, canvas_size))
    labels = []
    # fixed (pad_l, pad_r) slots for up to four digits
    pads = [(4, 4), (4, 23), (23, 4), (23, 23)]
    for i in range(num_digits):
        if no_repeat:  # keep trying to generate examples that are
                       # not already in previously generated labels
            while True:
                positioned_digit, label = sample_one_fixed(
                    canvas_size, mnist, pads[i][0], pads[i][1])
                if label not in labels:
                    break
        else:
            positioned_digit, label = sample_one_fixed(
                canvas_size, mnist, pads[i][0], pads[i][1])
        canvas += positioned_digit
        labels.append(label)
    if reverse and random.random() > 0.5:
        labels = labels[::-1]
    if scramble:
        random.shuffle(labels)
    # Crude check for overlapping digits: overlapping uint8 digits push
    # pixel sums past 255, so discard and resample the whole canvas.
    if np.max(canvas) > 255:
        return sample_multi_fixed(num_digits, canvas_size, mnist, reverse=reverse,
                                  scramble=scramble, no_repeat=no_repeat)
    else:
        return canvas, labels
def mk_dataset_fixed(n, mnist, min_digits, max_digits, canvas_size,
                     reverse=False, scramble=False, no_repeat=False):
    """Build n fixed-position multi-digit canvases (see sample_multi_fixed).

    @return: (uint8 array of images, list of per-image label lists)
    """
    images, label_lists = [], []
    for _ in range(n):
        count = np.random.randint(min_digits, max_digits + 1)
        canvas, labels = sample_multi_fixed(count, canvas_size, mnist, reverse=reverse,
                                            scramble=scramble, no_repeat=no_repeat)
        images.append(canvas)
        label_lists.append(labels)
    return np.array(images, dtype=np.uint8), label_lists
def make_dataset_fixed(root, folder, training_file, test_file,
                       min_digits=0, max_digits=3, reverse=False,
                       scramble=False, no_repeat=False):
    """Generate the fixed-layout MultiMNIST variant and save both splits.

    Same as make_dataset, but digits occupy fixed corner slots and the label
    order can be reversed/scrambled (see sample_multi_fixed).
    """
    if not os.path.isdir(os.path.join(root, folder)):
        os.makedirs(os.path.join(root, folder))
    # fixed seed so the generated dataset is reproducible across runs
    np.random.seed(681307)
    train_mnist, test_mnist = load_mnist()
    train_x, train_y = mk_dataset_fixed(60000, train_mnist, min_digits, max_digits, 50,
                                        reverse=reverse, scramble=scramble, no_repeat=no_repeat)
    test_x, test_y = mk_dataset_fixed(10000, test_mnist, min_digits, max_digits, 50,
                                      reverse=reverse, scramble=scramble, no_repeat=no_repeat)
    train_x = torch.from_numpy(train_x).byte()
    test_x = torch.from_numpy(test_x).byte()
    training_set = (train_x, train_y)
    test_set = (test_x, test_y)
    with open(os.path.join(root, folder, training_file), 'wb') as f:
        torch.save(training_set, f)
    with open(os.path.join(root, folder, test_file), 'wb') as f:
        torch.save(test_set, f)
if __name__ == "__main__":
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('--min-digits', type=int, default=0,
                        help='minimum number of digits to add to an image')
    parser.add_argument('--max-digits', type=int, default=4,
                        help='maximum number of digits to add to an image')
    parser.add_argument('--no-resize', action='store_true', default=False,
                        help='if True, fix the image to be MNIST size')
    parser.add_argument('--no-translate', action='store_true', default=False,
                        help='if True, fix the image to be in the center')
    parser.add_argument('--fixed', action='store_true', default=False,
                        help='If True, ignore resize/translate options and generate')
    parser.add_argument('--scramble', action='store_true', default=False,
                        help='If True, scramble labels and generate. Only does something if fixed is True.')
    parser.add_argument('--reverse', action='store_true', default=False,
                        help='If True, reverse flips the labels i.e. 4321 instead of 1234 with 0.5 probability.')
    parser.add_argument('--no-repeat', action='store_true', default=False,
                        help='If True, do not generate images with multiple of the same label.')
    args = parser.parse_args()
    args.resize = not args.no_resize
    args.translate = not args.no_translate

    # the fixed-layout label options only make sense together with --fixed
    if args.no_repeat and not args.fixed:
        raise Exception('Must have --fixed if --no-repeat is supplied.')
    if args.scramble and not args.fixed:
        raise Exception('Must have --fixed if --scramble is supplied.')
    if args.reverse and not args.fixed:
        raise Exception('Must have --fixed if --reverse is supplied.')
    if args.reverse and args.scramble:
        # message fixed to name the real flags; the old text referred to
        # nonexistent --reversed/--scrambling options
        print('Found --reverse and --scramble. Overriding --reverse.')
        args.reverse = False

    # Generate the training set and dump it to disk. (Note, this will
    # always generate the same data, else error out.)
    if args.fixed:
        make_dataset_fixed('./data', 'multimnist', 'training.pt', 'test.pt',
                           min_digits=args.min_digits, max_digits=args.max_digits,
                           reverse=args.reverse, scramble=args.scramble,
                           no_repeat=args.no_repeat)
    else:  # if not fixed, then make classic MultiMNIST dataset
        # VAEs in general have trouble handling translation and rotation,
        # likely resulting in blurry reconstructions without additional
        # attention mechanisms. See AIR [1].
        make_dataset('./data', 'multimnist', 'training.pt', 'test.pt',
                     min_digits=args.min_digits, max_digits=args.max_digits,
                     resize=args.resize, translate=args.translate)
| 13,354 | 37.93586 | 113 | py |
multimodal-vae-public | multimodal-vae-public-master/multimnist/train.py | from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import os
import sys
import shutil
from tqdm import tqdm
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.autograd import Variable
from torchvision import transforms
from model import MVAE
from datasets import MultiMNIST
from utils import charlist_tensor
def elbo_loss(recon_image, image, recon_text, text, mu, logvar,
              lambda_image=1.0, lambda_text=1.0, annealing_factor=1):
    """Bimodal ELBO loss function.

    @param recon_image: torch.Tensor
        reconstructed image
    @param image: torch.Tensor
        input image
    @param recon_text: torch.Tensor
        reconstructed text probabilities
    @param text: torch.Tensor
        input text (one-hot)
    @param mu: torch.Tensor
        mean of latent distribution
    @param logvar: torch.Tensor
        log-variance of latent distribution
    @param lambda_image: float [default: 1.0]
        weight for image BCE
    @param lambda_text: float [default: 1.0]
        weight for text BCE
    @param annealing_factor: integer [default: 1]
        multiplier for KL divergence term
    @return ELBO: torch.Tensor
        evidence lower bound
    """
    image_bce, text_bce = 0, 0  # default params; either modality may be absent
    if recon_image is not None and image is not None:
        # per-sample image term, summed over the flattened 1x50x50 pixels
        image_bce = torch.sum(binary_cross_entropy_with_logits(
            recon_image.view(-1, 1 * 50 * 50),
            image.view(-1, 1 * 50 * 50)), dim=1)
    if recon_text is not None and text is not None:
        batch_size, n_digits = recon_text.size(0), recon_text.size(1)
        # flatten (batch, digits, classes) -> (batch*digits, classes)
        recon_text = recon_text.view(-1, recon_text.size(2))
        text = text.view(-1)
        # sum over the different classes
        text_bce = torch.sum(cross_entropy(recon_text, text), dim=1)
        text_bce = text_bce.view(batch_size, n_digits)
        # sum over the number of digits
        text_bce = torch.sum(text_bce, dim=1)
    # Kingma and Welling. Auto-Encoding Variational Bayes. ICLR, 2014
    # https://arxiv.org/abs/1312.6114
    KLD = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp(), dim=1)
    ELBO = torch.mean(lambda_image * image_bce + lambda_text * text_bce
                      + annealing_factor * KLD)
    return ELBO
def binary_cross_entropy_with_logits(input, target):
    """Numerically stable sigmoid activation + binary cross entropy.

    Uses the identity max(x, 0) - x*t + log(1 + exp(-|x|)), which avoids
    overflowing exp() for large-magnitude logits.

    @param input: torch.Tensor (size N) -- raw logits
    @param target: torch.Tensor (size N) -- binary targets
    @return loss: torch.Tensor (size N) -- elementwise loss (no reduction)
    """
    if target.size() != input.size():
        raise ValueError("Target size ({}) must be the same as input size ({})".format(
            target.size(), input.size()))
    hinge = torch.clamp(input, 0)
    softplus = torch.log(1 + torch.exp(-torch.abs(input)))
    return hinge - input * target + softplus
def cross_entropy(input, target, eps=1e-6):
    """k-class cross entropy (log softmax + log loss), un-reduced.

    Returns an N x K tensor that is zero everywhere except the target index
    of each row, so summing over dim 1 yields the per-sample loss.

    @param input: torch.Tensor (size N x K) -- raw logits
    @param target: torch.Tensor (size N) -- integer class indices
    @param eps: error to add (default: 1e-6)
    @return loss: torch.Tensor (size N x K)
    """
    if target.size(0) != input.size(0):
        raise ValueError(
            "Target size ({}) must be the same as input size ({})".format(
                target.size(0), input.size(0)))
    # adding the constant eps to every logit leaves log-softmax unchanged
    log_probs = F.log_softmax(input + eps, dim=1)
    one_hot = Variable(log_probs.data.new(log_probs.size()).zero_())
    one_hot = one_hot.scatter(1, target.unsqueeze(1), 1)
    return -(one_hot * log_probs)
class AverageMeter(object):
    """Tracks the most recent value and a running (weighted) average."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Clear all statistics."""
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        """Record `val` observed `n` times and refresh the running mean."""
        self.val = val
        self.count += n
        self.sum += val * n
        self.avg = self.sum / self.count
def save_checkpoint(state, is_best, folder='./', filename='checkpoint.pth.tar'):
    """Serialize `state` to folder/filename; if `is_best`, also copy it to
    model_best.pth.tar so the best model so far is always retrievable."""
    if not os.path.isdir(folder):
        os.mkdir(folder)
    torch.save(state, os.path.join(folder, filename))
    if is_best:
        shutil.copyfile(os.path.join(folder, filename),
                        os.path.join(folder, 'model_best.pth.tar'))
def load_checkpoint(file_path, use_cuda=False):
    """Rebuild an MVAE from a checkpoint written by save_checkpoint.

    When use_cuda is False, tensors are remapped onto CPU storage so
    GPU-trained checkpoints still load on CPU-only machines.
    """
    checkpoint = torch.load(file_path) if use_cuda else \
        torch.load(file_path, map_location=lambda storage, location: storage)
    model = MVAE(checkpoint['n_latents'])
    model.load_state_dict(checkpoint['state_dict'])
    return model
if __name__ == "__main__":
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('--n-latents', type=int, default=64,
                        help='size of the latent embedding [default: 64]')
    parser.add_argument('--batch-size', type=int, default=100, metavar='N',
                        help='input batch size for training [default: 100]')
    parser.add_argument('--epochs', type=int, default=500, metavar='N',
                        help='number of epochs to train [default: 500]')
    parser.add_argument('--annealing-epochs', type=int, default=200, metavar='N',
                        help='number of epochs to anneal KL for [default: 200]')
    parser.add_argument('--lr', type=float, default=1e-3, metavar='LR',
                        help='learning rate [default: 1e-3]')
    parser.add_argument('--log-interval', type=int, default=10, metavar='N',
                        help='how many batches to wait before logging training status [default: 10]')
    parser.add_argument('--lambda-image', type=float, default=1.,
                        help='multipler for image reconstruction [default: 1]')
    parser.add_argument('--lambda-text', type=float, default=10.,
                        help='multipler for text reconstruction [default: 10]')
    parser.add_argument('--cuda', action='store_true', default=False,
                        help='enables CUDA training [default: False]')
    args = parser.parse_args()
    # only use CUDA when requested AND actually available on this machine
    args.cuda = args.cuda and torch.cuda.is_available()

    if not os.path.isdir('./trained_models'):
        os.makedirs('./trained_models')

    # target_transform=charlist_tensor converts each label list to a tensor
    # (presumably index-encoded; see utils.charlist_tensor -- confirm)
    train_loader = torch.utils.data.DataLoader(
        MultiMNIST('./data', train=True, download=True, transform=transforms.ToTensor(),
                   target_transform=charlist_tensor),
        batch_size=args.batch_size, shuffle=True)
    # used by train() to compute the per-batch KL annealing factor
    N_mini_batches = len(train_loader)
    test_loader = torch.utils.data.DataLoader(
        MultiMNIST('./data', train=False, download=True, transform=transforms.ToTensor(),
                   target_transform=charlist_tensor),
        batch_size=args.batch_size, shuffle=False)

    model = MVAE(args.n_latents)
    optimizer = optim.Adam(model.parameters(), lr=args.lr)
    if args.cuda:
        model.cuda()
def train(epoch):
    """Run one training epoch with KL annealing; logs every --log-interval batches."""
    model.train()
    train_loss_meter = AverageMeter()
    # NOTE: is_paired is 1 if the example is paired
    for batch_idx, (image, text) in enumerate(train_loader):
        if epoch < args.annealing_epochs:
            # compute the KL annealing factor for the current mini-batch in the current epoch
            annealing_factor = (float(batch_idx + (epoch - 1) * N_mini_batches + 1) /
                                float(args.annealing_epochs * N_mini_batches))
        else:
            # by default the KL annealing factor is unity
            annealing_factor = 1.0
        if args.cuda:
            image = image.cuda()
            text = text.cuda()
        image = Variable(image)
        text = Variable(text)
        batch_size = len(image)
        # refresh the optimizer
        optimizer.zero_grad()
        # pass data through model three ways (joint, image-only, text-only)
        # so the posterior is trained for every modality subset
        recon_image_1, recon_text_1, mu_1, logvar_1 = model(image, text)
        recon_image_2, recon_text_2, mu_2, logvar_2 = model(image)
        recon_image_3, recon_text_3, mu_3, logvar_3 = model(text=text)
        # compute ELBO for each data combo
        joint_loss = elbo_loss(recon_image_1, image, recon_text_1, text, mu_1, logvar_1,
                               lambda_image=args.lambda_image, lambda_text=args.lambda_text,
                               annealing_factor=annealing_factor)
        image_loss = elbo_loss(recon_image_2, image, None, None, mu_2, logvar_2,
                               lambda_image=args.lambda_image, lambda_text=args.lambda_text,
                               annealing_factor=annealing_factor)
        text_loss = elbo_loss(None, None, recon_text_3, text, mu_3, logvar_3,
                              lambda_image=args.lambda_image, lambda_text=args.lambda_text,
                              annealing_factor=annealing_factor)
        train_loss = joint_loss + image_loss + text_loss
        # NOTE(review): .data[0] is the pre-0.4 PyTorch idiom; on >=0.4 use .item()
        train_loss_meter.update(train_loss.data[0], batch_size)
        train_loss.backward()
        optimizer.step()
        if batch_idx % args.log_interval == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}\tAnnealing-Factor: {:.3f}'.format(
                epoch, batch_idx * len(image), len(train_loader.dataset),
                100. * batch_idx / len(train_loader), train_loss_meter.avg, annealing_factor))
    print('====> Epoch: {}\tLoss: {:.4f}'.format(epoch, train_loss_meter.avg))
def test(epoch):
    """Evaluate the three ELBO terms on the test split (no KL annealing)."""
    model.eval()
    test_loss_meter = AverageMeter()
    for batch_idx, (image, text) in enumerate(test_loader):
        if args.cuda:
            image = image.cuda()
            text = text.cuda()
        # volatile=True disables autograd bookkeeping (pre-0.4 PyTorch idiom)
        image = Variable(image, volatile=True)
        text = Variable(text, volatile=True)
        batch_size = len(image)
        recon_image_1, recon_text_1, mu_1, logvar_1 = model(image, text)
        recon_image_2, recon_text_2, mu_2, logvar_2 = model(image)
        recon_image_3, recon_text_3, mu_3, logvar_3 = model(text=text)
        joint_loss = elbo_loss(recon_image_1, image, recon_text_1, text, mu_1, logvar_1)
        image_loss = elbo_loss(recon_image_2, image, None, None, mu_2, logvar_2)
        text_loss = elbo_loss(None, None, recon_text_3, text, mu_3, logvar_3)
        test_loss = joint_loss + image_loss + text_loss
        test_loss_meter.update(test_loss.data[0], batch_size)
    print('====> Test Loss: {:.4f}'.format(test_loss_meter.avg))
    return test_loss_meter.avg
# Track the best (lowest) test loss seen so far. float('inf') replaces
# sys.maxint, which was removed in Python 3 and would crash there; it also
# compares correctly against float losses on both Python versions.
best_loss = float('inf')
for epoch in range(1, args.epochs + 1):
    train(epoch)
    test_loss = test(epoch)
    is_best = test_loss < best_loss
    best_loss = min(test_loss, best_loss)
    # save the best model and current model
    save_checkpoint({
        'state_dict': model.state_dict(),
        'best_loss': best_loss,
        'n_latents': args.n_latents,
        'optimizer' : optimizer.state_dict(),
    }, is_best, folder='./trained_models')
| 11,314 | 39.555556 | 105 | py |
multimodal-vae-public | multimodal-vae-public-master/celeba/sample.py | from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import numpy as np
import torch
import torch.nn.functional as F
from torch.autograd import Variable
from torchvision import transforms
from torchvision.utils import save_image
from train import load_checkpoint
from datasets import ATTR_IX_TO_KEEP, N_ATTRS
from datasets import ATTR_TO_IX_DICT, IX_TO_ATTR_DICT
from datasets import tensor_to_attributes
from datasets import CelebAttributes
def fetch_celeba_image(attr_str):
    """Return a random test-split CelebA image that has the given attribute.

    @param attr_str: string
        name of the attribute (see ATTR_TO_IX_DICT)
    @return: torch.autograd.Variable
        CelebA image, resized/cropped to 64x64 with a leading batch dim
    """
    loader = torch.utils.data.DataLoader(
        CelebAttributes(
            partition='test',
            image_transform=transforms.Compose([transforms.Resize(64),
                                                transforms.CenterCrop(64),
                                                transforms.ToTensor()])),
        batch_size=128, shuffle=False)
    # materialize the whole test split so we can filter by attribute
    images, attrs = [], []
    for batch_idx, (image, attr) in enumerate(loader):
        images.append(image)
        attrs.append(attr)
    images = torch.cat(images).cpu().numpy()
    attrs = torch.cat(attrs).cpu().numpy()
    # column of the attribute within the kept-attribute vector
    attr_ix = ATTR_IX_TO_KEEP.index(ATTR_TO_IX_DICT[attr_str])
    images = images[attrs[:, attr_ix] == 1]
    # pick one matching image uniformly at random
    image = images[np.random.choice(np.arange(images.shape[0]))]
    image = torch.from_numpy(image).float()
    image = image.unsqueeze(0)
    return Variable(image, volatile=True)
def fetch_celeba_attrs(attr_str):
    """Build a one-hot attribute vector with only `attr_str` switched on.

    @param attr_str: string
        name of the attribute (see ATTR_TO_IX_DICT)
    @return: torch.autograd.Variable
        1 x N_ATTRS one-hot attribute vector
    """
    # position of the attribute within the kept-attribute vector
    hot_ix = ATTR_IX_TO_KEEP.index(ATTR_TO_IX_DICT[attr_str])
    attrs = torch.zeros(N_ATTRS)
    attrs[hot_ix] = 1
    return Variable(attrs.unsqueeze(0), volatile=True)
if __name__ == "__main__":
    import os
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument('model_path', type=str, help='path to trained model file')
    parser.add_argument('--n-samples', type=int, default=64,
                        help='Number of images and texts to sample [default: 64]')
    # fetch_celeba_image()/fetch_celeba_attrs() look this value up in
    # ATTR_TO_IX_DICT, so both flags take an attribute *name* (a string,
    # e.g. 'Male'), not an integer -- the old type=int could never match.
    parser.add_argument('--condition-on-image', type=str, default=None,
                        help='attribute name; if set, generate conditioned on an image with that attribute.')
    # BUGFIX: the flag used to be --condition-on-text, but the code below
    # always read args.condition_on_attrs, which that flag never populated,
    # so every mode crashed with AttributeError. The old spelling is kept
    # as a backward-compatible alias via the extra option string.
    parser.add_argument('--condition-on-attrs', '--condition-on-text',
                        dest='condition_on_attrs', type=str, default=None,
                        help='attribute name; if set, generate conditioned on that attribute vector.')
    parser.add_argument('--cuda', action='store_true', default=False,
                        help='enables CUDA training [default: False]')
    args = parser.parse_args()
    args.cuda = args.cuda and torch.cuda.is_available()

    model = load_checkpoint(args.model_path, use_cuda=args.cuda)
    model.eval()
    if args.cuda:
        model.cuda()

    # mode 1: unconditional generation from the N(0, 1) prior
    if not args.condition_on_image and not args.condition_on_attrs:
        mu = Variable(torch.Tensor([0]))
        std = Variable(torch.Tensor([1]))
        if args.cuda:
            mu = mu.cuda()
            std = std.cuda()
    # mode 2: generate conditioned on image
    elif args.condition_on_image and not args.condition_on_attrs:
        image = fetch_celeba_image(args.condition_on_image)
        if args.cuda:
            image = image.cuda()
        mu, logvar = model.get_params(image=image)
        std = logvar.mul(0.5).exp_()
    # mode 3: generate conditioned on attrs
    elif args.condition_on_attrs and not args.condition_on_image:
        attrs = fetch_celeba_attrs(args.condition_on_attrs)
        if args.cuda:
            attrs = attrs.cuda()
        mu, logvar = model.get_params(attrs=attrs)
        std = logvar.mul(0.5).exp_()
    # mode 4: generate conditioned on image and attrs
    elif args.condition_on_attrs and args.condition_on_image:
        image = fetch_celeba_image(args.condition_on_image)
        attrs = fetch_celeba_attrs(args.condition_on_attrs)
        if args.cuda:
            image = image.cuda()
            attrs = attrs.cuda()
        mu, logvar = model.get_params(image=image, attrs=attrs)
        std = logvar.mul(0.5).exp_()

    # sample from uniform gaussian
    sample = Variable(torch.randn(args.n_samples, model.n_latents))
    if args.cuda:
        sample = sample.cuda()
    # sample from particular gaussian by multiplying + adding
    mu = mu.expand_as(sample)
    std = std.expand_as(sample)
    sample = sample.mul(std).add_(mu)

    # generate image and text
    image_recon = F.sigmoid(model.image_decoder(sample)).cpu().data
    attrs_recon = F.sigmoid(model.attrs_decoder(sample)).cpu().data

    # save image samples to filesystem
    save_image(image_recon.view(args.n_samples, 3, 64, 64),
               './sample_image.png')
    # save attribute samples to filesystem; range() (not the Python-2-only
    # xrange) keeps this runnable under Python 3
    sample_attrs = []
    for i in range(attrs_recon.size(0)):
        attrs = tensor_to_attributes(attrs_recon[i])
        sample_attrs.append(','.join(attrs))
    with open('./sample_attrs.txt', 'w') as fp:
        for attrs in sample_attrs:
            fp.write('%s\n' % attrs)
| 5,535 | 38.542857 | 82 | py |
multimodal-vae-public | multimodal-vae-public-master/celeba/model.py | from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import torch
import torch.nn as nn
from torch.autograd import Variable
from torch.nn import functional as F
from datasets import N_ATTRS
class MVAE(nn.Module):
    """Multimodal Variational Autoencoder.

    @param n_latents: integer
        number of latent dimensions
    """
    def __init__(self, n_latents):
        super(MVAE, self).__init__()
        self.image_encoder = ImageEncoder(n_latents)
        self.image_decoder = ImageDecoder(n_latents)
        self.attrs_encoder = AttributeEncoder(n_latents)
        self.attrs_decoder = AttributeDecoder(n_latents)
        self.experts = ProductOfExperts()
        self.n_latents = n_latents

    def reparametrize(self, mu, logvar):
        # sample z = mu + std * eps during training; at inference time the
        # posterior mean is returned for deterministic reconstructions
        if self.training:
            std = logvar.mul(0.5).exp_()
            eps = Variable(std.data.new(std.size()).normal_())
            return eps.mul(std).add_(mu)
        else:  # return mean during inference
            return mu

    def forward(self, image=None, attrs=None):
        # either modality (or both) may be missing; infer() handles that
        mu, logvar = self.infer(image, attrs)
        # reparametrization trick to sample
        z = self.reparametrize(mu, logvar)
        # reconstruct inputs based on that gaussian
        image_recon = self.image_decoder(z)
        attrs_recon = self.attrs_decoder(z)
        return image_recon, attrs_recon, mu, logvar

    def infer(self, image=None, attrs=None):
        batch_size = image.size(0) if image is not None else attrs.size(0)
        use_cuda = next(self.parameters()).is_cuda  # check if CUDA
        # initialize the universal prior expert
        mu, logvar = prior_expert((1, batch_size, self.n_latents),
                                  use_cuda=use_cuda)
        # stack one Gaussian expert per observed modality along dim 0
        if image is not None:
            image_mu, image_logvar = self.image_encoder(image)
            mu = torch.cat((mu, image_mu.unsqueeze(0)), dim=0)
            logvar = torch.cat((logvar, image_logvar.unsqueeze(0)), dim=0)
        if attrs is not None:
            attrs_mu, attrs_logvar = self.attrs_encoder(attrs)
            mu = torch.cat((mu, attrs_mu.unsqueeze(0)), dim=0)
            logvar = torch.cat((logvar, attrs_logvar.unsqueeze(0)), dim=0)
        # product of experts to combine gaussians
        mu, logvar = self.experts(mu, logvar)
        return mu, logvar
class ImageEncoder(nn.Module):
    """Parametrizes q(z|x) with the standard DCGAN convolutional stack.

    @param n_latents: integer
        number of latent variable dimensions.
    """
    def __init__(self, n_latents):
        super(ImageEncoder, self).__init__()
        conv_stack = [
            nn.Conv2d(3, 32, 4, 2, 1, bias=False), Swish(),
            nn.Conv2d(32, 64, 4, 2, 1, bias=False),
            nn.BatchNorm2d(64), Swish(),
            nn.Conv2d(64, 128, 4, 2, 1, bias=False),
            nn.BatchNorm2d(128), Swish(),
            nn.Conv2d(128, 256, 4, 1, 0, bias=False),
            nn.BatchNorm2d(256), Swish(),
        ]
        self.features = nn.Sequential(*conv_stack)
        # flattened 5x5x256 features -> (mu, logvar) stacked in one vector
        head = [
            nn.Linear(256 * 5 * 5, 512), Swish(),
            nn.Dropout(p=0.1),
            nn.Linear(512, n_latents * 2),
        ]
        self.classifier = nn.Sequential(*head)
        self.n_latents = n_latents

    def forward(self, x):
        hidden = self.classifier(self.features(x).view(-1, 256 * 5 * 5))
        # first half of the output parametrizes mu, second half logvar
        return hidden[:, :self.n_latents], hidden[:, self.n_latents:]
class ImageDecoder(nn.Module):
    """Parametrizes p(x|z) with the standard DCGAN generator stack.

    @param n_latents: integer
        number of latent variable dimensions.
    """
    def __init__(self, n_latents):
        super(ImageDecoder, self).__init__()
        # project the latent vector up to a 5x5x256 feature map
        self.upsample = nn.Sequential(
            nn.Linear(n_latents, 256 * 5 * 5),
            Swish())
        deconv_stack = [
            nn.ConvTranspose2d(256, 128, 4, 1, 0, bias=False),
            nn.BatchNorm2d(128), Swish(),
            nn.ConvTranspose2d(128, 64, 4, 2, 1, bias=False),
            nn.BatchNorm2d(64), Swish(),
            nn.ConvTranspose2d(64, 32, 4, 2, 1, bias=False),
            nn.BatchNorm2d(32), Swish(),
            nn.ConvTranspose2d(32, 3, 4, 2, 1, bias=False),
        ]
        self.hallucinate = nn.Sequential(*deconv_stack)

    def forward(self, z):
        # the input will be a vector of size |n_latents|
        feature_map = self.upsample(z).view(-1, 256, 5, 5)
        return self.hallucinate(feature_map)  # NOTE: no sigmoid here. See train.py
class AttributeEncoder(nn.Module):
    """Parametrizes q(z|y); a single network encodes all 18 attributes jointly.

    @param n_latents: integer
        number of latent variable dimensions.
    """
    def __init__(self, n_latents):
        super(AttributeEncoder, self).__init__()
        mlp = [
            nn.Linear(N_ATTRS, 512), nn.BatchNorm1d(512), Swish(),
            nn.Linear(512, 512), nn.BatchNorm1d(512), Swish(),
            nn.Linear(512, n_latents * 2),
        ]
        self.net = nn.Sequential(*mlp)
        self.n_latents = n_latents

    def forward(self, x):
        out = self.net(x)
        # first half of the output parametrizes mu, second half logvar
        return out[:, :self.n_latents], out[:, self.n_latents:]
class AttributeDecoder(nn.Module):
    """Parametrizes p(y|z); a single network decodes all 18 attributes jointly.

    @param n_latents: integer
        number of latent variable dimensions.
    """
    def __init__(self, n_latents):
        super(AttributeDecoder, self).__init__()
        mlp = [
            nn.Linear(n_latents, 512), nn.BatchNorm1d(512), Swish(),
            nn.Linear(512, 512), nn.BatchNorm1d(512), Swish(),
            nn.Linear(512, 512), nn.BatchNorm1d(512), Swish(),
            nn.Linear(512, N_ATTRS),
        ]
        self.net = nn.Sequential(*mlp)

    def forward(self, z):
        # not a one-hotted prediction: this returns a value for every index
        return self.net(z)  # NOTE: no sigmoid here. See train.py
class ProductOfExperts(nn.Module):
    """Fuse M independent Gaussian experts into a single Gaussian posterior.

    See https://arxiv.org/pdf/1410.7827.pdf for equations.

    @param mu: M x D for M experts
    @param logvar: M x D for M experts
    """
    def forward(self, mu, logvar, eps=1e-8):
        # precision of the i-th Gaussian expert; eps avoids dividing by zero
        precision = 1. / (torch.exp(logvar) + eps)
        total_precision = torch.sum(precision, dim=0)
        # precisions add; means combine precision-weighted
        fused_mu = torch.sum(mu * precision, dim=0) / total_precision
        fused_var = 1. / total_precision
        return fused_mu, torch.log(fused_var)
class Swish(nn.Module):
    """https://arxiv.org/abs/1710.05941 -- the Swish activation x*sigmoid(x)."""
    def forward(self, x):
        return x.mul(torch.sigmoid(x))
def prior_expert(size, use_cuda=False):
    """Universal prior expert: a spherical Gaussian N(0, 1).

    @param size: integer
        dimensionality of Gaussian
    @param use_cuda: boolean [default: False]
        cast CUDA on variables
    """
    # unit Gaussian: zero mean, and log-variance log(1) == 0
    mu = Variable(torch.zeros(size))
    logvar = Variable(torch.zeros(size))
    if use_cuda:
        mu, logvar = mu.cuda(), logvar.cuda()
    return mu, logvar
| 7,415 | 31.243478 | 74 | py |
multimodal-vae-public | multimodal-vae-public-master/celeba/datasets.py | from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import os
import sys
import copy
import random
import numpy as np
import numpy.random as npr
from PIL import Image
from random import shuffle
from scipy.misc import imresize
import torch
from torch.utils.data.dataset import Dataset
# integer codes used by Eval/list_eval_partition.txt for each split
VALID_PARTITIONS = {'train': 0, 'val': 1, 'test': 2}
# go from label index to interpretable index
ATTR_TO_IX_DICT = {'Sideburns': 30, 'Black_Hair': 8, 'Wavy_Hair': 33, 'Young': 39, 'Heavy_Makeup': 18,
                   'Blond_Hair': 9, 'Attractive': 2, '5_o_Clock_Shadow': 0, 'Wearing_Necktie': 38,
                   'Blurry': 10, 'Double_Chin': 14, 'Brown_Hair': 11, 'Mouth_Slightly_Open': 21,
                   'Goatee': 16, 'Bald': 4, 'Pointy_Nose': 27, 'Gray_Hair': 17, 'Pale_Skin': 26,
                   'Arched_Eyebrows': 1, 'Wearing_Hat': 35, 'Receding_Hairline': 28, 'Straight_Hair': 32,
                   'Big_Nose': 7, 'Rosy_Cheeks': 29, 'Oval_Face': 25, 'Bangs': 5, 'Male': 20, 'Mustache': 22,
                   'High_Cheekbones': 19, 'No_Beard': 24, 'Eyeglasses': 15, 'Bags_Under_Eyes': 3,
                   'Wearing_Necklace': 37, 'Wearing_Lipstick': 36, 'Big_Lips': 6, 'Narrow_Eyes': 23,
                   'Chubby': 13, 'Smiling': 31, 'Bushy_Eyebrows': 12, 'Wearing_Earrings': 34}
# we only keep 18 of the more visually distinctive features
# See [1] Perarnau, Guim, et al. "Invertible conditional gans for
# image editing." arXiv preprint arXiv:1611.06355 (2016).
ATTR_IX_TO_KEEP = [4, 5, 8, 9, 11, 12, 15, 17, 18, 20, 21, 22, 26, 28, 31, 32, 33, 35]
# BUGFIX: use items() instead of the Python-2-only iteritems(); the original
# line raised AttributeError at import time under Python 3
IX_TO_ATTR_DICT = {v: k for k, v in ATTR_TO_IX_DICT.items()}
N_ATTRS = len(ATTR_IX_TO_KEEP)
# subset of attributes used for plotting/diagnostics
ATTR_TO_PLOT = ['Heavy_Makeup', 'Male', 'Mouth_Slightly_Open', 'Smiling', 'Wavy_Hair']
class CelebAttributes(Dataset):
    """CelebA face images paired with binary attribute labels.

    The user needs to have pre-defined the Anno and Eval folders from
    http://mmlab.ie.cuhk.edu.hk/projects/CelebA.html under ``data_dir``.

    @param partition: string
        train|val|test [default: train]
        See VALID_PARTITIONS global variable.
    @param data_dir: string
        path to root of dataset images [default: ./data]
    @param image_transform: ?torchvision.Transforms
        optional function to apply to training inputs
    @param attr_transform: ?torchvision.Transforms
        optional function to apply to training outputs
    """
    def __init__(self, partition='train', data_dir='./data',
                 image_transform=None, attr_transform=None):
        assert partition in VALID_PARTITIONS
        self.partition = partition
        self.data_dir = data_dir
        self.image_transform = image_transform
        self.attr_transform = attr_transform
        # filenames belonging to this split, and their attribute rows
        self.image_paths = load_eval_partition(partition, data_dir=data_dir)
        self.attr_data = load_attributes(self.image_paths, partition,
                                         data_dir=data_dir)
        self.size = int(len(self.image_paths))

    def __getitem__(self, index):
        """Return the (image, attributes) pair at ``index``."""
        path = os.path.join(self.data_dir, 'img_align_celeba',
                            self.image_paths[index])
        img = Image.open(path).convert('RGB')
        attr = self.attr_data[index]
        if self.image_transform is not None:
            img = self.image_transform(img)
        if self.attr_transform is not None:
            attr = self.attr_transform(attr)
        return img, attr

    def __len__(self):
        return self.size
def load_eval_partition(partition, data_dir='./data'):
    """Return the image filenames that belong to one evaluation split.

    @param partition: string
        which subset to use (train|val|test)
    @param data_dir: string [default: ./data]
        where the images are saved
    """
    wanted = VALID_PARTITIONS[partition]
    with open(os.path.join(data_dir, 'Eval/list_eval_partition.txt')) as fp:
        lines = fp.readlines()
    paths = []
    for line in lines:
        # each row is "<filename> <split-id>"
        name, flag = line.strip().split(' ')
        if int(flag) == wanted:
            paths.append(name)
    return paths
def load_attributes(paths, partition, data_dir='./data'):
    """Load the binary attributes for ``paths`` into a torch tensor.

    Uses a cached ``attr_<partition>.npy`` file when present; otherwise
    parses the raw CelebA annotation file.

    @param paths: list of strings
        image filenames belonging to this partition
    @param partition: string
        which subset to use (train|val|test)
    @param data_dir: string [default: ./data]
        where the images are saved
    @return: torch.FloatTensor of shape (len(paths), N_ATTRS)
    """
    cache_file = os.path.join(data_dir, 'Anno/attr_%s.npy' % partition)
    if os.path.isfile(cache_file):
        attr_data = np.load(cache_file)
    else:
        # set membership is O(1); the original list lookup made this loop
        # quadratic over the ~200k annotation rows.
        paths_set = set(paths)
        attr_data = []
        with open(os.path.join(data_dir, 'Anno/list_attr_celeba.txt')) as fp:
            rows = fp.readlines()
            # first two lines are a count and the attribute-name header
            for row in rows[2:]:
                row = row.strip().split()
                path, attrs = row[0], row[1:]
                if path in paths_set:
                    attrs = np.array(attrs).astype(int)
                    attrs[attrs < 0] = 0  # map {-1, 1} labels to {0, 1}
                    attr_data.append(attrs)
    attr_data = np.vstack(attr_data).astype(np.int64)
    attr_data = torch.from_numpy(attr_data).float()
    # keep only the 18 visually distinctive attributes
    return attr_data[:, ATTR_IX_TO_KEEP]
def tensor_to_attributes(tensor):
    """Map a vector of attribute probabilities to attribute names.

    @param tensor: PyTorch Tensor
        D dimensional tensor of probabilities in [0, 1]
    @return attributes: list of strings
        names of the attributes predicted present
    """
    attrs = []
    n = tensor.size(0)
    tensor = torch.round(tensor)
    # range(), not the Python-2-only xrange(), so this runs under Python 3
    for i in range(n):
        if tensor[i] > 0.5:
            attrs.append(IX_TO_ATTR_DICT[ATTR_IX_TO_KEEP[i]])
    return attrs
| 6,170 | 39.333333 | 111 | py |
multimodal-vae-public | multimodal-vae-public-master/celeba/train.py | from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import os
import sys
import shutil
from tqdm import tqdm
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.autograd import Variable
from torchvision import transforms
from model import MVAE
from datasets import CelebAttributes
from datasets import N_ATTRS
def elbo_loss(recon_image, image, recon_attrs, attrs, mu, logvar,
              lambda_image=1.0, lambda_attrs=1.0, annealing_factor=1):
    """Bimodal ELBO loss function.

    Either modality may be missing: pass None for both the reconstruction
    and the ground truth to drop that term.

    @param recon_image: ?torch.Tensor
        reconstructed image logits
    @param image: ?torch.Tensor
        input image
    @param recon_attrs: ?torch.Tensor
        reconstructed attribute logits
    @param attrs: ?torch.Tensor
        input attributes
    @param mu: torch.Tensor
        mean of latent distribution
    @param logvar: torch.Tensor
        log-variance of latent distribution
    @param lambda_image: float [default: 1.0]
        weight for image BCE
    @param lambda_attrs: float [default: 1.0]
        weight for attribute BCE
    @param annealing_factor: integer [default: 1]
        multiplier for KL divergence term
    @return ELBO: torch.Tensor
        evidence lower bound
    """
    image_bce, attrs_bce = 0, 0  # zero contribution for a missing modality
    if recon_image is not None and image is not None:
        image_bce = torch.sum(binary_cross_entropy_with_logits(
            recon_image.view(-1, 3 * 64 * 64),
            image.view(-1, 3 * 64 * 64)), dim=1)
    if recon_attrs is not None and attrs is not None:
        # range(), not the Python-2-only xrange(): sum BCE over attributes
        for i in range(N_ATTRS):
            attr_bce = binary_cross_entropy_with_logits(
                recon_attrs[:, i], attrs[:, i])
            attrs_bce += attr_bce
    # Kingma and Welling. Auto-Encoding Variational Bayes. ICLR, 2014
    # https://arxiv.org/abs/1312.6114
    KLD = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp(), dim=1)
    ELBO = torch.mean(lambda_image * image_bce + lambda_attrs * attrs_bce
                      + annealing_factor * KLD)
    return ELBO
def binary_cross_entropy_with_logits(input, target):
    """Numerically stable sigmoid + binary cross entropy (no reduction).

    @param input: torch.Tensor (size N) of logits
    @param target: torch.Tensor (size N) of {0, 1} labels
    @return loss: torch.Tensor (size N)
    """
    if target.size() != input.size():
        raise ValueError("Target size ({}) must be the same as input size ({})".format(
            target.size(), input.size()))
    # standard stable form: max(x, 0) - x * t + log(1 + exp(-|x|))
    stable_log_term = torch.log(1 + torch.exp(-torch.abs(input)))
    return torch.clamp(input, 0) - input * target + stable_log_term
class AverageMeter(object):
    """Tracks the most recent value and a running average."""
    def __init__(self):
        self.reset()

    def reset(self):
        """Clear all accumulated statistics."""
        self.val = 0
        self.sum = 0
        self.count = 0
        self.avg = 0

    def update(self, val, n=1):
        """Record ``val`` observed ``n`` times and refresh the average."""
        self.val = val
        self.count += n
        self.sum += val * n
        self.avg = self.sum / self.count
def save_checkpoint(state, is_best, folder='./', filename='checkpoint.pth.tar'):
    """Serialize ``state``; also mirror it to model_best.pth.tar when best.

    @param state: dict
        values to serialize (model/optimizer state, metadata)
    @param is_best: boolean
        if True, copy the checkpoint to model_best.pth.tar as well
    @param folder: string [default: ./]
        output directory (created, including parents, when missing)
    @param filename: string [default: checkpoint.pth.tar]
        checkpoint file name
    """
    if not os.path.isdir(folder):
        # makedirs (not mkdir) so nested output paths are created too
        os.makedirs(folder)
    torch.save(state, os.path.join(folder, filename))
    if is_best:
        shutil.copyfile(os.path.join(folder, filename),
                        os.path.join(folder, 'model_best.pth.tar'))
def load_checkpoint(file_path, use_cuda=False):
    """Rebuild an MVAE from a checkpoint written by save_checkpoint.

    @param file_path: string
        path to the checkpoint file
    @param use_cuda: boolean [default: False]
        when False, remap CUDA tensors onto the CPU while loading
    """
    if use_cuda:
        checkpoint = torch.load(file_path)
    else:
        checkpoint = torch.load(
            file_path, map_location=lambda storage, location: storage)
    model = MVAE(checkpoint['n_latents'])
    model.load_state_dict(checkpoint['state_dict'])
    return model
if __name__ == "__main__":
    import argparse
    parser = argparse.ArgumentParser()
    # model / optimization hyperparameters
    parser.add_argument('--n-latents', type=int, default=100,
                        help='size of the latent embedding [default: 100]')
    parser.add_argument('--batch-size', type=int, default=100, metavar='N',
                        help='input batch size for training [default: 100]')
    parser.add_argument('--epochs', type=int, default=100, metavar='N',
                        help='number of epochs to train [default: 100]')
    parser.add_argument('--annealing-epochs', type=int, default=20, metavar='N',
                        help='number of epochs to anneal KL for [default: 20]')
    parser.add_argument('--lr', type=float, default=1e-4, metavar='LR',
                        help='learning rate [default: 1e-4]')
    parser.add_argument('--log-interval', type=int, default=10, metavar='N',
                        help='how many batches to wait before logging training status [default: 10]')
    parser.add_argument('--lambda-image', type=float, default=1.,
                        help='multipler for image reconstruction [default: 1]')
    parser.add_argument('--lambda-attrs', type=float, default=10.,
                        help='multipler for attributes reconstruction [default: 10]')
    parser.add_argument('--cuda', action='store_true', default=False,
                        help='enables CUDA training [default: False]')
    args = parser.parse_args()
    # only use CUDA when both requested and actually available
    args.cuda = args.cuda and torch.cuda.is_available()
    if not os.path.isdir('./trained_models'):
        os.makedirs('./trained_models')
    # crop the input image to 64 x 64
    preprocess_data = transforms.Compose([transforms.Resize(64),
                                          transforms.CenterCrop(64),
                                          transforms.ToTensor()])
    train_loader = torch.utils.data.DataLoader(
        CelebAttributes(partition='train', data_dir='./data',
                        image_transform=preprocess_data),
        batch_size=args.batch_size, shuffle=True)
    # mini-batches per epoch; used by the KL annealing schedule in train()
    N_mini_batches = len(train_loader)
    test_loader = torch.utils.data.DataLoader(
        CelebAttributes(partition='val', data_dir='./data',
                        image_transform=preprocess_data),
        batch_size=args.batch_size, shuffle=False)
    model = MVAE(args.n_latents)
    optimizer = optim.Adam(model.parameters(), lr=args.lr)
    if args.cuda:
        model.cuda()
def train(epoch):
    """Run one training epoch: joint, image-only, and attrs-only ELBOs."""
    model.train()
    train_loss_meter = AverageMeter()
    # NOTE: is_paired is 1 if the example is paired
    for batch_idx, (image, attrs) in enumerate(train_loader):
        if epoch < args.annealing_epochs:
            # compute the KL annealing factor for the current mini-batch in the current epoch
            annealing_factor = (float(batch_idx + (epoch - 1) * N_mini_batches + 1) /
                                float(args.annealing_epochs * N_mini_batches))
        else:
            # by default the KL annealing factor is unity
            annealing_factor = 1.0
        if args.cuda:
            image = image.cuda()
            attrs = attrs.cuda()
        image = Variable(image)
        attrs = Variable(attrs)
        batch_size = len(image)
        # refresh the optimizer
        optimizer.zero_grad()
        # pass data through model: once jointly, once per single modality
        recon_image_1, recon_attrs_1, mu_1, logvar_1 = model(image, attrs)
        recon_image_2, recon_attrs_2, mu_2, logvar_2 = model(image)
        recon_image_3, recon_attrs_3, mu_3, logvar_3 = model(attrs=attrs)
        # compute ELBO for each data combo
        joint_loss = elbo_loss(recon_image_1, image, recon_attrs_1, attrs, mu_1, logvar_1,
                               lambda_image=args.lambda_image, lambda_attrs=args.lambda_attrs,
                               annealing_factor=annealing_factor)
        image_loss = elbo_loss(recon_image_2, image, None, None, mu_2, logvar_2,
                               lambda_image=args.lambda_image, lambda_attrs=args.lambda_attrs,
                               annealing_factor=annealing_factor)
        attrs_loss = elbo_loss(None, None, recon_attrs_3, attrs, mu_3, logvar_3,
                               lambda_image=args.lambda_image, lambda_attrs=args.lambda_attrs,
                               annealing_factor=annealing_factor)
        # the MVAE sub-sampled objective: sum of the three ELBO terms
        train_loss = joint_loss + image_loss + attrs_loss
        # .data[0] is the pre-0.4 PyTorch spelling of .item()
        train_loss_meter.update(train_loss.data[0], batch_size)
        # compute and take gradient step
        train_loss.backward()
        optimizer.step()
        if batch_idx % args.log_interval == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}\tAnnealing-Factor: {:.3f}'.format(
                epoch, batch_idx * len(image), len(train_loader.dataset),
                100. * batch_idx / len(train_loader), train_loss_meter.avg, annealing_factor))
    print('====> Epoch: {}\tLoss: {:.4f}'.format(epoch, train_loss_meter.avg))
def test(epoch):
    """Evaluate the three ELBO terms on the validation split."""
    model.eval()
    test_loss_meter = AverageMeter()
    pbar = tqdm(total=len(test_loader))
    for batch_idx, (image, attrs) in enumerate(test_loader):
        if args.cuda:
            image = image.cuda()
            attrs = attrs.cuda()
        # volatile=True is the pre-0.4 PyTorch way to disable autograd
        image = Variable(image, volatile=True)
        attrs = Variable(attrs, volatile=True)
        batch_size = len(image)
        recon_image_1, recon_attrs_1, mu_1, logvar_1 = model(image, attrs)
        recon_image_2, recon_attrs_2, mu_2, logvar_2 = model(image)
        recon_image_3, recon_attrs_3, mu_3, logvar_3 = model(attrs=attrs)
        # no KL annealing at evaluation time (annealing_factor defaults to 1)
        joint_loss = elbo_loss(recon_image_1, image, recon_attrs_1, attrs, mu_1, logvar_1,
                               lambda_image=args.lambda_image, lambda_attrs=args.lambda_attrs)
        image_loss = elbo_loss(recon_image_2, image, None, None, mu_2, logvar_2,
                               lambda_image=args.lambda_image, lambda_attrs=args.lambda_attrs)
        attrs_loss = elbo_loss(None, None, recon_attrs_3, attrs, mu_3, logvar_3,
                               lambda_image=args.lambda_image, lambda_attrs=args.lambda_attrs)
        test_loss = joint_loss + image_loss + attrs_loss
        test_loss_meter.update(test_loss.data[0], batch_size)
        pbar.update()
    pbar.close()
    print('====> Test Loss: {:.4f}'.format(test_loss_meter.avg))
    return test_loss_meter.avg
# float('inf') rather than the Python-2-only sys.maxint, so the script
# also runs under Python 3.
best_loss = float('inf')
for epoch in range(1, args.epochs + 1):
    train(epoch)
    loss = test(epoch)
    # track the best validation loss seen so far
    is_best = loss < best_loss
    best_loss = min(loss, best_loss)
    # save the best model and current model
    save_checkpoint({
        'state_dict': model.state_dict(),
        'best_loss': best_loss,
        'n_latents': args.n_latents,
        'optimizer' : optimizer.state_dict(),
    }, is_best, folder='./trained_models')
| 11,037 | 40.340824 | 105 | py |
multimodal-vae-public | multimodal-vae-public-master/celeba19/model.py | from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import sys
import torch
import torch.nn as nn
from torch.autograd import Variable
from torch.nn import functional as F
sys.path.append('../celeba')
from datasets import N_ATTRS
class MVAE(nn.Module):
    """Multimodal Variational Autoencoder over an image and N_ATTRS attributes.

    @param n_latents: integer
        number of latent dimensions
    """
    def __init__(self, n_latents):
        super(MVAE, self).__init__()
        self.image_encoder = ImageEncoder(n_latents)
        self.image_decoder = ImageDecoder(n_latents)
        # have an inference network and decoder for each attribute (18 total)
        self.attr_encoders = nn.ModuleList([AttributeEncoder(n_latents)
                                            for _ in range(N_ATTRS)])
        self.attr_decoders = nn.ModuleList([AttributeDecoder(n_latents)
                                            for _ in range(N_ATTRS)])
        self.experts = ProductOfExperts()
        self.n_latents = n_latents

    def reparametrize(self, mu, logvar):
        """Sample z ~ N(mu, exp(logvar)) via the reparametrization trick."""
        if self.training:
            std = logvar.mul(0.5).exp_()
            eps = Variable(std.data.new(std.size()).normal_())
            return eps.mul(std).add_(mu)
        else:  # return mean during inference
            return mu

    def forward(self, image=None, attrs=None):
        """Forward pass through the MVAE.

        @param image: ?PyTorch.Tensor
        @param attrs: ?list of ?PyTorch.Tensors
            If a single attribute is missing, pass None instead of a
            Tensor. If omitted entirely, all attributes are treated as
            missing. (A None default replaces the original mutable-list
            default, which was shared across calls and used xrange.)
        @return image_recon: PyTorch.Tensor
        @return attr_recons: list of PyTorch.Tensors (N_ATTRS length)
        """
        if attrs is None:
            attrs = [None] * N_ATTRS
        mu, logvar = self.infer(image, attrs)
        # reparametrization trick to sample
        z = self.reparametrize(mu, logvar)
        # reconstruct inputs based on that gaussian
        image_recon = self.image_decoder(z)
        attr_recons = []
        for i in range(N_ATTRS):
            attr_recon = self.attr_decoders[i](z)
            attr_recons.append(attr_recon.squeeze(1))
        return image_recon, attr_recons, mu, logvar

    def infer(self, image=None, attrs=None):
        """Combine the available modalities into one posterior via PoE."""
        if attrs is None:
            attrs = [None] * N_ATTRS
        # get the batch size from whichever modality is present
        if image is not None:
            batch_size = len(image)
        else:
            for i in range(N_ATTRS):
                if attrs[i] is not None:
                    batch_size = len(attrs[i])
                    break
        use_cuda = next(self.parameters()).is_cuda  # check if CUDA
        # start from the universal prior expert N(0, 1)
        mu, logvar = prior_expert((1, batch_size, self.n_latents),
                                  use_cuda=use_cuda)
        if image is not None:
            image_mu, image_logvar = self.image_encoder(image)
            mu = torch.cat((mu, image_mu.unsqueeze(0)), dim=0)
            logvar = torch.cat((logvar, image_logvar.unsqueeze(0)), dim=0)
        for i in range(N_ATTRS):
            if attrs[i] is not None:
                attr_mu, attr_logvar = self.attr_encoders[i](attrs[i].long())
                mu = torch.cat((mu, attr_mu.unsqueeze(0)), dim=0)
                logvar = torch.cat((logvar, attr_logvar.unsqueeze(0)), dim=0)
        # product of experts to combine gaussians
        mu, logvar = self.experts(mu, logvar)
        return mu, logvar
class ImageEncoder(nn.Module):
    """Parametrizes q(z|x) with a DCGAN-style convolutional network.

    @param n_latents: integer
        number of latent variable dimensions.
    """
    def __init__(self, n_latents):
        super(ImageEncoder, self).__init__()
        self.features = nn.Sequential(
            nn.Conv2d(3, 32, 4, 2, 1, bias=False),
            Swish(),
            nn.Conv2d(32, 64, 4, 2, 1, bias=False),
            nn.BatchNorm2d(64),
            Swish(),
            nn.Conv2d(64, 128, 4, 2, 1, bias=False),
            nn.BatchNorm2d(128),
            Swish(),
            nn.Conv2d(128, 256, 4, 1, 0, bias=False),
            nn.BatchNorm2d(256),
            Swish())
        self.classifier = nn.Sequential(
            nn.Linear(256 * 5 * 5, 512),
            Swish(),
            nn.Dropout(p=0.1),
            nn.Linear(512, n_latents * 2))
        self.n_latents = n_latents

    def forward(self, x):
        """Return (mu, logvar), each of shape (batch, n_latents)."""
        hidden = self.classifier(self.features(x).view(-1, 256 * 5 * 5))
        # the first half of the output parametrizes mu, the second logvar
        return hidden[:, :self.n_latents], hidden[:, self.n_latents:]
class ImageDecoder(nn.Module):
    """Parametrizes p(x|z) with a DCGAN-style deconvolutional network.

    Outputs logits -- there is no sigmoid here (see train.py).

    @param n_latents: integer
        number of latent variable dimensions.
    """
    def __init__(self, n_latents):
        super(ImageDecoder, self).__init__()
        self.upsample = nn.Sequential(
            nn.Linear(n_latents, 256 * 5 * 5),
            Swish())
        self.hallucinate = nn.Sequential(
            nn.ConvTranspose2d(256, 128, 4, 1, 0, bias=False),
            nn.BatchNorm2d(128),
            Swish(),
            nn.ConvTranspose2d(128, 64, 4, 2, 1, bias=False),
            nn.BatchNorm2d(64),
            Swish(),
            nn.ConvTranspose2d(64, 32, 4, 2, 1, bias=False),
            nn.BatchNorm2d(32),
            Swish(),
            nn.ConvTranspose2d(32, 3, 4, 2, 1, bias=False))

    def forward(self, z):
        """Map a (batch, n_latents) code to image logits."""
        # project the latent code up to a 256 x 5 x 5 feature map first
        feats = self.upsample(z).view(-1, 256, 5, 5)
        return self.hallucinate(feats)  # NOTE: no sigmoid here. See train.py
class AttributeEncoder(nn.Module):
    """Parametrizes q(z|y) for a single binary attribute.

    @param n_latents: integer
        number of latent variable dimensions.
    """
    def __init__(self, n_latents):
        super(AttributeEncoder, self).__init__()
        self.net = nn.Sequential(
            nn.Embedding(2, 512),
            Swish(),
            nn.Linear(512, 512),
            Swish(),
            nn.Linear(512, n_latents * 2))
        self.n_latents = n_latents

    def forward(self, x):
        """Return (mu, logvar) for a batch of {0, 1} attribute labels."""
        k = self.n_latents
        out = self.net(x.long())
        # first half parametrizes mu, second half logvar
        return out[:, :k], out[:, k:]
class AttributeDecoder(nn.Module):
    """Parametrizes p(y|z) for a single binary attribute.

    Outputs one logit -- there is no sigmoid here (see train.py).

    @param n_latents: integer
        number of latent variable dimensions.
    """
    def __init__(self, n_latents):
        super(AttributeDecoder, self).__init__()
        self.net = nn.Sequential(
            nn.Linear(n_latents, 512),
            Swish(),
            nn.Linear(512, 512),
            Swish(),
            nn.Linear(512, 512),
            Swish(),
            nn.Linear(512, 1))

    def forward(self, z):
        """Map a (batch, n_latents) code to a single attribute logit."""
        return self.net(z)  # NOTE: no sigmoid here. See train.py
class ProductOfExperts(nn.Module):
    """Combine M independent Gaussian experts into a single Gaussian.

    See https://arxiv.org/pdf/1410.7827.pdf for equations.

    @param mu: M x D for M experts
    @param logvar: M x D for M experts
    """
    def forward(self, mu, logvar, eps=1e-8):
        # eps keeps the variance strictly positive
        var = logvar.exp() + eps
        # precision of i-th Gaussian expert at point x
        precision = 1. / var
        pd_var = 1. / precision.sum(dim=0)
        # precision-weighted mean, normalized by the total precision
        pd_mu = (mu * precision).sum(dim=0) * pd_var
        pd_logvar = torch.log(pd_var)
        return pd_mu, pd_logvar
class Swish(nn.Module):
    """Swish activation: x * sigmoid(x).

    https://arxiv.org/abs/1710.05941
    """
    def forward(self, x):
        # torch.sigmoid: F.sigmoid has been deprecated since PyTorch 0.4
        return x * torch.sigmoid(x)
def prior_expert(size, use_cuda=False):
    """Universal prior expert: a spherical Gaussian N(0, 1).

    @param size: integer
        dimensionality of Gaussian
    @param use_cuda: boolean [default: False]
        cast CUDA on variables
    """
    # N(0, 1): zero mean; log-variance is log(1) == 0
    mu = Variable(torch.zeros(size))
    logvar = Variable(torch.log(torch.ones(size)))
    if use_cuda:
        mu = mu.cuda()
        logvar = logvar.cuda()
    return mu, logvar
| 8,328 | 32.316 | 91 | py |
multimodal-vae-public | multimodal-vae-public-master/celeba19/train.py | from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import os
import sys
import shutil
import numpy as np
from tqdm import tqdm
from itertools import combinations
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.autograd import Variable
from torchvision import transforms
from model import MVAE
sys.path.append('../celeba')
from datasets import N_ATTRS
from datasets import CelebAttributes
def elbo_loss(recon, data, mu, logvar, lambda_image=1.0,
              lambda_attrs=1.0, annealing_factor=1.):
    """Compute the ELBO for an arbitrary number of data modalities.

    @param recon: list of torch.Tensors/Variables
        Contains one for each modality.
    @param data: list of torch.Tensors/Variables
        Size must agree with recon.
    @param mu: Torch.Tensor
        Mean of the variational distribution.
    @param logvar: Torch.Tensor
        Log variance for variational distribution.
    @param lambda_image: float [default: 1.0]
        weight for image BCE
    @param lambda_attrs: float [default: 1.0]
        weight for attribute BCE
    @param annealing_factor: float [default: 1]
        Beta - how much to weight the KL regularizer.
    """
    assert len(recon) == len(data), "must supply ground truth for every modality."
    n_modalities = len(recon)
    batch_size = mu.size(0)
    BCE = 0  # reconstruction cost
    # range(), not the Python-2-only xrange(), so this runs under Python 3
    for ix in range(n_modalities):
        # dimensionality > 1 implies an image
        if len(recon[ix].size()) > 1:
            recon_ix = recon[ix].view(batch_size, -1)
            data_ix = data[ix].view(batch_size, -1)
            BCE += lambda_image * torch.sum(binary_cross_entropy_with_logits(recon_ix, data_ix), dim=1)
        else:  # this is for an attribute
            BCE += lambda_attrs * binary_cross_entropy_with_logits(recon[ix], data[ix])
    KLD = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp(), dim=1)
    ELBO = torch.mean(BCE + annealing_factor * KLD)
    return ELBO
def binary_cross_entropy_with_logits(input, target):
    """Sigmoid activation fused with binary cross entropy, elementwise.

    @param input: torch.Tensor (size N) of logits
    @param target: torch.Tensor (size N) of {0, 1} labels
    @return loss: torch.Tensor (size N)
    """
    if input.size() != target.size():
        raise ValueError("Target size ({}) must be the same as input size ({})".format(
            target.size(), input.size()))
    # numerically stable form: max(x, 0) - x * t + log(1 + exp(-|x|))
    neg_abs = -input.abs()
    return input.clamp(min=0) - input * target + torch.log(1 + neg_abs.exp())
def tensor_2d_to_list(x):
    """Split a 2D tensor into a list of its 1D column tensors.

    @param x: torch.Tensor of shape (N, D)
    @return: list of D tensors, each of shape (N,)
    """
    # range(), not the Python-2-only xrange(), so this runs under Python 3
    return [x[:, i] for i in range(x.size(1))]
def enumerate_combinations(n):
    """Enumerate the pool of modality subsets as boolean masks.

    We use this to define the domain of ELBO terms. Only subsets of size
    2 .. n-1 are produced; the empty set, singletons, and the full set
    are handled by dedicated ELBO terms in the training loop.

    @param n: integer
        number of features (19 for Celeb19)
    @return: np.array of shape (#subsets, n), dtype bool
    """
    combos = []
    # range(), not the Python-2-only xrange(); sizes 2 to n - 1 inclusive
    for size in range(2, n):
        combos += list(combinations(range(n), size))
    # builtin bool: the np.bool alias was removed in NumPy 1.24
    mask = np.zeros((len(combos), n))
    for row, combo in enumerate(combos):
        for idx in combo:
            mask[row][idx] = 1
    return mask.astype(bool)
def sample_combinations(pool, size=1):
    """Return boolean masks of which modalities to use for extra ELBO terms.

    Sampling is stratified by subset size: subset sizes are drawn uniformly
    (with replacement) from the sizes present in ``pool``; then, for each
    drawn size, combinations of that size are drawn without replacement.
    Combinations that are all True or contain a single True never appear
    because enumerate_combinations excludes them from the pool.

    @param pool: np.array
        enumerating all possible combinations (boolean rows).
    @param size: integer (default: 1)
        number of combinations to sample.
    """
    n_modalities = pool.shape[1]
    # number of active modalities in each pooled combination
    pool_sums = np.sum(pool, axis=1)
    pool_dist = np.bincount(pool_sums)
    # the subset sizes that actually occur in the pool
    pool_space = np.where(pool_dist > 0)[0]
    # draw subset *sizes* uniformly with replacement
    sample_pool = np.random.choice(pool_space, size, replace=True)
    sample_dist = np.bincount(sample_pool)
    if sample_dist.size < n_modalities:
        # pad the histogram so it can be indexed by modality count below
        # (builtin int: the np.int alias was removed in NumPy 1.24)
        zeros_pad = np.zeros(n_modalities - sample_dist.size).astype(int)
        sample_dist = np.concatenate((sample_dist, zeros_pad))
    sample_combo = []
    # range(), not the Python-2-only xrange(), so this runs under Python 3
    for ix in range(n_modalities):
        if sample_dist[ix] > 0:
            # all pooled combinations with exactly ix active modalities
            pool_i = pool[pool_sums == ix]
            combo_i = np.random.choice(range(pool_i.shape[0]),
                                       size=sample_dist[ix],
                                       replace=False)
            sample_combo.append(pool_i[combo_i])
    sample_combo = np.concatenate(sample_combo)
    return sample_combo
class AverageMeter(object):
    """Maintains the latest value plus a running mean of all updates."""
    def __init__(self):
        self.reset()

    def reset(self):
        """Zero out every statistic."""
        self.val = 0
        self.avg = 0
        self.count = 0
        self.sum = 0

    def update(self, val, n=1):
        """Fold in ``val`` observed ``n`` times."""
        self.val = val
        self.sum = self.sum + val * n
        self.count = self.count + n
        self.avg = self.sum / self.count
def save_checkpoint(state, is_best, folder='./', filename='checkpoint.pth.tar'):
    """Serialize ``state``; also mirror it to model_best.pth.tar when best.

    @param state: dict
        values to serialize (model/optimizer state, metadata)
    @param is_best: boolean
        if True, copy the checkpoint to model_best.pth.tar as well
    @param folder: string [default: ./]
        output directory (created, including parents, when missing)
    @param filename: string [default: checkpoint.pth.tar]
        checkpoint file name
    """
    if not os.path.isdir(folder):
        # makedirs (not mkdir) so nested output paths are created too
        os.makedirs(folder)
    torch.save(state, os.path.join(folder, filename))
    if is_best:
        shutil.copyfile(os.path.join(folder, filename),
                        os.path.join(folder, 'model_best.pth.tar'))
def load_checkpoint(file_path, use_cuda=False):
    """Rebuild an MVAE from a checkpoint written by save_checkpoint.

    @param file_path: string
        path to the checkpoint file
    @param use_cuda: boolean [default: False]
        when False, remap CUDA tensors onto the CPU while loading
    """
    if use_cuda:
        ckpt = torch.load(file_path)
    else:
        ckpt = torch.load(file_path,
                          map_location=lambda storage, location: storage)
    model = MVAE(ckpt['n_latents'])
    model.load_state_dict(ckpt['state_dict'])
    return model
if __name__ == "__main__":
    import argparse
    parser = argparse.ArgumentParser()
    # model / optimization hyperparameters
    parser.add_argument('--n-latents', type=int, default=100,
                        help='size of the latent embedding [default: 100]')
    parser.add_argument('--batch-size', type=int, default=100, metavar='N',
                        help='input batch size for training [default: 100]')
    parser.add_argument('--epochs', type=int, default=100, metavar='N',
                        help='number of epochs to train [default: 100]')
    parser.add_argument('--annealing-epochs', type=int, default=20, metavar='N',
                        help='number of epochs to anneal KL for [default: 20]')
    parser.add_argument('--lr', type=float, default=1e-4, metavar='LR',
                        help='learning rate [default: 1e-4]')
    parser.add_argument('--log-interval', type=int, default=10, metavar='N',
                        help='how many batches to wait before logging training status [default: 10]')
    parser.add_argument('--approx-m', type=int, default=1,
                        help='number of ELBO terms to approx. the full MVAE objective [default: 1]')
    parser.add_argument('--lambda-image', type=float, default=1.,
                        help='multipler for image reconstruction [default: 1]')
    parser.add_argument('--lambda-attrs', type=float, default=10.,
                        help='multipler for attributes reconstruction [default: 10]')
    parser.add_argument('--cuda', action='store_true', default=False,
                        help='enables CUDA training [default: False]')
    args = parser.parse_args()
    # only use CUDA when both requested and actually available
    args.cuda = args.cuda and torch.cuda.is_available()
    if not os.path.isdir('./trained_models'):
        os.makedirs('./trained_models')
    # crop the input image to 64 x 64
    preprocess_data = transforms.Compose([transforms.Resize(64),
                                          transforms.CenterCrop(64),
                                          transforms.ToTensor()])
    train_loader = torch.utils.data.DataLoader(
        CelebAttributes(partition='train', data_dir='./data',
                        image_transform=preprocess_data),
        batch_size=args.batch_size, shuffle=True)
    # mini-batches per epoch; used by the KL annealing schedule in train()
    N_mini_batches = len(train_loader)
    test_loader = torch.utils.data.DataLoader(
        CelebAttributes(partition='val', data_dir='./data',
                        image_transform=preprocess_data),
        batch_size=args.batch_size, shuffle=False)
    model = MVAE(args.n_latents)
    optimizer = optim.Adam(model.parameters(), lr=args.lr)
    if args.cuda:
        model.cuda()
    # enumerate all combinations so we can sample from this
    # every gradient step. NOTE: probably not the most efficient
    # way to do this but oh well.
    combination_pool = enumerate_combinations(19)
def train(epoch):
    """Run one training epoch of the sub-sampled MVAE objective.

    Per batch this sums N + 1 + M ELBO terms: the joint (complete) term,
    the image-only term, one term per attribute, and M randomly sampled
    modality subsets. Uses range() throughout -- the original xrange()
    calls are Python-2-only and break under Python 3.
    """
    model.train()
    train_loss_meter = AverageMeter()
    for batch_idx, (image, attrs) in enumerate(train_loader):
        if epoch < args.annealing_epochs:
            # compute the KL annealing factor for the current mini-batch in the current epoch
            annealing_factor = (float(batch_idx + (epoch - 1) * N_mini_batches + 1) /
                                float(args.annealing_epochs * N_mini_batches))
        else:
            # by default the KL annealing factor is unity
            annealing_factor = 1.0
        if args.cuda:
            image = image.cuda()
            attrs = attrs.cuda()
        image = Variable(image)
        attrs = Variable(attrs)
        attrs = tensor_2d_to_list(attrs)  # convert tensor to list
        batch_size = len(image)
        # refresh the optimizer
        optimizer.zero_grad()
        train_loss = 0  # accumulate train loss here so we don't store a lot of things.
        n_elbo_terms = 0  # track number of ELBO terms
        # compute ELBO using all data (``complete")
        recon_image, recon_attrs, mu, logvar = model(image, attrs)
        train_loss += elbo_loss([recon_image] + recon_attrs, [image] + attrs, mu, logvar,
                                lambda_image=args.lambda_image, lambda_attrs=args.lambda_attrs,
                                annealing_factor=annealing_factor)
        n_elbo_terms += 1  # keep track of how many terms there are
        # compute ELBO using only image data
        recon_image, _, mu, logvar = model(image=image)
        train_loss += elbo_loss([recon_image], [image], mu, logvar,
                                lambda_image=args.lambda_image, lambda_attrs=args.lambda_attrs,
                                annealing_factor=annealing_factor)
        n_elbo_terms += 1  # keep track of how many terms there are
        # compute ELBO using only text data (one term per attribute)
        for ix in range(len(attrs)):
            _, recon_attrs, mu, logvar = model(attrs=[attrs[k] if k == ix else None
                                                      for k in range(len(attrs))])
            train_loss += elbo_loss([recon_attrs[ix]], [attrs[ix]], mu, logvar,
                                    annealing_factor=annealing_factor)
            n_elbo_terms += 1
        # sample some number of terms
        if args.approx_m > 0:
            sample_combos = sample_combinations(combination_pool, size=args.approx_m)
            for sample_combo in sample_combos:
                # index 0 of a combo is the image; the rest are attributes
                attrs_combo = sample_combo[1:]
                recon_image, recon_attrs, mu, logvar = model(image=image if sample_combo[0] else None,
                                                             attrs=[attrs[ix] if attrs_combo[ix] else None
                                                                    for ix in range(attrs_combo.size)])
                if sample_combo[0]:  # check if image is present
                    elbo = elbo_loss([recon_image] + [recon_attrs[ix] for ix in range(attrs_combo.size) if attrs_combo[ix]],
                                     [image] + [attrs[ix] for ix in range(attrs_combo.size) if attrs_combo[ix]],
                                     mu, logvar, annealing_factor=annealing_factor)
                else:
                    elbo = elbo_loss([recon_attrs[ix] for ix in range(attrs_combo.size) if attrs_combo[ix]],
                                     [attrs[ix] for ix in range(attrs_combo.size) if attrs_combo[ix]],
                                     mu, logvar, annealing_factor=annealing_factor)
                train_loss += elbo
                n_elbo_terms += 1
        assert n_elbo_terms == (len(attrs) + 1) + 1 + args.approx_m  # N + 1 + M
        # .data[0] is the pre-0.4 PyTorch spelling of .item()
        train_loss_meter.update(train_loss.data[0], len(image))
        # compute and take gradient step
        train_loss.backward()
        optimizer.step()
        if batch_idx % args.log_interval == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}\tAnnealing-Factor: {:.3f}'.format(
                epoch, batch_idx * batch_size, len(train_loader.dataset),
                100. * batch_idx / len(train_loader), train_loss_meter.avg, annealing_factor))
    print('====> Epoch: {}\tLoss: {:.4f}'.format(epoch, train_loss_meter.avg))
def test(epoch):
    """Evaluate the joint (all-modality) ELBO on the validation split."""
    model.eval()
    test_loss = 0
    # for simplicitly, here i'm only going to track the joint loss.
    pbar = tqdm(total=len(test_loader))
    for batch_idx, (image, attrs) in enumerate(test_loader):
        if args.cuda:
            image, attrs = image.cuda(), attrs.cuda()
        # volatile=True is the pre-0.4 PyTorch way to disable autograd
        image = Variable(image, volatile=True)
        attrs = Variable(attrs, volatile=True)
        batch_size = image.size(0)
        attrs = tensor_2d_to_list(attrs)
        # compute the elbo using all data.
        recon_image, recon_attrs, mu, logvar = model(image, attrs)
        test_loss += elbo_loss([recon_image] + recon_attrs, [image] + attrs, mu, logvar).data[0]
        pbar.update()
    pbar.close()
    # average over mini-batches (not individual examples)
    test_loss /= len(test_loader)
    print('====> Test Loss: {:.4f}'.format(test_loss))
    return test_loss
# float('inf') rather than the Python-2-only sys.maxint, so the script
# also runs under Python 3.
best_loss = float('inf')
for epoch in range(1, args.epochs + 1):
    train(epoch)
    loss = test(epoch)
    # track the best validation loss seen so far
    is_best = loss < best_loss
    best_loss = min(loss, best_loss)
    # save the best model and current model.
    # NOTE: the parser defines no --out-dir option, so the original
    # ``folder=args.out_dir`` raised AttributeError on the first save;
    # write to the directory created above (as in celeba/train.py).
    save_checkpoint({
        'state_dict': model.state_dict(),
        'best_loss': best_loss,
        'n_latents': args.n_latents,
        'optimizer' : optimizer.state_dict(),
    }, is_best, folder='./trained_models')
| 14,718 | 40.345506 | 129 | py |
Fine-tuning-NOs | Fine-tuning-NOs-master/main.py | from pytorch_lightning.callbacks import ModelCheckpoint
import pytorch_lightning as pl
import yaml
import argparse
import utilities
import os
import torch
import shutil
def datasetFactory(config, do, args=None):
    """Build data loaders from the config, optionally overridden by CLI args.

    When ``args`` is provided, its ``data_base`` and ``PATH`` fields take
    precedence over the corresponding entries in ``config["data"]``.
    """
    c_data = config["data"]
    if args is None:
        data_base, path = c_data["process"], c_data["PATH"]
    else:
        data_base, path = args.data_base, args.PATH
    gl = utilities.GettingLists(data_for_training=c_data["n_sample"],
                                wave_eq=c_data["PDE_type"],
                                data_base=data_base,
                                PATH=path)
    return utilities.MyLoader(GL=gl, do=do, config=config, args=args)
def main(args, config = None):
    """Train (or fine-tune from a checkpoint) a neural-operator model.

    ``args`` is the namespace produced by the CLI parser below; ``config``
    may be injected directly, otherwise it is loaded from
    ``args.config_file`` (YAML).  Checkpoint handling depends on
    ``args.all_ckp`` (keep every epoch) and ``args.usual_ckpt`` (fall back
    to Lightning's default checkpointing).  When ``args.checkpoint`` is
    given, most training hyper-parameters can be overridden from the
    command line before resuming/fine-tuning.
    """
    if config is None:
        with open(args.config_file, 'r') as stream:
            config = yaml.load(stream, yaml.FullLoader)
    print(config)
    print(args)
    model = utilities.choosing_model(config)
    print(model)
    # Checkpoint callback: either keep top-k + last (default), or every epoch.
    if args.all_ckp == False:
        save_file = os.path.join(config["ckpt"]["PATH"],
                        config["ckpt"]["save_dir"]
                        )
        checkpoint_callback = ModelCheckpoint(
        dirpath=save_file,
        every_n_epochs = 1,
        save_last = True,
        monitor = 'val_loss',
        mode = 'min',
        save_top_k = args.save_top_k,
        filename="model-{epoch:03d}-{val_loss:.4f}",
        )
    elif args.all_ckp == True:
        save_file = os.path.join(config["ckpt"]["PATH"], "all_epochs",
                        f'{config["train"]["epochs"]}_{config["model"]["activ"]}',
                        config["ckpt"]["save_dir"]
                        )
        checkpoint_callback = ModelCheckpoint(
        dirpath=save_file,
        every_n_epochs = 1,
        save_top_k = -1,
        filename="model-{epoch:03d}-{val_loss:.4f}",
        )
    # Optionally wipe a pre-existing checkpoint directory.
    if os.path.exists(save_file):
        print(f"The model directory exists. Overwrite? {args.erase}")
        if args.erase == True:
            shutil.rmtree(save_file)
    if args.checkpoint is None:
        #left the default values provided by the config file
        train_dataloader, val_dataloader = datasetFactory(config=config, do=args.do, args=None)
        max_epochs = config["train"]["epochs"]
    elif args.checkpoint is not None:
        print(f"Load from checkpoint {args.checkpoint}")
        #model=model.load_from_checkpoint(args.checkpoint)
        # Load weights only (state_dict); hyper-parameters stay those of `model`.
        checkpoint = torch.load(args.checkpoint, map_location=lambda storage, loc: storage)
        print(checkpoint.keys())
        model.load_state_dict(checkpoint["state_dict"])
        print(model.learning_rate)
        #change optimizer if needed
        if args.lr is None:
            args.lr = config["train"]["lr"]
        if args.weight_decay is None:
            args.weight_decay = config["train"]["weight_decay"]
        if args.optimizer is not None:
            if args.optimizer == "SGD":
                optimizer = torch.optim.SGD(model.parameters(), lr=args.lr, momentum=0.9)
            elif args.optimizer == "Adam":
                optimizer = torch.optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.weight_decay)
            else:
                optimizer = torch.optim.AdamW(model.parameters(), lr=args.lr, weight_decay=args.weight_decay)
            #change the scheduler if needed
            # NOTE(review): if args.scheduler is a string other than the two
            # names below, `scheduler` is never assigned and the
            # configure_optimizers call underneath raises NameError — confirm
            # the accepted scheduler names.
            if args.scheduler is not None:
                if args.scheduler == "ReduceLROnPlateau":
                    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.1, patience=10, verbose=True, eps=1e-08, min_lr=0)
                elif args.scheduler == "CosineAnnealingLR":
                    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=10, eta_min=0, last_epoch=-1)
            else:
                if args.step_size is None:
                    args.step_size = config["train"]["step_size"]
                if args.gamma is None:
                    args.gamma = config["train"]["gamma"]
                scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=args.step_size, gamma=args.gamma)
            model.configure_optimizers(optimizer, scheduler)
        #change the number of epochs if needed
        if args.epochs is not None:
            print(f"Change the number of epochs to {args.epochs}")
            max_epochs = args.epochs
        else:
            max_epochs = config["train"]["epochs"]
            print(checkpoint["epoch"])
        #change the filename if needed
        if args.filename is not None:
            print(f"Change the filename to {args.filename}")
            checkpoint_callback.filename = args.filename+"-{epoch:03d}-{val_loss:.4f}"
        if args.data_base is None:
            args.data_base = config["data"]["process"]
            args.PATH = config["data"]["PATH"]
        else:
            args.PATH = os.path.join('save_files', 'acoustic', args.data_base)
        #change the config file if needed through the command line
        train_dataloader, val_dataloader = datasetFactory(config=config, do = args.do, args=args)
    # Lightning's default checkpointing vs. the explicit callback above.
    if args.usual_ckpt == True:
        trainer = pl.Trainer(max_epochs=max_epochs,
                            accelerator=args.accelerator,
                            devices=args.devices,
                            default_root_dir=save_file)
    elif args.usual_ckpt == False:
        trainer = pl.Trainer(max_epochs=max_epochs,
                            accelerator=args.accelerator,
                            devices=args.devices,
                            callbacks=[checkpoint_callback])
    # Resume restores the full trainer state; otherwise start a fresh fit
    # (possibly with the pre-loaded weights from above).
    if args.resume == True:
        trainer.fit(model, train_dataloader, val_dataloader, ckpt_path=args.checkpoint)
    else:
        trainer.fit(model, train_dataloader, val_dataloader)
if __name__ == '__main__':
    # NOTE(review): argparse `type=bool` is a well-known pitfall — bool("False")
    # is True, so any non-empty value passed on the CLI enables these flags.
    # Only the defaults behave as intended; consider a str2bool converter.
    parser = argparse.ArgumentParser('Training of the Architectures', add_help=True)
    parser.add_argument('-c','--config_file', type=str,
                    help='Path to the configuration file',
                    default='config/acoustic/GRF_7Hz/FNO25k.yaml')
    parser.add_argument('-a', '--all_ckp', type = bool,
                    help='Allow to save all the ckpt',
                    default=False)
    parser.add_argument('-u_ckpt', '--usual_ckpt', type = bool,
                    help='Allow to save the usual ckpt as in pytorch-lightning\
                        use it only if you want to save the ckpt in a different directory\
                        or multiple analysis of networks',
                    default=False)
    parser.add_argument('-e', '--erase', type = bool,
                    help='erase_save_dir',
                    default=False)
    parser.add_argument( '-ckpt', '--checkpoint', type = str,
                    help='checkpoint file to load',
                    default=None)
    parser.add_argument('-r', '--resume', type = bool,
                    help='resume training',
                    default=False)
    parser.add_argument('-savetop', '--save_top_k', type = int,
                    help='save top k ckpt',
                    default=3)
    # The options below default to None; main() falls back to the config file
    # value whenever an override is not supplied.
    parser.add_argument('-lr', '--lr', type = float,
                    help='learning rate',
                    default=None)
    parser.add_argument('-o', '--optimizer', type = str,
                    help='optimizer',
                    default=None)
    parser.add_argument('-s', '--scheduler', type = str,
                    help='scheduler',
                    default=None)
    parser.add_argument('-ep', '--epochs', type = int,
                    help='number of epochs',
                    default=None)
    parser.add_argument('-f', '--filename', type = str,
                    help='filename',
                    default=None)
    parser.add_argument('-b', '--batch_size', type = int,
                    help='batch_size',
                    default=None)
    parser.add_argument('-lw', '--load_workers', type = int,
                    help='load_workers',
                    default=None)
    parser.add_argument('-db', '--data_base', type = str,
                    help='database',
                    default=None)
    parser.add_argument('-weight_decay', '--weight_decay', type = float,
                    help='weight_decay',
                    default=None)
    parser.add_argument('-step_size', '--step_size', type = int,
                    help='step_size',
                    default=None)
    parser.add_argument('-gamma', '--gamma', type = float,
                    help='gamma',
                    default=None)
    parser.add_argument('-P', '--PATH', type = str,
                    help='PATH',
                    default=None)
    parser.add_argument('-d', '--devices', type = int,
                    help='devices',
                    default=1)
    parser.add_argument('-acc', '--accelerator', type = str,
                    help='accelerator',
                    default='gpu')
    parser.add_argument('-do', '--do', type=str,
                    help='do',
                    default="train")
    args=parser.parse_args()
    config_file = args.config_file
main(args) | 10,447 | 45.435556 | 153 | py |
Fine-tuning-NOs | Fine-tuning-NOs-master/reconstruction_data.py | from main import choosing_model
import yaml
import argparse
import utilities
import os
import torch
import pytorch_lightning as pl
import numpy as np
import matplotlib.pyplot as plt
from utilities import to_numpy
def saving_files(x, y, out, database, name):
    """Persist one evaluation batch as .npy files.

    Parameters
    ----------
    x, y, out : array-like — input wavespeed, HDG reference data and the
        network prediction (converted with utilities.to_numpy first).
    database, name : str — output files land in
        ``make_graph/data/<database>/<name>/``.

    Fix: write through explicit joined paths instead of os.chdir(), which
    permanently changed the process working directory for any code running
    after this call.
    """
    target_dir = os.path.join("make_graph", "data", database, name)
    os.makedirs(target_dir, exist_ok=True)
    np.save(os.path.join(target_dir, 'wavespeed.npy'), to_numpy(x))
    np.save(os.path.join(target_dir, 'data.npy'), to_numpy(y))
    np.save(os.path.join(target_dir, f'data_{name}.npy'), to_numpy(out))
def datasetFactoryTest(config):
    """Return the dataloader for the held-out test split of ``config``."""
    data_cfg = config["data"]
    file_lists = utilities.GettingLists(data_for_training=data_cfg["n_sample"],
                                        wave_eq=data_cfg["PDE_type"],
                                        data_base=data_cfg["process"],
                                        PATH=data_cfg["PATH"])
    return utilities.MyLoader(GL=file_lists, do="test", config=config)
if __name__ == '__main__':
    # NOTE(review): this file does `from main import choosing_model` at the
    # top, but main.py does not define choosing_model (it lives in
    # utilities) — confirm that import actually resolves.
    parser = argparse.ArgumentParser('Getting data from the test set', add_help=False)
    parser.add_argument('-c','--config_file', type=str,
                    help='Path to the configuration file',
                    default='config/acoustic/GRF_7Hz/FNO25k.yaml')
    args=parser.parse_args()
    config_file = args.config_file
    with open(config_file, 'r') as stream:
        config = yaml.load(stream, yaml.FullLoader)
    print(config)
    # sFNO+epsilon_v2 runs were trained for 200 epochs, the rest for 100.
    if config["Project"]["name"]== "sFNO+epsilon_v2":
        file_ckpt = "epoch=199-step=166800.ckpt"
    else:
        file_ckpt = "epoch=99-step=50000.ckpt"
    c_save = config["ckpt"]
    model = choosing_model(config)
    test_dataloader = datasetFactoryTest(config)
    myloss = utilities.LpLoss(size_average=False)
    # Always reads the first training realization (version_0).
    PATH = os.path.join(c_save["PATH"], c_save["save_dir"], "lightning_logs", f"version_{0}",\
         "checkpoints", file_ckpt)
    checkpoint = torch.load(PATH, map_location=lambda storage, loc: storage)
    model.load_state_dict(checkpoint["state_dict"])
    model.cuda()
    model.eval()
    k_list = [k for k in range(10)]
    save = True
    batch_size = 20
    # Run inference on the first test batch only (break after one iteration).
    with torch.no_grad():
        for x, y in test_dataloader:
            s= x.shape[2]
            x, y = (x[:batch_size,...]).cuda(), (y[:batch_size,...]).cuda()
            out = model(x).reshape(batch_size, s, s, -1)
            break
    saving_files(x, y, out, database=config["data"]["process"], name =config["ckpt"]["alias"])
| 2,493 | 32.253333 | 94 | py |
Fine-tuning-NOs | Fine-tuning-NOs-master/reconstruction_plot.py | from main import choosing_model
import yaml
import argparse
import utilities
import os
import torch
import pytorch_lightning as pl
import numpy as np
import matplotlib.pyplot as plt
from utilities import to_numpy
def plotting(in_, NN_out, out, name, database,
            k_list =[1,2,3,4], save=False, vmin=-0.5, vmax =0.5,
            shrink = 0.8):
    """Plot wavespeed / HDG reference / network prediction for chosen samples.

    Parameters
    ----------
    in_, NN_out, out : batched arrays/tensors (wavespeed input, network
        output, HDG reference); converted with utilities.to_numpy.
    name : network label used in titles and in the output folder name.
    database : 'GRF_7Hz' plots one real field on a 128x128 grid;
        'GRF_12Hz' / 'GRF_15Hz' plot real + imaginary parts on 64x64.
    k_list : sample indices to plot; save : write PNGs under
        make_graph/figures/<database>/<name>/.

    Fixes over the previous version:
    * ``database == ('GRF_12Hz') or ('GRF_15Hz')`` was always truthy (the
      second operand is a bare non-empty string); replaced by a proper
      membership test.
    * samples are addressed with ``enumerate`` after the fancy-index
      selection, so a non-contiguous ``k_list`` no longer indexes past the
      end of the pre-selected batch.
    """
    in_ = to_numpy(in_)[k_list,...]
    NN_out = to_numpy(NN_out)[k_list,...]
    out = to_numpy(out)[k_list,...]
    # idx walks the selected batch; k keeps the original sample id for titles.
    for idx, k in enumerate(k_list):
        if database == 'GRF_7Hz':
            s = 128
            in_k = in_[idx,...].reshape(s,s)
            out_k = out[idx,...].reshape(s,s)
            NN_k =NN_out[idx,...].reshape(s,s)
            plt.figure(figsize=(10,10))
            plt.subplot(131)
            plt.imshow(in_k, vmin=1., vmax =3., cmap = 'jet')
            plt.colorbar(shrink =shrink)
            plt.title(f'wavespeed: {k}')
            plt.subplot(132)
            plt.imshow(out_k, vmin=vmin, vmax =vmax, cmap = 'seismic')
            plt.colorbar(shrink =shrink)
            plt.title(f'HDG sample: {k}')
            plt.subplot(133)
            plt.imshow(NN_k, vmin=vmin, vmax =vmax, cmap = 'seismic')
            plt.colorbar(shrink =shrink)
            plt.title(f'{name} sample: {k}')
        elif database in ('GRF_12Hz', 'GRF_15Hz'):
            s = 64
            in_k = in_[idx,...].reshape(s,s)
            out_k = out[idx,...].reshape(s,s,-1)
            NN_k =NN_out[idx,...].reshape(s,s,-1)
            plt.figure(figsize=(20,10))
            plt.subplot(231)
            plt.imshow(in_k, vmin=1., vmax =5., cmap = 'jet')
            plt.colorbar(shrink =shrink)
            plt.title(f'wavespeed: {k}')
            plt.subplot(232)
            plt.imshow(out_k[:,:,0].reshape(s,s), vmin=vmin, vmax =vmax, cmap = 'seismic')
            plt.colorbar(shrink =shrink)
            plt.title(f'HDG (real) sample: {k}')
            plt.subplot(233)
            plt.imshow(NN_k[:,:,0].reshape(s,s), vmin=vmin, vmax =vmax, cmap = 'seismic')
            plt.colorbar(shrink =shrink)
            plt.title(f'{name} (real) sample: {k}')
            plt.subplot(235)
            plt.imshow(out_k[:,:,1].reshape(s,s), vmin=vmin, vmax =vmax, cmap = 'seismic')
            plt.colorbar(shrink =shrink)
            plt.title(f'HDG (imaginary) sample: {k}')
            plt.subplot(236)
            plt.imshow(NN_k[:,:,1].reshape(s,s), vmin=vmin, vmax =vmax, cmap = 'seismic')
            plt.colorbar(shrink =shrink)
            plt.title(f'{name} (imaginary) sample: {k}')
        if save== True:
            saving_dir = f'make_graph/figures/{database}/'+f'{name}'
            if not os.path.exists(saving_dir):
                os.makedirs(saving_dir)
            plt.savefig(f"{saving_dir}/ex_{k}.png")
def datasetFactoryTest(config):
    """Build and return the dataloader for the test split."""
    c = config["data"]
    gl = utilities.GettingLists(
        data_for_training=c["n_sample"],
        wave_eq=c["PDE_type"],
        data_base=c["process"],
        PATH=c["PATH"],
    )
    return utilities.MyLoader(GL=gl, do="test", config=config)
if __name__ == '__main__':
    # NOTE(review): `choosing_model` is imported from main at the top of this
    # file, but main.py does not define it (it lives in utilities) — confirm.
    parser = argparse.ArgumentParser('Plotting from test data', add_help=False)
    parser.add_argument('-c','--config_file', type=str,
                    help='Path to the configuration file',
                    default='config/acoustic/GRF_7Hz/FNO25k.yaml')
    parser.add_argument('-s','--shrink', type=float,
                    help='shrink bar value',
                    default=0.8)
    args=parser.parse_args()
    config_file = args.config_file
    with open(config_file, 'r') as stream:
        config = yaml.load(stream, yaml.FullLoader)
    print(config)
    c_save = config["ckpt"]
    model = choosing_model(config)
    # sFNO+epsilon_v2 runs were trained for 200 epochs, the rest for 100.
    if config["Project"]["name"]== "sFNO+epsilon_v2":
        file_ckpt = "epoch=199-step=166800.ckpt"
    else:
        file_ckpt = "epoch=99-step=50000.ckpt"
    test_dataloader = datasetFactoryTest(config)
    myloss = utilities.LpLoss(size_average=False)
    # Always reads the first training realization (version_0).
    PATH = os.path.join(c_save["PATH"], c_save["save_dir"], "lightning_logs", f"version_{0}",\
         "checkpoints", file_ckpt)
    checkpoint = torch.load(PATH, map_location=lambda storage, loc: storage)
    model.load_state_dict(checkpoint["state_dict"])
    model.cuda()
    model.eval()
    k_list = [k for k in range(10)]
    save = True
    # Inference on the first test batch only.
    with torch.no_grad():
        for x, y in test_dataloader:
            batch_size, s= x.shape[0:2]
            x, y = x.cuda(), y.cuda()
            out = model(x).reshape(batch_size, s, s,-1)
            break
    plotting(
        in_ = x,
        NN_out =out,
        out= y,
        name=config["ckpt"]["alias"],
        database = config["data"]["process"],
        k_list= k_list,
        save = save,
        shrink= args.shrink,
        vmin=-0.2,
vmax =0.2) | 4,950 | 35.138686 | 94 | py |
Fine-tuning-NOs | Fine-tuning-NOs-master/OOD.py | import yaml
from evaluation import saving_files
import argparse
import utilities
from utilities import to_numpy
import os
import torch
import pytorch_lightning as pl
import numpy as np
import matplotlib.pyplot as plt
def load_ood(args, size=64, dir_skeleton=None):
    """Load one out-of-distribution (wavespeed, data) pair from OOD/OOD_files.

    Parameters
    ----------
    args : namespace with ``ood_sample`` (int, selects set_NN) and ``freq``
        (int, selects the data file frequency).
    size : spatial grid size of the stored fields (default 64).
    dir_skeleton : optional sub-directory name; derived from
        ``args.ood_sample`` when None.

    Returns (model, data): float tensors of shape (-1, size, size, 1) and
    (-1, size, size, 2) respectively.

    Fix: the parameter used to be named ``arg`` while the body read the
    module-level ``args`` global; the function now uses its own argument
    (all existing call sites pass it positionally, so callers are
    unaffected).
    """
    if dir_skeleton is None:
        dir_skeleton = 'set_{:02d}'.format(args.ood_sample)
    dir_ood = os.path.join('OOD', "OOD_files", dir_skeleton)
    data = np.load(os.path.join(dir_ood, f'data_set{args.ood_sample}_freq{args.freq}.npy'))
    model = np.load(os.path.join(dir_ood, f'model_set{args.ood_sample}.npy'))
    # wavespeed scaled by 1e-3 (presumably m/s -> km/s — TODO confirm) and
    # reshaped to channels-last with a singleton channel.
    model = torch.tensor(model*1e-3, dtype=torch.float).view(-1,size,size,1)
    # data carries two channels (real/imaginary parts).
    data =torch.tensor(data, dtype=torch.float).view(-1, size,size,2)
    print(f'vp= {model.shape}, data={data.shape}')
    return model, data
def test_ood(config, args, name =None, dir_skeleton= None, realization_k = 0, x=None, y=None):
    """Evaluate one trained checkpoint on an out-of-distribution set.

    Loads the checkpoint at ``args.checkpoint`` into the model described by
    ``config``, runs it on (x, y) — loaded via load_ood(args) if not
    supplied — and returns the batch-averaged LpLoss.  Optionally saves
    comparison plots (args.save_graph) and .npy dumps (args.save_npy).
    NOTE(review): inference runs outside torch.no_grad(), so autograd
    buffers are allocated unnecessarily — consider wrapping the forward.
    """
    if dir_skeleton is None:
        dir_skeleton = 'set_{:02d}'.format(args.ood_sample)+f'_freq{args.freq}'
    if name is None:
        name= config["ckpt"]["alias"]
    model = utilities.choosing_model(config)
    if x is None or y is None:
        x, y =load_ood(args)
    myloss = utilities.LpLoss(size_average=False)
    checkpoint = torch.load(args.checkpoint, map_location=lambda storage, loc: storage)
    model.load_state_dict(checkpoint["state_dict"])
    model.cuda()
    model.eval()
    loss_dict = {
        'test_loss_ood': 0.0
    }
    x, y = x.cuda(), y.cuda()
    batch_size, s, s, _ = x.shape
    out = model(x).reshape(batch_size, s, s, -1)
    # LpLoss(size_average=False) presumably sums over the batch, hence the
    # division by batch_size — TODO confirm against utilities.LpLoss.
    loss_test = myloss(out.view(batch_size,-1), y.view(batch_size,-1))
    loss_dict['test_loss_ood']+= loss_test.item()/batch_size
    print(f"test test_loss_ood: {loss_dict['test_loss_ood']}")
    if args.save_graph:
        print("generating and saving graph")
        utilities.plotting(in_ = x, NN_out = out, out = y,
                    name = name, database=dir_skeleton, PATH='OOD', ksample=realization_k)
    if args.save_npy:
        print("saving npy files")
        utilities.saving_files(in_files = to_numpy(x), out_files = to_numpy(y),
                        NN_out_files = to_numpy(out), NN_name = name,
                        database=dir_skeleton, PATH='OOD', realization_k = realization_k)
    return loss_dict['test_loss_ood']
if __name__ == '__main__':
    # NOTE(review): argparse `type=bool` treats any non-empty CLI value as
    # True (bool("False") is True); only the defaults behave as intended.
    # Also note the '--vamax' spelling (likely meant '--vmax') — the parsed
    # value is never read below.
    parser = argparse.ArgumentParser('out of distribution check', add_help=False)
    parser.add_argument('-c','--config_file', type=str,
                    help='Path to the configuration file',
                    default='config/acoustic/GRF_7Hz/FNO25k.yaml')
    parser.add_argument('-ckpt', '--checkpoint', type=str,
                    help='Path to the checkpoint file',
                    default=None)
    parser.add_argument('-ood','--ood_sample', type=int,
                    help='out of distribution set',
                    default=0)
    parser.add_argument('-sg','--save-graph', type=bool,
                    help='Saving Image',
                    default=True)
    parser.add_argument('-snpy','--save-npy', type=bool,
                    help='Saving NPY',
                    default=True)
    parser.add_argument('-f','--freq', type=int,
                    help='frequency of the OOD',
                    default=None)
    parser.add_argument('-vmax','--vamax', type=float,
                    help='vmax of the OOD',
                    default=0.5)
    parser.add_argument('-vmin','--vmin', type=float,
                    help='vmin of the OOD',
                    default=-0.5)
    parser.add_argument('-s','--shrink', type=float,
                    help='shrink bar value',
                    default=0.8)
    args=parser.parse_args()
    config_file = args.config_file
    assert args.ood_sample in [0,1,2,3,4,5], "out of distribution sample should be in [0,1,2,3,4,5]"
    assert args.freq in [None, 12, 15], "frequency should be in [12,15] Hz"
    with open(config_file, 'r') as stream:
        config = yaml.load(stream, yaml.FullLoader)
    # Fall back to the config's frequency when not given on the CLI.
    if args.freq is None:
        args.freq = config['data']['frequency']
    # getting the name of the dataset
    dir_skeleton = 'set_{:02d}'.format(args.ood_sample)+f'_freq{args.freq}'
    # No explicit checkpoint: evaluate the three training realizations
    # (version_0..2) of the configured run and store all three losses.
    if args.checkpoint is None:
        c_save = config["ckpt"]
        if config["ckpt"]["alias"]== "sFNO+epsilon_v2":
            ckpt = "epoch=199-step=166800.ckpt"
        else:
            ckpt = "epoch=99-step=50000.ckpt"
        x, y =load_ood(args)
        list_test = []
        for k in range(0,3):
            args.checkpoint = os.path.join(c_save["PATH"],
                            c_save["save_dir"],
                            "lightning_logs",
                            f"version_{k}",\
                    "checkpoints", ckpt)
            list_test.append(test_ood(config,args=args, realization_k = k, x=x, y=y))
        print(list_test)
        saving_files(list_test, database=dir_skeleton, name =config["ckpt"]["alias"], dir_= "OOD")
print(f"Load from checkpoint {args.checkpoint}") | 5,428 | 42.087302 | 100 | py |
Fine-tuning-NOs | Fine-tuning-NOs-master/evaluation.py | import yaml
import argparse
import utilities
import os
import torch
import numpy as np
from main import datasetFactory
import pytorch_lightning as pl
def saving_files(data, database, name, dir_= "make_graph"):
    """Write test-loss values to <dir_>/test_loss/<database>/<name>.csv.

    * ``data`` with several values: the whole list is written as one CSV
      column via np.savetxt (overwrites any existing file).
    * ``data`` with exactly one value: the value is appended as a new row.

    Fixes: the single-value branch used to call os.makedirs() on the CSV
    *file* path itself, creating a directory named ``<name>.csv`` and then
    crashing on open(); only the parent directory is created now.  A
    trailing newline is written so successive appends form distinct rows
    (matching the original "add a new row" intent).
    """
    out_dir = os.path.join(dir_, "test_loss", database)
    os.makedirs(out_dir, exist_ok=True)
    csv_path = os.path.join(out_dir, f'{name}.csv')
    if len(data) != 1:
        np.savetxt(csv_path, data, delimiter=",")
    else:
        # append a new row to the csv file
        with open(csv_path, "a") as f:
            f.write(str(data[0]) + "\n")
def test(config, args):
    """Compute the dataset-averaged LpLoss of one checkpoint on a split.

    Builds the model from ``config``, loads weights from
    ``args.checkpoint`` (CPU map_location, then moved to CUDA), iterates
    the dataloader for the ``args.do`` split and accumulates the summed
    LpLoss (size_average=False), finally dividing by the dataset length.
    """
    model = utilities.choosing_model(config)
    test_dataloader = datasetFactory(config, do = args.do, args=None)
    myloss = utilities.LpLoss(size_average=False)
    print(f"Load from checkpoint {args.checkpoint}")
    checkpoint = torch.load(args.checkpoint, map_location=lambda storage, loc: storage)
    model.load_state_dict(checkpoint["state_dict"])
    model.cuda()
    model.eval()
    loss_dict = {
        'test_loss': 0.0
    }
    with torch.no_grad():
        for x, y in test_dataloader:
            batch_size, s= x.shape[0:2]
            x, y = x.cuda(), y.cuda()
            out = model(x).reshape(batch_size, s, s, -1)
            loss_test = myloss(out.view(batch_size,-1), y.view(batch_size,-1))
            loss_dict['test_loss']+= loss_test.item()
    # mean loss per sample over the whole split
    return loss_dict['test_loss'] / len(test_dataloader.dataset)
if __name__ == '__main__':
    parser = argparse.ArgumentParser('Testing losses', add_help=False)
    parser.add_argument('-c','--config_file', type=str,
                    help='Path to the configuration file',
                    default='config/acoustic/GRF_7Hz/FNO25k.yaml')
    parser.add_argument('-do', '--do', type=str,
                    help='do',
                    default="test")
    parser.add_argument('-n', '--numb_samples', type= int, default = 3)
    parser.add_argument('-ckpt', '--checkpoint', type=str,
                    help='Path to the checkpoint file',
                    default=None)
    args=parser.parse_args()
    config_file = args.config_file
    with open(config_file, 'r') as stream:
        config = yaml.load(stream, yaml.FullLoader)
    # Output folder name encodes the activation and the dataset.
    if config["model"]["activ"] is None:
        activ = "Identity"
    else:
        activ = config["model"]["activ"]
    database= activ+"_"+config["data"]["process"]
    name= config["ckpt"]["alias"]
    # No explicit checkpoint: evaluate the first `numb_samples` training
    # realizations (version_k) of the configured run.
    if args.checkpoint is None:
        c_save = config["ckpt"]
        if config["ckpt"]["alias"]== "sFNO+epsilon_v2":
            ckpt = "epoch=199-step=166800.ckpt"
        else:
            ckpt = "epoch=99-step=50000.ckpt"
        list_test = []
        for k in range(0,args.numb_samples):
            args.checkpoint = os.path.join(c_save["PATH"],
                        c_save["save_dir"], "lightning_logs", f"version_{k}",\
                    "checkpoints", ckpt)
            list_test.append(test(config,args=args))
        print(list_test)
        saving_files(list_test, database=database, name =name)
    elif args.checkpoint is not None:
        # Single checkpoint: append its loss as one row.
        list_test= test(config,args=args)
        print(list_test)
        saving_files([list_test], database=database, name =name)
| 3,492 | 36.159574 | 98 | py |
Fine-tuning-NOs | Fine-tuning-NOs-master/models/sFNO_epsilon_v2.py | import pytorch_lightning as pl
import torch
from torch import optim, nn
from .FNO import fourier_conv_2d
from .basics_model import LayerNorm, get_grid2D, FC_nn
from timm.models.layers import DropPath, trunc_normal_
import torch.nn.functional as F
from utilities import LpLoss
from .sFNO import IO_layer
###################################################
# Integral Operator Layer Block with skip connection
###################################################
class IO_ResNetblock(nn.Module):
    """ConvNeXt-style residual block built around a Fourier integral operator.

    Two pre-norm residual stages:
      1. non-local:  x + DropPath(IO(LN(x)))          (spectral mixing)
      2. local:      x + DropPath(pw2(GELU(pw1(LN(x)))))  (1x1-conv
         inverse-bottleneck MLP, width expanded 4x)
    ``drop_path`` enables stochastic depth; identity when 0.
    """
    def __init__(self, features_,
                wavenumber,
                drop_path = 0.,
                drop = 0.):
        super().__init__()
        # spectral (non-local) operator from .sFNO, truncated to `wavenumber` modes
        self.IO = IO_layer(features_, wavenumber, drop)
        self.pwconv1 = nn.Conv2d(features_, 4* features_, 1) # pointwise/1x1 convs, implemented with linear layers
        self.act = nn.GELU()
        self.pwconv2 =nn.Conv2d(4 * features_, features_,1)
        self.norm1 = LayerNorm(features_, eps=1e-5, data_format = "channels_first")
        self.norm2 = LayerNorm(features_, eps=1e-5, data_format = "channels_first")
        # stochastic depth on both residual branches; identity if drop_path == 0
        self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
    def forward(self, x):
        skip = x
        x = self.norm1(x)
        x =self.IO(x)
        x =skip+self.drop_path(x) #NonLocal Layers
        skip = x
        x = self.norm2(x)
        #local
        x = self.pwconv1(x)
        x = self.act(x)
        x = self.pwconv2(x)
        x = skip + self.drop_path(x)
        return x
#######################################
#sFNO_epsilon_v2
#######################################
class sFNO_epsilon_v2(pl.LightningModule):
    """Staged FNO with ConvNeXt-style residual blocks (LightningModule).

    Architecture: a 1x1-conv stem, three LayerNorm+1x1-conv downsampling
    ("lifting") transitions between widths ``dims``, and four stages of
    ``depths[i]`` IO_ResNetblock blocks with ``modes[i]`` Fourier modes.
    A final 1x1 conv maps to ``out_chans``.  Input is channels-last
    (B, H, W, C); a coordinate grid is concatenated when ``with_grid``.
    NOTE(review): ``head_init_scale`` is accepted but never used.
    """
    def __init__(self,
                in_chans = 3,
                out_chans = 1,
                modes = [12, 12, 12, 12],
                depths = [3,3,9,3],
                dims = [36, 36, 32, 34],
                drop_path_rate = 0.,
                drop = 0.,
                head_init_scale=1.,
                padding=9,
                with_grid = True,
                loss = "rel_l2",
                learning_rate = 1e-3,
                step_size= 100,
                gamma= 0.5,
                weight_decay= 1e-5,
                ):
        super().__init__()
        # training criterion selected by name
        if loss == 'l1':
            self.criterion = nn.L1Loss()
        elif loss == 'l2':
            self.criterion = nn.MSELoss()
        elif loss == 'smooth_l1':
            self.criterion = nn.SmoothL1Loss()
        elif loss == "rel_l2":
            self.criterion =LpLoss()
        self.with_grid = with_grid
        self.padding = padding
        self.learning_rate = learning_rate
        self.step_size = step_size
        self.gamma = gamma
        self.weight_decay = weight_decay
        # stem + three norm/1x1-conv transitions between stage widths
        self.lifting_layers = nn.ModuleList()
        steam = nn.Conv2d(in_chans, dims[0], 1,1)
        self.lifting_layers.append(steam)
        for i in range(3):
            lifting_layers = nn.Sequential(
                    LayerNorm(dims[i], eps=1e-6, data_format= "channels_first"),
                    nn.Conv2d(dims[i], dims[i+1], kernel_size = 1, stride = 1)
            )
            self.lifting_layers.append(lifting_layers)
        self.stages = nn.ModuleList()
        # linear ramp of stochastic-depth rates across all blocks
        dp_rates = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))]
        cur = 0
        for i in range(4):
            stage = nn.Sequential(
                *[IO_ResNetblock(features_=dims[i],
                wavenumber=[modes[i]]*2,
                drop_path=dp_rates[cur + j],
                drop =drop) for j in range(depths[i])]
            )
            self.stages.append(stage)
            cur += depths[i]
        self.head = nn.Conv2d(dims[-1], out_chans,1,1)
    def forward_features(self, x):
        # channels-last -> channels-first for the conv/spectral layers
        x=x.permute(0,3,1,2).contiguous()
        x = self.lifting_layers[0](x)
        # pad right/bottom so non-periodic inputs behave under the FFT
        x = F.pad(x, [0,self.padding, 0, self.padding])
        # NOTE(review): the loop starts at 1, so self.stages[0] is never
        # applied (only transitions 1..3 + stages 1..3 run) — confirm this
        # is the intended topology.
        for i in range(1,4):
            x = self.lifting_layers[i](x)
            x = self.stages[i](x)
        x = x[..., :-self.padding, :-self.padding]
        return x
    def forward(self, x):
        if self.with_grid:
            # append normalized (x, y) coordinate channels
            grid = get_grid2D(x.shape, x.device)
            x = torch.cat((x, grid), dim=-1)
            del grid
        x = self.forward_features(x)
        x = self.head(x)
        return x
    def training_step(self, batch: torch.Tensor, batch_idx):
        # training_step defines the train loop.
        # it is independent of forward
        x, y = batch
        batch_size = x.shape[0]
        out= self(x)
        loss = self.criterion(out.view(batch_size,-1), y.view(batch_size,-1))
        self.log("loss", loss, on_epoch=True, prog_bar=True, logger=True)
        return loss
    def validation_step(self, val_batch: torch.Tensor, batch_idx):
        x, y = val_batch
        batch_size = x.shape[0]
        out= self(x)
        val_loss = self.criterion(out.view(batch_size,-1), y.view(batch_size,-1))
        self.log('val_loss', val_loss, on_epoch=True, prog_bar=True, logger=True)
        return val_loss
    def configure_optimizers(self):
        # AdamW + step decay, parameterized by the constructor arguments
        optimizer = optim.AdamW(self.parameters(), lr=self.learning_rate, weight_decay=self.weight_decay)
        scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=self.step_size, gamma=self.gamma)
        return {
            "optimizer": optimizer,
            "lr_scheduler": {
                "scheduler": scheduler
            },
        }
#######################################
# Ensemble of the sFNO_epsilon_v2_proj
# the only diff. is in allowing the projection
# to be taken as an input
#######################################
class sFNO_epsilon_v2_proj(pl.LightningModule):
    """Variant of sFNO_epsilon_v2 whose output projection is injectable.

    Identical backbone to sFNO_epsilon_v2 (stem + lifting transitions +
    four IO_ResNetblock stages), but the 1x1-conv head is replaced by a
    projection MLP ``proj`` applied in channels-last layout.  Pass a custom
    module as ``proj`` or leave None for a default FC_nn.
    NOTE(review): ``head_init_scale`` is accepted but never used.

    Fix: the default ``proj`` referenced an undefined name ``features_``
    (guaranteed NameError when proj was None); it now projects from the
    last stage width ``dims[-1]`` down to a scalar field.
    """
    def __init__(self,
                in_chans = 3,
                proj = None,
                modes = [12, 12, 12, 12],
                depths = [3,3,9,3],
                dims = [36, 36, 32, 34],
                drop_path_rate = 0.,
                drop = 0.,
                head_init_scale=1.,
                padding=9,
                with_grid = True,
                loss = "rel_l2",
                learning_rate = 1e-3,
                step_size= 100,
                gamma= 0.5,
                weight_decay= 1e-5,
                ):
        super().__init__()
        # training criterion selected by name
        if loss == 'l1':
            self.criterion = nn.L1Loss()
        elif loss == 'l2':
            self.criterion = nn.MSELoss()
        elif loss == 'smooth_l1':
            self.criterion = nn.SmoothL1Loss()
        elif loss == "rel_l2":
            self.criterion =LpLoss()
        self.with_grid = with_grid
        self.padding = padding
        self.learning_rate = learning_rate
        self.step_size = step_size
        self.gamma = gamma
        self.weight_decay = weight_decay
        # stem + three norm/1x1-conv transitions between stage widths
        self.lifting_layers = nn.ModuleList()
        steam = nn.Conv2d(in_chans, dims[0], 1,1)
        self.lifting_layers.append(steam)
        for i in range(3):
            lifting_layers = nn.Sequential(
                    LayerNorm(dims[i], eps=1e-6, data_format= "channels_first"),
                    nn.Conv2d(dims[i], dims[i+1], kernel_size = 1, stride = 1)
            )
            self.lifting_layers.append(lifting_layers)
        self.stages = nn.ModuleList()
        # linear ramp of stochastic-depth rates across all blocks
        dp_rates = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))]
        cur = 0
        for i in range(4):
            stage = nn.Sequential(
                *[IO_ResNetblock(features_=dims[i],
                wavenumber=[modes[i]]*2,
                drop_path=dp_rates[cur + j],
                drop =drop) for j in range(depths[i])]
            )
            self.stages.append(stage)
            cur += depths[i]
        if proj is None:
            # default projection from the final stage width to one channel
            self.proj = FC_nn([dims[-1], dims[-1]//2, 1],
                        activation = "relu",
                        outermost_norm=False
                        )
        else:
            self.proj = proj
    def forward_features(self, x):
        # channels-last -> channels-first for the conv/spectral layers
        x=x.permute(0,3,1,2).contiguous()
        x = self.lifting_layers[0](x)
        x = F.pad(x, [0,self.padding, 0, self.padding])
        # NOTE(review): loop starts at 1, so self.stages[0] is never applied
        # (same topology as sFNO_epsilon_v2) — confirm intended.
        for i in range(1,4):
            x = self.lifting_layers[i](x)
            x = self.stages[i](x)
        x = x[..., :-self.padding, :-self.padding]
        return x
    def forward(self, x):
        if self.with_grid:
            # append normalized (x, y) coordinate channels
            grid = get_grid2D(x.shape, x.device)
            x = torch.cat((x, grid), dim=-1)
            del grid
        x = self.forward_features(x)
        # back to channels-last so the MLP projection acts on the last dim
        x = x.permute(0, 2, 3, 1 )
        x = self.proj(x)
        return x
    def training_step(self, batch: torch.Tensor, batch_idx):
        # training_step defines the train loop.
        # it is independent of forward
        x, y = batch
        batch_size = x.shape[0]
        out= self(x)
        loss = self.criterion(out.view(batch_size,-1), y.view(batch_size,-1))
        self.log("loss", loss, on_epoch=True, prog_bar=True, logger=True)
        return loss
    def validation_step(self, val_batch: torch.Tensor, batch_idx):
        x, y = val_batch
        batch_size = x.shape[0]
        out= self(x)
        val_loss = self.criterion(out.view(batch_size,-1), y.view(batch_size,-1))
        self.log('val_loss', val_loss, on_epoch=True, prog_bar=True, logger=True)
        return val_loss
    def configure_optimizers(self, optimizer=None, scheduler=None):
        # callers may inject optimizer/scheduler (used when fine-tuning)
        if optimizer is None:
            optimizer = optim.AdamW(self.parameters(), lr=self.learning_rate, weight_decay=self.weight_decay)
        if scheduler is None:
            scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=self.step_size, gamma=self.gamma)
        return {
            "optimizer": optimizer,
            "lr_scheduler": {
                "scheduler": scheduler
            },
        }
| 10,276 | 35.967626 | 114 | py |
Fine-tuning-NOs | Fine-tuning-NOs-master/models/FNO_residual.py | import pytorch_lightning as pl
import torch
from torch import optim, nn
from .FNO import fourier_conv_2d
from .basics_model import LayerNorm, get_grid2D, FC_nn, set_activ
import torch.nn.functional as F
from utilities import LpLoss
from timm.models.layers import DropPath
#######################################
# Integral Operator Layer
#######################################
class IO_layer(nn.Module):
    """Single Fourier layer: spectral conv + pointwise linear path.

    Computes act(dropout(IO(x) + W(x))) where IO is a spectral
    (Fourier-mode) convolution truncated to ``wavenumber`` modes and W is a
    1x1 conv acting as the local linear bypass.
    """
    def __init__(self, features_,
                wavenumber,
                drop = 0.,
                activation = "relu"):
        super().__init__()
        # local linear bypass (1x1 conv) added to the spectral path
        self.W = nn.Conv2d(features_, features_, 1)
        self.IO = fourier_conv_2d(features_, features_,*wavenumber)
        # activation resolved by name via set_activ (defined in basics_model)
        self.act = set_activ(activation)
        self.dropout = nn.Dropout(drop)
    def forward(self, x):
        x = self.IO(x)+self.W(x)
        x = self.dropout(x)
        x = self.act(x)
        return x
class FNO_residual_Block(nn.Module):
    """Pre-norm residual wrapper around an IO_layer: x + DropPath(IO(LN(x))).

    NOTE(review): ``self.act`` is registered but never called in forward —
    the activation is already applied inside IO_layer; confirm it is not
    needed after the residual add.
    """
    def __init__(self, features_,
                wavenumber,
                drop = 0.,
                drop_path= 0.,
                activation = "relu"):
        super().__init__()
        self.IO = IO_layer(features_=features_,
                    wavenumber=wavenumber,
                    drop= drop,
                    activation= activation)
        self.act = set_activ(activation)
        self.norm1 = LayerNorm(features_, eps=1e-5, data_format = "channels_first")
        # stochastic depth; identity when drop_path == 0
        self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
    def forward(self, x):
        input = x
        x = self.norm1(x)
        x = self.IO(x)
        x =input+self.drop_path(x) #NonLocal Layers
        return x
#######################################
# Ensemble of the sFNO_epsilon_v1
#######################################
class FNO_residual(pl.LightningModule):
    """FNO with pre-norm residual Fourier blocks (LightningModule).

    Pipeline: optional coordinate grid concat -> MLP lifting -> pad ->
    ``len(wavenumber)`` FNO_residual_Block layers -> unpad -> MLP
    projection to one channel.  Input/output are channels-last
    (B, H, W, C).
    """
    def __init__(self,
                wavenumber, features_,
                padding = 9,
                lifting = None,
                proj = None,
                dim_input = 1,
                with_grid= True,
                loss = "rel_l2",
                learning_rate = 1e-2,
                step_size= 100,
                gamma= 0.5,
                weight_decay= 1e-5,
                drop = 0.,
                drop_path= 0.,
                activation = "relu"
                ):
        super().__init__()
        self.with_grid = with_grid
        self.padding = padding
        # one spectral block per entry in `wavenumber`
        self.layers = len(wavenumber)
        self.learning_rate = learning_rate
        self.step_size = step_size
        self.gamma = gamma
        self.weight_decay = weight_decay
        # training criterion selected by name
        if loss == 'l1':
            self.criterion = nn.L1Loss()
        elif loss == 'l2':
            self.criterion = nn.MSELoss()
        elif loss == 'smooth_l1':
            self.criterion = nn.SmoothL1Loss()
        elif loss == "rel_l2":
            self.criterion =LpLoss()
        # two extra input channels for the (x, y) coordinate grid
        if with_grid == True:
            dim_input+=2
        if lifting is None:
            self.lifting = FC_nn([dim_input, features_//2, features_],
                        activation = "relu",
                        outermost_norm=False
                        )
        else:
            self.lifting = lifting
        if proj is None:
            self.proj = FC_nn([features_, features_//2, 1],
                        activation = "relu", drop = drop,
                        outermost_norm=False
                        )
        else:
            self.proj = proj
        # built as a plain list, then converted into nn.Sequential below
        self.no = []
        # linear ramp of stochastic-depth rates across the blocks
        self.dp_rates = [x.item() for x in torch.linspace(0, drop_path, self.layers )]
        for l in range(self.layers):
            self.no.append(FNO_residual_Block(features_ = features_,
                                    wavenumber=[wavenumber[l]]*2,
                                    drop= drop,
                                    drop_path= self.dp_rates[l],
                                    activation=activation))
        self.no =nn.Sequential(*self.no)
    def forward(self, x: torch.Tensor):
        if self.with_grid == True:
            # append normalized (x, y) coordinate channels
            grid = get_grid2D(x.shape, x.device)
            x = torch.cat((x, grid), dim=-1)
        x = self.lifting(x)
        # channels-last -> channels-first for the spectral blocks
        x = x.permute(0, 3, 1, 2)
        # pad right/bottom so non-periodic inputs behave under the FFT
        x = F.pad(x, [0,self.padding, 0,self.padding])
        x = self.no(x)
        x = x[..., :-self.padding, :-self.padding]
        x = x.permute(0, 2, 3, 1 )
        x =self.proj(x)
        return x
    def training_step(self, batch: torch.Tensor, batch_idx):
        # training_step defines the train loop.
        # it is independent of forward
        x, y = batch
        batch_size = x.shape[0]
        out= self(x)
        loss = self.criterion(out.view(batch_size,-1), y.view(batch_size,-1))
        self.log("loss", loss, on_epoch=True, prog_bar=True, logger=True)
        return loss
    def validation_step(self, val_batch: torch.Tensor, batch_idx):
        x, y = val_batch
        batch_size = x.shape[0]
        out= self(x)
        val_loss = self.criterion(out.view(batch_size,-1), y.view(batch_size,-1))
        self.log('val_loss', val_loss, on_epoch=True, prog_bar=True, logger=True)
        return val_loss
    def configure_optimizers(self, optimizer=None, scheduler=None):
        # callers may inject optimizer/scheduler (used when fine-tuning)
        if optimizer is None:
            optimizer = optim.AdamW(self.parameters(), lr=self.learning_rate, weight_decay=self.weight_decay)
        if scheduler is None:
            scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=self.step_size, gamma=self.gamma)
        return {
            "optimizer": optimizer,
            "lr_scheduler": {
                "scheduler": scheduler
            },
        }
| 5,887 | 34.46988 | 109 | py |
Fine-tuning-NOs | Fine-tuning-NOs-master/models/basics_model.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
##########################################
# Fully connected Layer
##########################################
class FCLayer(nn.Module):
    """Fully connected layer """
    # Linear (optionally followed by LayerNorm) then an activation chosen by
    # name through set_activ (defined later in this module; presumably
    # activation=None yields an identity — TODO confirm set_activ).
    def __init__(self, in_feature, out_feature,
                activation = "gelu",
                is_normalized = True):
        super().__init__()
        if is_normalized:
            self.LinearBlock = nn.Sequential(
                nn.Linear(in_feature,out_feature),
                LayerNorm(out_feature),
            )
        else:
            self.LinearBlock = nn.Linear(in_feature,out_feature)
        self.act = set_activ(activation)
    def forward(self, x):
        return self.act(self.LinearBlock(x))
##########################################
# Fully connected Block
##########################################
class FC_nn(nn.Module):
    r"""Simple MLP to code lifting and projection"""
    # Stack of FCLayer's with widths given by `sizes`.  The last layer is
    # linear (activation=None) when outermost_linear, and its LayerNorm is
    # controlled by outermost_norm.
    # NOTE(review): forward applies dropout after *every* layer, including
    # the outermost one — confirm dropout on the final output is intended.
    def __init__(self, sizes = [2, 128, 128, 1],
                activation = 'relu',
                outermost_linear = True,
                outermost_norm = True,
                drop = 0.):
        super().__init__()
        self.dropout = nn.Dropout(drop)
        # hidden layers (all but the last transition), never normalized
        self.net = nn.ModuleList([FCLayer(in_feature= m, out_feature= n,
                                activation=activation,
                                is_normalized = False)
                        for m, n in zip(sizes[:-2], sizes[1:-1])
                        ])
        if outermost_linear == True:
            self.net.append(FCLayer(sizes[-2],sizes[-1], activation = None,
                                    is_normalized = outermost_norm))
        else:
            self.net.append(FCLayer(in_feature= sizes[-2], out_feature= sizes[-1],
                                activation=activation,
                                is_normalized = outermost_norm))
    def forward(self,x):
        for module in self.net:
            x = module(x)
            x = self.dropout(x)
        return x
###### Inverse Bottleneck ########
class MLP_inv_bottleneck(nn.Module):
    """Two-layer MLP with a 4x-wide hidden ("inverse bottleneck") layer.

    Maps dim -> 4*dim -> dim with a single nonlinearity (chosen by name via
    set_activ) between the two linear layers.
    """
    def __init__(self, dim, activation = 'gelu'):
        super().__init__()
        # attribute names kept stable so existing state_dicts still load
        self.nonlinear = set_activ(activation)
        self.L1 = nn.Linear(dim, 4 * dim)
        self.L2 = nn.Linear(4 * dim, dim)

    def forward(self, x):
        return self.L2(self.nonlinear(self.L1(x)))
########## Simple MLP ##############
class MLP_join(nn.Module):
    """Simple MLP to code lifting and projection"""
    # Plain nn.Linear stack with widths `sizes`; the shared nonlinearity and
    # dropout are applied after every layer except the last, which stays
    # purely linear.
    def __init__(self, sizes = [1, 128, 128, 1], activation = 'gelu', drop = 0.):
        super(MLP_join, self).__init__()
        self.hidden_layer = sizes
        self.nonlinear = set_activ(activation)
        self.dropout = nn.Dropout(drop)
        self.net = nn.ModuleList([nn.Linear(m, n)
                        for m, n in zip(sizes[:-1], sizes[1:])
                        ])
    def forward(self,x):
        for module in self.net[:-1]:
            x = module(x)
            x = self.nonlinear(x)
            x = self.dropout(x)
        # final layer: linear output, no activation/dropout
        return self.net[-1](x)
########## Layer Normalization ##############
class LayerNorm(nn.Module):
    r"""LayerNorm supporting ``channels_last`` (default) and ``channels_first``.

    ``channels_last`` expects inputs shaped (batch, height, width, channels)
    and defers to ``F.layer_norm``; ``channels_first`` expects
    (batch, channels, height, width) and normalizes over dim 1 by hand.
    """
    def __init__(self, normalized_shape, eps=1e-6, data_format="channels_last"):
        super().__init__()
        if data_format not in ("channels_last", "channels_first"):
            raise NotImplementedError
        self.weight = nn.Parameter(torch.ones(normalized_shape))
        self.bias = nn.Parameter(torch.zeros(normalized_shape))
        self.eps = eps
        self.data_format = data_format
        self.normalized_shape = (normalized_shape,)
    def forward(self, x):
        if self.data_format == "channels_last":
            return F.layer_norm(x, self.normalized_shape, self.weight, self.bias, self.eps)
        # channels_first: normalize over the channel dimension manually.
        mean = x.mean(1, keepdim=True)
        var = (x - mean).pow(2).mean(1, keepdim=True)
        normed = (x - mean) / torch.sqrt(var + self.eps)
        return self.weight[:, None, None] * normed + self.bias[:, None, None]
######################################################################################
# new additions over the main code
class GroupNorm(nn.GroupNorm):
    """``nn.GroupNorm`` fixed to a single group (normalizes all channels jointly).

    Input: tensor of shape [B, C, H, W].
    """
    def __init__(self, num_channels, **kwargs):
        # One group == LayerNorm-like normalization over the channel axis.
        super().__init__(num_groups=1, num_channels=num_channels, **kwargs)
######################################################################################
# Miscellaneous functions
######################################################################################
########## Getting the 2D grid using the batch
def get_grid2D(shape, device):
    """Build normalized (x, y) coordinate channels for a batch of 2-D fields.

    `shape` is indexed as (batch, size_x, size_y, ...); the result has shape
    (batch, size_x, size_y, 2) with coordinates in [0, 1] on `device`.
    """
    batch, nx, ny = shape[0], shape[1], shape[2]
    def _axis(npts):
        return torch.tensor(np.linspace(0, 1, npts), dtype=torch.float)
    gx = _axis(nx).reshape(1, nx, 1, 1).repeat([batch, 1, ny, 1])
    gy = _axis(ny).reshape(1, 1, ny, 1).repeat([batch, nx, 1, 1])
    return torch.cat((gx, gy), dim=-1).to(device)
########## Set automatically the activation function for the NN
def set_activ(activation):
    """Resolve an activation name to a callable.

    Args:
        activation: case-insensitive name ('relu', 'leaky_relu', 'tanh',
            'sine', 'gelu', 'elu') or ``None`` for the identity mapping.

    Returns:
        The corresponding functional activation, or ``nn.Identity()`` for None.

    Raises:
        Exception: if the name is not in the supported list.
    """
    if activation is None:
        return nn.Identity()
    table = {
        'relu': F.relu,
        'leaky_relu': F.leaky_relu,
        'tanh': F.tanh,
        'sine': torch.sin,
        'gelu': F.gelu,
        # Fixed: the original returned the in-place variant F.elu_, which
        # mutates its input tensor (unlike every other entry) and can break
        # autograd; use the out-of-place F.elu.
        'elu': F.elu,
    }
    try:
        return table[activation.lower()]
    except KeyError:
        raise Exception('The activation is not recognized from the list')
| 6,354 | 38.228395 | 94 | py |
Fine-tuning-NOs | Fine-tuning-NOs-master/models/FNO.py | import pytorch_lightning as pl
import torch
from torch import optim, nn
from .basics_model import get_grid2D, set_activ, FC_nn
from utilities import LpLoss
#######################################
# Fourier Convolution,
# \int_D k(x-y) v(y) dy
# = \mathcal{F}^{-1}(P \mathcal{F}(v))
#######################################
class fourier_conv_2d(nn.Module):
    """2-D spectral convolution: scale the lowest Fourier modes of the input
    by learned complex weights and transform back to physical space.
    """
    def __init__(self, in_, out_, wavenumber1, wavenumber2):
        super(fourier_conv_2d, self).__init__()
        self.out_ = out_
        self.wavenumber1 = wavenumber1
        self.wavenumber2 = wavenumber2
        init_scale = 1 / (in_ * out_)
        shape = (in_, out_, wavenumber1, wavenumber2)
        # Two weight banks: one for the positive and one for the negative
        # x-frequencies that are kept.
        self.weights1 = nn.Parameter(init_scale * torch.rand(*shape, dtype=torch.cfloat))
        self.weights2 = nn.Parameter(init_scale * torch.rand(*shape, dtype=torch.cfloat))
    def compl_mul2d(self, input, weights):
        # (batch, in, x, y) x (in, out, x, y) -> (batch, out, x, y)
        return torch.einsum("bixy,ioxy->boxy", input, weights)
    def forward(self, x):
        batch = x.shape[0]
        k1, k2 = self.wavenumber1, self.wavenumber2
        # Fourier coefficients of the input (real FFT over the last two dims).
        spectrum = torch.fft.rfft2(x)
        filtered = torch.zeros(batch, self.out_, x.size(-2), x.size(-1) // 2 + 1,
                               dtype=torch.cfloat, device=x.device)
        # Multiply only the retained low-frequency modes.
        filtered[:, :, :k1, :k2] = self.compl_mul2d(spectrum[:, :, :k1, :k2], self.weights1)
        filtered[:, :, -k1:, :k2] = self.compl_mul2d(spectrum[:, :, -k1:, :k2], self.weights2)
        # Back to physical space with the original spatial size.
        return torch.fft.irfft2(filtered, s=(x.size(-2), x.size(-1)))
#######################################
# Fourier Layer:
# \sigma( Wx + FourierConv(x))
#######################################
class Fourier_layer(nn.Module):
    """One FNO layer: ``sigma(W x + FourierConv(x))``.

    `W` is a pointwise (1x1) convolution; `fourier_conv` is the spectral
    branch. When `is_last` is true the activation is the identity.
    """
    def __init__(self, features_, wavenumber, activation = 'relu', is_last = False):
        super(Fourier_layer, self).__init__()
        self.W = nn.Conv2d(features_, features_, 1)
        self.fourier_conv = fourier_conv_2d(features_, features_, *wavenumber)
        # Fixed idiom: boolean test instead of `is_last == False`.
        self.act = set_activ(None) if is_last else set_activ(activation)
    def forward(self, x):
        """Sum the spectral and pointwise branches, then activate."""
        x1 = self.fourier_conv(x)
        x2 = self.W(x)
        return self.act(x1 + x2)
#######################################
# FNO: Ensemble of the FNO
#######################################
class FNO(pl.LightningModule):
    """2-D Fourier Neural Operator wrapped as a PyTorch-Lightning module.

    Pipeline: (optional coordinate-grid concat) -> lifting MLP -> spatial
    zero-padding -> stack of Fourier layers -> un-padding -> projection MLP.
    """
    def __init__(self,
                wavenumber,  features_,
                padding = 9,
                activation= 'relu',
                lifting  = None,
                proj = None,
                dim_input = 1,
                with_grid= True,
                loss = "rel_l2",
                learning_rate = 1e-2,
                step_size= 100,
                gamma= 0.5,
                weight_decay= 1e-5,
                ):
        # wavenumber: one Fourier-mode count per layer; features_: channel width.
        # padding: zero-padding before the Fourier stack (for non-periodic inputs).
        super(FNO, self).__init__()
        self.with_grid = with_grid
        self.padding = padding
        self.layers = len(wavenumber)
        self.learning_rate = learning_rate
        self.step_size = step_size
        self.gamma = gamma
        self.weight_decay = weight_decay
        # Select the training criterion by name.
        if loss == 'l1':
            self.criterion = nn.L1Loss()
        elif loss == 'l2':
            self.criterion = nn.MSELoss()
        elif loss == 'smooth_l1':
            self.criterion = nn.SmoothL1Loss()
        elif loss == "rel_l2":
            self.criterion =LpLoss()
        # Two extra input channels hold the (x, y) coordinate grid.
        if with_grid == True:
            dim_input+=2
        if lifting is None:
            self.lifting = FC_nn([dim_input, features_//2, features_],
                                activation = "relu",
                                outermost_norm=False
                                )
        else:
            self.lifting = lifting
        if proj is None:
            self.proj = FC_nn([features_, features_//2, 1],
                            activation = "relu",
                            outermost_norm=False
                            )
        else:
            self.proj = proj
        # Only the last Fourier layer skips its activation (is_last=True).
        self.fno = []
        for l in range(self.layers-1):
            self.fno.append(Fourier_layer(features_ = features_,
                            wavenumber=[wavenumber[l]]*2,
                            activation = activation))
        self.fno.append(Fourier_layer(features_=features_,
                        wavenumber=[wavenumber[-1]]*2,
                        activation = activation,
                        is_last= True))
        self.fno =nn.Sequential(*self.fno)
    def forward(self, x: torch.Tensor):
        # Input is channels-last: (batch, H, W, C); output has one channel.
        if self.with_grid == True:
            grid = get_grid2D(x.shape, x.device)
            x = torch.cat((x, grid), dim=-1)
        x = self.lifting(x)
        # Channels-last -> channels-first for the convolutional stack.
        x = x.permute(0, 3, 1, 2)
        x = nn.functional.pad(x, [0,self.padding, 0,self.padding])
        x = self.fno(x)
        # Remove the padding, then back to channels-last for the projection.
        x = x[..., :-self.padding, :-self.padding]
        x = x.permute(0, 2, 3, 1 )
        x =self.proj(x)
        return x
    def training_step(self, batch: torch.Tensor, batch_idx):
        """One optimization step; logs the training loss."""
        # training_step defines the train loop.
        # it is independent of forward
        x, y = batch
        batch_size = x.shape[0]
        out= self(x)
        loss = self.criterion(out.view(batch_size,-1), y.view(batch_size,-1))
        self.log("loss", loss, on_epoch=True, prog_bar=True, logger=True)
        return loss
    def validation_step(self, val_batch: torch.Tensor, batch_idx):
        """Validation step; logs `val_loss`."""
        x, y = val_batch
        batch_size = x.shape[0]
        out= self(x)
        val_loss = self.criterion(out.view(batch_size,-1), y.view(batch_size,-1))
        self.log('val_loss', val_loss, on_epoch=True, prog_bar=True, logger=True)
        return val_loss
def configure_optimizers(self, optimizer=None, scheduler=None):
if optimizer is None:
optimizer = optim.AdamW(self.parameters(), lr=self.learning_rate, weight_decay=self.weight_decay)
if scheduler is None:
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=self.step_size, gamma=self.gamma)
return {
"optimizer": optimizer,
"lr_scheduler": {
"scheduler": scheduler
},
} | 6,612 | 39.078788 | 119 | py |
Fine-tuning-NOs | Fine-tuning-NOs-master/models/sFNO_epsilon_v1.py | import pytorch_lightning as pl
import torch
from torch import optim, nn
from .FNO import fourier_conv_2d
from .basics_model import LayerNorm, get_grid2D, FC_nn, set_activ
import torch.nn.functional as F
from utilities import LpLoss
from timm.models.layers import DropPath
#######################################
# Integral Operator Layer
#######################################
class IO_layer(nn.Module):
    """Integral-operator layer: spectral convolution plus a 1x1-conv skip,
    followed by dropout and an activation.
    """
    def __init__(self, features_,
                 wavenumber,
                 drop = 0.,
                 activation = "relu"):
        super().__init__()
        self.W = nn.Conv2d(features_, features_, 1)
        self.IO = fourier_conv_2d(features_, features_, *wavenumber)
        self.act = set_activ(activation)
        self.dropout = nn.Dropout(drop)
    def forward(self, x):
        # Sum the spectral and pointwise branches, then dropout + activation.
        mixed = self.IO(x) + self.W(x)
        return self.act(self.dropout(mixed))
class MetaFormerNO_Block(nn.Module):
    """MetaFormer-style block built around an integral-operator token mixer.

    Two residual sub-blocks: (1) norm -> IO_layer, (2) norm -> pointwise MLP
    (Linear 4x expansion + activation + contraction), each wrapped in
    stochastic depth (DropPath).
    """
    def __init__(self, features_,
                wavenumber,
                drop = 0.,
                drop_path= 0.,
                activation = "relu"):
        super().__init__()
        self.IO = IO_layer(features_=features_,
                        wavenumber=wavenumber,
                        drop= drop,
                        activation = activation)
        # norm1 acts on (N, C, H, W); norm2 on the permuted (N, H, W, C) layout.
        self.norm1 = LayerNorm(features_, eps=1e-5, data_format = "channels_first")
        self.norm2 = LayerNorm(features_, eps=1e-5, data_format = "channels_last")
        self.pwconv1 = nn.Linear(features_, 4*features_) # pointwise/1x1 convs, implemented with linear layers
        self.act = set_activ(activation) if activation is not None else set_activ("gelu")
        self.pwconv2 = nn.Linear(4*features_, features_)
        self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
    def forward(self, x):
        # First residual branch: normalized integral-operator mixing.
        input = x
        x = self.norm1(x)
        x = self.IO(x)
        x =input+self.drop_path(x) #NonLocal Layers
        # Second residual branch: channel MLP in channels-last layout.
        input = x
        x = x.permute(0, 2, 3, 1)# (N, C, H, W)-> (N, H, W, C)
        x = self.norm2(x)
        x = self.pwconv1(x)
        x = self.act(x)
        x = self.pwconv2(x)
        x = x.permute(0, 3, 1, 2) # (N, H, W, C) -> (N, C, H, W)
        x = input + self.drop_path(x)
        return x
#######################################
# Ensemble of the sFNO_epsilon_v1
#######################################
class sFNO_epsilon_v1(pl.LightningModule):
    """sFNO+epsilon (v1): stack of MetaFormer-style neural-operator blocks.

    Same lifting/padding/projection pipeline as FNO, but the core is a
    sequence of MetaFormerNO_Block with linearly increasing DropPath rates.
    """
    def __init__(self,
                wavenumber,  features_,
                padding = 9,
                lifting  = None,
                proj = None,
                dim_input = 1,
                with_grid= True,
                loss = "rel_l2",
                learning_rate = 1e-2,
                step_size= 100,
                gamma= 0.5,
                weight_decay= 1e-5,
                drop = 0.,
                drop_path= 0.,
                activation = "relu"
                ):
        super().__init__()
        self.with_grid = with_grid
        self.padding = padding
        self.layers = len(wavenumber)
        self.learning_rate = learning_rate
        self.step_size = step_size
        self.gamma = gamma
        self.weight_decay = weight_decay
        # Select the training criterion by name.
        if loss == 'l1':
            self.criterion = nn.L1Loss()
        elif loss == 'l2':
            self.criterion = nn.MSELoss()
        elif loss == 'smooth_l1':
            self.criterion = nn.SmoothL1Loss()
        elif loss == "rel_l2":
            self.criterion =LpLoss()
        # Two extra input channels hold the (x, y) coordinate grid.
        if with_grid == True:
            dim_input+=2
        if lifting is None:
            self.lifting = FC_nn([dim_input, features_//2, features_],
                                activation = "relu",
                                outermost_norm=False
                                )
        else:
            self.lifting = lifting
        if proj is None:
            self.proj = FC_nn([features_, features_//2, 1],
                            activation = "relu", drop = drop,
                            outermost_norm=False
                            )
        else:
            self.proj = proj
        self.no = []
        # Stochastic-depth rates increase linearly across the blocks.
        self.dp_rates = [x.item() for x in torch.linspace(0, drop_path, self.layers )]
        for l in range(self.layers):
            self.no.append(MetaFormerNO_Block(features_ = features_,
                            wavenumber=[wavenumber[l]]*2,
                            drop= drop,
                            drop_path= self.dp_rates[l],
                            activation=activation))
        self.no =nn.Sequential(*self.no)
    def forward(self, x: torch.Tensor):
        # Input is channels-last: (batch, H, W, C); output has one channel.
        if self.with_grid == True:
            grid = get_grid2D(x.shape, x.device)
            x = torch.cat((x, grid), dim=-1)
        x = self.lifting(x)
        x = x.permute(0, 3, 1, 2)
        x = F.pad(x, [0,self.padding, 0,self.padding])
        x = self.no(x)
        x = x[..., :-self.padding, :-self.padding]
        x = x.permute(0, 2, 3, 1 )
        x =self.proj(x)
        return x
    def training_step(self, batch: torch.Tensor, batch_idx):
        """One optimization step; logs the training loss."""
        # training_step defines the train loop.
        # it is independent of forward
        x, y = batch
        batch_size = x.shape[0]
        out= self(x)
        loss = self.criterion(out.view(batch_size,-1), y.view(batch_size,-1))
        self.log("loss", loss, on_epoch=True, prog_bar=True, logger=True)
        return loss
    def validation_step(self, val_batch: torch.Tensor, batch_idx):
        """Validation step; logs `val_loss`."""
        x, y = val_batch
        batch_size = x.shape[0]
        out= self(x)
        val_loss = self.criterion(out.view(batch_size,-1), y.view(batch_size,-1))
        self.log('val_loss', val_loss, on_epoch=True, prog_bar=True, logger=True)
        return val_loss
    def configure_optimizers(self, optimizer=None, scheduler=None):
        """Return the AdamW optimizer and StepLR scheduler used by Lightning."""
        if optimizer is None:
            optimizer = optim.AdamW(self.parameters(), lr=self.learning_rate, weight_decay=self.weight_decay)
        if scheduler is None:
            scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=self.step_size, gamma=self.gamma)
        return {
            "optimizer": optimizer,
            "lr_scheduler": {
                "scheduler": scheduler
            },
        }
| 6,482 | 35.627119 | 110 | py |
Fine-tuning-NOs | Fine-tuning-NOs-master/models/sFNO.py | import pytorch_lightning as pl
import torch
from torch import optim, nn
from .FNO import fourier_conv_2d
from .basics_model import LayerNorm, get_grid2D, FC_nn, set_activ
import torch.nn.functional as F
from utilities import LpLoss
#######################################
# Integral Operator Layer
#######################################
class IO_layer(nn.Module):
    """Integral-operator layer: spectral convolution plus a 1x1-conv skip,
    followed by dropout and an activation.
    """
    def __init__(self, features_,
                 wavenumber,
                 drop = 0.,
                 activation = "relu"):
        super().__init__()
        self.W = nn.Conv2d(features_, features_, 1)
        self.IO = fourier_conv_2d(features_, features_, *wavenumber)
        self.act = set_activ(activation)
        self.dropout = nn.Dropout(drop)
    def forward(self, x):
        # Sum the spectral and pointwise branches, then dropout + activation.
        mixed = self.IO(x) + self.W(x)
        return self.act(self.dropout(mixed))
#######################################
# Integral Operator Block
#######################################
class IO_Block(nn.Module):
    """Integral-operator block: IO layer followed by a channel MLP (4x
    expansion) and a LayerNorm, operating in channels-last layout.
    """
    def __init__(self, features_,
                 wavenumber,
                 drop = 0.,
                 activation = "relu"):
        super().__init__()
        self.IO = IO_layer(features_=features_,
                           wavenumber=wavenumber,
                           drop=drop,
                           activation=activation)
        # Pointwise (1x1) convolutions implemented as linear layers.
        self.pwconv1 = nn.Linear(features_, 4 * features_)
        self.act = set_activ(activation) if activation is not None else set_activ("gelu")
        self.norm = nn.LayerNorm(features_, eps=1e-5)
        self.pwconv2 = nn.Linear(4 * features_, features_)
    def forward(self, x):
        # B C H W -> B H W C so the linear layers act on the channel axis.
        y = self.IO(x).permute(0, 2, 3, 1)
        y = self.pwconv2(self.act(self.pwconv1(y)))
        y = self.norm(y)
        return y.permute(0, 3, 1, 2)
#######################################
# sFNO: Ensemble of the sFNO
#######################################
class sFNO(pl.LightningModule):
    """sFNO: stack of integral-operator blocks (IO_Block) as a Lightning module.

    Same lifting/padding/projection pipeline as FNO, with IO_Block cores.
    """
    def __init__(self,
                wavenumber,  features_,
                padding = 9,
                lifting  = None,
                proj = None,
                dim_input = 1,
                with_grid= True,
                loss = "rel_l2",
                learning_rate = 1e-2,
                step_size= 100,
                gamma= 0.5,
                weight_decay= 1e-5,
                drop = 0.,
                activation = "relu"
                ):
        super().__init__()
        self.with_grid = with_grid
        self.padding = padding
        self.layers = len(wavenumber)
        self.learning_rate = learning_rate
        self.step_size = step_size
        self.gamma = gamma
        self.weight_decay = weight_decay
        # Select the training criterion by name.
        if loss == 'l1':
            self.criterion = nn.L1Loss()
        elif loss == 'l2':
            self.criterion = nn.MSELoss()
        elif loss == 'smooth_l1':
            self.criterion = nn.SmoothL1Loss()
        elif loss == "rel_l2":
            self.criterion =LpLoss()
        # Two extra input channels hold the (x, y) coordinate grid.
        if with_grid == True:
            dim_input+=2
        if lifting is None:
            self.lifting = FC_nn([dim_input, features_//2, features_],
                                activation = "relu",
                                outermost_norm=False
                                )
        else:
            self.lifting = lifting
        if proj is None:
            self.proj = FC_nn([features_, features_//2, 1],
                            activation = "relu", drop = drop,
                            outermost_norm=False
                            )
        else:
            self.proj = proj
        self.fno = []
        for l in range(self.layers):
            self.fno.append(IO_Block(features_ = features_,
                            wavenumber=[wavenumber[l]]*2,
                            drop= drop,
                            activation= activation))
        self.fno =nn.Sequential(*self.fno)
    def forward(self, x: torch.Tensor):
        # Input is channels-last: (batch, H, W, C); output has one channel.
        if self.with_grid == True:
            grid = get_grid2D(x.shape, x.device)
            x = torch.cat((x, grid), dim=-1)
        x = self.lifting(x)
        x = x.permute(0, 3, 1, 2)
        x = F.pad(x, [0,self.padding, 0,self.padding])
        x = self.fno(x)
        x = x[..., :-self.padding, :-self.padding]
        x = x.permute(0, 2, 3, 1 )
        x =self.proj(x)
        return x
    def training_step(self, batch: torch.Tensor, batch_idx):
        """One optimization step; logs the training loss."""
        # training_step defines the train loop.
        # it is independent of forward
        x, y = batch
        batch_size = x.shape[0]
        out= self(x)
        loss = self.criterion(out.view(batch_size,-1), y.view(batch_size,-1))
        self.log("loss", loss, on_epoch=True, prog_bar=True, logger=True)
        return loss
    def validation_step(self, val_batch: torch.Tensor, batch_idx):
        """Validation step; logs `val_loss`."""
        x, y = val_batch
        batch_size = x.shape[0]
        out= self(x)
        val_loss = self.criterion(out.view(batch_size,-1), y.view(batch_size,-1))
        self.log('val_loss', val_loss, on_epoch=True, prog_bar=True, logger=True)
        return val_loss
def configure_optimizers(self, optimizer=None, scheduler=None):
if optimizer is None:
optimizer = optim.AdamW(self.parameters(), lr=self.learning_rate, weight_decay=self.weight_decay)
if scheduler is None:
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=self.step_size, gamma=self.gamma)
return {
"optimizer": optimizer,
"lr_scheduler": {
"scheduler": scheduler
},
} | 5,862 | 35.64375 | 110 | py |
Fine-tuning-NOs | Fine-tuning-NOs-master/models/sFNO_epsilon_v2_updated.py | import pytorch_lightning as pl
import torch
from torch import optim, nn
from .FNO import fourier_conv_2d
from .basics_model import LayerNorm, get_grid2D, set_activ, GroupNorm
import torch.nn.functional as F
from utilities import LpLoss
from timm.models.layers import DropPath, trunc_normal_
import os
from .sFNO_epsilon_v1 import IO_layer
class Mlp(nn.Module):
    """
    Implementation of MLP with 1*1 convolutions.
    Input: tensor with shape [B, C, H, W]
    """
    def __init__(self, in_features,
                hidden_features=None,
                out_features=None,
                activation = "leaky_relu"):
        super().__init__()
        # Unset widths default to the input width (falsy values fall back too).
        out_features = out_features or in_features
        hidden_features = hidden_features or in_features
        self.fc1 = nn.Conv2d(in_features, hidden_features, 1)
        self.act = set_activ(activation)
        self.fc2 = nn.Conv2d(hidden_features, out_features, 1)
        # Truncated-normal init for conv weights, zero bias (applied recursively).
        self.apply(self._init_weights)
    def _init_weights(self, m):
        # Initialize every Conv2d submodule; other module types are untouched.
        if isinstance(m, nn.Conv2d):
            trunc_normal_(m.weight, std=.02)
            if m.bias is not None:
                nn.init.constant_(m.bias, 0)
    def forward(self, x):
        # 1x1 conv -> activation -> 1x1 conv, all in channels-first layout.
        x = self.fc1(x)
        x = self.act(x)
        x = self.fc2(x)
        return x
#######################################
# Transformer look-alike block with Neural Operators
#######################################
class NOFormerBlock(nn.Module):
    """Transformer-style block with a neural-operator token mixer.

    Two residual branches — norm -> IO_layer and norm -> Mlp — each optionally
    scaled by a learnable per-channel layer-scale and wrapped in DropPath.
    """
    def __init__(self, features_,
                wavenumber,
                drop = 0.,
                drop_path= 0.,
                activation = "leaky_relu",
                use_layer_scale=True,
                layer_scale_init_value=1e-5,
                norm_layer=GroupNorm,
                mlp_ratio=4):
        super().__init__()
        self.IO = IO_layer(features_=features_,
                        wavenumber=wavenumber,
                        drop= drop,
                        activation = activation)
        self.norm1 = norm_layer(features_)
        self.norm2 = norm_layer(features_)
        self.act = set_activ(activation) if activation is not None else set_activ("gelu")
        # MLP hidden width = mlp_ratio * channel width.
        mlp_hidden_features = int(features_ * mlp_ratio)
        self.mlp = Mlp(in_features=features_,
                    hidden_features=mlp_hidden_features,
                    activation=activation)
        self.drop_path= DropPath(drop_path) if drop_path > 0. else nn.Identity()
        self.use_layer_scale = use_layer_scale
        if use_layer_scale:
            # Learnable per-channel residual scaling, initialized small.
            self.layer_scale_1= nn.Parameter(torch.ones((features_))*layer_scale_init_value, requires_grad=True)
            self.layer_scale_2= nn.Parameter(torch.ones((features_))*layer_scale_init_value, requires_grad=True)
    def forward(self, x):
        # unsqueeze twice so the (C,) scale broadcasts over (N, C, H, W).
        if self.use_layer_scale:
            x = x+ self.drop_path(self.layer_scale_1.unsqueeze(-1).unsqueeze(-1)*self.IO(self.norm1(x)))
            x = x+ self.drop_path(self.layer_scale_2.unsqueeze(-1).unsqueeze(-1)*self.mlp(self.norm2(x)))
        else:
            x = x+ self.drop_path(self.IO(self.norm1(x)))
            x = x+ self.drop_path(self.mlp(self.norm2(x)))
        return x
#######################################
class sFNO_epsilon_v2_updated(pl.LightningModule):
    """sFNO+epsilon (v2, updated): multi-stage NOFormer as a Lightning module.

    The network is a list of stages; stage i contains `stage_list[i]`
    NOFormerBlock's with `features_stage_list[i]` channels and
    `wavenumber_stage_list[i]` Fourier modes. Lifting/projection are 1x1-conv
    Mlp's, so the whole pipeline runs in channels-first layout.
    """
    def __init__(self,
                stage_list,
                features_stage_list,
                wavenumber_stage_list,
                dim_input = None,
                dim_output = None,
                proj= None,
                lifting=None,
                activation="leaky_relu",
                norm_layer=GroupNorm,
                drop_rate= 0.,
                drop_path_rate= 0.,
                use_layer_scale=True,
                layer_scale_init_value=1e-5,
                with_grid=True,
                padding=9,
                loss = "rel_l2",
                learning_rate = 1e-3,
                step_size= 70,
                gamma= 0.5,
                weight_decay= 1e-5,
                mlp_ratio=4):
        super().__init__()
        self.save_hyperparameters()
        # Select the training criterion by name.
        if loss == 'l1':
            self.criterion = nn.L1Loss()
        elif loss == 'l2':
            self.criterion = nn.MSELoss()
        elif loss == 'smooth_l1':
            self.criterion = nn.SmoothL1Loss()
        elif loss == "rel_l2":
            self.criterion =LpLoss()
        self.padding = padding
        self.with_grid = with_grid
        self.padding = padding
        self.learning_rate = learning_rate
        self.step_size = step_size
        self.gamma = gamma
        self.weight_decay = weight_decay
        # Two extra input channels hold the (x, y) coordinate grid.
        if with_grid == True:
            dim_input+=2
        if lifting is None:
            self.lifting = Mlp(in_features=dim_input,
                            out_features=features_stage_list[0],
                            hidden_features=features_stage_list[0],
                            activation=activation)
        else:
            self.lifting = lifting
        if proj is None:
            self.proj = Mlp(in_features=features_stage_list[-1],
                            out_features=dim_output,
                            hidden_features=features_stage_list[-1],
                            activation=activation)
        else:
            self.proj = proj
        assert len(features_stage_list) == len(wavenumber_stage_list) == len(stage_list)
        network = []
        # Stochastic-depth rates increase linearly across ALL blocks of all stages.
        dp_rates=[x.item() for x in torch.linspace(0, drop_path_rate, sum(stage_list))]
        cur = 0
        for i in range(len(stage_list)):
            stage = self.Ensemble_stage(features_=features_stage_list[i],
                            index= i,
                            layers=stage_list,
                            wavenumber_stage=wavenumber_stage_list[i],
                            mlp_ratio=mlp_ratio,
                            activation=activation,
                            norm_layer=norm_layer,
                            drop_rate=drop_rate,
                            drop_path_rate=dp_rates[cur:cur+stage_list[i]],
                            use_layer_scale=use_layer_scale,
                            layer_scale_init_value=layer_scale_init_value)
            network.append(stage)
            cur += stage_list[i]
        self.network = nn.ModuleList(network)
    #######################################
    def Ensemble_stage(self, features_,
                index,
                layers,
                wavenumber_stage,
                mlp_ratio,
                activation,
                norm_layer,
                drop_rate,
                drop_path_rate,
                use_layer_scale,
                layer_scale_init_value,
                ):
        """
        generate the ensemble of blocks
        return: NOFormerBlock
        """
        blocks = []
        for j in range(layers[index]):
            blocks.append(NOFormerBlock(features_= features_,
                wavenumber= [wavenumber_stage]*2,
                norm_layer=norm_layer,
                drop= drop_rate,
                drop_path= drop_path_rate[j],
                use_layer_scale=use_layer_scale,
                layer_scale_init_value=layer_scale_init_value,
                mlp_ratio=mlp_ratio,
                activation=activation))
        blocks = nn.Sequential(*blocks)
        return blocks
    def forward_NOFormer(self, x):
        """
        forward the NOFormer
        """
        for stage in self.network:
            x = stage(x)
        return x
    def add_grid(self, x):
        """
        add grid to the input
        """
        grid = get_grid2D(x.shape, x.device)
        x = torch.cat((x, grid), dim=-1)
        del grid
        return x
    def forward(self, x):
        # Input channels-last (B, H, W, C); lifting/stages run channels-first.
        if self.with_grid == True:
            x = self.add_grid(x)
        x = self.lifting(x.permute(0, 3, 1, 2))
        x = F.pad(x, [0,self.padding, 0,self.padding])
        x = self.forward_NOFormer(x)
        x =x[..., :-self.padding, :-self.padding]
        x = self.proj(x)
        return x.permute(0, 2, 3, 1)
    def training_step(self, batch: torch.Tensor, batch_idx):
        """One optimization step; logs the training loss."""
        # training_step defines the train loop.
        # it is independent of forward
        x, y = batch
        batch_size = x.shape[0]
        out= self(x)
        loss = self.criterion(out.view(batch_size,-1), y.view(batch_size,-1))
        self.log("loss", loss, on_epoch=True, prog_bar=True, logger=True)
        return loss
    def validation_step(self, val_batch: torch.Tensor, batch_idx):
        """Validation step; logs `val_loss`."""
        x, y = val_batch
        batch_size = x.shape[0]
        out= self(x)
        val_loss = self.criterion(out.view(batch_size,-1), y.view(batch_size,-1))
        self.log('val_loss', val_loss, on_epoch=True, prog_bar=True, logger=True)
        return val_loss
    def test_step(self, test_batch: torch.Tensor, batch_idx):
        """Test step; logs `test_loss`."""
        x, y = test_batch
        batch_size = x.shape[0]
        out= self(x)
        test_loss = self.criterion(out.view(batch_size,-1), y.view(batch_size,-1))
        self.log('test_loss', test_loss, on_epoch=True, prog_bar=True, logger=True)
        return test_loss
def configure_optimizers(self, optimizer=None, scheduler=None):
if optimizer is None:
optimizer = optim.AdamW(self.parameters(), lr=self.learning_rate, weight_decay=self.weight_decay)
if scheduler is None:
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=self.step_size, gamma=self.gamma)
return {
"optimizer": optimizer,
"lr_scheduler": {
"scheduler": scheduler
},
} | 10,270 | 38.35249 | 112 | py |
Fine-tuning-NOs | Fine-tuning-NOs-master/utilities/model_factory.py | from models import *
def choosing_model(config):
    """Instantiate the neural-operator model selected by `config`.

    `config["Project"]["name"]` picks the architecture and
    `config["Project"]["database"]` decides the output width: the 7 Hz data
    only contains the real part of the field (default 1-channel projection),
    while the 12/15 Hz data contains real and imaginary parts (a 2-channel
    linear projection head is attached).

    Raises:
        ValueError: if the database name is not recognized.
    """
    c_nn = config["model"]
    c_train = config["train"]
    # 7 Hz data only contains the real part of the field
    if config["Project"]["database"] == 'GRF_7Hz':
        if config["Project"]["name"] == "FNO":
            model = FNO(
                wavenumber=c_nn["modes_list"],
                features_=c_nn["features"],
                learning_rate=c_train["lr"],
                step_size=c_train["step_size"],
                gamma=c_train["gamma"],
                weight_decay=c_train["weight_decay"],
                activation=c_nn["activ"]
            )
        elif config["Project"]["name"] == "sFNO":
            model = sFNO(
                wavenumber=c_nn["modes_list"],
                drop=c_nn["drop"],
                features_=c_nn["features"],
                learning_rate=c_train["lr"],
                step_size=c_train["step_size"],
                gamma=c_train["gamma"],
                weight_decay=c_train["weight_decay"],
                activation=c_nn["activ"]
            )
        elif config["Project"]["name"] == "FNO_residual":
            model = FNO_residual(
                wavenumber=c_nn["modes_list"],
                drop=c_nn["drop"],
                drop_path=c_nn["drop_path"],
                features_=c_nn["features"],
                learning_rate=c_train["lr"],
                step_size=c_train["step_size"],
                gamma=c_train["gamma"],
                weight_decay=c_train["weight_decay"],
                activation=c_nn["activ"]
            )
        elif config["Project"]["name"] == "sFNO+epsilon_v1":
            model = sFNO_epsilon_v1(
                wavenumber=c_nn["modes_list"],
                drop=c_nn["drop"],
                drop_path=c_nn["drop_path"],
                features_=c_nn["features"],
                learning_rate=c_train["lr"],
                step_size=c_train["step_size"],
                gamma=c_train["gamma"],
                weight_decay=c_train["weight_decay"],
                activation=c_nn["activ"]
            )
        elif config["Project"]["name"] == "sFNO+epsilon_v2":
            model = sFNO_epsilon_v2(
                modes=c_nn["modes_list"],
                drop_path_rate=c_nn["drop_path"],
                drop=c_nn["drop"],
                depths=c_nn["depths"],
                dims=c_nn["dims"],
                learning_rate=c_train["lr"],
                step_size=c_train["step_size"],
                gamma=c_train["gamma"],
                weight_decay=c_train["weight_decay"],
                activation=c_nn["activ"]
            )
    # 12/15 Hz data contains the real and imaginary parts of the field.
    # Fixed: the original condition `== ('GRF_12Hz') or ('GRF_15Hz')` was
    # always truthy (the bare string 'GRF_15Hz' is truthy), so every non-7Hz
    # database silently fell into this branch; membership test is intended.
    elif config["Project"]["database"] in {'GRF_12Hz', 'GRF_15Hz'}:
        if config["Project"]["name"] == "FNO":
            Proj = torch.nn.Linear(c_nn["features"], 2, dtype=torch.float)
            model = FNO(
                wavenumber=c_nn["modes_list"],
                features_=c_nn["features"],
                learning_rate=c_train["lr"],
                step_size=c_train["step_size"],
                gamma=c_train["gamma"],
                weight_decay=c_train["weight_decay"],
                proj=Proj,
                activation=c_nn["activ"]
            )
        elif config["Project"]["name"] == "sFNO":
            Proj = torch.nn.Linear(c_nn["features"], 2, dtype=torch.float)
            model = sFNO(
                wavenumber=c_nn["modes_list"],
                drop=c_nn["drop"],
                features_=c_nn["features"],
                learning_rate=c_train["lr"],
                step_size=c_train["step_size"],
                gamma=c_train["gamma"],
                weight_decay=c_train["weight_decay"],
                proj=Proj,
                activation=c_nn["activ"]
            )
        elif config["Project"]["name"] == "FNO_residual":
            Proj = torch.nn.Linear(c_nn["features"], 2, dtype=torch.float)
            model = FNO_residual(
                wavenumber=c_nn["modes_list"],
                drop=c_nn["drop"],
                drop_path=c_nn["drop_path"],
                features_=c_nn["features"],
                learning_rate=c_train["lr"],
                step_size=c_train["step_size"],
                gamma=c_train["gamma"],
                weight_decay=c_train["weight_decay"],
                proj=Proj,
                activation=c_nn["activ"]
            )
        elif config["Project"]["name"] == "sFNO+epsilon_v1":
            Proj = torch.nn.Linear(c_nn["features"], 2, dtype=torch.float)
            model = sFNO_epsilon_v1(
                wavenumber=c_nn["modes_list"],
                drop=c_nn["drop"],
                drop_path=c_nn["drop_path"],
                features_=c_nn["features"],
                learning_rate=c_train["lr"],
                step_size=c_train["step_size"],
                gamma=c_train["gamma"],
                weight_decay=c_train["weight_decay"],
                proj=Proj,
                activation=c_nn["activ"]
            )
        elif config["Project"]["name"] == "sFNO+epsilon_v2":
            # sFNO_epsilon_v2_proj is the same arch as sFNO_epsilon_v2
            Proj = torch.nn.Linear(c_nn["dims"][-1], 2, dtype=torch.float)
            model = sFNO_epsilon_v2_proj(
                modes=c_nn["modes_list"],
                drop_path_rate=c_nn["drop_path"],
                drop=c_nn["drop"],
                depths=c_nn["depths"],
                dims=c_nn["dims"],
                learning_rate=c_train["lr"],
                step_size=c_train["step_size"],
                gamma=c_train["gamma"],
                weight_decay=c_train["weight_decay"],
                proj=Proj,
                activation=c_nn["activ"]
            )
        elif config["Project"]["name"] == "sFNO+epsilon_v2_updated":
            # Same arch as sFNO_epsilon_v2, but with an independent
            # projection layer handled internally (dim_output=2).
            model = sFNO_epsilon_v2_updated(
                stage_list=c_nn["depths"],
                features_stage_list=c_nn["dims"],
                wavenumber_stage_list=c_nn["modes_list"],
                dim_input=1,
                dim_output=2,
                activation=c_nn["activ"],
                drop_rate=c_nn["drop"],
                drop_path_rate=c_nn["drop_path"],
                learning_rate=c_train["lr"],
                step_size=c_train["step_size"],
                gamma=c_train["gamma"],
                weight_decay=c_train["weight_decay"],
            )
    else:
        raise ValueError(
            f"Unknown database: {config['Project']['database']!r}"
        )
    # NOTE(review): an unrecognized model name still leaves `model` unbound
    # (NameError here), matching the original behavior.
    return model
Fine-tuning-NOs | Fine-tuning-NOs-master/utilities/loss.py | import torch
#loss function with rel/abs Lp loss
class LpLoss(object):
def __init__(self, d=2, p=2, size_average=True, reduction=True):
super(LpLoss, self).__init__()
#Dimension and Lp-norm type are postive
assert d > 0 and p > 0
self.d = d
self.p = p
self.reduction = reduction
self.size_average = size_average
def abs(self, x, y):
num_examples = x.size()[0]
#Assume uniform mesh
h = 1.0 / (x.size()[1] - 1.0)
all_norms = (h**(self.d/self.p))*torch.norm(x.view(num_examples,-1) - y.view(num_examples,-1), self.p, 1)
if self.reduction:
if self.size_average:
return torch.mean(all_norms)
else:
return torch.sum(all_norms)
return all_norms
def rel(self, x, y):
num_examples = x.size()[0]
diff_norms = torch.norm(x.reshape(num_examples,-1) - y.reshape(num_examples,-1), self.p, 1)
y_norms = torch.norm(y.reshape(num_examples,-1), self.p, 1)
if self.reduction:
if self.size_average:
return torch.mean(diff_norms/y_norms)
else:
return torch.sum(diff_norms/y_norms)
return diff_norms/y_norms
def __call__(self, x, y):
return self.rel(x, y) | 1,326 | 27.234043 | 113 | py |
Fine-tuning-NOs | Fine-tuning-NOs-master/utilities/loading_data.py | import numpy as np
import torch
from bisect import bisect
import os
from torch.utils.data import Dataset, DataLoader
def to_numpy(x):
    """Detach `x` from the autograd graph and return it as a CPU NumPy array."""
    detached = x.detach()
    return detached.cpu().numpy()
#files Loader
def MyLoader(GL, do = "train", config = None, args=None):
    """Build DataLoaders from a GettingLists instance.

    `do='train'` returns (train_loader, valid_loader); `do='test'` returns a
    single test loader. Batch size, worker count, database name and image
    size come from `config` if given, else from `args`, else from defaults.

    NOTE(review): if the database name is neither 'GRF_7Hz' nor
    'GRF_12Hz'/'GRF_15Hz', `size` is never assigned and the File_Loader call
    below raises UnboundLocalError; the bare-default path (no config/args)
    likewise leaves `size` and `database` unset — confirm intended inputs.
    """
    if config is not None:
        batch_size = config['train']['batchsize']
        workers = config['data']['load_workers']
        database = config['Project']['database']
        # 7 Hz fields are 128x128; 12/15 Hz fields are 64x64.
        if database == 'GRF_7Hz':
            size = 128
        elif database in {'GRF_12Hz','GRF_15Hz'}:
            size = 64
    elif args is not None:
        batch_size = args.batchsize
        workers = args.load_workers
        database = args.database
        if database == 'GRF_7Hz':
            size = 128
        elif database in {'GRF_12Hz', 'GRF_15Hz'}:
            size = 64
    else:
        batch_size = 50
        workers = 4
    if do == 'train':
        list_x_train, list_y_train = GL('train')
        list_x_valid, list_y_valid = GL('validation')
        Train_Data_set = File_Loader(list_x_train,list_y_train, size = size, data=database)
        Valid_Data_set = File_Loader(list_x_valid,list_y_valid, size = size, data=database)
        ##### setting the data Loader
        # Only the training loader shuffles.
        train_loader = DataLoader(dataset = Train_Data_set,
                          shuffle = True,
                          batch_size = batch_size,
                          num_workers= workers)
        valid_loader = DataLoader(dataset = Valid_Data_set,
                          shuffle = False,
                          batch_size =batch_size,
                          num_workers= workers)
        return train_loader, valid_loader
    elif do == 'test':
        list_x_test, list_y_test = GL('test')
        Test_Data_set = File_Loader(list_x_test, list_y_test, size = size, data=database)
        ##### setting the data Loader
        test_loader = DataLoader(dataset = Test_Data_set,
                          shuffle = False,
                          batch_size = batch_size,
                          num_workers= workers)
        return test_loader
class GettingLists(object):
    """Generate train/validation/test file lists (each file holds 5000 samples).

    Paths follow `PATH/wave_eq/data_base/{model,data}/{velocity,pressure}{k}.npy`.
    """
    def __init__(self, data_for_training,
                wave_eq = "acoustic",
                data_base = "GRF_7Hz",
                PATH = 'databases',
                batch_data_size = int(5000)):
        super(GettingLists, self).__init__()
        self.PATH = os.path.join(PATH, wave_eq, data_base)
        self.batch_data = batch_data_size
        # Number of training files; file `end` is reserved for validation.
        self.valid_limit = data_for_training // self.batch_data
        if data_base == 'GRF_7Hz':
            self.end = int(6)
        elif data_base in {'GRF_12Hz', 'GRF_15Hz'}:
            self.end = int(10)
    def _paths(self, kind, stem, first, last):
        # Array of file paths `<PATH>/<kind>/<stem><k>.npy` for k in [first, last).
        return np.array([os.path.join(self.PATH, kind, f'{stem}{k}.npy')
                         for k in range(first, last)])
    def get_list(self, do):
        """Return (input_paths, target_paths) for the requested split."""
        if do == 'train':
            lo, hi = 1, self.valid_limit + 1
        elif do == 'validation':
            lo, hi = self.end, self.end + 1
        elif do == 'test':
            lo, hi = self.valid_limit + 1, self.end + 1
        else:
            return None
        return (self._paths('model', 'velocity', lo, hi),
                self._paths('data', 'pressure', lo, hi))
    def __call__(self, do = 'train'):
        return self.get_list(do)
class File_Loader(Dataset):
    """Dataset serving samples out of memory-mapped .npy files.

    data_paths / target_paths: parallel lists of .npy files; the first axis
    of each array is the sample axis and indexing is global across files.
    """
    def __init__(self, data_paths, target_paths, size =128, data = "GRF"):
        self.size = size
        self.data = data
        if self.data == "GRF_7Hz":
            self.data_memmaps = [np.load(path, mmap_mode='r') for path in data_paths]
            self.target_memmaps = [np.load(path, mmap_mode='r') for path in target_paths]
        # Fixed: `self.data == ("GRF_12Hz") or ("GRF_15Hz")` was always truthy
        # (a non-empty string), so this branch swallowed every non-7Hz database
        # and the `_vz` branch below was unreachable.
        elif self.data in ("GRF_12Hz", "GRF_15Hz"):
            self.data_memmaps = [np.load(path, mmap_mode='r').view(float) for path in data_paths]
            self.target_memmaps = [np.load(path, mmap_mode='r').view(float) for path in target_paths]
        elif self.data in ("GRF_12Hz_vz", "GRF_15Hz_vz"):
            self.data_memmaps = [np.load(path, mmap_mode='r').view(float) for path in data_paths]
            self.target_memmaps = [np.load(path, mmap_mode='r').view(float).reshape(2,self.size,self.size,2) for path in target_paths]
        # Cumulative start index of each file within the global sample index.
        self.start_indices = [0] * len(data_paths)
        self.data_count = 0
        for index, memmap in enumerate(self.data_memmaps):
            self.start_indices[index] = self.data_count
            self.data_count += memmap.shape[0]

    def __len__(self):
        return self.data_count

    def __getitem__(self, index):
        # Locate the file holding this global index, then the local offset.
        memmap_index = bisect(self.start_indices, index) - 1
        index_in_memmap = index - self.start_indices[memmap_index]
        data = np.copy(self.data_memmaps[memmap_index][index_in_memmap])
        target = np.copy(self.target_memmaps[memmap_index][index_in_memmap])
        # Inputs are scaled by 1e-3; channel layout depends on the database.
        if self.data == "GRF_7Hz":
            return torch.tensor(data*1e-3, dtype=torch.float).view(self.size,self.size,1), torch.tensor(target, dtype=torch.float).view(self.size,self.size,1)
        elif self.data in ("GRF_12Hz", "GRF_15Hz"):  # fixed always-true condition
            return torch.tensor(data*1e-3, dtype=torch.float).view(self.size,self.size,1), torch.tensor(target, dtype=torch.float).view(self.size,self.size,2)
        elif self.data in ("GRF_12Hz_vz", "GRF_15Hz_vz"):
            return torch.tensor(data*1e-3, dtype=torch.float).view(self.size,self.size,1), torch.tensor(target, dtype=torch.float).view(2,self.size,self.size,2)
| 6,671 | 43.18543 | 158 | py |
Fine-tuning-NOs | Fine-tuning-NOs-master/visualization_code/projection.py | """
Project a model or multiple models to a plane spaned by given directions.
"""
import numpy as np
import torch
import os
import copy
import h5py
import sys
import random
from projection_helper import sizeof, shapeof
sys.path.append('/Users/xmt/code/github/loss-landscape')
import net_plotter
import h5_util
import tqdm
from sklearn.decomposition import PCA, TruncatedSVD
from scipy.linalg import svd
def boolean_query(prompt):
    """Ask a yes/no question on stdin and return the answer as a bool.

    Accepts y/yes/1/t/true and n/no/0/f/false (case-insensitive).
    Unrecognized answers are treated as "no", matching the printed warning.
    """
    asw = input(prompt)
    asw = asw.lower()
    if asw=='y' or asw=='yes' or asw=='1' or asw=='t' or asw=='true':
        return True
    elif asw=='n' or asw=='no' or asw=='0' or asw=='f' or asw=='false':
        return False
    else:
        print('Warning: unrecognized answer. Assuming no.')
        # Fixed: previously returned True despite announcing "Assuming no."
        return False
def tensorlist_to_tensor(weights):
    """Flatten a list of parameter tensors into a single 1D tensor.

    Args:
        weights: a list of parameter tensors, e.g. net_plotter.get_weights(net).
    Returns:
        concatenated 1D tensor
    """
    flat = []
    for w in weights:
        if w.dim() > 1:
            flat.append(w.view(w.numel()))
        else:
            flat.append(torch.FloatTensor(w))
    return torch.cat(flat)
def nplist_to_tensor(nplist):
    """Flatten a list of numpy arrays (e.g. a direction loaded from an h5
    file) into one concatenated 1D tensor; 0-dim scalars are skipped."""
    chunks = []
    for arr in nplist:
        t = torch.tensor(arr * np.float64(1.0))
        if t.dim() > 1:
            chunks.append(t.view(t.numel()))
        elif t.dim() == 1:
            chunks.append(t)
        # scalar values (dim == 0) are intentionally ignored
    return torch.cat(chunks)
def npvec_to_tensorlist(direction, params):
    """Convert a flat numpy vector into tensors shaped like *params*.

    Args:
        direction: 1D numpy vector (e.g. a direction loaded from an h5 file).
        params: list of parameter tensors, or a dict of name -> tensor.
    Returns:
        a list of tensors with the same shapes as the entries of params.
    """
    if isinstance(params, list):
        out = copy.deepcopy(params)
        offset = 0
        for w in out:
            count = w.numel()
            w.copy_(torch.tensor(direction[offset:offset + count]).view(w.size()))
            offset += count
        assert(offset == len(direction))
        return out
    else:
        out = []
        offset = 0
        for (k, w) in params.items():
            count = w.numel()
            out.append(torch.Tensor(direction[offset:offset + count]).view(w.size()))
            offset += count
        assert(offset == len(direction))
        return out
def cal_angle(vec1, vec2):
    """Calculate cosine similarity between two torch tensors or two ndarrays.

    Args:
        vec1, vec2: two 1D tensors or two numpy ndarrays (same kind).
    """
    # Fixed: the original tested isinstance(vec1, ...) twice, never vec2, so
    # a (tensor, ndarray) pair would hit the tensor branch and crash.
    if isinstance(vec1, torch.Tensor) and isinstance(vec2, torch.Tensor):
        return torch.dot(vec1, vec2)/(vec1.norm()*vec2.norm()).item()
    elif isinstance(vec1, np.ndarray) and isinstance(vec2, np.ndarray):
        return np.ndarray.dot(vec1, vec2)/(np.linalg.norm(vec1)*np.linalg.norm(vec2))
def project_1D(w, d):
    """Scalar projection of vectorized weights *w* onto direction *d*.

    Returns <w, d> / ||d|| as a Python float.
    """
    assert len(w) == len(d), 'dimension does not match for w (' + str(len(w)) + ') and d (' + str(len(d)) + ')'
    projection = torch.dot(w, d) / d.norm()
    return projection.item()
def lift_1D(x, d):
    """Inverse of project_1D: move distance *x* along the unit vector of *d*."""
    scaled = x * d
    return scaled / d.norm()
def project_2D(d, dx, dy, proj_method):
    """Project vector *d* onto the plane spanned by *dx* and *dy*.

    proj_method 'cos' assumes dx and dy are orthogonal (two independent
    scalar projections); 'lstsq' solves the least-squares system A [x y]^T = d.
    Returns the projection coordinates (x, y).
    """
    if proj_method == 'cos':
        x, y = project_1D(d, dx), project_1D(d, dy)
    elif proj_method == 'lstsq':
        basis = np.vstack([dx.numpy(), dy.numpy()]).T
        x, y = np.linalg.lstsq(basis, d.numpy())[0]
    return x, y
def project_3D(d, dx, dy, dz, proj_method):
    """Project vector *d* onto the 3D space spanned by *dx*, *dy*, *dz*.

    proj_method 'cos' assumes the axes are orthogonal; 'lstsq' solves the
    least-squares system A [x y z]^T = d.
    Returns the projection coordinates (x, y, z).
    """
    if proj_method == 'cos':
        x, y, z = project_1D(d, dx), project_1D(d, dy), project_1D(d, dz)
    elif proj_method == 'lstsq':
        basis = np.vstack([dx.numpy(), dy.numpy(), dz.numpy()]).T
        x, y, z = np.linalg.lstsq(basis, d.numpy())[0]
    return x, y, z
def project_ND(d, axes, proj_method):
    """Project vector *d* onto the space spanned by all *axes*.

    proj_method 'cos' assumes mutually orthogonal axes; 'lstsq' solves the
    stacked least-squares system.
    Returns the list/array of projection coordinates.
    """
    coords = []
    if proj_method == 'cos':
        coords = [project_1D(d, axis) for axis in axes]
    elif proj_method == 'lstsq':
        stacked = np.vstack([axis.numpy() for axis in axes]).T
        coords = np.linalg.lstsq(stacked, d.numpy())[0]
    return coords
def lift_ND(coords, axes, proj_method):
    """Lift PCA coordinates back to the embedding space.

    Args:
        coords: coordinates in the projected space (one per axis).
        axes: basis vectors of the projected space.
        proj_method: unused here; kept for interface symmetry with project_ND.
    Returns:
        the vectorized weight difference corresponding to *coords*.
    """
    assert (len(axes) == len(coords))
    total = torch.zeros_like(axes[0])
    for coef, axis in zip(coords, axes):
        total += lift_1D(coef, axis)
    return total
def load_all_directions(dir_file):
    """Load every direction stored in *dir_file*.

    Directions are stored under consecutive keys 'direction_0',
    'direction_1', ...; reading stops at the first missing index.
    Returns the list of directions (each a list of arrays from h5_util).
    """
    directions = []
    # NOTE(review): the h5py file handle is never closed here — consider
    # closing it (or using a `with` block) before returning.
    f = h5py.File(dir_file, 'r')
    lastdim = 0
    while True:
        label = 'direction_{}'.format(lastdim)
        if label in f.keys():
            directions.append(h5_util.read_list(f, label))
            lastdim += 1
        else:
            break
    print(f'directions contain {len(directions)} vectors')
    return directions
def project_trajectory(args, w, s, callback):
    """
    Project the optimization trajectory onto the given directions.

    Args:
        args.dir_file: the h5 file that contains the directions
        w: weights of the final model
        s: states of the final model
        args.steps: the list of available checkpoint indices
        args.dir_type: the type of the direction, weights or states
        args.proj_method: cosine projection
        args.complex: how complex weights are flattened to real values
        callback: method to obtain model from step index
    Returns:
        proj_file: the projection filename
    """
    proj_file = args.dir_file + '_proj_' + args.proj_method + '.h5'
    if os.path.exists(proj_file):
        # Fixed: `input()` returns a string, which is truthy for almost any
        # answer; use the yes/no helper so "n" actually keeps the file.
        replace = boolean_query('The projection file exists! Replace?')
        if replace:
            os.remove(proj_file)
        else:
            return proj_file
    # read directions and convert them to vectors
    directions = load_all_directions(args.dir_file)
    axes = [nplist_to_tensor(d) for d in directions]
    print(f'directions contains {len(directions)} axes')
    ndim = len(directions)
    # Work in the transformed (all-real) weight space.
    w = transform_tensors(w, args.complex)
    allcoords = [ [] for i in range(ndim) ]
    pbar = tqdm.tqdm(args.steps, desc='Projecting learning steps', ncols=100)
    for step in pbar:
        net2 = callback(step)
        if args.dir_type == 'weights':
            w2 = net_plotter.get_weights(net2)
            w2 = transform_tensors(w2, args.complex)
            d = net_plotter.get_diff_weights(w, w2)
        elif args.dir_type == 'states':
            s2 = net2.state_dict()
            d = net_plotter.get_diff_states(s, s2)
        d = tensorlist_to_tensor(d)
        coords = project_ND(d, axes, args.proj_method)
        for i in range(ndim):
            allcoords[i].append(coords[i])
    # The file may have (re)appeared while projecting; ask again before writing.
    skip = False
    if os.path.exists(proj_file):
        # Fixed: the skip flag is the *negation* of the "Replace?" answer;
        # previously answering "yes" skipped writing altogether.
        skip = not boolean_query(f'{proj_file} exists already. Replace? ')
        if not skip:
            os.remove(proj_file)
    if not skip:
        f = h5py.File(proj_file, 'w')
        for i in range(ndim):
            label = 'proj_{:0>2d}coord'.format(i)
            f[label] = np.array(allcoords[i])
        f.close()
    return proj_file
def real_type(w):
    """Return the real dtype matching *w*'s dtype (complex64 -> float32,
    complex128 -> float64); real dtypes pass through unchanged."""
    mapping = {torch.complex64: torch.float32,
               torch.complex128: torch.float64}
    return mapping.get(w.dtype, w.dtype)
def from_values(t, start, length, wref):
    """Slice `length` raw values out of flat tensor *t* starting at *start*,
    shape them like *wref*, and reinterpret them with wref's real dtype."""
    chunk = t[start:start + length]
    return chunk.view(wref.shape).view(real_type(wref))
def untransform_tensors(w, refw, what):
    """Map transformed (all-real) tensors back to the dtype layout of *refw*.

    Args:
        w: list of real tensors produced by transform_tensors(..., what).
        refw: reference weights providing the target shapes and dtypes.
        what: flattening scheme used: 'split', 'real'/'skip'/'ignore',
              or 'imaginary'.
    Returns:
        a list of tensors matching refw's dtypes; information discarded by
        the forward transform comes back as zeros.
    Raises:
        ValueError for unsupported dtypes or an unrecognized scheme.
    """
    weights = []
    if what.lower() == 'split':
        with torch.no_grad():
            for wi, refwi in zip(w, refw):
                # 'split' doubled dim 0: first half real part, second half imag.
                nrows = int(wi.shape[0] / 2)
                if refwi.dtype is torch.float32 or refwi.dtype is torch.float64:
                    # real tensor was padded with as many zeros to signify
                    weights.append(wi[0:nrows].view(refwi.dtype))
                elif refwi.dtype is torch.complex64 or refwi.dtype is torch.complex128:
                    # complex tensor was converted to real followed by imaginary values:
                    re = wi[0:nrows]
                    im = wi[nrows:]
                    weights.append(torch.complex(re, im))
                else:
                    # Fixed: the message previously formatted `w.dtype` (the
                    # whole list), which itself raised AttributeError.
                    raise ValueError('Unrecognized data type for this weight: {}'.format(wi.dtype))
    elif what.lower() == 'real' or what.lower() == 'skip' or what.lower() == 'ignore':
        with torch.no_grad():
            for wi, refwi in zip(w, refw):
                if refwi.dtype is torch.float32 or refwi.dtype is torch.float64:
                    weights.append(wi)
                elif refwi.dtype is torch.complex64 or refwi.dtype is torch.complex128:
                    re = wi
                    im = torch.zeros_like(re)
                    weights.append(torch.complex(re, im))
                else:
                    raise ValueError('Unrecognized data type for this weight: {}'.format(wi.dtype))
    elif what.lower() == 'imaginary':
        with torch.no_grad():
            for wi, refwi in zip(w, refw):
                if refwi.dtype is torch.float32 or refwi.dtype is torch.float64:
                    weights.append(torch.zeros_like(refwi)) # real values were discarded
                elif refwi.dtype is torch.complex64 or refwi.dtype is torch.complex128:
                    im = wi
                    re = torch.zeros_like(im)
                    weights.append(torch.complex(re, im))
                else:
                    raise ValueError('Unrecognized data type for this weight: {}'.format(wi.dtype))
    else:
        raise ValueError('Unrecognized complex flattening name')
    return weights
def pca_coords_to_weights(coords, axes, refw, what):
    '''
    Transform coordinates in PCA space back into model weights.

    coords: coordinates in PCA space
    axes: PCA axes (as flat vectors)
    refw: weights of the final model used as origin of the reference frame
    what: the complex-flattening scheme used by transform_tensors
    '''
    assert(len(coords) == len(axes))
    # Lift the coordinates into the transformed, flattened weight space.
    lifted = torch.zeros_like(axes[0])
    for coef, axis in zip(coords, axes):
        lifted += coef * axis
    # Transformed reference weights (the origin of the frame).
    base = transform_tensors(refw, what)
    # Reshape the flat offset into per-parameter tensors, ...
    deltas = npvec_to_tensorlist(lifted, base)
    # ... shift by the reference, ...
    shifted = [d + b for d, b in zip(deltas, base)]
    # ... and undo the real/complex flattening.
    return untransform_tensors(shifted, refw, what)
def transform_tensor(t, what, verbose=False):
    """Map a single tensor into an all-real representation.

    what: 'imaginary' -> imaginary part (None for real tensors),
          'split'     -> [real; imag] stacked along dim 0 (zeros for real t),
          'ignore'/'real'/'skip' -> real part only.

    Returns the transformed tensor, or None when the scheme discards *t*.
    Raises TypeError for non-tensors and ValueError for an unknown scheme.
    """
    if not isinstance(t, torch.Tensor):
        print(f'WARNING: not a tensor type in transform_tensor ({type(t)})')
        print(f'size of list: {len(t)}, shape = {shapeof(t)}')
        # Fixed: `assert False` is stripped under -O, after which the function
        # silently returned None; raise explicitly instead.
        raise TypeError(f'transform_tensor expects a torch.Tensor, got {type(t)}')
    if what.lower() == 'imaginary':
        if not torch.is_complex(t):
            return None
        else:
            return t.imag
    elif what.lower() == 'split':
        if not torch.is_complex(t):
            return torch.cat((t, torch.zeros_like(t)), dim=0)
        else:
            return torch.cat((t.real, t.imag), dim=0)
    elif what.lower() == 'ignore' or what.lower() == 'real' or what.lower() == 'skip':
        # 'skip' accepted for consistency with untransform_tensors.
        if not torch.is_complex(t):
            return t
        else:
            return t.real
    else:
        # Fixed: unknown schemes previously fell through and returned None,
        # silently dropping every weight; fail loudly like untransform_tensors.
        raise ValueError('Unrecognized complex flattening name')
def transform_tensors(t, what, verbose=False):
    """Apply transform_tensor to a tensor or a list of tensors.

    With what == 'keep' the input is returned untouched; otherwise each
    tensor is transformed and entries the scheme discards (None) are dropped.
    """
    if verbose:
        print(f'entering transform_tensor: what={what}')
    if what.lower() == 'keep':
        if verbose:
            print('leaving tensor (list) unchanged')
        return t
    if not isinstance(t, list):
        return transform_tensor(t, what, verbose)
    transformed = []
    for w in t:
        result = transform_tensor(w, what, verbose)
        if result is not None:
            if verbose:
                print('w2 is not None, size={}'.format(result.numel()))
            transformed.append(result)
    return transformed
def setup_PCA_directions(args, callback, w, s, verbose=False):
    """
    Find PCA directions for the optimization path from the initial model
    to the final trained model.

    Args:
        args: namespace providing path, dir_type, ignore, steps, complex,
              dimension.
        callback: maps a checkpoint step index to the model at that step.
        w: weights of the final (reference) model.
        s: state_dict of the final model (used when dir_type == 'states').
        verbose: print progress diagnostics.

    Returns:
        dir_name: the h5 file that stores the directions.
    """
    if verbose:
        print(f'input tensor w contains {sizeof(w)} values and has shape {shapeof(w)}')
    # Cannot extract more PCA components than there are trajectory samples.
    actual_dim = np.min([args.dimension, len(args.steps)])
    if actual_dim != args.dimension:
        print(f'WARNING: unable to compute {args.dimension} PCA dimensions. Only {actual_dim} will be computed')
        args.dimension = actual_dim
    # Name the .h5 file that stores the PCA directions.
    folder_name = args.path + '/PCA_' + args.dir_type
    if args.ignore:
        folder_name += '_ignore=' + args.ignore
    folder_name += '_save_epoch=' + str(args.steps[-1])
    folder_name += '_complex=' + str(args.complex)
    folder_name += '_dim=' + str(args.dimension)
    # Fixed: os.system('mkdir ' + folder_name) is shell-dependent and fails
    # silently for nested paths; use the portable stdlib call instead.
    os.makedirs(folder_name, exist_ok=True)
    dir_name = os.path.join(folder_name, 'directions.h5')
    if verbose:
        print(f'PCA directions computed from learning path will be stored in {dir_name}')
    # skip if the direction file exists and is complete
    if os.path.exists(dir_name):
        f = h5py.File(dir_name, 'a')
        # Fixed: completed files store 'singular_values_' (written below); the
        # original checked only 'explained_variance_', which is never written,
        # so the resume shortcut could not trigger.
        if 'explained_variance_' in f.keys() or 'singular_values_' in f.keys():
            f.close()
            return dir_name
        f.close()  # fixed: close the probe handle before reopening for write
    # load models and prepare the optimization path matrix
    matrix = []
    wsave = w
    # we will work with the transformed (real) version of the models
    w = transform_tensors(w, args.complex)
    pbar = tqdm.tqdm(args.steps, ncols=100, desc='Loading training steps')
    for step in pbar:
        pbar.set_description('step #{}'.format(step))
        net2 = callback(step)
        if args.dir_type == 'weights':
            w2 = net_plotter.get_weights(net2)
            if verbose:
                print('transforming tensor {}'.format(shapeof(w2)))
            # compute real version of the weights
            w2 = transform_tensors(w2, args.complex)
            if verbose:
                print('into tensor {}'.format(shapeof(w2)))
            d = net_plotter.get_diff_weights(w, w2)
        elif args.dir_type == 'states':
            s2 = net2.state_dict()
            d = net_plotter.get_diff_states(s, s2)
        if args.ignore == 'biasbn':
            net_plotter.ignore_biasbn(d)
        d = tensorlist_to_tensor(d)
        if verbose:
            print('converting that tensor into {}'.format(shapeof(d)))
        if d is not None:
            matrix.append(d.numpy())
    # Perform PCA on the optimization path matrix
    if verbose:
        print ("Perform PCA on the models")
    matrix = np.array(matrix)
    if verbose:
        print(matrix.shape)
    A = torch.from_numpy(matrix)
    _U, _S, _V = torch.pca_lowrank(A, q=args.dimension, center=True)
    # Sample-covariance eigenvalues derived from the singular values.
    covar = torch.square(_S)/(len(args.steps)-1)
    principal_directions = _V.numpy()
    pcs = []
    for i in range(principal_directions.shape[1]):
        pcs.append(np.array(principal_directions[:,i]))
    if verbose:
        print(f'there are {len(pcs)} principal components')
    # convert vectorized directions to the same shape as models to save in h5 file.
    if verbose:
        print(f'type of w is {type(w)}')
    xi_directions = []
    if args.dir_type == 'weights':
        for pc in pcs:
            xi_directions.append(npvec_to_tensorlist(pc, w))
    elif args.dir_type == 'states':
        for pc in pcs:
            xi_directions.append(npvec_to_tensorlist(pc, s))
    if args.ignore == 'biasbn':
        for xd in xi_directions:
            net_plotter.ignore_biasbn(xd)
    if verbose:
        print(f'dir_name={dir_name}')
    if os.path.exists(dir_name):
        replace = boolean_query(f'{dir_name} exists already. Replace? ')
        if replace:
            os.remove(dir_name)
        else:
            return dir_name
    f = h5py.File(dir_name, 'w')
    for i, xd in enumerate(xi_directions):
        label = 'direction_{}'.format(i)
        h5_util.write_list(f, label, xd)
    f['singular_values_'] = _S
    f['covariance_values'] = covar
    f.close()
    if verbose:
        print ('transformed PCA directions saved in: %s' % dir_name)
    # Also save the directions mapped back to the (possibly complex) space.
    complexdir_name = dir_name[:-4] + '_complex.h5'
    f = h5py.File(complexdir_name, 'w')
    for i, xd in enumerate(xi_directions):
        label = 'direction_{}'.format(i)
        x = untransform_tensors(xd, wsave, args.complex)
        if verbose:
            print(f'after untransformation:\n\tx={shapeof(x)}\n\treference={shapeof(wsave)}')
        h5_util.write_list(f, label, x)
    f.close()
    return dir_name
| 18,597 | 31.742958 | 112 | py |
Fine-tuning-NOs | Fine-tuning-NOs-master/visualization_code/create_surface.py | """
Calculate the loss surface in parallel.
Code adapted from Tom Goldstein's implementation of the 2018 NeurIPS paper:
Hao Li, Zheng Xu, Gavin Taylor, Christoph Studer and Tom Goldstein.
Visualizing the Loss Landscape of Neural Nets. NIPS, 2018.
Github: https://github.com/tomgoldstein/loss-landscape
Given PCA directions, the code samples the loss associeted with models
whose weights lie in the corresponding two-dimensional weight
parameterization plane.
"""
import numpy as np
import torch
import copy
import math
import h5py
import os
import argparse
import sys
import json
import csv
# import Tom Goldstein's loss-landscape library
# Fixed: the multi-line import continued after a bare trailing comma, which
# is a SyntaxError; wrap it in parentheses. Also bind numpy to `np`, the
# alias this module actually uses (np.linspace below).
from loss_landscape import (plot_2D, plot_1D, net_plotter, mpi4pytorch,
                            scheduler, create_trajectory)
sys.path.append('../')
from main import choosing_model, datasetFactory
import yaml
import utilities
from projection import setup_PCA_directions, project_trajectory
from projection_helper import get_loader
from create_trajectory import evaluate
import plot_2D
import time
import socket
import sys
import numpy
import torchvision
import torch.nn as nn
import tqdm
import dataloader
import evaluation
import projection as proj
from projection import shapeof, sizeof
import plotter_helper as plotter
import plot_2D
import plot_1D
import model_loader
import scheduler
import mpi4pytorch as mpi
def name_surface_file(args, dir_file):
    """Derive the surface-file name from the sampling parameters.

    An explicit args.surf_file always wins; otherwise the name is built from
    dir_file plus the sampling grid and dataloader options.
    """
    # skip if surf_file is specified in args
    if args.surf_file:
        return args.surf_file
    # use args.dir_file as the prefix, then append the resolution ...
    parts = [dir_file]
    parts.append('_[%s,%s,%d]' % (str(args.xmin), str(args.xmax), int(args.xnum)))
    if args.y:
        parts.append('x[%s,%s,%d]' % (str(args.ymin), str(args.ymax), int(args.ynum)))
    # ... and the dataloader parameters
    if args.raw_data: # without data normalization
        parts.append('_rawdata')
    if args.data_split > 1:
        parts.append('_datasplit=' + str(args.data_split) + '_splitidx=' + str(args.split_idx))
    return ''.join(parts) + ".h5"
def setup_surface_file(args, surf_file, dir_file):
    """Create the surface .h5 file and store the sampling coordinate grids."""
    print('-------------------------------------------------------------------')
    print('setup_surface_file')
    print('-------------------------------------------------------------------')
    print('surf_file is {}'.format(surf_file))
    print('dir_file is {}'.format(dir_file))
    # A file that already holds its coordinate grids is considered set up.
    if os.path.exists(surf_file):
        f = h5py.File(surf_file, 'r')
        already_done = (args.y and 'ycoordinates' in f.keys()) or 'xcoordinates' in f.keys()
        if already_done:
            f.close()
            print ("%s is already set up" % surf_file)
            return
    f = h5py.File(surf_file, 'a' if os.path.exists(surf_file) else 'w')
    f['dir_file'] = dir_file
    # Create the coordinates(resolutions) at which the function is evaluated
    xcoordinates = np.linspace(args.xmin, args.xmax, num=int(args.xnum))
    f['xcoordinates'] = xcoordinates
    if args.y:
        ycoordinates = np.linspace(args.ymin, args.ymax, num=int(args.ynum))
        f['ycoordinates'] = ycoordinates
    f.close()
    return surf_file
def to_path(checkpath, config):
    """Join the checkpoint root with the config's save directory."""
    save_dir = config['ckpt']['save_dir']
    return os.path.join(checkpath, save_dir)
def to_filename(checkpath, config, filename):
    """Full path of *filename* inside the run's checkpoint directory."""
    run_dir = to_path(checkpath, config)
    return os.path.join(run_dir, filename)
def crunch(surf_file, net, w, s, d, loss_key, comm, rank, args, samples, loss_func):
    """
    Calculate the loss values of modified models in parallel using MPI.

    Each rank evaluates the model at its assigned plane coordinates
    (samples[rank]) and streams results into its own CSV file, which can be
    consolidated into a surface geometry with 'combine.py'.
    """
    coords = samples[rank]
    print('Computing %d values for rank %d'% (len(coords), rank))
    start_time = time.time()
    fname = surf_file + f'_rank={rank}.csv'
    if os.path.exists(fname):
        print(f'Creating new filename since {fname} already exists')
        tstr = time.asctime(time.gmtime(time.time())).replace(' ', '_')
        fname = surf_file + f'{tstr}_rank={rank}.csv'
    # Note: the CSV file cannot stay open otherwise changes will only be
    # recorded upon completion of the loop. Given the odds that a MPI
    # job is cut short on an HPC architecture, we elect instead to save
    # each loss value in a csv file as soon as it is computed.
    with open(fname, 'w') as csvfile:
        writer = csv.DictWriter(csvfile, fieldnames=['x', 'y', 'loss', 'time'])
        writer.writeheader()
    # Loop over all uncalculated loss values
    pbar = tqdm.tqdm(coords, total=len(coords), ncols=100, desc=f'Sampling loss surface for rank {rank}')
    for c in pbar:
        # Load the weights corresponding to those coordinates into the net
        if args.dir_type == 'weights':
            plotter.set_weights(net.module if args.ngpu > 1 else net, w, d, c)
        elif args.dir_type == 'states':
            plotter.set_states(net.module if args.ngpu > 1 else net, s, d, c)
        # Record the time to compute the loss value
        loss_start = time.time()
        loss, mse = loss_func(net, rank)
        loss_compute_time = time.time() - loss_start
        with open(fname, 'a') as csvfile:
            writer = csv.DictWriter(csvfile, fieldnames=['x', 'y', 'loss', 'time'])
            writer.writerow({'x': c[0], 'y': c[1], 'loss': loss, 'time': loss_compute_time})
    total_time = time.time() - start_time
    # Fixed: the original print was missing its closing parenthesis (SyntaxError).
    print('Rank %d done! Total time: %.2f' % (rank, total_time))
###############################################################
# MAIN
###############################################################
if __name__ == '__main__':
    # Command-line driver: sample the loss surface of a checkpointed model
    # over a plane spanned by precomputed (PCA) directions, optionally under
    # MPI, and optionally plot the result.
    parser = argparse.ArgumentParser(description='plotting loss surface',
            formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--mpi', '-m', action='store_true', help='use mpi')
    parser.add_argument('--cuda', action='store_true', help='use cuda')
    parser.add_argument('--threads', default=2, type=int, help='number of threads')
    parser.add_argument('--ngpu', type=int, default=1, help='number of GPUs to use for each rank, useful for data parallel evaluation')
    parser.add_argument('--batch_size', default=128, type=int, help='minibatch size')
    # data parameters
    # model parameters
    parser.add_argument('-p', '--path', type=str, required=True, help='Path to checkpoint files')
    parser.add_argument('-c','--config_file', type=str, required=True,
                        help='Path to the model configuration file')
    parser.add_argument('--filename', help='Filename of final model')
    parser.add_argument('--model_name', default='dummy model', help='model name')
    parser.add_argument('--loss_name', '-l', default='mse', help='loss functions: crossentropy | mse')
    parser.add_argument('--skip', default=None, type=int, help='Were to resume computation on all ranks')
    parser.add_argument('--samples', default=None, type=str, help='File containing the explicit list of surface locations to sample')
    # direction parameters
    parser.add_argument('--dir_file', required=True, help='specify the name of direction file, or the path to an existing direction file')
    parser.add_argument('--dir_type', default='weights', help='direction type: weights | states (including BN\'s running_mean/var)')
    parser.add_argument('--x', type=float, nargs=3, default='-1 1 51', help='xmin xmax xnum')
    parser.add_argument('--y', type=float, nargs=3, default='-1 1 51', help='ymin ymax ynum')
    parser.add_argument('--testing', action='store_true', help='Sample testing loss (default: training loss)')
    parser.add_argument('--xnorm', default='', help='direction normalization: filter | layer | weight')
    parser.add_argument('--ynorm', default='', help='direction normalization: filter | layer | weight')
    parser.add_argument('--xignore', default='', help='ignore bias and BN parameters: biasbn')
    parser.add_argument('--yignore', default='', help='ignore bias and BN parameters: biasbn')
    parser.add_argument('--surf_file', default='', help='customize the name of surface file, could be an existing file.')
    # plot parameters
    parser.add_argument('--proj_file', default='', help='the .h5 file contains projected optimization trajectory.')
    parser.add_argument('--loss_max', default=5, type=float, help='Maximum value to show in 1D plot')
    parser.add_argument('--vmax', default=10, type=float, help='Maximum value to map')
    parser.add_argument('--vmin', default=0.1, type=float, help='Miminum value to map')
    parser.add_argument('--vlevel', default=0.5, type=float, help='plot contours every vlevel')
    parser.add_argument('--show', action='store_true', default=False, help='show plotted figures')
    parser.add_argument('--log', action='store_true', default=False, help='use log scale for loss values')
    parser.add_argument('--plot', action='store_true', default=False, help='plot figures after computation')
    args = parser.parse_args()
    args.raw_data = False
    args.data_split = 0
    # reproducibility is already available by default in data setup
    # torch.manual_seed(10)
    #--------------------------------------------------------------------------
    # Environment setup
    #--------------------------------------------------------------------------
    # NOTE(review): the --threads argument is parsed but never used; the
    # thread count is hard-coded to 4 here.
    torch.set_num_threads(4)
    if args.mpi:
        comm = mpi.setup_MPI()
        rank, nproc = comm.Get_rank(), comm.Get_size()
    else:
        comm, rank, nproc = None, 0, 1
    # in case of multiple GPUs per node, set the GPU to use for each rank
    if args.cuda:
        device = torch.device('cuda')
        if not torch.cuda.is_available():
            raise Exception('User selected cuda option, but cuda is not available on this machine')
        gpu_count = torch.cuda.device_count()
        torch.cuda.set_device(rank % gpu_count)
        print('Rank %d use GPU %d of %d GPUs on %s' %
              (rank, torch.cuda.current_device(), gpu_count, socket.gethostname()))
    else:
        device = torch.device('cpu')
    #--------------------------------------------------------------------------
    # Check plotting resolution
    #--------------------------------------------------------------------------
    try:
        if args.x is not None and args.y is not None:
            args.xmin, args.xmax, args.xnum = args.x
            args.xnum = int(args.xnum)
            args.ymin, args.ymax, args.ynum = args.y # (None, None, None)
            args.ynum = int(args.ynum)
            print(f'Surface sampling bounds: [{args.xmin}, {args.xmax}] x [{args.ymin}, {args.ymax}]')
            print(f'Sampling density: {args.xnum} x {args.ynum} = {args.xnum *args.ynum} samples')
    # NOTE(review): bare `except:` also swallows KeyboardInterrupt/SystemExit;
    # `except (TypeError, ValueError):` would be safer here.
    except:
        raise Exception('Improper format for x- or y-coordinates. Try something like -1 1 51') #-1:1:51')
    #--------------------------------------------------------------------------
    # Load models and extract parameters
    #--------------------------------------------------------------------------
    data = None
    if args.testing:
        loss_label = 'test_loss'
    else:
        loss_label = 'train_loss'
    config_file = args.config_file
    with open(config_file, 'r') as stream:
        config = yaml.load(stream, yaml.FullLoader)
    c_save = config["ckpt"]
    args.path = os.path.join(args.path, config['ckpt']['save_dir'])
    model = choosing_model(config)
    if args.testing:
        dataloader = get_loader(config, train=False, prefix='../')
    else:
        dataloader, _ = get_loader(config, train=True, prefix='../')
    myloss = utilities.LpLoss(size_average=False)
    #--------------------------------------------------------------------------
    # load the final model
    #--------------------------------------------------------------------------
    final_model = os.path.join(args.path, args.filename)
    checkpoint = torch.load(final_model, map_location=lambda storage, loc: storage)
    model.load_state_dict(checkpoint['state_dict'])
    if args.cuda:
        model.cuda()
    model.eval()
    w = plotter.get_weights(model) # initial parameters
    s = copy.deepcopy(model.state_dict()) # deepcopy since state_dict are references
    if args.cuda and args.ngpu > 1:
        # data parallel with multiple GPUs on a single node
        model = nn.DataParallel(model, device_ids=range(torch.cuda.device_count()))
    # Build the list of (x, y) surface coordinates: either an explicit JSON
    # list or a regular grid from the --x/--y bounds.
    if args.samples is not None:
        print('Importing sampling locations from file')
        with open(args.samples, 'r') as fp:
            coords = json.load(fp)
        #print(f'{len(coords)} coordinates will be sampled')
        #print(f'type(coords) is {type(coords)}')
        #print(f'type(coords[0]) is {type(coords[0])}')
        #print(f'type(coords[0][0]) is {type(coords[0][0])}')
    elif args.x is not None and args.y is not None:
        coords = [ (x, y) for x in np.linspace(args.xmin, args.xmax, args.xnum) for y in np.linspace(args.ymin, args.ymax, args.ynum)]
        #print(f'type(coords) is {type(coords)}')
        #print(f'type(coords[0]) is {type(coords[0])}')
        #print(f'type(coords[0][0]) is {type(coords[0][0])}')
    else:
        raise ValueError('Missing information to determine sampling locations')
    # Split the coordinate list across MPI ranks; rank 0 absorbs the remainder.
    n_per_rank = len(coords) // nproc
    rem = len(coords) - n_per_rank*nproc
    n_per_rank_0 = n_per_rank + rem
    print(f'each rank will sample {n_per_rank} positions')
    print('Assigning samples to ranks')
    counter = 0
    samples = [ [] for i in range(nproc) ]
    samples[0] = coords[0:n_per_rank_0]
    for r in range(1,nproc):
        samples[r] = coords[n_per_rank_0 + (r-1)*n_per_rank : n_per_rank_0 + r*n_per_rank]
    print('done')
    if args.samples is None and args.skip is not None:
        for r in range(0, nproc):
            samples[r] = samples[r][args.skip:]
    # for i in range(n_per_rank+n_per_rank_extra):
    #     samples[0].append(coords[i])
    #     counter += 1
    # for r in range(1, nproc):
    #     for i in range(n_per_rank):
    #         samples[r].append(coords[counter])
    #         counter += 1
    #--------------------------------------------------------------------------
    # Setup the direction file and the surface file
    #--------------------------------------------------------------------------
    dir_file = os.path.join(args.path, args.dir_file)
    if not args.surf_file:
        args.surf_file = f'{args.dir_file}_surface_[{args.xmin}-{args.xmax}]x[{args.ymin}-{args.ymax}]_{args.xnum}x{args.ynum}.h5'
    surf_file = os.path.join(args.path, args.surf_file)
    #if rank == 0:
    #    setup_surface_file(args, surf_file, dir_file)
    # wait until master has setup the direction file and surface file
    #mpi.barrier(comm)
    # load directions
    directions = plotter.load_directions(dir_file)
    print(f'type(directions) is {type(directions)}')
    print(f'type(directions[0]) is {type(directions[0])}')
    print(f'type(directions[0][0]) is {type(directions[0][0])}')
    # calculate the cosine similarity of the two directions
    # NOTE(review): the `False and` guard permanently disables this check.
    if False and len(directions) == 2 and rank == 0:
        similarity = proj.cal_angle(proj.nplist_to_tensor(directions[0]), proj.nplist_to_tensor(directions[1]))
        print('cosine similarity between x-axis and y-axis: %f' % similarity)
    class loss_callback:
        # Small adapter giving crunch() a callable loss evaluator bound to a
        # fixed dataset / loss / train-mode / device configuration.
        def __init__(self, dataset, loss, train, cuda):
            self.dataset = dataset
            self.loss = loss
            self.train = train
            self.cuda = cuda
        def __call__(self, model, rank=0, verbose=False):
            return evaluate(model, self.dataset, self.loss, train=self.train, verbose=verbose, cuda=self.cuda, rank=rank)
    #--------------------------------------------------------------------------
    # Start the computation
    #--------------------------------------------------------------------------
    crunch(surf_file, model, w, s, directions, loss_label, comm, rank, args, samples, loss_func=loss_callback(dataloader, myloss, not args.testing, args.cuda))
    #--------------------------------------------------------------------------
    # Plot figures
    #--------------------------------------------------------------------------
    if args.plot and rank == 0:
        if args.y and args.proj_file:
            plot_2D.plot_contour_trajectory(surf_file, dir_file,
                args.proj_file, loss_label, vmin=args.vmin, vmax=args.vmax,
                vlevel=args.vlevel, show=args.show)
        elif args.y:
            plot_2D.plot_2d_contour(surf_file, loss_label, args.vmin,
                args.vmax, args.vlevel, args.show)
        else:
            plot_1D.plot_1d_loss_err(surf_file, args.xmin, args.xmax,
                args.loss_max, args.log, args.show)
| 16,827 | 42.25964 | 159 | py |
Fine-tuning-NOs | Fine-tuning-NOs-master/visualization_code/projection_helper.py | import torch
import h5py
import sys
import os
sys.path.append('../')
import utilities
def sizeof(t):
n = 0
if isinstance(t, list):
for w in t:
n += w.numel()
elif isinstance(t, torch.Tensor):
n = t.numel()
elif isinstance(t, h5py.Dataset):
n = t.size
else:
assert False
print(f'Unrecognized object of type {type(t)}')
return n
def shapeof(t):
sh = []
if isinstance(t, list):
for w in t:
sh.append(shapeof(w))
else:
sh.append([t.shape, sizeof(t), t.dtype])
return sh
def get_loader(config, train=False, prefix=''):
if train:
what = 'train'
else:
what = 'test'
c_data =config["data"]
if prefix:
path = os.path.join(prefix, c_data['PATH'])
else:
path = c_data['PATH']
gl = utilities.GettingLists(data_for_training=c_data["n_sample"],
wave_eq = c_data["PDE_type"],
data_base = c_data["process"],
PATH = path)
return utilities.MyLoader(GL=gl, do = what, config=config)
| 1,144 | 23.361702 | 69 | py |
Fine-tuning-NOs | Fine-tuning-NOs-master/visualization_code/create_trajectory.py | import numpy as np
import torch
import copy
import math
import h5py
import os
import argparse
import sys
import json
import tqdm
'''
Code adapted from Tom Goldstein's implementation of the 2018 NeurIPS paper:
Hao Li, Zheng Xu, Gavin Taylor, Christoph Studer and Tom Goldstein.
Visualizing the Loss Landscape of Neural Nets. NIPS, 2018.
Github: https://github.com/tomgoldstein/loss-landscape
Given a series of models corresponding to learning steps, compute the PCA of
the models' weights, treated as giant parameter vectors. The first few
principal components, along with the final model, can be used to create a 2D
reference frame upon which the steps can be projected.
'''
sys.path.append('../')
from main import choosing_model, datasetFactory
import yaml
import utilities
from loss_landscape import net_plotter, plot_2D
from projection import setup_PCA_directions, project_trajectory
from scatterplotmatrix import scatterplot_matrix as splom
def evaluate(model, dataloader, loss, train=False, cuda=False, verbose=False):
the_loss = 0.
if train:
lossname = 'training'
else:
lossname = 'testing'
with torch.no_grad():
pbar = tqdm.tqdm(dataloader, ncols=100, desc=f'Computing {lossname} loss')
for x, y in pbar:
batch_size, s= x.shape[0:2]
if cuda:
x, y = x.cuda(), y.cuda()
out = model(x).reshape(batch_size, s, s)
loss_test = loss(out.view(batch_size,-1), y.view(batch_size,-1))
the_loss += loss_test.item()
the_loss = the_loss / len(dataloader.dataset)
if verbose:
print(f'loss = {the_loss}')
return the_loss, 0
def to_path(checkpath, config):
return os.path.join(checkpath, config['ckpt']['save_dir'])
def step_to_filename(checkpath, config, basename, step):
return os.path.join(to_path(checkpath, config), basename.format(step))
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Plot optimization trajectory',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-c','--config_file', type=str, required=True,
help='Path to the model configuration file')
parser.add_argument('-p', '--path', type=str, required=True, help='Path to checkpoint files')
parser.add_argument('--dir_type', default='weights',
help="""direction type: weights (all weights except bias and BN paras) |
states (include BN.running_mean/var)""")
parser.add_argument('--ignore', action='store_true', help='ignore bias and BN paras: biasbn (no bias or bn)')
parser.add_argument('--complex', type=str, default='split', help='Method to handle imaginary part of complex weights (split/both, ignore/real, keep/same, imaginary)')
parser.add_argument('--filename', help='Regex filename for checkpoint modes_list')
parser.add_argument('--steps', type=int, nargs='+', help='list of all available step ids')
parser.add_argument('--dir_file', help='load/save the direction file for projection')
parser.add_argument('--proj_method', type=str, default='cos', help='Projection method onto PCA coordinates')
parser.add_argument('--dimension', type=int, default=2, help='Spatial dimensions in which to draw curve')
parser.add_argument('--debug', action='store_true', help='Run verification code for PCA projection forward and backward')
parser.add_argument('--verbose', action='store_true', help='Select verbose output')
args = parser.parse_args()
config_file = args.config_file
with open(config_file, 'r') as stream:
config = yaml.load(stream, yaml.FullLoader)
c_save = config["ckpt"]
model = choosing_model(config)
test_dataloader = datasetFactory(config, train=False, prefix='../')
myloss = utilities.LpLoss(size_average=False)
#--------------------------------------------------------------------------
# load the final model
#--------------------------------------------------------------------------
last_id = args.steps[-1]
final_model = step_to_filename(args.path, config, args.filename, last_id)
checkpoint = torch.load(final_model, map_location=lambda storage, loc: storage)
model.load_state_dict(checkpoint['state_dict'])
model.eval()
w = net_plotter.get_weights(model)
s = model.state_dict()
#--------------------------------------------------------------------------
# collect models to be projected
#--------------------------------------------------------------------------
model_files = {}
for epoch in args.steps:
model_file = step_to_filename(args.path, config, args.filename, epoch)
if not os.path.exists(model_file):
print('model %s does not exist' % model_file)
exit(-1)
else:
model_files[epoch] = model_file
def callback(step):
name = model_files[step]
model2 = choosing_model(config)
checkpoint = torch.load(name, map_location=lambda storage, loc: storage)
model2.load_state_dict(checkpoint['state_dict'])
# model2.cuda()
model2.eval()
return model2
#--------------------------------------------------------------------------
# load or create projection directions
#--------------------------------------------------------------------------
args.path = to_path(args.path, config)
if not args.dir_file:
print('computing PCA directions for {} models'.format(len(args.steps)))
args.dir_file = setup_PCA_directions(args, callback, w, s, verbose=args.verbose)
print(f'dir_file={args.dir_file}')
#--------------------------------------------------------------------------
# projection trajectory to given directions
#--------------------------------------------------------------------------
proj_file = project_trajectory(args, w, s, callback)
| 5,937 | 41.113475 | 170 | py |
XGBOD | XGBOD-master/xgbod_demo.py | '''
Demo codes for XGBOD.
Author: Yue Zhao
notes: the demo code simulates the use of XGBOD with some changes to expedite
the execution. Use the full code for the production.
'''
import os
import random
import scipy.io as scio
import numpy as np
from sklearn.preprocessing import StandardScaler, normalize
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from xgboost.sklearn import XGBClassifier
from imblearn.ensemble import BalancedBaggingClassifier
from models.utility import get_precn, print_baseline
from models.generate_TOS import get_TOS_knn
from models.generate_TOS import get_TOS_loop
from models.generate_TOS import get_TOS_lof
from models.generate_TOS import get_TOS_svm
from models.generate_TOS import get_TOS_iforest
from models.generate_TOS import get_TOS_hbos
from models.select_TOS import random_select, accurate_select, balance_select
# load data file
# mat = scio.loadmat(os.path.join('datasets', 'speech.mat'))
mat = scio.loadmat(os.path.join('datasets', 'arrhythmia.mat'))
# mat = scio.loadmat(os.path.join('datasets', 'cardio.mat'))
# mat = scio.loadmat(os.path.join('datasets', 'letter.mat'))
# mat = scio.loadmat(os.path.join('datasets', 'mammography.mat'))
X = mat['X']
y = mat['y']
# use unit norm vector X improves knn, LoOP, and LOF results
scaler = StandardScaler().fit(X)
# X_norm = scaler.transform(X)
X_norm = normalize(X)
feature_list = []
# Running KNN-base algorithms to generate addtional features
# predefined range of k
k_range = [1, 2, 3, 4, 5, 10, 15, 20, 30, 40, 50, 60, 70, 80, 90, 100, 150,
200, 250]
# predefined range of k to be used with LoOP due to high complexity
k_range_short = [1, 3, 5, 10]
# validate the value of k
k_range = [k for k in k_range if k < X.shape[0]]
# predefined range of nu for one-class svm
nu_range = [0.01, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 0.99]
# predefined range for number of estimators in isolation forests
n_range = [10, 20, 50, 70, 100, 150, 200, 250]
##############################################################################
# Generate TOS using KNN based algorithms
feature_list, roc_knn, prc_n_knn, result_knn = get_TOS_knn(X_norm, y, k_range,
feature_list)
# Generate TOS using LoOP
feature_list, roc_loop, prc_n_loop, result_loop = get_TOS_loop(X, y,
k_range_short,
feature_list)
# Generate TOS using LOF
feature_list, roc_lof, prc_n_lof, result_lof = get_TOS_lof(X_norm, y, k_range,
feature_list)
# Generate TOS using one class svm
feature_list, roc_ocsvm, prc_n_ocsvm, result_ocsvm = get_TOS_svm(X, y,
nu_range,
feature_list)
# Generate TOS using isolation forests
feature_list, roc_if, prc_n_if, result_if = get_TOS_iforest(X, y, n_range,
feature_list)
# Generate TOS using isolation forests
feature_list, roc_hbos, prc_n_hbos, result_hbos = get_TOS_hbos(X, y, k_range,
feature_list)
##############################################################################
# combine the feature space by concanating various TOS
X_train_new_orig = np.concatenate(
(result_knn, result_loop, result_lof, result_ocsvm, result_if), axis=1)
X_train_all_orig = np.concatenate((X, X_train_new_orig), axis=1)
# combine ROC and Precision@n list
roc_list = roc_knn + roc_loop + roc_lof + roc_ocsvm + roc_if
prc_n_list = prc_n_knn + prc_n_loop + prc_n_lof + prc_n_ocsvm + prc_n_if
# get the results of baselines
print_baseline(X_train_new_orig, y, roc_list, prc_n_list)
##############################################################################
# select TOS using different methods
p = 10 # number of selected TOS
# random selection
# please be noted the actual random selection happens within the
# train-test split, with p repetitions.
X_train_new_rand, X_train_all_rand = random_select(X, X_train_new_orig,
roc_list, p)
# accurate selection
X_train_new_accu, X_train_all_accu = accurate_select(X, X_train_new_orig,
roc_list, p)
# balance selection
X_train_new_bal, X_train_all_bal = balance_select(X, X_train_new_orig,
roc_list, p)
###############################################################################
# build various classifiers
# it is noted that the data split should happen as the first stage
# test data should not be exposed. However, with a relatively large number of
# repetitions, the demo code would generate a similar result.
# the full code uses the containers to save the intermediate TOS models. The
# codes would be shared after the cleanup.
ite = 30 # number of iterations
test_size = 0.4 # training = 60%, testing = 40%
result_dict = {}
clf_list = [XGBClassifier(), LogisticRegression(penalty="l1"),
LogisticRegression(penalty="l2")]
clf_name_list = ['xgb', 'lr1', 'lr2']
# initialize the result dictionary
for clf_name in clf_name_list:
result_dict[clf_name + 'ROC' + 'o'] = []
result_dict[clf_name + 'ROC' + 's'] = []
result_dict[clf_name + 'ROC' + 'n'] = []
result_dict[clf_name + 'PRC@n' + 'o'] = []
result_dict[clf_name + 'PRC@n' + 's'] = []
result_dict[clf_name + 'PRC@n' + 'n'] = []
for i in range(ite):
s_feature_rand = random.sample(range(0, len(roc_list)), p)
X_train_new_rand = X_train_new_orig[:, s_feature_rand]
X_train_all_rand = np.concatenate((X, X_train_new_rand), axis=1)
original_len = X.shape[1]
# use all TOS
X_train, X_test, y_train, y_test = train_test_split(X_train_all_orig, y,
test_size=test_size)
# # use Random Selection
# X_train, X_test, y_train, y_test = train_test_split(X_train_all_rand, y,
# test_size=test_size)
# # use Accurate Selection
# X_train, X_test, y_train, y_test = train_test_split(X_train_all_accu, y,
# test_size=test_size)
# # use Balance Selection
# X_train, X_test, y_train, y_test = train_test_split(X_train_all_bal, y,
# test_size=test_size)
# use original features
X_train_o = X_train[:, 0:original_len]
X_test_o = X_test[:, 0:original_len]
X_train_n = X_train[:, original_len:]
X_test_n = X_test[:, original_len:]
for clf, clf_name in zip(clf_list, clf_name_list):
print('processing', clf_name, 'round', i + 1)
if clf_name != 'xgb':
clf = BalancedBaggingClassifier(base_estimator=clf,
ratio='auto',
replacement=False)
# fully supervised
clf.fit(X_train_o, y_train.ravel())
y_pred = clf.predict_proba(X_test_o)
roc_score = roc_auc_score(y_test, y_pred[:, 1])
prec_n = get_precn(y_test, y_pred[:, 1])
result_dict[clf_name + 'ROC' + 'o'].append(roc_score)
result_dict[clf_name + 'PRC@n' + 'o'].append(prec_n)
# unsupervised
clf.fit(X_train_n, y_train.ravel())
y_pred = clf.predict_proba(X_test_n)
roc_score = roc_auc_score(y_test, y_pred[:, 1])
prec_n = get_precn(y_test, y_pred[:, 1])
result_dict[clf_name + 'ROC' + 'n'].append(roc_score)
result_dict[clf_name + 'PRC@n' + 'n'].append(prec_n)
# semi-supervised
clf.fit(X_train, y_train.ravel())
y_pred = clf.predict_proba(X_test)
roc_score = roc_auc_score(y_test, y_pred[:, 1])
prec_n = get_precn(y_test, y_pred[:, 1])
result_dict[clf_name + 'ROC' + 's'].append(roc_score)
result_dict[clf_name + 'PRC@n' + 's'].append(prec_n)
for eva in ['ROC', 'PRC@n']:
print()
for clf_name in clf_name_list:
print(np.round(np.mean(result_dict[clf_name + eva + 'o']), decimals=4),
eva, clf_name, 'original features')
print(np.round(np.mean(result_dict[clf_name + eva + 'n']), decimals=4),
eva, clf_name, 'TOS only')
print(np.round(np.mean(result_dict[clf_name + eva + 's']), decimals=4),
eva, clf_name, 'original feature + TOS')
| 8,726 | 39.21659 | 79 | py |
XGBOD | XGBOD-master/xgbod_full.py | import os
import pandas as pd
import numpy as np
import scipy.io as scio
from sklearn.preprocessing import StandardScaler, Normalizer
from sklearn.model_selection import train_test_split
from sklearn.metrics import roc_auc_score
from sklearn.neighbors import LocalOutlierFactor
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import IsolationForest
from sklearn.svm import OneClassSVM
from xgboost.sklearn import XGBClassifier
from imblearn.ensemble import BalancedBaggingClassifier
from PyNomaly import loop
from models.knn import Knn
from models.utility import get_precn, print_baseline
# use one dataset at a time; more datasets could be added to /datasets folder
# the experiment codes use a bit more setting up, otherwise the
# exact reproduction is infeasible. Clean-up codes are going to be moved
# load data file
mat = scio.loadmat(os.path.join('datasets', 'letter.mat'))
ite = 30 # number of iterations
test_size = 0.4 # training = 60%, testing = 40%
X_orig = mat['X']
y_orig = mat['y']
# outlier percentage
out_perc = np.count_nonzero(y_orig) / len(y_orig)
# define classifiers to use
clf_list = [XGBClassifier(), LogisticRegression(penalty="l1"),
LogisticRegression(penalty="l2")]
clf_name_list = ['xgb', 'lr1', 'lr2']
# initialize the container to store the results
result_dict = {}
# initialize the result dictionary
for clf_name in clf_name_list:
result_dict[clf_name + 'roc' + 'o'] = []
result_dict[clf_name + 'roc' + 's'] = []
result_dict[clf_name + 'roc' + 'n'] = []
result_dict[clf_name + 'precn' + 'o'] = []
result_dict[clf_name + 'precn' + 's'] = []
result_dict[clf_name + 'precn' + 'n'] = []
for t in range(ite):
print('\nProcessing trial', t + 1, 'out of', ite)
# split X and y for training and validation
X, X_test, y, y_test = train_test_split(X_orig, y_orig,
test_size=test_size)
# reserve the normalized data
scaler = Normalizer().fit(X)
X_norm = scaler.transform(X)
X_test_norm = scaler.transform(X_test)
feature_list = []
# predefined range of K
k_list_pre = [1, 2, 3, 4, 5, 10, 15, 20, 30, 40, 50, 60, 70, 80, 90,
100, 150, 200, 250]
# trim the list in case of small sample size
k_list = [k for k in k_list_pre if k < X.shape[0]]
###########################################################################
train_knn = np.zeros([X.shape[0], len(k_list)])
test_knn = np.zeros([X_test.shape[0], len(k_list)])
roc_knn = []
prec_n_knn = []
for i in range(len(k_list)):
k = k_list[i]
clf = Knn(n_neighbors=k, contamination=out_perc, method='largest')
clf.fit(X_norm)
train_score = clf.decision_scores
pred_score = clf.decision_function(X_test_norm)
roc = np.round(roc_auc_score(y_test, pred_score), decimals=4)
prec_n = np.round(get_precn(y_test, pred_score), decimals=4)
print('knn roc pren @ {k} is {roc} {pren}'.format(k=k, roc=roc,
pren=prec_n))
feature_list.append('knn_' + str(k))
roc_knn.append(roc)
prec_n_knn.append(prec_n)
train_knn[:, i] = train_score
test_knn[:, i] = pred_score.ravel()
###########################################################################
train_knn_mean = np.zeros([X.shape[0], len(k_list)])
test_knn_mean = np.zeros([X_test.shape[0], len(k_list)])
roc_knn_mean = []
prec_n_knn_mean = []
for i in range(len(k_list)):
k = k_list[i]
clf = Knn(n_neighbors=k, contamination=out_perc, method='mean')
clf.fit(X_norm)
train_score = clf.decision_scores
pred_score = clf.decision_function(X_test_norm)
roc = np.round(roc_auc_score(y_test, pred_score), decimals=4)
prec_n = np.round(get_precn(y_test, pred_score), decimals=4)
print('knn_mean roc pren @ {k} is {roc} {pren}'.format(k=k, roc=roc,
pren=prec_n))
feature_list.append('knn_mean_' + str(k))
roc_knn_mean.append(roc)
prec_n_knn_mean.append(prec_n)
train_knn_mean[:, i] = train_score
test_knn_mean[:, i] = pred_score.ravel()
###########################################################################
train_knn_median = np.zeros([X.shape[0], len(k_list)])
test_knn_median = np.zeros([X_test.shape[0], len(k_list)])
roc_knn_median = []
prec_n_knn_median = []
for i in range(len(k_list)):
k = k_list[i]
clf = Knn(n_neighbors=k, contamination=out_perc, method='median')
clf.fit(X_norm)
train_score = clf.decision_scores
pred_score = clf.decision_function(X_test_norm)
roc = np.round(roc_auc_score(y_test, pred_score), decimals=4)
prec_n = np.round(get_precn(y_test, pred_score), decimals=4)
print('knn_median roc pren @ {k} is {roc} {pren}'.format(k=k, roc=roc,
pren=prec_n))
feature_list.append('knn_median_' + str(k))
roc_knn_median.append(roc)
prec_n_knn_median.append(prec_n)
train_knn_median[:, i] = train_score
test_knn_median[:, i] = pred_score.ravel()
###########################################################################
train_lof = np.zeros([X.shape[0], len(k_list)])
test_lof = np.zeros([X_test.shape[0], len(k_list)])
roc_lof = []
prec_n_lof = []
for i in range(len(k_list)):
k = k_list[i]
clf = LocalOutlierFactor(n_neighbors=k)
clf.fit(X_norm)
# save the train sets
train_score = clf.negative_outlier_factor_ * -1
# flip the score
pred_score = clf._decision_function(X_test_norm) * -1
roc = np.round(roc_auc_score(y_test, pred_score), decimals=4)
prec_n = np.round(get_precn(y_test, pred_score), decimals=4)
print('lof roc pren @ {k} is {roc} {pren}'.format(k=k, roc=roc,
pren=prec_n))
feature_list.append('lof_' + str(k))
roc_lof.append(roc)
prec_n_lof.append(prec_n)
train_lof[:, i] = train_score
test_lof[:, i] = pred_score
###########################################################################
# Noted that LoOP is not really used for prediction since its high
# computational complexity
# However, it is included to demonstrate the effectiveness of XGBOD only
df_X = pd.DataFrame(np.concatenate([X_norm, X_test_norm], axis=0))
# predefined range of K
k_list = [1, 5, 10, 20]
train_loop = np.zeros([X.shape[0], len(k_list)])
test_loop = np.zeros([X_test.shape[0], len(k_list)])
roc_loop = []
prec_n_loop = []
for i in range(len(k_list)):
k = k_list[i]
clf = loop.LocalOutlierProbability(df_X, n_neighbors=k).fit()
score = clf.local_outlier_probabilities.astype(float)
# save the train sets
train_score = score[0:X.shape[0]]
# flip the score
pred_score = score[X.shape[0]:]
roc = np.round(roc_auc_score(y_test, pred_score), decimals=4)
prec_n = np.round(get_precn(y_test, pred_score), decimals=4)
print('loop roc pren @ {k} is {roc} {pren}'.format(k=k, roc=roc,
pren=prec_n))
feature_list.append('loop_' + str(k))
roc_loop.append(roc)
prec_n_loop.append(prec_n)
train_loop[:, i] = train_score
test_loop[:, i] = pred_score
##########################################################################
nu_list = [0.01, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 0.99]
train_svm = np.zeros([X.shape[0], len(nu_list)])
test_svm = np.zeros([X_test.shape[0], len(nu_list)])
roc_svm = []
prec_n_svm = []
for i in range(len(nu_list)):
nu = nu_list[i]
clf = OneClassSVM(nu=nu)
clf.fit(X)
train_score = clf.decision_function(X) * -1
pred_score = clf.decision_function(X_test) * -1
roc = np.round(roc_auc_score(y_test, pred_score), decimals=4)
prec_n = np.round(get_precn(y_test, pred_score), decimals=4)
print('svm roc / pren @ {nu} is {roc} {pren}'.format(nu=nu, roc=roc,
pren=prec_n))
feature_list.append('svm_' + str(nu))
roc_svm.append(roc)
prec_n_svm.append(prec_n)
train_svm[:, i] = train_score.ravel()
test_svm[:, i] = pred_score.ravel()
###########################################################################
n_list = [10, 20, 50, 70, 100, 150, 200, 250]
train_if = np.zeros([X.shape[0], len(n_list)])
test_if = np.zeros([X_test.shape[0], len(n_list)])
roc_if = []
prec_n_if = []
for i in range(len(n_list)):
n = n_list[i]
clf = IsolationForest(n_estimators=n)
clf.fit(X)
train_score = clf.decision_function(X) * -1
pred_score = clf.decision_function(X_test) * -1
roc = np.round(roc_auc_score(y_test, pred_score), decimals=4)
prec_n = np.round(get_precn(y_test, pred_score), decimals=4)
print('if roc / pren @ {n} is {roc} {pren}'.format(n=n, roc=roc,
pren=prec_n))
feature_list.append('if_' + str(n))
roc_if.append(roc)
prec_n_if.append(prec_n)
train_if[:, i] = train_score
test_if[:, i] = pred_score
#########################################################################
X_train_new = np.concatenate((train_knn, train_knn_mean, train_knn_median,
train_lof, train_loop, train_svm, train_if),
axis=1)
X_test_new = np.concatenate((test_knn, test_knn_mean, test_knn_median,
test_lof, test_loop, test_svm, test_if),
axis=1)
X_train_all = np.concatenate((X, X_train_new), axis=1)
X_test_all = np.concatenate((X_test, X_test_new), axis=1)
roc_list = roc_knn + roc_knn_mean + roc_knn_median + roc_lof + roc_loop + roc_svm + roc_if
prec_n_list = prec_n_knn + prec_n_knn_mean + prec_n_knn_median + prec_n_lof + prec_n_loop + prec_n_svm + prec_n_if
# get the results of baselines
print_baseline(X_test_new, y_test, roc_list, prec_n_list)
###########################################################################
# select TOS using different methods
p = 10 # number of selected TOS
# TODO: supplement the cleaned up version for selection methods
##############################################################################
for clf, clf_name in zip(clf_list, clf_name_list):
print('processing', clf_name)
if clf_name != 'xgb':
clf = BalancedBaggingClassifier(base_estimator=clf,
ratio='auto',
replacement=False)
# fully supervised
clf.fit(X, y.ravel())
y_pred = clf.predict_proba(X_test)
roc_score = roc_auc_score(y_test, y_pred[:, 1])
prec_n = get_precn(y_test, y_pred[:, 1])
result_dict[clf_name + 'roc' + 'o'].append(roc_score)
result_dict[clf_name + 'precn' + 'o'].append(prec_n)
# unsupervised
clf.fit(X_train_new, y.ravel())
y_pred = clf.predict_proba(X_test_new)
roc_score = roc_auc_score(y_test, y_pred[:, 1])
prec_n = get_precn(y_test, y_pred[:, 1])
result_dict[clf_name + 'roc' + 'n'].append(roc_score)
result_dict[clf_name + 'precn' + 'n'].append(prec_n)
# semi-supervised
clf.fit(X_train_all, y.ravel())
y_pred = clf.predict_proba(X_test_all)
roc_score = roc_auc_score(y_test, y_pred[:, 1])
prec_n = get_precn(y_test, y_pred[:, 1])
result_dict[clf_name + 'roc' + 's'].append(roc_score)
result_dict[clf_name + 'precn' + 's'].append(prec_n)
for eva in ['roc', 'precn']:
print()
for clf_name in clf_name_list:
print(np.round(np.mean(result_dict[clf_name + eva + 'o']), decimals=4),
eva, clf_name, 'old')
print(np.round(np.mean(result_dict[clf_name + eva + 'n']), decimals=4),
eva, clf_name, 'new')
print(np.round(np.mean(result_dict[clf_name + eva + 's']), decimals=4),
eva, clf_name, 'all')
| 12,621 | 35.479769 | 118 | py |
La-MAML | La-MAML-main/main.py | import importlib
import datetime
import argparse
import time
import os
import ipdb
from tqdm import tqdm
import torch
from torch.autograd import Variable
import parser as file_parser
from metrics.metrics import confusion_matrix
from utils import misc_utils
from main_multi_task import life_experience_iid, eval_iid_tasks
def eval_class_tasks(model, tasks, args):
model.eval()
result = []
for t, task_loader in enumerate(tasks):
rt = 0
for (i, (x, y)) in enumerate(task_loader):
if args.cuda:
x = x.cuda()
_, p = torch.max(model(x, t).data.cpu(), 1, keepdim=False)
rt += (p == y).float().sum()
result.append(rt / len(task_loader.dataset))
return result
def eval_tasks(model, tasks, args):
model.eval()
result = []
for i, task in enumerate(tasks):
t = i
x = task[1]
y = task[2]
rt = 0
eval_bs = x.size(0)
for b_from in range(0, x.size(0), eval_bs):
b_to = min(b_from + eval_bs, x.size(0) - 1)
if b_from == b_to:
xb = x[b_from].view(1, -1)
yb = torch.LongTensor([y[b_to]]).view(1, -1)
else:
xb = x[b_from:b_to]
yb = y[b_from:b_to]
if args.cuda:
xb = xb.cuda()
_, pb = torch.max(model(xb, t).data.cpu(), 1, keepdim=False)
rt += (pb == yb).float().sum()
result.append(rt / x.size(0))
return result
def life_experience(model, inc_loader, args):
result_val_a = []
result_test_a = []
result_val_t = []
result_test_t = []
time_start = time.time()
test_tasks = inc_loader.get_tasks("test")
val_tasks = inc_loader.get_tasks("val")
evaluator = eval_tasks
if args.loader == "class_incremental_loader":
evaluator = eval_class_tasks
for task_i in range(inc_loader.n_tasks):
task_info, train_loader, _, _ = inc_loader.new_task()
for ep in range(args.n_epochs):
model.real_epoch = ep
prog_bar = tqdm(train_loader)
for (i, (x, y)) in enumerate(prog_bar):
if((i % args.log_every) == 0):
result_val_a.append(evaluator(model, val_tasks, args))
result_val_t.append(task_info["task"])
v_x = x
v_y = y
if args.arch == 'linear':
v_x = x.view(x.size(0), -1)
if args.cuda:
v_x = v_x.cuda()
v_y = v_y.cuda()
model.train()
loss = model.observe(Variable(v_x), Variable(v_y), task_info["task"])
prog_bar.set_description(
"Task: {} | Epoch: {}/{} | Iter: {} | Loss: {} | Acc: Total: {} Current Task: {} ".format(
task_info["task"], ep+1, args.n_epochs, i%(1000*args.n_epochs), round(loss, 3),
round(sum(result_val_a[-1]).item()/len(result_val_a[-1]), 5), round(result_val_a[-1][task_info["task"]].item(), 5)
)
)
result_val_a.append(evaluator(model, val_tasks, args))
result_val_t.append(task_info["task"])
if args.calc_test_accuracy:
result_test_a.append(evaluator(model, test_tasks, args))
result_test_t.append(task_info["task"])
print("####Final Validation Accuracy####")
print("Final Results:- \n Total Accuracy: {} \n Individual Accuracy: {}".format(sum(result_val_a[-1])/len(result_val_a[-1]), result_val_a[-1]))
if args.calc_test_accuracy:
print("####Final Test Accuracy####")
print("Final Results:- \n Total Accuracy: {} \n Individual Accuracy: {}".format(sum(result_test_a[-1])/len(result_test_a[-1]), result_test_a[-1]))
time_end = time.time()
time_spent = time_end - time_start
return torch.Tensor(result_val_t), torch.Tensor(result_val_a), torch.Tensor(result_test_t), torch.Tensor(result_test_a), time_spent
def save_results(args, result_val_t, result_val_a, result_test_t, result_test_a, model, spent_time):
fname = os.path.join(args.log_dir, 'results')
# save confusion matrix and print one line of stats
val_stats = confusion_matrix(result_val_t, result_val_a, args.log_dir, 'results.txt')
one_liner = str(vars(args)) + ' # val: '
one_liner += ' '.join(["%.3f" % stat for stat in val_stats])
test_stats = 0
if args.calc_test_accuracy:
test_stats = confusion_matrix(result_test_t, result_test_a, args.log_dir, 'results.txt')
one_liner += ' # test: ' + ' '.join(["%.3f" % stat for stat in test_stats])
print(fname + ': ' + one_liner + ' # ' + str(spent_time))
# save all results in binary file
torch.save((result_val_t, result_val_a, model.state_dict(),
val_stats, one_liner, args), fname + '.pt')
return val_stats, test_stats
def main():
parser = file_parser.get_parser()
args = parser.parse_args()
# initialize seeds
misc_utils.init_seed(args.seed)
# set up loader
# 2 options: class_incremental and task_incremental
# experiments in the paper only use task_incremental
Loader = importlib.import_module('dataloaders.' + args.loader)
loader = Loader.IncrementalLoader(args, seed=args.seed)
n_inputs, n_outputs, n_tasks = loader.get_dataset_info()
# setup logging
timestamp = misc_utils.get_date_time()
args.log_dir, args.tf_dir = misc_utils.log_dir(args, timestamp)
# load model
Model = importlib.import_module('model.' + args.model)
model = Model.Net(n_inputs, n_outputs, n_tasks, args)
if args.cuda:
try:
model.net.cuda()
except:
pass
# run model on loader
if args.model == "iid2":
# oracle baseline with all task data shown at same time
result_val_t, result_val_a, result_test_t, result_test_a, spent_time = life_experience_iid(
model, loader, args)
else:
# for all the CL baselines
result_val_t, result_val_a, result_test_t, result_test_a, spent_time = life_experience(
model, loader, args)
# save results in files or print on terminal
save_results(args, result_val_t, result_val_a, result_test_t, result_test_a, model, spent_time)
if __name__ == "__main__":
main()
| 6,437 | 32.185567 | 154 | py |
La-MAML | La-MAML-main/main_multi_task.py | import time
import os
from tqdm import tqdm
import torch
from torch.autograd import Variable
def eval_iid_tasks(model, tasks, args):
model.eval()
result = []
for t, task_loader in enumerate(tasks):
rt = 0
for (i, (x, y, super_y)) in enumerate(task_loader):
if args.cuda:
x = x.cuda()
_, p = torch.max(model(x, super_y).data.cpu(), 1, keepdim=False)
rt += (p == y).float().sum()
result.append(rt / len(task_loader.dataset))
return result
def life_experience_iid(model, inc_loader, args):
    """Oracle (iid) baseline: train on all tasks' data jointly for
    `args.n_epochs` epochs, logging validation accuracy every
    `args.log_every` iterations.

    Returns (val task ids, val accuracies, test task ids, test accuracies,
    wall-clock seconds) with the first four converted to tensors.
    """
    result_val_a = []
    result_test_a = []
    result_val_t = []
    result_test_t = []
    time_start = time.time()
    test_tasks = inc_loader.get_tasks("test")
    val_tasks = inc_loader.get_tasks("val")
    # A single "task" here contains all classes at once (joint training).
    task_info, train_loader, _, _ = inc_loader.new_task()
    evaluator = eval_iid_tasks
    for ep in range(args.n_epochs):
        model.real_epoch = ep
        prog_bar = tqdm(train_loader)
        for (i, (x, y, super_y)) in enumerate(prog_bar):
            # Periodic validation snapshot (also taken at iteration 0).
            if((i % args.log_every) == 0):
                result_val_a.append(evaluator(model, val_tasks, args))
                result_val_t.append(task_info["task"])
            v_x = x
            v_y = y
            # Linear architectures expect flattened inputs.
            if args.arch == 'linear':
                v_x = x.view(x.size(0), -1)
            super_v_y = super_y
            if args.cuda:
                v_x = v_x.cuda()
                v_y = v_y.cuda()
                super_v_y = super_v_y.cuda()
            model.train()
            # `observe` performs one optimization step and returns the loss.
            loss = model.observe(Variable(v_x), Variable(v_y), Variable(super_v_y))
            prog_bar.set_description(
                "Epoch: {}/{} | Iter: {} | Loss: {} | Acc: Total: {}".format(
                    ep+1, args.n_epochs, i%(1000*args.n_epochs), round(loss, 3),
                    round(sum(result_val_a[-1]).item()/len(result_val_a[-1]), 5)
                )
            )
    # Final evaluation after all epochs.
    result_val_a.append(evaluator(model, val_tasks, args))
    result_val_t.append(task_info["task"])
    if args.calc_test_accuracy:
        result_test_a.append(evaluator(model, test_tasks, args))
        result_test_t.append(task_info["task"])
    print("####Final Validation Accuracy####")
    print("Final Results:- \n Total Accuracy: {} \n Individual Accuracy: {}".format(sum(result_val_a[-1])/len(result_val_a[-1]), result_val_a[-1]))
    if args.calc_test_accuracy:
        print("####Final Test Accuracy####")
        print("Final Results:- \n Total Accuracy: {} \n Individual Accuracy: {}".format(sum(result_test_a[-1])/len(result_test_a[-1]), result_test_a[-1]))
    time_end = time.time()
    time_spent = time_end - time_start
    return torch.Tensor(result_val_t), torch.Tensor(result_val_a), torch.Tensor(result_test_t), torch.Tensor(result_test_a), time_spent
| 2,818 | 30.674157 | 154 | py |
La-MAML | La-MAML-main/metrics/metrics.py | ### We directly copied the metrics.py model file from the GEM project https://github.com/facebookresearch/GradientEpisodicMemory
# Copyright 2019-present, IBM Research
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import print_function
import ipdb
import os
import matplotlib.cm as cm
import matplotlib.pyplot as plt
import numpy as np
import torch
def task_changes(result_t):
    """Return (n_tasks, change_points) for a sequence of task ids.

    `n_tasks` is max id + 1; `change_points` lists the indices whose task id
    differs from the preceding entry.
    """
    n_tasks = int(result_t.max() + 1)
    # A change point is any position whose task id differs from its
    # predecessor (position 0 can never be one).
    change_points = [i for i in range(1, len(result_t))
                     if result_t[i] != result_t[i - 1]]
    return n_tasks, change_points
def confusion_matrix(result_t, result_a, log_dir, fname=None):
    """Compute GEM-style continual-learning metrics from an accuracy log.

    Args:
        result_t: 1-D tensor of task ids, one per evaluation step.
        result_a: 2-D tensor (steps x tasks) of per-task accuracies.
        log_dir: directory where the text report and plot are written.
        fname: optional report file name (relative to `log_dir`). When
            None, no text report is written.

    Returns:
        [diagonal accuracy, final accuracy, backward transfer,
         forward transfer] (each a 0-dim tensor).
    """
    nt, changes = task_changes(result_t)
    # Bug fix: the original joined unconditionally, so fname=None raised a
    # TypeError in os.path.join before the `is not None` guard below.
    if fname is not None:
        fname = os.path.join(log_dir, fname)
    baseline = result_a[0]
    # Index of the last evaluation step of each task.
    changes = torch.LongTensor(changes + [result_a.size(0)]) - 1
    result = result_a[changes]
    # acc[t] equals result[t, t]: accuracy right after finishing task t.
    acc = result.diag()
    fin = result[nt - 1]
    # bwt[t] equals result[T, t] - acc[t]: change after training later tasks.
    bwt = result[nt - 1] - acc
    # fwt[t] equals result[t-1, t] - baseline[t]: zero-shot forward transfer.
    fwt = torch.zeros(nt)
    for t in range(1, nt):
        fwt[t] = result[t - 1, t] - baseline[t]
    if fname is not None:
        # `with` guarantees the report file is closed even on error.
        with open(fname, 'w') as f:
            print(' '.join(['%.4f' % r for r in baseline]), file=f)
            print('|', file=f)
            for row in range(result.size(0)):
                print(' '.join(['%.4f' % r for r in result[row]]), file=f)
            print('', file=f)
            print('Diagonal Accuracy: %.4f' % acc.mean(), file=f)
            print('Final Accuracy: %.4f' % fin.mean(), file=f)
            print('Backward: %.4f' % bwt.mean(), file=f)
            print('Forward: %.4f' % fwt.mean(), file=f)
    # Plot per-task accuracy over time.
    colors = cm.nipy_spectral(np.linspace(0, 1, len(result)))
    figure = plt.figure(figsize=(8, 8))
    data = np.array(result_a)
    for i in range(len(data[0])):
        plt.plot(range(data.shape[0]), data[:, i], label=str(i), color=colors[i], linewidth=2)
    plt.savefig(log_dir + '/' + 'task_wise_accuracy.png')
    # Release the figure so repeated calls don't leak matplotlib memory.
    plt.close(figure)
    stats = [acc.mean(), fin.mean(), bwt.mean(), fwt.mean()]
    return stats
| 2,348 | 28 | 128 | py |
La-MAML | La-MAML-main/dataloaders/idataset.py |
import numpy as np
from PIL import Image
import torch
from torchvision import datasets, transforms
import os
from dataloaders import cifar_info
class DummyDataset(torch.utils.data.Dataset):
    """In-memory dataset over raw samples `x` and labels `y`.

    `pretrsf` (if given) runs on the raw sample first; samples that are not
    ImageNet-style are then converted to a PIL image before `trsf` runs.
    When coarse labels `super_y` are supplied, items are (x, y, super_y)
    triples, otherwise (x, y) pairs.
    """
    def __init__(self, x, y, trsf, pretrsf = None, imgnet_like = False, super_y = None):
        self.x, self.y = x, y
        self.super_y = super_y
        # Transforms applied after (trsf) and before (pretrsf) PIL conversion.
        self.trsf = trsf
        self.pretrsf = pretrsf
        # ImageNet-style samples skip the array -> PIL conversion.
        self.imgnet_like = imgnet_like
    def __len__(self):
        return self.x.shape[0]
    def __getitem__(self, idx):
        sample, label = self.x[idx], self.y[idx]
        if self.pretrsf is not None:
            sample = self.pretrsf(sample)
        if not self.imgnet_like:
            sample = Image.fromarray(sample)
        sample = self.trsf(sample)
        if self.super_y is not None:
            return sample, label, self.super_y[idx]
        return sample, label
class DummyArrayDataset(torch.utils.data.Dataset):
    """Bare-bones dataset returning raw (x[idx], y[idx]) pairs, no transforms."""
    def __init__(self, x, y):
        self.x, self.y = x, y
    def __len__(self):
        return self.x.shape[0]
    def __getitem__(self, idx):
        return self.x[idx], self.y[idx]
def _get_datasets(dataset_names):
    """Resolve a '-'-separated string of dataset names to handler classes."""
    return list(map(_get_dataset, dataset_names.split("-")))
def _get_dataset(dataset_name):
dataset_name = dataset_name.lower().strip()
if dataset_name == "cifar10":
return iCIFAR10
elif dataset_name == "cifar100":
return iCIFAR100
elif dataset_name == "tinyimagenet":
return iImgnet
else:
raise NotImplementedError("Unknown dataset {}.".format(dataset_name))
class DataHandler:
    """Base descriptor for a dataset: the underlying torchvision dataset
    class, its transform pipelines, and an optional fixed class order."""
    base_dataset = None  # torchvision dataset class, set by subclasses
    train_transforms = []  # augmentations applied only at train time
    common_transforms = [transforms.ToTensor()]  # applied to every split
    class_order = None  # optional fixed presentation order of class labels
class iImgnet(DataHandler):
    """TinyImageNet handler: samples come from an ImageFolder as
    (path, target) pairs, loaded to RGB by `top_transforms` before any
    augmentation."""
    base_dataset = datasets.ImageFolder
    # Runs on the raw (path, target) sample: open the file and force RGB.
    top_transforms = [
        lambda x: Image.open(x[0]).convert('RGB'),
    ]
    train_transforms = [
        transforms.RandomCrop(64, padding=4),
        transforms.RandomHorizontalFlip() #,
        #transforms.ColorJitter(brightness=63 / 255)
    ]
    common_transforms = [
        transforms.Resize((64, 64)),
        transforms.ToTensor(),
        # transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
        transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
    ]
    # 200 classes, presented in index order by default.
    class_order = [
        i for i in range(200)
    ]
class iCIFAR10(DataHandler):
    """CIFAR-10 handler: torchvision dataset plus the coarse-label-aware
    variant from `cifar_info`, with the usual crop/flip/jitter pipeline."""
    base_dataset = datasets.cifar.CIFAR10
    # Variant that also exposes coarse ("super") labels.
    base_dataset_hierarchy = cifar_info.CIFAR10
    # Samples are already numpy arrays; nothing to do before PIL conversion.
    top_transforms = [
    ]
    train_transforms = [
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ColorJitter(brightness=63 / 255)
    ]
    common_transforms = [
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
    ]
class iCIFAR100(iCIFAR10):
    """CIFAR-100 handler; overrides normalization statistics and provides
    two fixed class orders (a random one and a superclass-grouped one)."""
    base_dataset = datasets.cifar.CIFAR100
    base_dataset_hierarchy = cifar_info.CIFAR100
    common_transforms = [
        transforms.ToTensor(),
        transforms.Normalize((0.5071, 0.4867, 0.4408), (0.2675, 0.2565, 0.2761)),
    ]
    # update: class order can now be chosen randomly since it just depends on seed
    class_order = [
        87, 0, 52, 58, 44, 91, 68, 97, 51, 15, 94, 92, 10, 72, 49, 78, 61, 14, 8, 86, 84, 96, 18,
        24, 32, 45, 88, 11, 4, 67, 69, 66, 77, 47, 79, 93, 29, 50, 57, 83, 17, 81, 41, 12, 37, 59,
        25, 20, 80, 73, 1, 28, 6, 46, 62, 82, 53, 9, 31, 75, 38, 63, 33, 74, 27, 22, 36, 3, 16, 21,
        60, 19, 70, 90, 89, 43, 5, 42, 65, 76, 40, 30, 23, 85, 2, 95, 56, 48, 71, 64, 98, 13, 99, 7,
        34, 55, 54, 26, 35, 39
    ] ## some random class order
    class_order_super = [4, 95, 55, 30, 72, 73, 1, 67, 32, 91, 62, 92, 70, 54, 82, 10, 61, 28, 9, 16, 53,
                        83, 51, 0, 57, 87, 86, 40, 39, 22, 25, 5, 94, 84, 20, 18, 6, 7, 14, 24, 88, 97,
                        3, 43, 42, 17, 37, 12, 68, 76, 71, 60, 33, 23, 49, 38, 21, 15, 31, 19, 75, 66, 34,
                        63, 64, 45, 99, 26, 77, 79, 46, 98, 11, 2, 35, 93, 78, 44, 29, 27, 80, 65, 74, 50,
                        36, 52, 96, 56, 47, 59, 90, 58, 48, 13, 8, 69, 81, 41, 89, 85
    ] ## parent-wise split
| 4,465 | 29.8 | 105 | py |
La-MAML | La-MAML-main/dataloaders/cifar_info.py | from __future__ import print_function
from PIL import Image
import os
import os.path
import numpy as np
import sys
if sys.version_info[0] == 2:
import cPickle as pickle
else:
import pickle
from torchvision.datasets.vision import VisionDataset
from torchvision.datasets.utils import check_integrity, download_and_extract_archive
import ipdb
# defining the mapping from parent classes to fine-grained classes in cifar
# in case one needs to split tasks by parent class
# Maps each of the 20 CIFAR-100 coarse ("super") classes to its 5 fine
# classes; consumed by CIFAR10.get_class_ids below to build low->high maps.
super_class_to_class = {
    'aquatic_mammals' : ['beaver', 'dolphin', 'otter', 'seal', 'whale'],
    'fish' : ['aquarium_fish', 'flatfish', 'ray', 'shark', 'trout'],
    'flowers' : ['orchid', 'poppy', 'rose', 'sunflower', 'tulip'],
    'food_containers' : ['bottle', 'bowl', 'can', 'cup', 'plate'],
    'fruit_and_vegetables' : ['apple', 'mushroom', 'orange', 'pear', 'sweet_pepper'],
    'household_electrical_devices' : ['clock', 'keyboard', 'lamp', 'telephone', 'television'],
    'household_furniture' : ['bed', 'chair', 'couch', 'table', 'wardrobe'],
    'insects' : ['bee', 'beetle', 'butterfly', 'caterpillar', 'cockroach'],
    'large_carnivores' : ['bear', 'leopard', 'lion', 'tiger', 'wolf'],
    'large_man-made_outdoor_things' : ['bridge', 'castle', 'house', 'road', 'skyscraper'],
    'large_natural_outdoor_scenes' : ['cloud', 'forest', 'mountain', 'plain', 'sea'],
    'large_omnivores_and_herbivores' : ['camel', 'cattle', 'chimpanzee', 'elephant', 'kangaroo'],
    'medium_mammals' : ['fox', 'porcupine', 'possum', 'raccoon', 'skunk'],
    'non-insect_invertebrates' : ['crab', 'lobster', 'snail', 'spider', 'worm'],
    'people' : ['baby', 'boy', 'girl', 'man', 'woman'],
    'reptiles' : ['crocodile', 'dinosaur', 'lizard', 'snake', 'turtle'],
    'small_mammals' : ['hamster', 'mouse', 'rabbit', 'shrew', 'squirrel'],
    'trees': ['maple_tree', 'oak_tree', 'palm_tree', 'pine_tree', 'willow_tree'],
    'vehicles_1' : ['bicycle', 'bus', 'motorcycle', 'pickup_truck', 'train'],
    'vehicles_2' : ['lawn_mower', 'rocket', 'streetcar', 'tank', 'tractor']
}
class CIFAR10(VisionDataset):
    """`CIFAR10 <https://www.cs.toronto.edu/~kriz/cifar.html>`_ Dataset.
    Args:
        root (string): Root directory of dataset where directory
            ``cifar-10-batches-py`` exists or will be saved to if download is set to True.
        train (bool, optional): If True, creates dataset from training set, otherwise
            creates from test set.
        transform (callable, optional): A function/transform that takes in an PIL image
            and returns a transformed version. E.g, ``transforms.RandomCrop``
        target_transform (callable, optional): A function/transform that takes in the
            target and transforms it.
        download (bool, optional): If true, downloads the dataset from the internet and
            puts it in root directory. If dataset is already downloaded, it is not
            downloaded again.
    """
    base_folder = 'cifar-10-batches-py'
    url = "https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz"
    filename = "cifar-10-python.tar.gz"
    tgz_md5 = 'c58f30108f718f92721af3b95e74349a'
    # (file name, md5) pairs for the pickled train/test batches.
    train_list = [
        ['data_batch_1', 'c99cafc152244af753f735de768cd75f'],
        ['data_batch_2', 'd4bba439e000b95fd0a9bffe97cbabec'],
        ['data_batch_3', '54ebc095f3ab1f0389bbae665268c751'],
        ['data_batch_4', '634d18415352ddfa80567beed471001a'],
        ['data_batch_5', '482c414d41f54cd18b22e5b47cb7c3cb'],
    ]
    test_list = [
        ['test_batch', '40351d587109b95175f43aff81a1287e'],
    ]
    # NOTE(review): this meta dict defines no 'coarse_key', yet _load_meta
    # below reads self.meta['coarse_key']; only the CIFAR100 subclass
    # overrides meta with that key, so loading appears to work only through
    # CIFAR100 — confirm CIFAR10 is never instantiated directly.
    meta = {
        'filename': 'batches.meta',
        'key': 'label_names',
        'md5': '5ff9c542aee3614f3951f8cda6e48888',
    }
    def __init__(self, root, train=True, transform=None, target_transform=None,
                 download=False):
        """Load the pickled batches into memory, keeping both fine targets
        and coarse ('super') targets."""
        super(CIFAR10, self).__init__(root, transform=transform,
                                      target_transform=target_transform)
        self.train = train  # training set or test set
        if download:
            self.download()
        if not self._check_integrity():
            raise RuntimeError('Dataset not found or corrupted.' +
                               ' You can use download=True to download it')
        if self.train:
            downloaded_list = self.train_list
        else:
            downloaded_list = self.test_list
        # When True, __getitem__ returns the coarse label as the target.
        self.high_level_supervise = True
        self.data = []
        self.targets = []
        self.super_targets = []
        # now load the picked numpy arrays
        for file_name, checksum in downloaded_list:
            file_path = os.path.join(self.root, self.base_folder, file_name)
            with open(file_path, 'rb') as f:
                if sys.version_info[0] == 2:
                    entry = pickle.load(f)
                else:
                    entry = pickle.load(f, encoding='latin1')
                self.data.append(entry['data'])
                if 'labels' in entry:
                    self.targets.extend(entry['labels'])
                else:
                    self.targets.extend(entry['fine_labels'])
                # NOTE(review): assumes every entry carries 'coarse_labels';
                # that holds for the CIFAR-100 pickles — verify for CIFAR-10
                # batches, which use 'labels' above.
                self.super_targets.extend(entry['coarse_labels'])
        self.data = np.vstack(self.data).reshape(-1, 3, 32, 32)
        self.data = self.data.transpose((0, 2, 3, 1))  # convert to HWC
        self._load_meta()
    def _load_meta(self):
        """Read class-name metadata and build class-id lookup tables."""
        path = os.path.join(self.root, self.base_folder, self.meta['filename'])
        if not check_integrity(path, self.meta['md5']):
            raise RuntimeError('Dataset metadata file not found or corrupted.' +
                               ' You can use download=True to download it')
        with open(path, 'rb') as infile:
            if sys.version_info[0] == 2:
                data = pickle.load(infile)
            else:
                data = pickle.load(infile, encoding='latin1')
            self.classes = data[self.meta['key']]
            # Requires meta['coarse_key'] — see the NOTE on `meta` above.
            self.super_classes = data[self.meta['coarse_key']]
        self.get_class_ids()
    def get_class_ids(self):
        """Build name->id maps and a (fine_id, coarse_id) pairing table
        derived from the module-level `super_class_to_class` hierarchy."""
        self.class_to_idx = {_class: i for i, _class in enumerate(self.classes)}
        self.super_class_to_idx = {_class: i for i, _class in enumerate(self.super_classes)}
        high_ids = []
        low_ids = []
        low_idxs = np.arange(len(self.classes))
        for key in super_class_to_class:
            for classes in super_class_to_class[key]:
                high_ids.append(self.super_class_to_idx[key])
                low_ids.append(self.class_to_idx[classes])
        high_ids_np = np.array(high_ids)
        low_ids_np = np.array(low_ids)
        # Column 0: fine class id, column 1: its coarse (super) class id.
        self.low_high_map = np.stack([low_ids_np, high_ids_np], axis = 1)
    def __getitem__(self, index):
        """
        Args:
            index (int): Index
        Returns:
            tuple: (image, target) where target is index of the target class.
        """
        img, target, super_target = self.data[index], self.targets[index], self.super_targets[index]
        # With high-level supervision, the coarse label replaces the target.
        if(self.high_level_supervise):
            target = super_target
        # doing this so that it is consistent with all other datasets
        # to return a PIL Image
        img = Image.fromarray(img)
        if self.transform is not None:
            img = self.transform(img)
        if self.target_transform is not None:
            target = self.target_transform(target)
        return img, target, super_target
    def __len__(self):
        return len(self.data)
    def _check_integrity(self):
        """Return True iff every train/test batch exists with a valid md5."""
        root = self.root
        for fentry in (self.train_list + self.test_list):
            filename, md5 = fentry[0], fentry[1]
            fpath = os.path.join(root, self.base_folder, filename)
            if not check_integrity(fpath, md5):
                return False
        return True
    def download(self):
        """Fetch and extract the archive unless it is already present."""
        if self._check_integrity():
            print('Files already downloaded and verified')
            return
        download_and_extract_archive(self.url, self.root, filename=self.filename, md5=self.tgz_md5)
    def extra_repr(self):
        return "Split: {}".format("Train" if self.train is True else "Test")
class CIFAR100(CIFAR10):
    """`CIFAR100 <https://www.cs.toronto.edu/~kriz/cifar.html>`_ Dataset.
    This is a subclass of the `CIFAR10` Dataset.
    """
    base_folder = 'cifar-100-python'
    url = "https://www.cs.toronto.edu/~kriz/cifar-100-python.tar.gz"
    filename = "cifar-100-python.tar.gz"
    tgz_md5 = 'eb9058c3a382ffc7106e4002c42a8d85'
    # CIFAR-100 ships a single pickled train file and a single test file.
    train_list = [
        ['train', '16019d7e3df5f24257cddd939b257f8d'],
    ]
    test_list = [
        ['test', 'f0ef6b0ae62326f3e7ffdfab6717acfc'],
    ]
    # Unlike CIFAR10.meta, this defines 'coarse_key', which _load_meta needs.
    meta = {
        'filename': 'meta',
        'key': 'fine_label_names',
        'coarse_key': 'coarse_label_names',
        'md5': '7973b15100ade9c7d40fb424638fde48',
    }
| 8,912 | 36.607595 | 100 | py |
La-MAML | La-MAML-main/dataloaders/task_sampler.py | # coding=utf-8
import numpy as np
import torch
import warnings
import ipdb
class MultiTaskSampler(object):
    '''
    Yield one batch of dataset indexes per iteration (episode).

    Each batch holds 'num_samples' indexes for each of 'classes_per_it'
    randomly picked classes, shuffled together.  __len__ returns the number
    of episodes per epoch (i.e. 'iterations').
    '''
    def __init__(self, labels, classes_per_it, num_samples, iterations):
        '''
        Args:
        - labels: iterable with one label per dataset sample; the sampler
          yields positions into this iterable.
        - classes_per_it: number of random classes for each iteration
        - num_samples: samples per class per iteration (support + query)
        - iterations: number of iterations (episodes) per epoch
        '''
        super(MultiTaskSampler, self).__init__()
        self.labels = labels
        self.classes_per_it = classes_per_it
        self.sample_per_class = num_samples
        self.iterations = iterations
        self.classes, self.counts = np.unique(self.labels, return_counts=True)
        if self.classes_per_it > len(self.classes):
            warnings.warn('Number of classes per iteration is higher than the number of unique labels')
            self.classes_per_it = len(self.classes)
        self.classes = torch.LongTensor(self.classes)
        # Kept for backward compatibility with code that inspects the sampler.
        self.idxs = range(len(self.labels))
        # indexes[c] lists the dataset positions of class c's samples, padded
        # with NaN up to the largest class size; numel_per_class[c] counts the
        # valid entries in row c.
        self.indexes = torch.full((len(self.classes), int(max(self.counts))), float('nan'))
        self.numel_per_class = torch.zeros_like(self.classes)
        for idx, label in enumerate(self.labels):
            label_idx = (self.classes == label).nonzero()[0].item()
            # The next free slot in the row is exactly numel_per_class[label_idx],
            # replacing the original O(n^2) scan for the first NaN.
            self.indexes[label_idx, int(self.numel_per_class[label_idx])] = idx
            self.numel_per_class[label_idx] += 1
    def __iter__(self):
        '''
        Yield one shuffled batch of dataset indexes per episode.
        '''
        spc = self.sample_per_class
        cpi = self.classes_per_it
        for it in range(self.iterations):
            batch_size = spc * cpi
            batch = torch.LongTensor(batch_size)
            c_idxs = torch.randperm(len(self.classes))[:cpi]
            for i, c in enumerate(self.classes[c_idxs]):
                s = slice(i * spc, (i + 1) * spc)
                label_idx = (self.classes == c).nonzero()[0].item()
                # Bug fix: torch.randperm requires a plain int; the original
                # passed the 0-dim tensor numel_per_class[label_idx], which
                # raises a TypeError on current PyTorch.
                sample_idxs = torch.randperm(int(self.numel_per_class[label_idx]))[:spc]
                batch[s] = self.indexes[label_idx][sample_idxs].long()
            batch = batch[torch.randperm(len(batch))]
            yield batch
    def __len__(self):
        '''
        returns the number of iterations (episodes) per epoch
        '''
        return self.iterations
| 3,363 | 39.047619 | 103 | py |
La-MAML | La-MAML-main/dataloaders/class_incremental_loader.py | import random
import numpy as np
import torch
from PIL import Image
from torch.utils.data import DataLoader
from torch.utils.data.sampler import SubsetRandomSampler
from torchvision import datasets, transforms
from dataloaders.idataset import _get_datasets, DummyDataset
import random
import ipdb
# --------
# Datasets CIFAR and TINYIMAGENET
# --------
class IncrementalLoader:
    """Serves one classification task at a time: task t covers the classes
    in [sum(increments[:t]), sum(increments[:t+1])) under the chosen class
    order.  Backs the CIFAR / TinyImageNet incremental experiments."""
    def __init__(
        self,
        opt,
        shuffle=True,
        seed=1,
    ):
        self._opt = opt
        dataset_name=opt.dataset
        validation_split=opt.validation
        self.increment=opt.increment
        datasets = _get_datasets(dataset_name)
        # Load the raw splits and derive self.increments / self.class_order.
        self._setup_data(
            datasets,
            class_order_type=opt.class_order,
            seed=seed,
            increment=self.increment,
            validation_split=validation_split
        )
        self.validation_split = validation_split
        # Transform pipelines come from the first dataset handler.
        self.train_transforms = datasets[0].train_transforms
        self.common_transforms = datasets[0].common_transforms
        self.top_transforms = datasets[0].top_transforms
        self._current_task = 0
        self._batch_size = opt.batch_size
        self._test_batch_size = opt.test_batch_size
        self._workers = opt.workers
        self._shuffle = shuffle
        self._setup_test_tasks(validation_split)
    @property
    def n_tasks(self):
        # One task per entry in the increments list.
        return len(self.increments)
    def new_task(self, memory=None):
        """Advance to the next task and return (task_info, train_loader,
        val_loader, test_loader).  `memory`, if given, is an (x, y) replay
        buffer concatenated onto the training data; the test loader covers
        ALL classes seen so far (low_range defaults to 0)."""
        if self._current_task >= len(self.increments):
            raise Exception("No more tasks.")
        min_class = sum(self.increments[:self._current_task])
        max_class = sum(self.increments[:self._current_task + 1])
        x_train, y_train = self._select(
            self.data_train, self.targets_train, low_range=min_class, high_range=max_class
        )
        x_val, y_val = self._select(
            self.data_val, self.targets_val, low_range=min_class, high_range=max_class
        )
        x_test, y_test = self._select(self.data_test, self.targets_test, high_range=max_class)
        if memory is not None:
            data_memory, targets_memory = memory
            print("Set memory of size: {}.".format(data_memory.shape[0]))
            x_train = np.concatenate((x_train, data_memory))
            y_train = np.concatenate((y_train, targets_memory))
        train_loader = self._get_loader(x_train, y_train, mode="train")
        # Validation uses the train-mode transforms; None when no val split.
        val_loader = self._get_loader(x_val, y_val, mode="train") if len(x_val) > 0 else None
        test_loader = self._get_loader(x_test, y_test, mode="test")
        task_info = {
            "min_class": min_class,
            "max_class": max_class,
            "increment": self.increments[self._current_task],
            "task": self._current_task,
            "max_task": len(self.increments),
            "n_train_data": x_train.shape[0],
            "n_test_data": x_test.shape[0]
        }
        self._current_task += 1
        return task_info, train_loader, val_loader, test_loader
    def _setup_test_tasks(self, validation_split):
        """Pre-build one evaluation loader per task (test, and val when a
        validation split was held out)."""
        self.test_tasks = []
        self.val_tasks = []
        for i in range(len(self.increments)):
            min_class = sum(self.increments[:i])
            max_class = sum(self.increments[:i + 1])
            x_test, y_test = self._select(self.data_test, self.targets_test, low_range=min_class, high_range=max_class)
            self.test_tasks.append(self._get_loader(x_test, y_test, mode="test"))
            if validation_split > 0.0:
                x_val, y_val = self._select(self.data_val, self.targets_val, low_range=min_class, high_range=max_class)
                self.val_tasks.append(self._get_loader(x_val, y_val, mode="test"))
    def get_tasks(self, dataset_type='test'):
        """Return the per-task eval loaders; 'val' falls back to the test
        loaders when no validation split exists."""
        if dataset_type == 'val':
            if self.validation_split > 0.0:
                return self.val_tasks
            else:
                return self.test_tasks
        elif dataset_type == 'test':
            return self.test_tasks
        else:
            raise NotImplementedError("Unknown mode {}.".format(dataset_type))
    def get_dataset_info(self):
        """Return (flattened input size, total class count, task count)."""
        if(self._opt.dataset == 'tinyimagenet'):
            n_inputs = 3*64*64
        else:
            # data is stored HWC; flatten to C*H*W.
            n_inputs = self.data_train.shape[3]*self.data_train.shape[1]*self.data_train.shape[2]
        n_outputs = self._opt.increment * len(self.increments)
        n_task = len(self.increments)
        return n_inputs, n_outputs, n_task
    def _select(self, x, y, low_range=0, high_range=0):
        # Keep samples whose (remapped) label falls in [low_range, high_range).
        idxes = np.where(np.logical_and(y >= low_range, y < high_range))[0]
        return x[idxes], y[idxes]
    def _get_loader(self, x, y, shuffle=True, mode="train"):
        """Wrap (x, y) in a DataLoader with the mode's transform pipeline."""
        if mode == "train":
            pretrsf = transforms.Compose([*self.top_transforms])
            trsf = transforms.Compose([*self.train_transforms, *self.common_transforms])
            batch_size = self._batch_size
        elif mode == "test":
            pretrsf = transforms.Compose([*self.top_transforms])
            trsf = transforms.Compose(self.common_transforms)
            batch_size = self._test_batch_size
        elif mode == "flip":
            # Deterministic horizontal flip, used for flip-augmented eval.
            trsf = transforms.Compose(
                [transforms.RandomHorizontalFlip(p=1.), *self.common_transforms]
            )
            batch_size = self._test_batch_size
        else:
            raise NotImplementedError("Unknown mode {}.".format(mode))
        return DataLoader(
            DummyDataset(x, y, trsf, pretrsf, self._opt.dataset=='tinyimagenet'),
            batch_size=batch_size,
            shuffle=shuffle,
            num_workers=self._workers
        )
    def _setup_data(self, datasets, class_order_type=False, seed=1, increment=10, validation_split=0.):
        """Load every dataset, split off validation data, remap labels to the
        chosen class order, and populate data_*/targets_*/increments."""
        # FIXME: handles online loading of images
        self.data_train, self.targets_train = [], []
        self.data_test, self.targets_test = [], []
        self.data_val, self.targets_val = [], []
        self.increments = []
        self.class_order = []
        current_class_idx = 0 # When using multiple datasets
        for dataset in datasets:
            if(self._opt.dataset == 'tinyimagenet'):
                # ImageFolder-style: samples are (path, target) pairs.
                root_path = self._opt.data_path
                train_dataset = dataset.base_dataset(root_path + 'train/')
                test_dataset = dataset.base_dataset(root_path + 'val/')
                train_dataset.data = train_dataset.samples
                test_dataset.data = test_dataset.samples
                x_train, y_train = train_dataset.data, np.array(train_dataset.targets)
                x_val, y_val, x_train, y_train = self._list_split_per_class(
                    x_train, y_train, validation_split
                )
                x_test, y_test = test_dataset.data, np.array(test_dataset.targets)
                order = [i for i in range(len(np.unique(y_train)))]
                if class_order_type == 'random':
                    random.seed(seed)  # Ensure that following order is determined by seed:
                    random.shuffle(order)
                    print("Class order:", order)
                elif class_order_type == 'old' and dataset.class_order is not None:
                    order = dataset.class_order
                else:
                    print("Classes are presented in a chronological order")
            else:
                # CIFAR-style: in-memory numpy arrays.
                root_path = self._opt.data_path
                train_dataset = dataset.base_dataset(root_path, train=True, download=True)
                test_dataset = dataset.base_dataset(root_path, train=False, download=True)
                x_train, y_train = train_dataset.data, np.array(train_dataset.targets)
                x_val, y_val, x_train, y_train = self._split_per_class(
                    x_train, y_train, validation_split
                )
                x_test, y_test = test_dataset.data, np.array(test_dataset.targets)
                order = [i for i in range(len(np.unique(y_train)))]
                if class_order_type == 'random':
                    random.seed(seed)  # Ensure that following order is determined by seed:
                    random.shuffle(order)
                    print("Class order:", order)
                elif class_order_type == 'old' and dataset.class_order is not None:
                    order = dataset.class_order
                elif class_order_type == 'super' and dataset.class_order_super is not None:
                    order = dataset.class_order_super
                else:
                    print("Classes are presented in a chronological order")
            self.class_order.append(order)
            # Remap raw labels so class `order[i]` becomes label i, then
            # offset so labels stay unique across multiple datasets.
            y_train = self._map_new_class_index(y_train, order)
            y_val = self._map_new_class_index(y_val, order)
            y_test = self._map_new_class_index(y_test, order)
            y_train += current_class_idx
            y_val += current_class_idx
            y_test += current_class_idx
            current_class_idx += len(order)
            if len(datasets) > 1:
                self.increments.append(len(order))
            else:
                self.increments = [increment for _ in range(len(order) // increment)]
            self.data_train.append(x_train)
            self.targets_train.append(y_train)
            self.data_val.append(x_val)
            self.targets_val.append(y_val)
            self.data_test.append(x_test)
            self.targets_test.append(y_test)
        self.data_train = np.concatenate(self.data_train)
        self.targets_train = np.concatenate(self.targets_train)
        self.data_val = np.concatenate(self.data_val)
        self.targets_val = np.concatenate(self.targets_val)
        self.data_test = np.concatenate(self.data_test)
        self.targets_test = np.concatenate(self.targets_test)
    @staticmethod
    def _map_new_class_index(y, order):
        """Transforms targets for new class order."""
        return np.array(list(map(lambda x: order.index(x), y)))
    @staticmethod
    def _split_per_class(x, y, validation_split=0.):
        """Splits train data for a subset of validation data.
        Split is done so that each class has a much data.
        """
        shuffled_indexes = np.random.permutation(x.shape[0])
        x = x[shuffled_indexes]
        y = y[shuffled_indexes]
        x_val, y_val = [], []
        x_train, y_train = [], []
        for class_id in np.unique(y):
            class_indexes = np.where(y == class_id)[0]
            # The first `validation_split` fraction of each class goes to val.
            nb_val_elts = int(class_indexes.shape[0] * validation_split)
            val_indexes = class_indexes[:nb_val_elts]
            train_indexes = class_indexes[nb_val_elts:]
            x_val.append(x[val_indexes])
            y_val.append(y[val_indexes])
            x_train.append(x[train_indexes])
            y_train.append(y[train_indexes])
        x_val, y_val = np.concatenate(x_val), np.concatenate(y_val)
        x_train, y_train = np.concatenate(x_train), np.concatenate(y_train)
        return x_val, y_val, x_train, y_train
    @staticmethod
    def _list_split_per_class(x, y, validation_split=0.):
        """Splits train data for a subset of validation data.
        Split is done so that each class has a much data.
        """
        # List-of-samples variant of _split_per_class (x holds (path, target)
        # tuples rather than a numpy array).
        c = list(zip(x, y))
        random.shuffle(c)
        x, y = zip(*c)
        x_val, y_val = [], []
        x_train, y_train = [], []
        for class_id in np.unique(y):
            class_indexes = np.where(y == class_id)[0]
            nb_val_elts = int(class_indexes.shape[0] * validation_split)
            val_indexes = class_indexes[:nb_val_elts]
            train_indexes = class_indexes[nb_val_elts:]
            x_val_i = [x[i] for i in val_indexes]
            y_val_i = [y[i] for i in val_indexes]
            x_train_i = [x[i] for i in train_indexes]
            y_train_i = [y[i] for i in train_indexes]
            x_val.append(x_val_i)
            y_val.append(y_val_i)
            x_train.append(x_train_i)
            y_train.append(y_train_i)
        x_val, y_val = np.concatenate(x_val), np.concatenate(y_val)
        x_train, y_train = np.concatenate(x_train), np.concatenate(y_train)
        return x_val, y_val, x_train, y_train
    def get_idx_data(self, idx, batch_size, mode="test", data_source="train"):
        """Returns a custom loader with specific idxs only.
        :param idx: A list of data indexes that we want.
        :param mode: Various mode for the transformations applied on it.
        :param data_source: Whether to fetch from the train, val, or test set.
        :return: The raw data and a loader.
        """
        if data_source == "train":
            x, y = self.data_train, self.targets_train
        elif data_source == "val":
            x, y = self.data_val, self.targets_val
        elif data_source == "test":
            x, y = self.data_test, self.targets_test
        else:
            raise ValueError("Unknown data source <{}>.".format(data_source))
        # NOTE(review): `y` here is a numpy array; ndarray.sort() sorts in
        # place and returns None, so this tuple unpack would raise — confirm
        # whether this method is ever exercised.
        y, sorted_idx = y.sort()
        sampler = torch.utils.data.sampler.SubsetRandomSampler(idx)
        trsf = transforms.Compose(self.common_transforms)
        # NOTE(review): `loader` is built but never returned — the docstring
        # promises a loader, so a `return loader` is likely missing.
        loader = DataLoader(
            DummyDataset(x[sorted_idx], y, trsf),
            sampler=sampler,
            batch_size=batch_size,
            shuffle=False,
            num_workers=self._workers)
    def get_custom_loader(self, class_indexes, mode="test", data_source="train"):
        """Returns a custom loader.
        :param class_indexes: A list of class indexes that we want.
        :param mode: Various mode for the transformations applied on it.
        :param data_source: Whether to fetch from the train, val, or test set.
        :return: The raw data and a loader.
        """
        if not isinstance(class_indexes, list):  # TODO: deprecated, should always give a list
            class_indexes = [class_indexes]
        if data_source == "train":
            x, y = self.data_train, self.targets_train
        elif data_source == "val":
            x, y = self.data_val, self.targets_val
        elif data_source == "test":
            x, y = self.data_test, self.targets_test
        else:
            raise ValueError("Unknown data source <{}>.".format(data_source))
        data, targets = [], []
        for class_index in class_indexes:
            class_data, class_targets = self._select(
                x, y, low_range=class_index, high_range=class_index + 1
            )
            data.append(class_data)
            targets.append(class_targets)
        data = np.concatenate(data)
        targets = np.concatenate(targets)
        # NOTE(review): the trailing "| 14,676 | ..." tokens on the next line
        # are dataset-extraction artifacts fused onto the source line.
        return data, self._get_loader(data, targets, shuffle=False, mode=mode) | 14,676 | 38.138667 | 119 | py |
La-MAML | La-MAML-main/dataloaders/multi_task_loader.py | import random
import numpy as np
import torch
from PIL import Image
from torch.utils.data import DataLoader
from torch.utils.data.sampler import SubsetRandomSampler
from torchvision import datasets, transforms
from dataloaders.idataset import _get_datasets, DummyDataset
from dataloaders.task_sampler import MultiTaskSampler
import random
import ipdb
class IncrementalLoader:
    def __init__(
        self,
        opt,
        shuffle=True,
        seed=1,
    ):
        """Multi-task variant of the incremental loader: training sees every
        fine class at once, while evaluation tasks are split by coarse
        (super) label — see _setup_test_tasks."""
        self._opt = opt
        dataset_name=opt.dataset
        validation_split=opt.validation
        self.increment=opt.increment
        datasets = _get_datasets(dataset_name)
        # Loads the splits; presumably mirrors class_incremental_loader's
        # _setup_data (the definition lies beyond this excerpt) — confirm.
        self._setup_data(
            datasets,
            class_order_type=opt.class_order,
            seed=seed,
            increment=self.increment,
            validation_split=validation_split
        )
        self.validation_split = validation_split
        # Transform pipelines come from the first dataset handler.
        self.train_transforms = datasets[0].train_transforms
        self.common_transforms = datasets[0].common_transforms
        self.top_transforms = datasets[0].top_transforms
        self._current_task = 0
        self._batch_size = opt.batch_size
        self._test_batch_size = opt.test_batch_size
        self._workers = opt.workers
        self._shuffle = shuffle
        self._setup_test_tasks(validation_split)
@property
def n_tasks(self):
return len(self.increments)
    def new_task(self):
        """Return (task_info, train/val/test loaders) covering ALL classes at
        once — the multi-task (joint) setting has a single training task."""
        min_class = 0
        # Upper bound = largest fine label + 1 (labels were remapped densely).
        max_class = max(self.targets_train) + 1
        x_train, y_train, super_y_train = self._select(
            self.data_train, self.targets_train, self.super_targets_train, low_range=min_class, high_range=max_class
        )
        x_val, y_val, super_y_val = self._select(
            self.data_val, self.targets_val, self.super_targets_val, low_range=min_class, high_range=max_class
        )
        x_test, y_test, super_y_test = self._select(self.data_test, self.targets_test, self.super_targets_test, high_range=max_class)
        train_loader = self._get_loader(x_train, y_train, super_y_train, mode="train")
        # Validation uses train-mode transforms; None when no val split.
        val_loader = self._get_loader(x_val, y_val, super_y_val, mode="train") if len(x_val) > 0 else None
        test_loader = self._get_loader(x_test, y_test, super_y_test, mode="test")
        task_info = {
            "min_class": min_class,
            "max_class": max_class,
            "increment": self.increments[self._current_task],
            "task": self._current_task,
            "max_task": len(self.increments),
            "n_train_data": x_train.shape[0],
            "n_test_data": x_test.shape[0]
        }
        self._current_task += 1
        return task_info, train_loader, val_loader, test_loader
    def _setup_test_tasks(self, validation_split):
        """Pre-build one evaluation loader per task.

        Task i owns exactly superclass id i (selection via _select_super).
        Validation loaders are only built when a validation split exists.
        """
        self.test_tasks = []
        self.val_tasks = []
        for i in range(len(self.increments)):
            min_class = i
            max_class = i+1
            x_test, y_test, super_y_test = self._select_super(self.data_test, self.targets_test, self.super_targets_test, low_range=min_class, high_range=max_class)
            self.test_tasks.append(self._get_loader(x_test, y_test, super_y_test, mode="test"))
            if validation_split > 0.0:
                x_val, y_val, super_y_val = self._select_super(self.data_val, self.targets_val, self.super_targets_val, low_range=min_class, high_range=max_class)
                self.val_tasks.append(self._get_loader(x_val, y_val, super_y_val, mode="test"))
def get_tasks(self, dataset_type='test'):
if dataset_type == 'val':
if self.validation_split > 0.0:
return self.val_tasks
else:
return self.test_tasks
elif dataset_type == 'test':
return self.test_tasks
else:
raise NotImplementedError("Unknown mode {}.".format(dataset_type))
def get_dataset_info(self):
if(self._opt.dataset == 'tinyimagenet'):
n_inputs = 3*64*64
else:
n_inputs = self.data_train.shape[3]*self.data_train.shape[1]*self.data_train.shape[2]
n_outputs = self._opt.increment * len(self.increments)
n_task = len(self.increments)
return n_inputs, n_outputs, n_task
def _select(self, x, y, super_y, low_range=0, high_range=0):
idxes = np.where(np.logical_and(y >= low_range, y < high_range))[0]
return x[idxes], y[idxes], super_y[idxes]
def _select_super(self, x, y, super_y, low_range=0, high_range=0):
idxes = np.where(np.logical_and(super_y >= low_range, super_y < high_range))[0]
return x[idxes], y[idxes], super_y[idxes]
def _get_loader(self, x, y, super_y, shuffle=True, mode="train"):
if mode == "train":
pretrsf = transforms.Compose([*self.top_transforms])
trsf = transforms.Compose([*self.train_transforms, *self.common_transforms])
batch_size = self._batch_size
sampler = self._get_sampler(super_y, mode)
return DataLoader(
DummyDataset(x, y, trsf, pretrsf, self._opt.dataset=='tinyimagenet', super_y),
batch_sampler=sampler,
shuffle=False,
num_workers=self._workers
)
elif mode == "test" or mode == "flip":
if mode == "test":
pretrsf = transforms.Compose([*self.top_transforms])
trsf = transforms.Compose(self.common_transforms)
batch_size = self._test_batch_size
elif mode == "flip":
trsf = transforms.Compose(
[transforms.RandomHorizontalFlip(p=1.), *self.common_transforms]
)
batch_size = self._test_batch_size
return DataLoader(
DummyDataset(x, y, trsf, pretrsf, self._opt.dataset=='tinyimagenet', super_y),
batch_size=batch_size,
shuffle=shuffle,
num_workers=self._workers
)
else:
raise NotImplementedError("Unknown mode {}.".format(mode))
def _get_sampler(self, labels, mode):
assert self._batch_size%self._opt.classes_per_it == 0, \
"Batch size should be a multiple of number of desired classes in a iter"
if 'train' in mode:
classes_per_it = self._opt.classes_per_it
num_samples = int(self._batch_size/self._opt.classes_per_it)
elif 'val' in mode:
classes_per_it = self._opt.classes_per_it
num_samples = int(self._batch_size/self._opt.classes_per_it)
else:
raise NotImplementedError("Unknown mode {}.".format(mode))
return MultiTaskSampler(labels=labels,
classes_per_it=classes_per_it,
num_samples=num_samples,
iterations=self._opt.iterations)
    def _setup_data(self, datasets, class_order_type=False, seed=1, increment=10, validation_split=0.):
        # FIXME: handles online loading of images
        """Load, split, reorder and concatenate all datasets.

        Three branches: tinyimagenet (folder datasets), cifar100 under the
        "iid2" model (flat class order), and the default hierarchical branch
        (classes sorted by superclass). Results land in
        self.{data,targets,super_targets}_{train,val,test} and self.increments.
        """
        self.data_train, self.targets_train, self.super_targets_train = [], [], []
        self.data_test, self.targets_test, self.super_targets_test = [], [], []
        self.data_val, self.targets_val, self.super_targets_val = [], [], []
        self.increments = []
        self.class_order = []
        current_class_idx = 0  # When using multiple datasets
        for dataset in datasets:
            if(self._opt.dataset == 'tinyimagenet'):
                root_path = self._opt.data_path
                train_dataset = dataset.base_dataset(root_path + 'train/')
                test_dataset = dataset.base_dataset(root_path + 'val/')
                train_dataset.data = train_dataset.samples
                test_dataset.data = test_dataset.samples
                x_train, y_train = train_dataset.data, np.array(train_dataset.targets)
                # NOTE(review): validation_split is NOT forwarded here, so the
                # default 0. is used and the val split is empty — confirm.
                x_val, y_val, x_train, y_train = self._list_split_per_class(
                    x_train, y_train
                )
                x_test, y_test = test_dataset.data, np.array(test_dataset.targets)
                order = [i for i in range(len(np.unique(y_train)))]
                if class_order_type == 'random':
                    random.seed(seed)  # Ensure that following order is determined by seed:
                    random.shuffle(order)
                    print("Class order:", order)
                elif class_order_type == 'old' and dataset.class_order is not None:
                    order = dataset.class_order
                else:
                    print("Classes are presented in a chronological order")
                self.class_order.append(order)
                # Remap labels into the chosen order, then derive superclasses.
                y_train = self._map_new_class_index(y_train, order)
                y_val = self._map_new_class_index(y_val, order)
                y_test = self._map_new_class_index(y_test, order)
                super_y_train = self._make_super_classes(y_train, self.increment)
                super_y_test = self._make_super_classes(y_test, self.increment)
                super_y_val = self._make_super_classes(y_val, self.increment)
                y_train += current_class_idx
                y_val += current_class_idx
                y_test += current_class_idx
                current_class_idx += len(order)
                if len(datasets) > 1:
                    self.increments.append(len(order))
                else:
                    self.increments = [increment for _ in range(len(order) // increment)]
            elif ((self._opt.dataset == 'cifar100') and (self._opt.model=="iid2")):
                root_path = self._opt.data_path
                train_dataset = dataset.base_dataset(root_path, train=True, download=True)
                test_dataset = dataset.base_dataset(root_path, train=False, download=True)
                x_train, y_train = train_dataset.data, np.array(train_dataset.targets)
                x_val, y_val, x_train, y_train = self._list_split_per_class(
                    x_train, y_train, validation_split
                )
                x_test, y_test = test_dataset.data, np.array(test_dataset.targets)
                order = [i for i in range(len(np.unique(y_train)))]
                if class_order_type == 'random':
                    random.seed(seed)  # Ensure that following order is determined by seed:
                    random.shuffle(order)
                    print("Class order:", order)
                elif class_order_type == 'old' and dataset.class_order is not None:
                    order = dataset.class_order
                elif class_order_type == 'super' and dataset.class_order_super is not None:
                    order = dataset.class_order_super
                else:
                    print("Classes are presented in a chronological order")
                self.class_order.append(order)
                y_train = self._map_new_class_index(y_train, order)
                y_val = self._map_new_class_index(y_val, order)
                y_test = self._map_new_class_index(y_test, order)
                super_y_train = self._make_super_classes(y_train, self.increment)
                super_y_test = self._make_super_classes(y_test, self.increment)
                super_y_val = self._make_super_classes(y_val, self.increment)
                y_train += current_class_idx
                y_val += current_class_idx
                y_test += current_class_idx
                current_class_idx += len(order)
                if len(datasets) > 1:
                    self.increments.append(len(order))
                else:
                    self.increments = [increment for _ in range(len(order) // increment)]
            else:
                # Default: hierarchical dataset with provided superclasses.
                root_path = self._opt.data_path
                train_dataset = dataset.base_dataset_hierarchy(root_path, train=True, download=True)
                test_dataset = dataset.base_dataset_hierarchy(root_path, train=False, download=True)
                x_train, y_train, super_y_train = train_dataset.data, np.array(train_dataset.targets), np.array(train_dataset.super_targets)
                x_val, y_val, super_y_val, x_train, y_train, super_y_train = self._split_per_class(
                    x_train, y_train, super_y_train, validation_split
                )
                x_test, y_test, super_y_test = test_dataset.data, np.array(test_dataset.targets), np.array(test_dataset.super_targets)
                # Sort every split by superclass so tasks are contiguous.
                idxs = np.argsort(super_y_test)
                x_test = x_test[idxs]
                y_test = y_test[idxs]
                super_y_test = super_y_test[idxs]
                idxs = np.argsort(super_y_train)
                x_train = x_train[idxs]
                y_train = y_train[idxs]
                super_y_train = super_y_train[idxs]
                idxs = np.argsort(super_y_val)
                x_val = x_val[idxs]
                y_val = y_val[idxs]
                super_y_val = super_y_val[idxs]
                # Relabel fine classes by order of first appearance in the
                # superclass-sorted test stream.
                idxs = np.unique(y_test, return_index=True)[1]
                unique_y_order = [y_test[id] for id in sorted(idxs)]
                unique_supery_order = [super_y_test[id] for id in sorted(idxs)]
                print(unique_supery_order)
                print(unique_y_order)
                y_train = self._map_new_class_index(y_train, unique_y_order)
                y_val = self._map_new_class_index(y_val, unique_y_order)
                y_test = self._map_new_class_index(y_test, unique_y_order)
                y_train += current_class_idx
                y_val += current_class_idx
                y_test += current_class_idx
                # current_class_idx += len(order)
                # if len(datasets) > 1:
                #     raise(Exception("current_class_idx doesnt work for more than one dataset right now, correct it"))
                #     self.increments.append(len(order))
                # else:
                # NOTE(review): 20 tasks hard-coded here (CIFAR-100's 20
                # superclasses) — confirm before using with other datasets.
                self.increments = [increment for _ in range(20)]
            self.data_train.append(x_train)
            self.targets_train.append(y_train)
            self.super_targets_train.append(super_y_train)
            self.data_val.append(x_val)
            self.targets_val.append(y_val)
            self.super_targets_val.append(super_y_val)
            self.data_test.append(x_test)
            self.targets_test.append(y_test)
            self.super_targets_test.append(super_y_test)
        # print(self.increments)
        self.data_train = np.concatenate(self.data_train)
        self.targets_train = np.concatenate(self.targets_train)
        self.super_targets_train = np.concatenate(self.super_targets_train)
        self.data_val = np.concatenate(self.data_val)
        self.targets_val = np.concatenate(self.targets_val)
        self.super_targets_val = np.concatenate(self.super_targets_val)
        self.data_test = np.concatenate(self.data_test)
        self.targets_test = np.concatenate(self.targets_test)
        self.super_targets_test = np.concatenate(self.super_targets_test)
def _make_super_classes(self, y, increment):
unique_y = np.unique(y)
super_y = [int(i/increment) for i in range(len(unique_y))]
super_order = [super_y[y[i]] for i in range(len(y))]
return super_order
@staticmethod
def _map_new_class_index(y, order):
"""Transforms targets for new class order."""
return np.array(list(map(lambda x: order.index(x), y)))
    @staticmethod
    def _split_per_class(x, y, super_y, validation_split=0.):
        """Splits train data for a subset of validation data.
        Split is done so that each class has same amount of data.

        Uses the global numpy RNG (np.random.permutation), so results depend
        on the seed set by the caller.
        """
        shuffled_indexes = np.random.permutation(x.shape[0])
        # idxs = np.argsort(y)
        x = x[shuffled_indexes]
        y = y[shuffled_indexes]
        super_y = super_y[shuffled_indexes]
        x_val, y_val, super_y_val = [], [], []
        x_train, y_train, super_y_train = [], [], []
        for class_id in np.unique(y):
            class_indexes = np.where(y == class_id)[0]
            # First validation_split fraction of each class goes to val.
            nb_val_elts = int(class_indexes.shape[0] * validation_split)
            val_indexes = class_indexes[:nb_val_elts]
            train_indexes = class_indexes[nb_val_elts:]
            x_val.append(x[val_indexes])
            y_val.append(y[val_indexes])
            super_y_val.append(super_y[val_indexes])
            x_train.append(x[train_indexes])
            y_train.append(y[train_indexes])
            super_y_train.append(super_y[train_indexes])
        x_val, y_val, super_y_val = np.concatenate(x_val), np.concatenate(y_val), np.concatenate(super_y_val)
        x_train, y_train, super_y_train = np.concatenate(x_train), np.concatenate(y_train), np.concatenate(super_y_train)
        return x_val, y_val, super_y_val, x_train, y_train, super_y_train
    @staticmethod
    def _list_split_per_class(x, y, validation_split=0.):
        """Splits train data for a subset of validation data.
        Split is done so that each class has the same amount of data.

        List-based variant of _split_per_class (x may be a list of samples,
        e.g. file paths). Uses the global ``random`` RNG for shuffling.
        """
        c = list(zip(x, y))
        random.shuffle(c)
        x, y = zip(*c)
        x_val, y_val = [], []
        x_train, y_train = [], []
        for class_id in np.unique(y):
            class_indexes = np.where(y == class_id)[0]
            nb_val_elts = int(class_indexes.shape[0] * validation_split)
            val_indexes = class_indexes[:nb_val_elts]
            train_indexes = class_indexes[nb_val_elts:]
            x_val_i = [x[i] for i in val_indexes]
            y_val_i = [y[i] for i in val_indexes]
            x_train_i = [x[i] for i in train_indexes]
            y_train_i = [y[i] for i in train_indexes]
            x_val.append(x_val_i)
            y_val.append(y_val_i)
            x_train.append(x_train_i)
            y_train.append(y_train_i)
        x_val, y_val = np.concatenate(x_val), np.concatenate(y_val)
        x_train, y_train = np.concatenate(x_train), np.concatenate(y_train)
        return x_val, y_val, x_train, y_train
###### Unused functions
def get_idx_data(self, idx, batch_size, mode="test", data_source="train"):
"""Returns a custom loader with specific idxs only.
:param idx: A list of data indexes that we want.
:param mode: Various mode for the transformations applied on it.
:param data_source: Whether to fetch from the train, val, or test set.
:return: The raw data and a loader.
"""
if data_source == "train":
x, y, super_y = self.data_train, self.targets_train, self.super_targets_train
elif data_source == "val":
x, y, super_y = self.data_val, self.targets_val, self.super_targets_val
elif data_source == "test":
x, y, super_y = self.data_test, self.targets_test, self.super_targets_test
else:
raise ValueError("Unknown data source <{}>.".format(data_source))
y, sorted_idx = y.sort()
sampler = torch.utils.data.sampler.SubsetRandomSampler(idx)
trsf = transforms.Compose(self.common_transforms)
loader = DataLoader(
DummyDataset(x[sorted_idx], y, trsf, super_y=super_y[sorted_idx]),
sampler=sampler,
batch_size=batch_size,
shuffle=False,
num_workers=self._workers)
    def get_custom_loader(self, class_indexes, mode="test", data_source="train"):
        """Returns a custom loader.
        :param class_indexes: A list of class indexes that we want.
        :param mode: Various mode for the transformations applied on it.
        :param data_source: Whether to fetch from the train, val, or test set.
        :return: The raw data and a loader.
        """
        if not isinstance(class_indexes, list):  # TODO: deprecated, should always give a list
            class_indexes = [class_indexes]
        if data_source == "train":
            x, y, super_y = self.data_train, self.targets_train, self.super_targets_train
        elif data_source == "val":
            x, y, super_y = self.data_val, self.targets_val, self.super_targets_val
        elif data_source == "test":
            x, y, super_y = self.data_test, self.targets_test, self.super_targets_test
        else:
            raise ValueError("Unknown data source <{}>.".format(data_source))
        data, targets, super_targets = [], [], []
        for class_index in class_indexes:
            # One fine class at a time: [class_index, class_index + 1).
            class_data, class_targets, super_class_targets = self._select(
                x, y, super_y, low_range=class_index, high_range=class_index + 1
            )
            data.append(class_data)
            targets.append(class_targets)
            super_targets.append(super_class_targets)
        data = np.concatenate(data)
        targets = np.concatenate(targets)
        super_targets = np.concatenate(super_targets)
        return data, self._get_loader(data, targets, super_targets, shuffle=False, mode=mode)
| 21,088 | 41.863821 | 164 | py |
La-MAML | La-MAML-main/dataloaders/task_incremental_loader.py | import numpy as np
import torch
from PIL import Image
from torch.utils.data import DataLoader
from torchvision import datasets
from dataloaders.idataset import DummyArrayDataset
import os
class IncrementalLoader:
    """Task-incremental loader over a pre-serialized task list (.pt file)."""
    def __init__(
        self,
        opt,
        shuffle=True,
        seed=1,
    ):
        self._opt = opt
        validation_split=opt.validation
        increment=opt.increment
        self._setup_data(
            class_order_type=opt.class_order,
            seed=seed,
            increment=increment,
            validation_split=validation_split
        )
        self._current_task = 0
        self._batch_size = opt.batch_size
        self._test_batch_size = opt.test_batch_size
        self._workers = opt.workers
        self._shuffle = shuffle
        self._setup_test_tasks()
@property
def n_tasks(self):
return len(self.test_dataset)
    def new_task(self):
        """Advance to the next task and build its train/test loaders.

        Returns (task_info, train_loader, None, test_loader) — this loader
        has no validation split. Raises Exception when tasks are exhausted.
        """
        if self._current_task >= len(self.test_dataset):
            raise Exception("No more tasks.")
        # Each stored task is (descriptor, x, y); p is the per-task sample
        # subset/permutation drawn in _setup_data.
        p = self.sample_permutations[self._current_task]
        x_train, y_train = self.train_dataset[self._current_task][1][p], self.train_dataset[self._current_task][2][p]
        x_test, y_test = self.test_dataset[self._current_task][1], self.test_dataset[self._current_task][2]
        train_loader = self._get_loader(x_train, y_train, mode="train")
        test_loader = self._get_loader(x_test, y_test, mode="test")
        task_info = {
            "min_class": 0,
            "max_class": self.n_outputs,
            "increment": -1,
            "task": self._current_task,
            "max_task": len(self.test_dataset),
            "n_train_data": len(x_train),
            "n_test_data": len(x_test)
        }
        self._current_task += 1
        return task_info, train_loader, None, test_loader
    def _setup_test_tasks(self):
        """Pre-build one test DataLoader per serialized task."""
        self.test_tasks = []
        for i in range(len(self.test_dataset)):
            self.test_tasks.append(self._get_loader(self.test_dataset[i][1], self.test_dataset[i][2], mode="test"))
    def get_tasks(self, dataset_type='test'):
        """Return per-task evaluation data.

        NOTE(review): this returns self.test_dataset (the raw (id, x, y)
        tuples), not the self.test_tasks loaders built in _setup_test_tasks,
        and 'val' also falls back to the test set — confirm this is intended.
        """
        if dataset_type == 'test':
            return self.test_dataset
        elif dataset_type == 'val':
            return self.test_dataset
        else:
            raise NotImplementedError("Unknown mode {}.".format(dataset_type))
def get_dataset_info(self):
n_inputs = self.train_dataset[0][1].size(1)
n_outputs = 0
for i in range(len(self.train_dataset)):
n_outputs = max(n_outputs, self.train_dataset[i][2].max())
n_outputs = max(n_outputs, self.test_dataset[i][2].max())
self.n_outputs = n_outputs
return n_inputs, n_outputs.item()+1, self.n_tasks
def _get_loader(self, x, y, shuffle=True, mode="train"):
if mode == "train":
batch_size = self._batch_size
elif mode == "test":
batch_size = self._test_batch_size
else:
raise NotImplementedError("Unknown mode {}.".format(mode))
return DataLoader(
DummyArrayDataset(x, y),
batch_size=batch_size,
shuffle=shuffle,
num_workers=self._workers
)
    def _setup_data(self, class_order_type=False, seed=1, increment=10, validation_split=0.):
        # FIXME: handles online loading of images
        """Load the pre-serialized (train, test) task lists and pre-draw the
        per-task training subsets.

        class_order_type / increment / validation_split are accepted for
        interface parity with the class-incremental loader but unused here.
        """
        torch.manual_seed(seed)
        self.train_dataset, self.test_dataset = torch.load(os.path.join(self._opt.data_path, self._opt.dataset + ".pt"))
        self.sample_permutations = []
        # for every task, accumulate a shuffled set of samples_per_task
        for t in range(len(self.train_dataset)):
            N = self.train_dataset[t][1].size(0)
            # samples_per_task <= 0 means "use everything".
            if self._opt.samples_per_task <= 0:
                n = N
            else:
                n = min(self._opt.samples_per_task, N)
            p = torch.randperm(N)[0:n]
            self.sample_permutations.append(p)
| 3,964 | 30.468254 | 120 | py |
La-MAML | La-MAML-main/utils/misc_utils.py | import datetime
import glob
import json
import os
import random
import ipdb
import numpy as np
import torch
from tqdm import tqdm
def to_onehot(targets, n_classes):
    """Return a float one-hot matrix of shape (len(targets), n_classes)."""
    out = torch.zeros(targets.shape[0], n_classes).to(targets.device)
    idx = targets.long().view(-1, 1)
    out.scatter_(dim=1, index=idx, value=1.)
    return out
def _check_loss(loss):
return not bool(torch.isnan(loss).item()) and bool((loss >= 0.).item())
def compute_accuracy(ypred, ytrue, task_size=10):
    """Compute overall accuracy plus per-task-range accuracies.

    :param ypred: predicted labels (np.ndarray of ints).
    :param ytrue: ground-truth labels (np.ndarray of ints).
    :param task_size: number of classes per task range.
    :return: dict with "total" plus one "aa-bb" entry per task range.
    """
    all_acc = {}
    all_acc["total"] = round((ypred == ytrue).sum() / len(ytrue), 3)
    # BUG FIX: range(0, np.max(ytrue), task_size) skipped the last task range
    # whenever the max label was an exact multiple of task_size; use max+1.
    for class_id in range(0, np.max(ytrue) + 1, task_size):
        idxes = np.where(
            np.logical_and(ytrue >= class_id, ytrue < class_id + task_size)
        )[0]
        label = "{}-{}".format(
            str(class_id).rjust(2, "0"),
            str(class_id + task_size - 1).rjust(2, "0")
        )
        all_acc[label] = round((ypred[idxes] == ytrue[idxes]).sum() / len(idxes), 3)
    return all_acc
def get_date():
    """Current local date as a compact YYYYMMDD string."""
    now = datetime.datetime.now()
    return now.strftime("%Y%m%d")
def get_date_time():
    """Current local timestamp, YYYY-mm-dd_HH-MM-SS-ffff (µs truncated to 4 digits)."""
    stamp = datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S-%f')
    return stamp[:-2]
def log_dir(opt, timestamp=None):
    """Create and return the experiment log dir and its tensorboard subdir.

    Layout: <opt.log_dir>/<model>/<expt_name>-<timestamp>/<seed>[/tfdir].
    Also dumps the full option namespace to training_parameters.json.

    :param opt: parsed options (needs log_dir, model, expt_name, seed).
    :param timestamp: optional fixed timestamp; defaults to get_date_time().
    :return: (logdir, tfdir) paths.
    """
    if timestamp is None:
        timestamp = get_date_time()
    # NOTE: an unused random number used to be drawn here; removed.
    logdir = opt.log_dir + '/%s/%s-%s/%s' % (opt.model, opt.expt_name, timestamp, opt.seed)
    tfdir = opt.log_dir + '/%s/%s-%s/%s/%s' % (opt.model, opt.expt_name, timestamp, opt.seed, "tfdir")
    mkdir(logdir)
    mkdir(tfdir)
    with open(logdir + '/training_parameters.json', 'w') as f:
        json.dump(vars(opt), f, indent=4)
    return logdir, tfdir
def save_list_to_file(path, thelist):
    """Write each element of *thelist* to *path*, one per line."""
    with open(path, 'w') as f:
        f.write("".join("%s\n" % item for item in thelist))
def find_latest_checkpoint(folder_path):
    """Return the most recently modified .pth file in *folder_path*.

    Raises IndexError when the folder contains no .pth files.
    """
    print('searching for checkpoint in : '+folder_path)
    by_recency = sorted(glob.iglob(folder_path+'/*.pth'),
                        key=os.path.getmtime, reverse=True)
    latest = by_recency[0]
    print('latest checkpoint is:')
    print(latest)
    return latest
def init_seed(seed):
    '''
    Disable cudnn to maximize reproducibility
    '''
    print("Set seed", seed)
    random.seed(seed)
    # NOTE(review): torch.cuda has no 'cudnn_enabled' attribute — this line
    # just sets a new attribute on the module and likely has no effect; the
    # real switch is torch.backends.cudnn.enabled below. Confirm and remove.
    torch.cuda.cudnn_enabled = False
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.backends.cudnn.enabled = False
def find_latest_checkpoint_name(folder_path):
    """Return the .pth file in *folder_path* with the highest numeric suffix.

    Files are expected to be named like ``..._<num>.pth``. Returns '' when no
    checkpoint with a suffix > 0 is found.
    """
    print('searching for checkpoint in : '+folder_path)
    files = glob.glob(folder_path+'/*.pth')
    max_num = 0
    filename = ''
    for filei in files:
        # BUG FIX: os.path.splitext returns a (root, ext) tuple; the old code
        # called .split() on the tuple and crashed with AttributeError.
        root = os.path.splitext(filei)[0]
        ckpt_num = int(root.split('_')[-1])
        if ckpt_num > max_num:
            max_num = ckpt_num
            filename = filei
    print('latest checkpoint is:')
    print(filename)
    return filename
def mkdir(path):
    """Create *path* (including parents) if it does not already exist.

    Uses exist_ok=True so concurrent callers cannot race between the
    existence check and the creation (the old check-then-create could).
    """
    os.makedirs(path, exist_ok=True)
def mkdirs(paths):
    """mkdir() a single path, or every entry when given a list of paths."""
    is_many = isinstance(paths, list) and not isinstance(paths, str)
    targets = paths if is_many else [paths]
    for target in targets:
        mkdir(target)
def to_numpy(input):
    """Convert a torch.Tensor to numpy; pass an ndarray through unchanged."""
    if isinstance(input, np.ndarray):
        return input
    if isinstance(input, torch.Tensor):
        return input.cpu().numpy()
    raise TypeError('Unknown type of input, expected torch.Tensor or '\
            'np.ndarray, but got {}'.format(type(input)))
def log_sum_exp(input, dim=None, keepdim=False):
    """Numerically stable LogSumExp.
    Args:
        input (Tensor)
        dim (int): Dimension along with the sum is performed
        keepdim (bool): Whether to retain the last dimension on summing
    Returns:
        Equivalent of log(sum(exp(inputs), dim=dim, keepdim=keepdim)).
    """
    # Stabilize with s = max(x): logsumexp(x) = s + logsumexp(x - s).
    if dim is None:
        # Flatten and reduce over the single remaining dimension.
        input = input.view(-1)
        dim = 0
    shift = input.max(dim=dim, keepdim=True)[0]
    summed = (input - shift).exp().sum(dim=dim, keepdim=True)
    result = shift + summed.log()
    return result if keepdim else result.squeeze(dim)
La-MAML | La-MAML-main/model/lamaml.py | import random
import numpy as np
import ipdb
import math
import torch
import torch.nn as nn
from model.lamaml_base import *
class Net(BaseNet):
    """La-MAML model for the single-head (non-cifar) setting.

    Inherits buffers, optimizers and learnable per-parameter LRs from BaseNet;
    all outputs are shared across tasks (nc_per_task == n_outputs).
    """
    def __init__(self,
                 n_inputs,
                 n_outputs,
                 n_tasks,
                 args):
        super(Net, self).__init__(n_inputs,
                                  n_outputs,
                                  n_tasks,
                                  args)
        self.nc_per_task = n_outputs
def forward(self, x, t):
output = self.net.forward(x)
return output
def meta_loss(self, x, fast_weights, y, t):
"""
differentiate the loss through the network updates wrt alpha
"""
logits = self.net.forward(x, fast_weights)
loss_q = self.loss(logits.squeeze(1), y)
return loss_q, logits
def inner_update(self, x, fast_weights, y, t):
"""
Update the fast weights using the current samples and return the updated fast
"""
logits = self.net.forward(x, fast_weights)
loss = self.loss(logits, y)
if fast_weights is None:
fast_weights = self.net.parameters()
# NOTE if we want higher order grads to be allowed, change create_graph=False to True
graph_required = self.args.second_order
grads = torch.autograd.grad(loss, fast_weights, create_graph=graph_required, retain_graph=graph_required)
for i in range(len(grads)):
torch.clamp(grads[i], min = -self.args.grad_clip_norm, max = self.args.grad_clip_norm)
fast_weights = list(
map(lambda p: p[1][0] - p[0] * nn.functional.relu(p[1][1]), zip(grads, zip(fast_weights, self.net.alpha_lr))))
return fast_weights
    def observe(self, x, y, t):
        """One La-MAML update on batch (x, y) from task t.

        Runs `glances` passes; each pass builds a fast-weight trajectory over
        single samples while accumulating a meta-loss on a replay batch, then
        takes a meta step on the learnable LRs (and the weights).
        Returns the last pass's mean meta-loss as a float.
        """
        self.net.train()
        for pass_itr in range(self.glances):
            self.pass_itr = pass_itr
            perm = torch.randperm(x.size(0))
            x = x[perm]
            y = y[perm]
            self.epoch += 1
            self.zero_grads()
            # Task boundary: promote the staging buffer to replay memory.
            if t != self.current_task:
                self.M = self.M_new
                self.current_task = t
            batch_sz = x.shape[0]
            meta_losses = [0 for _ in range(batch_sz)]
            bx, by, bt = self.getBatch(x.cpu().numpy(), y.cpu().numpy(), t)
            fast_weights = None
            # Inner trajectory: one fast step per sample, meta-loss on the
            # replay batch after every step.
            for i in range(0, batch_sz):
                batch_x = x[i].unsqueeze(0)
                batch_y = y[i].unsqueeze(0)
                fast_weights = self.inner_update(batch_x, fast_weights, batch_y, t)
                if(self.real_epoch == 0):
                    self.push_to_mem(batch_x, batch_y, torch.tensor(t))
                meta_loss, logits = self.meta_loss(bx, fast_weights, by, t)
                meta_losses[i] += meta_loss
            # Taking the meta gradient step (will update the learning rates)
            self.zero_grads()
            meta_loss = sum(meta_losses)/len(meta_losses)
            meta_loss.backward()
            torch.nn.utils.clip_grad_norm_(self.net.parameters(), self.args.grad_clip_norm)
            torch.nn.utils.clip_grad_norm_(self.net.alpha_lr.parameters(), self.args.grad_clip_norm)
            if self.args.learn_lr:
                self.opt_lr.step()
            # Either a synchronized optimizer step, or a manual SGD step with
            # the (relu-clipped) learned per-parameter LRs.
            if(self.args.sync_update):
                self.opt_wt.step()
            else:
                for i,p in enumerate(self.net.parameters()):
                    p.data = p.data - p.grad * nn.functional.relu(self.net.alpha_lr[i])
            self.net.zero_grad()
            self.net.alpha_lr.zero_grad()
        return meta_loss.item()
La-MAML | La-MAML-main/model/meta-bgd.py | import random
from random import shuffle
import numpy as np
import ipdb
import math
import torch
from torch.autograd import Variable
import torch.nn as nn
import model.meta.learner as Learner
import model.meta.modelfactory as mf
from model.optimizers_lib import optimizers_lib
from ast import literal_eval
"""
This baseline/ablation is constructed by merging C-MAML and BGD
By assigning a variance parameter to each NN parameter in the model
and using BGD's bayesian update to update these means (the NN parameters) and variances
(the learning rates in BGD are derived from the variances)
The 'n' bayesian samples in this case are the 'n' cumulative meta-losses sampled when
C-MAML is run with 'n' different initial theta vectors as the NN means sampled from the
(means, variances) stored for the model parameters.
The weight update is then carried out using the BGD formula that implicitly
uses the variances to derive the learning rates for the parameters
"""
class Net(torch.nn.Module):
    """C-MAML + BGD ablation: C-MAML inner loop with a BGD meta optimizer."""
    def __init__(self,
                n_inputs,
                n_outputs,
                n_tasks,
                args):
        super(Net, self).__init__()
        self.args = args
        nl, nh = args.n_layers, args.n_hiddens
        config = mf.ModelFactory.get_model(model_type = args.arch, sizes = [n_inputs] + [nh] * nl + [n_outputs],
                                            dataset = args.dataset, args=args)
        self.net = Learner.Learner(config, args)
        # define the lr params
        self.net.define_task_lr_params(alpha_init = args.alpha_init)
        self.cuda = args.cuda
        if self.cuda:
            self.net = self.net.cuda()
        # optimizer model
        optimizer_model = optimizers_lib.__dict__[args.bgd_optimizer]
        # params used to instantiate the BGD optimiser
        optimizer_params = dict({ #"logger": logger,
                            "mean_eta": args.mean_eta,
                            "std_init": args.std_init,
                            "mc_iters": args.train_mc_iters}, **literal_eval(" ".join(args.optimizer_params)))
        self.optimizer = optimizer_model(self.net, **optimizer_params)
        self.epoch = 0
        # allocate buffer
        self.M = []
        self.M_new = []
        self.age = 0
        # setup losses
        self.loss = torch.nn.CrossEntropyLoss()
        self.is_cifar = ((args.dataset == 'cifar100') or (args.dataset == 'tinyimagenet'))
        self.glances = args.glances
        self.pass_itr = 0
        self.real_epoch = 0
        # setup memories
        self.current_task = 0
        self.memories = args.memories
        self.batchSize = int(args.replay_batch_size)
        # cifar/tinyimagenet use a multi-head split; otherwise one shared head.
        if self.is_cifar:
            self.nc_per_task = n_outputs / n_tasks
        else:
            self.nc_per_task = n_outputs
        self.n_outputs = n_outputs
        self.obseve_itr = 0
    def take_multitask_loss(self, bt, t, logits, y):
        """Mean cross-entropy over a batch drawn from several tasks.

        bt holds each sample's task id; every sample is scored only on its
        own task's output slice, with its label shifted into that slice.
        """
        loss = 0.0
        for i, ti in enumerate(bt):
            offset1, offset2 = self.compute_offsets(ti)
            loss += self.loss(logits[i, offset1:offset2].unsqueeze(0), y[i].unsqueeze(0)-offset1)
        return loss/len(bt)
    def forward(self, x, t, fast_weights=None):
        """Predict for task *t*, masking logits outside the task's slice.

        Sets the BGD weights to their means (force_std=0) before the pass.
        """
        self.optimizer.randomize_weights(force_std=0)
        output = self.net.forward(x, vars=fast_weights)
        if self.is_cifar:
            # make sure we predict classes within the current task
            offset1, offset2 = self.compute_offsets(t)
            if offset1 > 0:
                output[:, :offset1].data.fill_(-10e10)
            if offset2 < self.n_outputs:
                output[:, int(offset2):self.n_outputs].data.fill_(-10e10)
        return output
    def meta_loss(self, x, fast_weights, y, bt, t):
        """
        differentiate the loss through the network updates wrt alpha
        """
        if self.is_cifar:
            # Multi-head: restrict logits to tasks seen so far and score each
            # sample on its own task's slice.
            offset1, offset2 = self.compute_offsets(t)
            logits = self.net.forward(x, fast_weights)[:, :offset2]
            loss_q = self.take_multitask_loss(bt, t, logits, y)
        else:
            logits = self.net.forward(x, fast_weights)
            # Cross Entropy Loss over data
            loss_q = self.loss(logits, y)
        return loss_q, logits
def compute_offsets(self, task):
if self.is_cifar:
offset1 = task * self.nc_per_task
offset2 = (task + 1) * self.nc_per_task
else:
offset1 = 0
offset2 = self.n_outputs
return int(offset1), int(offset2)
def push_to_mem(self, batch_x, batch_y, t):
"""
Reservoir sampling memory update
"""
if(self.real_epoch > 0 or self.pass_itr>0):
return
batch_x = batch_x.cpu()
batch_y = batch_y.cpu()
t = t.cpu()
for i in range(batch_x.shape[0]):
self.age += 1
if len(self.M_new) < self.memories:
self.M_new.append([batch_x[i], batch_y[i], t])
else:
p = random.randint(0,self.age)
if p < self.memories:
self.M_new[p] = [batch_x[i], batch_y[i], t]
    def getBatch(self, x, y, t):
        """
        Given the new data points, create a batch of old + new data,
        where old data is part of the memory buffer
        """
        if(x is not None):
            mxi = np.array(x)
            myi = np.array(y)
            mti = np.ones(x.shape[0], dtype=int)*t
        else:
            mxi = np.empty( shape=(0, 0) )
            myi = np.empty( shape=(0, 0) )
            mti = np.empty( shape=(0, 0) )
        bxs = []
        bys = []
        bts = []
        # Replay from the frozen old-task memory or the live staging buffer.
        if self.args.use_old_task_memory:  # and t>0:
            MEM = self.M
        else:
            MEM = self.M_new
        if len(MEM) > 0:
            order = [i for i in range(0,len(MEM))]
            osize = min(self.batchSize,len(MEM))
            for j in range(0,osize):
                # NOTE(review): the order list is reshuffled on every draw, so
                # memory samples are effectively drawn WITH replacement
                # (duplicates possible) — confirm this is intended.
                shuffle(order)
                k = order[j]
                x,y,t = MEM[k]
                xi = np.array(x)
                yi = np.array(y)
                ti = np.array(t)
                bxs.append(xi)
                bys.append(yi)
                bts.append(ti)
        # Append the current batch's samples after the replayed ones.
        for j in range(len(myi)):
            bxs.append(mxi[j])
            bys.append(myi[j])
            bts.append(mti[j])
        bxs = Variable(torch.from_numpy(np.array(bxs))).float()
        bys = Variable(torch.from_numpy(np.array(bys))).long().view(-1)
        bts = Variable(torch.from_numpy(np.array(bts))).long().view(-1)
        # handle gpus if specified
        if self.cuda:
            bxs = bxs.cuda()
            bys = bys.cuda()
            bts = bts.cuda()
        return bxs,bys,bts
def take_loss(self, t, logits, y):
offset1, offset2 = self.compute_offsets(t)
loss = self.loss(logits[:, offset1:offset2], y-offset1)
return loss
    def inner_update(self, x, fast_weights, y, t):
        """
        Update the fast weights using the current samples and return the updated fast

        One fast SGD step with per-parameter learned step sizes alpha_lr,
        with gradients clamped to [-grad_clip_norm, grad_clip_norm].
        """
        if self.is_cifar:
            offset1, offset2 = self.compute_offsets(t)
            logits = self.net.forward(x, fast_weights)[:, :offset2]
            loss = self.take_loss(t, logits, y)
            # loss = self.loss(logits, y)
        else:
            logits = self.net.forward(x, fast_weights)
            loss = self.loss(logits, y)
        if fast_weights is None:
            fast_weights = self.net.parameters()
        # NOTE if we want higher order grads to be allowed, change create_graph=False to True
        graph_required = True
        grads = list(torch.autograd.grad(loss, fast_weights, create_graph=graph_required, retain_graph=graph_required))
        for i in range(len(grads)):
            grads[i] = torch.clamp(grads[i], min = -self.args.grad_clip_norm, max = self.args.grad_clip_norm)
        # get fast weights vector by taking SGD step on grads
        fast_weights = list(
            map(lambda p: p[1][0] - p[0] * p[1][1], zip(grads, zip(fast_weights, self.net.alpha_lr))))
        return fast_weights
    def observe(self, x, y, t):
        """One meta-BGD update on batch (x, y) from task t.

        For each glance, runs the C-MAML inner/meta loop mc_iters times with
        weights resampled from the BGD (mean, variance) posterior, aggregates
        the Monte-Carlo gradients, then takes a BGD step. Returns the mean
        Monte-Carlo meta-loss of the last glance as a float.
        """
        self.net.train()
        self.obseve_itr += 1
        num_of_mc_iters = self.optimizer.get_mc_iters()
        for glance_itr in range(self.glances):
            mc_meta_losses = [0 for _ in range(num_of_mc_iters)]
            # running C-MAML num_of_mc_iters times to get montecarlo samples of meta-loss
            for pass_itr in range(num_of_mc_iters):
                self.optimizer.randomize_weights()
                self.pass_itr = pass_itr
                self.epoch += 1
                self.net.zero_grad()
                perm = torch.randperm(x.size(0))
                x = x[perm]
                y = y[perm]
                # Task boundary: promote the staging buffer to replay memory.
                if pass_itr==0 and glance_itr ==0 and t != self.current_task:
                    self.M = self.M_new
                    self.current_task = t
                batch_sz = x.shape[0]
                n_batches = self.args.cifar_batches
                rough_sz = math.ceil(batch_sz/n_batches)
                # the samples of new task to iterate over in inner update trajectory
                # NOTE(review): iterate_till and accuracy_meta_set are never
                # used below — dead locals, confirm and remove.
                iterate_till = 1 #batch_sz
                meta_losses = [0 for _ in range(n_batches)]
                accuracy_meta_set = [0 for _ in range(n_batches)]
                # put some asserts to make sure replay batch size can accomodate old and new samples
                bx, by = None, None
                bx, by, bt = self.getBatch(x.cpu().numpy(), y.cpu().numpy(), t)
                fast_weights = None
                # inner loop/fast updates where learn on 1-2 samples in each inner step
                for i in range(n_batches):
                    batch_x = x[i*rough_sz : (i+1)*rough_sz]
                    batch_y = y[i*rough_sz : (i+1)*rough_sz]
                    fast_weights = self.inner_update(batch_x, fast_weights, batch_y, t)
                    if(pass_itr==0 and glance_itr==0):
                        self.push_to_mem(batch_x, batch_y, torch.tensor(t))
                    # the meta loss is computed at each inner step
                    # as this is shown to work better in Reptile []
                    meta_loss, logits = self.meta_loss(bx, fast_weights, by, bt, t)
                    meta_losses[i] += meta_loss
                self.optimizer.zero_grad()
                meta_loss = sum(meta_losses)/len(meta_losses)
                if torch.isnan(meta_loss):
                    ipdb.set_trace()
                meta_loss.backward()
                torch.nn.utils.clip_grad_norm_(self.net.parameters(), self.args.grad_clip_norm)
                mc_meta_losses[pass_itr] = meta_loss
                # Accumulate this MC sample's gradients into the BGD stats.
                self.optimizer.aggregate_grads(batch_size=batch_sz)
            print_std = False
            if(self.obseve_itr%220==0):
                print_std = True
            self.optimizer.step(print_std = print_std)
        meta_loss_return = sum(mc_meta_losses)/len(mc_meta_losses)
        return meta_loss_return.item()
| 11,558 | 34.897516 | 121 | py |
La-MAML | La-MAML-main/model/gem.py | ### This is a copy of GEM from https://github.com/facebookresearch/GradientEpisodicMemory.
### In order to ensure complete reproducability, we do not change the file and treat it as a baseline.
# Copyright 2019-present, IBM Research
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable
import model.meta.learner as Learner
import model.meta.modelfactory as mf
import numpy as np
import quadprog
# Auxiliary functions useful for GEM's inner optimization.
def compute_offsets(task, nc_per_task, is_cifar):
    """Return the (start, end) column range of the classifier head for `task`.

    In the cifar-style multi-head setting every task owns a contiguous
    window of `nc_per_task` logits; otherwise the full head is shared.
    """
    if not is_cifar:
        return 0, nc_per_task
    start = task * nc_per_task
    end = (task + 1) * nc_per_task
    return start, end
def store_grad(pp, grads, grad_dims, tid):
    """Flatten the current gradients of pp() into column `tid` of `grads`.

    pp: callable yielding the model parameters
    grads: (total_params, n_tasks) buffer; column `tid` is overwritten
    grad_dims: number of elements of each parameter tensor
    tid: task id / column index
    """
    grads[:, tid].fill_(0.0)
    idx = 0
    for p in pp():
        if p.grad is not None:
            # start offset of this parameter in the flattened vector
            start = sum(grad_dims[:idx])
            stop = start + grad_dims[idx]
            grads[start:stop, tid].copy_(p.grad.data.view(-1))
            idx += 1
def overwrite_grad(pp, newgrad, grad_dims):
    """Write a flat gradient vector back into the .grad of each parameter.

    pp: callable yielding the model parameters
    newgrad: flattened (corrected) gradient vector
    grad_dims: number of elements of each parameter tensor
    """
    idx = 0
    for p in pp():
        if p.grad is not None:
            start = sum(grad_dims[:idx])
            stop = start + grad_dims[idx]
            # reshape the slice back to this parameter's shape
            chunk = newgrad[start:stop].contiguous().view(p.grad.data.size())
            p.grad.data.copy_(chunk)
            idx += 1
def project2cone2(gradient, memories, margin=0.5, eps = 1e-3):
    """
    Solves the GEM dual QP described in the paper given a proposed
    gradient "gradient", and a memory of task gradients "memories".
    Overwrites "gradient" with the final projected update.
    input: gradient, p-vector
    input: memories, (t * p)-vector
    output: x, p-vector
    """
    # move to CPU double-precision numpy for the QP solver
    memories_np = memories.cpu().t().double().numpy()
    gradient_np = gradient.cpu().contiguous().view(-1).double().numpy()
    t = memories_np.shape[0]
    # dual QP matrices: minimize 0.5 v^T P v + q^T v  s.t.  G v >= h
    P = np.dot(memories_np, memories_np.transpose())
    # symmetrize and add eps*I so the solver sees a positive-definite matrix
    P = 0.5 * (P + P.transpose()) + np.eye(t) * eps
    q = np.dot(memories_np, gradient_np) * -1
    G = np.eye(t)
    # `margin` slackens the non-negativity constraints on the dual variables
    h = np.zeros(t) + margin
    v = quadprog.solve_qp(P, q, G, h)[0]
    # recover the primal solution: projected gradient = M^T v + g
    x = np.dot(v, memories_np) + gradient_np
    gradient.copy_(torch.Tensor(x).view(-1, 1))
class Net(nn.Module):
    """GEM (Gradient Episodic Memory) continual learner.

    Keeps a per-task ring buffer of past examples. Before every update the
    gradients on each past task's memory are stored; if the current
    gradient conflicts with any of them (negative dot product) it is
    projected via a QP so no past-task loss increases.
    """
    def __init__(self,
                 n_inputs,
                 n_outputs,
                 n_tasks,
                 args):
        super(Net, self).__init__()
        self.args = args
        # slack used in the inequality constraints of the GEM QP
        self.margin = args.memory_strength
        self.is_cifar = ((args.dataset == 'cifar100') or (args.dataset == 'tinyimagenet'))
        nl, nh = args.n_layers, args.n_hiddens
        config = mf.ModelFactory.get_model(model_type = args.arch, sizes = [n_inputs] + [nh] * nl + [n_outputs],
                                                dataset = args.dataset, args=args)
        self.net = Learner.Learner(config, args=args)
        self.netforward = self.net.forward
        self.ce = nn.CrossEntropyLoss()
        self.n_outputs = n_outputs
        # number of gradient updates taken per incoming batch (single-pass setting)
        self.glances = args.glances
        self.opt = optim.SGD(self.parameters(), args.lr)
        self.n_memories = args.n_memories
        self.gpu = args.cuda
        # allocate episodic memory: one ring buffer of examples per task
        self.memory_data = torch.FloatTensor(
            n_tasks, self.n_memories, n_inputs)
        self.memory_labs = torch.LongTensor(n_tasks, self.n_memories)
        if args.cuda:
            self.memory_data = self.memory_data.cuda()
            self.memory_labs = self.memory_labs.cuda()
        # allocate temporary synaptic memory (one flattened-gradient column per task)
        self.grad_dims = []
        for param in self.parameters():
            self.grad_dims.append(param.data.numel())
        self.grads = torch.Tensor(sum(self.grad_dims), n_tasks)
        if args.cuda:
            self.grads = self.grads.cuda()
        # allocate counters
        self.observed_tasks = []
        self.old_task = -1
        self.mem_cnt = 0  # write cursor into the current task's ring buffer
        if self.is_cifar:
            self.nc_per_task = int(n_outputs / n_tasks)
        else:
            self.nc_per_task = n_outputs
        if args.cuda:
            self.cuda()
    def forward(self, x, t):
        """Forward pass; masks logits outside task t's window in the cifar setting."""
        # inputs arrive flattened; reshape back to images for conv nets
        if self.args.dataset == 'tinyimagenet':
            x = x.view(-1, 3, 64, 64)
        elif self.args.dataset == 'cifar100':
            x = x.view(-1, 3, 32, 32)
        output = self.netforward(x)
        if self.is_cifar:
            # make sure we predict classes within the current task
            offset1 = int(t * self.nc_per_task)
            offset2 = int((t + 1) * self.nc_per_task)
            if offset1 > 0:
                output[:, :offset1].data.fill_(-10e10)
            if offset2 < self.n_outputs:
                output[:, offset2:self.n_outputs].data.fill_(-10e10)
        return output
    def observe(self, x, y, t):
        """
        Function equivalent to a single fwd+bkwd loop on one batch
        of x,y,t: inputs, labels, task IDs
        for each batch of (x,y,t) input to this function
        the update is done 'glances' number of times
        since in the single-pass setting, this batch is seen only once
        and so enough updates need to be made on each data point
        """
        x = x.view(x.size(0), -1)
        # update memory
        if t != self.old_task:
            self.observed_tasks.append(t)
            self.old_task = t
        # in the single-pass setting, take multiple glances over every batch
        for pass_itr in range(self.glances):
            # only make changes like pushing to buffer once per batch and not for every glance
            if(pass_itr==0):
                # Update ring buffer storing examples from current task
                bsz = y.data.size(0)
                endcnt = min(self.mem_cnt + bsz, self.n_memories)
                effbsz = endcnt - self.mem_cnt
                self.memory_data[t, self.mem_cnt: endcnt].copy_(
                    x.data[: effbsz])
                if bsz == 1:
                    self.memory_labs[t, self.mem_cnt] = y.data[0]
                else:
                    self.memory_labs[t, self.mem_cnt: endcnt].copy_(
                        y.data[: effbsz])
                self.mem_cnt += effbsz
                # wrap the ring-buffer cursor
                if self.mem_cnt == self.n_memories:
                    self.mem_cnt = 0
            # compute gradient on previous tasks
            if len(self.observed_tasks) > 1:
                for tt in range(len(self.observed_tasks) - 1):
                    self.zero_grad()
                    # fwd/bwd on the examples in the memory
                    past_task = self.observed_tasks[tt]
                    offset1, offset2 = compute_offsets(past_task, self.nc_per_task,
                                                       self.is_cifar)
                    ptloss = self.ce(
                        self.forward(
                            Variable(self.memory_data[past_task]),
                            past_task)[:, offset1: offset2],
                        Variable(self.memory_labs[past_task] - offset1))
                    ptloss.backward()
                    torch.nn.utils.clip_grad_norm_(self.net.parameters(), self.args.grad_clip_norm)
                    store_grad(self.parameters, self.grads, self.grad_dims,
                               past_task)
            # now compute the grad on the current minibatch
            self.zero_grad()
            offset1, offset2 = compute_offsets(t, self.nc_per_task, self.is_cifar)
            loss = self.ce(self.forward(x, t)[:, offset1: offset2], y - offset1)
            loss.backward()
            torch.nn.utils.clip_grad_norm_(self.net.parameters(), self.args.grad_clip_norm)
            # check if gradient violates constraints
            if len(self.observed_tasks) > 1:
                # copy gradient
                store_grad(self.parameters, self.grads, self.grad_dims, t)
                indx = torch.cuda.LongTensor(self.observed_tasks[:-1]) if self.gpu \
                    else torch.LongTensor(self.observed_tasks[:-1])
                # dot products with every past-task gradient; any negative one is a conflict
                dotp = torch.mm(self.grads[:, t].unsqueeze(0),
                                self.grads.index_select(1, indx))
                if (dotp < 0).sum() != 0:
                    project2cone2(self.grads[:, t].unsqueeze(1),
                                  self.grads.index_select(1, indx), self.margin)
                    # copy gradients back
                    overwrite_grad(self.parameters, self.grads[:, t],
                                   self.grad_dims)
            self.opt.step()
        # loss of the last glance
        return loss.item()
| 9,366 | 36.468 | 112 | py |
import random
from random import shuffle
import numpy as np
import ipdb
import math
import torch
from torch.autograd import Variable
import torch.nn as nn
import model.meta.learner as Learner
import model.meta.modelfactory as mf
from scipy.stats import pearsonr
import datetime
class BaseNet(torch.nn.Module):
    """Shared base class for the La-MAML learners.

    Owns the meta-learned network with its per-parameter learnable learning
    rates (``alpha_lr``), one SGD optimizer for the weights and one for the
    learning rates, and a reservoir-sampled replay buffer.
    """

    def __init__(self,
                 n_inputs,
                 n_outputs,
                 n_tasks,
                 args):
        super(BaseNet, self).__init__()
        self.args = args
        nl, nh = args.n_layers, args.n_hiddens
        config = mf.ModelFactory.get_model(model_type = args.arch, sizes = [n_inputs] + [nh] * nl + [n_outputs],
                                           dataset = args.dataset, args=args)
        self.net = Learner.Learner(config, args)
        # define the lr params (one learnable learning rate per parameter tensor)
        self.net.define_task_lr_params(alpha_init = args.alpha_init)
        # separate optimizers: one for the weights, one for the learnable LRs
        self.opt_wt = torch.optim.SGD(list(self.net.parameters()), lr=args.opt_wt)
        self.opt_lr = torch.optim.SGD(list(self.net.alpha_lr.parameters()), lr=args.opt_lr)
        self.epoch = 0
        # replay buffers: M is the frozen buffer of past tasks, M_new
        # accumulates the current task's reservoir sample
        self.M = []
        self.M_new = []
        self.age = 0  # number of stream samples seen so far (drives reservoir sampling)
        # setup losses
        self.loss = torch.nn.CrossEntropyLoss()
        self.is_cifar = ((args.dataset == 'cifar100') or (args.dataset == 'tinyimagenet'))
        self.glances = args.glances
        self.pass_itr = 0
        self.real_epoch = 0
        self.current_task = 0
        self.memories = args.memories
        self.batchSize = int(args.replay_batch_size)
        self.cuda = args.cuda
        if self.cuda:
            self.net = self.net.cuda()
        self.n_outputs = n_outputs

    def push_to_mem(self, batch_x, batch_y, t):
        """
        Reservoir sampling to push subsampled stream
        of data points to replay/memory buffer
        """
        # only sample from the stream on the first glance of the first epoch
        if(self.real_epoch > 0 or self.pass_itr>0):
            return
        batch_x = batch_x.cpu()
        batch_y = batch_y.cpu()
        t = t.cpu()
        for i in range(batch_x.shape[0]):
            self.age += 1
            if len(self.M_new) < self.memories:
                self.M_new.append([batch_x[i], batch_y[i], t])
            else:
                # replace a random slot with probability memories/age
                p = random.randint(0,self.age)
                if p < self.memories:
                    self.M_new[p] = [batch_x[i], batch_y[i], t]

    def getBatch(self, x, y, t, batch_size=None):
        """
        Given the new data points, create a batch of old + new data,
        where old data is sampled from the memory buffer
        """
        if(x is not None):
            mxi = np.array(x)
            myi = np.array(y)
            mti = np.ones(x.shape[0], dtype=int)*t
        else:
            mxi = np.empty( shape=(0, 0) )
            myi = np.empty( shape=(0, 0) )
            mti = np.empty( shape=(0, 0) )
        bxs = []
        bys = []
        bts = []
        # choose which buffer old samples are drawn from
        if self.args.use_old_task_memory and t>0:
            MEM = self.M
        else:
            MEM = self.M_new
        batch_size = self.batchSize if batch_size is None else batch_size
        if len(MEM) > 0:
            order = [i for i in range(0,len(MEM))]
            osize = min(batch_size,len(MEM))
            for j in range(0,osize):
                shuffle(order)
                k = order[j]
                x,y,t = MEM[k]
                xi = np.array(x)
                yi = np.array(y)
                ti = np.array(t)
                bxs.append(xi)
                bys.append(yi)
                bts.append(ti)
        # append the incoming (new-task) samples after the replayed ones
        for j in range(len(myi)):
            bxs.append(mxi[j])
            bys.append(myi[j])
            bts.append(mti[j])
        bxs = Variable(torch.from_numpy(np.array(bxs))).float()
        bys = Variable(torch.from_numpy(np.array(bys))).long().view(-1)
        bts = Variable(torch.from_numpy(np.array(bts))).long().view(-1)
        # handle gpus if specified
        if self.cuda:
            bxs = bxs.cuda()
            bys = bys.cuda()
            bts = bts.cuda()
        return bxs,bys,bts

    def compute_offsets(self, task):
        # mapping from classes [1-100] to their idx within a task
        # (nc_per_task is set by the subclass — TODO confirm)
        offset1 = task * self.nc_per_task
        offset2 = (task + 1) * self.nc_per_task
        return int(offset1), int(offset2)

    def zero_grads(self):
        """Clear gradients on both optimizers, the network and the learnable LRs."""
        if self.args.learn_lr:
            self.opt_lr.zero_grad()
        self.opt_wt.zero_grad()
        self.net.zero_grad()
        self.net.alpha_lr.zero_grad()
import random
import numpy as np
import ipdb
import math
import torch
import torch.nn as nn
from model.lamaml_base import *
class Net(BaseNet):
    """La-MAML learner for task-incremental CIFAR-100 / TinyImagenet.

    Inner loop: fast-weight SGD steps on small slices of the incoming batch
    using the learnable per-parameter learning rates. Outer loop: a
    meta-loss on a replay batch (old + new data) is backpropagated through
    the inner updates to train both the weights and the learning rates.
    """
    def __init__(self,
                 n_inputs,
                 n_outputs,
                 n_tasks,
                 args):
        super(Net, self).__init__(n_inputs,
                                 n_outputs,
                                 n_tasks,
                                 args)
        # classes per task head; kept as float, offsets are int()-ed in compute_offsets
        self.nc_per_task = n_outputs / n_tasks
    def take_loss(self, t, logits, y):
        # compute loss on data from a single task
        offset1, offset2 = self.compute_offsets(t)
        loss = self.loss(logits[:, offset1:offset2], y-offset1)
        return loss
    def take_multitask_loss(self, bt, t, logits, y):
        # compute loss on data from a multiple tasks
        # separate from take_loss() since the output positions for each task's
        # logit vector are different and we only want to compute loss on the relevant positions
        # since this is a task incremental setting
        loss = 0.0
        for i, ti in enumerate(bt):
            offset1, offset2 = self.compute_offsets(ti)
            loss += self.loss(logits[i, offset1:offset2].unsqueeze(0), y[i].unsqueeze(0)-offset1)
        return loss/len(bt)
    def forward(self, x, t):
        """Forward pass; mask logits outside task t's window with a large negative value."""
        output = self.net.forward(x)
        # make sure we predict classes within the current task
        offset1, offset2 = self.compute_offsets(t)
        if offset1 > 0:
            output[:, :offset1].data.fill_(-10e10)
        if offset2 < self.n_outputs:
            output[:, int(offset2):self.n_outputs].data.fill_(-10e10)
        return output
    def meta_loss(self, x, fast_weights, y, bt, t):
        """
        differentiate the loss through the network updates wrt alpha
        """
        offset1, offset2 = self.compute_offsets(t)
        logits = self.net.forward(x, fast_weights)[:, :offset2]
        loss_q = self.take_multitask_loss(bt, t, logits, y)
        return loss_q, logits
    def inner_update(self, x, fast_weights, y, t):
        """
        Update the fast weights using the current samples and return the updated fast
        """
        offset1, offset2 = self.compute_offsets(t)
        logits = self.net.forward(x, fast_weights)[:, :offset2]
        loss = self.take_loss(t, logits, y)
        # first inner step starts from the real (slow) weights
        if fast_weights is None:
            fast_weights = self.net.parameters()
        # NOTE if we want higher order grads to be allowed, change create_graph=False to True
        graph_required = self.args.second_order
        grads = list(torch.autograd.grad(loss, fast_weights, create_graph=graph_required, retain_graph=graph_required))
        for i in range(len(grads)):
            grads[i] = torch.clamp(grads[i], min = -self.args.grad_clip_norm, max = self.args.grad_clip_norm)
        # SGD step using the learnable per-parameter learning rates alpha_lr
        fast_weights = list(
            map(lambda p: p[1][0] - p[0] * p[1][1], zip(grads, zip(fast_weights, self.net.alpha_lr))))
        return fast_weights
    def observe(self, x, y, t):
        """One La-MAML meta-update per glance on the incoming batch; returns the meta-loss."""
        self.net.train()
        for pass_itr in range(self.glances):
            self.pass_itr = pass_itr
            # reshuffle the batch on every glance
            perm = torch.randperm(x.size(0))
            x = x[perm]
            y = y[perm]
            self.epoch += 1
            self.zero_grads()
            # task boundary: freeze the current reservoir as the old-task buffer
            if t != self.current_task:
                self.M = self.M_new.copy()
                self.current_task = t
            batch_sz = x.shape[0]
            n_batches = self.args.cifar_batches
            rough_sz = math.ceil(batch_sz/n_batches)
            fast_weights = None
            meta_losses = [0 for _ in range(n_batches)]
            # get a batch by augmenting incoming data with old task data, used for
            # computing meta-loss
            bx, by, bt = self.getBatch(x.cpu().numpy(), y.cpu().numpy(), t)
            for i in range(n_batches):
                batch_x = x[i*rough_sz : (i+1)*rough_sz]
                batch_y = y[i*rough_sz : (i+1)*rough_sz]
                # assuming labels for inner update are from the same
                fast_weights = self.inner_update(batch_x, fast_weights, batch_y, t)
                # only sample and push to replay buffer once for each task's stream
                # instead of pushing every epoch
                if(self.real_epoch == 0):
                    self.push_to_mem(batch_x, batch_y, torch.tensor(t))
                # meta-loss is evaluated after every inner step
                meta_loss, logits = self.meta_loss(bx, fast_weights, by, bt, t)
                meta_losses[i] += meta_loss
            # Taking the meta gradient step (will update the learning rates)
            self.zero_grads()
            meta_loss = sum(meta_losses)/len(meta_losses)
            meta_loss.backward()
            torch.nn.utils.clip_grad_norm_(self.net.alpha_lr.parameters(), self.args.grad_clip_norm)
            torch.nn.utils.clip_grad_norm_(self.net.parameters(), self.args.grad_clip_norm)
            if self.args.learn_lr:
                self.opt_lr.step()
            # if sync-update is being carried out (as in sync-maml) then update the weights using the optimiser
            # otherwise update the weights with sgd using updated LRs as step sizes
            if(self.args.sync_update):
                self.opt_wt.step()
            else:
                for i,p in enumerate(self.net.parameters()):
                    # using relu on updated LRs to avoid negative values
                    p.data = p.data - p.grad * nn.functional.relu(self.net.alpha_lr[i])
            self.net.zero_grad()
            self.net.alpha_lr.zero_grad()
        # meta-loss of the last glance
        return meta_loss.item()
| 5,732 | 35.987097 | 119 | py |
### This is a pytorch implementation of AGEM based on https://github.com/facebookresearch/agem.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable
import ipdb
import model.meta.learner as Learner
import model.meta.modelfactory as mf
import numpy as np
import random
# Auxiliary functions useful for AGEM's inner optimization.
def compute_offsets(task, nc_per_task, is_cifar):
    """Return the half-open logit range [offset1, offset2) owned by `task`.

    Cifar-style settings use one window of `nc_per_task` outputs per task;
    otherwise every task shares the first `nc_per_task` outputs.
    """
    if is_cifar:
        return task * nc_per_task, (task + 1) * nc_per_task
    return 0, nc_per_task
def store_grad(pp, grads, grad_dims, tid):
    """Copy the current parameter gradients into column `tid` of `grads`.

    pp: callable returning the model parameters
    grads: (sum(grad_dims), n_tasks) tensor; the target column is zeroed first
    grad_dims: element count of each parameter tensor
    tid: task id selecting the column
    """
    grads[:, tid].fill_(0.0)
    pos = 0
    for param in pp():
        if param.grad is not None:
            offset = sum(grad_dims[:pos])
            length = grad_dims[pos]
            grads[offset:offset + length, tid].copy_(param.grad.data.view(-1))
            pos += 1
def overwrite_grad(pp, newgrad, grad_dims):
    """Scatter a flat (corrected) gradient vector back onto each parameter's .grad.

    pp: callable returning the model parameters
    newgrad: flattened gradient after projection
    grad_dims: element count of each parameter tensor
    """
    pos = 0
    for param in pp():
        if param.grad is not None:
            offset = sum(grad_dims[:pos])
            length = grad_dims[pos]
            # restore this slice to the parameter's own shape
            piece = newgrad[offset:offset + length].contiguous().view(
                param.grad.data.size())
            param.grad.data.copy_(piece)
            pos += 1
def projectgrad(gradient, memories, margin=0.5, eps = 1e-3, oiter = 0):
    """A-GEM projection.

    If the proposed gradient conflicts with the average memory gradient
    (negative dot product), subtract its component along that direction,
    overwriting `gradient` in place. `margin` and `eps` are kept for
    interface compatibility; `oiter` throttles diagnostic printing.
    input: gradient, p-vector (p x 1)
    input: memories, (p x t) matrix of past-task gradients
    """
    avg_mem = memories.t().mean(dim=0).unsqueeze(0)
    similarity = torch.nn.functional.cosine_similarity(gradient.t(), avg_mem)
    mem_np = memories.cpu().t().double().numpy()
    grad_np = gradient.cpu().contiguous().view(-1).double().numpy()
    # average the task-gradient rows into a single reference direction
    ref = mem_np.mean(axis=0).reshape(1, mem_np.shape[1])
    ref_mag = np.dot(ref, ref.transpose())
    dotp = np.dot(grad_np.reshape(1, -1), ref.transpose())
    # periodic diagnostics
    if oiter % 100 == 0:
        print('similarity : ', similarity.item())
        print('dotp:', dotp)
    if dotp[0, 0] < 0:
        # remove the conflicting component: g - (g.r / r.r) * r
        proj = grad_np.reshape(1, -1) - ((dotp / ref_mag) * ref)
        gradient.copy_(torch.Tensor(proj).view(-1, 1))
class Net(nn.Module):
    """A-GEM (Averaged GEM) continual learner.

    Like GEM it keeps a per-task ring buffer, but instead of solving a QP
    against every past task it projects the current gradient against the
    *average* of the past-task gradients (see projectgrad). It also keeps
    a reservoir-sampled buffer M alongside the ring buffers.
    """
    def __init__(self,
                 n_inputs,
                 n_outputs,
                 n_tasks,
                 args):
        super(Net, self).__init__()
        self.args = args
        nl, nh = args.n_layers, args.n_hiddens
        # slack parameter passed through to the projection (unused there)
        self.margin = args.memory_strength
        self.is_cifar = ((args.dataset == 'cifar100') or (args.dataset == 'tinyimagenet'))
        # NOTE(review): duplicate assignment of nl, nh — harmless
        nl, nh = args.n_layers, args.n_hiddens
        config = mf.ModelFactory.get_model(model_type = args.arch, sizes = [n_inputs] + [nh] * nl + [n_outputs],
                                                dataset = args.dataset, args=args)
        self.net = Learner.Learner(config, args)
        self.ce = nn.CrossEntropyLoss()
        # NOTE(review): bce appears unused in this class
        self.bce = torch.nn.CrossEntropyLoss()
        self.n_outputs = n_outputs
        self.glances = args.glances
        self.opt = optim.SGD(self.parameters(), args.lr)
        self.n_memories = args.n_memories
        self.gpu = args.cuda
        self.age = 0
        # reservoir-sampled replay buffer (separate from the per-task ring buffers)
        self.M = []
        self.memories = args.memories
        self.grad_align = []
        self.grad_task_align = {}
        self.current_task = 0
        # allocate episodic memory
        self.memory_data = torch.FloatTensor(
            n_tasks, self.n_memories, n_inputs)
        self.memory_labs = torch.LongTensor(n_tasks, self.n_memories)
        if args.cuda:
            self.memory_data = self.memory_data.cuda()
            self.memory_labs = self.memory_labs.cuda()
        # allocate temporary synaptic memory (flattened gradient per task)
        self.grad_dims = []
        for param in self.parameters():
            self.grad_dims.append(param.data.numel())
        self.grads = torch.Tensor(sum(self.grad_dims), n_tasks)
        if args.cuda:
            self.grads = self.grads.cuda()
        # allocate counters
        self.observed_tasks = []
        self.mem_cnt = 0  # write cursor into the current ring buffer
        if self.is_cifar:
            self.nc_per_task = int(n_outputs / n_tasks)
        else:
            self.nc_per_task = n_outputs
        if args.cuda:
            self.cuda()
        self.iter = 0  # global step counter, used to throttle projectgrad prints
    def forward(self, x, t):
        """Forward pass; masks logits outside task t's window in the cifar setting."""
        # inputs arrive flattened; reshape back to images for conv nets
        if self.args.dataset == 'tinyimagenet':
            x = x.view(-1, 3, 64, 64)
        elif self.args.dataset == 'cifar100':
            x = x.view(-1, 3, 32, 32)
        output = self.net.forward(x)
        if self.is_cifar:
            # make sure we predict classes within the current task
            offset1 = int(t * self.nc_per_task)
            offset2 = int((t + 1) * self.nc_per_task)
            if offset1 > 0:
                output[:, :offset1].data.fill_(-10e10)
            if offset2 < self.n_outputs:
                output[:, offset2:self.n_outputs].data.fill_(-10e10)
        return output
    def observe(self, x, y, t):
        """One A-GEM update per glance on the incoming batch; returns the last loss."""
        self.iter +=1
        x = x.view(x.size(0), -1)
        # update memory
        if t != self.current_task:
            self.observed_tasks.append(t)
            self.current_task = t
            self.grad_align.append([])
        for pass_itr in range(self.glances):
            # fill the ring buffer only once per batch, not on every glance
            if(pass_itr==0):
                # Update ring buffer storing examples from current task
                bsz = y.data.size(0)
                endcnt = min(self.mem_cnt + bsz, self.n_memories)
                effbsz = endcnt - self.mem_cnt
                self.memory_data[t, self.mem_cnt: endcnt].copy_(
                    x.data[: effbsz])
                if bsz == 1:
                    self.memory_labs[t, self.mem_cnt] = y.data[0]
                else:
                    self.memory_labs[t, self.mem_cnt: endcnt].copy_(
                        y.data[: effbsz])
                self.mem_cnt += effbsz
                if self.mem_cnt == self.n_memories:
                    self.mem_cnt = 0
            # compute gradient on previous tasks
            if len(self.observed_tasks) > 1:
                for tt in range(len(self.observed_tasks) - 1):
                    self.zero_grad()
                    # fwd/bwd on the examples in the memory
                    past_task = self.observed_tasks[tt]
                    offset1, offset2 = compute_offsets(past_task, self.nc_per_task,
                                                       self.is_cifar)
                    ptloss = self.ce(
                        self.forward(
                            Variable(self.memory_data[past_task]),
                            past_task)[:, offset1: offset2],
                        Variable(self.memory_labs[past_task] - offset1))
                    ptloss.backward()
                    torch.nn.utils.clip_grad_norm_(self.net.parameters(), self.args.grad_clip_norm)
                    store_grad(self.parameters, self.grads, self.grad_dims,
                               past_task)
            # now compute the grad on the current minibatch
            self.zero_grad()
            offset1, offset2 = compute_offsets(t, self.nc_per_task, self.is_cifar)
            loss = self.ce(self.forward(x, t)[:, offset1: offset2], y - offset1)
            loss.backward()
            torch.nn.utils.clip_grad_norm_(self.net.parameters(), self.args.grad_clip_norm)
            # check if gradient violates constraints
            if len(self.observed_tasks) > 1:
                # copy gradient
                store_grad(self.parameters, self.grads, self.grad_dims, t)
                indx = torch.cuda.LongTensor(self.observed_tasks[:-1]) if self.gpu \
                    else torch.LongTensor(self.observed_tasks[:-1])
                # project against the mean of past-task gradients (A-GEM)
                projectgrad(self.grads[:, t].unsqueeze(1),
                            self.grads.index_select(1, indx), self.margin, oiter = self.iter)
                # copy gradients back
                overwrite_grad(self.parameters, self.grads[:, t],
                               self.grad_dims)
            self.opt.step()
            xi = x.data.cpu().numpy()
            yi = y.data.cpu().numpy()
            for i in range(0,x.size()[0]):
                self.age += 1
                # Reservoir sampling memory update:
                if len(self.M) < self.memories:
                    self.M.append([xi[i],yi[i],t])
                else:
                    p = random.randint(0,self.age)
                    if p < self.memories:
                        self.M[p] = [xi[i],yi[i],t]
        # loss of the last glance
        return loss.item()
| 9,569 | 34.576208 | 112 | py |
# An implementation of MER Algorithm 1 from https://openreview.net/pdf?id=B1gTShAct7
# Copyright 2019-present, IBM Research
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable
import numpy as np
import random
from torch.nn.modules.loss import CrossEntropyLoss
from random import shuffle
import sys
import ipdb
from copy import deepcopy
import warnings
import model.meta.learner as Learner
import model.meta.modelfactory as mf
warnings.filterwarnings("ignore")
class Net(nn.Module):
    """MER (Meta-Experience Replay), Algorithm 1.

    For every incoming example it draws replay batches from a reservoir
    buffer, takes several SGD steps, and applies two levels of Reptile-style
    interpolation: a within-batch step (scaled by beta) and an across-batch
    step (scaled by gamma).
    """
    def __init__(self,
                n_inputs,
                n_outputs,
                n_tasks,
                args):
        super(Net, self).__init__()
        self.args = args
        nl, nh = args.n_layers, args.n_hiddens
        self.is_cifar = (args.dataset == 'cifar100' or args.dataset == 'tinyimagenet')
        config = mf.ModelFactory.get_model(args.arch, sizes=[n_inputs] + [nh] * nl + [n_outputs], dataset=args.dataset, args=args)
        self.net = Learner.Learner(config, args=args)
        self.netforward = self.net.forward
        self.bce = torch.nn.CrossEntropyLoss()
        self.n_outputs = n_outputs
        if self.is_cifar:
            # classes per task head; offsets are int()-ed in compute_offsets
            self.nc_per_task = n_outputs / n_tasks
        else:
            self.nc_per_task = n_outputs
        self.opt = optim.SGD(self.parameters(), args.lr)
        self.batchSize = int(args.replay_batch_size)
        self.memories = args.memories
        # number of inner batches drawn per incoming example
        self.steps = int(args.batches_per_example)
        self.beta = args.beta    # within-batch Reptile rate
        self.gamma = args.gamma  # across-batch Reptile rate
        # allocate buffer
        self.M = []
        self.age = 0
        # handle gpus if specified
        self.cuda = args.cuda
        if self.cuda:
            self.net = self.net.cuda()
    def forward(self, x, t):
        """Forward pass; masks logits outside task t's window in the cifar setting."""
        output = self.netforward(x)
        if self.is_cifar:
            offset1, offset2 = self.compute_offsets(t)
            if offset1 > 0:
                output[:, :offset1].data.fill_(-10e10)
            if offset2 < self.n_outputs:
                output[:, int(offset2):self.n_outputs].data.fill_(-10e10)
        return output
    def compute_offsets(self, task):
        """Return the (start, end) logit indices for `task` (full head if not cifar)."""
        if self.is_cifar:
            offset1 = task * self.nc_per_task
            offset2 = (task + 1) * self.nc_per_task
        else:
            offset1 = 0
            offset2 = self.n_outputs
        return int(offset1), int(offset2)
    def getBatch(self,x,y,t):
        """Build a replay batch: the current example (if any) followed by up to
        `batchSize` buffer samples, as lists of single-example tensors."""
        if(x is not None):
            xi = Variable(torch.from_numpy(np.array(x))).float().unsqueeze(0) #.view(1,-1)
            yi = Variable(torch.from_numpy(np.array(y))).long()
            ti = Variable(torch.from_numpy(np.array(t))).long()
            if self.cuda:
                xi = xi.cuda()
                yi = yi.cuda()
                ti = ti.cuda()
            bxs = [xi]
            bys = [yi]
            bts = [ti]
        else:
            bxs = []
            bys = []
            bts = []
        if len(self.M) > 0:
            order = [i for i in range(0,len(self.M))]
            osize = min(self.batchSize,len(self.M))
            for j in range(0,osize):
                shuffle(order)
                k = order[j]
                x,y,t = self.M[k]
                xi = Variable(torch.from_numpy(np.array(x))).float().unsqueeze(0) #.view(1,-1)
                yi = Variable(torch.from_numpy(np.array(y))).long()
                ti = Variable(torch.from_numpy(np.array(t))).long()
                # handle gpus if specified
                if self.cuda:
                    xi = xi.cuda()
                    yi = yi.cuda()
                    ti = ti.cuda()
                bxs.append(xi)
                bys.append(yi)
                bts.append(ti)
        return bxs,bys,bts
    def observe(self, x, y, t):
        """MER update: per example, `steps` replay batches with two-level Reptile interpolation."""
        # step through elements of x
        for i in range(0,x.size()[0]):
            self.age += 1
            xi = x[i].data.cpu().numpy()
            yi = y[i].data.cpu().numpy()
            self.net.zero_grad()
            # snapshot for the across-batch interpolation
            before = deepcopy(self.net.state_dict())
            for step in range(0,self.steps):
                # snapshot for the within-batch interpolation
                weights_before = deepcopy(self.net.state_dict())
                ##Check for nan
                # NOTE(review): comparing a dict to itself is always False here
                # (identical objects), so this NaN guard never fires — confirm intent
                if weights_before != weights_before:
                    ipdb.set_trace()
                # Draw batch from buffer:
                bxs, bys, bts = self.getBatch(xi,yi,t)
                loss = 0.0
                total_loss = 0.0
                for idx in range(len(bxs)):
                    self.net.zero_grad()
                    bx = bxs[idx]
                    by = bys[idx]
                    bt = bts[idx]
                    if self.is_cifar:
                        # score only the sample's own task window
                        offset1, offset2 = self.compute_offsets(bt)
                        prediction = (self.netforward(bx)[:, offset1:offset2])
                        loss = self.bce(prediction,
                                        by.unsqueeze(0)-offset1)
                    else:
                        prediction = self.forward(bx,0)
                        loss = self.bce(prediction, by.unsqueeze(0))
                    if torch.isnan(loss):
                        ipdb.set_trace()
                    loss.backward()
                    torch.nn.utils.clip_grad_norm_(self.net.parameters(), self.args.grad_clip_norm)
                    self.opt.step()
                    total_loss += loss.item()
                weights_after = self.net.state_dict()
                # NOTE(review): same always-false self-comparison as above
                if weights_after != weights_after:
                    ipdb.set_trace()
                # Within batch Reptile meta-update:
                self.net.load_state_dict({name : weights_before[name] + ((weights_after[name] - weights_before[name]) * self.beta) for name in weights_before})
            after = self.net.state_dict()
            # Across batch Reptile meta-update:
            self.net.load_state_dict({name : before[name] + ((after[name] - before[name]) * self.gamma) for name in before})
            # Reservoir sampling memory update:
            if len(self.M) < self.memories:
                self.M.append([xi,yi,t])
            else:
                p = random.randint(0,self.age)
                if p < self.memories:
                    self.M[p] = [xi,yi,t]
        # NOTE(review): total_loss is reset every step, so this is the last
        # step's accumulated loss divided by `steps` — confirm intended metric
        return total_loss/self.steps
| 6,478 | 31.888325 | 159 | py |
import torch
import numpy as np
import random
import model.meta.learner as Learner
import model.meta.modelfactory as mf
import ipdb
import sys
if not sys.warnoptions:
import warnings
warnings.simplefilter("once")
"""
Multi task
big batch size, set increment 100 so that it is treated as 1 task with all classes in the dataset
inference time for acc eval, use offsets
"""
class Net(torch.nn.Module):
def __init__(self,
n_inputs,
n_outputs,
n_tasks,
args):
super(Net, self).__init__()
self.args = args
self.nt = n_tasks
self.n_feat = n_outputs
self.n_classes = n_outputs
arch = args.arch
nl, nh = args.n_layers, args.n_hiddens
config = mf.ModelFactory.get_model(model_type = arch, sizes = [n_inputs] + [nh] * nl + [n_outputs],
dataset = args.dataset, args=args)
self.net = Learner.Learner(config, args)
# setup optimizer
self.opt = torch.optim.SGD(self.parameters(), lr=args.lr)
# setup losses
self.loss = torch.nn.CrossEntropyLoss()
self.gpu = args.cuda
self.nc_per_task = int(n_outputs / n_tasks)
self.n_outputs = n_outputs
def compute_offsets(self, task):
offset1 = task * self.nc_per_task
offset2 = (task + 1) * self.nc_per_task
return int(offset1), int(offset2)
def take_multitask_loss(self, bt, logits, y):
loss = 0.0
for i, ti in enumerate(bt):
offset1, offset2 = self.compute_offsets(ti)
loss += self.loss(logits[i, offset1:offset2].unsqueeze(0), y[i].unsqueeze(0)-offset1)
return loss/len(bt)
def forward(self, x, t):
output = self.net.forward(x)
# make sure we predict classes within the current task
if torch.unique(t).shape[0] == 1:
offset1, offset2 = self.compute_offsets(t[0].item())
if offset1 > 0:
output[:, :offset1].data.fill_(-10e10)
if offset2 < self.n_outputs:
output[:, int(offset2):self.n_outputs].data.fill_(-10e10)
else:
for i in range(len(t)):
offset1, offset2 = self.compute_offsets(t[i])
if offset1 > 0:
output[i, :offset1].data.fill_(-10e10)
if offset2 < self.n_outputs:
output[i, int(offset2):self.n_outputs].data.fill_(-10e10)
return output
def observe(self, x, y, t):
self.net.train()
self.net.zero_grad()
logits = self.net.forward(x)
loss = self.take_multitask_loss(t, logits, y)
loss.backward()
self.opt.step()
return loss.item() | 2,878 | 30.637363 | 107 | py |
# An implementation of Experience Replay (ER) with reservoir sampling and without using tasks from Algorithm 4 of https://openreview.net/pdf?id=B1gTShAct7
# Copyright 2019-present, IBM Research
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable
import numpy as np
import random
from torch.nn.modules.loss import CrossEntropyLoss
from random import shuffle
import sys
import warnings
import math
import model.meta.modelfactory as mf
import model.meta.learner as Learner
warnings.filterwarnings("ignore")
class Net(nn.Module):
    def __init__(self,
                 n_inputs,
                 n_outputs,
                 n_tasks,
                 args):
        """Build the ER learner: network, loss, replay buffer and optimizer(s).

        If args.learn_lr is set, also creates learnable per-parameter
        learning rates (alpha_lr) with their own optimizer (la-ER variant).
        """
        super(Net, self).__init__()
        self.args = args
        nl, nh = args.n_layers, args.n_hiddens
        config = mf.ModelFactory.get_model(model_type = args.arch, sizes = [n_inputs] + [nh] * nl + [n_outputs],
                                                dataset = args.dataset, args=args)
        self.net = Learner.Learner(config, args)
        self.opt_wt = optim.SGD(self.parameters(), lr=args.lr)
        if self.args.learn_lr:
            # learnable per-parameter learning rates + their optimizer
            self.net.define_task_lr_params(alpha_init = args.alpha_init)
            self.opt_lr = torch.optim.SGD(list(self.net.alpha_lr.parameters()), lr=args.opt_lr)
        self.loss = CrossEntropyLoss()
        self.is_cifar = ((args.dataset == 'cifar100') or (args.dataset == 'tinyimagenet'))
        # number of replay updates per incoming batch
        self.glances = args.glances
        self.current_task = 0
        self.memories = args.memories
        self.batchSize = int(args.replay_batch_size)
        # allocate buffer
        self.M = []
        self.age = 0  # stream samples seen (drives reservoir sampling)
        # handle gpus if specified
        self.cuda = args.cuda
        if self.cuda:
            self.net = self.net.cuda()
        self.n_outputs = n_outputs
        if self.is_cifar:
            self.nc_per_task = int(n_outputs / n_tasks)
        else:
            self.nc_per_task = n_outputs
def compute_offsets(self, task):
offset1 = task * self.nc_per_task
offset2 = (task + 1) * self.nc_per_task
return int(offset1), int(offset2)
def take_multitask_loss(self, bt, logits, y):
loss = 0.0
for i, ti in enumerate(bt):
offset1, offset2 = self.compute_offsets(ti)
loss += self.loss(logits[i, offset1:offset2].unsqueeze(0), y[i].unsqueeze(0)-offset1)
return loss/len(bt)
def forward(self, x, t):
output = self.net.forward(x)
if self.is_cifar:
# make sure we predict classes within the current task
offset1, offset2 = self.compute_offsets(t)
if offset1 > 0:
output[:, :offset1].data.fill_(-10e10)
if offset2 < self.n_outputs:
output[:, offset2:self.n_outputs].data.fill_(-10e10)
return output
def getBatch(self, x, y, t):
if(x is not None):
mxi = np.array(x)
myi = np.array(y)
mti = np.ones(x.shape[0], dtype=int)*t
else:
mxi = np.empty( shape=(0, 0) )
myi = np.empty( shape=(0, 0) )
mti = np.empty( shape=(0, 0) )
bxs = []
bys = []
bts = []
if len(self.M) > 0:
order = [i for i in range(0,len(self.M))]
osize = min(self.batchSize,len(self.M))
for j in range(0,osize):
shuffle(order)
k = order[j]
x,y,t = self.M[k]
xi = np.array(x)
yi = np.array(y)
ti = np.array(t)
bxs.append(xi)
bys.append(yi)
bts.append(ti)
for i in range(len(myi)):
bxs.append(mxi[i])
bys.append(myi[i])
bts.append(mti[i])
bxs = Variable(torch.from_numpy(np.array(bxs))).float()
bys = Variable(torch.from_numpy(np.array(bys))).long().view(-1)
bts = Variable(torch.from_numpy(np.array(bts))).long().view(-1)
# handle gpus if specified
if self.cuda:
bxs = bxs.cuda()
bys = bys.cuda()
bts = bts.cuda()
return bxs,bys,bts
    def observe(self, x, y, t):
        """Consume a training batch (x, y) from task `t`: run a replay-based
        update (la_ER or plain ER depending on args.learn_lr), then insert the
        raw samples into the reservoir memory. Returns the scalar loss."""
        ### step through elements of x
        xi = x.data.cpu().numpy()
        yi = y.data.cpu().numpy()
        if t != self.current_task:
            self.current_task = t
        if self.args.learn_lr:
            # Ablation: meta-learn per-parameter LRs while weights use plain ER.
            loss = self.la_ER(x, y, t)
        else:
            loss = self.ER(xi, yi, t)
        for i in range(0, x.size()[0]):
            self.age += 1
            # Reservoir sampling memory update:
            if len(self.M) < self.memories:
                self.M.append([xi[i], yi[i], t])
            else:
                # NOTE(review): randint is inclusive of self.age, so the keep
                # probability is memories/(age+1) rather than the textbook
                # memories/age — confirm this off-by-one is intended.
                p = random.randint(0,self.age)
                if p < self.memories:
                    self.M[p] = [xi[i], yi[i], t]
        return loss.item()
def ER(self, x, y, t):
for pass_itr in range(self.glances):
self.net.zero_grad()
# Draw batch from buffer:
bx,by,bt = self.getBatch(x,y,t)
bx = bx.squeeze()
prediction = self.net.forward(bx)
loss = self.take_multitask_loss(bt, prediction, by)
loss.backward()
torch.nn.utils.clip_grad_norm_(self.net.parameters(), self.args.grad_clip_norm)
self.opt_wt.step()
return loss
    def inner_update(self, x, fast_weights, y, t):
        """
        Take one inner-loop step on (x, y) for task `t`, scaling each gradient
        by the learnable per-parameter LRs in `self.net.alpha_lr`.
        `fast_weights=None` means start from the current network parameters.
        Returns (updated fast weights, scalar loss).
        """
        if self.is_cifar:
            # Restrict logits to task t's class slice and shift labels into it.
            offset1, offset2 = self.compute_offsets(t)
            logits = self.net.forward(x, fast_weights)[:, :offset2]
            loss = self.loss(logits[:, offset1:offset2], y-offset1)
        else:
            logits = self.net.forward(x, fast_weights)
            loss = self.loss(logits, y)
        if fast_weights is None:
            fast_weights = self.net.parameters()
        # Second-order mode keeps the graph so gradients can flow through the
        # inner step when the meta-loss is backpropagated.
        graph_required = self.args.second_order
        grads = list(torch.autograd.grad(loss, fast_weights, create_graph=graph_required, retain_graph=graph_required))
        for i in range(len(grads)):
            # Element-wise clamp (not a norm clip) of the inner-loop gradients.
            grads[i] = torch.clamp(grads[i], min = -self.args.grad_clip_norm, max = self.args.grad_clip_norm)
        # w' = w - alpha * grad, with a learned alpha tensor per parameter.
        fast_weights = list(
            map(lambda p: p[1][0] - p[0] * p[1][1], zip(grads, zip(fast_weights, self.net.alpha_lr))))
        return fast_weights, loss.item()
def la_ER(self, x, y, t):
"""
this ablation tests whether it suffices to just do the learning rate modulation
guided by gradient alignment + clipping (that La-MAML does implciitly through autodiff)
and use it with ER (therefore no meta-learning for the weights)
"""
for pass_itr in range(self.glances):
perm = torch.randperm(x.size(0))
x = x[perm]
y = y[perm]
batch_sz = x.shape[0]
n_batches = self.args.cifar_batches
rough_sz = math.ceil(batch_sz/n_batches)
fast_weights = None
meta_losses = [0 for _ in range(n_batches)]
bx, by, bt = self.getBatch(x.cpu().numpy(), y.cpu().numpy(), t)
bx = bx.squeeze()
for i in range(n_batches):
batch_x = x[i*rough_sz : (i+1)*rough_sz]
batch_y = y[i*rough_sz : (i+1)*rough_sz]
# assuming labels for inner update are from the same
fast_weights, inner_loss = self.inner_update(batch_x, fast_weights, batch_y, t)
prediction = self.net.forward(bx, fast_weights)
meta_loss = self.take_multitask_loss(bt, prediction, by)
meta_losses[i] += meta_loss
# update alphas
self.net.zero_grad()
self.opt_lr.zero_grad()
meta_loss = meta_losses[-1] #sum(meta_losses)/len(meta_losses)
meta_loss.backward()
torch.nn.utils.clip_grad_norm_(self.net.parameters(), self.args.grad_clip_norm)
torch.nn.utils.clip_grad_norm_(self.net.alpha_lr.parameters(), self.args.grad_clip_norm)
# update the LRs (guided by meta-loss, but not the weights)
self.opt_lr.step()
# update weights
self.net.zero_grad()
# compute ER loss for network weights
prediction = self.net.forward(bx)
loss = self.take_multitask_loss(bt, prediction, by)
loss.backward()
torch.nn.utils.clip_grad_norm_(self.net.parameters(), self.args.grad_clip_norm)
# update weights with grad from simple ER loss
# and LRs obtained from meta-loss guided by old and new tasks
for i,p in enumerate(self.net.parameters()):
p.data = p.data - (p.grad * nn.functional.relu(self.net.alpha_lr[i]))
self.net.zero_grad()
self.net.alpha_lr.zero_grad()
return loss | 9,335 | 32.342857 | 154 | py |
La-MAML | La-MAML-main/model/icarl.py | # Copyright 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch
import numpy as np
import random
import model.meta.learner as Learner
import model.meta.modelfactory as mf
import sys
if not sys.warnoptions:
import warnings
warnings.simplefilter("once")
class Net(torch.nn.Module):
    # Re-implementation of
    # S.-A. Rebuffi, A. Kolesnikov, G. Sperl, and C. H. Lampert.
    # iCaRL: Incremental classifier and representation learning.
    # CVPR, 2017.

    def __init__(self,
                 n_inputs,
                 n_outputs,
                 n_tasks,
                 args):
        """Build the iCaRL learner: backbone network, SGD optimizer,
        classification + distillation losses, and per-class exemplar memory."""
        super(Net, self).__init__()
        self.args = args
        self.nt = n_tasks
        self.reg = args.memory_strength
        self.n_memories = args.n_memories
        self.num_exemplars = 0
        self.n_feat = n_outputs
        self.n_classes = n_outputs
        self.samples_per_task = args.samples_per_task * (1.0 - args.validation)
        if self.samples_per_task <= 0:
            # Bugfix: the original called an undefined `error(...)` here, which
            # would itself crash with a NameError instead of the intended message.
            raise ValueError('set explicitly args.samples_per_task')
        self.examples_seen = 0
        self.glances = args.glances

        # setup network
        nl, nh = args.n_layers, args.n_hiddens
        config = mf.ModelFactory.get_model(model_type = args.arch, sizes = [n_inputs] + [nh] * nl + [n_outputs],
                                                dataset = args.dataset, args=args)
        self.net = Learner.Learner(config, args)

        # setup optimizer
        self.opt = torch.optim.SGD(self.parameters(), lr=args.lr)

        # setup losses
        self.bce = torch.nn.CrossEntropyLoss()
        self.kl = torch.nn.KLDivLoss()  # for distillation
        self.lsm = torch.nn.LogSoftmax(dim=1)
        self.sm = torch.nn.Softmax(dim=1)

        # memory
        self.memx = None  # stores raw inputs, PxD
        self.memy = None
        self.mem_class_x = {}  # stores exemplars class by class
        self.mem_class_y = {}

        self.gpu = args.cuda
        self.nc_per_task = int(n_outputs / n_tasks)
        self.n_outputs = n_outputs

    def netforward(self, x):
        """Reshape flat inputs into images for image datasets, then run the net."""
        if self.args.dataset == 'tinyimagenet':
            x = x.view(-1, 3, 64, 64)
        elif self.args.dataset == 'cifar100':
            x = x.view(-1, 3, 32, 32)
        return self.net.forward(x)

    def compute_offsets(self, task):
        """Return the [start, end) slice of output logits owned by `task`."""
        offset1 = task * self.nc_per_task
        offset2 = (task + 1) * self.nc_per_task
        return int(offset1), int(offset2)

    def forward(self, x, t):
        """Nearest-class-mean prediction over the exemplar sets of task `t`.

        Returns a 1-of-C one-hot tensor of shape (ns, n_classes). If no
        exemplars exist yet for task t, returns a uniform distribution over
        that task's classes.
        """
        # nearest neighbor
        nd = self.n_feat
        ns = x.size(0)
        if t * self.nc_per_task not in self.mem_class_x.keys():
            # no exemplar in memory yet, output uniform distr. over classes in
            # task t above, we check presence of first class for this task, we
            # should check them all
            out = torch.Tensor(ns, self.n_classes).fill_(-10e10)
            out[:, int(t * self.nc_per_task): int((t + 1) * self.nc_per_task)].fill_(
                1.0 / self.nc_per_task)
            if self.gpu:
                out = out.cuda()
            return out
        means = torch.ones(self.nc_per_task, nd) * float('inf')
        if self.gpu:
            means = means.cuda()
        offset1, offset2 = self.compute_offsets(t)
        # Class means are recomputed from the stored exemplars on every call.
        for cc in range(offset1, offset2):
            means[cc -
                  offset1] =self.netforward(self.mem_class_x[cc]).data.mean(0)
        classpred = torch.LongTensor(ns)
        preds = self.netforward(x).data.clone()
        for ss in range(ns):
            dist = (means - preds[ss].expand(self.nc_per_task, nd)).norm(2, 1)
            _, ii = dist.min(0)
            ii = ii.squeeze()
            classpred[ss] = ii.item() + offset1
        out = torch.zeros(ns, self.n_classes)
        if self.gpu:
            out = out.cuda()
        for ss in range(ns):
            out[ss, classpred[ss]] = 1
        return out  # return 1-of-C code, ns x nc

    def forward_training(self, x, t):
        """Forward pass used during training: raw logits with everything
        outside task t's class range masked to a large negative value."""
        output = self.netforward(x)
        # make sure we predict classes within the current task
        offset1, offset2 = self.compute_offsets(t)
        # zero out all the logits outside the task's range
        # since the output vector from the model is of dimension (num_tasks * num_classes_per_task)
        if offset1 > 0:
            output[:, :offset1].data.fill_(-10e10)
        if offset2 < self.n_outputs:
            output[:, offset2:self.n_outputs].data.fill_(-10e10)
        return output

    def observe(self, x, y, t):
        """Train on batch (x, y) from task `t`: CE loss on the current task
        plus KL-distillation against stored exemplar outputs of past tasks.
        At a task boundary, rebuild the per-class exemplar sets (herding).
        Returns the scalar loss."""
        x = x.view(x.size(0), -1)
        self.net.train()

        for pass_itr in range(self.glances):
            # only make changes like pushing to buffer once per batch and not for every glance
            if(pass_itr==0):
                self.examples_seen += x.size(0)
                if self.examples_seen < self.samples_per_task:
                    if self.memx is None:
                        self.memx = x.data.clone()
                        self.memy = y.data.clone()
                    else:
                        self.memx = torch.cat((self.memx, x.data.clone()))
                        self.memy = torch.cat((self.memy, y.data.clone()))

            self.net.zero_grad()
            offset1, offset2 = self.compute_offsets(t)
            loss = self.bce((self.netforward(x)[:, offset1: offset2]),
                            y - offset1)
            if self.num_exemplars > 0:
                # distillation
                for tt in range(t):
                    # first generate a minibatch with one example per class from
                    # previous tasks
                    inp_dist = torch.zeros(self.nc_per_task, x.size(1))
                    target_dist = torch.zeros(self.nc_per_task, self.n_feat)
                    offset1, offset2 = self.compute_offsets(tt)
                    if self.gpu:
                        inp_dist = inp_dist.cuda()
                        target_dist = target_dist.cuda()
                    for cc in range(self.nc_per_task):
                        indx = random.randint(0, len(self.mem_class_x[cc + offset1]) - 1)
                        inp_dist[cc] = self.mem_class_x[cc + offset1][indx].clone()
                        target_dist[cc] = self.mem_class_y[cc +
                                                           offset1][indx].clone()
                    # Add distillation loss
                    loss += self.reg * self.kl(
                        self.lsm(self.netforward(inp_dist)
                                 [:, offset1: offset2]),
                        self.sm(target_dist[:, offset1: offset2])) * self.nc_per_task
            # bprop and update
            loss.backward()
            torch.nn.utils.clip_grad_norm_(self.net.parameters(), self.args.grad_clip_norm)
            self.opt.step()

        # check whether this is the last minibatch of the current task
        # We assume only 1 epoch!
        if self.examples_seen == self.args.n_epochs * self.samples_per_task:
            self.examples_seen = 0
            # get labels from previous task; we assume labels are consecutive
            if self.gpu:
                all_labs = torch.LongTensor(np.unique(self.memy.cpu().numpy()))
            else:
                all_labs = torch.LongTensor(np.unique(self.memy.numpy()))
            num_classes = all_labs.size(0)
            assert(num_classes == self.nc_per_task)
            # Reduce exemplar set by updating value of num. exemplars per class
            self.num_exemplars = int(self.n_memories /
                                     (num_classes + len(self.mem_class_x.keys())))
            offset1, offset2 = self.compute_offsets(t)
            for ll in range(num_classes):
                # Bugfix: the original unconditionally called .cuda() here,
                # crashing on CPU-only runs; guard on self.gpu like elsewhere.
                lab = all_labs[ll].cuda() if self.gpu else all_labs[ll]
                indxs = (self.memy == lab).nonzero().squeeze()
                cdata = self.memx.index_select(0, indxs)

                # Construct exemplar set for last task (herding: greedily pick
                # examples whose running mean best matches the class mean).
                mean_feature = self.netforward(cdata)[
                    :, offset1: offset2].data.clone().mean(0)
                nd = self.nc_per_task
                exemplars = torch.zeros(self.num_exemplars, x.size(1))
                if self.gpu:
                    exemplars = exemplars.cuda()
                ntr = cdata.size(0)
                # used to keep track of which examples we have already used
                taken = torch.zeros(ntr)
                model_output = self.netforward(cdata)[
                    :, offset1: offset2].data.clone()
                for ee in range(self.num_exemplars):
                    prev = torch.zeros(1, nd)
                    if self.gpu:
                        prev = prev.cuda()
                    if ee > 0:
                        prev = self.netforward(exemplars[:ee])[
                            :, offset1: offset2].data.clone().sum(0)
                    cost = (mean_feature.expand(ntr, nd) - (model_output
                                                            + prev.expand(ntr, nd)) / (ee + 1)).norm(2, 1).squeeze()
                    _, indx = cost.sort(0)
                    winner = 0
                    while winner < indx.size(0) and taken[indx[winner]] == 1:
                        winner += 1
                    if winner < indx.size(0):
                        taken[indx[winner]] = 1
                        exemplars[ee] = cdata[indx[winner]].clone()
                    else:
                        exemplars = exemplars[:indx.size(0), :].clone()
                        self.num_exemplars = indx.size(0)
                        break
                # update memory with exemplars
                self.mem_class_x[lab.item()] = exemplars.clone()

            # recompute outputs for distillation purposes
            for cc in self.mem_class_x.keys():
                self.mem_class_x[cc] = self.mem_class_x[cc][:self.num_exemplars]
                self.mem_class_y[cc] = self.netforward(
                    self.mem_class_x[cc]).data.clone()
            self.memx = None
            self.memy = None
            print(len(self.mem_class_x[0]))
        return loss.item()
La-MAML | La-MAML-main/model/meta/learner.py | import math
import os
import sys
import traceback
import numpy as np
import ipdb
import torch
from torch import nn
from torch.nn import functional as F
class Learner(nn.Module):
    """Functional-style network: all trainable tensors live in `self.vars` and
    the topology is described by `config`, a list of (name, param, extra_name)
    tuples. `forward` can run with externally supplied fast weights, enabling
    MAML-style inner-loop updates.

    Bugfix vs. original: layer-name comparisons used `is` (identity), which
    only works because CPython interns short string literals and breaks for
    runtime-built strings; they now use `==`. The bare `except:` in forward()
    was narrowed to `except Exception:`.
    """

    def __init__(self, config, args = None):
        """
        :param config: network config file, type:list of (string, list)
        :param imgc: 1 or 3
        :param imgsz: 28 or 84
        """
        super(Learner, self).__init__()

        self.config = config
        self.tf_counter = 0
        self.args = args

        # this dict contains all tensors needed to be optimized
        self.vars = nn.ParameterList()
        # running_mean and running_var
        self.vars_bn = nn.ParameterList()
        self.names = []

        for i, (name, param, extra_name) in enumerate(self.config):
            if name == 'conv2d':
                # [ch_out, ch_in, kernelsz, kernelsz]
                if(self.args.xav_init):
                    w = nn.Parameter(torch.ones(*param[:4]))
                    b = nn.Parameter(torch.zeros(param[0]))
                    torch.nn.init.xavier_normal_(w.data)
                    b.data.normal_(0, math.sqrt(2)/math.sqrt(1+9*b.data.shape[0]))
                    self.vars.append(w)
                    self.vars.append(b)
                else:
                    w = nn.Parameter(torch.ones(*param[:4]))
                    # gain=1 according to cbfin's implementation
                    torch.nn.init.kaiming_normal_(w)
                    self.vars.append(w)
                    # [ch_out]
                    self.vars.append(nn.Parameter(torch.zeros(param[0])))

            elif name == 'convt2d':
                # [ch_in, ch_out, kernelsz, kernelsz, stride, padding]
                w = nn.Parameter(torch.ones(*param[:4]))
                # gain=1 according to cbfin's implementation
                torch.nn.init.kaiming_normal_(w)
                self.vars.append(w)
                # [ch_in, ch_out]
                self.vars.append(nn.Parameter(torch.zeros(param[1])))

            elif name == 'linear':
                if(self.args.xav_init):
                    w = nn.Parameter(torch.ones(*param))
                    torch.nn.init.xavier_normal_(w.data)
                    self.vars.append(w)
                else:
                    # [ch_out, ch_in]
                    w = nn.Parameter(torch.ones(*param))
                    # gain=1 according to cbfinn's implementation
                    torch.nn.init.kaiming_normal_(w)
                    self.vars.append(w)
                # [ch_out]
                self.vars.append(nn.Parameter(torch.zeros(param[0])))

            elif name == 'cat':
                pass
            elif name == 'cat_start':
                pass
            elif name == "rep":
                pass
            elif name in ["residual3", "residual5", "in"]:
                pass
            elif name == 'bn':
                # [ch_out]
                w = nn.Parameter(torch.ones(param[0]))
                self.vars.append(w)
                # [ch_out]
                self.vars.append(nn.Parameter(torch.zeros(param[0])))

                # must set requires_grad=False
                running_mean = nn.Parameter(torch.zeros(param[0]), requires_grad=False)
                running_var = nn.Parameter(torch.ones(param[0]), requires_grad=False)
                self.vars_bn.extend([running_mean, running_var])

            elif name in ['tanh', 'relu', 'upsample', 'avg_pool2d', 'max_pool2d',
                          'flatten', 'reshape', 'leakyrelu', 'sigmoid']:
                continue
            else:
                raise NotImplementedError

    def extra_repr(self):
        """Human-readable summary of the layer config (one line per layer)."""
        info = ''

        for name, param, extra_name in self.config:
            if name == 'conv2d':
                tmp = 'conv2d:(ch_in:%d, ch_out:%d, k:%dx%d, stride:%d, padding:%d)' \
                      % (param[1], param[0], param[2], param[3], param[4], param[5],)
                info += tmp + '\n'

            elif name == 'convt2d':
                tmp = 'convTranspose2d:(ch_in:%d, ch_out:%d, k:%dx%d, stride:%d, padding:%d)' \
                      % (param[0], param[1], param[2], param[3], param[4], param[5],)
                info += tmp + '\n'

            elif name == 'linear':
                tmp = 'linear:(in:%d, out:%d)' % (param[1], param[0])
                info += tmp + '\n'

            elif name == 'leakyrelu':
                tmp = 'leakyrelu:(slope:%f)' % (param[0])
                info += tmp + '\n'

            elif name == 'cat':
                tmp = 'cat'
                info += tmp + "\n"
            elif name == 'cat_start':
                tmp = 'cat_start'
                info += tmp + "\n"

            elif name == 'rep':
                tmp = 'rep'
                info += tmp + "\n"

            elif name == 'avg_pool2d':
                tmp = 'avg_pool2d:(k:%d, stride:%d, padding:%d)' % (param[0], param[1], param[2])
                info += tmp + '\n'
            elif name == 'max_pool2d':
                tmp = 'max_pool2d:(k:%d, stride:%d, padding:%d)' % (param[0], param[1], param[2])
                info += tmp + '\n'
            elif name in ['flatten', 'tanh', 'relu', 'upsample', 'reshape', 'sigmoid', 'use_logits', 'bn']:
                tmp = name + ':' + str(tuple(param))
                info += tmp + '\n'
            else:
                raise NotImplementedError

        return info

    def forward(self, x, vars=None, bn_training=False, feature=False):
        """
        This function can be called by finetunning, however, in finetunning, we dont wish to update
        running_mean/running_var. Thought weights/bias of bn is updated, it has been separated by fast_weights.
        Indeed, to not update running_mean/running_var, we need set update_bn_statistics=False
        but weight/bias will be updated and not dirty initial theta parameters via fast_weiths.
        :param x: [b, 1, 28, 28]
        :param vars: optional list of fast weights to use instead of self.vars
        :param bn_training: set False to not update
        :param feature: if True, return the activations at the 'rep' marker
        :return: x, loss, likelihood, kld
        """

        cat_var = False
        cat_list = []

        if vars is None:
            vars = self.vars

        idx = 0
        bn_idx = 0

        try:
            for (name, param, extra_name) in self.config:
                if name == 'conv2d':
                    w, b = vars[idx], vars[idx + 1]
                    x = F.conv2d(x, w, b, stride=param[4], padding=param[5])
                    idx += 2

                elif name == 'convt2d':
                    w, b = vars[idx], vars[idx + 1]
                    x = F.conv_transpose2d(x, w, b, stride=param[4], padding=param[5])
                    idx += 2

                elif name == 'linear':
                    if extra_name == 'cosine':
                        # Cosine classifier: normalize both weights and inputs.
                        w = F.normalize(vars[idx])
                        x = F.normalize(x)
                        x = F.linear(x, w)
                        idx += 1
                    else:
                        w, b = vars[idx], vars[idx + 1]
                        x = F.linear(x, w, b)
                        idx += 2

                    if cat_var:
                        cat_list.append(x)

                elif name == 'rep':
                    # Marker for the representation layer.
                    if feature:
                        return x

                elif name == "cat_start":
                    cat_var = True
                    cat_list = []

                elif name == "cat":
                    cat_var = False
                    x = torch.cat(cat_list, dim=1)

                elif name == 'bn':
                    w, b = vars[idx], vars[idx + 1]
                    running_mean, running_var = self.vars_bn[bn_idx], self.vars_bn[bn_idx + 1]
                    x = F.batch_norm(x, running_mean, running_var, weight=w, bias=b, training=bn_training)
                    idx += 2
                    bn_idx += 2

                elif name == 'flatten':
                    x = x.view(x.size(0), -1)

                elif name == 'reshape':
                    # [b, 8] => [b, 2, 2, 2]
                    x = x.view(x.size(0), *param)

                elif name == 'relu':
                    x = F.relu(x, inplace=param[0])

                elif name == 'leakyrelu':
                    x = F.leaky_relu(x, negative_slope=param[0], inplace=param[1])

                elif name == 'tanh':
                    x = F.tanh(x)

                elif name == 'sigmoid':
                    x = torch.sigmoid(x)

                elif name == 'upsample':
                    x = F.upsample_nearest(x, scale_factor=param[0])

                elif name == 'max_pool2d':
                    x = F.max_pool2d(x, param[0], param[1], param[2])

                elif name == 'avg_pool2d':
                    x = F.avg_pool2d(x, param[0], param[1], param[2])

                else:
                    print(name)
                    raise NotImplementedError

        except Exception:
            # Debug hook kept from the original implementation.
            traceback.print_exc(file=sys.stdout)
            ipdb.set_trace()

        # make sure variable is used properly
        assert idx == len(vars)
        assert bn_idx == len(self.vars_bn)

        return x

    def zero_grad(self, vars=None):
        """Zero the gradients of `vars` (or of self.vars when None).

        :param vars: optional explicit list of parameters
        :return: None
        """
        with torch.no_grad():
            if vars is None:
                for p in self.vars:
                    if p.grad is not None:
                        p.grad.zero_()
            else:
                for p in vars:
                    if p.grad is not None:
                        p.grad.zero_()

    def define_task_lr_params(self, alpha_init=1e-3):
        """Create one learnable LR tensor per parameter, initialized to alpha_init."""
        self.alpha_lr = nn.ParameterList([])
        self.lr_name = []
        for n, p in self.named_parameters():
            self.lr_name.append(n)

        for p in self.parameters():
            self.alpha_lr.append(nn.Parameter(alpha_init * torch.ones(p.shape, requires_grad=True)))

    def parameters(self):
        """
        override this function since initial parameters will return with a generator.
        :return: the flat ParameterList of trainable tensors
        """
        return self.vars
| 10,679 | 34.364238 | 143 | py |
La-MAML | La-MAML-main/model/optimizers_lib/bgd_optimizer.py | import torch
from torch.optim.optimizer import Optimizer
class BGD(Optimizer):
    """Implements BGD.
    A simple usage of BGD would be:
    for samples, labels in batches:
        for mc_iter in range(mc_iters):
            optimizer.randomize_weights()
            output = model.forward(samples)
            loss = cirterion(output, labels)
            optimizer.zero_grad()
            loss.backward()
            optimizer.aggregate_grads()
        optimizer.step()

    Device note: the original hard-coded .cuda() on the accumulators and noise,
    breaking CPU runs; torch.zeros_like already creates tensors on the same
    device as the parameters, so the accumulators now simply follow the params.
    """
    def __init__(self, params, std_init, mean_eta=1, mc_iters=10):
        """
        Initialization of BGD optimizer
        group["mean_param"] is the learned mean.
        group["std_param"] is the learned STD.
        :param params: List of model parameters
        :param std_init: Initialization value for STD parameter
        :param mean_eta: Eta value
        :param mc_iters: Number of Monte Carlo iteration. Used for correctness check.
                            Use None to disable the check.
        """
        super(BGD, self).__init__(params, defaults={})
        assert mc_iters is None or (type(mc_iters) == int and mc_iters > 0), "mc_iters should be positive int or None."
        self.std_init = std_init
        self.mean_eta = mean_eta
        self.mc_iters = mc_iters
        # Initialize mu (mean_param) and sigma (std_param)
        for group in self.param_groups:
            assert len(group["params"]) == 1, "BGD optimizer does not support multiple params in a group"
            # group['params'][0] is the weights
            assert isinstance(group["params"][0], torch.Tensor), "BGD expect param to be a tensor"
            # We use the initialization of weights to initialize the mean.
            group["mean_param"] = group["params"][0].data.clone()
            group["std_param"] = torch.zeros_like(group["params"][0].data).add_(self.std_init)
        self._init_accumulators()

    def get_mc_iters(self):
        return self.mc_iters

    def _init_accumulators(self):
        """Reset the per-group Monte Carlo accumulators (on the param's device)."""
        self.mc_iters_taken = 0
        for group in self.param_groups:
            group["eps"] = None
            # zeros_like keeps the parameter's device and dtype (no .cuda()).
            group["grad_mul_eps_sum"] = torch.zeros_like(group["params"][0].data)
            group["grad_sum"] = torch.zeros_like(group["params"][0].data)

    def randomize_weights(self, force_std=-1):
        """
        Randomize the weights according to N(mean, std).
        :param force_std: If force_std>=0 then force_std is used for STD instead of the learned STD.
        :return: None
        """
        for group in self.param_groups:
            mean = group["mean_param"]
            std = group["std_param"]
            if force_std >= 0:
                std = std.mul(0).add(force_std)
            # Noise sampled on the same device as the mean (no .cuda()).
            group["eps"] = torch.normal(torch.zeros_like(mean), 1)
            # Reparameterization trick (here we set the weights to their randomized value):
            group["params"][0].data.copy_(mean.add(std.mul(group["eps"])))

    def aggregate_grads(self, batch_size):
        """
        Aggregates a single Monte Carlo iteration gradients. Used in step() for the expectations calculations.
        optimizer.zero_grad() should be used before calling .backward() once again.
        :param batch_size: BGD is using non-normalized gradients, but PyTorch gives normalized gradients.
                            Therefore, we multiply the gradients by the batch size.
        :return: None
        """
        self.mc_iters_taken += 1
        groups_cnt = 0
        for group in self.param_groups:
            if group["params"][0].grad is None:
                continue
            assert group["eps"] is not None, "Must randomize weights before using aggregate_grads"
            groups_cnt += 1
            grad = group["params"][0].grad.data.mul(batch_size)
            group["grad_sum"].add_(grad)
            group["grad_mul_eps_sum"].add_(grad.mul(group["eps"]))
            group["eps"] = None
        assert groups_cnt > 0, "Called aggregate_grads, but all gradients were None. Make sure you called .backward()"

    def step(self, closure=None, print_std = False):
        """
        Updates the learned mean and STD.
        :return:
        """
        # Makes sure that self.mc_iters had been taken.
        assert self.mc_iters is None or self.mc_iters == self.mc_iters_taken, "MC iters is set to " \
                                                                              + str(self.mc_iters) \
                                                                              + ", but took " + \
                                                                              str(self.mc_iters_taken) + " MC iters"
        for group in self.param_groups:
            mean = group["mean_param"]
            std = group["std_param"]
            # Divide gradients by MC iters to get expectation
            e_grad = group["grad_sum"].div(self.mc_iters_taken)
            e_grad_eps = group["grad_mul_eps_sum"].div(self.mc_iters_taken)
            # Update mean and STD params
            mean.add_(-std.pow(2).mul(e_grad).mul(self.mean_eta))
            sqrt_term = torch.sqrt(e_grad_eps.mul(std).div(2).pow(2).add(1)).mul(std)
            std.copy_(sqrt_term.add(-e_grad_eps.mul(std.pow(2)).div(2)))
        # Leave the weights at the learned mean (std forced to 0).
        self.randomize_weights(force_std=0)
        self._init_accumulators()
La-MAML | La-MAML-main/model/optimizers_lib/optimizers_lib.py | import torch.optim as optim
from .bgd_optimizer import BGD
def bgd(model, **kwargs):
    """Build a BGD optimizer over all of `model`'s parameters.

    Recognized kwargs: mean_eta (default 1), std_init (0.02), mc_iters (10).
    """
    hyper = {
        "mean_eta": kwargs.get("mean_eta", 1),
        "std_init": kwargs.get("std_init", 0.02),
        "mc_iters": kwargs.get("mc_iters", 10),
    }
    # One single-parameter group per named parameter, as BGD requires.
    groups = [{'params': params} for _, params in model.named_parameters()]
    return BGD(groups, **hyper)
def sgd(model, **kwargs):
    """Build torch.optim.SGD over per-parameter named groups.

    Recognized kwargs: momentum (default 0.9), lr (0.1), weight_decay (5e-4).
    """
    lr = kwargs.get("lr", 0.1)
    hyper = {
        "momentum": kwargs.get("momentum", 0.9),
        "lr": lr,
        "weight_decay": kwargs.get("weight_decay", 5e-4),
    }
    groups = [
        {'params': params, 'name': name, 'initial_lr': lr}
        for name, params in model.named_parameters()
    ]
    return optim.SGD(groups, **hyper)
def adam(model, **kwargs):
    """Build torch.optim.Adam over per-parameter named groups.

    Recognized kwargs: eps (default 1e-08), lr (0.001), betas ((0.9, 0.999)),
    weight_decay (0).
    """
    lr = kwargs.get("lr", 0.001)
    hyper = {
        "eps": kwargs.get("eps", 1e-08),
        "lr": lr,
        "betas": kwargs.get("betas", (0.9, 0.999)),
        "weight_decay": kwargs.get("weight_decay", 0),
    }
    groups = [
        {'params': params, 'name': name, 'initial_lr': lr}
        for name, params in model.named_parameters()
    ]
    return optim.Adam(groups, **hyper)
def adagrad(model, **kwargs):
    """Build torch.optim.Adagrad over per-parameter named groups.

    Recognized kwargs: lr (default 0.01), weight_decay (0).
    """
    lr = kwargs.get("lr", 0.01)
    hyper = {
        "lr": lr,
        "weight_decay": kwargs.get("weight_decay", 0),
    }
    groups = [
        {'params': params, 'name': name, 'initial_lr': lr}
        for name, params in model.named_parameters()
    ]
    return optim.Adagrad(groups, **hyper)
| 2,099 | 37.181818 | 151 | py |
fiery | fiery-master/evaluate.py | from argparse import ArgumentParser
import torch
from tqdm import tqdm
from fiery.data import prepare_dataloaders
from fiery.trainer import TrainingModule
from fiery.metrics import IntersectionOverUnion, PanopticMetric
from fiery.utils.network import preprocess_batch
from fiery.utils.instance import predict_instance_segmentation_and_trajectories
# 30mx30m, 100mx100m
EVALUATION_RANGES = {'30x30': (70, 130),
'100x100': (0, 200)
}
def eval(checkpoint_path, dataroot, version):
    """Evaluate a FIERY checkpoint on the nuScenes validation split.

    Computes panoptic (PQ/SQ/RQ) and IoU metrics over the ranges in
    EVALUATION_RANGES and prints them as LaTeX-style rows. Requires a GPU
    (cuda:0 is hard-coded).
    """
    trainer = TrainingModule.load_from_checkpoint(checkpoint_path, strict=True)
    print(f'Loaded weights from \n {checkpoint_path}')
    trainer.eval()

    device = torch.device('cuda:0')
    trainer.to(device)
    model = trainer.model

    # Override config for single-GPU, batch-size-1 evaluation on this dataset.
    cfg = model.cfg
    cfg.GPUS = "[0]"
    cfg.BATCHSIZE = 1

    cfg.DATASET.DATAROOT = dataroot
    cfg.DATASET.VERSION = version

    _, valloader = prepare_dataloaders(cfg)

    # One metric instance per evaluation range.
    panoptic_metrics = {}
    iou_metrics = {}
    n_classes = len(cfg.SEMANTIC_SEG.WEIGHTS)
    for key in EVALUATION_RANGES.keys():
        panoptic_metrics[key] = PanopticMetric(n_classes=n_classes, temporally_consistent=True).to(
            device)
        iou_metrics[key] = IntersectionOverUnion(n_classes).to(device)

    for i, batch in enumerate(tqdm(valloader)):
        preprocess_batch(batch, device)
        image = batch['image']
        intrinsics = batch['intrinsics']
        extrinsics = batch['extrinsics']
        future_egomotion = batch['future_egomotion']

        batch_size = image.shape[0]

        labels, future_distribution_inputs = trainer.prepare_future_labels(batch)

        with torch.no_grad():
            # Evaluate with mean prediction
            noise = torch.zeros((batch_size, 1, model.latent_dim), device=device)
            output = model(image, intrinsics, extrinsics, future_egomotion,
                           future_distribution_inputs, noise=noise)

        # Consistent instance seg
        pred_consistent_instance_seg = predict_instance_segmentation_and_trajectories(
            output, compute_matched_centers=False, make_consistent=True
        )

        segmentation_pred = output['segmentation'].detach()
        segmentation_pred = torch.argmax(segmentation_pred, dim=2, keepdims=True)

        # Accumulate metrics over the cropped BEV window for each range.
        for key, grid in EVALUATION_RANGES.items():
            limits = slice(grid[0], grid[1])
            panoptic_metrics[key](pred_consistent_instance_seg[..., limits, limits].contiguous().detach(),
                                  labels['instance'][..., limits, limits].contiguous()
                                  )

            iou_metrics[key](segmentation_pred[..., limits, limits].contiguous(),
                             labels['segmentation'][..., limits, limits].contiguous()
                             )

    # Collect per-range scores (index 1 selects the vehicle/foreground class).
    results = {}
    for key, grid in EVALUATION_RANGES.items():
        panoptic_scores = panoptic_metrics[key].compute()
        for panoptic_key, value in panoptic_scores.items():
            results[f'{panoptic_key}'] = results.get(f'{panoptic_key}', []) + [100 * value[1].item()]

        iou_scores = iou_metrics[key].compute()
        results['iou'] = results.get('iou', []) + [100 * iou_scores[1].item()]

    for panoptic_key in ['iou', 'pq', 'sq', 'rq']:
        print(panoptic_key)
        print(' & '.join([f'{x:.1f}' for x in results[panoptic_key]]))
# CLI entry point. NOTE: `eval` below is the module-level evaluation function
# defined above, which shadows the Python builtin of the same name.
if __name__ == '__main__':
    parser = ArgumentParser(description='Fiery evaluation')
    parser.add_argument('--checkpoint', default='./fiery.ckpt', type=str, help='path to checkpoint')
    parser.add_argument('--dataroot', default='./nuscenes', type=str, help='path to the dataset')
    parser.add_argument('--version', default='trainval', type=str, choices=['mini', 'trainval'],
                        help='dataset version')
    args = parser.parse_args()

    eval(args.checkpoint, args.dataroot, args.version)
| 3,908 | 36.951456 | 106 | py |
fiery | fiery-master/visualise.py | import os
from argparse import ArgumentParser
from glob import glob
import cv2
import numpy as np
import torch
import torchvision
import matplotlib as mpl
import matplotlib.pyplot as plt
from PIL import Image
from fiery.trainer import TrainingModule
from fiery.utils.network import NormalizeInverse
from fiery.utils.instance import predict_instance_segmentation_and_trajectories
from fiery.utils.visualisation import plot_instance_map, generate_instance_colours, make_contour, convert_figure_numpy
EXAMPLE_DATA_PATH = 'example_data'
def plot_prediction(image, output, cfg):
    """Render one figure: the six camera frames plus the predicted BEV
    instance segmentation with future trajectories overlaid.

    Returns the figure as a numpy RGB array.
    """
    # Process predictions
    consistent_instance_seg, matched_centers = predict_instance_segmentation_and_trajectories(
        output, compute_matched_centers=True
    )

    # Plot future trajectories
    # [1:] drops id 0 (background) from the unique instance ids.
    unique_ids = torch.unique(consistent_instance_seg[0, 0]).cpu().long().numpy()[1:]
    instance_map = dict(zip(unique_ids, unique_ids))
    instance_colours = generate_instance_colours(instance_map)
    vis_image = plot_instance_map(consistent_instance_seg[0, 0].cpu().numpy(), instance_map)
    trajectory_img = np.zeros(vis_image.shape, dtype=np.uint8)
    for instance_id in unique_ids:
        path = matched_centers[instance_id]
        for t in range(len(path) - 1):
            color = instance_colours[instance_id].tolist()
            cv2.line(trajectory_img, tuple(path[t]), tuple(path[t + 1]),
                     color, 4)

    # Overlay arrows
    temp_img = cv2.addWeighted(vis_image, 0.7, trajectory_img, 0.3, 1.0)
    mask = ~ np.all(trajectory_img == 0, axis=2)
    vis_image[mask] = temp_img[mask]

    # Plot present RGB frames and predictions
    val_w = 2.99
    cameras = cfg.IMAGE.NAMES
    image_ratio = cfg.IMAGE.FINAL_DIM[0] / cfg.IMAGE.FINAL_DIM[1]
    val_h = val_w * image_ratio
    fig = plt.figure(figsize=(4 * val_w, 2 * val_h))
    width_ratios = (val_w, val_w, val_w, val_w)
    gs = mpl.gridspec.GridSpec(2, 4, width_ratios=width_ratios)
    gs.update(wspace=0.0, hspace=0.0, left=0.0, right=1.0, top=1.0, bottom=0.0)

    # Undo the ImageNet normalisation before displaying the camera frames.
    denormalise_img = torchvision.transforms.Compose(
        (NormalizeInverse(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
         torchvision.transforms.ToPILImage(),)
    )
    for imgi, img in enumerate(image[0, -1]):
        ax = plt.subplot(gs[imgi // 3, imgi % 3])
        showimg = denormalise_img(img.cpu())
        # Rear-facing cameras (index > 2) are mirrored for a natural layout.
        if imgi > 2:
            showimg = showimg.transpose(Image.FLIP_LEFT_RIGHT)

        plt.annotate(cameras[imgi].replace('_', ' ').replace('CAM ', ''), (0.01, 0.87), c='white',
                     xycoords='axes fraction', fontsize=14)
        plt.imshow(showimg)
        plt.axis('off')

    ax = plt.subplot(gs[:, 3])
    plt.imshow(make_contour(vis_image[::-1, ::-1]))
    plt.axis('off')

    plt.draw()
    figure_numpy = convert_figure_numpy(fig)
    plt.close()
    return figure_numpy
def download_example_data():
    """Fetch the bundled example .npz files from the GitHub release into
    EXAMPLE_DATA_PATH (created if it does not exist)."""
    from requests import get

    def _fetch(url, destination):
        # Write the downloaded asset straight to disk in binary mode.
        with open(destination, "wb") as fh:
            fh.write(get(url).content)

    os.makedirs(EXAMPLE_DATA_PATH, exist_ok=True)
    url_list = ['https://github.com/wayveai/fiery/releases/download/v1.0/example_1.npz',
                'https://github.com/wayveai/fiery/releases/download/v1.0/example_2.npz',
                'https://github.com/wayveai/fiery/releases/download/v1.0/example_3.npz',
                'https://github.com/wayveai/fiery/releases/download/v1.0/example_4.npz'
                ]
    for url in url_list:
        _fetch(url, os.path.join(EXAMPLE_DATA_PATH, os.path.basename(url)))
def visualise(checkpoint_path):
    """Load a FIERY checkpoint, run it on the downloaded example batches and
    save one prediction figure per example under ./output_vis.

    Requires a GPU (cuda:0 is hard-coded).
    """
    trainer = TrainingModule.load_from_checkpoint(checkpoint_path, strict=True)
    device = torch.device('cuda:0')
    trainer = trainer.to(device)
    trainer.eval()

    # Download example data
    download_example_data()
    # Load data
    for data_path in sorted(glob(os.path.join(EXAMPLE_DATA_PATH, '*.npz'))):
        data = np.load(data_path)
        image = torch.from_numpy(data['image']).to(device)
        intrinsics = torch.from_numpy(data['intrinsics']).to(device)
        extrinsics = torch.from_numpy(data['extrinsics']).to(device)
        future_egomotions = torch.from_numpy(data['future_egomotion']).to(device)

        # Forward pass
        with torch.no_grad():
            output = trainer.model(image, intrinsics, extrinsics, future_egomotions)

        figure_numpy = plot_prediction(image, output, trainer.cfg)
        os.makedirs('./output_vis', exist_ok=True)
        output_filename = os.path.join('./output_vis', os.path.basename(data_path).split('.')[0]) + '.png'
        Image.fromarray(figure_numpy).save(output_filename)
        print(f'Saved output in {output_filename}')
# CLI entry point.
if __name__ == '__main__':
    parser = ArgumentParser(description='Fiery visualisation')
    parser.add_argument('--checkpoint', default='./fiery.ckpt', type=str, help='path to checkpoint')
    args = parser.parse_args()

    visualise(args.checkpoint)
| 5,095 | 36.470588 | 118 | py |
fiery | fiery-master/train.py | import os
import time
import socket
import torch
import pytorch_lightning as pl
from pytorch_lightning.plugins import DDPPlugin
from fiery.config import get_parser, get_cfg
from fiery.data import prepare_dataloaders
from fiery.trainer import TrainingModule
def main():
    """Train a FIERY model: parse config, build dataloaders and model, then fit with Lightning."""
    cfg = get_cfg(get_parser().parse_args())

    trainloader, valloader = prepare_dataloaders(cfg)
    model = TrainingModule(cfg.convert_to_dict())

    if cfg.PRETRAINED.LOAD_WEIGHTS:
        # Warm-start from a single-image instance segmentation checkpoint
        # (non-matching keys are skipped via strict=False).
        checkpoint_file = os.path.join(cfg.DATASET.DATAROOT, cfg.PRETRAINED.PATH)
        pretrained_model_weights = torch.load(checkpoint_file, map_location='cpu')['state_dict']
        model.load_state_dict(pretrained_model_weights, strict=False)
        print(f'Loaded single-image model weights from {cfg.PRETRAINED.PATH}')

    # Unique run directory: timestamp + hostname + experiment tag.
    run_name = time.strftime('%d%B%Yat%H:%M:%S%Z') + '_' + socket.gethostname() + '_' + cfg.TAG
    save_dir = os.path.join(cfg.LOG_DIR, run_name)
    tb_logger = pl.loggers.TensorBoardLogger(save_dir=save_dir)

    trainer = pl.Trainer(
        gpus=cfg.GPUS,
        accelerator='ddp',
        precision=cfg.PRECISION,
        sync_batchnorm=True,
        gradient_clip_val=cfg.GRAD_NORM_CLIP,
        max_epochs=cfg.EPOCHS,
        weights_summary='full',
        logger=tb_logger,
        log_every_n_steps=cfg.LOGGING_INTERVAL,
        plugins=DDPPlugin(find_unused_parameters=True),
        profiler='simple',
    )
    trainer.fit(model, trainloader, valloader)
if __name__ == "__main__":
    # Script entry point.
    main()
| 1,540 | 29.215686 | 101 | py |
fiery | fiery-master/fiery/losses.py | import torch
import torch.nn as nn
import torch.nn.functional as F
class SpatialRegressionLoss(nn.Module):
    """L1 or L2 regression loss over a temporal sequence of dense spatial maps.

    Pixels whose target equals ``ignore_index`` (same across all channels) are
    masked out, and later timesteps are geometrically down-weighted by
    ``future_discount``.

    Args:
        norm: 1 for L1 loss, 2 for MSE loss.
        ignore_index: target value marking invalid pixels.
        future_discount: per-timestep geometric decay factor.

    Raises:
        ValueError: if ``norm`` is neither 1 nor 2.
    """

    def __init__(self, norm, ignore_index=255, future_discount=1.0):
        super(SpatialRegressionLoss, self).__init__()
        self.norm = norm
        self.ignore_index = ignore_index
        self.future_discount = future_discount

        if norm == 1:
            self.loss_fn = F.l1_loss
        elif norm == 2:
            self.loss_fn = F.mse_loss
        else:
            raise ValueError(f'Expected norm 1 or 2, but got norm={norm}')

    def forward(self, prediction, target):
        """Compute the masked, future-discounted loss.

        Args:
            prediction: (b, s, c, h, w) tensor.
            target: (b, s, c, h, w) tensor; ``ignore_index`` marks invalid pixels.

        Returns:
            Scalar loss tensor (zero if every pixel is ignored).
        """
        assert len(prediction.shape) == 5, 'Must be a 5D tensor'
        # ignore_index is the same across all channels, so testing the first is enough.
        mask = target[:, :, :1] != self.ignore_index
        if mask.sum() == 0:
            # Nothing to supervise: return a differentiable zero on the right device.
            return prediction.new_zeros(1)[0].float()

        loss = self.loss_fn(prediction, target, reduction='none')
        # Sum over the channel dimension. Use the canonical `keepdim` kwarg
        # instead of the numpy-compat `keepdims` alias for consistency/portability.
        loss = torch.sum(loss, dim=-3, keepdim=True)

        # Geometric down-weighting of future timesteps.
        seq_len = loss.shape[1]
        future_discounts = self.future_discount ** torch.arange(
            seq_len, device=loss.device, dtype=loss.dtype)
        future_discounts = future_discounts.view(1, seq_len, 1, 1, 1)
        loss = loss * future_discounts

        return loss[mask].mean()
class SegmentationLoss(nn.Module):
    """Cross-entropy segmentation loss with future discounting and optional top-k hard-pixel mining."""

    def __init__(self, class_weights, ignore_index=255, use_top_k=False, top_k_ratio=1.0, future_discount=1.0):
        super().__init__()
        self.class_weights = class_weights
        self.ignore_index = ignore_index
        self.use_top_k = use_top_k
        self.top_k_ratio = top_k_ratio
        self.future_discount = future_discount

    def forward(self, prediction, target):
        """prediction: (b, s, c, h, w) logits; target: (b, s, 1, h, w) index labels."""
        if target.shape[-3] != 1:
            raise ValueError('segmentation label must be an index-label with channel dimension = 1.')
        batch, seq, n_classes, height, width = prediction.shape

        # Fold time into the batch dimension so cross_entropy sees 4D logits.
        flat_logits = prediction.view(batch * seq, n_classes, height, width)
        flat_labels = target.view(batch * seq, height, width)
        pixel_loss = F.cross_entropy(
            flat_logits,
            flat_labels,
            ignore_index=self.ignore_index,
            reduction='none',
            weight=self.class_weights.to(target.device),
        )

        # Geometric down-weighting of future timesteps.
        pixel_loss = pixel_loss.view(batch, seq, height, width)
        discounts = self.future_discount ** torch.arange(
            seq, device=pixel_loss.device, dtype=pixel_loss.dtype)
        pixel_loss = pixel_loss * discounts.view(1, seq, 1, 1)

        # Flatten spatial dims; optionally keep only the hardest pixels.
        pixel_loss = pixel_loss.view(batch, seq, -1)
        if self.use_top_k:
            k = int(self.top_k_ratio * pixel_loss.shape[2])
            pixel_loss = torch.sort(pixel_loss, dim=2, descending=True)[0][:, :, :k]
        return torch.mean(pixel_loss)
class ProbabilisticLoss(nn.Module):
    """KL divergence between the future (posterior) and present (prior) diagonal Gaussians."""

    def forward(self, output):
        """output: dict with 'present_mu', 'present_log_sigma', 'future_mu', 'future_log_sigma'."""
        mu_p = output['present_mu']
        log_sigma_p = output['present_log_sigma']
        mu_f = output['future_mu']
        log_sigma_f = output['future_log_sigma']

        # Closed-form KL( N(mu_f, sigma_f^2) || N(mu_p, sigma_p^2) ) per latent dimension.
        var_f = torch.exp(2 * log_sigma_f)
        var_p = torch.exp(2 * log_sigma_p)
        kl_per_dim = (
            log_sigma_p - log_sigma_f - 0.5
            + (var_f + (mu_f - mu_p) ** 2) / (2 * var_p)
        )
        # Sum over the latent dimension, then average over the batch.
        return torch.mean(torch.sum(kl_per_dim, dim=-1))
| 3,378 | 33.835052 | 111 | py |
fiery | fiery-master/fiery/data.py | import os
from PIL import Image
import numpy as np
import cv2
import torch
import torchvision
from pyquaternion import Quaternion
from nuscenes.nuscenes import NuScenes
from nuscenes.utils.splits import create_splits_scenes
from nuscenes.utils.data_classes import Box
from lyft_dataset_sdk.lyftdataset import LyftDataset
from fiery.utils.geometry import (
resize_and_crop_image,
update_intrinsics,
calculate_birds_eye_view_parameters,
convert_egopose_to_matrix_numpy,
pose_vec2mat,
mat2pose_vec,
invert_matrix_egopose_numpy,
)
from fiery.utils.instance import convert_instance_mask_to_center_and_offset_label
from fiery.utils.lyft_splits import TRAIN_LYFT_INDICES, VAL_LYFT_INDICES
class FuturePredictionDataset(torch.utils.data.Dataset):
    """Temporal sequence dataset over NuScenes/Lyft samples for BEV future prediction.

    Each item is a dict of multi-camera images, intrinsics/extrinsics,
    bird's-eye-view labels (segmentation, instance, centerness, offset, flow)
    and future egomotion for a sequence of
    ``TIME_RECEPTIVE_FIELD + N_FUTURE_FRAMES`` frames from a single scene.
    """

    def __init__(self, nusc, is_train, cfg):
        # nusc: a NuScenes or LyftDataset SDK handle; is_train selects the split.
        self.nusc = nusc
        self.is_train = is_train
        self.cfg = cfg

        # The Lyft SDK mimics the NuScenes API but differs in a few attribute names.
        self.is_lyft = isinstance(nusc, LyftDataset)

        if self.is_lyft:
            self.dataroot = self.nusc.data_path
        else:
            self.dataroot = self.nusc.dataroot

        self.mode = 'train' if self.is_train else 'val'

        # Total frames per item: past/present receptive field + future horizon.
        self.sequence_length = cfg.TIME_RECEPTIVE_FIELD + cfg.N_FUTURE_FRAMES

        self.scenes = self.get_scenes()
        self.ixes = self.prepro()
        self.indices = self.get_indices()

        # Image resizing and cropping
        self.augmentation_parameters = self.get_resizing_and_cropping_parameters()

        # Normalising input images (ImageNet mean/std).
        self.normalise_image = torchvision.transforms.Compose(
            [torchvision.transforms.ToTensor(),
             torchvision.transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
             ]
        )

        # Bird's-eye view parameters
        bev_resolution, bev_start_position, bev_dimension = calculate_birds_eye_view_parameters(
            cfg.LIFT.X_BOUND, cfg.LIFT.Y_BOUND, cfg.LIFT.Z_BOUND
        )
        self.bev_resolution, self.bev_start_position, self.bev_dimension = (
            bev_resolution.numpy(), bev_start_position.numpy(), bev_dimension.numpy()
        )

        # Spatial extent in bird's-eye view, in meters
        self.spatial_extent = (self.cfg.LIFT.X_BOUND[1], self.cfg.LIFT.Y_BOUND[1])

    def get_scenes(self):
        """Return the scene names belonging to the current train/val split."""
        if self.is_lyft:
            scenes = [row['name'] for row in self.nusc.scene]

            # Split in train/val
            indices = TRAIN_LYFT_INDICES if self.is_train else VAL_LYFT_INDICES
            scenes = [scenes[i] for i in indices]
        else:
            # filter by scene split
            split = {'v1.0-trainval': {True: 'train', False: 'val'},
                     'v1.0-mini': {True: 'mini_train', False: 'mini_val'},}[
                self.nusc.version
            ][self.is_train]

            scenes = create_splits_scenes()[split]
        return scenes

    def prepro(self):
        """Collect all samples of this split, ordered by scene then timestamp."""
        samples = [samp for samp in self.nusc.sample]

        # remove samples that aren't in this split
        samples = [samp for samp in samples if self.nusc.get('scene', samp['scene_token'])['name'] in self.scenes]

        # sort by scene, timestamp (only to make chronological viz easier)
        samples.sort(key=lambda x: (x['scene_token'], x['timestamp']))

        return samples

    def get_indices(self):
        """Build every valid window of `sequence_length` consecutive sample
        indices that stays within one scene and inside the dataset bounds.
        Returns an (n_windows, sequence_length) int array."""
        indices = []
        for index in range(len(self.ixes)):
            is_valid_data = True
            previous_rec = None
            current_indices = []
            for t in range(self.sequence_length):
                index_t = index + t
                # Going over the dataset size limit.
                if index_t >= len(self.ixes):
                    is_valid_data = False
                    break
                rec = self.ixes[index_t]
                # Check if scene is the same
                if (previous_rec is not None) and (rec['scene_token'] != previous_rec['scene_token']):
                    is_valid_data = False
                    break

                current_indices.append(index_t)
                previous_rec = rec

            if is_valid_data:
                indices.append(current_indices)

        return np.asarray(indices)

    def get_resizing_and_cropping_parameters(self):
        """Derive the resize scale and crop box mapping raw camera frames to the
        network's input resolution; warns when zero padding will occur."""
        original_height, original_width = self.cfg.IMAGE.ORIGINAL_HEIGHT, self.cfg.IMAGE.ORIGINAL_WIDTH
        final_height, final_width = self.cfg.IMAGE.FINAL_DIM

        resize_scale = self.cfg.IMAGE.RESIZE_SCALE
        resize_dims = (int(original_width * resize_scale), int(original_height * resize_scale))
        resized_width, resized_height = resize_dims

        crop_h = self.cfg.IMAGE.TOP_CROP
        # Center the crop horizontally.
        crop_w = int(max(0, (resized_width - final_width) / 2))
        # Left, top, right, bottom crops.
        crop = (crop_w, crop_h, crop_w + final_width, crop_h + final_height)

        if resized_width != final_width:
            print('Zero padding left and right parts of the image.')
        if crop_h + final_height != resized_height:
            print('Zero padding bottom part of the image.')

        return {'scale_width': resize_scale,
                'scale_height': resize_scale,
                'resize_dims': resize_dims,
                'crop': crop,
                }

    def get_input_data(self, rec):
        """
        Parameters
        ----------
            rec: nuscenes identifier for a given timestamp

        Returns
        -------
            images: torch.Tensor<float> (N, 3, H, W)
            intrinsics: torch.Tensor<float> (3, 3)
            extrinsics: torch.Tensor(N, 4, 4)
        """
        images = []
        intrinsics = []
        extrinsics = []
        cameras = self.cfg.IMAGE.NAMES

        # The extrinsics we want are from the camera sensor to "flat egopose" as defined
        # https://github.com/nutonomy/nuscenes-devkit/blob/9b492f76df22943daf1dc991358d3d606314af27/python-sdk/nuscenes/nuscenes.py#L279
        # which corresponds to the position of the lidar.
        # This is because the labels are generated by projecting the 3D bounding box in this lidar's reference frame.

        # From lidar egopose to world.
        lidar_sample = self.nusc.get('sample_data', rec['data']['LIDAR_TOP'])
        lidar_pose = self.nusc.get('ego_pose', lidar_sample['ego_pose_token'])
        # Keep only the yaw component of the egopose rotation ("flat" frame).
        yaw = Quaternion(lidar_pose['rotation']).yaw_pitch_roll[0]
        lidar_rotation = Quaternion(scalar=np.cos(yaw / 2), vector=[0, 0, np.sin(yaw / 2)])
        lidar_translation = np.array(lidar_pose['translation'])[:, None]
        lidar_to_world = np.vstack([
            np.hstack((lidar_rotation.rotation_matrix, lidar_translation)),
            np.array([0, 0, 0, 1])
        ])

        for cam in cameras:
            camera_sample = self.nusc.get('sample_data', rec['data'][cam])

            # Transformation from world to egopose
            car_egopose = self.nusc.get('ego_pose', camera_sample['ego_pose_token'])
            egopose_rotation = Quaternion(car_egopose['rotation']).inverse
            egopose_translation = -np.array(car_egopose['translation'])[:, None]
            world_to_car_egopose = np.vstack([
                np.hstack((egopose_rotation.rotation_matrix, egopose_rotation.rotation_matrix @ egopose_translation)),
                np.array([0, 0, 0, 1])
            ])

            # From egopose to sensor
            sensor_sample = self.nusc.get('calibrated_sensor', camera_sample['calibrated_sensor_token'])
            intrinsic = torch.Tensor(sensor_sample['camera_intrinsic'])
            sensor_rotation = Quaternion(sensor_sample['rotation'])
            sensor_translation = np.array(sensor_sample['translation'])[:, None]
            car_egopose_to_sensor = np.vstack([
                np.hstack((sensor_rotation.rotation_matrix, sensor_translation)),
                np.array([0, 0, 0, 1])
            ])
            car_egopose_to_sensor = np.linalg.inv(car_egopose_to_sensor)

            # Combine all the transformation.
            # From sensor to lidar.
            lidar_to_sensor = car_egopose_to_sensor @ world_to_car_egopose @ lidar_to_world
            sensor_to_lidar = torch.from_numpy(np.linalg.inv(lidar_to_sensor)).float()

            # Load image
            image_filename = os.path.join(self.dataroot, camera_sample['filename'])
            img = Image.open(image_filename)
            # Resize and crop
            img = resize_and_crop_image(
                img, resize_dims=self.augmentation_parameters['resize_dims'], crop=self.augmentation_parameters['crop']
            )
            # Normalise image
            normalised_img = self.normalise_image(img)

            # Combine resize/cropping in the intrinsics
            top_crop = self.augmentation_parameters['crop'][1]
            left_crop = self.augmentation_parameters['crop'][0]
            intrinsic = update_intrinsics(
                intrinsic, top_crop, left_crop,
                scale_width=self.augmentation_parameters['scale_width'],
                scale_height=self.augmentation_parameters['scale_height']
            )

            images.append(normalised_img.unsqueeze(0).unsqueeze(0))
            intrinsics.append(intrinsic.unsqueeze(0).unsqueeze(0))
            extrinsics.append(sensor_to_lidar.unsqueeze(0).unsqueeze(0))

        # Concatenate along the camera dimension.
        images, intrinsics, extrinsics = (torch.cat(images, dim=1),
                                          torch.cat(intrinsics, dim=1),
                                          torch.cat(extrinsics, dim=1)
                                          )

        return images, intrinsics, extrinsics

    def _get_top_lidar_pose(self, rec):
        """Return the (negated translation, inverse yaw-only rotation) that maps
        world coordinates into the top lidar's flat egopose frame."""
        egopose = self.nusc.get('ego_pose', self.nusc.get('sample_data', rec['data']['LIDAR_TOP'])['ego_pose_token'])
        trans = -np.array(egopose['translation'])
        yaw = Quaternion(egopose['rotation']).yaw_pitch_roll[0]
        rot = Quaternion(scalar=np.cos(yaw / 2), vector=[0, 0, np.sin(yaw / 2)]).inverse
        return trans, rot

    def get_birds_eye_view_label(self, rec, instance_map):
        """Rasterise the sample's vehicle annotations into BEV maps.

        Returns (segmentation, instance, z_position, instance_map,
        attribute_label) as numpy arrays of shape (bev_h, bev_w); instance_map
        is threaded through the sequence so instance ids stay consistent
        across frames."""
        translation, rotation = self._get_top_lidar_pose(rec)
        segmentation = np.zeros((self.bev_dimension[0], self.bev_dimension[1]))
        # Background is ID 0
        instance = np.zeros((self.bev_dimension[0], self.bev_dimension[1]))
        z_position = np.zeros((self.bev_dimension[0], self.bev_dimension[1]))
        attribute_label = np.zeros((self.bev_dimension[0], self.bev_dimension[1]))

        for annotation_token in rec['anns']:
            # Filter out all non vehicle instances
            annotation = self.nusc.get('sample_annotation', annotation_token)

            if not self.is_lyft:
                # NuScenes filter
                if 'vehicle' not in annotation['category_name']:
                    continue
                if self.cfg.DATASET.FILTER_INVISIBLE_VEHICLES and int(annotation['visibility_token']) == 1:
                    continue
            else:
                # Lyft filter
                if annotation['category_name'] not in ['bus', 'car', 'construction_vehicle', 'trailer', 'truck']:
                    continue

            if annotation['instance_token'] not in instance_map:
                instance_map[annotation['instance_token']] = len(instance_map) + 1
            instance_id = instance_map[annotation['instance_token']]

            if not self.is_lyft:
                instance_attribute = int(annotation['visibility_token'])
            else:
                instance_attribute = 0

            poly_region, z = self._get_poly_region_in_image(annotation, translation, rotation)
            cv2.fillPoly(instance, [poly_region], instance_id)
            cv2.fillPoly(segmentation, [poly_region], 1.0)
            cv2.fillPoly(z_position, [poly_region], z)
            cv2.fillPoly(attribute_label, [poly_region], instance_attribute)

        return segmentation, instance, z_position, instance_map, attribute_label

    def _get_poly_region_in_image(self, instance_annotation, ego_translation, ego_rotation):
        """Project a 3D box's bottom face into BEV pixel coordinates.

        Returns (pts, z): polygon corner pixels (int32, swapped to row/col
        order for cv2.fillPoly) and the bottom-face height of one corner."""
        box = Box(
            instance_annotation['translation'], instance_annotation['size'], Quaternion(instance_annotation['rotation'])
        )
        box.translate(ego_translation)
        box.rotate(ego_rotation)

        pts = box.bottom_corners()[:2].T
        # Quantise metric coordinates into BEV grid cells.
        pts = np.round((pts - self.bev_start_position[:2] + self.bev_resolution[:2] / 2.0) / self.bev_resolution[:2]).astype(np.int32)
        # Swap x/y into (row, col) order expected by the rasteriser.
        pts[:, [1, 0]] = pts[:, [0, 1]]

        z = box.bottom_corners()[2, 0]
        return pts, z

    def get_label(self, rec, instance_map):
        """Rasterise labels for one frame and convert them to torch tensors with
        leading (time, channel) singleton dimensions."""
        segmentation_np, instance_np, z_position_np, instance_map, attribute_label_np = \
            self.get_birds_eye_view_label(rec, instance_map)
        segmentation = torch.from_numpy(segmentation_np).long().unsqueeze(0).unsqueeze(0)
        instance = torch.from_numpy(instance_np).long().unsqueeze(0)
        z_position = torch.from_numpy(z_position_np).float().unsqueeze(0).unsqueeze(0)
        attribute_label = torch.from_numpy(attribute_label_np).long().unsqueeze(0).unsqueeze(0)

        return segmentation, instance, z_position, instance_map, attribute_label

    def get_future_egomotion(self, rec, index):
        """Return the 6-DoF egomotion from sample `index` to `index + 1` as a
        (1, 6) pose vector; identity at scene boundaries / end of dataset."""
        rec_t0 = rec

        # Identity
        future_egomotion = np.eye(4, dtype=np.float32)

        if index < len(self.ixes) - 1:
            rec_t1 = self.ixes[index + 1]

            if rec_t0['scene_token'] == rec_t1['scene_token']:
                egopose_t0 = self.nusc.get(
                    'ego_pose', self.nusc.get('sample_data', rec_t0['data']['LIDAR_TOP'])['ego_pose_token']
                )
                egopose_t1 = self.nusc.get(
                    'ego_pose', self.nusc.get('sample_data', rec_t1['data']['LIDAR_TOP'])['ego_pose_token']
                )

                egopose_t0 = convert_egopose_to_matrix_numpy(egopose_t0)
                egopose_t1 = convert_egopose_to_matrix_numpy(egopose_t1)

                future_egomotion = invert_matrix_egopose_numpy(egopose_t1).dot(egopose_t0)
                # Force an exact homogeneous bottom row (guards numerical noise).
                future_egomotion[3, :3] = 0.0
                future_egomotion[3, 3] = 1.0

        future_egomotion = torch.Tensor(future_egomotion).float()

        # Convert to 6DoF vector
        future_egomotion = mat2pose_vec(future_egomotion)
        return future_egomotion.unsqueeze(0)

    def __len__(self):
        # One item per valid temporal window.
        return len(self.indices)

    def __getitem__(self, index):
        """
        Returns
        -------
            data: dict with the following keys:
                image: torch.Tensor<float> (T, N, 3, H, W)
                    normalised cameras images with T the sequence length, and N the number of cameras.
                intrinsics: torch.Tensor<float> (T, N, 3, 3)
                    intrinsics containing resizing and cropping parameters.
                extrinsics: torch.Tensor<float> (T, N, 4, 4)
                    6 DoF pose from world coordinates to camera coordinates.
                segmentation: torch.Tensor<int64> (T, 1, H_bev, W_bev)
                    (H_bev, W_bev) are the pixel dimensions in bird's-eye view.
                instance: torch.Tensor<int64> (T, 1, H_bev, W_bev)
                centerness: torch.Tensor<float> (T, 1, H_bev, W_bev)
                offset: torch.Tensor<float> (T, 2, H_bev, W_bev)
                flow: torch.Tensor<float> (T, 2, H_bev, W_bev)
                future_egomotion: torch.Tensor<float> (T, 6)
                    6 DoF egomotion t -> t+1
                sample_token: List<str> (T,)
                'z_position': list_z_position,
                'attribute': list_attribute_label,

        """
        data = {}
        keys = ['image', 'intrinsics', 'extrinsics',
                'segmentation', 'instance', 'centerness', 'offset', 'flow', 'future_egomotion',
                'sample_token',
                'z_position', 'attribute'
                ]
        for key in keys:
            data[key] = []

        instance_map = {}
        # Loop over all the frames in the sequence.
        for index_t in self.indices[index]:
            rec = self.ixes[index_t]

            images, intrinsics, extrinsics = self.get_input_data(rec)
            segmentation, instance, z_position, instance_map, attribute_label = self.get_label(rec, instance_map)

            future_egomotion = self.get_future_egomotion(rec, index_t)

            data['image'].append(images)
            data['intrinsics'].append(intrinsics)
            data['extrinsics'].append(extrinsics)
            data['segmentation'].append(segmentation)
            data['instance'].append(instance)
            data['future_egomotion'].append(future_egomotion)
            data['sample_token'].append(rec['token'])
            data['z_position'].append(z_position)
            data['attribute'].append(attribute_label)

        # Stack per-frame tensors along the time dimension.
        for key, value in data.items():
            if key in ['sample_token', 'centerness', 'offset', 'flow']:
                continue
            data[key] = torch.cat(value, dim=0)

        # If lyft need to subsample, and update future_egomotions
        if self.cfg.MODEL.SUBSAMPLE:
            for key, value in data.items():
                if key in ['future_egomotion', 'sample_token', 'centerness', 'offset', 'flow']:
                    continue
                data[key] = data[key][::2].clone()
            data['sample_token'] = data['sample_token'][::2]

            # Update future egomotions
            # Compose each pair of consecutive egomotions before subsampling
            # so each kept step spans two original frames.
            future_egomotions_matrix = pose_vec2mat(data['future_egomotion'])
            future_egomotion_accum = torch.zeros_like(future_egomotions_matrix)
            future_egomotion_accum[:-1] = future_egomotions_matrix[:-1] @ future_egomotions_matrix[1:]
            future_egomotion_accum = mat2pose_vec(future_egomotion_accum)
            data['future_egomotion'] = future_egomotion_accum[::2].clone()

        # Derive centerness/offset/flow targets from the instance sequence.
        instance_centerness, instance_offset, instance_flow = convert_instance_mask_to_center_and_offset_label(
            data['instance'], data['future_egomotion'],
            num_instances=len(instance_map), ignore_index=self.cfg.DATASET.IGNORE_INDEX, subtract_egomotion=True,
            spatial_extent=self.spatial_extent,
        )
        data['centerness'] = instance_centerness
        data['offset'] = instance_offset
        data['flow'] = instance_flow
        return data
def prepare_dataloaders(cfg, return_dataset=False):
    """Build the train and validation dataloaders for NuScenes or Lyft.

    Args:
        cfg: experiment configuration (see fiery/config.py).
        return_dataset: if True, also return the underlying dataset objects.

    Returns:
        (trainloader, valloader), or
        (trainloader, valloader, traindata, valdata) when return_dataset is True.

    Raises:
        ValueError: if ``cfg.DATASET.NAME`` is neither 'nuscenes' nor 'lyft'
            (previously this fell through and crashed later with NameError).
    """
    version = cfg.DATASET.VERSION
    train_on_training_data = True

    if cfg.DATASET.NAME == 'nuscenes':
        # 28130 train and 6019 val
        dataroot = os.path.join(cfg.DATASET.DATAROOT, version)
        nusc = NuScenes(version='v1.0-{}'.format(cfg.DATASET.VERSION), dataroot=dataroot, verbose=False)
    elif cfg.DATASET.NAME == 'lyft':
        # train contains 22680 samples; we split in 16506 / 6174
        dataroot = os.path.join(cfg.DATASET.DATAROOT, 'trainval')
        nusc = LyftDataset(data_path=dataroot,
                           json_path=os.path.join(dataroot, 'train_data'),
                           verbose=True)
    else:
        raise ValueError(
            f"Unknown dataset name: {cfg.DATASET.NAME!r} (expected 'nuscenes' or 'lyft')"
        )

    traindata = FuturePredictionDataset(nusc, train_on_training_data, cfg)
    valdata = FuturePredictionDataset(nusc, False, cfg)

    if cfg.DATASET.VERSION == 'mini':
        # The mini split is tiny: cap it for quick debugging runs.
        traindata.indices = traindata.indices[:10]
        valdata.indices = valdata.indices[:10]

    nworkers = cfg.N_WORKERS
    trainloader = torch.utils.data.DataLoader(
        traindata, batch_size=cfg.BATCHSIZE, shuffle=True, num_workers=nworkers, pin_memory=True, drop_last=True
    )
    valloader = torch.utils.data.DataLoader(
        valdata, batch_size=cfg.BATCHSIZE, shuffle=False, num_workers=nworkers, pin_memory=True, drop_last=False)

    if return_dataset:
        return trainloader, valloader, traindata, valdata
    return trainloader, valloader
| 19,735 | 41.62635 | 136 | py |
fiery | fiery-master/fiery/metrics.py | from typing import Optional
import torch
from pytorch_lightning.metrics.metric import Metric
from pytorch_lightning.metrics.functional.classification import stat_scores_multiple_classes
from pytorch_lightning.metrics.functional.reduction import reduce
class IntersectionOverUnion(Metric):
    """Computes per-class intersection-over-union as a Lightning Metric.

    State is accumulated across batches (and summed across processes) by
    ``update``; ``compute`` turns the running counts into IoU scores.
    """
    def __init__(
        self,
        n_classes: int,
        ignore_index: Optional[int] = None,
        absent_score: float = 0.0,
        reduction: str = 'none',
        compute_on_step: bool = False,
    ):
        super().__init__(compute_on_step=compute_on_step)

        self.n_classes = n_classes
        self.ignore_index = ignore_index
        self.absent_score = absent_score
        self.reduction = reduction

        # Distributed-friendly running counts, one entry per class.
        self.add_state('true_positive', default=torch.zeros(n_classes), dist_reduce_fx='sum')
        self.add_state('false_positive', default=torch.zeros(n_classes), dist_reduce_fx='sum')
        self.add_state('false_negative', default=torch.zeros(n_classes), dist_reduce_fx='sum')
        self.add_state('support', default=torch.zeros(n_classes), dist_reduce_fx='sum')

    def update(self, prediction: torch.Tensor, target: torch.Tensor):
        """Accumulate per-class TP/FP/FN/support statistics for a new batch."""
        tps, fps, _, fns, sups = stat_scores_multiple_classes(prediction, target, self.n_classes)

        self.true_positive += tps
        self.false_positive += fps
        self.false_negative += fns
        self.support += sups

    def compute(self):
        """Return per-class IoU = TP / (TP + FP + FN), reduced per ``self.reduction``."""
        scores = torch.zeros(self.n_classes, device=self.true_positive.device, dtype=torch.float32)

        for class_idx in range(self.n_classes):
            if class_idx == self.ignore_index:
                continue

            tp = self.true_positive[class_idx]
            fp = self.false_positive[class_idx]
            fn = self.false_negative[class_idx]
            sup = self.support[class_idx]

            # If this class is absent in the target (no support) AND absent in the pred (no true or false
            # positives), then use the absent_score for this class.
            if sup + tp + fp == 0:
                scores[class_idx] = self.absent_score
                continue

            denominator = tp + fp + fn
            score = tp.to(torch.float) / denominator
            scores[class_idx] = score

        # Remove the ignored class index from the scores.
        if (self.ignore_index is not None) and (0 <= self.ignore_index < self.n_classes):
            scores = torch.cat([scores[:self.ignore_index], scores[self.ignore_index+1:]])

        return reduce(scores, reduction=self.reduction)
class PanopticMetric(Metric):
    """Panoptic quality (PQ/SQ/RQ) metric over temporal instance segmentation.

    When ``temporally_consistent`` is True, a ground-truth vehicle instance that
    matches a different predicted id than in earlier frames of the same sequence
    is counted as one false negative plus one false positive.
    """
    def __init__(
        self,
        n_classes: int,
        temporally_consistent: bool = True,
        vehicles_id: int = 1,
        compute_on_step: bool = False,
    ):
        super().__init__(compute_on_step=compute_on_step)

        self.n_classes = n_classes
        self.temporally_consistent = temporally_consistent
        self.vehicles_id = vehicles_id
        self.keys = ['iou', 'true_positive', 'false_positive', 'false_negative']

        # Distributed-friendly running counts, one entry per class.
        self.add_state('iou', default=torch.zeros(n_classes), dist_reduce_fx='sum')
        self.add_state('true_positive', default=torch.zeros(n_classes), dist_reduce_fx='sum')
        self.add_state('false_positive', default=torch.zeros(n_classes), dist_reduce_fx='sum')
        self.add_state('false_negative', default=torch.zeros(n_classes), dist_reduce_fx='sum')

    def update(self, pred_instance, gt_instance):
        """
        Update state with predictions and targets.

        Parameters
        ----------
            pred_instance: (b, s, h, w)
                Temporally consistent instance segmentation prediction.
            gt_instance: (b, s, h, w)
                Ground truth instance segmentation.
        """
        batch_size, sequence_length = gt_instance.shape[:2]
        # Process labels
        assert gt_instance.min() == 0, 'ID 0 of gt_instance must be background'
        # Binary segmentation: any instance id > 0 is foreground.
        pred_segmentation = (pred_instance > 0).long()
        gt_segmentation = (gt_instance > 0).long()

        for b in range(batch_size):
            # gt-id -> pred-id mapping, reset per sequence, used to enforce
            # temporal consistency across timesteps.
            unique_id_mapping = {}
            for t in range(sequence_length):
                result = self.panoptic_metrics(
                    pred_segmentation[b, t].detach(),
                    pred_instance[b, t].detach(),
                    gt_segmentation[b, t],
                    gt_instance[b, t],
                    unique_id_mapping,
                )

                self.iou += result['iou']
                self.true_positive += result['true_positive']
                self.false_positive += result['false_positive']
                self.false_negative += result['false_negative']

    def compute(self):
        """Return dict with per-class 'pq', 'sq', 'rq' and the PQ 'denominator'."""
        # Clamp denominators to >= 1 to avoid division by zero.
        denominator = torch.maximum(
            (self.true_positive + self.false_positive / 2 + self.false_negative / 2),
            torch.ones_like(self.true_positive)
        )
        pq = self.iou / denominator
        sq = self.iou / torch.maximum(self.true_positive, torch.ones_like(self.true_positive))
        rq = self.true_positive / denominator

        return {'pq': pq,
                'sq': sq,
                'rq': rq,
                # If 0, it means there wasn't any detection.
                'denominator': (self.true_positive + self.false_positive / 2 + self.false_negative / 2),
                }

    def panoptic_metrics(self, pred_segmentation, pred_instance, gt_segmentation, gt_instance, unique_id_mapping):
        """
        Computes panoptic quality metric components.

        Parameters
        ----------
            pred_segmentation: [H, W] range {0, ..., n_classes-1} (>= n_classes is void)
            pred_instance: [H, W] range {0, ..., n_instances} (zero means background)
            gt_segmentation: [H, W] range {0, ..., n_classes-1} (>= n_classes is void)
            gt_instance: [H, W] range {0, ..., n_instances} (zero means background)
            unique_id_mapping: instance id mapping to check consistency
        """
        n_classes = self.n_classes

        result = {key: torch.zeros(n_classes, dtype=torch.float32, device=gt_instance.device) for key in self.keys}

        assert pred_segmentation.dim() == 2
        assert pred_segmentation.shape == pred_instance.shape == gt_segmentation.shape == gt_instance.shape

        n_instances = int(torch.cat([pred_instance, gt_instance]).max().item())
        n_all_things = n_instances + n_classes  # Classes + instances.
        n_things_and_void = n_all_things + 1

        # Now 1 is background; 0 is void (not used). 2 is vehicle semantic class but since it overlaps with
        # instances, it is not present.
        # and the rest are instance ids starting from 3
        prediction, pred_to_cls = self.combine_mask(pred_segmentation, pred_instance, n_classes, n_all_things)
        target, target_to_cls = self.combine_mask(gt_segmentation, gt_instance, n_classes, n_all_things)

        # Compute ious between all stuff and things
        # hack for bincounting 2 arrays together
        x = prediction + n_things_and_void * target
        bincount_2d = torch.bincount(x.long(), minlength=n_things_and_void ** 2)
        if bincount_2d.shape[0] != n_things_and_void ** 2:
            raise ValueError('Incorrect bincount size.')
        conf = bincount_2d.reshape((n_things_and_void, n_things_and_void))
        # Drop void class
        conf = conf[1:, 1:]

        # Confusion matrix contains intersections between all combinations of classes
        union = conf.sum(0).unsqueeze(0) + conf.sum(1).unsqueeze(1) - conf
        iou = torch.where(union > 0, (conf.float() + 1e-9) / (union.float() + 1e-9), torch.zeros_like(union).float())

        # In the iou matrix, first dimension is target idx, second dimension is pred idx.
        # Mapping will contain a tuple that maps prediction idx to target idx for segments matched by iou.
        mapping = (iou > 0.5).nonzero(as_tuple=False)

        # Check that classes match.
        is_matching = pred_to_cls[mapping[:, 1]] == target_to_cls[mapping[:, 0]]
        mapping = mapping[is_matching]
        tp_mask = torch.zeros_like(conf, dtype=torch.bool)
        tp_mask[mapping[:, 0], mapping[:, 1]] = True

        # First ids correspond to "stuff" i.e. semantic seg.
        # Instance ids are offset accordingly
        for target_id, pred_id in mapping:
            cls_id = pred_to_cls[pred_id]

            if self.temporally_consistent and cls_id == self.vehicles_id:
                if target_id.item() in unique_id_mapping and unique_id_mapping[target_id.item()] != pred_id.item():
                    # Not temporally consistent
                    result['false_negative'][target_to_cls[target_id]] += 1
                    result['false_positive'][pred_to_cls[pred_id]] += 1
                    unique_id_mapping[target_id.item()] = pred_id.item()
                    continue

            result['true_positive'][cls_id] += 1
            result['iou'][cls_id] += iou[target_id][pred_id]
            unique_id_mapping[target_id.item()] = pred_id.item()

        for target_id in range(n_classes, n_all_things):
            # If this is a true positive do nothing.
            if tp_mask[target_id, n_classes:].any():
                continue
            # If this target instance didn't match with any predictions and was present set it as false negative.
            if target_to_cls[target_id] != -1:
                result['false_negative'][target_to_cls[target_id]] += 1

        for pred_id in range(n_classes, n_all_things):
            # If this is a true positive do nothing.
            if tp_mask[n_classes:, pred_id].any():
                continue
            # If this predicted instance didn't match with any prediction, set that predictions as false positive.
            if pred_to_cls[pred_id] != -1 and (conf[:, pred_id] > 0).any():
                result['false_positive'][pred_to_cls[pred_id]] += 1

        return result

    def combine_mask(self, segmentation: torch.Tensor, instance: torch.Tensor, n_classes: int, n_all_things: int):
        """Shifts all things ids by num_classes and combines things and stuff into a single mask

        Returns a combined mask + a mapping from id to segmentation class.
        """
        instance = instance.view(-1)
        instance_mask = instance > 0
        # Offset instance ids past the semantic class range.
        instance = instance - 1 + n_classes

        segmentation = segmentation.clone().view(-1)
        segmentation_mask = segmentation < n_classes  # Remove void pixels.

        # Build an index from instance id to class id.
        instance_id_to_class_tuples = torch.cat(
            (
                instance[instance_mask & segmentation_mask].unsqueeze(1),
                segmentation[instance_mask & segmentation_mask].unsqueeze(1),
            ),
            dim=1,
        )
        # -1 marks ids with no class (absent instances).
        instance_id_to_class = -instance_id_to_class_tuples.new_ones((n_all_things,))
        instance_id_to_class[instance_id_to_class_tuples[:, 0]] = instance_id_to_class_tuples[:, 1]
        instance_id_to_class[torch.arange(n_classes, device=segmentation.device)] = torch.arange(
            n_classes, device=segmentation.device
        )

        segmentation[instance_mask] = instance[instance_mask]
        segmentation += 1  # Shift all legit classes by 1.
        segmentation[~segmentation_mask] = 0  # Shift void class to zero.

        return segmentation, instance_id_to_class
| 11,415 | 43.59375 | 117 | py |
fiery | fiery-master/fiery/trainer.py | import torch
import torch.nn as nn
import pytorch_lightning as pl
from fiery.config import get_cfg
from fiery.models.fiery import Fiery
from fiery.losses import ProbabilisticLoss, SpatialRegressionLoss, SegmentationLoss
from fiery.metrics import IntersectionOverUnion, PanopticMetric
from fiery.utils.geometry import cumulative_warp_features_reverse
from fiery.utils.instance import predict_instance_segmentation_and_trajectories
from fiery.utils.visualisation import visualise_output
class TrainingModule(pl.LightningModule):
    def __init__(self, hparams):
        """Build the FIERY model, its losses (with learned uncertainty
        weights), and the validation metrics from a hyperparameter dict."""
        super().__init__()

        # see config.py for details
        self.hparams = hparams
        # pytorch lightning does not support saving YACS CfgNone
        cfg = get_cfg(cfg_dict=self.hparams)
        self.cfg = cfg
        self.n_classes = len(self.cfg.SEMANTIC_SEG.WEIGHTS)

        # Bird's-eye view extent in meters
        assert self.cfg.LIFT.X_BOUND[1] > 0 and self.cfg.LIFT.Y_BOUND[1] > 0
        self.spatial_extent = (self.cfg.LIFT.X_BOUND[1], self.cfg.LIFT.Y_BOUND[1])

        # Model
        self.model = Fiery(cfg)

        # Losses
        self.losses_fn = nn.ModuleDict()
        self.losses_fn['segmentation'] = SegmentationLoss(
            class_weights=torch.Tensor(self.cfg.SEMANTIC_SEG.WEIGHTS),
            use_top_k=self.cfg.SEMANTIC_SEG.USE_TOP_K,
            top_k_ratio=self.cfg.SEMANTIC_SEG.TOP_K_RATIO,
            future_discount=self.cfg.FUTURE_DISCOUNT,
        )

        # Uncertainty weighting: learned log-variances, stored on the model so
        # the optimiser picks them up as parameters.
        self.model.segmentation_weight = nn.Parameter(torch.tensor(0.0), requires_grad=True)

        self.metric_iou_val = IntersectionOverUnion(self.n_classes)

        self.losses_fn['instance_center'] = SpatialRegressionLoss(
            norm=2, future_discount=self.cfg.FUTURE_DISCOUNT
        )
        self.losses_fn['instance_offset'] = SpatialRegressionLoss(
            norm=1, future_discount=self.cfg.FUTURE_DISCOUNT, ignore_index=self.cfg.DATASET.IGNORE_INDEX
        )

        # Uncertainty weighting
        self.model.centerness_weight = nn.Parameter(torch.tensor(0.0), requires_grad=True)
        self.model.offset_weight = nn.Parameter(torch.tensor(0.0), requires_grad=True)

        self.metric_panoptic_val = PanopticMetric(n_classes=self.n_classes)

        if self.cfg.INSTANCE_FLOW.ENABLED:
            self.losses_fn['instance_flow'] = SpatialRegressionLoss(
                norm=1, future_discount=self.cfg.FUTURE_DISCOUNT, ignore_index=self.cfg.DATASET.IGNORE_INDEX
            )
            # Uncertainty weighting
            self.model.flow_weight = nn.Parameter(torch.tensor(0.0), requires_grad=True)

        if self.cfg.PROBABILISTIC.ENABLED:
            self.losses_fn['probabilistic'] = ProbabilisticLoss()

        self.training_step_count = 0
def shared_step(self, batch, is_train):
image = batch['image']
intrinsics = batch['intrinsics']
extrinsics = batch['extrinsics']
future_egomotion = batch['future_egomotion']
# Warp labels
labels, future_distribution_inputs = self.prepare_future_labels(batch)
# Forward pass
output = self.model(
image, intrinsics, extrinsics, future_egomotion, future_distribution_inputs
)
#####
# Loss computation
#####
loss = {}
segmentation_factor = 1 / torch.exp(self.model.segmentation_weight)
loss['segmentation'] = segmentation_factor * self.losses_fn['segmentation'](
output['segmentation'], labels['segmentation']
)
loss['segmentation_uncertainty'] = 0.5 * self.model.segmentation_weight
centerness_factor = 1 / (2*torch.exp(self.model.centerness_weight))
loss['instance_center'] = centerness_factor * self.losses_fn['instance_center'](
output['instance_center'], labels['centerness']
)
offset_factor = 1 / (2*torch.exp(self.model.offset_weight))
loss['instance_offset'] = offset_factor * self.losses_fn['instance_offset'](
output['instance_offset'], labels['offset']
)
loss['centerness_uncertainty'] = 0.5 * self.model.centerness_weight
loss['offset_uncertainty'] = 0.5 * self.model.offset_weight
if self.cfg.INSTANCE_FLOW.ENABLED:
flow_factor = 1 / (2*torch.exp(self.model.flow_weight))
loss['instance_flow'] = flow_factor * self.losses_fn['instance_flow'](
output['instance_flow'], labels['flow']
)
loss['flow_uncertainty'] = 0.5 * self.model.flow_weight
if self.cfg.PROBABILISTIC.ENABLED:
loss['probabilistic'] = self.cfg.PROBABILISTIC.WEIGHT * self.losses_fn['probabilistic'](output)
# Metrics
if not is_train:
seg_prediction = output['segmentation'].detach()
seg_prediction = torch.argmax(seg_prediction, dim=2, keepdims=True)
self.metric_iou_val(seg_prediction, labels['segmentation'])
pred_consistent_instance_seg = predict_instance_segmentation_and_trajectories(
output, compute_matched_centers=False
)
self.metric_panoptic_val(pred_consistent_instance_seg, labels['instance'])
return output, labels, loss
def prepare_future_labels(self, batch):
labels = {}
future_distribution_inputs = []
segmentation_labels = batch['segmentation']
instance_center_labels = batch['centerness']
instance_offset_labels = batch['offset']
instance_flow_labels = batch['flow']
gt_instance = batch['instance']
future_egomotion = batch['future_egomotion']
# Warp labels to present's reference frame
segmentation_labels = cumulative_warp_features_reverse(
segmentation_labels[:, (self.model.receptive_field - 1):].float(),
future_egomotion[:, (self.model.receptive_field - 1):],
mode='nearest', spatial_extent=self.spatial_extent,
).long().contiguous()
labels['segmentation'] = segmentation_labels
future_distribution_inputs.append(segmentation_labels)
# Warp instance labels to present's reference frame
gt_instance = cumulative_warp_features_reverse(
gt_instance[:, (self.model.receptive_field - 1):].float().unsqueeze(2),
future_egomotion[:, (self.model.receptive_field - 1):],
mode='nearest', spatial_extent=self.spatial_extent,
).long().contiguous()[:, :, 0]
labels['instance'] = gt_instance
instance_center_labels = cumulative_warp_features_reverse(
instance_center_labels[:, (self.model.receptive_field - 1):],
future_egomotion[:, (self.model.receptive_field - 1):],
mode='nearest', spatial_extent=self.spatial_extent,
).contiguous()
labels['centerness'] = instance_center_labels
instance_offset_labels = cumulative_warp_features_reverse(
instance_offset_labels[:, (self.model.receptive_field- 1):],
future_egomotion[:, (self.model.receptive_field - 1):],
mode='nearest', spatial_extent=self.spatial_extent,
).contiguous()
labels['offset'] = instance_offset_labels
future_distribution_inputs.append(instance_center_labels)
future_distribution_inputs.append(instance_offset_labels)
if self.cfg.INSTANCE_FLOW.ENABLED:
instance_flow_labels = cumulative_warp_features_reverse(
instance_flow_labels[:, (self.model.receptive_field - 1):],
future_egomotion[:, (self.model.receptive_field - 1):],
mode='nearest', spatial_extent=self.spatial_extent,
).contiguous()
labels['flow'] = instance_flow_labels
future_distribution_inputs.append(instance_flow_labels)
if len(future_distribution_inputs) > 0:
future_distribution_inputs = torch.cat(future_distribution_inputs, dim=2)
return labels, future_distribution_inputs
def visualise(self, labels, output, batch_idx, prefix='train'):
visualisation_video = visualise_output(labels, output, self.cfg)
name = f'{prefix}_outputs'
if prefix == 'val':
name = name + f'_{batch_idx}'
self.logger.experiment.add_video(name, visualisation_video, global_step=self.training_step_count, fps=2)
def training_step(self, batch, batch_idx):
output, labels, loss = self.shared_step(batch, True)
self.training_step_count += 1
for key, value in loss.items():
self.logger.experiment.add_scalar(key, value, global_step=self.training_step_count)
if self.training_step_count % self.cfg.VIS_INTERVAL == 0:
self.visualise(labels, output, batch_idx, prefix='train')
return sum(loss.values())
def validation_step(self, batch, batch_idx):
output, labels, loss = self.shared_step(batch, False)
for key, value in loss.items():
self.log('val_' + key, value)
if batch_idx == 0:
self.visualise(labels, output, batch_idx, prefix='val')
def shared_epoch_end(self, step_outputs, is_train):
# log per class iou metrics
class_names = ['background', 'dynamic']
if not is_train:
scores = self.metric_iou_val.compute()
for key, value in zip(class_names, scores):
self.logger.experiment.add_scalar('val_iou_' + key, value, global_step=self.training_step_count)
self.metric_iou_val.reset()
if not is_train:
scores = self.metric_panoptic_val.compute()
for key, value in scores.items():
for instance_name, score in zip(['background', 'vehicles'], value):
if instance_name != 'background':
self.logger.experiment.add_scalar(f'val_{key}_{instance_name}', score.item(),
global_step=self.training_step_count)
self.metric_panoptic_val.reset()
self.logger.experiment.add_scalar('segmentation_weight',
1 / (torch.exp(self.model.segmentation_weight)),
global_step=self.training_step_count)
self.logger.experiment.add_scalar('centerness_weight',
1 / (2 * torch.exp(self.model.centerness_weight)),
global_step=self.training_step_count)
self.logger.experiment.add_scalar('offset_weight', 1 / (2 * torch.exp(self.model.offset_weight)),
global_step=self.training_step_count)
if self.cfg.INSTANCE_FLOW.ENABLED:
self.logger.experiment.add_scalar('flow_weight', 1 / (2 * torch.exp(self.model.flow_weight)),
global_step=self.training_step_count)
def training_epoch_end(self, step_outputs):
self.shared_epoch_end(step_outputs, True)
def validation_epoch_end(self, step_outputs):
self.shared_epoch_end(step_outputs, False)
def configure_optimizers(self):
params = self.model.parameters()
optimizer = torch.optim.Adam(
params, lr=self.cfg.OPTIMIZER.LR, weight_decay=self.cfg.OPTIMIZER.WEIGHT_DECAY
)
return optimizer
| 11,419 | 42.754789 | 112 | py |
fiery | fiery-master/fiery/models/distributions.py | import torch
import torch.nn as nn
from fiery.layers.convolutions import Bottleneck
class DistributionModule(nn.Module):
    """Convolutional head producing the parameters of a diagonal Gaussian.

    The input feature map is spatially encoded, globally pooled, and mapped
    to ``2 * latent_dim`` channels: the first half is the mean, the second
    half the (clamped) log standard deviation.
    """
    def __init__(
            self, in_channels, latent_dim, min_log_sigma, max_log_sigma):
        super().__init__()
        self.compress_dim = in_channels // 2
        self.latent_dim = latent_dim
        self.min_log_sigma = min_log_sigma
        self.max_log_sigma = max_log_sigma
        self.encoder = DistributionEncoder(in_channels, self.compress_dim)
        self.last_conv = nn.Sequential(
            nn.AdaptiveAvgPool2d(1),
            nn.Conv2d(self.compress_dim, out_channels=2 * self.latent_dim, kernel_size=1),
        )
    def forward(self, s_t):
        """Return (mu, log_sigma), each shaped (b, 1, latent_dim)."""
        batch, seq_len = s_t.shape[:2]
        assert seq_len == 1
        features = self.encoder(s_t[:, 0])
        params = self.last_conv(features).view(batch, 1, 2 * self.latent_dim)
        mu = params[:, :, :self.latent_dim]
        log_sigma = params[:, :, self.latent_dim:]
        # Clamp log_sigma for numerical stability (it is later exponentiated).
        log_sigma = log_sigma.clamp(self.min_log_sigma, self.max_log_sigma)
        return mu, log_sigma
class DistributionEncoder(nn.Module):
    """Encodes s_t or (s_t, y_{t+1}, ..., y_{t+H}).

    Four stride-2 bottleneck blocks, each halving the spatial resolution.
    """
    def __init__(self, in_channels, out_channels):
        super().__init__()
        stages = [Bottleneck(in_channels, out_channels=out_channels, downsample=True)]
        for _ in range(3):
            stages.append(Bottleneck(out_channels, out_channels=out_channels, downsample=True))
        self.model = nn.Sequential(*stages)
    def forward(self, s_t):
        return self.model(s_t)
| 1,871 | 31.842105 | 114 | py |
fiery | fiery-master/fiery/models/future_prediction.py | import torch
from fiery.layers.convolutions import Bottleneck
from fiery.layers.temporal import SpatialGRU
class FuturePrediction(torch.nn.Module):
    """Convolutional recurrent future-prediction module.

    Architecture: [SpatialGRU -> Bottleneck x n_res_layers] x n_gru_blocks,
    with the present state as the initial hidden state and a latent sample
    as the first input.
    """
    def __init__(self, in_channels, latent_dim, n_gru_blocks=3, n_res_layers=3):
        super().__init__()
        self.n_gru_blocks = n_gru_blocks
        grus, res_stacks = [], []
        for block_idx in range(n_gru_blocks):
            # Only the first GRU consumes the latent sample; subsequent
            # blocks operate in feature space.
            gru_in = latent_dim if block_idx == 0 else in_channels
            grus.append(SpatialGRU(gru_in, in_channels))
            res_stacks.append(torch.nn.Sequential(
                *(Bottleneck(in_channels) for _ in range(n_res_layers))
            ))
        self.spatial_grus = torch.nn.ModuleList(grus)
        self.res_blocks = torch.nn.ModuleList(res_stacks)
    def forward(self, x, hidden_state):
        """x: (b, n_future, c, h, w); hidden_state: (b, c, h, w)."""
        for gru, res_stack in zip(self.spatial_grus, self.res_blocks):
            x = gru(x, hidden_state, flow=None)
            b, n_future, c, h, w = x.shape
            # Apply the residual stack frame-wise by folding time into batch.
            x = res_stack(x.view(b * n_future, c, h, w)).view(b, n_future, c, h, w)
        return x
| 1,488 | 39.243243 | 97 | py |
fiery | fiery-master/fiery/models/fiery.py | import torch
import torch.nn as nn
from fiery.models.encoder import Encoder
from fiery.models.temporal_model import TemporalModelIdentity, TemporalModel
from fiery.models.distributions import DistributionModule
from fiery.models.future_prediction import FuturePrediction
from fiery.models.decoder import Decoder
from fiery.utils.network import pack_sequence_dim, unpack_sequence_dim, set_bn_momentum
from fiery.utils.geometry import cumulative_warp_features, calculate_birds_eye_view_parameters, VoxelsSumming
class Fiery(nn.Module):
    """FIERY bird's-eye-view perception and future-prediction model.

    Pipeline: per-camera image encoder -> lift features into a 3D frustum ->
    splat into a bird's-eye-view grid -> warp past frames to the present ->
    temporal model -> (optional) probabilistic future prediction -> decoder
    heads (segmentation, instance centerness/offset, optional flow).
    """
    def __init__(self, cfg):
        super().__init__()
        self.cfg = cfg
        bev_resolution, bev_start_position, bev_dimension = calculate_birds_eye_view_parameters(
            self.cfg.LIFT.X_BOUND, self.cfg.LIFT.Y_BOUND, self.cfg.LIFT.Z_BOUND
        )
        # Registered as non-trainable parameters so they follow the module
        # across devices and are saved in checkpoints.
        self.bev_resolution = nn.Parameter(bev_resolution, requires_grad=False)
        self.bev_start_position = nn.Parameter(bev_start_position, requires_grad=False)
        self.bev_dimension = nn.Parameter(bev_dimension, requires_grad=False)
        self.encoder_downsample = self.cfg.MODEL.ENCODER.DOWNSAMPLE
        self.encoder_out_channels = self.cfg.MODEL.ENCODER.OUT_CHANNELS
        self.frustum = self.create_frustum()
        self.depth_channels, _, _, _ = self.frustum.shape
        if self.cfg.TIME_RECEPTIVE_FIELD == 1:
            assert self.cfg.MODEL.TEMPORAL_MODEL.NAME == 'identity'
        # temporal block
        self.receptive_field = self.cfg.TIME_RECEPTIVE_FIELD
        self.n_future = self.cfg.N_FUTURE_FRAMES
        self.latent_dim = self.cfg.MODEL.DISTRIBUTION.LATENT_DIM
        if self.cfg.MODEL.SUBSAMPLE:
            # Lyft-specific frame subsampling overrides the configured
            # receptive field and future horizon.
            assert self.cfg.DATASET.NAME == 'lyft'
            self.receptive_field = 3
            self.n_future = 5
        # Spatial extent in bird's-eye view, in meters
        self.spatial_extent = (self.cfg.LIFT.X_BOUND[1], self.cfg.LIFT.Y_BOUND[1])
        self.bev_size = (self.bev_dimension[0].item(), self.bev_dimension[1].item())
        # Encoder
        self.encoder = Encoder(cfg=self.cfg.MODEL.ENCODER, D=self.depth_channels)
        # Temporal model
        temporal_in_channels = self.encoder_out_channels
        if self.cfg.MODEL.TEMPORAL_MODEL.INPUT_EGOPOSE:
            # 6 extra channels for the spatially-broadcast egomotion vector.
            temporal_in_channels += 6
        if self.cfg.MODEL.TEMPORAL_MODEL.NAME == 'identity':
            self.temporal_model = TemporalModelIdentity(temporal_in_channels, self.receptive_field)
        elif cfg.MODEL.TEMPORAL_MODEL.NAME == 'temporal_block':
            self.temporal_model = TemporalModel(
                temporal_in_channels,
                self.receptive_field,
                input_shape=self.bev_size,
                start_out_channels=self.cfg.MODEL.TEMPORAL_MODEL.START_OUT_CHANNELS,
                extra_in_channels=self.cfg.MODEL.TEMPORAL_MODEL.EXTRA_IN_CHANNELS,
                n_spatial_layers_between_temporal_layers=self.cfg.MODEL.TEMPORAL_MODEL.INBETWEEN_LAYERS,
                use_pyramid_pooling=self.cfg.MODEL.TEMPORAL_MODEL.PYRAMID_POOLING,
            )
        else:
            raise NotImplementedError(f'Temporal module {self.cfg.MODEL.TEMPORAL_MODEL.NAME}.')
        self.future_pred_in_channels = self.temporal_model.out_channels
        if self.n_future > 0:
            # probabilistic sampling
            if self.cfg.PROBABILISTIC.ENABLED:
                # Distribution networks
                self.present_distribution = DistributionModule(
                    self.future_pred_in_channels,
                    self.latent_dim,
                    min_log_sigma=self.cfg.MODEL.DISTRIBUTION.MIN_LOG_SIGMA,
                    max_log_sigma=self.cfg.MODEL.DISTRIBUTION.MAX_LOG_SIGMA,
                )
                future_distribution_in_channels = (self.future_pred_in_channels
                                                   + self.n_future * self.cfg.PROBABILISTIC.FUTURE_DIM
                                                   )
                self.future_distribution = DistributionModule(
                    future_distribution_in_channels,
                    self.latent_dim,
                    min_log_sigma=self.cfg.MODEL.DISTRIBUTION.MIN_LOG_SIGMA,
                    max_log_sigma=self.cfg.MODEL.DISTRIBUTION.MAX_LOG_SIGMA,
                )
            # Future prediction
            self.future_prediction = FuturePrediction(
                in_channels=self.future_pred_in_channels,
                latent_dim=self.latent_dim,
                n_gru_blocks=self.cfg.MODEL.FUTURE_PRED.N_GRU_BLOCKS,
                n_res_layers=self.cfg.MODEL.FUTURE_PRED.N_RES_LAYERS,
            )
        # Decoder
        self.decoder = Decoder(
            in_channels=self.future_pred_in_channels,
            n_classes=len(self.cfg.SEMANTIC_SEG.WEIGHTS),
            predict_future_flow=self.cfg.INSTANCE_FLOW.ENABLED,
        )
        set_bn_momentum(self, self.cfg.MODEL.BN_MOMENTUM)
    def create_frustum(self):
        """Build the fixed (depth, h, w, 3) grid of image-plane sample points."""
        # Create grid in image plane
        h, w = self.cfg.IMAGE.FINAL_DIM
        downsampled_h, downsampled_w = h // self.encoder_downsample, w // self.encoder_downsample
        # Depth grid
        depth_grid = torch.arange(*self.cfg.LIFT.D_BOUND, dtype=torch.float)
        depth_grid = depth_grid.view(-1, 1, 1).expand(-1, downsampled_h, downsampled_w)
        n_depth_slices = depth_grid.shape[0]
        # x and y grids
        x_grid = torch.linspace(0, w - 1, downsampled_w, dtype=torch.float)
        x_grid = x_grid.view(1, 1, downsampled_w).expand(n_depth_slices, downsampled_h, downsampled_w)
        y_grid = torch.linspace(0, h - 1, downsampled_h, dtype=torch.float)
        y_grid = y_grid.view(1, downsampled_h, 1).expand(n_depth_slices, downsampled_h, downsampled_w)
        # Dimension (n_depth_slices, downsampled_h, downsampled_w, 3)
        # containing data points in the image: left-right, top-bottom, depth
        frustum = torch.stack((x_grid, y_grid, depth_grid), -1)
        return nn.Parameter(frustum, requires_grad=False)
    def forward(self, image, intrinsics, extrinsics, future_egomotion, future_distribution_inputs=None, noise=None):
        """Run the full model; returns a dict of decoder outputs (and, when
        probabilistic mode is on, the present/future distribution parameters).
        """
        output = {}
        # Only process features from the past and present
        image = image[:, :self.receptive_field].contiguous()
        intrinsics = intrinsics[:, :self.receptive_field].contiguous()
        extrinsics = extrinsics[:, :self.receptive_field].contiguous()
        future_egomotion = future_egomotion[:, :self.receptive_field].contiguous()
        # Lifting features and project to bird's-eye view
        x = self.calculate_birds_eye_view_features(image, intrinsics, extrinsics)
        # Warp past features to the present's reference frame
        x = cumulative_warp_features(
            x.clone(), future_egomotion,
            mode='bilinear', spatial_extent=self.spatial_extent,
        )
        if self.cfg.MODEL.TEMPORAL_MODEL.INPUT_EGOPOSE:
            b, s, c = future_egomotion.shape
            h, w = x.shape[-2:]
            future_egomotions_spatial = future_egomotion.view(b, s, c, 1, 1).expand(b, s, c, h, w)
            # at time 0, no egomotion so feed zero vector
            future_egomotions_spatial = torch.cat([torch.zeros_like(future_egomotions_spatial[:, :1]),
                                                   future_egomotions_spatial[:, :(self.receptive_field-1)]], dim=1)
            x = torch.cat([x, future_egomotions_spatial], dim=-3)
        # Temporal model
        states = self.temporal_model(x)
        if self.n_future > 0:
            present_state = states[:, :1].contiguous()
            if self.cfg.PROBABILISTIC.ENABLED:
                # Do probabilistic computation
                sample, output_distribution = self.distribution_forward(
                    present_state, future_distribution_inputs, noise
                )
                output = {**output, **output_distribution}
            # Prepare future prediction input
            b, _, _, h, w = present_state.shape
            hidden_state = present_state[:, 0]
            if self.cfg.PROBABILISTIC.ENABLED:
                future_prediction_input = sample.expand(-1, self.n_future, -1, -1, -1)
            else:
                future_prediction_input = hidden_state.new_zeros(b, self.n_future, self.latent_dim, h, w)
            # Recursively predict future states
            future_states = self.future_prediction(future_prediction_input, hidden_state)
            # Concatenate present state
            future_states = torch.cat([present_state, future_states], dim=1)
        # Predict bird's-eye view outputs
        if self.n_future > 0:
            bev_output = self.decoder(future_states)
        else:
            bev_output = self.decoder(states[:, -1:])
        output = {**output, **bev_output}
        return output
    def get_geometry(self, intrinsics, extrinsics):
        """Calculate the (x, y, z) 3D position of the features.
        """
        rotation, translation = extrinsics[..., :3, :3], extrinsics[..., :3, 3]
        B, N, _ = translation.shape
        # Add batch, camera dimension, and a dummy dimension at the end
        points = self.frustum.unsqueeze(0).unsqueeze(0).unsqueeze(-1)
        # Camera to ego reference frame
        # Un-normalise homogeneous image coordinates by depth before applying
        # the inverse intrinsics.
        points = torch.cat((points[:, :, :, :, :, :2] * points[:, :, :, :, :, 2:3], points[:, :, :, :, :, 2:3]), 5)
        combined_transformation = rotation.matmul(torch.inverse(intrinsics))
        points = combined_transformation.view(B, N, 1, 1, 1, 3, 3).matmul(points).squeeze(-1)
        points += translation.view(B, N, 1, 1, 1, 3)
        # The 3 dimensions in the ego reference frame are: (forward, sides, height)
        return points
    def encoder_forward(self, x):
        """Encode each camera image; returns (b, n, depth, h, w, channels)."""
        # batch, n_cameras, channels, height, width
        b, n, c, h, w = x.shape
        x = x.view(b * n, c, h, w)
        x = self.encoder(x)
        x = x.view(b, n, *x.shape[1:])
        # Move the channel dimension last for the voxel-splatting step.
        x = x.permute(0, 1, 3, 4, 5, 2)
        return x
    def projection_to_birds_eye_view(self, x, geometry):
        """ Adapted from https://github.com/nv-tlabs/lift-splat-shoot/blob/master/src/models.py#L200"""
        # batch, n_cameras, depth, height, width, channels
        batch, n, d, h, w, c = x.shape
        output = torch.zeros(
            (batch, c, self.bev_dimension[0], self.bev_dimension[1]), dtype=torch.float, device=x.device
        )
        # Number of 3D points
        N = n * d * h * w
        for b in range(batch):
            # flatten x
            x_b = x[b].reshape(N, c)
            # Convert positions to integer indices
            geometry_b = ((geometry[b] - (self.bev_start_position - self.bev_resolution / 2.0)) / self.bev_resolution)
            geometry_b = geometry_b.view(N, 3).long()
            # Mask out points that are outside the considered spatial extent.
            mask = (
                    (geometry_b[:, 0] >= 0)
                    & (geometry_b[:, 0] < self.bev_dimension[0])
                    & (geometry_b[:, 1] >= 0)
                    & (geometry_b[:, 1] < self.bev_dimension[1])
                    & (geometry_b[:, 2] >= 0)
                    & (geometry_b[:, 2] < self.bev_dimension[2])
            )
            x_b = x_b[mask]
            geometry_b = geometry_b[mask]
            # Sort tensors so that those within the same voxel are consecutives.
            ranks = (
                    geometry_b[:, 0] * (self.bev_dimension[1] * self.bev_dimension[2])
                    + geometry_b[:, 1] * (self.bev_dimension[2])
                    + geometry_b[:, 2]
            )
            ranks_indices = ranks.argsort()
            x_b, geometry_b, ranks = x_b[ranks_indices], geometry_b[ranks_indices], ranks[ranks_indices]
            # Project to bird's-eye view by summing voxels.
            x_b, geometry_b = VoxelsSumming.apply(x_b, geometry_b, ranks)
            bev_feature = torch.zeros((self.bev_dimension[2], self.bev_dimension[0], self.bev_dimension[1], c),
                                      device=x_b.device)
            bev_feature[geometry_b[:, 2], geometry_b[:, 0], geometry_b[:, 1]] = x_b
            # Put channel in second position and remove z dimension
            bev_feature = bev_feature.permute((0, 3, 1, 2))
            bev_feature = bev_feature.squeeze(0)
            output[b] = bev_feature
        return output
    def calculate_birds_eye_view_features(self, x, intrinsics, extrinsics):
        """Lift multi-camera images to BEV features, shape (b, s, c, H, W)."""
        b, s, n, c, h, w = x.shape
        # Reshape
        # Fold the time dimension into the batch for per-frame processing.
        x = pack_sequence_dim(x)
        intrinsics = pack_sequence_dim(intrinsics)
        extrinsics = pack_sequence_dim(extrinsics)
        geometry = self.get_geometry(intrinsics, extrinsics)
        x = self.encoder_forward(x)
        x = self.projection_to_birds_eye_view(x, geometry)
        x = unpack_sequence_dim(x, b, s)
        return x
    def distribution_forward(self, present_features, future_distribution_inputs=None, noise=None):
        """
        Parameters
        ----------
            present_features: 5-D output from dynamics module with shape (b, 1, c, h, w)
            future_distribution_inputs: 5-D tensor containing labels shape (b, s, cfg.PROB_FUTURE_DIM, h, w)
            noise: a sample from a (0, 1) gaussian with shape (b, s, latent_dim). If None, will sample in function

        Returns
        -------
            sample: sample taken from present/future distribution, broadcast to shape (b, s, latent_dim, h, w)
            present_distribution_mu: shape (b, s, latent_dim)
            present_distribution_log_sigma: shape (b, s, latent_dim)
            future_distribution_mu: shape (b, s, latent_dim)
            future_distribution_log_sigma: shape (b, s, latent_dim)
        """
        b, s, _, h, w = present_features.size()
        assert s == 1
        present_mu, present_log_sigma = self.present_distribution(present_features)
        future_mu, future_log_sigma = None, None
        if future_distribution_inputs is not None:
            # Concatenate future labels to z_t
            future_features = future_distribution_inputs[:, 1:].contiguous().view(b, 1, -1, h, w)
            future_features = torch.cat([present_features, future_features], dim=2)
            future_mu, future_log_sigma = self.future_distribution(future_features)
        if noise is None:
            if self.training:
                noise = torch.randn_like(present_mu)
            else:
                # Deterministic evaluation: use the distribution mean.
                noise = torch.zeros_like(present_mu)
        if self.training:
            mu = future_mu
            sigma = torch.exp(future_log_sigma)
        else:
            mu = present_mu
            sigma = torch.exp(present_log_sigma)
        sample = mu + sigma * noise
        # Spatially broadcast sample to the dimensions of present_features
        sample = sample.view(b, s, self.latent_dim, 1, 1).expand(b, s, self.latent_dim, h, w)
        output_distribution = {
            'present_mu': present_mu,
            'present_log_sigma': present_log_sigma,
            'future_mu': future_mu,
            'future_log_sigma': future_log_sigma,
        }
        return sample, output_distribution
| 15,090 | 43.385294 | 118 | py |
fiery | fiery-master/fiery/models/temporal_model.py | import torch.nn as nn
from fiery.layers.temporal import Bottleneck3D, TemporalBlock
class TemporalModel(nn.Module):
    """3D-convolutional temporal model over (batch, time, C, H, W) features.

    Stacks ``receptive_field - 1`` temporal blocks, each optionally followed
    by spatial bottleneck layers, and discards the warm-up frames on output.
    """
    def __init__(
            self, in_channels, receptive_field, input_shape, start_out_channels=64, extra_in_channels=0,
            n_spatial_layers_between_temporal_layers=0, use_pyramid_pooling=True):
        super().__init__()
        self.receptive_field = receptive_field
        height, width = input_shape
        layers = []
        current_in = in_channels
        current_out = start_out_channels
        for _ in range(receptive_field - 1):
            # Pyramid pooling aggregates pairs of frames at full resolution.
            pool_sizes = [(2, height, width)] if use_pyramid_pooling else None
            layers.append(TemporalBlock(
                current_in,
                current_out,
                use_pyramid_pooling=use_pyramid_pooling,
                pool_sizes=pool_sizes,
            ))
            layers.extend(
                Bottleneck3D(current_out, current_out, kernel_size=(1, 3, 3))
                for _ in range(n_spatial_layers_between_temporal_layers)
            )
            current_in = current_out
            current_out += extra_in_channels
        self.out_channels = current_in
        self.model = nn.Sequential(*layers)
    def forward(self, x):
        # 3D convs expect (batch, C, time, H, W).
        x = self.model(x.permute(0, 2, 1, 3, 4))
        x = x.permute(0, 2, 1, 3, 4).contiguous()
        # Keep only the present and future frames.
        return x[:, (self.receptive_field - 1):]
class TemporalModelIdentity(nn.Module):
    """No-op temporal model: passes features through, dropping warm-up frames."""
    def __init__(self, in_channels, receptive_field):
        super().__init__()
        self.receptive_field = receptive_field
        self.out_channels = in_channels
    def forward(self, x):
        start = self.receptive_field - 1
        return x[:, start:]
| 2,120 | 32.666667 | 104 | py |
fiery | fiery-master/fiery/models/encoder.py | import torch.nn as nn
from efficientnet_pytorch import EfficientNet
from fiery.layers.convolutions import UpsamplingConcat
class Encoder(nn.Module):
    """Per-image encoder: an EfficientNet backbone producing, for each pixel,
    C feature channels lifted across D depth bins.

    Output of forward: (batch, C, D, h, w) where h, w are the input spatial
    dims divided by ``cfg.DOWNSAMPLE``.
    """
    def __init__(self, cfg, D):
        super().__init__()
        # D: number of depth bins; C: output feature channels.
        self.D = D
        self.C = cfg.OUT_CHANNELS
        self.use_depth_distribution = cfg.USE_DEPTH_DISTRIBUTION
        self.downsample = cfg.DOWNSAMPLE
        # e.g. 'efficientnet-b0' -> 'b0'
        self.version = cfg.NAME.split('-')[1]
        self.backbone = EfficientNet.from_pretrained(cfg.NAME)
        self.delete_unused_layers()
        # NOTE(review): only versions 'b0' and 'b4' are handled below; any
        # other version leaves upsampling_in_channels unbound and fails with
        # a NameError rather than a clear error message.
        if self.downsample == 16:
            if self.version == 'b0':
                upsampling_in_channels = 320 + 112
            elif self.version == 'b4':
                upsampling_in_channels = 448 + 160
            upsampling_out_channels = 512
        elif self.downsample == 8:
            if self.version == 'b0':
                upsampling_in_channels = 112 + 40
            elif self.version == 'b4':
                upsampling_in_channels = 160 + 56
            upsampling_out_channels = 128
        else:
            raise ValueError(f'Downsample factor {self.downsample} not handled.')
        self.upsampling_layer = UpsamplingConcat(upsampling_in_channels, upsampling_out_channels)
        if self.use_depth_distribution:
            # Predict depth logits (D channels) alongside features (C channels).
            self.depth_layer = nn.Conv2d(upsampling_out_channels, self.C + self.D, kernel_size=1, padding=0)
        else:
            self.depth_layer = nn.Conv2d(upsampling_out_channels, self.C, kernel_size=1, padding=0)
    def delete_unused_layers(self):
        """Remove backbone blocks and heads never reached by get_features."""
        indices_to_delete = []
        for idx in range(len(self.backbone._blocks)):
            if self.downsample == 8:
                if self.version == 'b0' and idx > 10:
                    indices_to_delete.append(idx)
                if self.version == 'b4' and idx > 21:
                    indices_to_delete.append(idx)
        # Delete from the end so earlier indices stay valid.
        for idx in reversed(indices_to_delete):
            del self.backbone._blocks[idx]
        del self.backbone._conv_head
        del self.backbone._bn1
        del self.backbone._avg_pooling
        del self.backbone._dropout
        del self.backbone._fc
    def get_features(self, x):
        """Run the truncated backbone and fuse the two deepest feature maps."""
        # Adapted from https://github.com/lukemelas/EfficientNet-PyTorch/blob/master/efficientnet_pytorch/model.py#L231
        endpoints = dict()
        # Stem
        x = self.backbone._swish(self.backbone._bn0(self.backbone._conv_stem(x)))
        prev_x = x
        # Blocks
        for idx, block in enumerate(self.backbone._blocks):
            drop_connect_rate = self.backbone._global_params.drop_connect_rate
            if drop_connect_rate:
                drop_connect_rate *= float(idx) / len(self.backbone._blocks)
            x = block(x, drop_connect_rate=drop_connect_rate)
            # A spatial resolution drop marks the end of a backbone stage.
            if prev_x.size(2) > x.size(2):
                endpoints['reduction_{}'.format(len(endpoints) + 1)] = prev_x
            prev_x = x
            if self.downsample == 8:
                if self.version == 'b0' and idx == 10:
                    break
                if self.version == 'b4' and idx == 21:
                    break
        # Head
        endpoints['reduction_{}'.format(len(endpoints) + 1)] = x
        if self.downsample == 16:
            input_1, input_2 = endpoints['reduction_5'], endpoints['reduction_4']
        elif self.downsample == 8:
            input_1, input_2 = endpoints['reduction_4'], endpoints['reduction_3']
        x = self.upsampling_layer(input_1, input_2)
        return x
    def forward(self, x):
        x = self.get_features(x)  # get feature vector
        x = self.depth_layer(x)  # feature and depth head
        if self.use_depth_distribution:
            depth = x[:, : self.D].softmax(dim=1)
            x = depth.unsqueeze(1) * x[:, self.D : (self.D + self.C)].unsqueeze(2)  # outer product depth and features
        else:
            # No depth distribution: replicate features uniformly over depth bins.
            x = x.unsqueeze(2).repeat(1, 1, self.D, 1, 1)
        return x
| 3,910 | 36.247619 | 119 | py |
fiery | fiery-master/fiery/models/decoder.py | import torch.nn as nn
from torchvision.models.resnet import resnet18
from fiery.layers.convolutions import UpsamplingAdd
class Decoder(nn.Module):
    """BEV decoder: a truncated ResNet-18 encoder with skip connections,
    followed by per-task heads (segmentation, instance centerness/offset,
    and optionally future flow).
    """
    def __init__(self, in_channels, n_classes, predict_future_flow):
        super().__init__()
        # Reuse ResNet-18 layers as the downsampling trunk; only first_conv
        # is replaced to accept `in_channels` input channels.
        backbone = resnet18(pretrained=False, zero_init_residual=True)
        self.first_conv = nn.Conv2d(in_channels, 64, kernel_size=7, stride=2, padding=3, bias=False)
        self.bn1 = backbone.bn1
        self.relu = backbone.relu
        self.layer1 = backbone.layer1
        self.layer2 = backbone.layer2
        self.layer3 = backbone.layer3
        self.predict_future_flow = predict_future_flow
        shared_out_channels = in_channels
        # Upsampling path with skip-additions back to full resolution.
        self.up3_skip = UpsamplingAdd(256, 128, scale_factor=2)
        self.up2_skip = UpsamplingAdd(128, 64, scale_factor=2)
        self.up1_skip = UpsamplingAdd(64, shared_out_channels, scale_factor=2)
        self.segmentation_head = nn.Sequential(
            nn.Conv2d(shared_out_channels, shared_out_channels, kernel_size=3, padding=1, bias=False),
            nn.BatchNorm2d(shared_out_channels),
            nn.ReLU(inplace=True),
            nn.Conv2d(shared_out_channels, n_classes, kernel_size=1, padding=0),
        )
        self.instance_offset_head = nn.Sequential(
            nn.Conv2d(shared_out_channels, shared_out_channels, kernel_size=3, padding=1, bias=False),
            nn.BatchNorm2d(shared_out_channels),
            nn.ReLU(inplace=True),
            nn.Conv2d(shared_out_channels, 2, kernel_size=1, padding=0),
        )
        self.instance_center_head = nn.Sequential(
            nn.Conv2d(shared_out_channels, shared_out_channels, kernel_size=3, padding=1, bias=False),
            nn.BatchNorm2d(shared_out_channels),
            nn.ReLU(inplace=True),
            nn.Conv2d(shared_out_channels, 1, kernel_size=1, padding=0),
            nn.Sigmoid(),
        )
        if self.predict_future_flow:
            self.instance_future_head = nn.Sequential(
                nn.Conv2d(shared_out_channels, shared_out_channels, kernel_size=3, padding=1, bias=False),
                nn.BatchNorm2d(shared_out_channels),
                nn.ReLU(inplace=True),
                nn.Conv2d(shared_out_channels, 2, kernel_size=1, padding=0),
            )
    def forward(self, x):
        """x: (b, s, c, h, w). Returns a dict of per-task outputs, each
        reshaped back to (b, s, ...); 'instance_flow' is None when flow
        prediction is disabled.
        """
        b, s, c, h, w = x.shape
        # Fold time into batch for the 2D convolutions.
        x = x.view(b * s, c, h, w)
        # (H, W)
        skip_x = {'1': x}
        x = self.first_conv(x)
        x = self.bn1(x)
        x = self.relu(x)
        # (H/4, W/4)
        x = self.layer1(x)
        skip_x['2'] = x
        x = self.layer2(x)
        skip_x['3'] = x
        # (H/8, W/8)
        x = self.layer3(x)
        # First upsample to (H/4, W/4)
        x = self.up3_skip(x, skip_x['3'])
        # Second upsample to (H/2, W/2)
        x = self.up2_skip(x, skip_x['2'])
        # Third upsample to (H, W)
        x = self.up1_skip(x, skip_x['1'])
        segmentation_output = self.segmentation_head(x)
        instance_center_output = self.instance_center_head(x)
        instance_offset_output = self.instance_offset_head(x)
        instance_future_output = self.instance_future_head(x) if self.predict_future_flow else None
        return {
            'segmentation': segmentation_output.view(b, s, *segmentation_output.shape[1:]),
            'instance_center': instance_center_output.view(b, s, *instance_center_output.shape[1:]),
            'instance_offset': instance_offset_output.view(b, s, *instance_offset_output.shape[1:]),
            'instance_flow': instance_future_output.view(b, s, *instance_future_output.shape[1:])
            if instance_future_output is not None else None,
        }
| 3,676 | 38.967391 | 106 | py |
fiery | fiery-master/fiery/layers/convolutions.py | from collections import OrderedDict
from functools import partial
import torch
import torch.nn as nn
import torch.nn.functional as F
class ConvBlock(nn.Module):
    """2D convolution followed by
         - an optional normalisation (batch norm or instance norm)
         - an optional activation (ReLU, LeakyReLU, ELU, or tanh)

    Parameters
    ----------
    in_channels: input channel count.
    out_channels: output channel count; defaults to ``in_channels``.
    kernel_size, stride: convolution geometry; padding keeps spatial size
        for stride 1 with odd kernels.
    norm: one of 'bn', 'in', 'none'.
    activation: one of 'relu', 'lrelu', 'elu', 'tanh', 'none'.
    bias: whether the convolution has a bias term.
    transpose: use ConvTranspose2d (with output_padding=1) instead of Conv2d.

    Raises
    ------
    ValueError for an unrecognised ``norm`` or ``activation`` string.
    """

    def __init__(
        self,
        in_channels,
        out_channels=None,
        kernel_size=3,
        stride=1,
        norm='bn',
        activation='relu',
        bias=False,
        transpose=False,
    ):
        super().__init__()
        out_channels = out_channels or in_channels
        padding = int((kernel_size - 1) / 2)
        self.conv = nn.Conv2d if not transpose else partial(nn.ConvTranspose2d, output_padding=1)
        self.conv = self.conv(in_channels, out_channels, kernel_size, stride, padding=padding, bias=bias)

        if norm == 'bn':
            self.norm = nn.BatchNorm2d(out_channels)
        elif norm == 'in':
            self.norm = nn.InstanceNorm2d(out_channels)
        elif norm == 'none':
            self.norm = None
        else:
            raise ValueError('Invalid norm {}'.format(norm))

        if activation == 'relu':
            self.activation = nn.ReLU(inplace=True)
        elif activation == 'lrelu':
            self.activation = nn.LeakyReLU(0.1, inplace=True)
        elif activation == 'elu':
            self.activation = nn.ELU(inplace=True)
        elif activation == 'tanh':
            # Bug fix: nn.Tanh takes no `inplace` argument — passing one
            # raised TypeError, making activation='tanh' unusable.
            self.activation = nn.Tanh()
        elif activation == 'none':
            self.activation = None
        else:
            raise ValueError('Invalid activation {}'.format(activation))

    def forward(self, x):
        x = self.conv(x)

        if self.norm:
            x = self.norm(x)
        if self.activation:
            x = self.activation(x)
        return x
class Bottleneck(nn.Module):
    """
    Defines a bottleneck module with a residual connection.

    Main branch: 1x1 down-projection (to in_channels // 2) -> KxK conv
    (transposed when `upsample`, strided when `downsample`) -> 1x1
    up-projection, each followed by BatchNorm + ReLU, then Dropout2d.
    A projection shortcut is used whenever channels or spatial size change.
    """

    def __init__(
        self,
        in_channels,
        out_channels=None,
        kernel_size=3,
        dilation=1,
        groups=1,
        upsample=False,
        downsample=False,
        dropout=0.0,
    ):
        super().__init__()
        self._downsample = downsample
        bottleneck_channels = int(in_channels / 2)
        out_channels = out_channels or in_channels
        padding_size = ((kernel_size - 1) * dilation + 1) // 2

        # Define the main conv operation
        assert dilation == 1
        if upsample:
            assert not downsample, 'downsample and upsample not possible simultaneously.'
            # Transposed conv with stride 2 doubles the spatial resolution.
            bottleneck_conv = nn.ConvTranspose2d(
                bottleneck_channels,
                bottleneck_channels,
                kernel_size=kernel_size,
                bias=False,
                dilation=1,
                stride=2,
                output_padding=padding_size,
                padding=padding_size,
                groups=groups,
            )
        elif downsample:
            # Strided conv halves the spatial resolution.
            bottleneck_conv = nn.Conv2d(
                bottleneck_channels,
                bottleneck_channels,
                kernel_size=kernel_size,
                bias=False,
                dilation=dilation,
                stride=2,
                padding=padding_size,
                groups=groups,
            )
        else:
            # Resolution-preserving conv.
            bottleneck_conv = nn.Conv2d(
                bottleneck_channels,
                bottleneck_channels,
                kernel_size=kernel_size,
                bias=False,
                dilation=dilation,
                padding=padding_size,
                groups=groups,
            )

        self.layers = nn.Sequential(
            OrderedDict(
                [
                    # First projection with 1x1 kernel
                    ('conv_down_project', nn.Conv2d(in_channels, bottleneck_channels, kernel_size=1, bias=False)),
                    ('abn_down_project', nn.Sequential(nn.BatchNorm2d(bottleneck_channels),
                                                       nn.ReLU(inplace=True))),
                    # Second conv block
                    ('conv', bottleneck_conv),
                    ('abn', nn.Sequential(nn.BatchNorm2d(bottleneck_channels), nn.ReLU(inplace=True))),
                    # Final projection with 1x1 kernel
                    ('conv_up_project', nn.Conv2d(bottleneck_channels, out_channels, kernel_size=1, bias=False)),
                    ('abn_up_project', nn.Sequential(nn.BatchNorm2d(out_channels),
                                                     nn.ReLU(inplace=True))),
                    # Regulariser
                    ('dropout', nn.Dropout2d(p=dropout)),
                ]
            )
        )

        if out_channels == in_channels and not downsample and not upsample:
            # Shapes already match: identity shortcut.
            self.projection = None
        else:
            # Shortcut must change resolution and/or channel count to match
            # the main branch before the residual addition.
            projection = OrderedDict()
            if upsample:
                projection.update({'upsample_skip_proj': Interpolate(scale_factor=2)})
            elif downsample:
                projection.update({'upsample_skip_proj': nn.MaxPool2d(kernel_size=2, stride=2)})
            projection.update(
                {
                    'conv_skip_proj': nn.Conv2d(in_channels, out_channels, kernel_size=1, bias=False),
                    'bn_skip_proj': nn.BatchNorm2d(out_channels),
                }
            )
            self.projection = nn.Sequential(projection)

    # pylint: disable=arguments-differ
    def forward(self, *args):
        (x,) = args
        x_residual = self.layers(x)
        if self.projection is not None:
            if self._downsample:
                # pad h/w dimensions if they are odd to prevent shape mismatch with residual layer
                x = nn.functional.pad(x, (0, x.shape[-1] % 2, 0, x.shape[-2] % 2), value=0)
            return x_residual + self.projection(x)
        return x_residual + x
class Interpolate(nn.Module):
    """Parameter-free bilinear upsampling by a fixed scale factor."""

    def __init__(self, scale_factor: int = 2):
        super().__init__()
        # Keep a handle on the functional op so the module stays stateless.
        self._interpolate = nn.functional.interpolate
        self._scale_factor = scale_factor

    # pylint: disable=arguments-differ
    def forward(self, x):
        upsampled = self._interpolate(
            x,
            scale_factor=self._scale_factor,
            mode='bilinear',
            align_corners=False,
        )
        return upsampled
class UpsamplingConcat(nn.Module):
    """Bilinearly upsample the first input, concatenate it with a skip tensor,
    then fuse the result with two conv + BN + ReLU layers."""

    def __init__(self, in_channels, out_channels, scale_factor=2):
        super().__init__()

        self.upsample = nn.Upsample(scale_factor=scale_factor, mode='bilinear', align_corners=False)

        fuse_layers = [
            nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1, bias=False),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(inplace=True),
            nn.Conv2d(out_channels, out_channels, kernel_size=3, padding=1, bias=False),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(inplace=True),
        ]
        self.conv = nn.Sequential(*fuse_layers)

    def forward(self, x_to_upsample, x):
        upsampled = self.upsample(x_to_upsample)
        # Skip tensor first, then the upsampled features, along channels.
        fused = torch.cat([x, upsampled], dim=1)
        return self.conv(fused)
class UpsamplingAdd(nn.Module):
    """Upsample, project with a 1x1 conv + BN, then add a skip connection."""

    def __init__(self, in_channels, out_channels, scale_factor=2):
        super().__init__()
        self.upsample_layer = nn.Sequential(
            nn.Upsample(scale_factor=scale_factor, mode='bilinear', align_corners=False),
            nn.Conv2d(in_channels, out_channels, kernel_size=1, padding=0, bias=False),
            nn.BatchNorm2d(out_channels),
        )

    def forward(self, x, x_skip):
        projected = self.upsample_layer(x)
        return projected + x_skip
| 7,593 | 34.32093 | 114 | py |
fiery | fiery-master/fiery/layers/temporal.py | from collections import OrderedDict
import torch
import torch.nn as nn
from fiery.layers.convolutions import ConvBlock
from fiery.utils.geometry import warp_features
class SpatialGRU(nn.Module):
    """A GRU cell that takes an input tensor [BxTxCxHxW] and an optional previous state and passes a
    convolutional gated recurrent unit over the data"""

    def __init__(self, input_size, hidden_size, gru_bias_init=0.0, norm='bn', activation='relu'):
        super().__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        # Added to the raw gate logits before the sigmoid (0.0 keeps standard behaviour).
        self.gru_bias_init = gru_bias_init

        # Convolutional analogues of the GRU update/reset gates.
        self.conv_update = nn.Conv2d(input_size + hidden_size, hidden_size, kernel_size=3, bias=True, padding=1)
        self.conv_reset = nn.Conv2d(input_size + hidden_size, hidden_size, kernel_size=3, bias=True, padding=1)

        # Candidate-state convolution (conv + norm + activation).
        self.conv_state_tilde = ConvBlock(
            input_size + hidden_size, hidden_size, kernel_size=3, bias=False, norm=norm, activation=activation
        )

    def forward(self, x, state=None, flow=None, mode='bilinear'):
        # pylint: disable=unused-argument, arguments-differ
        # Check size
        assert len(x.size()) == 5, 'Input tensor must be BxTxCxHxW.'
        b, timesteps, c, h, w = x.size()
        assert c == self.input_size, f'feature sizes must match, got input {c} for layer with size {self.input_size}'

        # recurrent layers
        rnn_output = []
        # Zero-initialise the hidden state unless one is provided.
        rnn_state = torch.zeros(b, self.hidden_size, h, w, device=x.device) if state is None else state
        for t in range(timesteps):
            x_t = x[:, t]
            if flow is not None:
                # Re-align the hidden state with the current frame before the update.
                # NOTE(review): warp_features is called without spatial_extent here —
                # confirm this matches warp_features' expectations for this flow format.
                rnn_state = warp_features(rnn_state, flow[:, t], mode=mode)

            # propagate rnn state
            rnn_state = self.gru_cell(x_t, rnn_state)
            rnn_output.append(rnn_state)

        # reshape rnn output to batch tensor
        return torch.stack(rnn_output, dim=1)

    def gru_cell(self, x, state):
        """Single GRU step: returns the new hidden state for one timestep."""
        # Compute gates
        x_and_state = torch.cat([x, state], dim=1)
        update_gate = self.conv_update(x_and_state)
        reset_gate = self.conv_reset(x_and_state)
        # Add bias to initialise gate as close to identity function
        update_gate = torch.sigmoid(update_gate + self.gru_bias_init)
        reset_gate = torch.sigmoid(reset_gate + self.gru_bias_init)

        # Compute proposal state, activation is defined in norm_act_config (can be tanh, ReLU etc)
        state_tilde = self.conv_state_tilde(torch.cat([x, (1.0 - reset_gate) * state], dim=1))

        output = (1.0 - update_gate) * state + update_gate * state_tilde
        return output
class CausalConv3d(nn.Module):
    """3D conv + BN + ReLU that is causal in time: zero padding is applied only
    on the temporal left, so outputs never depend on future frames."""

    def __init__(self, in_channels, out_channels, kernel_size=(2, 3, 3), dilation=(1, 1, 1), bias=False):
        super().__init__()
        assert len(kernel_size) == 3, 'kernel_size must be a 3-tuple.'
        time_pad = (kernel_size[0] - 1) * dilation[0]
        height_pad = ((kernel_size[1] - 1) * dilation[1]) // 2
        width_pad = ((kernel_size[2] - 1) * dilation[2]) // 2

        # ConstantPad3d ordering: (w_left, w_right, h_top, h_bottom, t_front, t_back).
        self.pad = nn.ConstantPad3d(padding=(width_pad, width_pad, height_pad, height_pad, time_pad, 0), value=0)
        self.conv = nn.Conv3d(in_channels, out_channels, kernel_size, dilation=dilation, stride=1, padding=0, bias=bias)
        self.norm = nn.BatchNorm3d(out_channels)
        self.activation = nn.ReLU(inplace=True)

    def forward(self, *inputs):
        (x,) = inputs
        return self.activation(self.norm(self.conv(self.pad(x))))
class CausalMaxPool3d(nn.Module):
    """Stride-1 max pooling that never looks into the temporal future
    (zero padding only on the temporal left)."""

    def __init__(self, kernel_size=(2, 3, 3)):
        super().__init__()
        assert len(kernel_size) == 3, 'kernel_size must be a 3-tuple.'
        time_pad = kernel_size[0] - 1
        height_pad = (kernel_size[1] - 1) // 2
        width_pad = (kernel_size[2] - 1) // 2

        # Pad temporally on the left only, so each output depends on past frames.
        self.pad = nn.ConstantPad3d(padding=(width_pad, width_pad, height_pad, height_pad, time_pad, 0), value=0)
        self.max_pool = nn.MaxPool3d(kernel_size, stride=1)

    def forward(self, *inputs):
        (x,) = inputs
        return self.max_pool(self.pad(x))
def conv_1x1x1_norm_activated(in_channels, out_channels):
    """1x1x1 3D convolution, normalization and activation layer."""
    modules = OrderedDict()
    modules['conv'] = nn.Conv3d(in_channels, out_channels, kernel_size=1, bias=False)
    modules['norm'] = nn.BatchNorm3d(out_channels)
    modules['activation'] = nn.ReLU(inplace=True)
    return nn.Sequential(modules)
class Bottleneck3D(nn.Module):
    """Residual bottleneck over 3D (temporal) features: 1x1x1 down-projection,
    causal KxHxW conv, 1x1x1 up-projection, plus a (possibly projected) shortcut."""

    def __init__(self, in_channels, out_channels=None, kernel_size=(2, 3, 3), dilation=(1, 1, 1)):
        super().__init__()
        bottleneck_channels = in_channels // 2
        out_channels = out_channels or in_channels

        blocks = OrderedDict()
        # First projection with 1x1 kernel
        blocks['conv_down_project'] = conv_1x1x1_norm_activated(in_channels, bottleneck_channels)
        # Second conv block (causal in time)
        blocks['conv'] = CausalConv3d(
            bottleneck_channels,
            bottleneck_channels,
            kernel_size=kernel_size,
            dilation=dilation,
            bias=False,
        )
        # Final projection with 1x1 kernel
        blocks['conv_up_project'] = conv_1x1x1_norm_activated(bottleneck_channels, out_channels)
        self.layers = nn.Sequential(blocks)

        if out_channels != in_channels:
            # Channel counts differ: project the input before the residual add.
            self.projection = nn.Sequential(
                nn.Conv3d(in_channels, out_channels, kernel_size=1, bias=False),
                nn.BatchNorm3d(out_channels),
            )
        else:
            self.projection = None

    def forward(self, *args):
        (x,) = args
        residual = self.layers(x)
        shortcut = x if self.projection is None else self.projection(x)
        return residual + shortcut
class PyramidSpatioTemporalPooling(nn.Module):
    """ Spatio-temporal pyramid pooling.
        Performs 3D average pooling followed by 1x1x1 convolution to reduce the number of channels and upsampling.
        Setting contains a list of kernel_size: usually it is [(2, h, w), (2, h//2, w//2), (2, h//4, w//4)]
    """

    def __init__(self, in_channels, reduction_channels, pool_sizes):
        super().__init__()
        self.features = []
        for pool_size in pool_sizes:
            assert pool_size[0] == 2, (
                "Time kernel should be 2 as PyTorch raises an error when" "padding with more than half the kernel size"
            )
            # Stride 1 in time, non-overlapping spatial windows.
            stride = (1, *pool_size[1:])
            # Left-pad the time dimension so pooling only sees past frames.
            padding = (pool_size[0] - 1, 0, 0)
            self.features.append(
                nn.Sequential(
                    OrderedDict(
                        [
                            # Pad the input tensor but do not take into account zero padding into the average.
                            (
                                'avgpool',
                                torch.nn.AvgPool3d(
                                    kernel_size=pool_size, stride=stride, padding=padding, count_include_pad=False
                                ),
                            ),
                            ('conv_bn_relu', conv_1x1x1_norm_activated(in_channels, reduction_channels)),
                        ]
                    )
                )
            )
        self.features = nn.ModuleList(self.features)

    def forward(self, *inputs):
        (x,) = inputs
        b, _, t, h, w = x.shape
        # Do not include current tensor when concatenating
        out = []
        for f in self.features:
            # Remove unnecessary padded values (time dimension) on the right
            x_pool = f(x)[:, :, :-1].contiguous()
            c = x_pool.shape[1]
            # Upsample each pooled map back to the input spatial resolution.
            x_pool = nn.functional.interpolate(
                x_pool.view(b * t, c, *x_pool.shape[-2:]), (h, w), mode='bilinear', align_corners=False
            )
            x_pool = x_pool.view(b, c, t, h, w)
            out.append(x_pool)
        out = torch.cat(out, 1)
        return out
class TemporalBlock(nn.Module):
    """ Temporal block with the following layers:
        - 2x3x3, 1x3x3, spatio-temporal pyramid pooling
        - dropout
        - skip connection.
    """

    def __init__(self, in_channels, out_channels=None, use_pyramid_pooling=False, pool_sizes=None):
        super().__init__()
        self.in_channels = in_channels
        self.half_channels = in_channels // 2
        self.out_channels = out_channels or self.in_channels
        self.kernels = [(2, 3, 3), (1, 3, 3)]

        # Flag for spatio-temporal pyramid pooling
        self.use_pyramid_pooling = use_pyramid_pooling

        # 3 convolution paths: 2x3x3, 1x3x3, 1x1x1
        self.convolution_paths = []
        for kernel_size in self.kernels:
            # Each path: 1x1x1 channel reduction followed by a causal 3D conv.
            self.convolution_paths.append(
                nn.Sequential(
                    conv_1x1x1_norm_activated(self.in_channels, self.half_channels),
                    CausalConv3d(self.half_channels, self.half_channels, kernel_size=kernel_size),
                )
            )
        # Third path: plain 1x1x1 projection.
        self.convolution_paths.append(conv_1x1x1_norm_activated(self.in_channels, self.half_channels))
        self.convolution_paths = nn.ModuleList(self.convolution_paths)

        # Channels fed to the aggregation conv: all paths concatenated.
        agg_in_channels = len(self.convolution_paths) * self.half_channels

        if self.use_pyramid_pooling:
            assert pool_sizes is not None, "setting must contain the list of kernel_size, but is None."
            reduction_channels = self.in_channels // 3
            self.pyramid_pooling = PyramidSpatioTemporalPooling(self.in_channels, reduction_channels, pool_sizes)
            agg_in_channels += len(pool_sizes) * reduction_channels

        # Feature aggregation
        self.aggregation = nn.Sequential(
            conv_1x1x1_norm_activated(agg_in_channels, self.out_channels),)

        if self.out_channels != self.in_channels:
            # Projection shortcut so the residual addition has matching channels.
            self.projection = nn.Sequential(
                nn.Conv3d(self.in_channels, self.out_channels, kernel_size=1, bias=False),
                nn.BatchNorm3d(self.out_channels),
            )
        else:
            self.projection = None

    def forward(self, *inputs):
        (x,) = inputs
        x_paths = []
        for conv in self.convolution_paths:
            x_paths.append(conv(x))
        x_residual = torch.cat(x_paths, dim=1)
        if self.use_pyramid_pooling:
            x_pool = self.pyramid_pooling(x)
            x_residual = torch.cat([x_residual, x_pool], dim=1)
        x_residual = self.aggregation(x_residual)

        if self.out_channels != self.in_channels:
            x = self.projection(x)
        x = x + x_residual
        return x
| 11,152 | 38.549645 | 120 | py |
fiery | fiery-master/fiery/utils/visualisation.py | import numpy as np
import torch
import matplotlib.pylab
from fiery.utils.instance import predict_instance_segmentation_and_trajectories
DEFAULT_COLORMAP = matplotlib.pylab.cm.jet
def flow_to_image(flow: np.ndarray, autoscale: bool = False) -> np.ndarray:
    """
    Applies colour map to flow which should be a 2 channel image tensor 2xHxW. Returns a HxWx3 numpy image
    Code adapted from: https://github.com/liruoteng/FlowNet/blob/master/models/flownet/scripts/flowlib.py
    """
    u = flow[0, :, :]
    v = flow[1, :, :]

    # Normalise flow maps
    if autoscale:
        # Bug fix: the previous in-place `u /= ...` silently modified the caller's
        # `flow` array (u/v are views into it). Use out-of-place division instead.
        # The magnitude is also only needed on this branch, so compute it here.
        rad = np.sqrt(u ** 2 + v ** 2)
        maxrad = np.max(rad)
        u = u / (maxrad + np.finfo(float).eps)
        v = v / (maxrad + np.finfo(float).eps)

    # visualise flow with cmap
    return np.uint8(compute_color(u, v) * 255)
def _normalise(image: np.ndarray) -> np.ndarray:
lower = np.min(image)
delta = np.max(image) - lower
if delta == 0:
delta = 1
image = (image.astype(np.float32) - lower) / delta
return image
def apply_colour_map(
    image: np.ndarray, cmap: matplotlib.colors.LinearSegmentedColormap = DEFAULT_COLORMAP, autoscale: bool = False
) -> np.ndarray:
    """
    Applies a colour map to the given 1 or 2 channel numpy image. if 2 channel, must be 2xHxW.
    Returns a HxWx3 numpy image
    """
    if image.ndim == 2 or (image.ndim == 3 and image.shape[0] == 1):
        if image.ndim == 3:
            image = image[0]
        # grayscale scalar image
        if autoscale:
            image = _normalise(image)
        # cmap returns RGBA; drop the alpha channel.
        return cmap(image)[:, :, :3]

    if image.shape[0] == 2:
        # 2 dimensional UV
        return flow_to_image(image, autoscale=autoscale)

    if image.shape[0] == 3:
        # normalise rgb channels
        if autoscale:
            image = _normalise(image)
        # CxHxW -> HxWxC, channels passed through unchanged.
        return np.transpose(image, axes=[1, 2, 0])

    raise Exception('Image must be 1, 2 or 3 channel to convert to colour_map (CxHxW)')
def heatmap_image(
    image: np.ndarray, cmap: matplotlib.colors.LinearSegmentedColormap = DEFAULT_COLORMAP, autoscale: bool = True
) -> np.ndarray:
    """Colorize an 1 or 2 channel image with a colourmap."""
    # Validate dtype and layout before colouring.
    if not issubclass(image.dtype.type, np.floating):
        raise ValueError(f"Expected a ndarray of float type, but got dtype {image.dtype}")

    shape_ok = image.ndim == 2 or (image.ndim == 3 and image.shape[0] in [1, 2])
    if not shape_ok:
        raise ValueError(f"Expected a ndarray of shape [H, W] or [1, H, W] or [2, H, W], but got shape {image.shape}")

    coloured = apply_colour_map(image, cmap=cmap, autoscale=autoscale)
    return np.uint8(coloured * 255)
def compute_color(u: np.ndarray, v: np.ndarray) -> np.ndarray:
    """Map a (u, v) flow field to an HxWx3 RGB image in [0, 1] via a colour wheel.

    NOTE: u and v are modified in place (their NaN entries are zeroed).
    """
    assert u.shape == v.shape
    [h, w] = u.shape
    img = np.zeros([h, w, 3])

    # Zero out NaNs (in place) and remember where they were.
    nan_mask = np.isnan(u) | np.isnan(v)
    u[nan_mask] = 0
    v[nan_mask] = 0

    colorwheel = make_color_wheel()
    ncols = np.size(colorwheel, 0)

    # Magnitude and angle (normalised to [-1, 1]) of each flow vector.
    rad = np.sqrt(u ** 2 + v ** 2)
    a = np.arctan2(-v, -u) / np.pi

    # Fractional index into the colour wheel; k_0/k_1 bracket it.
    f_k = (a + 1) / 2 * (ncols - 1) + 1
    k_0 = np.floor(f_k).astype(int)
    k_1 = k_0 + 1
    k_1[k_1 == ncols + 1] = 1
    f = f_k - k_0

    for i in range(0, np.size(colorwheel, 1)):
        tmp = colorwheel[:, i]
        # Linearly interpolate between the two neighbouring wheel colours.
        col0 = tmp[k_0 - 1] / 255
        col1 = tmp[k_1 - 1] / 255
        col = (1 - f) * col0 + f * col1

        # Inside the unit circle saturation scales with magnitude; outside it is dimmed.
        idx = rad <= 1
        col[idx] = 1 - rad[idx] * (1 - col[idx])
        notidx = np.logical_not(idx)

        col[notidx] *= 0.75
        # NaN pixels are forced to black.
        img[:, :, i] = col * (1 - nan_mask)

    return img
def make_color_wheel() -> np.ndarray:
    """
    Create colour wheel.
    Code adapted from https://github.com/liruoteng/FlowNet/blob/master/models/flownet/scripts/flowlib.py
    """
    # Number of wheel entries per colour transition.
    red_yellow = 15
    yellow_green = 6
    green_cyan = 4
    cyan_blue = 11
    blue_magenta = 13
    magenta_red = 6

    ncols = red_yellow + yellow_green + green_cyan + cyan_blue + blue_magenta + magenta_red
    wheel = np.zeros([ncols, 3])

    def ramp(n):
        # Linearly increasing values in [0, 255) over n entries.
        return np.floor(255 * np.arange(0, n) / n)

    col = 0
    # red -> yellow: green channel ramps up
    wheel[col:col + red_yellow, 0] = 255
    wheel[col:col + red_yellow, 1] = ramp(red_yellow)
    col += red_yellow

    # yellow -> green: red channel ramps down
    wheel[col:col + yellow_green, 0] = 255 - ramp(yellow_green)
    wheel[col:col + yellow_green, 1] = 255
    col += yellow_green

    # green -> cyan: blue channel ramps up
    wheel[col:col + green_cyan, 1] = 255
    wheel[col:col + green_cyan, 2] = ramp(green_cyan)
    col += green_cyan

    # cyan -> blue: green channel ramps down
    wheel[col:col + cyan_blue, 1] = 255 - ramp(cyan_blue)
    wheel[col:col + cyan_blue, 2] = 255
    col += cyan_blue

    # blue -> magenta: red channel ramps up
    wheel[col:col + blue_magenta, 2] = 255
    wheel[col:col + blue_magenta, 0] = ramp(blue_magenta)
    col += blue_magenta

    # magenta -> red: blue channel ramps down
    wheel[col:col + magenta_red, 2] = 255 - ramp(magenta_red)
    wheel[col:col + magenta_red, 0] = 255

    return wheel
def make_contour(img, colour=[0, 0, 0], double_line=False):
    """Return a copy of `img` with a 1-pixel (or 2-pixel) border drawn in `colour`."""
    h, w = img.shape[:2]
    out = img.copy()

    # Outer frame.
    out[:, 0] = colour
    out[:, w - 1] = colour
    out[0, :] = colour
    out[h - 1, :] = colour

    if double_line:
        # Second, inner frame one pixel in.
        out[:, 1] = colour
        out[:, w - 2] = colour
        out[1, :] = colour
        out[h - 2, :] = colour
    return out
def plot_instance_map(instance_image, instance_map, instance_colours=None, bg_image=None):
    """Paint each instance id of `instance_image` with its colour onto `bg_image`
    (a white canvas when no background is given)."""
    if isinstance(instance_image, torch.Tensor):
        instance_image = instance_image.cpu().numpy()
    assert isinstance(instance_image, np.ndarray)
    if instance_colours is None:
        instance_colours = generate_instance_colours(instance_map)
    if len(instance_image.shape) > 2:
        # Collapse any leading singleton dims down to HxW.
        instance_image = instance_image.reshape((instance_image.shape[-2], instance_image.shape[-1]))

    if bg_image is None:
        canvas = 255 * np.ones((instance_image.shape[0], instance_image.shape[1], 3), dtype=np.uint8)
    else:
        canvas = bg_image

    for instance_id, colour in instance_colours.items():
        canvas[instance_image == instance_id] = colour
    return canvas
def visualise_output(labels, output, cfg):
    """Render ground truth (left) and predictions (right) into a (1, T, C, H, W) uint8 video array.

    Each half stacks, top to bottom: instance segmentation, future flow,
    semantic segmentation, centerness heatmap and offset field.
    """
    # Index 0 -> white (background), index 1 -> black (foreground).
    semantic_colours = np.array([[255, 255, 255], [0, 0, 0]], dtype=np.uint8)

    consistent_instance_seg = predict_instance_segmentation_and_trajectories(
        output, compute_matched_centers=False
    )

    sequence_length = consistent_instance_seg.shape[1]
    b = 0  # only the first batch element is visualised
    video = []
    for t in range(sequence_length):
        out_t = []
        # Ground truth
        # Drop the first unique id (background) before building the colour map.
        unique_ids = torch.unique(labels['instance'][b, t]).cpu().numpy()[1:]
        instance_map = dict(zip(unique_ids, unique_ids))
        instance_plot = plot_instance_map(labels['instance'][b, t].cpu(), instance_map)[::-1, ::-1]
        instance_plot = make_contour(instance_plot)

        semantic_seg = labels['segmentation'].squeeze(2).cpu().numpy()
        semantic_plot = semantic_colours[semantic_seg[b, t][::-1, ::-1]]
        semantic_plot = make_contour(semantic_plot)

        if cfg.INSTANCE_FLOW.ENABLED:
            future_flow_plot = labels['flow'][b, t].cpu().numpy()
            # Mask flow outside class-1 pixels before colourising.
            future_flow_plot[:, semantic_seg[b, t] != 1] = 0
            future_flow_plot = flow_to_image(future_flow_plot)[::-1, ::-1]
            future_flow_plot = make_contour(future_flow_plot)
        else:
            future_flow_plot = np.zeros_like(semantic_plot)

        center_plot = heatmap_image(labels['centerness'][b, t, 0].cpu().numpy())[::-1, ::-1]
        center_plot = make_contour(center_plot)

        offset_plot = labels['offset'][b, t].cpu().numpy()
        offset_plot[:, semantic_seg[b, t] != 1] = 0
        offset_plot = flow_to_image(offset_plot)[::-1, ::-1]
        offset_plot = make_contour(offset_plot)

        out_t.append(np.concatenate([instance_plot, future_flow_plot,
                                     semantic_plot, center_plot, offset_plot], axis=0))

        # Predictions
        unique_ids = torch.unique(consistent_instance_seg[b, t]).cpu().numpy()[1:]
        instance_map = dict(zip(unique_ids, unique_ids))
        instance_plot = plot_instance_map(consistent_instance_seg[b, t].cpu(), instance_map)[::-1, ::-1]
        instance_plot = make_contour(instance_plot)

        semantic_seg = output['segmentation'].argmax(dim=2).detach().cpu().numpy()
        semantic_plot = semantic_colours[semantic_seg[b, t][::-1, ::-1]]
        semantic_plot = make_contour(semantic_plot)

        if cfg.INSTANCE_FLOW.ENABLED:
            future_flow_plot = output['instance_flow'][b, t].detach().cpu().numpy()
            future_flow_plot[:, semantic_seg[b, t] != 1] = 0
            future_flow_plot = flow_to_image(future_flow_plot)[::-1, ::-1]
            future_flow_plot = make_contour(future_flow_plot)
        else:
            future_flow_plot = np.zeros_like(semantic_plot)

        center_plot = heatmap_image(output['instance_center'][b, t, 0].detach().cpu().numpy())[::-1, ::-1]
        center_plot = make_contour(center_plot)

        offset_plot = output['instance_offset'][b, t].detach().cpu().numpy()
        offset_plot[:, semantic_seg[b, t] != 1] = 0
        offset_plot = flow_to_image(offset_plot)[::-1, ::-1]
        offset_plot = make_contour(offset_plot)

        out_t.append(np.concatenate([instance_plot, future_flow_plot,
                                     semantic_plot, center_plot, offset_plot], axis=0))

        # Ground truth and prediction columns side by side.
        out_t = np.concatenate(out_t, axis=1)
        # Shape (C, H, W)
        out_t = out_t.transpose((2, 0, 1))
        video.append(out_t)

    # Shape (B, T, C, H, W)
    video = np.stack(video)[None]
    return video
def convert_figure_numpy(figure):
    """ Convert figure to numpy image """
    width, height = figure.canvas.get_width_height()
    buffer = np.frombuffer(figure.canvas.tostring_rgb(), dtype=np.uint8)
    return buffer.reshape((height, width, 3))
def generate_instance_colours(instance_map):
    """Map each instance id to an RGB colour, cycling through a fixed palette
    indexed by the global instance id modulo the palette size."""
    # Most distinct 22 colors (kelly colors from https://stackoverflow.com/questions/470690/how-to-automatically-generate
    # -n-distinct-colors)
    # plus some colours from AD40k
    INSTANCE_COLOURS = np.asarray([
        [0, 0, 0],
        [255, 179, 0],
        [128, 62, 117],
        [255, 104, 0],
        [166, 189, 215],
        [193, 0, 32],
        [206, 162, 98],
        [129, 112, 102],
        [0, 125, 52],
        [246, 118, 142],
        [0, 83, 138],
        [255, 122, 92],
        [83, 55, 122],
        [255, 142, 0],
        [179, 40, 81],
        [244, 200, 0],
        [127, 24, 13],
        [147, 170, 0],
        [89, 51, 21],
        [241, 58, 19],
        [35, 44, 22],
        [112, 224, 255],
        [70, 184, 160],
        [153, 0, 255],
        [71, 255, 0],
        [255, 0, 163],
        [255, 204, 0],
        [0, 255, 235],
        [255, 0, 235],
        [255, 0, 122],
        [255, 245, 0],
        [10, 190, 212],
        [214, 255, 0],
        [0, 204, 255],
        [20, 0, 255],
        [255, 255, 0],
        [0, 153, 255],
        [0, 255, 204],
        [41, 255, 0],
        [173, 0, 255],
        [0, 245, 255],
        [71, 0, 255],
        [0, 255, 184],
        [0, 92, 255],
        [184, 255, 0],
        [255, 214, 0],
        [25, 194, 194],
        [92, 0, 255],
        [220, 220, 220],
        [255, 9, 92],
        [112, 9, 255],
        [8, 255, 214],
        [255, 184, 6],
        [10, 255, 71],
        [255, 41, 10],
        [7, 255, 255],
        [224, 255, 8],
        [102, 8, 255],
        [255, 61, 6],
        [255, 194, 7],
        [0, 255, 20],
        [255, 8, 41],
        [255, 5, 153],
        [6, 51, 255],
        [235, 12, 255],
        [160, 150, 20],
        [0, 163, 255],
        [140, 140, 140],
        [250, 10, 15],
        [20, 255, 0],
    ])

    return {instance_id: INSTANCE_COLOURS[global_instance_id % len(INSTANCE_COLOURS)] for
            instance_id, global_instance_id in instance_map.items()
            }
| 12,488 | 32.572581 | 121 | py |
fiery | fiery-master/fiery/utils/network.py | import torch
import torch.nn as nn
import torchvision
def pack_sequence_dim(x):
    """Fold the sequence dimension into the batch: (b, s, ...) -> (b*s, ...)."""
    batch, seq = x.shape[:2]
    return x.view(batch * seq, *x.shape[2:])
def unpack_sequence_dim(x, b, s):
    """Inverse of pack_sequence_dim: reshape (b*s, ...) back to (b, s, ...)."""
    trailing = x.shape[1:]
    return x.view(b, s, *trailing)
def preprocess_batch(batch, device, unsqueeze=False):
    """Move every tensor in `batch` to `device` in place, optionally adding a
    leading batch dimension. The 'sample_token' entry is left untouched."""
    for key in batch:
        if key == 'sample_token':
            # Tokens are not tensors: skip device transfer.
            continue
        value = batch[key].to(device)
        if unsqueeze:
            value = value.unsqueeze(0)
        batch[key] = value
def set_module_grad(module, requires_grad=False):
    """Enable or disable gradient tracking for every parameter of `module`, in place."""
    for parameter in module.parameters():
        parameter.requires_grad = requires_grad
def set_bn_momentum(model, momentum=0.1):
    """Set the running-stats momentum of every 1D/2D/3D batch-norm layer in `model`."""
    batch_norm_types = (nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d)
    for module in model.modules():
        if isinstance(module, batch_norm_types):
            module.momentum = momentum
class NormalizeInverse(torchvision.transforms.Normalize):
    """Invert a torchvision Normalize transform.

    Reference: https://discuss.pytorch.org/t/simple-way-to-inverse-transform-normalization/4821/8
    """

    def __init__(self, mean, std):
        mean = torch.as_tensor(mean)
        std = torch.as_tensor(std)
        # Normalize computes (x - m) / s; with m' = -m/s and s' = 1/s it
        # computes x * s + m instead, i.e. the inverse mapping.
        inverse_std = 1 / (std + 1e-7)
        inverse_mean = -mean * inverse_std
        super().__init__(mean=inverse_mean, std=inverse_std)

    def __call__(self, tensor):
        # Operate on a clone so the caller's tensor is not modified in place.
        return super().__call__(tensor.clone())
| 1,236 | 27.113636 | 89 | py |
fiery | fiery-master/fiery/utils/geometry.py | import PIL
import numpy as np
import torch
from pyquaternion import Quaternion
def resize_and_crop_image(img, resize_dims, crop):
    """Bilinearly resize a PIL image to `resize_dims`, then crop it with the `crop` box."""
    resized = img.resize(resize_dims, resample=PIL.Image.BILINEAR)
    return resized.crop(crop)
def update_intrinsics(intrinsics, top_crop=0.0, left_crop=0.0, scale_width=1.0, scale_height=1.0):
    """
    Update a 3x3 camera intrinsics matrix after resizing and cropping the image.

    Parameters
    ----------
        intrinsics: torch.Tensor (3, 3)
        top_crop: float
        left_crop: float
        scale_width: float
        scale_height: float
    """
    updated = intrinsics.clone()

    # Scaling the image multiplies the focal lengths and principal point.
    updated[0, 0] *= scale_width   # fx
    updated[0, 2] *= scale_width   # cx
    updated[1, 1] *= scale_height  # fy
    updated[1, 2] *= scale_height  # cy

    # Cropping shifts the principal point by the crop offsets.
    updated[0, 2] -= left_crop
    updated[1, 2] -= top_crop

    return updated
def calculate_birds_eye_view_parameters(x_bounds, y_bounds, z_bounds):
    """
    Parameters
    ----------
        x_bounds: Forward direction in the ego-car.
        y_bounds: Sides
        z_bounds: Height

    Each bound is (start, end, resolution).

    Returns
    -------
        bev_resolution: Bird's-eye view bev_resolution
        bev_start_position Bird's-eye view first element
        bev_dimension Bird's-eye view tensor spatial dimension
    """
    bounds = [x_bounds, y_bounds, z_bounds]
    bev_resolution = torch.tensor([resolution for _, _, resolution in bounds])
    # Centre of the first cell along each axis.
    bev_start_position = torch.tensor([start + resolution / 2.0 for start, _, resolution in bounds])
    # Number of cells along each axis (truncated to integer).
    bev_dimension = torch.tensor(
        [(end - start) / resolution for start, end, resolution in bounds], dtype=torch.long
    )
    return bev_resolution, bev_start_position, bev_dimension
def convert_egopose_to_matrix_numpy(egopose):
    """Build a 4x4 homogeneous transform from an egopose record containing a
    'rotation' quaternion and a 'translation' vector."""
    matrix = np.zeros((4, 4), dtype=np.float32)
    matrix[:3, :3] = Quaternion(egopose['rotation']).rotation_matrix
    matrix[:3, 3] = np.array(egopose['translation'])
    matrix[3, 3] = 1.0
    return matrix
def invert_matrix_egopose_numpy(egopose):
    """ Compute the inverse transformation of a 4x4 egopose numpy matrix."""
    rotation = egopose[:3, :3]
    translation = egopose[:3, 3]

    inverse = np.zeros((4, 4), dtype=np.float32)
    # For a rigid transform [R|t], the inverse is [R^T | -R^T t].
    inverse[:3, :3] = rotation.T
    inverse[:3, 3] = -np.dot(rotation.T, translation)
    inverse[3, 3] = 1.0
    return inverse
def mat2pose_vec(matrix: torch.Tensor):
    """
    Converts a 4x4 pose matrix into a 6-dof pose vector
    Args:
        matrix (ndarray): 4x4 pose matrix
    Returns:
        vector (ndarray): 6-dof pose vector comprising translation components (tx, ty, tz) and
        rotation components (rx, ry, rz)
    """
    # Euler angles recovered from the rotation block.
    # M[1, 2] = -sinx*cosy, M[2, 2] = +cosx*cosy
    angle_x = torch.atan2(-matrix[..., 1, 2], matrix[..., 2, 2])

    # M[0, 2] = +siny, M[1, 2] = -sinx*cosy, M[2, 2] = +cosx*cosy
    cos_y = torch.sqrt(matrix[..., 1, 2] ** 2 + matrix[..., 2, 2] ** 2)
    angle_y = torch.atan2(matrix[..., 0, 2], cos_y)

    # M[0, 0] = +cosy*cosz, M[0, 1] = -cosy*sinz
    angle_z = torch.atan2(-matrix[..., 0, 1], matrix[..., 0, 0])

    translation = matrix[..., :3, 3]
    rotation = torch.stack((angle_x, angle_y, angle_z), dim=-1)
    return torch.cat((translation, rotation), dim=-1)
def euler2mat(angle: torch.Tensor):
    """Convert euler angles to rotation matrix.
    Reference: https://github.com/pulkitag/pycaffe-utils/blob/master/rot_utils.py#L174
    Args:
        angle: rotation angle along 3 axis (in radians) [Bx3]
    Returns:
        Rotation matrix corresponding to the euler angles [Bx3x3]
    """
    original_shape = angle.shape
    flat = angle.view(-1, 3)
    rx, ry, rz = flat[:, 0], flat[:, 1], flat[:, 2]

    zeros = torch.zeros_like(rz)
    ones = torch.ones_like(rz)

    cz, sz = torch.cos(rz), torch.sin(rz)
    rot_z = torch.stack([cz, -sz, zeros, sz, cz, zeros, zeros, zeros, ones], dim=1).view(-1, 3, 3)

    cy, sy = torch.cos(ry), torch.sin(ry)
    rot_y = torch.stack([cy, zeros, sy, zeros, ones, zeros, -sy, zeros, cy], dim=1).view(-1, 3, 3)

    cx, sx = torch.cos(rx), torch.sin(rx)
    rot_x = torch.stack([ones, zeros, zeros, zeros, cx, -sx, zeros, sx, cx], dim=1).view(-1, 3, 3)

    # Compose in X * Y * Z order.
    rotation = rot_x.bmm(rot_y).bmm(rot_z)
    return rotation.view(*original_shape[:-1], 3, 3)
def pose_vec2mat(vec: torch.Tensor):
    """
    Convert 6DoF parameters to transformation matrix.
    Args:
        vec: 6DoF parameters in the order of tx, ty, tz, rx, ry, rz [B,6]
    Returns:
        A transformation matrix [B,4,4]
    """
    translation = vec[..., :3].unsqueeze(-1)  # [...,3,1]
    euler_angles = vec[..., 3:].contiguous()  # [...,3]
    rotation = euler2mat(euler_angles)  # [...,3,3]

    # Assemble [R|t] and pad to a homogeneous 4x4 matrix.
    pose = torch.cat([rotation, translation], dim=-1)  # [...,3,4]
    pose = torch.nn.functional.pad(pose, [0, 0, 0, 1], value=0)  # [...,4,4]
    pose[..., 3, 3] = 1.0
    return pose
def invert_pose_matrix(x):
    """
    Parameters
    ----------
        x: [B, 4, 4] batch of pose matrices

    Returns
    -------
        out: [B, 4, 4] batch of inverse pose matrices
    """
    assert len(x.shape) == 3 and x.shape[1:] == (4, 4), 'Only works for batch of pose matrices.'

    rotation_t = torch.transpose(x[:, :3, :3], 1, 2)
    translation = x[:, :3, 3:]

    # Rigid-transform inverse: [R|t]^-1 = [R^T | -R^T t].
    top_rows = torch.cat([rotation_t, -torch.bmm(rotation_t, translation)], dim=-1)  # [B,3,4]
    inverse = torch.nn.functional.pad(top_rows, [0, 0, 0, 1], value=0)  # [B,4,4]
    inverse[..., 3, 3] = 1.0
    return inverse
def warp_features(x, flow, mode='nearest', spatial_extent=None):
    """ Applies a rotation and translation to feature map x.
        Args:
            x: (b, c, h, w) feature map
            flow: (b, 6) 6DoF vector (only uses the xy poriton)
            mode: use 'nearest' when dealing with categorical inputs
            spatial_extent: (x, y) half-extent used to normalise the translation.
                NOTE(review): the default None raises a TypeError on the division
                below whenever flow is not None — callers must pass it.
        Returns:
            in plane transformed feature map
    """
    if flow is None:
        return x
    b, c, h, w = x.shape
    # z-rotation
    angle = flow[:, 5].clone()  # torch.atan2(flow[:, 1, 0], flow[:, 0, 0])
    # x-y translation
    translation = flow[:, :2].clone()  # flow[:, :2, 3]

    # Normalise translation. Need to divide by how many meters is half of the image.
    # because translation of 1.0 correspond to translation of half of the image.
    translation[:, 0] /= spatial_extent[0]
    translation[:, 1] /= spatial_extent[1]
    # forward axis is inverted
    translation[:, 0] *= -1

    cos_theta = torch.cos(angle)
    sin_theta = torch.sin(angle)

    # output = Rot.input + translation
    # tx and ty are inverted as is the case when going from real coordinates to numpy coordinates
    # translation_pos_0 -> positive value makes the image move to the left
    # translation_pos_1 -> positive value makes the image move to the top
    # Angle -> positive value in rad makes the image move in the trigonometric way
    transformation = torch.stack([cos_theta, -sin_theta, translation[:, 1],
                                  sin_theta, cos_theta, translation[:, 0]], dim=-1).view(b, 2, 3)

    # Note that a rotation will preserve distances only if height = width. Otherwise there's
    # resizing going on. e.g. rotation of pi/2 of a 100x200 image will make what's in the center of the image
    # elongated.
    grid = torch.nn.functional.affine_grid(transformation, size=x.shape, align_corners=False)
    warped_x = torch.nn.functional.grid_sample(x, grid.float(), mode=mode, padding_mode='zeros', align_corners=False)

    return warped_x
def cumulative_warp_features(x, flow, mode='nearest', spatial_extent=None):
    """ Warps a sequence of feature maps by accumulating incremental 2d flow.

    x[:, -1] remains unchanged
    x[:, -2] is warped using flow[:, -2]
    x[:, -3] is warped using flow[:, -3] @ flow[:, -2]
    ...
    x[:, 0] is warped using flow[:, 0] @ ... @ flow[:, -3] @ flow[:, -2]

    Args:
        x: (b, t, c, h, w) sequence of feature maps
        flow: (b, t, 6) sequence of 6 DoF pose
            from t to t+1 (only uses the xy poriton)

    Returns:
        (b, t, c, h, w) sequence with every frame aligned to the last one.
    """
    sequence_length = x.shape[1]
    if sequence_length == 1:
        return x

    flow = pose_vec2mat(flow)

    out = [x[:, -1]]
    cum_flow = flow[:, -2]
    for t in reversed(range(sequence_length - 1)):
        out.append(warp_features(x[:, t], mat2pose_vec(cum_flow), mode=mode, spatial_extent=spatial_extent))
        if t > 0:
            # Fold in the next incremental pose. Guarded so the final iteration does not
            # compute an unused product with flow[:, -1] (negative-index wraparound at t=0).
            # @ is the equivalent of torch.bmm
            cum_flow = flow[:, t - 1] @ cum_flow

    return torch.stack(out[::-1], 1)
def cumulative_warp_features_reverse(x, flow, mode='nearest', spatial_extent=None):
    """Warp a sequence of feature maps by accumulating inverted 2D flow.

    The first frame is the reference frame:
        x[:, 0] remains unchanged
        x[:, 1] is warped using flow[:, 0].inverse()
        x[:, 2] is warped using flow[:, 0].inverse() @ flow[:, 1].inverse()
        ...

    Args:
        x: (b, t, c, h, w) sequence of feature maps
        flow: (b, t, 6) sequence of 6 DoF poses from frame t to t+1
            (only the x-y portion is used)
    """
    pose_matrices = pose_vec2mat(flow)
    warped = [x[:, 0]]
    cumulative = None
    for timestep in range(1, x.shape[1]):
        step_inverse = invert_pose_matrix(pose_matrices[:, timestep - 1])
        # Chain the newest inverted pose onto the running product.
        cumulative = step_inverse if cumulative is None else cumulative @ step_inverse
        warped.append(
            warp_features(x[:, timestep], mat2pose_vec(cumulative), mode, spatial_extent=spatial_extent)
        )
    return torch.stack(warped, 1)
class VoxelsSumming(torch.autograd.Function):
    """Sum features that fall into the same voxel, with a custom backward.

    Adapted from https://github.com/nv-tlabs/lift-splat-shoot/blob/master/src/tools.py#L193
    """

    @staticmethod
    def forward(ctx, x, geometry, ranks):
        """`x` and `geometry` must already be sorted by voxel rank.

        Uses the cumulative-sum trick: after a cumsum over rows, the
        difference between consecutive voxel boundaries equals the sum of
        the rows belonging to each voxel.
        """
        running = x.cumsum(0)
        # True on the last row of every run of equal ranks (voxel boundary).
        boundary = torch.ones(running.shape[0], device=running.device, dtype=torch.bool)
        boundary[:-1] = ranks[1:] != ranks[:-1]
        running, kept_geometry = running[boundary], geometry[boundary]
        # Undo the cumulative sum between boundaries -> per-voxel totals.
        summed = torch.cat((running[:1], running[1:] - running[:-1]))
        ctx.save_for_backward(boundary)
        ctx.mark_non_differentiable(kept_geometry)
        return summed, kept_geometry

    @staticmethod
    def backward(ctx, grad_x, grad_geometry):
        """Send each output gradient back to every input row that was summed."""
        (boundary,) = ctx.saved_tensors
        # cumsum of the boundary mask maps each input row to its voxel index.
        voxel_index = torch.cumsum(boundary, 0)
        voxel_index[boundary] -= 1
        return grad_x[voxel_index], None, None
| 10,875 | 33.526984 | 117 | py |
fiery | fiery-master/fiery/utils/instance.py | from typing import Tuple
import torch
import torch.nn.functional as F
import numpy as np
from scipy.optimize import linear_sum_assignment
from fiery.utils.geometry import mat2pose_vec, pose_vec2mat, warp_features
# set ignore index to 0 for vis
def convert_instance_mask_to_center_and_offset_label(instance_img, future_egomotion, num_instances, ignore_index=255,
                                                     subtract_egomotion=True, sigma=3, spatial_extent=None):
    """Build dense supervision targets from per-frame instance-id maps.

    For every frame this produces:
      * a centerness heatmap: a gaussian bump of width `sigma` at each
        instance's (rounded) centre of mass, max-combined across instances,
      * per-pixel offsets from every instance pixel to its instance centre,
      * per-pixel displacement of each instance centre from frame t to t+1
        (written into frame t), measured after optionally removing egomotion.

    Args:
        instance_img: (seq_len, h, w) integer instance-id map; 0 is background.
        future_egomotion: (seq_len, 6) 6 DoF egomotion from frame t to t+1.
        num_instances: number of instance ids present over the sequence.
        ignore_index: fill value marking pixels with no valid target.
        subtract_egomotion: remove egomotion before measuring displacement.
            NOTE(review): when False and seq_len > 1, `future_egomotion_inv`
            below is referenced before assignment -- it looks like only
            True is supported; confirm with callers.
        sigma: gaussian width (in pixels) of the centerness bumps.
        spatial_extent: forwarded to `warp_features`.

    Returns:
        (center_label, offset_label, future_displacement_label) with shapes
        (seq_len, 1, h, w), (seq_len, 2, h, w), (seq_len, 2, h, w).
    """
    seq_len, h, w = instance_img.shape
    center_label = torch.zeros(seq_len, 1, h, w)
    offset_label = ignore_index * torch.ones(seq_len, 2, h, w)
    future_displacement_label = ignore_index * torch.ones(seq_len, 2, h, w)
    # x is vertical displacement, y is horizontal displacement
    x, y = torch.meshgrid(torch.arange(h, dtype=torch.float), torch.arange(w, dtype=torch.float))
    if subtract_egomotion:
        future_egomotion_inv = mat2pose_vec(pose_vec2mat(future_egomotion).inverse())
    # Compute warped instance segmentation
    warped_instance_seg = {}
    for t in range(1, seq_len):
        # Warp frame t back by the inverse egomotion so that the remaining
        # motion between t-1 and t is the instance's own motion.
        warped_inst_t = warp_features(instance_img[t].unsqueeze(0).unsqueeze(1).float(),
                                      future_egomotion_inv[t - 1].unsqueeze(0), mode='nearest',
                                      spatial_extent=spatial_extent)
        warped_instance_seg[t] = warped_inst_t[0, 0]
    # Ignore id 0 which is the background
    for instance_id in range(1, num_instances+1):
        prev_xc = None
        prev_yc = None
        prev_mask = None
        for t in range(seq_len):
            instance_mask = (instance_img[t] == instance_id)
            if instance_mask.sum() == 0:
                # this instance is not in this frame; break the track
                prev_xc = None
                prev_yc = None
                prev_mask = None
                continue
            # Centre of mass of the instance, rounded to the pixel grid.
            xc = x[instance_mask].mean().round().long()
            yc = y[instance_mask].mean().round().long()
            off_x = xc - x
            off_y = yc - y
            # Gaussian centerness bump; overlapping instances keep the max.
            g = torch.exp(-(off_x ** 2 + off_y ** 2) / sigma ** 2)
            center_label[t, 0] = torch.maximum(center_label[t, 0], g)
            offset_label[t, 0, instance_mask] = off_x[instance_mask]
            offset_label[t, 1, instance_mask] = off_y[instance_mask]
            if prev_xc is not None:
                # old method
                # cur_pt = torch.stack((xc, yc)).unsqueeze(0).float()
                # if subtract_egomotion:
                #     cur_pt = warp_points(cur_pt, future_egomotion_inv[t - 1])
                # cur_pt = cur_pt.squeeze(0)
                warped_instance_mask = warped_instance_seg[t] == instance_id
                if warped_instance_mask.sum() > 0:
                    # Egomotion-compensated centre at time t; its offset from
                    # the previous centre is written into frame t-1.
                    warped_xc = x[warped_instance_mask].mean().round()
                    warped_yc = y[warped_instance_mask].mean().round()
                    delta_x = warped_xc - prev_xc
                    delta_y = warped_yc - prev_yc
                    future_displacement_label[t - 1, 0, prev_mask] = delta_x
                    future_displacement_label[t - 1, 1, prev_mask] = delta_y
            prev_xc = xc
            prev_yc = yc
            prev_mask = instance_mask
    return center_label, offset_label, future_displacement_label
def find_instance_centers(center_prediction: torch.Tensor, conf_threshold: float = 0.1, nms_kernel_size: float = 3):
    """Extract instance-centre coordinates from a (1, h, w) centerness map.

    A pixel is a centre if it exceeds `conf_threshold` and is the maximum of
    its local `nms_kernel_size` x `nms_kernel_size` window (non-maximum
    suppression).  Returns an (n, 2) tensor of (row, col) coordinates.
    """
    assert len(center_prediction.shape) == 3
    # Push low-confidence responses down to -1 so they can never be peaks.
    heatmap = F.threshold(center_prediction, threshold=conf_threshold, value=-1)
    padding = (nms_kernel_size - 1) // 2
    local_max = F.max_pool2d(heatmap, kernel_size=nms_kernel_size, stride=1, padding=padding)
    # Keep only pixels that equal their local maximum.
    heatmap = torch.where(heatmap == local_max, heatmap, torch.full_like(heatmap, -1.0))
    # Drop the leading channel index, keeping (row, col) per centre.
    return torch.nonzero(heatmap > 0)[:, 1:]
def group_pixels(centers: torch.Tensor, offset_predictions: torch.Tensor) -> torch.Tensor:
    """Assign every pixel to the nearest predicted instance centre.

    Each pixel votes for a centre location (its own coordinate plus its
    predicted offset); the pixel is labelled with the closest centre.

    Args:
        centers: (n, 2) centre coordinates.
        offset_predictions: (2, h, w) per-pixel offset vectors.

    Returns:
        (1, h, w) tensor of 1-indexed instance ids (0 is left for background).
    """
    width, height = offset_predictions.shape[-2:]
    dtype, device = offset_predictions.dtype, offset_predictions.device
    # (2, width, height) grid of pixel coordinates.
    rows = torch.arange(width, dtype=dtype, device=device).view(1, width, 1).repeat(1, 1, height)
    cols = torch.arange(height, dtype=dtype, device=device).view(1, 1, height).repeat(1, width, 1)
    coords = torch.cat((rows, cols), dim=0)
    # Each pixel's vote for where its instance centre lies: (1, w*h, 2).
    votes = (coords + offset_predictions).view(2, width * height, 1).permute(2, 1, 0)
    # Distance of every vote to every candidate centre: (n_centers, w*h).
    distances = torch.norm(centers.view(-1, 1, 2) - votes, dim=-1)
    # +1 so ids start at 1, reserving 0 for background.
    return torch.argmin(distances, dim=0).reshape(1, width, height) + 1
def get_instance_segmentation_and_centers(
    center_predictions: torch.Tensor,
    offset_predictions: torch.Tensor,
    foreground_mask: torch.Tensor,
    conf_threshold: float = 0.1,
    nms_kernel_size: float = 3,
    max_n_instance_centers: int = 100,
) -> Tuple[torch.Tensor, torch.Tensor]:
    """Decode centerness + offset maps into a single-frame instance map.

    Returns an (1, h, w) int64 instance segmentation (ids consecutive,
    0 = background) together with the (n, 2) detected centre coordinates.
    """
    width, height = center_predictions.shape[-2:]
    center_predictions = center_predictions.view(1, width, height)
    offset_predictions = offset_predictions.view(2, width, height)
    foreground_mask = foreground_mask.view(1, width, height)
    centers = find_instance_centers(center_predictions, conf_threshold=conf_threshold, nms_kernel_size=nms_kernel_size)
    if not len(centers):
        # No centre above threshold: everything is background.
        empty_seg = torch.zeros(center_predictions.shape, dtype=torch.int64, device=center_predictions.device)
        return empty_seg, torch.zeros((0, 2), device=centers.device)
    if len(centers) > max_n_instance_centers:
        print(f'There are a lot of detected instance centers: {centers.shape}')
        centers = centers[:max_n_instance_centers].clone()
    instance_ids = group_pixels(centers, offset_predictions)
    # Zero-out background pixels, then compact the surviving ids.
    instance_seg = make_instance_seg_consecutive((instance_ids * foreground_mask.float()).long())
    return instance_seg.long(), centers
def update_instance_ids(instance_seg, old_ids, new_ids):
    """Relabel instance ids through a lookup table.

    Parameters
    ----------
        instance_seg: torch.Tensor of arbitrary shape containing ids.
        old_ids: 1D tensor of ids to replace; all must occur in instance_seg.
        new_ids: 1D tensor of replacement ids, aligned with old_ids.

    Returns
    -------
        Tensor of the same shape as instance_seg with the ids remapped.
    """
    # Identity table covering every id up to the largest old id; ids not
    # listed in old_ids therefore map to themselves.
    lookup = torch.arange(old_ids.max() + 1, device=instance_seg.device)
    for source_id, target_id in zip(old_ids, new_ids):
        lookup[source_id] = target_id
    return lookup[instance_seg].long()
def make_instance_seg_consecutive(instance_seg):
    """Relabel ids so the ones present form the consecutive range 0..K-1.

    torch.unique returns sorted ids, so when background (0) is present it
    keeps label 0.
    """
    present_ids = torch.unique(instance_seg)
    consecutive_ids = torch.arange(len(present_ids), device=instance_seg.device)
    return update_instance_ids(instance_seg, present_ids, consecutive_ids)
def make_instance_id_temporally_consistent(pred_inst, future_flow, matching_threshold=3.0):
    """Re-identify instances across time so ids are consistent in a sequence.

    Parameters
    ----------
        pred_inst: torch.Tensor (1, seq_len, h, w) per-frame instance ids.
        future_flow: torch.Tensor (1, seq_len, 2, h, w) predicted flow t -> t+1.
        matching_threshold: distance threshold for a match to be valid.

    Returns
    -------
        consistent_instance_seg: torch.Tensor (1, seq_len, h, w)

    Algorithm:
    1. time t. Loop over all detected instances. Use flow to compute new centers at time t+1.
    2. Store those centers.
    3. time t+1. Re-identify instances by comparing positions of actual centers
       and flow-warped centers (Hungarian matching), and relabel frame t+1
       accordingly.
    4. Repeat.
    """
    assert pred_inst.shape[0] == 1, 'Assumes batch size = 1'
    # Initialise instance segmentations with prediction corresponding to the present
    consistent_instance_seg = [pred_inst[0, 0]]
    largest_instance_id = consistent_instance_seg[0].max().item()
    _, seq_len, h, w = pred_inst.shape
    device = pred_inst.device
    for t in range(seq_len - 1):
        # Compute predicted future instance means
        grid = torch.stack(torch.meshgrid(
            torch.arange(h, dtype=torch.float, device=device), torch.arange(w, dtype=torch.float, device=device)
        ))
        # Add future flow
        grid = grid + future_flow[0, t]
        warped_centers = []
        # Go through all ids, except the background (unique is sorted, so [1:]
        # drops id 0)
        t_instance_ids = torch.unique(consistent_instance_seg[-1])[1:].cpu().numpy()
        if len(t_instance_ids) == 0:
            # No instance so nothing to update
            consistent_instance_seg.append(pred_inst[0, t + 1])
            continue
        for instance_id in t_instance_ids:
            instance_mask = (consistent_instance_seg[-1] == instance_id)
            warped_centers.append(grid[:, instance_mask].mean(dim=1))
        warped_centers = torch.stack(warped_centers)
        # Compute actual future instance means
        centers = []
        grid = torch.stack(torch.meshgrid(
            torch.arange(h, dtype=torch.float, device=device), torch.arange(w, dtype=torch.float, device=device)
        ))
        n_instances = int(pred_inst[0, t + 1].max().item())
        if n_instances == 0:
            # No instance, so nothing to update.
            consistent_instance_seg.append(pred_inst[0, t + 1])
            continue
        for instance_id in range(1, n_instances + 1):
            instance_mask = (pred_inst[0, t + 1] == instance_id)
            centers.append(grid[:, instance_mask].mean(dim=1))
        centers = torch.stack(centers)
        # Compute distance matrix between warped centers and actual centers
        distances = torch.norm(centers.unsqueeze(0) - warped_centers.unsqueeze(1), dim=-1).cpu().numpy()
        # Hungarian assignment: outputs (row, col) with row: index in frame t,
        # col: index in frame t+1.
        # The missing ids in col must be added (correspond to new instances).
        ids_t, ids_t_one = linear_sum_assignment(distances)
        matching_distances = distances[ids_t, ids_t_one]
        # Offset by one as id=0 is the background
        ids_t += 1
        ids_t_one += 1
        # swap ids_t with real ids. as those ids correspond to the position in the distance matrix.
        id_mapping = dict(zip(np.arange(1, len(t_instance_ids) + 1), t_instance_ids))
        ids_t = np.vectorize(id_mapping.__getitem__, otypes=[np.int64])(ids_t)
        # Filter low quality match
        ids_t = ids_t[matching_distances < matching_threshold]
        ids_t_one = ids_t_one[matching_distances < matching_threshold]
        # Elements that are in t+1, but weren't matched
        remaining_ids = set(torch.unique(pred_inst[0, t + 1]).cpu().numpy()).difference(set(ids_t_one))
        # remove background
        remaining_ids.remove(0)
        # Set remaining_ids to a new unique id (i.e. a newly appeared instance)
        for remaining_id in list(remaining_ids):
            largest_instance_id += 1
            ids_t = np.append(ids_t, largest_instance_id)
            ids_t_one = np.append(ids_t_one, remaining_id)
        consistent_instance_seg.append(update_instance_ids(pred_inst[0, t + 1], old_ids=ids_t_one, new_ids=ids_t))
    consistent_instance_seg = torch.stack(consistent_instance_seg).unsqueeze(0)
    return consistent_instance_seg
def predict_instance_segmentation_and_trajectories(
    output, compute_matched_centers=False, make_consistent=True, vehicles_id=1,
):
    """Decode network outputs into (temporally consistent) instance maps.

    Args:
        output: dict with keys 'segmentation', 'instance_center',
            'instance_offset' and 'instance_flow' (the latter may be None).
        compute_matched_centers: additionally return per-instance trajectories
            (requires batch size 1).
        make_consistent: run temporal re-identification across frames.
        vehicles_id: class index treated as foreground.

    Returns:
        (b, t, h, w) instance segmentation, and -- when
        compute_matched_centers is True -- a dict mapping instance id to an
        array of per-frame centre coordinates.
    """
    preds = output['segmentation'].detach()
    preds = torch.argmax(preds, dim=2, keepdims=True)
    foreground_masks = preds.squeeze(2) == vehicles_id
    batch_size, seq_len = preds.shape[:2]
    # Decode every frame of every batch element independently.
    pred_inst = []
    for b in range(batch_size):
        pred_inst_batch = []
        for t in range(seq_len):
            pred_instance_t, _ = get_instance_segmentation_and_centers(
                output['instance_center'][b, t].detach(),
                output['instance_offset'][b, t].detach(),
                foreground_masks[b, t].detach()
            )
            pred_inst_batch.append(pred_instance_t)
        pred_inst.append(torch.stack(pred_inst_batch, dim=0))
    pred_inst = torch.stack(pred_inst).squeeze(2)
    if make_consistent:
        if output['instance_flow'] is None:
            print('Using zero flow because instance_future_output is None')
            output['instance_flow'] = torch.zeros_like(output['instance_offset'])
        consistent_instance_seg = []
        for b in range(batch_size):
            consistent_instance_seg.append(
                make_instance_id_temporally_consistent(pred_inst[b:b+1],
                                                       output['instance_flow'][b:b+1].detach())
            )
        consistent_instance_seg = torch.cat(consistent_instance_seg, dim=0)
    else:
        consistent_instance_seg = pred_inst
    if compute_matched_centers:
        assert batch_size == 1
        # Generate trajectories: per-frame centre of mass of every instance
        # present in the first frame.
        matched_centers = {}
        _, seq_len, h, w = consistent_instance_seg.shape
        grid = torch.stack(torch.meshgrid(
            torch.arange(h, dtype=torch.float, device=preds.device),
            torch.arange(w, dtype=torch.float, device=preds.device)
        ))
        for instance_id in torch.unique(consistent_instance_seg[0, 0])[1:].cpu().numpy():
            for t in range(seq_len):
                instance_mask = consistent_instance_seg[0, t] == instance_id
                if instance_mask.sum() > 0:
                    matched_centers[instance_id] = matched_centers.get(instance_id, []) + [
                        grid[:, instance_mask].mean(dim=-1)]
        # Convert (row, col) to (col, row) numpy arrays.
        for key, value in matched_centers.items():
            matched_centers[key] = torch.stack(value).cpu().numpy()[:, ::-1]
        return consistent_instance_seg, matched_centers
    return consistent_instance_seg
| 13,871 | 40.657658 | 119 | py |
LiDAR2INS | LiDAR2INS-master/ceres/docs/source/conf.py | # -*- coding: utf-8 -*-
#
# Ceres Solver documentation build configuration file, created by
# sphinx-quickstart on Sun Jan 20 20:34:07 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.todo', 'sphinx.ext.mathjax', 'sphinx.ext.ifconfig']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Ceres Solver'
copyright = u'2018 Google Inc'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.14'
# The full version, including alpha/beta/rc tags.
release = '1.14.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further.  For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# NOTE: this assignment is immediately overridden by the sphinx_rtd_theme
# path two lines below; it is kept only as the stock template default.
html_theme_path = ["_themes",]
import sphinx_rtd_theme
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# The name for this set of Sphinx documents.  If None, it defaults to
# "<project> v<release> documentation".
html_title = "Ceres Solver"
# A shorter title for the navigation bar.  Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs.  This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
html_domain_indices = True
# If false, no index is generated.
html_use_index = True
# If true, the index is split into individual pages for each letter.
html_split_index = False
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = False
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
html_show_sphinx = False
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it.  The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'CeresSolverdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
  # The paper size ('letterpaper' or 'a4paper').
  #'papersize': 'letterpaper',
  # The font size ('10pt', '11pt' or '12pt').
  #'pointsize': '10pt',
  # Additional stuff for the LaTeX preamble.
  #'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
  ('index', 'CeresSolver.tex', u'Ceres Solver',
   u'Sameer Agarwal, Keir Mierle & Others', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'ceressolver', u'Ceres Solver',
     [u'Sameer Agarwal, Keir Mierle & Others'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
  ('index', 'CeresSolver', u'Ceres Solver',
   u'Sameer Agarwal, Keir Mierle & Others', 'CeresSolver', 'One line description of project.',
   'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
| 7,957 | 31.748971 | 94 | py |
xgboost | xgboost-master/tests/ci_build/test_r_package.py | """Utilities for packaging R code and running tests."""
import argparse
import os
import shutil
import subprocess
from pathlib import Path
from platform import system
from test_utils import R_PACKAGE, ROOT, DirectoryExcursion, cd, print_time, record_time
def get_mingw_bin() -> str:
    """Location of the mingw64 toolchain binaries shipped with rtools40."""
    rtools_mingw = "c:/rtools40/mingw64/"
    return os.path.join(rtools_mingw, "bin")
@cd(ROOT)
@record_time
def pack_rpackage() -> Path:
    """Compose the directory used for creating R package tar ball.

    Copies the R-package sources plus the vendored C++ sources (core, rabit,
    dmlc-core) into a standalone `xgboost/` directory laid out the way
    `R CMD build` expects, and patches the makefiles accordingly.

    Returns:
        Path of the staged package directory.
    """
    dest = Path("xgboost")

    def pkgroot(path: str) -> None:
        """Change makefiles according to the package layout."""
        with open(Path("R-package") / "src" / path, "r") as fd:
            makefile = fd.read()
        # Sources now live inside the package itself, not two levels up.
        makefile = makefile.replace("PKGROOT=../../", "PKGROOT=.", 1)
        with open(dest / "src" / path, "w") as fd:
            fd.write(makefile)

    # Refuse to run in a dirty working tree: a `git clean` dry run must list
    # nothing outside tests/ci_build, otherwise stray files would be packaged.
    output = subprocess.run(["git", "clean", "-xdf", "--dry-run"], capture_output=True)
    if output.returncode != 0:
        raise ValueError("Failed to check git repository status.", output)
    would_remove = output.stdout.decode("utf-8").strip().split("\n")
    if would_remove and not all(f.find("tests/ci_build") != -1 for f in would_remove):
        raise ValueError(
            "\n".join(would_remove) + "\nPlease cleanup the working git repository."
        )
    shutil.copytree("R-package", dest)
    os.remove(dest / "demo" / "runall.R")
    # core
    shutil.copytree("src", dest / "src" / "src")
    shutil.copytree("include", dest / "src" / "include")
    shutil.copytree("amalgamation", dest / "src" / "amalgamation")
    # rabit
    rabit = Path("rabit")
    os.mkdir(dest / "src" / rabit)
    shutil.copytree(rabit / "src", dest / "src" / "rabit" / "src")
    shutil.copytree(rabit / "include", dest / "src" / "rabit" / "include")
    # dmlc-core
    dmlc_core = Path("dmlc-core")
    os.mkdir(dest / "src" / dmlc_core)
    shutil.copytree(dmlc_core / "include", dest / "src" / "dmlc-core" / "include")
    shutil.copytree(dmlc_core / "src", dest / "src" / "dmlc-core" / "src")
    # makefile & license
    shutil.copyfile("LICENSE", dest / "LICENSE")
    # Leftover editor/backup artifact, remove when present.
    osxmakef = dest / "src" / "Makevars.win-e"
    if os.path.exists(osxmakef):
        os.remove(osxmakef)
    pkgroot("Makevars.in")
    pkgroot("Makevars.win")
    # misc
    rwsp = Path("R-package") / "remove_warning_suppression_pragma.sh"
    if system() != "Windows":
        subprocess.check_call(rwsp)
    rwsp = dest / "remove_warning_suppression_pragma.sh"
    if system() != "Windows":
        subprocess.check_call(rwsp)
    os.remove(rwsp)
    os.remove(dest / "CMakeLists.txt")
    shutil.rmtree(dest / "tests" / "helper_scripts")
    return dest
@cd(ROOT)
@record_time
def build_rpackage(path: str) -> str:
    """Run `R CMD build` on *path* and return the produced tarball's path."""

    def find_tarball() -> str:
        # The tarball can land in any subdirectory, so scan the whole tree.
        matches = [
            os.path.join(root, fname)
            for root, _dirs, files in os.walk(".")
            for fname in files
            if fname.startswith("xgboost") and fname.endswith(".tar.gz")
        ]
        if not matches:
            raise ValueError("Failed to find output tar ball.")
        if len(matches) > 1:
            raise ValueError("Found more than one packages:", matches)
        return matches[0]

    env = os.environ.copy()
    print("Ncpus:", f"{os.cpu_count()}")
    # Parallelise compilation during the build.
    env.update({"MAKEFLAGS": f"-j{os.cpu_count()}"})
    subprocess.check_call([R, "CMD", "build", path], env=env)
    return find_tarball()
@cd(ROOT)
@record_time
def check_rpackage(path: str) -> None:
    """Run `R CMD check --as-cran` on the tarball at *path*.

    Fails (raises ValueError) when the check itself fails, when the check
    log contains a WARNING, or when CRAN's example-timing NOTE appears.
    Prints the full install/check logs before raising.
    """
    env = os.environ.copy()
    print("Ncpus:", f"{os.cpu_count()}")
    env.update(
        {
            "MAKEFLAGS": f"-j{os.cpu_count()}",
            # cran specific environment variables
            "_R_CHECK_EXAMPLE_TIMING_CPU_TO_ELAPSED_THRESHOLD_": str(2.5),
        }
    )
    # Actually we don't run this check on windows due to dependency issue.
    if system() == "Windows":
        # make sure compiler from rtools is used.
        mingw_bin = get_mingw_bin()
        CXX = os.path.join(mingw_bin, "g++.exe")
        CC = os.path.join(mingw_bin, "gcc.exe")
        env.update({"CC": CC, "CXX": CXX})
    status = subprocess.run([R, "CMD", "check", "--as-cran", path], env=env)
    with open(Path("xgboost.Rcheck") / "00check.log", "r") as fd:
        check_log = fd.read()
    with open(Path("xgboost.Rcheck") / "00install.out", "r") as fd:
        install_log = fd.read()
    msg = f"""
----------------------- Install ----------------------
{install_log}
----------------------- Check -----------------------
{check_log}
"""
    if status.returncode != 0:
        print(msg)
        raise ValueError("Failed r package check.")
    # CRAN treats WARNINGs as blockers; so do we.
    if check_log.find("WARNING") != -1:
        print(msg)
        raise ValueError("Has unresolved warnings.")
    # The timing NOTE means examples exceed the CPU/elapsed threshold above.
    if check_log.find("Examples with CPU time") != -1:
        print(msg)
        raise ValueError("Suspicious NOTE.")
@cd(R_PACKAGE)
@record_time
def check_rmarkdown() -> None:
    """Verify the roxygen2-generated R documentation is up to date.

    Regenerates the docs in place, then fails if `git diff` shows changes
    (i.e. the committed docs were stale).
    """
    assert system() != "Windows", "Document test doesn't support Windows."
    env = os.environ.copy()
    env.update({"MAKEFLAGS": f"-j{os.cpu_count()}"})
    print("Checking R documentation.")
    # Rscript lives next to the configured R executable.
    bin_dir = os.path.dirname(R)
    rscript = os.path.join(bin_dir, "Rscript")
    subprocess.check_call([rscript, "-e", "roxygen2::roxygenize()"], env=env)
    output = subprocess.run(["git", "diff", "--name-only"], capture_output=True)
    if len(output.stdout.decode("utf-8").strip()) != 0:
        # Re-run to capture the full diff for the error message.
        output = subprocess.run(["git", "diff"], capture_output=True)
        raise ValueError(
            "Please run `roxygen2::roxygenize()`. Diff:\n",
            output.stdout.decode("utf-8"),
        )
@cd(R_PACKAGE)
@record_time
def test_with_autotools() -> None:
    """Windows only test. No `--as-cran` check, only unittests. We don't want
    to manage the dependencies on Windows machine.
    """
    assert system() == "Windows"
    # Force the rtools mingw toolchain instead of whatever is on PATH.
    mingw_bin = get_mingw_bin()
    CXX = os.path.join(mingw_bin, "g++.exe")
    CC = os.path.join(mingw_bin, "gcc.exe")
    cmd = [R, "CMD", "INSTALL", str(os.path.curdir)]
    env = os.environ.copy()
    env.update({"CC": CC, "CXX": CXX, "MAKEFLAGS": f"-j{os.cpu_count()}"})
    subprocess.check_call(cmd, env=env)
    # Run the testthat suite, then the demos, against the installed package.
    subprocess.check_call(
        ["R.exe", "-q", "-e", "library(testthat); setwd('tests'); source('testthat.R')"]
    )
    subprocess.check_call(["R.exe", "-q", "-e", "demo(runall, package = 'xgboost')"])
@record_time
def test_with_cmake(args: argparse.Namespace) -> None:
    """Build and install the R package via CMake, then run the R tests.

    Args:
        args: parsed CLI options; `args.compiler` selects "mingw" or "msvc".
    """
    os.mkdir("build")
    with DirectoryExcursion("build"):
        if args.compiler == "mingw":
            # Use the rtools mingw toolchain explicitly.
            mingw_bin = get_mingw_bin()
            CXX = os.path.join(mingw_bin, "g++.exe")
            CC = os.path.join(mingw_bin, "gcc.exe")
            env = os.environ.copy()
            env.update({"CC": CC, "CXX": CXX})
            subprocess.check_call(
                [
                    "cmake",
                    os.path.pardir,
                    "-DUSE_OPENMP=ON",
                    "-DR_LIB=ON",
                    "-DCMAKE_CONFIGURATION_TYPES=Release",
                    "-G",
                    "Unix Makefiles",
                ],
                env=env,
            )
            subprocess.check_call(["make", "-j", "install"])
        elif args.compiler == "msvc":
            # Visual Studio generator; the build/install step is separate.
            subprocess.check_call(
                [
                    "cmake",
                    os.path.pardir,
                    "-DUSE_OPENMP=ON",
                    "-DR_LIB=ON",
                    "-DCMAKE_CONFIGURATION_TYPES=Release",
                    "-A",
                    "x64",
                ]
            )
            subprocess.check_call(
                [
                    "cmake",
                    "--build",
                    os.path.curdir,
                    "--target",
                    "install",
                    "--config",
                    "Release",
                ]
            )
        else:
            raise ValueError("Wrong compiler")
    # Run the testthat suite and the demos against the installed package.
    with DirectoryExcursion(R_PACKAGE):
        subprocess.check_call(
            [
                R,
                "-q",
                "-e",
                "library(testthat); setwd('tests'); source('testthat.R')",
            ]
        )
        subprocess.check_call([R, "-q", "-e", "demo(runall, package = 'xgboost')"])
@record_time
def main(args: argparse.Namespace) -> None:
    """Dispatch to the CI task selected on the command line."""
    task = args.task
    if task == "pack":
        pack_rpackage()
    elif task == "build":
        # Building requires the staged package directory first.
        build_rpackage(pack_rpackage())
    elif task == "doc":
        check_rmarkdown()
    elif task == "check":
        on_windows = system() == "Windows"
        if args.build_tool == "autotools" and not on_windows:
            # Full `R CMD check --as-cran` on a freshly built tarball.
            check_rpackage(build_rpackage(pack_rpackage()))
        elif args.build_tool == "autotools":
            # Windows runs unittests only, without the CRAN check.
            test_with_autotools()
        else:
            test_with_cmake(args)
    else:
        raise ValueError("Unexpected task.")
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description=(
            "Helper script for making R package and running R tests on CI. There are"
            " also other helper scripts in the R tests directory for installing"
            " dependencies and running linter."
        )
    )
    parser.add_argument(
        "--task",
        type=str,
        choices=["pack", "build", "check", "doc"],
        default="check",
        required=False,
    )
    parser.add_argument(
        "--compiler",
        type=str,
        choices=["mingw", "msvc"],
        help="Compiler used for compiling CXX code. Only relevant for windows build",
        default="mingw",
        required=False,
    )
    parser.add_argument(
        "--build-tool",
        type=str,
        choices=["cmake", "autotools"],
        help="Build tool for compiling CXX code and install R package.",
        default="autotools",
        required=False,
    )
    parser.add_argument(
        "--r",
        type=str,
        default="R" if system() != "Windows" else "R.exe",
        help="Path to the R executable.",
    )
    args = parser.parse_args()
    # Module-level R executable used by all the task helpers above.
    R = args.r
    try:
        main(args)
    finally:
        # Always report per-task timings, even when a task failed.
        print_time()
| 10,217 | 31.438095 | 88 | py |
xgboost | xgboost-master/tests/ci_build/tidy.py | #!/usr/bin/env python
import argparse
import json
import os
import re
import shutil
import subprocess
import sys
from multiprocessing import Pool, cpu_count
from time import time
import yaml
def call(args):
    """Run *args* as a subprocess and scan its output for lint warnings.

    Returns a tuple ``(returncode, lint_status, output, args)`` where
    ``lint_status`` is 1 when a clang-tidy warning from xgboost's own
    sources was found, else 0.
    """
    completed = subprocess.run(args,
                               stdout=subprocess.PIPE,
                               stderr=subprocess.PIPE)
    output = completed.stdout.decode('utf-8')
    # `workspace` is a name used in Jenkins CI. Normally we should keep the
    # dir as `xgboost`. Only warnings under src/tests/include count.
    pattern = '(workspace|xgboost)/.*(src|tests|include)/.*warning:'
    has_warning = re.search(pattern, output, re.MULTILINE) is not None
    lint_status = 1 if has_warning else 0
    return (completed.returncode, lint_status, output, args)
class ClangTidy(object):
''' clang tidy wrapper.
Args:
args: Command line arguments.
cpp_lint: Run linter on C++ source code.
cuda_lint: Run linter on CUDA source code.
use_dmlc_gtest: Whether to use gtest bundled in dmlc-core.
'''
    def __init__(self, args):
        """Store lint configuration taken from parsed command-line arguments.

        Raises:
            ValueError: when both C++ and CUDA linting are disabled.
        """
        self.cpp_lint = args.cpp
        self.cuda_lint = args.cuda
        self.use_dmlc_gtest: bool = args.use_dmlc_gtest
        # Copy so later mutations don't alias the caller's list.
        self.cuda_archs = args.cuda_archs.copy() if args.cuda_archs else []
        if args.tidy_version:
            # Use a versioned executable, e.g. `clang-tidy-12`.
            self.exe = 'clang-tidy-' + str(args.tidy_version)
        else:
            self.exe = 'clang-tidy'
        print('Run linter on CUDA: ', self.cuda_lint)
        print('Run linter on C++:', self.cpp_lint)
        print('Use dmlc gtest:', self.use_dmlc_gtest)
        print('CUDA archs:', ' '.join(self.cuda_archs))
        if not self.cpp_lint and not self.cuda_lint:
            raise ValueError('Both --cpp and --cuda are set to 0.')
        self.root_path = os.path.abspath(os.path.curdir)
        print('Project root:', self.root_path)
        # Scratch directory for the CMake compilation database.
        self.cdb_path = os.path.join(self.root_path, 'cdb')
    def __enter__(self):
        """Start timing and (re)generate the compilation database."""
        self.start = time()
        # Drop any stale database from a previous run before regenerating.
        if os.path.exists(self.cdb_path):
            shutil.rmtree(self.cdb_path)
        self._generate_cdb()
        return self
    def __exit__(self, *args):
        """Clean up the compilation database and report elapsed time."""
        if os.path.exists(self.cdb_path):
            shutil.rmtree(self.cdb_path)
        self.end = time()
        print('Finish running clang-tidy:', self.end - self.start)
def _generate_cdb(self):
'''Run CMake to generate compilation database.'''
os.mkdir(self.cdb_path)
os.chdir(self.cdb_path)
cmake_args = ['cmake', '..', '-DCMAKE_EXPORT_COMPILE_COMMANDS=ON',
'-DGOOGLE_TEST=ON']
if self.use_dmlc_gtest:
cmake_args.append('-DUSE_DMLC_GTEST=ON')
else:
cmake_args.append('-DUSE_DMLC_GTEST=OFF')
if self.cuda_lint:
cmake_args.extend(['-DUSE_CUDA=ON', '-DUSE_NCCL=ON'])
if self.cuda_archs:
arch_list = ';'.join(self.cuda_archs)
cmake_args.append(f'-DGPU_COMPUTE_VER={arch_list}')
subprocess.run(cmake_args)
os.chdir(self.root_path)
def convert_nvcc_command_to_clang(self, command):
'''Convert nvcc flags to corresponding clang flags.'''
components = command.split()
compiler: str = components[0]
if compiler.find('nvcc') != -1:
compiler = 'clang++'
components[0] = compiler
# check each component in a command
converted_components = [compiler]
for i in range(1, len(components)):
if components[i] == '-lineinfo':
continue
elif components[i] == '-fuse-ld=gold':
continue
elif components[i] == '-rdynamic':
continue
elif components[i] == "-Xfatbin=-compress-all":
continue
elif components[i] == "-forward-unknown-to-host-compiler":
continue
elif (components[i] == '-x' and
components[i+1] == 'cu'):
# -x cu -> -x cuda
converted_components.append('-x')
converted_components.append('cuda')
components[i+1] = ''
continue
elif components[i].find('-Xcompiler') != -1:
continue
elif components[i].find('--expt') != -1:
continue
elif components[i].find('-ccbin') != -1:
continue
elif components[i].find('--generate-code') != -1:
keyword = 'code=sm'
pos = components[i].find(keyword)
capability = components[i][pos + len(keyword) + 1:
pos + len(keyword) + 3]
if pos != -1:
converted_components.append(
'--cuda-gpu-arch=sm_' + capability)
elif components[i].find('--std=c++14') != -1:
converted_components.append('-std=c++14')
elif components[i].startswith('-isystem='):
converted_components.extend(components[i].split('='))
else:
converted_components.append(components[i])
converted_components.append('-isystem /usr/local/cuda/include/')
command = ''
for c in converted_components:
command = command + ' ' + c
command = command.strip()
return command
def _configure_flags(self, path, command):
src = os.path.join(self.root_path, 'src')
src = src.replace('/', '\\/')
include = os.path.join(self.root_path, 'include')
include = include.replace('/', '\\/')
header_filter = '(' + src + '|' + include + ')'
common_args = [self.exe,
"-header-filter=" + header_filter,
'-config='+self.clang_tidy]
common_args.append(path)
common_args.append('--')
command = self.convert_nvcc_command_to_clang(command)
command = command.split()[1:] # remove clang/c++/g++
if '-c' in command:
index = command.index('-c')
del command[index+1]
command.remove('-c')
if '-o' in command:
index = command.index('-o')
del command[index+1]
command.remove('-o')
common_args.extend(command)
# Two passes, one for device code another for host code.
if path.endswith('cu'):
args = [common_args.copy(), common_args.copy()]
args[0].append('--cuda-host-only')
args[1].append('--cuda-device-only')
else:
args = [common_args.copy()]
for a in args:
a.append('-Wno-unused-command-line-argument')
return args
def _configure(self):
'''Load and configure compile_commands and clang_tidy.'''
def should_lint(path):
if not self.cpp_lint and path.endswith('.cc'):
return False
isxgb = path.find('rabit') == -1
isxgb = isxgb and path.find('dmlc-core') == -1
isxgb = isxgb and (not path.startswith(self.cdb_path))
if isxgb:
print(path)
return True
cdb_file = os.path.join(self.cdb_path, 'compile_commands.json')
with open(cdb_file, 'r') as fd:
self.compile_commands = json.load(fd)
tidy_file = os.path.join(self.root_path, '.clang-tidy')
with open(tidy_file) as fd:
self.clang_tidy = yaml.safe_load(fd)
self.clang_tidy = str(self.clang_tidy)
all_files = []
for entry in self.compile_commands:
path = entry['file']
if should_lint(path):
args = self._configure_flags(path, entry['command'])
all_files.extend(args)
return all_files
def run(self):
'''Run clang-tidy.'''
all_files = self._configure()
passed = True
BAR = '-'*32
with Pool(cpu_count()) as pool:
results = pool.map(call, all_files)
for i, (process_status, tidy_status, msg, args) in enumerate(results):
# Don't enforce clang-tidy to pass for now due to namespace
# for cub in thrust is not correct.
if tidy_status == 1:
passed = False
print(BAR, '\n'
'Command args:', ' '.join(args), ', ',
'Process return code:', process_status, ', ',
'Tidy result code:', tidy_status, ', ',
'Message:\n', msg,
BAR, '\n')
if not passed:
print('Errors in `thrust` namespace can be safely ignored.',
'Please address rest of the clang-tidy warnings.')
return passed
def test_tidy(args):
    '''Sanity-check that clang-tidy plus our warning regex work together.

    There are many subtleties to get wrong here, for instance: is the text
    redirected through the pipe decoded from UTF-8 or left as bytes?  On
    Jenkins there is no 'xgboost' directory, so are we matching the right
    keywords?  Should the regex use re.DOTALL or re.MULTILINE?  The check is
    shallow, but it at least guarantees tidy is not silently missing
    everything on Jenkins.
    '''
    root = os.path.abspath(os.path.curdir)
    # A probe file that deliberately triggers a clang-tidy warning.
    probe_path = os.path.join(root,
                              'tests', 'ci_build', 'test_tidy.cc')

    with open(os.path.join(root, '.clang-tidy')) as fd:
        config_flag = '-config=' + str(fd.read())

    if args.tidy_version:
        tidy_exe = 'clang-tidy-' + str(args.tidy_version)
    else:
        tidy_exe = 'clang-tidy'

    proc_code, tidy_status, error_msg, _ = call([tidy_exe, config_flag, probe_path])
    # The process itself must succeed while the scan must flag the warning.
    assert proc_code == 0
    assert tidy_status == 1
    print('clang-tidy is working.')
if __name__ == "__main__":
    # Command-line interface: which linters to run and which clang-tidy
    # executable/archs to use.
    cli = argparse.ArgumentParser(description="Run clang-tidy.")
    cli.add_argument("--cpp", type=int, default=1)
    cli.add_argument(
        "--tidy-version",
        type=int,
        default=None,
        help="Specify the version of preferred clang-tidy.",
    )
    cli.add_argument("--cuda", type=int, default=1)
    cli.add_argument(
        "--use-dmlc-gtest",
        action="store_true",
        help="Whether to use gtest bundled in dmlc-core.",
    )
    cli.add_argument(
        "--cuda-archs", action="append", help="List of CUDA archs to build"
    )
    arguments = cli.parse_args()

    # First verify the tooling itself works, then lint the whole project;
    # a non-zero exit code fails the CI job.
    test_tidy(arguments)
    with ClangTidy(arguments) as tidy:
        success = tidy.run()
        if not success:
            sys.exit(1)
| 10,858 | 34.486928 | 82 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.