repo stringlengths 1 99 | file stringlengths 13 215 | code stringlengths 12 59.2M | file_length int64 12 59.2M | avg_line_length float64 3.82 1.48M | max_line_length int64 12 2.51M | extension_type stringclasses 1
value |
|---|---|---|---|---|---|---|
RPMG | RPMG-main/ModelNet_Img/pylibs/pytorch_util/libtrain/__init__.py | from .init_torch import list_models, rm_models, copy_weights, cfg, init_weights_by_filling, count_parameters_all, \
count_parameters_trainable
# Optional hook helpers; the package must stay importable when `hooks` is absent.
try:
    from hooks import Forward_Hook_Handlers, Backward_Hook_Handlers, fw_hook_percentile
except ImportError:  # FIX: bare `except:` also swallowed SystemExit/KeyboardInterrupt
    pass
| 258 | 31.375 | 115 | py |
RPMG | RPMG-main/ModelNet_Img/pylibs/pytorch_util/libtrain/init_torch.py | import os, sys
from basic.common import env, Open, add_path # rdict
import numpy as np
import math
import torch
import torch.nn as nn
import torchvision
import torch.utils.model_zoo as model_zoo
# `pip install easydict` if you don't have it
from easydict import EasyDict as edict
# Pixel mean values (BGR order) as a (1, 1, 3) array
# These are the values originally used for training VGG16
# __C.PIXEL_MEANS = np.array([[[102.9801, 115.9465, 122.7717]]])
# ---------------------------------------------------------------------------
# Global configuration registry (easydict) of pretrained-model file locations.
#   cfg.caffemodel.* : caffe prototxt / .caffemodel paths plus a pickled dump
#   cfg.torchmodel.* : torchvision checkpoint paths and/or download URLs
#   PIXEL_MEANS      : BGR pixel means originally used for training VGG16
# ---------------------------------------------------------------------------
_cfg = edict(caffemodel=edict(),  # filled in below
             torchmodel=edict(),  # filled in below
             PIXEL_MEANS=np.array([[[102.9801, 115.9465, 122.7717]]]),
             # PIXEL_MEANS_Imagenet = np.array([[[104.006987932, 116.668767617, 122.678914341]]]),
             )
cfg = _cfg
# All caffe-side files are expected under <this package>/../pretrained_model.cache
this_dir = os.path.realpath(os.path.dirname(__file__))
base_dir = os.path.realpath(this_dir + '/../pretrained_model.cache')
# default models and pretrained weights.
cfg.caffemodel.alexnet = edict(
    proto=os.path.join(base_dir + '/bvlc_alexnet/deploy.prototxt'),
    model=os.path.join(base_dir + '/bvlc_alexnet/bvlc_alexnet.caffemodel'),
    pkl=os.path.join(base_dir + '/bvlc_alexnet/bvlc_alexnet.pkl'),
    input_size=(227, 227),
)
cfg.caffemodel.caffenet = edict(
    proto=os.path.join(base_dir + '/bvlc_reference_caffenet/deploy.prototxt'),
    model=os.path.join(base_dir + '/bvlc_reference_caffenet/bvlc_reference_caffenet.caffemodel'),
    pkl=os.path.join(base_dir + '/bvlc_reference_caffenet/bvlc_reference_caffenet.pkl'),
    input_size=(227, 227),
)
cfg.caffemodel.vgg16 = edict(
    proto=os.path.join(base_dir + '/vgg_net/VGG_ILSVRC_16_layers_deploy.prototxt'),
    model=os.path.join(base_dir + '/vgg_net/VGG_ILSVRC_16_layers.caffemodel'),
    pkl=os.path.join(base_dir + '/vgg_net/VGG_ILSVRC_16_layers.pkl'),
    input_size=(224, 224),
)
cfg.caffemodel.vgg19 = edict(
    proto=os.path.join(base_dir + '/vgg_net/VGG_ILSVRC_19_layers_deploy.prototxt'),
    model=os.path.join(base_dir + '/vgg_net/VGG_ILSVRC_19_layers.caffemodel'),
    pkl=os.path.join(base_dir + '/vgg_net/VGG_ILSVRC_19_layers.pkl'),
    input_size=(224, 224),
)
# GoogLeNet has no pickled dump registered here (no `pkl` key).
cfg.caffemodel.GoogLeNet = edict(
    proto=os.path.join(base_dir + '/bvlc_googlenet/deploy.prototxt'),
    model=os.path.join(base_dir + '/bvlc_googlenet/bvlc_googlenet.caffemodel'),
    input_size=(224, 224),
)
cfg.caffemodel.vggm = edict(
    proto=os.path.join(base_dir + '/vgg_net/VGG_CNN_M_deploy.prototxt'),
    model=os.path.join(base_dir + '/vgg_net/VGG_CNN_M.caffemodel'),
    pkl=os.path.join(base_dir + '/vgg_net/VGG_CNN_M.pkl'),
    input_size=(224, 224),
)
# Caffe-trained ResNets already converted to .pth checkpoints.
cfg.caffemodel.resnet50 = edict(
    model=os.path.join(base_dir + '/resnet50-caffe.pth'),
)
cfg.caffemodel.resnet101 = edict(
    model=os.path.join(base_dir + '/resnet101-caffe.pth'),
)
cfg.caffemodel.resnet152 = edict(
    model=os.path.join(base_dir + '/resnet152-caffe.pth'),
)
# --------------------------------------------------------- [torchmodel]
cfg.torchmodel.alexnet = edict(
    model_url='https://download.pytorch.org/models/alexnet-owt-4df8aa71.pth',
    model=os.path.join(env.Home, '.torch/models/alexnet-owt-4df8aa71.pth'),
    input_size=(224, 224),
)
cfg.torchmodel.inception_v3_google = edict(
    model=os.path.join(env.Home, '.torch/models/inception_v3_google-1a9a5a14.pth'),
    input_size=(224, 224),
)
cfg.torchmodel.resnet101 = edict(
    model=os.path.join(env.Home, '.torch/models/resnet101-5d3b4d8f.pth'),
)
# VGG variants are fetched on demand by URL (see _copy_weights_from_torchmodel).
cfg.torchmodel.vgg16 = edict(
    model_url='https://download.pytorch.org/models/vgg16-397923af.pth',
)
cfg.torchmodel.vgg19 = edict(
    model_url='https://download.pytorch.org/models/vgg19-dcbb9e9d.pth',
)
cfg.torchmodel.vgg16_bn = edict(
    model_url='https://download.pytorch.org/models/vgg16_bn-6c64b313.pth',
)
cfg.torchmodel.vgg19_bn = edict(
    model_url='https://download.pytorch.org/models/vgg19_bn-c79401a0.pth',
)
def list_models(snapshots_dir, marker='iter'):
    """Scan `snapshots_dir` for checkpoints named '<net>_<marker>_<N>.pth.tar'.

    Returns (sorted_iteration_numbers, net_name). Raises if any .pth.tar file
    does not follow the naming scheme, and asserts that exactly one net name
    (with no duplicate iteration numbers) is present.
    """
    import re
    name_chars = '([()\[\]a-zA-Z0-9\s\.,_+-]+)'  # e.g. train_iter_20000.pth.tar
    checkpoint_re = re.compile(r"^%s_%s_(\d+).pth.tar$" % (name_chars, marker))
    checkpoint_files = [f for f in os.listdir(snapshots_dir) if f.endswith('.pth.tar')]
    names_seen = set()
    iters_seen = []
    for fname in checkpoint_files:
        match = checkpoint_re.search(fname)
        if match is None:
            raise Exception(
                'No model matches in snapshots_dir: %s\n\tExist model list: %s' % (snapshots_dir, checkpoint_files))
        groups = match.groups()
        assert len(groups) == 2, '[Exception] in matching .pth.tar file: "%s"' % fname
        names_seen.add(groups[0])
        iters_seen.append(int(groups[1]))
    assert len(names_seen) == 1, "None or Multiple net models in this dir: %s " % len(names_seen)
    assert len(set(iters_seen)) == len(iters_seen)
    iters_seen.sort()
    return iters_seen, list(names_seen)[0]
def rm_models(snapshots_dir, type='KEEP_LATEST', marker='iter'):
    """Prune checkpoints in `snapshots_dir`, keeping only the newest one.

    type='KEEP_LATEST' (only supported policy): delete every
    '<net>_<marker>_<N>.pth.tar' except the one with the largest N.
    Returns the kept (latest) checkpoint file name.
    """
    saved_nums, net_name = list_models(snapshots_dir, marker=marker)
    assert len(saved_nums) > 0, "No models available"
    if type != 'KEEP_LATEST':
        raise NotImplementedError
    latest_model_name = '%s_%s_%s.pth.tar' % (net_name, marker, saved_nums[-1])
    latest_model_path = os.path.join(snapshots_dir, latest_model_name)
    assert os.path.exists(latest_model_path), latest_model_path
    # Delete every older checkpoint (no-op when only the latest exists).
    for it in saved_nums[:-1]:
        stale_path = os.path.join(snapshots_dir, '%s_%s_%s.pth.tar' % (net_name, marker, it))
        assert os.path.exists(stale_path)
        # FIX: was os.system('rm -f %s' % path) — not portable and unsafe for
        # paths containing spaces/shell metacharacters; use os.remove instead.
        os.remove(stale_path)
    return latest_model_name
def get_weights_from_caffesnapeshot(proto_file, model_file):
    """Extract the learnable blobs of a caffe snapshot as numpy arrays.

    Loads the net in CPU / TEST mode (requires pycaffe) and returns an
    OrderedDict mapping layer name -> [weight] or [weight, bias] copies.
    Most layers expose (weight, bias); some (e.g. PReLU) expose weight only.
    """
    import caffe
    from collections import OrderedDict
    # FIX: dropped the unused Python-2-only `import cPickle as pickle` and
    # replaced `.iteritems()` with `.items()` — the rest of this file is
    # Python 3 (print() calls, pickle.load(..., encoding='bytes')).
    caffe.set_mode_cpu()
    net = caffe.Net(proto_file, model_file, caffe.TEST)
    model_dict = OrderedDict()
    for layer_name, param in net.params.items():
        if len(param) not in (1, 2):
            raise NotImplementedError
        model_dict[layer_name] = [blob.data.copy() for blob in param]
    return model_dict
def _copy_weights_from_caffemodel(own_state, pretrained_type='alexnet', ignore_missing_dst=False,
                                  src2dsts=dict(conv1='conv1', conv2='conv2', conv3='conv3',
                                                conv4='conv4', conv5='conv5', fc6='fc6', fc7='fc7')):
    """Copy caffe-pretrained weights into a pytorch state dict, in place.

    own_state         : target network state_dict (tensors are mutated in place).
    pretrained_type   : key into cfg.caffemodel (its .pkl dump is loaded), or a
                        (proto_file, model_file) tuple read via
                        get_weights_from_caffesnapeshot.
    ignore_missing_dst: skip dst layers absent from own_state instead of asserting.
    src2dsts          : caffe layer name -> pytorch layer name or list of names,
                        e.g. dict(conv1='conv1') or dict(conv1=['conv1a', 'conv1b']).
    Params not covered by src2dsts are filled: weights ~ N(0, 0.005), biases 0.
    NOTE(review): src2dsts is a mutable default argument — safe only as long as
    callers never mutate the dict they receive.
    """
    print("-----------------------")
    print("[Info] Copy from %s " % pretrained_type)
    print("-----------------------")
    if isinstance(pretrained_type, tuple):
        proto_file, model_file = pretrained_type
        pretrained_weights = get_weights_from_caffesnapeshot(proto_file, model_file)
    else:
        import pickle
        print('Loading: %s' % cfg.caffemodel[pretrained_type].pkl)
        # Keys in the pickle are bytes (python2 dump), hence encoding='bytes'.
        pretrained_weights = pickle.load(open(cfg.caffemodel[pretrained_type].pkl, 'rb'), encoding='bytes')
        print(pretrained_weights.keys())
    not_copied = list(own_state.keys())  # names still here at the end get random-filled
    src_list = sorted(src2dsts.keys())
    for src in src_list:
        dsts = src2dsts[src]
        if not isinstance(dsts, list):
            dsts = [dsts]
        w, b = pretrained_weights[src.encode('utf-8')]  # pickle keys are bytes
        w = torch.from_numpy(w)  # cast as pytorch tensor
        b = torch.from_numpy(b)  # cast as pytorch tensor
        # one src can be copied to multiple dsts
        for dst in dsts:
            if ignore_missing_dst and dst not in own_state.keys():
                print('%-20s --> %-20s [ignored] Missing dst.' % (src, dst))
                continue
            print('%-20s --> %-20s' % (src, dst))
            dst_w_name = '%s.weight' % dst
            dst_b_name = '%s.bias' % dst
            assert dst_w_name in own_state.keys(), "[Error] %s not in %s" % (dst_w_name, own_state.keys())
            assert dst_b_name in own_state.keys(), "[Error] %s not in %s" % (dst_b_name, own_state.keys())
            # -- Copy w (shapes must match exactly; no reshaping is attempted)
            assert own_state[dst_w_name].shape == w.shape, '[%s] w: dest. %s != src. %s' % (
                dst_w_name, own_state[dst_w_name].shape, w.shape)
            own_state[dst_w_name].copy_(w)
            not_copied.remove(dst_w_name)
            # -- Copy b
            assert own_state[dst_b_name].shape == b.shape, '[%s] w: dest. %s != src. %s' % (
                dst_b_name, own_state[dst_b_name].shape, b.shape)
            own_state[dst_b_name].copy_(b)
            not_copied.remove(dst_b_name)
    # Fill whatever src2dsts did not cover.
    for name in not_copied:
        if name.endswith('.weight'):
            own_state[name].normal_(mean=0.0, std=0.005)  # gaussian; fc-style std=0.005
            print('%-20s --> %-20s' % ('[filler] gaussian005', name))
        elif name.endswith('.bias'):
            own_state[name].fill_(0)
            print('%-20s --> %-20s' % ('[filler] 0', name))
        else:
            print("Unknow parameter type: ", name)
            raise NotImplementedError
    print("-----------------------")
def _copy_weights_from_torchmodel(own_state, pretrained_type='alexnet', strict=True, src2dsts=None):
    """Copy torchvision-pretrained weights into a pytorch state dict, in place.

    Wrapper around load_state_dict-style copying:
      - src2dsts given : copy only the listed src -> dst entries; each dst may
        be a full parameter name or a layer prefix ('.weight'/'.bias' appended).
      - src2dsts None  : copy every name shared by both state dicts.
    strict: when True, unexpected or missing keys raise instead of being skipped.
    Params still uncopied afterwards are filled (weights ~ N(0, 0.005), biases 0);
    BatchNorm running statistics are intentionally left untouched.
    """
    # wrapper for load_state_dict
    from torch.nn.parameter import Parameter
    print("-----------------------")
    print("[Info] Copy from %s " % pretrained_type)
    print("-----------------------")
    src_state = model_zoo.load_url(cfg.torchmodel[pretrained_type].model_url)
    not_copied = list(own_state.keys())  # names still here at the end get filled
    if src2dsts is not None:
        for src, dsts in src2dsts.items():
            if not isinstance(dsts, list):
                dsts = [dsts]
            # one src can be copied to multiple dsts
            for dst in dsts:
                if dst in own_state.keys():
                    # exact parameter-name match
                    own_state[dst].copy_(src_state[src])
                    not_copied.remove(dst)
                    print('%-20s --> %-20s' % (src, dst))
                else:
                    # treat src/dst as layer prefixes: copy weight and bias
                    dst_w_name, src_w_name = '%s.weight' % dst, '%s.weight' % src
                    dst_b_name, src_b_name = '%s.bias' % dst, '%s.bias' % src
                    if (
                            dst_w_name not in own_state.keys() or dst_b_name not in own_state.keys()) and not strict:
                        print('%-20s --> %-20s [ignored] Missing dst.' % (src, dst))
                        continue
                    print('%-20s --> %-20s' % (src, dst))
                    # -- Copy w
                    assert own_state[dst_w_name].shape == src_state[src_w_name].shape, '[%s] w: dest. %s != src. %s' % (
                        dst_w_name, own_state[dst_w_name].shape, src_state[src_w_name].shape)
                    own_state[dst_w_name].copy_(src_state[src_w_name])
                    not_copied.remove(dst_w_name)
                    # -- Copy b
                    assert own_state[dst_b_name].shape == src_state[src_b_name].shape, '[%s] w: dest. %s != src. %s' % (
                        dst_b_name, own_state[dst_b_name].shape, src_state[src_b_name].shape)
                    own_state[dst_b_name].copy_(src_state[src_b_name])
                    not_copied.remove(dst_b_name)
    else:
        for name, param in src_state.items():
            if name in own_state:  # find in own parameter
                if isinstance(param, Parameter):
                    # backwards compatibility for serialized parameters
                    param = param.data
                try:
                    print('%-30s --> %-30s' % (name, name))
                    own_state[name].copy_(param)
                    not_copied.remove(name)
                except Exception:
                    raise RuntimeError('While copying the parameter named {}, '
                                       'whose dimensions in the model are {} and '
                                       'whose dimensions in the checkpoint are {}.'
                                       .format(name, own_state[name].size(), param.size()))
            elif strict:
                raise KeyError('unexpected key "{}" in state_dict'
                               .format(name))
            else:
                print('%-30s --> %-30s [ignored] Missing dst.' % (name, name))
    # Fill whatever was not copied from the checkpoint.
    for name in not_copied:
        if name.endswith('.weight'):
            own_state[name].normal_(mean=0.0, std=0.005)  # gaussian fill, std=0.005
            print('%-20s --> %-20s' % ('[filler] gaussian005', name))
        elif name.endswith('.bias'):
            own_state[name].fill_(0)
            print('%-30s --> %-30s' % ('[filler] 0', name))
        elif name.endswith('.running_mean') or name.endswith('.running_var') or name.endswith('num_batches_tracked'):
            # BatchNorm buffers: keep the module's own running statistics
            print('*************************** pass', name)
        else:
            print("Unknow parameter type: ", name)
            raise NotImplementedError
    if strict:
        missing = set(own_state.keys()) - set(src_state.keys())
        if len(missing) > 0:
            raise KeyError('missing keys in state_dict: "{}"'.format(missing))
    print("-----------------------")
def copy_weights(own_state, pretrained_type, **kwargs):
    """Dispatch weight copying based on the `pretrained_type` prefix.

    Usage:
        copy_weights(own_state, 'torchmodel.alexnet', strict=False)
        copy_weights(own_state, 'caffemodel.alexnet', ignore_missing_dst=True, src2dsts={})
        copy_weights(own_state, (proto_file, model_file))  # raw caffe snapshot

    Passing explicit src/dst layer names (via kwargs) avoids the
    "same layer name but different shape" problem of a plain state-dict copy.
    Raises NotImplementedError for unrecognized prefixes/types.
    """
    if isinstance(pretrained_type, str):
        if pretrained_type.startswith('torchmodel.'):
            pretrained_type = pretrained_type[len('torchmodel.'):]  # strip prefix
            copy_func = _copy_weights_from_torchmodel
        elif pretrained_type.startswith('caffemodel.'):
            pretrained_type = pretrained_type[len('caffemodel.'):]  # strip prefix
            copy_func = _copy_weights_from_caffemodel
        else:
            # FIX: corrected "Unkonw" typo in the error messages below.
            print("Unknown pretrained_type: ", pretrained_type)
            raise NotImplementedError
    elif isinstance(pretrained_type, tuple):
        copy_func = _copy_weights_from_caffemodel
    else:
        print("Unknown type(pretrained_type): ", type(pretrained_type))
        raise NotImplementedError
    copy_func(own_state, pretrained_type, **kwargs)
def init_weights_by_filling(nn_module_or_seq, gaussian_std=0.01, kaiming_normal=True, silent=False):
    """Initialize a module tree in place and return it.

    nn.Linear     : weight ~ N(0, gaussian_std), bias = 0.
    nn.Conv2d     : kaiming-normal when `kaiming_normal` is True, otherwise
                    a fan-out-scaled Gaussian; bias (if any) = 0.
    nn.BatchNorm2d: weight = 1, bias = 0 (when affine params exist).
    """
    if not silent:
        print('[init_weights_by_filling] gaussian_std=%s kaiming_normal=%s \n %s' % (
            gaussian_std, kaiming_normal, nn_module_or_seq))
    for _, module in nn_module_or_seq.named_modules():
        if isinstance(module, nn.Conv2d):
            if kaiming_normal:
                nn.init.kaiming_normal_(module.weight, mode='fan_out', nonlinearity='relu')
            else:
                # scale by fan-out: kernel area times output channels
                fan_out = module.kernel_size[0] * module.kernel_size[1] * module.out_channels
                module.weight.data.normal_(0, math.sqrt(2. / fan_out))
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.BatchNorm2d):
            if module.weight is not None:
                module.weight.data.fill_(1)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Linear):
            module.weight.data.normal_(mean=0, std=gaussian_std)
            module.bias.data.zero_()
    return nn_module_or_seq
def count_parameters_all(model):
    """Return the total number of parameter elements in `model` (trainable or not)."""
    # FIX: the original hand-rolled product loop bound a local named `nn`,
    # shadowing the module-level `torch.nn` import; Tensor.numel() computes
    # the same product of dimensions directly (scalar params count as 1).
    return sum(p.numel() for p in model.parameters())
def count_parameters_trainable(model):
    """Return the total number of parameter elements that require gradients."""
    total = 0
    for param in model.parameters():
        if param.requires_grad:
            total += param.numel()
    return total
| 18,165 | 43.415648 | 139 | py |
RPMG | RPMG-main/poselstm-pytorch/train.py | import time
# Training entry point: parse options, seed RNGs, build data/model, run epochs.
from options.train_options import TrainOptions
from data.data_loader import CreateDataLoader
from models.models import create_model
from util.visualizer import Visualizer
opt = TrainOptions().parse()
## SEEDING — make a run reproducible for a given --seed
import torch
import numpy
import random
torch.manual_seed(opt.seed)
numpy.random.seed(opt.seed)
random.seed(opt.seed)
torch.backends.cudnn.deterministic = True
## SEEDING
data_loader = CreateDataLoader(opt)
dataset = data_loader.load_data()
dataset_size = len(data_loader)
print('#training images = %d' % dataset_size)
model = create_model(opt)
visualizer = Visualizer(opt)
total_steps = 0
# Epochs run from epoch_count through niter + niter_decay (inclusive).
for epoch in range(opt.epoch_count, opt.niter + opt.niter_decay + 1):
    epoch_start_time = time.time()
    epoch_iter = 0
    for i, data in enumerate(dataset):
        iter_start_time = time.time()
        visualizer.reset()
        total_steps += opt.batchSize
        epoch_iter += opt.batchSize
        # tau ramps in 10 discrete steps from 1/20 up to 1/4 over the first
        # opt.niter epochs (ratio goes 0 -> 1), then stays at 1/4.
        ratio = min(epoch // (opt.niter // 10), 9) / 9
        tau = 1/20 + ratio * (1/4 - 1/20)
        model.set_input(data, tau)
        model.optimize_parameters()
        if total_steps % opt.print_freq == 0:
            errors = model.get_current_errors()
            t = (time.time() - iter_start_time) / opt.batchSize  # sec / image
            visualizer.print_current_errors(epoch, epoch_iter, errors, t)
            if opt.display_id > 0:
                visualizer.plot_current_errors(epoch, float(epoch_iter)/dataset_size, opt, errors)
    # Checkpoint both 'latest' and the epoch-numbered snapshot.
    if epoch % opt.save_epoch_freq == 0:
        print('saving the model at the end of epoch %d, iters %d' %
              (epoch, total_steps))
        model.save('latest')
        model.save(epoch)
    print('End of epoch %d / %d \t Time Taken: %d sec' %
          (epoch, opt.niter + opt.niter_decay, time.time() - epoch_start_time))
    model.update_learning_rate()
| 2,288 | 33.681818 | 98 | py |
RPMG | RPMG-main/poselstm-pytorch/options/base_options.py | import argparse
import os
from util import util
import torch
class BaseOptions():
    """Command-line options shared by train and test.

    Subclasses are expected to register extra options and set `self.isTrain`
    before parse() is called (parse() reads self.isTrain).
    """
    def __init__(self):
        # Defaults are shown in --help thanks to ArgumentDefaultsHelpFormatter.
        self.parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
        self.initialized = False
    def initialize(self):
        """Register all shared options on the parser."""
        self.parser.add_argument('--dataroot', required=True, help='path to images (should have subfolders trainA, trainB, valA, valB, etc)')
        self.parser.add_argument('--batchSize', type=int, default=75, help='input batch size')
        self.parser.add_argument('--loadSize', type=int, default=256, help='scale images to this size')
        self.parser.add_argument('--fineSize', type=int, default=224, help='then crop to this size')
        self.parser.add_argument('--input_nc', type=int, default=3, help='# of input image channels')
        self.parser.add_argument('--output_nc', type=int, default=7, help='# of output image channels')
        self.parser.add_argument('--lstm_hidden_size', type=int, default=256, help='hidden size of the LSTM layer in PoseLSTM')
        self.parser.add_argument('--gpu_ids', type=str, default='0', help='gpu ids: e.g. 0 0,1,2, 0,2. use -1 for CPU')
        self.parser.add_argument('--name', type=str, default='experiment_name', help='name of the experiment. It decides where to store samples and models')
        self.parser.add_argument('--dataset_mode', type=str, default='unaligned_posenet', help='chooses how datasets are loaded. [unaligned | aligned | single]')
        self.parser.add_argument('--model', type=str, default='posenet', help='chooses which model to use. [posenet | poselstm]')
        self.parser.add_argument('--nThreads', default=8, type=int, help='# threads for loading data')
        self.parser.add_argument('--checkpoints_dir', type=str, default='./checkpoints', help='models are saved here')
        self.parser.add_argument('--serial_batches', action='store_true', help='if true, takes images in order to make batches, otherwise takes them randomly')
        self.parser.add_argument('--display_winsize', type=int, default=224, help='display window size')
        self.parser.add_argument('--display_id', type=int, default=0, help='window id of the web display')
        self.parser.add_argument('--display_port', type=int, default=8097, help='visdom port of the web display')
        self.parser.add_argument('--no_dropout', action='store_true', help='no dropout for the generator')
        self.parser.add_argument('--max_dataset_size', type=int, default=float("inf"), help='Maximum number of samples allowed per dataset. If the dataset directory contains more than max_dataset_size, only a subset is loaded.')
        self.parser.add_argument('--resize_or_crop', type=str, default='scale_width_and_crop', help='scaling and cropping of images at load time [resize_and_crop|crop|scale_width|scale_width_and_crop]')
        self.parser.add_argument('--no_flip', action='store_true', default=True, help='if specified, do not flip the images for data augmentation')
        self.parser.add_argument('--seed', type=int, default=0, help='initial random seed for deterministic results')
        self.parser.add_argument('--beta', type=float, default=500, help='beta factor used in posenet.')
        # rotation representation / training mode (see RPMG rotation variants)
        self.parser.add_argument('--mode', type=str, default='9D_inf', choices=['9D_SVD', '9D_inf', '9D_RPMG',
                                                                               '6D_GM', '4D_Axis', '6D_RPMG',
                                                                               '4D_norm', '3D_Euler', '4D_RPMG', '10D', '10D_RPMG'])
        self.initialized = True
    def parse(self):
        """Parse argv, resolve GPU ids, print/save all options, return opt."""
        if not self.initialized:
            self.initialize()
        self.opt = self.parser.parse_args()
        self.opt.isTrain = self.isTrain   # train or test; attribute set by subclass
        # Turn the comma-separated --gpu_ids string into a list of ints;
        # negative ids are dropped (so '-1' yields an empty list -> CPU).
        str_ids = self.opt.gpu_ids.split(',')
        self.opt.gpu_ids = []
        for str_id in str_ids:
            id = int(str_id)  # NOTE(review): shadows the builtin `id`
            if id >= 0:
                self.opt.gpu_ids.append(id)
        # set gpu ids
        if len(self.opt.gpu_ids) > 0:
            torch.cuda.set_device(self.opt.gpu_ids[0])
        args = vars(self.opt)
        print('------------ Options -------------')
        for k, v in sorted(args.items()):
            print('%s: %s' % (str(k), str(v)))
        print('-------------- End ----------------')
        # save to the disk under <checkpoints_dir>/<name>/opt_<phase>.txt
        expr_dir = os.path.join(self.opt.checkpoints_dir, self.opt.name)
        util.mkdirs(expr_dir)
        file_name = os.path.join(expr_dir, 'opt_'+self.opt.phase+'.txt')
        with open(file_name, 'wt') as opt_file:
            opt_file.write('------------ Options -------------\n')
            for k, v in sorted(args.items()):
                opt_file.write('%s: %s\n' % (str(k), str(v)))
            opt_file.write('-------------- End ----------------\n')
        return self.opt
| 4,881 | 63.236842 | 228 | py |
RPMG | RPMG-main/poselstm-pytorch/models/base_model.py | import os
import torch
class BaseModel():
    """Abstract base for all models: option bookkeeping, checkpoint I/O and
    learning-rate scheduling. Subclasses override the no-op hooks below."""

    def name(self):
        """Identifier used in logs."""
        return 'BaseModel'

    def initialize(self, opt):
        """Record options, choose the tensor type and the checkpoint directory."""
        self.opt = opt
        self.gpu_ids = opt.gpu_ids
        self.isTrain = opt.isTrain
        # CUDA tensors when any GPU id was requested, plain CPU tensors otherwise.
        self.Tensor = torch.cuda.FloatTensor if self.gpu_ids else torch.Tensor
        self.save_dir = os.path.join(opt.checkpoints_dir, opt.name)

    def set_input(self, input):
        """Stash the current batch for forward/optimize."""
        self.input = input

    def forward(self):
        pass

    # used at test time: inference only, no backprop
    def test(self):
        pass

    def get_image_paths(self):
        pass

    def optimize_parameters(self):
        pass

    def get_current_visuals(self):
        """Default visualization payload: the raw input batch."""
        return self.input

    def get_current_errors(self):
        """Mapping of loss names to values; empty by default."""
        return {}

    def save(self, label):
        pass

    def save_network(self, network, network_label, epoch_label, gpu_ids):
        """Serialize `network` (moved to CPU) as '<epoch>_net_<label>.pth',
        then move it back onto the first GPU if any are in use."""
        checkpoint_name = '%s_net_%s.pth' % (epoch_label, network_label)
        checkpoint_path = os.path.join(self.save_dir, checkpoint_name)
        torch.save(network.cpu().state_dict(), checkpoint_path)
        if len(gpu_ids) and torch.cuda.is_available():
            network.cuda(gpu_ids[0])

    def load_network(self, network, network_label, epoch_label):
        """Load weights previously written by save_network into `network`."""
        checkpoint_name = '%s_net_%s.pth' % (epoch_label, network_label)
        checkpoint_path = os.path.join(self.save_dir, checkpoint_name)
        network.load_state_dict(torch.load(checkpoint_path))

    # called once per epoch
    def update_learning_rate(self):
        """Advance every scheduler one step and report the resulting LR."""
        for scheduler in self.schedulers:
            scheduler.step()
        current_lr = self.optimizers[0].param_groups[0]['lr']
        print('learning rate = %.7f' % current_lr)
| 1,774 | 28.098361 | 78 | py |
RPMG | RPMG-main/poselstm-pytorch/models/networks.py | import torch
import torch.nn as nn
from torch.nn import init
from torch.nn import functional as F
import functools
from torch.autograd import Variable
from torch.optim import lr_scheduler
import numpy as np
###############################################################################
# Functions
###############################################################################
def weight_init_googlenet(key, module, weights=None):
    # Initialize (or load) the parameters of one GoogLeNet submodule, in place.
    #   key    : caffe layer name used to look up pretrained arrays in `weights`;
    #            special keys "LSTM" and "XYZ" select dedicated random inits.
    #   weights: optional dict of numpy arrays keyed by bytes b"<key>_0" (weight)
    #            and b"<key>_1" (bias); when given, values are loaded verbatim.
    # Returns the same module, so calls can be inlined in nn.Sequential lists.
    if key == "LSTM":
        # LSTM: zero all biases, xavier-normal all weight matrices.
        # Note: this branch wins even when `weights` is provided.
        for name, param in module.named_parameters():
            if 'bias' in name:
                init.constant_(param, 0.0)
            elif 'weight' in name:
                init.xavier_normal_(param)
    elif weights is None:
        # Random init: zero bias, gaussian weight (wider for the XYZ regressor).
        init.constant_(module.bias.data, 0.0)
        if key == "XYZ":
            init.normal_(module.weight.data, 0.0, 0.5)
        elif key == "LSTM":
            # NOTE(review): unreachable — key == "LSTM" is consumed by the
            # first branch above; dead code kept verbatim.
            init.xavier_normal_(module.weight.data)
        else:
            init.normal_(module.weight.data, 0.0, 0.01)
    else:
        # Load pretrained caffe arrays (dict keys are bytes).
        module.bias.data[...] = torch.from_numpy(weights[(key+"_1").encode()])
        module.weight.data[...] = torch.from_numpy(weights[(key+"_0").encode()])
    return module
def get_scheduler(optimizer, opt):
    """Build the learning-rate scheduler selected by opt.lr_policy.

    'lambda' : linear decay of the LR multiplier from 1 to 0 over
               opt.niter_decay epochs, starting after opt.niter epochs
               (offset by opt.epoch_count).
    'step'   : multiply the LR by 0.1 every opt.lr_decay_iters epochs.
    'plateau': shrink the LR 5x when the monitored metric stops improving.

    Raises NotImplementedError for any other policy.
    """
    if opt.lr_policy == 'lambda':
        def lambda_rule(epoch):
            lr_l = 1.0 - max(0, epoch + 1 + opt.epoch_count - opt.niter) / float(opt.niter_decay + 1)
            return lr_l
        scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_rule)
    elif opt.lr_policy == 'step':
        scheduler = lr_scheduler.StepLR(optimizer, step_size=opt.lr_decay_iters, gamma=0.1)
    elif opt.lr_policy == 'plateau':
        scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.2, threshold=0.01, patience=5)
    else:
        # FIX: the original `return NotImplementedError(...)` handed callers an
        # exception *instance* instead of raising, deferring the failure to the
        # first scheduler.step() call.
        raise NotImplementedError('learning rate policy [%s] is not implemented' % opt.lr_policy)
    return scheduler
def define_network(mode, input_nc, lstm_hidden_size, model, init_from=None, isTest=False, gpu_ids=[]):
    """Factory for the pose-regression network ('posenet' or 'poselstm').

    Moves the network onto the first listed GPU when gpu_ids is non-empty;
    raises NotImplementedError for an unrecognized model name.
    """
    if len(gpu_ids) > 0:
        assert(torch.cuda.is_available())
    if model == 'posenet':
        net = PoseNet(input_nc, weights=init_from, isTest=isTest, gpu_ids=gpu_ids)
    elif model == 'poselstm':
        net = PoseLSTM(mode, input_nc, lstm_hidden_size, weights=init_from, isTest=isTest, gpu_ids=gpu_ids)
    else:
        raise NotImplementedError('Model name [%s] is not recognized' % model)
    if len(gpu_ids) > 0:
        net.cuda(gpu_ids[0])
    return net
##############################################################################
# Classes
##############################################################################
# defines the regression heads for googlenet
# defines the regression heads for googlenet
class RegressionHead(nn.Module):
    """Pose-regression head attached to one GoogLeNet stage.

    forward() returns [xyz (B,3), rotation (B,outdim)].
    lossID selects the head variant: "loss1"/"loss2" are the auxiliary heads
    (avg-pool 5x5/3 + 1x1 conv + fc), "loss3" is the final head (avg-pool 7x7
    + fc). When lstm_hidden_size is given (PoseLSTM), the feature vector is
    additionally scanned by two bidirectional LSTMs before the regressors.
    """
    def __init__(self, outdim, lossID, weights=None, lstm_hidden_size=None):
        super(RegressionHead, self).__init__()
        self.has_lstm = lstm_hidden_size != None  # NOTE(review): prefer `is not None`
        dropout_rate = 0.5 if lossID == "loss3" else 0.7
        nc_loss = {"loss1": 512, "loss2": 528}  # channels entering each aux head
        # input width of the two final regressors (4x hidden when LSTMs are used:
        # two bidirectional LSTMs x two directions)
        nc_cls = [1024, 2048] if lstm_hidden_size is None else [lstm_hidden_size*4, lstm_hidden_size*4]
        self.dropout = nn.Dropout(p=dropout_rate)
        if lossID != "loss3":
            # auxiliary head: pool -> 1x1 conv to 128 channels -> fc 2048->1024
            self.projection = nn.Sequential(*[nn.AvgPool2d(kernel_size=5, stride=3),
                                              weight_init_googlenet(lossID+"/conv", nn.Conv2d(nc_loss[lossID], 128, kernel_size=1), weights),
                                              nn.ReLU(inplace=True)])
            self.cls_fc_pose = nn.Sequential(*[weight_init_googlenet(lossID+"/fc", nn.Linear(2048, 1024), weights),
                                               nn.ReLU(inplace=True)])
            self.cls_fc_xy = weight_init_googlenet("XYZ", nn.Linear(nc_cls[0], 3))
            self.cls_fc_wpqr = weight_init_googlenet("WPQR", nn.Linear(nc_cls[0], outdim))
            if lstm_hidden_size is not None:
                self.lstm_pose_lr = weight_init_googlenet("LSTM", nn.LSTM(input_size=32, hidden_size=lstm_hidden_size, bidirectional=True, batch_first=True))
                self.lstm_pose_ud = weight_init_googlenet("LSTM", nn.LSTM(input_size=32, hidden_size=lstm_hidden_size, bidirectional=True, batch_first=True))
        else:
            # final head: global 7x7 avg-pool -> fc 1024->2048
            self.projection = nn.AvgPool2d(kernel_size=7, stride=1)
            self.cls_fc_pose = nn.Sequential(*[weight_init_googlenet("pose", nn.Linear(1024, 2048)),
                                               nn.ReLU(inplace=True)])
            self.cls_fc_xy = weight_init_googlenet("XYZ", nn.Linear(nc_cls[1], 3))
            self.cls_fc_wpqr = weight_init_googlenet("WPQR", nn.Linear(nc_cls[1], outdim))
            if lstm_hidden_size is not None:
                self.lstm_pose_lr = weight_init_googlenet("LSTM", nn.LSTM(input_size=64, hidden_size=lstm_hidden_size, bidirectional=True, batch_first=True))
                self.lstm_pose_ud = weight_init_googlenet("LSTM", nn.LSTM(input_size=32, hidden_size=lstm_hidden_size, bidirectional=True, batch_first=True))
    def forward(self, input):
        output = self.projection(input)
        output = self.cls_fc_pose(output.view(output.size(0), -1))
        if self.has_lstm:
            # Reshape the feature vector to (B, 32, -1) and scan it with two
            # bidirectional LSTMs ("left-right" and its transpose "up-down"),
            # then concatenate the four final hidden states.
            output = output.view(output.size(0),32, -1)
            # NOTE(review): permute(0,1,2) is the identity permutation, so
            # lstm_pose_lr receives the untransposed view — confirm intended.
            _, (hidden_state_lr, _) = self.lstm_pose_lr(output.permute(0,1,2))
            _, (hidden_state_ud, _) = self.lstm_pose_ud(output.permute(0,2,1))
            output = torch.cat((hidden_state_lr[0,:,:],
                                hidden_state_lr[1,:,:],
                                hidden_state_ud[0,:,:],
                                hidden_state_ud[1,:,:]), 1)
        output = self.dropout(output)
        output_xy = self.cls_fc_xy(output)
        output_wpqr = self.cls_fc_wpqr(output)
        return [output_xy, output_wpqr]
# define inception block for GoogleNet
class InceptionBlock(nn.Module):
    """One GoogLeNet (Inception v1) module.

    Four parallel branches — 1x1 conv, 1x1->3x3 conv, 1x1->5x5 conv, and
    3x3-maxpool->1x1 conv — concatenated along the channel dimension.
    Blocks "3b" and "4e" additionally downsample 2x with a trailing max-pool.
    Branch channel counts follow the standard GoogLeNet table; `weights`
    optionally supplies pretrained caffe arrays (see weight_init_googlenet).
    """
    def __init__(self, incp, input_nc, x1_nc, x3_reduce_nc, x3_nc, x5_reduce_nc,
                 x5_nc, proj_nc, weights=None, gpu_ids=[]):
        super(InceptionBlock, self).__init__()
        self.gpu_ids = gpu_ids
        # 1x1 branch
        self.branch_x1 = nn.Sequential(*[
            weight_init_googlenet("inception_"+incp+"/1x1", nn.Conv2d(input_nc, x1_nc, kernel_size=1), weights),
            nn.ReLU(inplace=True)])
        # 1x1 reduce -> 3x3 branch
        self.branch_x3 = nn.Sequential(*[
            weight_init_googlenet("inception_"+incp+"/3x3_reduce", nn.Conv2d(input_nc, x3_reduce_nc, kernel_size=1), weights),
            nn.ReLU(inplace=True),
            weight_init_googlenet("inception_"+incp+"/3x3", nn.Conv2d(x3_reduce_nc, x3_nc, kernel_size=3, padding=1), weights),
            nn.ReLU(inplace=True)])
        # 1x1 reduce -> 5x5 branch
        self.branch_x5 = nn.Sequential(*[
            weight_init_googlenet("inception_"+incp+"/5x5_reduce", nn.Conv2d(input_nc, x5_reduce_nc, kernel_size=1), weights),
            nn.ReLU(inplace=True),
            weight_init_googlenet("inception_"+incp+"/5x5", nn.Conv2d(x5_reduce_nc, x5_nc, kernel_size=5, padding=2), weights),
            nn.ReLU(inplace=True)])
        # max-pool -> 1x1 projection branch
        self.branch_proj = nn.Sequential(*[
            nn.MaxPool2d(kernel_size=3, stride=1, padding=1),
            weight_init_googlenet("inception_"+incp+"/pool_proj", nn.Conv2d(input_nc, proj_nc, kernel_size=1), weights),
            nn.ReLU(inplace=True)])
        if incp in ["3b", "4e"]:
            self.pool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        else:
            self.pool = None
    def forward(self, input):
        outputs = [self.branch_x1(input), self.branch_x3(input),
                   self.branch_x5(input), self.branch_proj(input)]
        # concatenate branch outputs along channels
        output = torch.cat(outputs, 1)
        if self.pool is not None:
            return self.pool(output)
        return output
class PoseNet(nn.Module):
    """GoogLeNet (Inception v1) backbone for camera-pose regression.

    NOTE(review): forward() uses self.cls1_fc / cls2_fc / cls3_fc, which are
    commented out in this __init__ and are defined by the PoseLSTM subclass —
    instantiating PoseNet directly and calling forward() would fail with an
    AttributeError; confirm this class is only used via PoseLSTM.
    """
    def __init__(self, input_nc, weights=None, isTest=False, gpu_ids=[]):
        super(PoseNet, self).__init__()
        self.gpu_ids = gpu_ids
        self.isTest = isTest
        # Stem: conv7x7/2 -> pool -> LRN -> conv1x1 -> conv3x3 -> LRN -> pool.
        self.before_inception = nn.Sequential(*[
            weight_init_googlenet("conv1/7x7_s2", nn.Conv2d(input_nc, 64, kernel_size=7, stride=2, padding=3), weights),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2, padding=1),
            nn.LocalResponseNorm(size=5, alpha=0.0001, beta=0.75, k=1),
            weight_init_googlenet("conv2/3x3_reduce", nn.Conv2d(64, 64, kernel_size=1), weights),
            nn.ReLU(inplace=True),
            weight_init_googlenet("conv2/3x3", nn.Conv2d(64, 192, kernel_size=3, padding=1), weights),
            nn.ReLU(inplace=True),
            nn.LocalResponseNorm(size=5, alpha=0.0001, beta=0.75, k=1),
            nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        ])
        # Nine inception modules with the standard GoogLeNet channel table;
        # "3b" and "4e" downsample 2x inside InceptionBlock.
        self.inception_3a = InceptionBlock("3a", 192, 64, 96, 128, 16, 32, 32, weights, gpu_ids)
        self.inception_3b = InceptionBlock("3b", 256, 128, 128, 192, 32, 96, 64, weights, gpu_ids)
        self.inception_4a = InceptionBlock("4a", 480, 192, 96, 208, 16, 48, 64, weights, gpu_ids)
        self.inception_4b = InceptionBlock("4b", 512, 160, 112, 224, 24, 64, 64, weights, gpu_ids)
        self.inception_4c = InceptionBlock("4c", 512, 128, 128, 256, 24, 64, 64, weights, gpu_ids)
        self.inception_4d = InceptionBlock("4d", 512, 112, 144, 288, 32, 64, 64, weights, gpu_ids)
        self.inception_4e = InceptionBlock("4e", 528, 256, 160, 320, 32, 128, 128, weights, gpu_ids)
        self.inception_5a = InceptionBlock("5a", 832, 256, 160, 320, 32, 128, 128, weights, gpu_ids)
        self.inception_5b = InceptionBlock("5b", 832, 384, 192, 384, 48, 128, 128, weights, gpu_ids)
    def forward(self, input):
        output_bf = self.before_inception(input)
        output_3a = self.inception_3a(output_bf)
        output_3b = self.inception_3b(output_3a)
        output_4a = self.inception_4a(output_3b)
        output_4b = self.inception_4b(output_4a)
        output_4c = self.inception_4c(output_4b)
        output_4d = self.inception_4d(output_4c)
        output_4e = self.inception_4e(output_4d)
        output_5a = self.inception_5a(output_4e)
        output_5b = self.inception_5b(output_5a)
        if not self.isTest:
            # Training: regression heads at three depths (auxiliary losses).
            # Each head returns a [xy, wpqr] list, so `+` concatenates them
            # into one six-element list.
            return self.cls1_fc(output_4a) + self.cls2_fc(output_4d) + self.cls3_fc(output_5b)
        # Test: only the final head's [xy, wpqr].
        return self.cls3_fc(output_5b)
class PoseLSTM(PoseNet):
    """PoseNet backbone with LSTM-based regression heads.

    `mode` is a string like "4D_norm" / "6D_GM": the leading integer selects
    the rotation-representation dimension fed to each RegressionHead.
    """
    def __init__(self, mode, input_nc, lstm_hidden_size, weights=None, isTest=False, gpu_ids=None):
        # Fix: avoid the shared mutable-default-argument pitfall (gpu_ids=[]).
        # Passing None (the new default) yields a fresh list per instance;
        # explicit callers are unaffected.
        if gpu_ids is None:
            gpu_ids = []
        super(PoseLSTM, self).__init__(input_nc, weights, isTest, gpu_ids)
        outdim = int(mode.split('D')[0])  # e.g. "6D_GM" -> 6
        self.cls1_fc = RegressionHead(outdim, lossID="loss1", weights=weights, lstm_hidden_size=lstm_hidden_size)
        self.cls2_fc = RegressionHead(outdim, lossID="loss2", weights=weights, lstm_hidden_size=lstm_hidden_size)
        self.cls3_fc = RegressionHead(outdim, lossID="loss3", weights=weights, lstm_hidden_size=lstm_hidden_size)
        # Register every submodule so .parameters() / .eval() reach them all.
        self.model = nn.Sequential(*[self.inception_3a, self.inception_3b,
                                     self.inception_4a, self.inception_4b,
                                     self.inception_4c, self.inception_4d,
                                     self.inception_4e, self.inception_5a,
                                     self.inception_5b, self.cls1_fc,
                                     self.cls2_fc, self.cls3_fc
                                     ])
        if self.isTest:
            self.model.eval()  # ensure Dropout is deactivated during test
| 12,387 | 51.05042 | 157 | py |
RPMG | RPMG-main/poselstm-pytorch/models/poselstm_model.py | from tracemalloc import get_traced_memory
from builtins import NotImplementedError
import numpy as np
import torch
import torch.nn.functional as F
import os
from collections import OrderedDict
from torch.autograd import Variable
import util.util as util
from util.image_pool import ImagePool
from .base_model import BaseModel
from . import networks
import pickle
import numpy
import sys
BASEPATH = os.path.dirname(__file__)
sys.path.append(os.path.join(BASEPATH, '..', '..', 'utils'))
import tools
import rpmg
class PoseLSTModel(BaseModel):
    """PoseLSTM training/eval wrapper.

    Predicts camera pose from an image: a 3-vector position plus a rotation.
    input_B holds the ground truth as [x, y, z, quaternion(4)]; the network
    emits, per auxiliary head, a (position, rotation-representation) pair, so
    pred_B alternates position at even indices / rotation at odd indices.
    """
    def name(self):
        return 'PoseLSTModel'
    def initialize(self, opt):
        """Build the network, losses and optimizer from the options object."""
        BaseModel.initialize(self, opt)
        self.isTrain = opt.isTrain
        # define tensors
        self.input_A = self.Tensor(opt.batchSize, opt.input_nc,
                                   opt.fineSize, opt.fineSize)
        self.input_B = self.Tensor(opt.batchSize, opt.output_nc)
        # load/define networks
        googlenet_weights = None
        if self.isTrain and opt.init_weights != '':
            # Pickled GoogLeNet weights (bytes keys, hence encoding="bytes").
            googlenet_file = open(opt.init_weights, "rb")
            googlenet_weights = pickle.load(googlenet_file, encoding="bytes")
            googlenet_file.close()
            print('initializing the weights from '+ opt.init_weights)
        self.mean_image = np.load(os.path.join(opt.dataroot , 'mean_image.npy'))
        self.netG = networks.define_network(opt.mode, opt.input_nc, opt.lstm_hidden_size, opt.model,
                                            init_from=googlenet_weights, isTest=not self.isTrain,
                                            gpu_ids = self.gpu_ids)
        if not self.isTrain or opt.continue_train:
            self.load_network(self.netG, 'G', opt.which_epoch)
        if self.isTrain:
            self.old_lr = opt.lr
            # define loss functions
            self.sum_criterion = torch.nn.MSELoss(reduction='sum')
            self.mean_criterion = torch.nn.MSELoss(reduction='mean')
            # initialize optimizers
            self.schedulers = []
            self.optimizers = []
            self.optimizer_G = torch.optim.Adam(self.netG.parameters(),
                                                lr=opt.lr, eps=1,
                                                weight_decay=0.0625,
                                                betas=(self.opt.adambeta1, self.opt.adambeta2))
            self.optimizers.append(self.optimizer_G)
            # for optimizer in self.optimizers:
            #     self.schedulers.append(networks.get_scheduler(optimizer, opt))
            print('---------- Networks initialized -------------')
            # networks.print_network(self.netG)
            # print('-----------------------------------------------')
    def set_input(self, input, tau):
        """Copy a data batch into the model buffers.

        input: dict with 'A' (images), 'B' (poses), 'A_paths'.
        tau: RPMG step parameter forwarded to the manifold-gradient layer.
        """
        input_A = input['A']
        input_B = input['B']
        self.image_paths = input['A_paths']
        self.input_A.resize_(input_A.size()).copy_(input_A)
        self.input_B.resize_(input_B.size()).copy_(input_B)
        self.tau = tau
        # Columns 3: of input_B are the ground-truth quaternion -> 3x3 matrix.
        self.gt_r = tools.compute_rotation_matrix_from_quaternion(self.input_B[:, 3:])
    def forward(self):
        """Run the network and map each head's raw rotation output to a 3x3 matrix."""
        self.pred_B = self.netG(self.input_A)
        l = len(self.pred_B) // 2  # number of (position, rotation) head pairs
        loss_weights = [0.3, 0.3, 1]
        for i in range(l):
            out_nd = self.pred_B[2*i+1]  # raw n-D rotation representation
            if 'RPMG' in self.opt.mode:
                # Riemannian projective manifold gradient layer.
                out_rmat = rpmg.simple_RPMG.apply(out_nd, self.tau, 0.01, self.opt.beta * loss_weights[i])
            else:
                # Plain rotation-representation -> matrix conversions.
                if(self.opt.mode == "4D_norm"):
                    out_rmat = tools.compute_rotation_matrix_from_quaternion(out_nd) #b*3*3
                elif(self.opt.mode=="6D_GM"):
                    out_rmat = tools.compute_rotation_matrix_from_ortho6d(out_nd) #b*3*3
                elif(self.opt.mode=="9D_SVD"):
                    out_rmat = tools.symmetric_orthogonalization(out_nd) # b*3*3
                elif (self.opt.mode == "10D"):
                    out_rmat = tools.compute_rotation_matrix_from_10d(out_nd) # b*3*3
                elif (self.opt.mode == "3D_Euler"):
                    out_rmat = tools.compute_rotation_matrix_from_euler(out_nd) # b*3*3
                elif (self.opt.mode == "4D_Axis"):
                    out_rmat = tools.compute_rotation_matrix_from_axisAngle(out_nd) # b*3*3
                else:
                    raise NotImplementedError
            self.pred_B[2*i+1] = out_rmat
    # no backprop gradients
    def test(self):
        self.forward()
    # get image paths
    def get_image_paths(self):
        return self.image_paths
    def backward(self):
        """Accumulate the weighted position+orientation loss and backprop."""
        self.loss_G = 0
        # self.loss_pos = 0
        # self.loss_ori = 0
        loss_weights = [0.3, 0.3, 1]
        for l, w in enumerate(loss_weights):
            mse_pos = self.mean_criterion(self.pred_B[2*l], self.input_B[:, 0:3])
            if 'RPMG' in self.opt.mode:
                # RPMG already folds beta*w into its backward, so the
                # orientation term enters unweighted here.
                mse_ori = self.sum_criterion(self.pred_B[2*l+1], self.gt_r)
                self.loss_G += mse_pos * w + mse_ori
            else:
                mse_ori = self.mean_criterion(self.pred_B[2*l+1], self.gt_r)
                self.loss_G += (mse_pos + mse_ori*self.opt.beta) * w
            # mse_ori = self.sum_criterion(self.pred_B[2*l+1], self.gt_r)
            # self.loss_pos += mse_pos.item() * w
            # self.loss_ori += mse_ori.item() * w * self.opt.beta
        self.loss_G.backward()
    def optimize_parameters(self):
        """One optimization step: forward, zero grads, backward, update."""
        self.forward()
        self.optimizer_G.zero_grad()
        self.backward()
        self.optimizer_G.step()
    def get_current_errors(self):
        """Position distance and mean geodesic rotation error (degrees).

        Only the first head's outputs (pred_B[0], pred_B[1]) are evaluated.
        """
        pos_err = torch.dist(self.pred_B[0], self.input_B[:, 0:3])
        ori_err = tools.compute_geodesic_distance_from_two_matrices(self.gt_r, self.pred_B[1])
        ori_err = ori_err.mean() * 180 / np.pi
        if self.opt.isTrain:
            return OrderedDict([('pos_err', pos_err),
                                ('ori_err', ori_err),
                                ])
        else:
            return [pos_err.item(), ori_err.item()]
    def get_current_pose(self):
        """Return the first sample's predicted pose as one flat numpy array."""
        return numpy.concatenate((self.pred_B[0].data[0].cpu().numpy(),
                                  self.pred_B[1].data[0].cpu().numpy()))
    def get_current_visuals(self):
        input_A = util.tensor2im(self.input_A.data)
        # pred_B = util.tensor2im(self.pred_B.data)
        # input_B = util.tensor2im(self.input_B.data)
        return OrderedDict([('input_A', input_A)])
    def save(self, label):
        self.save_network(self.netG, 'G', label, self.gpu_ids)
| 6,529 | 39.308642 | 106 | py |
RPMG | RPMG-main/poselstm-pytorch/util/image_pool.py | import random
import numpy as np
import torch
from torch.autograd import Variable
class ImagePool():
    """History buffer of generated images (Shrivastava et al. style).

    query() returns a mix of fresh and previously stored images; with
    pool_size == 0 it is a pass-through.
    """

    def __init__(self, pool_size):
        self.pool_size = pool_size
        if self.pool_size > 0:
            self.num_imgs = 0
            self.images = []

    def query(self, images):
        """Return a batch of images, half of which (on average) come from the pool."""
        if self.pool_size == 0:
            return Variable(images)
        selected = []
        for img in images:
            img = torch.unsqueeze(img, 0)
            if self.num_imgs < self.pool_size:
                # Pool not full yet: store and return the new image.
                self.num_imgs += 1
                self.images.append(img)
                selected.append(img)
            elif random.uniform(0, 1) > 0.5:
                # Swap the new image for a random stored one.
                slot = random.randint(0, self.pool_size - 1)
                stored = self.images[slot].clone()
                self.images[slot] = img
                selected.append(stored)
            else:
                selected.append(img)
        return Variable(torch.cat(selected, 0))
| 1,116 | 30.914286 | 67 | py |
RPMG | RPMG-main/poselstm-pytorch/util/util.py | from __future__ import print_function
import torch
import numpy as np
from PIL import Image
import inspect, re
import numpy as np
import os
import collections
# Converts a Tensor into a Numpy array
# |imtype|: the desired type of the converted numpy array
def tensor2im(image_tensor, imtype=np.uint8):
    """Convert the first image of a NCHW tensor in [-1, 1] to an HWC uint8 array.

    Single-channel images are replicated to three channels.
    """
    arr = image_tensor[0].cpu().float().numpy()
    if arr.shape[0] == 1:
        arr = np.tile(arr, (3, 1, 1))
    hwc = np.transpose(arr, (1, 2, 0))
    scaled = (hwc + 1) / 2.0 * 255.0  # [-1, 1] -> [0, 255]
    return scaled.astype(imtype)
def diagnose_network(net, name='network'):
    """Print *name* followed by the mean absolute gradient over all parameters."""
    total = 0.0
    seen = 0
    for param in net.parameters():
        if param.grad is None:
            continue
        total += torch.mean(torch.abs(param.grad.data))
        seen += 1
    if seen > 0:
        total = total / seen
    print(name)
    print(total)
def save_image(image_numpy, image_path):
    """Write an HWC uint8 numpy array to *image_path* via PIL."""
    Image.fromarray(image_numpy).save(image_path)
def info(object, spacing=10, collapse=1):
    """Print methods and doc strings.

    Takes module, class, list, dictionary, or string.
    spacing: column width for the method-name column.
    collapse: truthy -> collapse each docstring onto one line.
    """
    # Fix: collections.Callable was removed in Python 3.10; the builtin
    # callable() is the portable equivalent.
    methodList = [e for e in dir(object) if callable(getattr(object, e))]
    processFunc = collapse and (lambda s: " ".join(s.split())) or (lambda s: s)
    print("\n".join(["%s %s" %
                     (method.ljust(spacing),
                      processFunc(str(getattr(object, method).__doc__)))
                     for method in methodList]))
def varname(p):
    """Best-effort: return the name of the variable passed as *p*.

    Inspects the caller's source line(s) and regex-matches the identifier
    inside the varname(...) call; returns None when no match is found
    (e.g. when source context is unavailable).
    """
    # getframeinfo(...)[3] is the caller's code_context (list of source lines).
    for line in inspect.getframeinfo(inspect.currentframe().f_back)[3]:
        m = re.search(r'\bvarname\s*\(\s*([A-Za-z_][A-Za-z0-9_]*)\s*\)', line)
        if m:
            return m.group(1)
def print_numpy(x, val=True, shp=False):
    """Print summary statistics (and optionally the shape) of array *x*."""
    x = x.astype(np.float64)
    if shp:
        print('shape,', x.shape)
    if not val:
        return
    flat = x.flatten()
    stats = (np.mean(flat), np.min(flat), np.max(flat), np.median(flat), np.std(flat))
    print('mean = %3.3f, min = %3.3f, max = %3.3f, median = %3.3f, std=%3.3f' % stats)
def mkdirs(paths):
    """Create every directory in *paths* (a single path string or a list of them)."""
    if not isinstance(paths, list) or isinstance(paths, str):
        mkdir(paths)
    else:
        for p in paths:
            mkdir(p)
def mkdir(path):
    """Create directory *path* (and parents) if it does not already exist.

    Fix: exist_ok=True closes the check-then-create race — a concurrent
    creation between the exists() test and makedirs() no longer raises.
    The exists() pre-check is kept so a pre-existing non-directory path is
    still silently skipped, as before.
    """
    if not os.path.exists(path):
        os.makedirs(path, exist_ok=True)
| 2,265 | 29.621622 | 97 | py |
RPMG | RPMG-main/poselstm-pytorch/data/custom_dataset_data_loader.py | import torch.utils.data
from data.base_data_loader import BaseDataLoader
def CreateDataset(opt):
    """Instantiate and initialize the dataset selected by opt.dataset_mode.

    Only 'unaligned_posenet' is supported; anything else raises ValueError.
    """
    if opt.dataset_mode != 'unaligned_posenet':
        raise ValueError("Dataset [%s] not recognized." % opt.dataset_mode)
    from data.unaligned_posenet_dataset import UnalignedPoseNetDataset
    dataset = UnalignedPoseNetDataset()
    print("dataset [%s] was created" % (dataset.name()))
    dataset.initialize(opt)
    return dataset
class CustomDatasetDataLoader(BaseDataLoader):
    """Wraps a torch DataLoader around the dataset chosen by opt.dataset_mode,
    capping iteration at opt.max_dataset_size batches."""
    def name(self):
        return 'CustomDatasetDataLoader'
    def initialize(self, opt):
        BaseDataLoader.initialize(self, opt)
        self.dataset = CreateDataset(opt)
        # NOTE(review): every worker is seeded with the same fixed opt.seed,
        # so all workers draw identical random streams — confirm intended.
        def init_fn(worker_id):
            torch.manual_seed(opt.seed)
        self.dataloader = torch.utils.data.DataLoader(
            self.dataset,
            batch_size=opt.batchSize,
            shuffle=not opt.serial_batches,
            num_workers=int(opt.nThreads),
            worker_init_fn=init_fn)
    def load_data(self):
        # The loader itself is iterable (see __iter__), so return self.
        return self
    def __len__(self):
        # Dataset length capped by opt.max_dataset_size.
        return min(len(self.dataset), self.opt.max_dataset_size)
    def __iter__(self):
        # Yields batches; stops early once max_dataset_size batches were seen.
        for i, data in enumerate(self.dataloader):
            if i >= self.opt.max_dataset_size:
                break
            yield data
| 1,324 | 27.191489 | 75 | py |
RPMG | RPMG-main/poselstm-pytorch/data/unaligned_posenet_dataset.py | import os.path
import torchvision.transforms as transforms
from data.base_dataset import BaseDataset, get_posenet_transform
from data.image_folder import make_dataset
from PIL import Image
import PIL
import random
import numpy
class UnalignedPoseNetDataset(BaseDataset):
    """Image + 7-DoF pose dataset read from 'dataset_<phase>.txt'.

    Each data line is '<relative image path> <7 pose floats>' after a
    3-line header; presumably [x, y, z] position followed by a quaternion
    (the model consumes columns 3: as a quaternion) — confirm against the
    dataset files.
    """
    def initialize(self, opt):
        self.opt = opt
        self.root = opt.dataroot
        split_file = os.path.join(self.root , 'dataset_'+opt.phase+'.txt')
        # Column 0: image path (relative to root); columns 1-7: pose values.
        self.A_paths = numpy.loadtxt(split_file, dtype=str, delimiter=' ', skiprows=3, usecols=(0))
        self.A_paths = [os.path.join(self.root, path) for path in self.A_paths]
        self.A_poses = numpy.loadtxt(split_file, dtype=float, delimiter=' ', skiprows=3, usecols=(1,2,3,4,5,6,7))
        self.mean_image = numpy.load(os.path.join(self.root , 'mean_image.npy'))
        if opt.model == "poselstm":
            # PoseLSTM is trained without mean-image subtraction.
            self.mean_image = None
            print("mean image subtraction is deactivated")
        self.A_size = len(self.A_paths)
        self.transform = get_posenet_transform(opt, self.mean_image)
    def __getitem__(self, index):
        """Return {'A': image tensor, 'B': pose (7,), 'A_paths': path}."""
        A_path = self.A_paths[index % self.A_size]
        index_A = index % self.A_size
        # print('(A, B) = (%d, %d)' % (index_A, index_B))
        A_img = Image.open(A_path).convert('RGB')
        A_pose = self.A_poses[index % self.A_size]
        A = self.transform(A_img)
        return {'A': A, 'B': A_pose,
                'A_paths': A_path}
    def __len__(self):
        return self.A_size
    def name(self):
        return 'UnalignedPoseNetDataset'
| 1,554 | 34.340909 | 113 | py |
RPMG | RPMG-main/poselstm-pytorch/data/base_dataset.py | import torch.utils.data as data
from PIL import Image
import torchvision.transforms as transforms
import numpy
import torch
class BaseDataset(data.Dataset):
    """Minimal dataset interface; subclasses override name() and initialize()."""

    def __init__(self):
        data.Dataset.__init__(self)

    def name(self):
        """Human-readable dataset identifier."""
        return 'BaseDataset'

    def initialize(self, opt):
        """Option-driven setup hook; the base implementation is a no-op."""
        pass
def get_transform(opt):
    """Build the torchvision preprocessing pipeline selected by
    opt.resize_or_crop ('resize_and_crop' | 'crop' | 'scale_width' |
    'scale_width_and_crop'), plus optional random horizontal flip, ToTensor
    and normalization to [-1, 1].
    """
    transform_list = []
    if opt.resize_or_crop == 'resize_and_crop':
        osize = [opt.loadSize, opt.loadSize]
        # Fix: transforms.Scale was deprecated and later removed from
        # torchvision; Resize is the drop-in replacement (and is what the
        # 'scale_width_and_crop' branch below already uses).
        transform_list.append(transforms.Resize(osize, Image.BICUBIC))
        transform_list.append(transforms.RandomCrop(opt.fineSize))
    elif opt.resize_or_crop == 'crop':
        transform_list.append(transforms.RandomCrop(opt.fineSize))
    elif opt.resize_or_crop == 'scale_width':
        transform_list.append(transforms.Lambda(
            lambda img: __scale_width(img, opt.fineSize)))
    elif opt.resize_or_crop == 'scale_width_and_crop':
        transform_list.append(transforms.Resize(opt.loadSize, Image.BICUBIC))
        # transform_list.append(transforms.Lambda(
        #     lambda img: __scale_width(img, opt.loadSize)))
        if opt.isTrain:
            transform_list.append(transforms.RandomCrop(opt.fineSize))
        else:
            transform_list.append(transforms.CenterCrop(opt.fineSize))
    if opt.isTrain and not opt.no_flip:
        transform_list.append(transforms.RandomHorizontalFlip())
    transform_list += [transforms.ToTensor(),
                       transforms.Normalize((0.5, 0.5, 0.5),
                                            (0.5, 0.5, 0.5))]
    return transforms.Compose(transform_list)
def get_posenet_transform(opt, mean_image):
    """Compose the PoseNet preprocessing: resize, mean-subtract, crop, to-tensor.

    mean_image may be None, in which case subtraction becomes a no-op cast.
    """
    steps = [
        transforms.Resize(opt.loadSize, Image.BICUBIC),
        transforms.Lambda(lambda img: __subtract_mean(img, mean_image)),
        transforms.Lambda(lambda img: __crop_image(img, opt.fineSize, opt.isTrain)),
        transforms.Lambda(lambda img: __to_tensor(img)),
    ]
    return transforms.Compose(steps)
def __scale_width(img, target_width):
    """Resize a PIL image to *target_width*, preserving aspect ratio."""
    ow, oh = img.size
    if ow == target_width:
        return img
    new_h = int(target_width * oh / ow)
    return img.resize((target_width, new_h), Image.BICUBIC)
def __subtract_mean(img, mean_image):
    """PIL image -> float numpy array, minus *mean_image* when one is given."""
    arr = numpy.array(img).astype('float')
    if mean_image is None:
        return arr
    return arr - mean_image.astype('float')
def __crop_image(img, size, isTrain):
    # Crop a square size x size patch from an HWC numpy image:
    # random position during training, centered otherwise.
    h, w = img.shape[0:2]
    # w, h = img.size
    if isTrain:
        if w == size and h == size:
            return img
        x = numpy.random.randint(0, w - size)
        y = numpy.random.randint(0, h - size)
    else:
        x = int(round((w - size) / 2.))
        y = int(round((h - size) / 2.))
    return img[y:y+size, x:x+size, :]
    # return img.crop((x, y, x + size, y + size))
def __to_tensor(img):
    """HWC numpy image -> CHW torch tensor (no value rescaling)."""
    chw = img.transpose((2, 0, 1))
    return torch.from_numpy(chw)
| 2,999 | 34.294118 | 77 | py |
RPMG | RPMG-main/poselstm-pytorch/data/image_folder.py | ###############################################################################
# Code from
# https://github.com/pytorch/vision/blob/master/torchvision/datasets/folder.py
# Modified the original code so that it also loads images from the current
# directory as well as the subdirectories
###############################################################################
import torch.utils.data as data
from PIL import Image
import os
import os.path
# Recognized raster image suffixes (both cases); is_image_file() tests
# against this list — extend it here to support more formats.
IMG_EXTENSIONS = [
    '.jpg', '.JPG', '.jpeg', '.JPEG',
    '.png', '.PNG', '.ppm', '.PPM', '.bmp', '.BMP',
]
def is_image_file(filename):
    """True if *filename* ends with one of IMG_EXTENSIONS (case-sensitive)."""
    return filename.endswith(tuple(IMG_EXTENSIONS))
def make_dataset(dir):
    """Recursively collect every image path under *dir*, returned sorted."""
    assert os.path.isdir(dir), '%s is not a valid directory' % dir
    images = [
        os.path.join(root, fname)
        for root, _, fnames in sorted(os.walk(dir))
        for fname in fnames
        if is_image_file(fname)
    ]
    return sorted(images)
def default_loader(path):
    """Open *path* with PIL and force RGB mode.

    Fix: use a context manager so the underlying file handle is closed
    (convert() materializes a new image, so closing the original is safe;
    the old version leaked the handle until GC).
    """
    with Image.open(path) as im:
        return im.convert('RGB')
class ImageFolder(data.Dataset):
    """Flat image dataset over every image found (recursively) under *root*.

    Returns the (optionally transformed) image, and also its path when
    return_paths=True.
    """
    def __init__(self, root, transform=None, return_paths=False,
                 loader=default_loader):
        imgs = make_dataset(root)
        if len(imgs) == 0:
            raise(RuntimeError("Found 0 images in: " + root + "\n"
                               "Supported image extensions are: " +
                               ",".join(IMG_EXTENSIONS)))
        self.root = root
        self.imgs = imgs
        self.transform = transform
        self.return_paths = return_paths
        self.loader = loader
    def __getitem__(self, index):
        path = self.imgs[index]
        img = self.loader(path)
        if self.transform is not None:
            img = self.transform(img)
        if self.return_paths:
            return img, path
        else:
            return img
    def __len__(self):
        return len(self.imgs)
| 1,954 | 27.333333 | 79 | py |
RPMG | RPMG-main/Pascal3D_Img/S3.3D_Rotation/dataset.py | import numpy as np
import torch
from torch.utils.data import Dataset, DataLoader
import torchvision.transforms as tfs
from PIL import Image
import scipy.io
import os
from enum import IntEnum
from pytorch3d import transforms as trans
# format
# data['record']: {
# 'filename': string, filename
# 'folder': string
# 'source': struct for database etc
# 'imgname': string, path
# 'size': dimensions
# 'height'
# 'width'
# 'depth'
# 'segmented': 0/1 ?
# 'imgsize': dimensions [h,w,c]
# 'database': source database
# 'objects': list of objects
# 'class': string, class
# 'view': string, Frontal/Rear/Left/Right
# 'bbox': bounding box
# 'bndbox': bounding box as map (might only exist sometimes)
# 'orglabel': string includes a bunch of stuff (might only exist sometimes)
# 'truncated': 0/1
# 'occluded': 0/1
# 'difficult': 0/1
# 'anchors': list of anchors and their coordinates in image, contents depend on class, map between strings and anchors
# 'location': [], or [x,y] position in image
# 'status': something
# 'viewpoint': essentially angle
# 'azimuth_coarse':
# 'azimuth':
# 'elevation_coarse':
# 'elevation':
# 'distance':
# 'px': center
# 'py': center
# 'theta':
# 'error':
# 'interval_azimuth':
# 'interval_elevation':
# 'num_anchor':
# 'viewport':
# 'cad_index': related to which cad was used
# 'polygon': empty list
# 'point': empty list
# 'part': empty list
# 'hasparts': 0/1
# 'actions': list
# 'hasactions': 0/1
# 'mask': 0/1
# sometimes additional dimensions of size 1 is inserted probably due to matlab
def get_mat_element(data):
    """Strip nested ndarray wrappers and return the scalar inside.

    Raises PascalParseError on an empty array; a multi-element array must
    hold all-equal values (it then collapses to its first element),
    otherwise a generic Exception is raised.
    """
    while isinstance(data, np.ndarray):
        if len(data) == 0:
            raise PascalParseError("Encountered Empty List")
        if len(data) > 1:
            first = data[0]
            for item in data:
                if item != first:
                    print(data[0])
                    print(data[1])
                    raise (Exception("blah" + str(data)))
        data = data[0]
    return data
def get_mat_list(data):
    """Unwrap length-1 nesting; if the innermost value is a matlab record
    (np.void), return its immediate wrapper instead of the record itself."""
    wrapper = data
    while len(data) == 1:
        wrapper = data
        data = data[0]
    return wrapper if isinstance(data, np.void) else data
def pascal3d_get_bbox(data):
    """Extract the bounding box from a matlab object record as a float list."""
    fields = data.dtype.names
    if 'bbox' in fields:
        return [float(v) for v in get_mat_list(data['bbox'])]
    if 'bndbox' in fields:
        raise Exception("NOT IMPLEMENTED")
    raise PascalParseError("could not parse bounding box")
class PascalParseError(Exception):
    """Raised when a Pascal3D+ matlab annotation field cannot be parsed."""

    def __init__(self, string):
        Exception.__init__(self, string)
class PascalClasses(IntEnum):
    """Pascal3D+ object categories, numbered 1-12 as in the dataset."""
    AEROPLANE = 1
    BICYCLE = 2
    BOAT = 3
    BOTTLE = 4
    BUS = 5
    CAR = 6
    CHAIR = 7
    DININGTABLE = 8
    MOTORBIKE = 9
    SOFA = 10
    TRAIN = 11
    TVMONITOR = 12

    def __str__(self):
        # Lowercase member name matches the class strings in annotations.
        return self.name.lower()
# Reverse lookup: lowercase class name -> PascalClasses member.
pascal_3d_str_enum_map = {}
for v in PascalClasses:
    pascal_3d_str_enum_map[str(v)] = v
# Class strings from annotations that failed to map (kept for debugging).
failed_parse_strings = set()
def pascal3d_get_class(data):
    """Map an annotation's class string to a PascalClasses member."""
    class_str = get_mat_element(data['class'])
    key = class_str.lower()
    if key in pascal_3d_str_enum_map:
        return pascal_3d_str_enum_map[key]
    # Remember unknown labels for debugging, then signal the parse failure.
    failed_parse_strings.add(key)
    # print("unknown class: " + class_str)
    raise PascalParseError("could not parse class")
def pascal3d_idx_to_str(idx):
    """Numeric class index (1-12) -> lowercase class name."""
    return PascalClasses(idx).name.lower()
def parse_single_angle(viewpoint, angle_name):
    """Read one angle field from a viewpoint record, falling back to its
    '<name>_coarse' variant; raises PascalParseError if neither parses."""
    fields = viewpoint.dtype.names
    if angle_name in fields:
        try:
            return float(get_mat_element(viewpoint[angle_name]))
        except PascalParseError:
            pass  # fall through to the coarse variant
    coarse_name = angle_name + "_coarse"
    if coarse_name in fields:
        return float(get_mat_element(viewpoint[coarse_name]))
    raise PascalParseError("No angle found")
def pascal3d_get_angle(data):
    """Return [azimuth, elevation, theta] in degrees for one object record.

    An all-zero triple is treated as an unannotated pose and rejected.
    """
    viewpoint = get_mat_element(data['viewpoint'])
    angles = [parse_single_angle(viewpoint, field)
              for field in ('azimuth', 'elevation', 'theta')]
    if angles == [0, 0, 0]:
        raise PascalParseError("Angle probably not entered")
    return angles  # note in degree
def pascal3d_get_point(data):
    """Return the object's [px, py] image-plane center."""
    vp = get_mat_element(data['viewpoint'])
    return [float(get_mat_element(vp['px'])), float(get_mat_element(vp['py']))]
def pascal3d_get_distance(data):
    """Return the camera distance recorded for the object."""
    vp = get_mat_element(data['viewpoint'])
    return float(get_mat_element(vp['distance']))
# Keys of the per-object annotation dicts produced by mat_data_to_dict_data()
# and consumed by the Dataset classes below.
DICT_BOUNDING_BOX = 'bounding_box'
DICT_CLASS = 'class'
DICT_ANGLE = 'angle'
DICT_OCCLUDED = 'occluded'
DICT_TRUNCATED = 'truncated'
DICT_DIFFICULT = 'difficult'
DICT_POINT = 'px'
DICT_OBJECT_LIST = 'obj_list'
DICT_OBJECT_INSTANCE = 'obj_instance'
DICT_FILENAME = 'filename'
DICT_DISTANCE = 'distance'
DICT_CAMERA = 'camera'
DICT_CAD_INDEX = 'cad_index'
def get_pascal_camera_params(mat_data):
    """Return (focal, viewport) for an object, defaulting to (1, 3000).

    Non-default values are logged to stdout, as is each fallback.
    """
    viewpoint = get_mat_element(mat_data['viewpoint'])
    try:
        focal = get_mat_element(viewpoint['focal'])
    except PascalParseError:
        print("default_focal")
        focal = 1
    if focal != 1:
        print("focal {}".format(focal))
    try:
        viewport = get_mat_element(viewpoint['viewport'])
    except PascalParseError:
        # (message string kept verbatim, typo included)
        print("default_viewpoer")
        viewport = 3000
    if viewport != 3000:
        print("viewport {}".format(viewport))
    return float(focal), float(viewport)
def mat_data_to_dict_data(mat_data, folder):
    """Convert one loaded Pascal3D+ .mat annotation into a plain dict.

    Returns {DICT_OBJECT_LIST: [...], DICT_FILENAME: full image path};
    objects whose fields fail to parse (PascalParseError) are skipped.
    """
    record = get_mat_element(mat_data['record'])
    objects = []
    for obj in get_mat_list(record['objects']):
        try:
            # Dict-literal evaluation preserves the original field order,
            # so side effects (prints) and failure points are unchanged.
            parsed = {
                DICT_BOUNDING_BOX: pascal3d_get_bbox(obj),
                DICT_CLASS: pascal3d_get_class(obj).value,
                DICT_ANGLE: pascal3d_get_angle(obj),
                DICT_OCCLUDED: bool(get_mat_element(obj['occluded'])),
                DICT_TRUNCATED: bool(get_mat_element(obj['truncated'])),
                DICT_POINT: pascal3d_get_point(obj),
                DICT_DIFFICULT: bool(get_mat_element(obj['difficult'])),
                DICT_DISTANCE: pascal3d_get_distance(obj),
                DICT_CAMERA: get_pascal_camera_params(obj),
                DICT_CAD_INDEX: int(get_mat_element(obj['cad_index'])),
            }
        except PascalParseError:
            continue
        objects.append(parsed)
    return {
        DICT_OBJECT_LIST: objects,
        DICT_FILENAME: os.path.join(folder, get_mat_element(record['filename'])),
    }
def process_annotated_image(
        im, left, top, right, bottom, azimuth, elevation, theta,
        augment, reverse_theta, crop):
    """Crop/resize a PIL image and build its ground-truth rotation matrix.

    Bbox coordinates are matlab-style 1-based; angles are in degrees.
    augment: jitter the bbox, randomly flip, and color-jitter.
    reverse_theta: negate theta (RenderForCNN filename convention).
    crop: crop to the bbox before the 224x224 resize.
    Returns (image tensor, (1, 3, 3) rotation matrix).
    """
    # perturb bbox randomly
    # inputs are matlab (start at 1)
    if augment:
        max_shift = 7
        left = left - 1 + np.random.randint(-max_shift, max_shift+1)
        top = top - 1 + np.random.randint(-max_shift, max_shift+1)
        right = right - 1 + np.random.randint(-max_shift, max_shift+1)
        bottom = bottom - 1 + np.random.randint(-max_shift, max_shift+1)
    else:
        left = left - 1
        top = top - 1
        right = right - 1
        bottom = bottom - 1
    # Clamp the (possibly jittered) bbox to the image bounds.
    width, height = im.size
    left = min(max(left,0), width)
    top = min(max(top, 0), height)
    right = min(max(right, 0), width)
    bottom = min(max(bottom, 0), height)
    # Resizing can change aspect ratio, so we could adjust the ground truth
    # rotation accordingly (leaving as-is since initial results didn't change)
    if crop:
        im = im.crop((left, top, right, bottom))
    im = im.resize([224,224])
    # Inputs are in degrees, convert to rad.
    az = azimuth* np.pi / 180.0
    el = elevation * np.pi / 180.0
    th = theta * np.pi / 180.0
    # Reversing theta for RenderForCNN data since that theta was set from filename
    # which has negative theta (see github.com/ShapeNet/RenderForCNN).
    if reverse_theta:
        th = -th
    if augment:
        # Flip
        rand = np.random.uniform(0,1)
        if rand < 0.5:
            # Horizontal flip mirrors azimuth and in-plane rotation.
            az = -az
            th = -th
            im = im.transpose(Image.FLIP_LEFT_RIGHT)
        im = tfs.ColorJitter(brightness=0.1, contrast=0.5, hue=0.2, saturation=0.5)(im)
        #im_crop = tf.clip_by_value(im_crop, 0.0, 1.0)
    # R = R_z(th) * R_x(el−pi/2) * R_z(−az)
    R1 = trans.euler_angles_to_matrix(torch.Tensor([-az, 0, 0]).unsqueeze(0), 'ZYX')
    R2 = trans.euler_angles_to_matrix(torch.Tensor([th, 0, el - np.pi / 2.0]).unsqueeze(0), 'ZYX')
    R = torch.bmm(R2, R1)
    return tfs.ToTensor()(im), R
# NOTE(review): duplicate of the PascalParseError defined earlier in this
# module; this re-definition shadows the first (they are identical).
class PascalParseError(Exception):
    def __init__(self, string):
        super().__init__(string)
# NOTE(review): duplicate of the get_mat_element defined earlier in this
# module; this re-definition shadows the first (the bodies are identical).
def get_mat_element(data):
    while isinstance(data, np.ndarray):
        if len(data) == 0:
            raise PascalParseError("Encountered Empty List")
        if len(data) > 1:
            x = data[0]
            for y in data:
                if y != x:
                    print(data[0])
                    print(data[1])
                    raise(Exception("blah" + str(data)))
        data = data[0]
    return data
def _read_split_file(path):
    # Helper: one image ID per line; strips trailing CR/LF and skips blanks.
    ids = []
    with open(path, 'r') as f:
        for line in f:
            line = line.rstrip('\r\n')
            if line:
                ids.append(line)
    return ids
def create_imagenet_anno(data_folder, save_folder, category, validation_split_size=0.3):
    """Convert the imagenet-sourced Pascal3D+ .mat annotations of *category*
    into one .npy file per clean object, split into train/val/test folders.

    data_folder: Pascal3D+ release root (Annotations/, Images/, Image_sets/).
    save_folder: output root; '<category>_imagenet_{train,val,test}' dirs are created.
    validation_split_size: fraction of the official train list held out as val.
    Occluded, truncated or difficult objects are skipped.
    """
    anno_folder = os.path.join(data_folder, 'Annotations', category + '_imagenet')
    img_folder = os.path.join(data_folder, 'Images', category + '_imagenet')
    train_list_path = os.path.join(data_folder, 'Image_sets', category + '_imagenet_train.txt')
    test_list_path = os.path.join(data_folder, 'Image_sets', category + '_imagenet_val.txt')
    # Deterministic val subsample of the sorted official train list.
    train_val_split = sorted(_read_split_file(train_list_path))
    # Fix: np.int was removed in NumPy 1.24 — the builtin int is the dtype.
    val_idx = (np.arange(len(train_val_split) * validation_split_size) / validation_split_size).astype(int)
    val_split = [train_val_split[i] for i in val_idx]
    train_split = sorted(list(set(train_val_split) - set(val_split)))
    # The official 'val' list serves as our test split.
    test_split = _read_split_file(test_list_path)
    name_lst = ['train', 'val', 'test']
    split = [train_split, val_split, test_split]
    save_path = [os.path.join(save_folder, category + '_imagenet_' + n) for n in name_lst]
    # Create new annotation files for each split.
    for i in range(len(name_lst)):
        if not os.path.isdir(save_path[i]):
            os.mkdir(save_path[i])
        for instance in split[i]:
            annopath = os.path.join(anno_folder, instance + '.mat')
            anno = scipy.io.loadmat(annopath)
            parsed = mat_data_to_dict_data(anno, img_folder)
            for num, obj in enumerate(parsed['obj_list']):
                obj['imgpath'] = parsed['filename']
                if obj['occluded'] or obj['truncated'] or obj['difficult']:
                    continue
                save_file = os.path.join(save_path[i], instance + '_' + str(num) + '.npy')
                np.save(save_file, obj)
        print('Total imagenet %s obj number for %s: %d' %(category, name_lst[i], len(os.listdir(save_path[i]))))
def create_pascal_anno(data_folder, save_folder, category):
    """Convert the VOC-sourced Pascal3D+ .mat annotations of *category* into
    per-object .npy files under '<category>_pascal_train'.

    Occluded, truncated or difficult objects are skipped.
    """
    pascal_anno_folder = os.path.join(data_folder, 'Annotations', category + '_pascal')
    pascal_img_folder = os.path.join(data_folder, 'Images', category + '_pascal')
    # train
    train_split = os.listdir(pascal_anno_folder)
    save_path = os.path.join(save_folder, category+'_pascal_train')
    if not os.path.isdir(save_path):
        os.mkdir(save_path)
    for instance in train_split:
        annopath = os.path.join(pascal_anno_folder, instance)
        anno = scipy.io.loadmat(annopath)
        # NOTE(review): `dict` shadows the builtin of the same name.
        dict = mat_data_to_dict_data(anno, pascal_img_folder)
        for num, obj in enumerate(dict['obj_list']):
            obj['imgpath'] = dict['filename']
            if obj['occluded'] or obj['truncated'] or obj['difficult']:
                continue
            # instance[:-4] drops the '.mat' suffix.
            save_file = os.path.join(save_path, instance[:-4] + '_' + str(num) + '.npy')
            np.save(save_file, obj)
    print('Total pascal %s obj number: %d' %(category, len(os.listdir(save_path))))
class Pascal3dDataset(torch.utils.data.Dataset):
    """Real-image Pascal3D+ dataset: yields (image tensor, rotation matrix).

    anno_folder: directory of per-object .npy annotations (see create_*_anno).
    augment: enable bbox jitter / flip / color jitter at load time.
    voc_train_addition_folder: optional second annotation directory whose
        files are appended to the index.
    """
    def __init__(self, anno_folder, augment=False, voc_train_addition_folder=None):
        self.anno_paths = [os.path.join(anno_folder, i) for i in os.listdir(anno_folder)]
        if voc_train_addition_folder is not None:
            # Fix: the listdir() names must be joined with their folder —
            # the old code appended bare filenames, so np.load() in
            # __getitem__ could never find the VOC additions.
            self.anno_paths.extend(
                os.path.join(voc_train_addition_folder, i)
                for i in os.listdir(voc_train_addition_folder))
        self.augment = augment
        self.size = len(self.anno_paths)
        print('Load Pascal Dataset, Length:', self.size)
    def __getitem__(self, idx):
        """Load one annotation, crop/augment its image, return (img, R)."""
        anno = np.load(self.anno_paths[idx], allow_pickle=True).item()
        img = Image.open(anno['imgpath'])
        left, top, right, bottom = anno['bounding_box']
        azimuth, elevation, theta = anno['angle']
        img, R = process_annotated_image(
            img, left, top, right, bottom, azimuth, elevation, theta,
            augment=self.augment, reverse_theta=False, crop=True)
        return img, R
    def __len__(self):
        return self.size
class SyntheticDataset(torch.utils.data.Dataset):
    """Rendered-image dataset (RenderForCNN): yields (image tensor, rotation).

    Collects every .png under syn_folder/category_id/<instance>/; the pose
    is encoded in the filename as ..._a{az}_e{el}_t{theta}_d{dist}.png.
    """
    def __init__(self, syn_folder, category_id):
        category_dir = os.path.join(syn_folder, category_id)
        self.paths = []
        for instance in os.listdir(category_dir):
            instance_dir = os.path.join(category_dir, instance)
            for fname in os.listdir(instance_dir):
                if '.png' in fname:
                    self.paths.append(os.path.join(instance_dir, fname))
        self.size = len(self.paths)
        print('Load Synthetic Dataset, Length:', self.size)
    def __getitem__(self, idx):
        fpath = self.paths[idx]
        img = Image.open(fpath)
        # Pose fields counted from the end: _a.._e.._t.._d...png
        parts = fpath.split('_')
        azimuth = int(parts[-4][1:])
        elevation = int(parts[-3][1:])
        theta = int(parts[-2][1:])
        img, R = process_annotated_image(
            img, 0, 0, 0, 0, azimuth, elevation, theta,
            augment=False, reverse_theta=True, crop=False)
        return img, R
    def __len__(self):
        return self.size
# Pascal3D+ category name -> ShapeNet synset ID; used to locate the folder
# of rendered synthetic images for each class.
name2id = {
    'aeroplane':'02691156',
    'bicycle':'02834778',
    'boat':'02858304',
    'bottle':'02876657',
    'bus':'02924116',
    'car':'02958343',
    'chair':'03001627',
    'diningtable':'04379243',
    'motorbike':'03790512',
    'sofa': '04256520',
    'train':'04468005',
    'tvmonitor':'03211117',
}
def get_dataloader(mode, config):
    """Build the DataLoader(s) for one category.

    mode == 'train': returns (real_loader, synthetic_loader), each with half
    the configured batch size so a combined batch mixes both sources.
    mode == 'test': returns a single loader over the imagenet test split.
    Any other mode falls through and returns None.
    """
    pascal3d_path = config.pascal3d_path
    syn_path = config.syn_path
    save_anno_path = os.path.join(config.pascal3d_path, 'my_anno')
    category = config.category
    if config.create_anno and mode == 'train':
        # One-time conversion of .mat annotations into per-object .npy files.
        if not os.path.isdir(save_anno_path):
            os.mkdir(save_anno_path)
        create_imagenet_anno(pascal3d_path, save_anno_path, category)
        create_pascal_anno(pascal3d_path, save_anno_path, category)
    if mode == 'train':
        anno_path_train = os.path.join(save_anno_path, category + '_imagenet_train')
        if config.voc_train:
            anno_path_train_voc = os.path.join(save_anno_path, category + '_pascal_train')
            pascal3d_train_dataset = Pascal3dDataset(anno_path_train, augment=True,voc_train_addition_folder=anno_path_train_voc)
        else:
            pascal3d_train_dataset = Pascal3dDataset(anno_path_train, augment=True)
        train_loader1 = DataLoader(pascal3d_train_dataset, batch_size=int(config.batch_size/2), shuffle=True, num_workers=config.num_workers)
        syn_train_dataset = SyntheticDataset(syn_path, name2id[category])
        train_loader2 = DataLoader(syn_train_dataset, batch_size=int(config.batch_size/2), shuffle=True, num_workers=config.num_workers)
        return train_loader1, train_loader2
    elif mode == 'test':
        anno_path_test = os.path.join(save_anno_path, category + '_imagenet_test')
        pascal3d_test_dataset = Pascal3dDataset(anno_path_test)
        test_loader = DataLoader(pascal3d_test_dataset, batch_size=config.batch_size, shuffle=False, num_workers=config.num_workers)
        return test_loader
if __name__ == '__main__':
    # Standalone entry point: pre-generate per-object .npy annotations for
    # every category. Run once before training.
    import argparse
    from os.path import join
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument("--data_root", type=str, default='../dataset')
    args = arg_parser.parse_args()
    pascal3d_path = join(args.data_root, 'PASCAL3D+_release1.1')
    syn_path = join(args.data_root, 'syn_images_cropped_bkg_overlaid')
    save_anno_path = os.path.join(pascal3d_path, 'my_anno')
    category_lst = ['aeroplane','sofa', 'bicycle','boat','bottle','bus',
                    'car', 'chair', 'diningtable', 'motorbike', 'train', 'tvmonitor']
    if not os.path.isdir(save_anno_path):
        os.mkdir(save_anno_path)
    #create annotation first!!!
    for category in category_lst:
        create_imagenet_anno(pascal3d_path, save_anno_path, category)
        # # No need to do below line
        # create_pascal_anno(pascal3d_path, save_anno_path, category)
    # The string below is inert usage-example code kept for reference.
    '''
    anno_path_train = os.path.join(save_anno_path, category+'_imagenet_train')
    anno_path_val = os.path.join(save_anno_path, category+'_imagenet_val')
    anno_path_test = os.path.join(save_anno_path, category+'_imagenet_test')
    pascal3d_train_dataset = Pascal3dDataset(anno_path_train, augment=True)
    pascal3d_test_dataset = Pascal3dDataset(anno_path_test)
    pascal3d_val_dataset = Pascal3dDataset(anno_path_val)
    syn_train_dataset = SyntheticDataset(syn_path, name2id[category])
    train_loader1 = DataLoader(pascal3d_train_dataset, batch_size=16, shuffle=True, num_workers=4)
    train_loader2 = DataLoader(syn_train_dataset, batch_size=16, shuffle=True, num_workers=4)
    #training
    for real_data, syn_data in zip(train_loader1,train_loader2):
        real_img, real_gt = real_data
        syn_img, syn_gt = syn_data
        train_data = torch.cat((real_img,syn_img),0) #[32,3,224,224]
        train_gt = torch.cat((real_gt,syn_gt),0) #[32,1,3,3]
        print(train_data.shape, train_gt.shape)
    '''
RPMG | RPMG-main/Pascal3D_Img/S3.3D_Rotation/networks.py | import torch
from torch import nn
from torch import Tensor
from typing import Callable, Any, Optional, List
def get_network(config):
    """Build the pose-regression backbone for this experiment's config."""
    return MobileNetV2(num_classes=config.num_classes)
def set_requires_grad(nets, requires_grad=False):
    """Toggle gradient tracking for one network or a list of networks.

    Disabling gradients avoids unnecessary computation (e.g. for frozen
    feature extractors).

    Parameters:
        nets (network list)   -- a single network or a list of networks
        requires_grad (bool)  -- whether the networks require gradients
    """
    net_list = nets if isinstance(nets, list) else [nets]
    for candidate in net_list:
        if candidate is None:
            continue
        for param in candidate.parameters():
            param.requires_grad = requires_grad
def _make_divisible(v: float, divisor: int, min_value: Optional[int] = None) -> int:
"""
This function is taken from the original tf repo.
It ensures that all layers have a channel number that is divisible by 8
It can be seen here:
https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py
"""
if min_value is None:
min_value = divisor
new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
# Make sure that round down does not go down by more than 10%.
if new_v < 0.9 * v:
new_v += divisor
return new_v
class ConvBNActivation(nn.Sequential):
    """Conv2d -> norm -> activation block (defaults: BatchNorm2d + ReLU6).

    ``padding`` is derived from ``kernel_size``/``dilation`` so that the
    spatial size is preserved when ``stride == 1``.
    """

    def __init__(
        self,
        in_planes: int,
        out_planes: int,
        kernel_size: int = 3,
        stride: int = 1,
        groups: int = 1,
        norm_layer: Optional[Callable[..., nn.Module]] = None,
        activation_layer: Optional[Callable[..., nn.Module]] = None,
        dilation: int = 1,
    ) -> None:
        padding = (kernel_size - 1) // 2 * dilation
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        if activation_layer is None:
            activation_layer = nn.ReLU6
        # BUGFIX: was ``super(ConvBNReLU, self).__init__`` which relies on the
        # ``ConvBNReLU`` alias defined *after* this class body; the zero-argument
        # form is equivalent at call time and has no forward reference.
        super().__init__(
            nn.Conv2d(in_planes, out_planes, kernel_size, stride, padding, dilation=dilation, groups=groups,
                      bias=False),
            norm_layer(out_planes),
            activation_layer(inplace=True)
        )
        self.out_channels = out_planes


# necessary for backwards compatibility
ConvBNReLU = ConvBNActivation
class InvertedResidual(nn.Module):
    """MobileNetV2 inverted residual: expand (pw) -> depthwise -> project (pw-linear).

    A residual skip connection is used only when ``stride == 1`` and the
    input/output channel counts match.
    """

    def __init__(
        self,
        inp: int,
        oup: int,
        stride: int,
        expand_ratio: int,
        norm_layer: Optional[Callable[..., nn.Module]] = None
    ) -> None:
        super(InvertedResidual, self).__init__()
        self.stride = stride
        assert stride in [1, 2]
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        hidden_dim = int(round(inp * expand_ratio))
        self.use_res_connect = stride == 1 and inp == oup

        stages: List[nn.Module] = []
        if expand_ratio != 1:
            # pointwise expansion
            stages.append(ConvBNReLU(inp, hidden_dim, kernel_size=1, norm_layer=norm_layer))
        # depthwise conv
        stages.append(ConvBNReLU(hidden_dim, hidden_dim, stride=stride, groups=hidden_dim, norm_layer=norm_layer))
        # pointwise-linear projection (no activation)
        stages.append(nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False))
        stages.append(norm_layer(oup))

        self.conv = nn.Sequential(*stages)
        self.out_channels = oup
        self._is_cn = stride > 1

    def forward(self, x: Tensor) -> Tensor:
        branch = self.conv(x)
        return x + branch if self.use_res_connect else branch
class MobileNetV2(nn.Module):
    def __init__(
        self,
        num_classes: int = 1000,
        width_mult: float = 1.0,
        inverted_residual_setting: Optional[List[List[int]]] = None,
        round_nearest: int = 8,
        block: Optional[Callable[..., nn.Module]] = None,
        norm_layer: Optional[Callable[..., nn.Module]] = None
    ) -> None:
        """
        MobileNet V2 main class
        Args:
            num_classes (int): Number of classes
            width_mult (float): Width multiplier - adjusts number of channels in each layer by this amount
            inverted_residual_setting: Network structure
            round_nearest (int): Round the number of channels in each layer to be a multiple of this number
            Set to 1 to turn off rounding
            block: Module specifying inverted residual building block for mobilenet
            norm_layer: Module specifying the normalization layer to use
        """
        super(MobileNetV2, self).__init__()
        if block is None:
            block = InvertedResidual
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        input_channel = 32
        last_channel = 1280
        if inverted_residual_setting is None:
            # Default MobileNetV2 architecture table:
            # t = expansion factor, c = output channels, n = repeats, s = first stride
            inverted_residual_setting = [
                # t, c, n, s
                [1, 16, 1, 1],
                [6, 24, 2, 2],
                [6, 32, 3, 2],
                [6, 64, 4, 2],
                [6, 96, 3, 1],
                [6, 160, 3, 2],
                [6, 320, 1, 1],
            ]
        # only check the first element, assuming user knows t,c,n,s are required
        if len(inverted_residual_setting) == 0 or len(inverted_residual_setting[0]) != 4:
            raise ValueError("inverted_residual_setting should be non-empty "
                             "or a 4-element list, got {}".format(inverted_residual_setting))
        # building first layer
        input_channel = _make_divisible(input_channel * width_mult, round_nearest)
        self.last_channel = _make_divisible(last_channel * max(1.0, width_mult), round_nearest)
        features: List[nn.Module] = [ConvBNReLU(3, input_channel, stride=2, norm_layer=norm_layer)]
        # building inverted residual blocks
        for t, c, n, s in inverted_residual_setting:
            output_channel = _make_divisible(c * width_mult, round_nearest)
            for i in range(n):
                # only the first block of each stage downsamples
                stride = s if i == 0 else 1
                features.append(block(input_channel, output_channel, stride, expand_ratio=t, norm_layer=norm_layer))
                input_channel = output_channel
        # building last several layers
        features.append(ConvBNReLU(input_channel, self.last_channel, kernel_size=1, norm_layer=norm_layer))
        # make it nn.Sequential
        self.features = nn.Sequential(*features)
        # building classifier
        # NOTE: custom 3-layer MLP head (1280 -> 256 -> 64 -> num_classes) with
        # BatchNorm1d; this replaces torchvision's single-Linear classifier.
        self.classifier = nn.Sequential(
            nn.Dropout(0.2),
            nn.Linear(self.last_channel, 256),
            nn.BatchNorm1d(256),
            nn.ReLU6(inplace=True),
            nn.Linear(256, 64),
            nn.BatchNorm1d(64),
            nn.ReLU6(inplace=True),
            nn.Linear(64, num_classes),
        )
        # weight initialization
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out')
                if m.bias is not None:
                    nn.init.zeros_(m.bias)
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.ones_(m.weight)
                nn.init.zeros_(m.bias)
            elif isinstance(m, nn.Linear):
                nn.init.normal_(m.weight, 0, 0.01)
                nn.init.zeros_(m.bias)
    def _forward_impl(self, x: Tensor) -> Tensor:
        # This exists since TorchScript doesn't support inheritance, so the superclass method
        # (this one) needs to have a name other than `forward` that can be accessed in a subclass
        x = self.features(x)
        # Cannot use "squeeze" as batch-size can be 1 => must use reshape with x.shape[0]
        x = nn.functional.adaptive_avg_pool2d(x, (1, 1)).reshape(x.shape[0], -1)
        x = self.classifier(x)
        return x
    def forward(self, x: Tensor) -> Tensor:
        return self._forward_impl(x)
if __name__ == '__main__':
    # Smoke test: 6-output head on a batch of 4 random 227x227 crops.
    model = MobileNetV2(6)
    dummy = torch.randn(4, 3, 227, 227)
    print(model(dummy).shape)
RPMG | RPMG-main/Pascal3D_Img/S3.3D_Rotation/trainval_workdir.py | """
@Author : Shuai Liao
"""
import torch
import torch.optim
import os, sys, time
from time import gmtime, strftime
import numpy as np
from math import pi
from easydict import EasyDict as edict
from collections import OrderedDict as odict
#
from basic.common import Open, env, add_path, RefObj as rdict, argv2dict, is_py3
this_dir = os.path.dirname(os.path.realpath(__file__))
#
from pytorch_util.libtrain.yaml_netconf import parse_yaml, import_module_v2
from pytorch_util.libtrain.tools import get_stripped_DataParallel_state_dict, patch_saved_DataParallel_state_dict
from txt_table_v1 import TxtTable
#
from tensorboardX import SummaryWriter
from tqdm import tqdm
# =========== Parsing from working path =========
pwd = os.getcwd() # Assume: $base_dir/S3.3D_Rotation/{MtdFamily}/{MtdType}
MtdFamily, MtdType = pwd.split(os.sep)[-2:]
# ================================================
# ------- args from convenient run yaml ---------
# For the purpose that no need to specific each run (without argparse)
convenient_run_argv_yaml = \
'''
MtdFamily : {MtdFamily} # e.g. regQuatNet
MtdType : {MtdType} # e.g. reg_Direct, reg_Sexp, reg_Sflat
net_module : {net_module} # same as MtdType here, namely import from 'MtdType'.py
net_arch : {net_arch} # e.g. alexnet, vgg16
base_dir : {base_dir} # e.g. path/to/S3.3D_Rotation
LIB_DIR : {base_dir}/lib
train_view : 100V # 20V #
work_dir : './snapshots/{net_arch}'
nr_epoch : 150
test_step_epoch : 10
this_dir : {this_dir}
base_lr : 0.001
'''.format(net_arch='alexnet',
base_dir=this_dir,
this_dir=pwd,
#
MtdFamily=MtdFamily,
MtdType=MtdType,
net_module=MtdFamily,
)
run_args = parse_yaml(convenient_run_argv_yaml)
# ------- arg from argparse -----------------
import argparse
parser = argparse.ArgumentParser(description='PyTorch Training')
parser.add_argument('conf_yml_file', default='', type=str, metavar='PATH',
help='path to conf_yml_file (default: none)')
# parser.add_argument('work_dir' , default='', type=str, metavar='PATH',)
# help='path to work_dir (default: none)')
parser.add_argument('gpu_ids', default='', type=str, metavar='PATH',
help='e.g. 0 or 0,1,2,3')
parser.add_argument('--resume', action="store_true", default=False,
help='to resume by the last checkpoint')
parser.add_argument('--pretrain', default=None, type=str, metavar='PATH',
help='path to pretrained checkpoint (default: none)')
parser.add_argument('--optimizer', default='SGD', type=str, help='SGD or Adam')
parser.add_argument('--test_only', action="store_true", default=False,
help='only do test once.')
_pargs, _rest = parser.parse_known_args() # parser.parse_args()
# parse the rest undefined args with "--key=value" form.
_cmd_args = argv2dict(_rest)
_cmd_args.update(vars(_pargs))
#
run_args.update(_cmd_args)
from string import Template
template_str = open(os.path.join(this_dir, 'conf_template.yml')).read()
template = Template(template_str)
print(run_args)
conf_yml_str = template.substitute(run_args)
# -- parse module_yml_file
opt = parse_yaml(conf_yml_str)
#
opt.update(run_args)
#
from ordered_easydict import Ordered_EasyDict as oedict
opt = oedict(opt) # Use opt for reference all configurations.
# ------ Import modules ----------
[(_dataset_module, _dataset_kwargs), netcfg] = import_module_v2(opt.IMPORT_dataset) # pred2angle
[(_net_module, _net_kwargs)] = import_module_v2(opt.IMPORT_makenet) # [_net_type]
[(eval_cates, _), (compute_geo_dists, _)] = import_module_v2(opt.IMPORT_eval.GTbox)
net_arch = opt.net_arch # or _net_kwargs.net_arch
_cfg = netcfg[net_arch] # [opt.net_arch]
np.random.seed(_cfg.RNG_SEED)
torch.manual_seed(_cfg.RNG_SEED)
if opt.use_gpu:
torch.cuda.manual_seed(_cfg.RNG_SEED)
# ---------------------------------------------------------------------------------------------------[dataset]
dataset_test = _dataset_module(collection='test', sampling=0.2, **_dataset_kwargs) #
dataset_train = _dataset_module(collection='train', **_dataset_kwargs) #
# change the sampling of dataset: e.g. sampling: {imagenet:1.0, synthetic:1.0}
# 'ModelNet10/SO3_100V.white_BG_golden_FG'
# From default.run.conf.yml.sh
if opt.cates is None:
opt.cates = dataset_train.cates
cates = opt.cates
if 'batch_size' in opt:
batch_size = opt.batch_size
else:
batch_size = _cfg.TRAIN.BATCH_SIZE
nr_GPUs = len(opt.gpu_ids)
assert nr_GPUs >= 1, opt.gpu_ids
if nr_GPUs > 1:
print('--------------------- Use multiple-GPU %s -------------------------' % opt.gpu_ids)
print(' batch_size = %s' % batch_size) # (batch_size*nr_GPUs)
print(' num_workers = %s' % (opt.num_workers * nr_GPUs))
#
train_loader = torch.utils.data.DataLoader(dataset_train, batch_size=batch_size, shuffle=True, # batch_size*nr_GPUs
num_workers=opt.num_workers * nr_GPUs, pin_memory=opt.pin_memory,
sampler=None)
# ---------------------------------------------------------------------------------------------------[model]
print('[makenet] nr_cate: ', len(cates))
model = _net_module(nr_cate=len(cates), **_net_kwargs) # len(_cfg.cates))
if 'fix_conv1_conv2' in opt.keys() and opt.fix_conv1_conv2:
model.fix_conv1_conv2()
#
watch_targets = model.targets
# ---------------------------------------------------------------------------------------------------[optimizer]
params = []
for name, param in model.named_parameters():
print('----(*) ', name)
if param.requires_grad:
params.append(param)
print('[Optimizer] %s' % opt.optimizer)
if opt.optimizer == 'Adam':
optimizer = torch.optim.Adam(params, lr=opt.base_lr) # , weight_decay=opt.weight_decay)
elif opt.optimizer == 'SGD':
optimizer = torch.optim.SGD(params, opt.base_lr, # model.parameters(), opt.base_lr,
momentum=opt.momentum,
weight_decay=opt.weight_decay)
else:
raise NotImplementedError
work_dir = opt.work_dir
work_dir += '.%s' % opt.train_view
_short_work_dir = os.path.abspath(work_dir)[len(os.path.abspath(opt.base_dir)) + 1:]
# global state variables.
start_it = 0
start_epoch = 0
from pytorch_util.libtrain import rm_models, list_models
from pytorch_util.libtrain.reducer import reducer, reducer_group
# Log file.
script_name, _ = os.path.splitext(os.path.basename(__file__))
log_filename = '%s/%s.log' % (work_dir, script_name)
if os.path.exists(log_filename): # backup previous content.
pre_log_content = open(log_filename).read()
logf = Open(log_filename, 'w')
def logprint(s):
    """Echo *s* to stdout (carriage-return prefixed) and append it to the run log."""
    print(f"\r{s} ")
    logf.write(s + "\n")
# -- Resume or use pretrained (Note not imagenet pretrain.)
# Exactly one of --resume / --pretrain may be given.
assert not (opt.resume and opt.pretrain is not None), 'Only resume or pretrain can exist.'
if opt.resume:
    # Resume from the newest checkpoint in work_dir, restoring model,
    # optimizer state and the (iteration, epoch) counters.
    iter_nums, net_name = list_models(work_dir)  # ('snapshots')
    assert len(iter_nums) > 0, "No models available"
    latest_model_name = os.path.join(work_dir, '%s_iter_%s.pth.tar' % (net_name, iter_nums[-1]))
    print('\n\nResuming from: %s \n\n' % latest_model_name)
    if os.path.isfile(latest_model_name):
        print("=> loading checkpoint '{}'".format(latest_model_name))
        checkpoint = torch.load(latest_model_name)
        start_it, start_epoch = checkpoint['it_and_epoch']  # mainly for control lr: (it, epoch)
        model.load_state_dict(checkpoint['state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer'])
        # fix for pytorch 4.0.x [https://github.com/jwyang/faster-rcnn.pytorch/issues/222]
        # (restored optimizer state tensors land on CPU; move them back to GPU)
        for state in optimizer.state.values():
            for k, v in state.items():
                if isinstance(v, torch.Tensor):
                    state[k] = v.cuda()
        print("=> loaded checkpoint '{}' (it_and_epoch {})"
              .format(latest_model_name, checkpoint['it_and_epoch']))
    else:
        print("=> no checkpoint found at '{}'".format(latest_model_name))  # unnecessary line
elif opt.pretrain is not None:
    # Warm-start weights/optimizer from an explicit checkpoint path, but keep
    # iteration/epoch counters at zero (unlike --resume).
    print('\n\nUsing pretrained: %s \n\n' % opt.pretrain)
    if os.path.isfile(opt.pretrain):
        print("=> loading checkpoint '{}'".format(opt.pretrain))
        checkpoint = torch.load(opt.pretrain)
        model.load_state_dict(checkpoint['state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer'])
        print("=> loaded checkpoint '{}' (it_and_epoch {})"
              .format(opt.pretrain, checkpoint['it_and_epoch']))
    else:
        print("=> no checkpoint found at '{}'".format(opt.pretrain))
# Check if use multi-gpus.
# Note should be after any "model.load_state_dict()" call!
if opt.use_gpu:
    # model.cuda()
    if nr_GPUs > 1:  # multi-GPUs opt['mGPUs']:
        # see: https://github.com/pytorch/examples/blob/master/imagenet/main.py
        # Only the convolutional trunk is wrapped in DataParallel; heads stay single-GPU.
        if net_arch.startswith('alexnet'):
            model.trunk.Convs = torch.nn.DataParallel(model.trunk.Convs)
        elif net_arch.startswith('vgg'):
            model.trunk.features = torch.nn.DataParallel(model.trunk.features)
        else:
            model.trunk = torch.nn.DataParallel(model.trunk)
    model.cuda()
if not os.path.exists(work_dir):
    print("[Make new dir] ", work_dir)
    os.makedirs(work_dir)
disp_interval = 10 if ('disp_interval' not in opt) else opt['disp_interval']
"""
(nr_iter * batch_size)/nr_train = nr_epoch where nr_train=28647/29786
(40000*200)/29786. = 268.6
(40000* 50)/29786. = 67.1
(40000*_cfg.TRAIN.BATCH_SIZE)/29786. / 2 /10
"""
def adjust_learning_rate(optimizer, epoch):
    """Step-decay schedule: divide the base LR by 10 every ``decay_every`` epochs."""
    # '2' means the LR decays at most 2 times over the whole run.
    decay_every = int((40000 * batch_size) / 29786. / 2 / 10) * 10
    lr = opt.base_lr * (0.1 ** (epoch // decay_every))
    for group in optimizer.param_groups:
        group['lr'] = lr
    return lr
def adjust_learning_rate_by_iter(optimizer, cur_iter, max_iter):
    """Iteration-based step decay: the LR drops x10 at each third of ``max_iter``."""
    step = max_iter // 3  # three decay steps across the run.
    lr = opt.base_lr * (0.1 ** (max(cur_iter, 0) // step))
    for group in optimizer.param_groups:
        group['lr'] = lr
    return lr
def adjust_learning_rate_by_epoch(optimizer, cur_epoch, max_epoch):
    """Epoch-based step decay: the LR drops x10 at each third of ``max_epoch``."""
    step = max_epoch // 3  # three decay steps across the run.
    lr = opt.base_lr * (0.1 ** (max(cur_epoch, 0) // step))
    for group in optimizer.param_groups:
        group['lr'] = lr
    return lr
def save_checkpoint(state, filename='checkpoint.pth.tar'):
    """Serialize a training-state dict to *filename* via torch.save."""
    torch.save(state, filename)
def test(dataset_test, work_dir, test_model=None, marker='epoch'):
    """Evaluate the newest checkpoint in *work_dir* on *dataset_test*.

    Loads the latest '<net>_<marker>_<N>.pth.tar' weights into *test_model*
    (defaults to the module-level ``model``), runs inference, writes the
    per-image quaternion predictions and the evaluation summary to temp files
    under work_dir, and returns the summary's last row as a record array of
    (MedError, Acc@pi/6, Acc@pi/12, Acc@pi/24).
    """
    out_rslt_path = work_dir + '/temp.out_rslt_path.txt'
    out_eval_path = work_dir + '/temp.out_eval_path.txt'
    if test_model is None:
        test_model = model  # fall back to the module-level model
    # ---- Load trained weights here.------
    assert os.path.exists(work_dir)
    #
    iter_nums, net_name = list_models(work_dir, marker=marker)
    saved_iter_num = iter_nums[-1]
    pretrained_model = work_dir + '/%s_%s_%s.pth.tar' % (
        net_name, marker, saved_iter_num)  # select maximum iter number.
    print('[pretrained_model] ', pretrained_model)
    checkpoint = torch.load(pretrained_model)  # load weights here.
    _state_dict = patch_saved_DataParallel_state_dict(checkpoint['state_dict'])
    test_model.load_state_dict(_state_dict)
    # switch to eval mode
    test_model.eval()
    gLoss_redu = reducer_group(*watch_targets)
    gPred_redu = reducer_group(*['quat'])
    pre_time = time.time()
    it = -1
    epoch = -1
    #
    keys = dataset_test.keys
    test_loader = torch.utils.data.DataLoader(dataset_test, batch_size=_cfg.TEST.BATCH_SIZE * nr_GPUs, shuffle=False,
                                              num_workers=opt.num_workers * nr_GPUs, pin_memory=opt.pin_memory,
                                              sampler=None)
    with torch.no_grad():
        pbar = tqdm(test_loader)
        for _i_, sample_batched in enumerate(pbar):
            pbar.set_description("[work_dir] %s " % _short_work_dir)
            it += 1
            # Note: Tensor.cuda() Returns a copy of this object in CUDA memory.
            label = torch.autograd.Variable(sample_batched['label'].cuda(non_blocking=True))
            data = torch.autograd.Variable(sample_batched['data'].cuda(non_blocking=True))
            # formulate GT dict
            _gt_targets = test_model.gt_targets if hasattr(test_model, 'gt_targets') else test_model.targets
            GT = edict()
            for tgt in _gt_targets:
                GT[tgt] = torch.autograd.Variable(sample_batched[tgt].cuda(non_blocking=True))
            # compute Pred output
            Prob = test_model(data, label)
            # compute Loss for each target and formulate Loss dictionary.
            Loss = test_model.compute_loss(Prob, GT)
            total_loss = 0
            for tgt in watch_targets:
                total_loss += Loss[tgt]
            # predict target angles value
            Pred = test_model.compute_pred(Prob)
            gLoss_redu.collect(Loss)  # pass in dict of all loss (loss_a, loss_e, loss_t).
            gPred_redu.collect(Pred, squeeze=False)
            # print loss info
            cur_time = time.time()
            time_consume = cur_time - pre_time
            pre_time = cur_time
            print('\r %s [test-iter] %5d / %5d ---------[time_consume] %.2f' % (
                strftime("%Y-%m-%d %H:%M:%S", gmtime()), it, len(test_loader), time_consume))
            for trgt in watch_targets:
                _loss = Loss[trgt].data.cpu().numpy().copy()
                print(' %-15s loss=%.3f' % (trgt, _loss))
                if np.isnan(_loss):
                    print("[Warning] Weights explode! Stop training ... ")
                    exit(-1)
            # pbar.set_description("[work_dir] %s " % os.path.abspath(work_dir)[len(os.path.abspath(opt.base_dir))+1:])
            # print ("\r[work_dir] %s \r" % os.path.abspath(work_dir)[len(os.path.abspath(opt.base_dir))+1:],end='')
            sys.stdout.flush()
        pred_quats = gPred_redu.reduce()['quat']
        # -- Write result to file (Format: # {obj_id} {a} {e} {t} )
        txtTbl = TxtTable('{obj_id:<20s} {a:>6.4f} {b:>6.4f} {c:>6.4f} {d:>6.4f}')
        rslt_lines = [txtTbl.getHeader()]
        for _k, _quat in zip(keys, pred_quats):
            _a, _b, _c, _d = _quat
            rslt_line = txtTbl.format(_k, _a, _b, _c, _d)
            rslt_lines.append(rslt_line)
        rslt_lines = '\n'.join(rslt_lines)
        Open(out_rslt_path, 'w').write(rslt_lines)
        #
        print('[out_rslt_path]', out_rslt_path)
        # -- Do evaluation ('MedError', 'Acc@theta')
        from numpy_db import npy_table
        rc_tbl = npy_table(dataset_test.recs)
        #
        summary_str = eval_cates(out_rslt_path, rc_tbl, cates=opt.cates,
                                 theta_levels_str='pi/6 pi/12 pi/24')  # ['aeroplane','boat','car'])
        Open(out_eval_path, 'w').write(summary_str)
        print(summary_str)
        reca = TxtTable().load_as_recarr(out_eval_path, fields=['MedError', 'Acc@pi/6', 'Acc@pi/12', 'Acc@pi/24'])
        return reca[-1]
def train(nr_disp=5000):
    """Main training loop over ``opt.nr_epoch`` epochs of ``train_loader``.

    Runs ``test()`` every ``opt.test_step_epoch`` epochs, logs losses and
    train-set accuracies to TensorBoard, and snapshots the model every
    ``opt.snapshot_step_epoch`` epochs. ``nr_disp`` controls roughly how many
    console/log progress reports are emitted over the whole run.
    """
    os.system('rm -rf %s ' % (work_dir + "/logs"))
    logger = SummaryWriter(work_dir + "/logs")
    nr_epoch = opt.nr_epoch
    nr_iter = opt.nr_epoch * (len(dataset_train) / batch_size)  # 130800
    # based on iter
    # NOTE(review): disp_interval is 0 when nr_iter < nr_disp, which would make
    # `it % disp_interval` below raise ZeroDivisionError -- confirm nr_disp choice.
    disp_interval = int(nr_iter / nr_disp)
    pre_time = time.time()
    it = start_it - 1  # -1
    epoch = start_epoch - 1  # -1
    #
    while epoch < nr_epoch:
        epoch += 1
        # Do test first
        if epoch % opt.test_step_epoch == 0:
            mederr, acc6, acc12, acc24 = test(dataset_test, work_dir, model)
            logger.add_scalars('acc/test',
                               {'MedError': mederr, 'Acc@pi/6': acc6, 'Acc@pi/12': acc12, 'Acc@pi/24': acc24},
                               epoch + 1)
        # switch to train mode
        model.train()
        lr = adjust_learning_rate_by_epoch(optimizer, epoch, nr_epoch)  # opt.base_lr
        pbar = tqdm(train_loader)
        for _i_, sample_batched in enumerate(pbar):
            pbar.set_description("[work_dir] %s B=%s " % (_short_work_dir, batch_size))
            rec_inds = sample_batched['idx'].numpy()
            #
            it += 1
            label = torch.autograd.Variable(sample_batched['label'].cuda(non_blocking=True))
            data = torch.autograd.Variable(sample_batched['data'].cuda(non_blocking=True))
            # formulate GT dict
            _gt_targets = model.gt_targets if hasattr(model, 'gt_targets') else model.targets
            GT = edict()
            for tgt in _gt_targets:
                GT[tgt] = torch.autograd.Variable(sample_batched[tgt].cuda(non_blocking=True))
            # compute Pred output
            Prob = model(data, label)
            # compute Loss for each target and formulate Loss dictionary.
            Loss = model.compute_loss(Prob, GT)
            total_loss = 0
            for tgt in watch_targets:
                total_loss += Loss[tgt]  # * loss_weight
            # compute gradient and do SGD step
            optimizer.zero_grad()  # Clears the gradients of all optimized Variable s.
            total_loss.backward()
            optimizer.step()
            logger.add_scalars('loss_iter', Loss, it + 1)
            # logger.add_scalar('grad_norm/fc7', fc7_gradNorm, it+1)
            # print loss info
            if it % disp_interval == 0:  # or (it+1)==len(dataset_train)/batch_size:
                cur_time = time.time()
                time_consume = cur_time - pre_time
                pre_time = cur_time
                logprint(
                    '%s [epoch] %3d/%3d [iter] %5d -----------------------------------[time_consume] %.2f lr=%.8f'
                    % (strftime("%Y-%m-%d %H:%M:%S", gmtime()), epoch + 1, nr_epoch, it + 1, time_consume, lr))
                for tgt in watch_targets:
                    _loss = Loss[tgt].data.cpu().numpy().copy()
                    logprint(' %-15s loss=%.3f' % (tgt, _loss))
                    if np.isnan(_loss):
                        print("[Warning] Weights explode! Stop training ... ")
                        exit(-1)
                # Compute Acc@theta
                recs = dataset_train.recs[rec_inds]
                Pred = model.compute_pred(Prob)
                geo_dists = compute_geo_dists(Pred['quat'], recs.so3.quaternion)
                MedError = np.median(geo_dists) / np.pi * 180.
                theta_levels = odict(zip(['pi/6', 'pi/12', 'pi/24'], [np.pi / 6, np.pi / 12, np.pi / 24]))
                # # {'pi/6':np.pi/6, 'pi/12':np.pi/12, 'pi/24':np.pi/24})
                Acc_at_ts = odict([(tname, sum(geo_dists < tvalue) / float(len(geo_dists))) for tname, tvalue in
                                   theta_levels.items()])
                logger.add_scalars('acc/train', Acc_at_ts, it + 1)
                acc_str = ' '.join(['[%s] %3.1f%%' % (k, Acc_at_ts[k] * 100) for k, v in theta_levels.items()])
                logprint(' Acc@{ %s } ' % acc_str)
            # pbar.set_description("[work_dir] %s B=%s \r" % (os.path.abspath(work_dir)[len(os.path.abspath(opt.base_dir))+1:], batch_size))
            # print ("\r[work_dir] %s B=%s \r" % (os.path.abspath(work_dir)[len(os.path.abspath(opt.base_dir))+1:], batch_size), end='')
            sys.stdout.flush()
        #
        #
        if (epoch + 1) % opt.snapshot_step_epoch == 0:
            save_checkpoint({
                'it_and_epoch': (it, epoch),
                'state_dict': model.state_dict(),
                'optimizer': optimizer.state_dict(),
            }, filename=os.path.join(work_dir, 'train_epoch_%s.pth.tar' % (epoch + 1)))
    logger.close()
if __name__ == '__main__':
    # Either run a one-off evaluation, or train and then prune old snapshots.
    if not opt.test_only:
        train()
        rm_models(work_dir, marker='epoch')
    else:
        test(dataset_test, work_dir)
| 20,434 | 39.951904 | 146 | py |
RPMG | RPMG-main/Pascal3D_Img/S3.3D_Rotation/agent.py | from utils import TrainClock, KSchedule
import os
import torch
import torch.optim as optim
import torch.nn as nn
from torch.utils.tensorboard import SummaryWriter
import sys
BASEPATH = os.path.dirname(__file__)
sys.path.append(os.path.join(BASEPATH, '..', '..', 'utils'))
import tools
import rpmg
from networks import get_network
def get_agent(config):
    """Factory: build the training agent for this experiment."""
    return MyAgent(config)
class MyAgent(object):
    """Base trainer that provides common training behavior.

    Wraps the network, optimizer, checkpointing and the per-mode rotation
    parameterizations (RPMG / 9D / 6D / 4D / Euler / 10D). All customized
    trainers should be subclasses of this class.
    """

    def __init__(self, config):
        self.config = config
        self.clock = TrainClock()
        # Schedule for the RPMG `k` hyper-parameter over training iterations.
        self.k_schedule = KSchedule(config.k_init, config.k_safe, config.max_iters)
        self.net = get_network(config).cuda()
        self.optimizer = optim.Adam(self.net.parameters(), config.lr)
        self.criterion = torch.nn.MSELoss(reduction='sum')
        self.writer = SummaryWriter(log_dir=self.config.log_dir)
        rpmg.logger_init(self.writer)

    def adjust_learning_rate_by_epoch(self, optimizer, cur_epoch, max_epoch):
        """Sets the learning rate to the initial LR decayed by 10 every _N_ epochs"""
        _N_ = max_epoch // 3  # adjust learning rate 3 times over the run.
        lr = self.config.lr * (0.1 ** (max(cur_epoch, 0) // _N_))
        for param_group in optimizer.param_groups:
            param_group['lr'] = lr
        return lr

    def save_ckpt(self, name=None):
        """save checkpoint during training for future restore"""
        if name is None:
            save_path = os.path.join(self.config.model_dir, "ckpt_epoch{}.pth".format(self.clock.epoch))
            print("Saving checkpoint epoch {}...".format(self.clock.epoch))
        else:
            save_path = os.path.join(self.config.model_dir, "{}.pth".format(name))
        if isinstance(self.net, nn.DataParallel):
            model_state_dict = self.net.module.cpu().state_dict()
        else:
            model_state_dict = self.net.cpu().state_dict()
        torch.save({
            'clock': self.clock.make_checkpoint(),
            'model_state_dict': model_state_dict,
            'optimizer_state_dict': self.optimizer.state_dict(),
        }, save_path)
        self.net.cuda()  # state_dict was taken on CPU; move the net back to GPU.

    def load_ckpt(self, name=None):
        """load checkpoint from saved checkpoint"""
        name = name if name == 'latest' else "ckpt_epoch{}".format(name)
        load_path = os.path.join(self.config.model_dir, "{}.pth".format(name))
        if not os.path.exists(load_path):
            raise ValueError("Checkpoint {} not exists.".format(load_path))
        checkpoint = torch.load(load_path)
        print("Loading checkpoint from {} ...".format(load_path))
        if isinstance(self.net, nn.DataParallel):
            self.net.module.load_state_dict(checkpoint['model_state_dict'])
        else:
            self.net.load_state_dict(checkpoint['model_state_dict'])
        self.optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
        self.clock.restore_checkpoint(checkpoint['clock'])

    def forward(self, img, gt):
        """Run the net and compute the rotation loss for the configured mode.

        Returns (raw network output, loss, per-sample geodesic error in degrees).
        """
        img = img.cuda()
        gt = gt.cuda()  # (b, 3, 3)
        pred = self.net(img)  # (b, D); D depends on the rotation parameterization
        if 'RPMG' in self.config.mode:
            k = self.k_schedule.get_k(self.clock.iteration)
            pred_orth = rpmg.RPMG.apply(pred, k, 0.01, gt, self.clock.iteration)
            loss = self.criterion(pred_orth, gt)
        elif '9D' in self.config.mode:
            pred_orth = tools.symmetric_orthogonalization(pred)
            if self.config.mode == '9D_SVD':
                loss = self.criterion(pred_orth, gt)
            elif self.config.mode == '9D_inf':
                loss = self.criterion(pred, gt.flatten(1))
            else:
                raise NotImplementedError
        elif '6D' in self.config.mode:
            pred_orth = tools.compute_rotation_matrix_from_ortho6d(pred)
            if self.config.mode == '6D_GM':
                loss = self.criterion(pred_orth, gt)
            elif self.config.mode == '6D_inf':
                # BUGFIX: torch.cat takes a *sequence* of tensors; the original
                # torch.cat(gt[:, :, 0], gt[:, :, 1], 1) raised TypeError at runtime.
                gt_6d = torch.cat((gt[:, :, 0], gt[:, :, 1]), 1)
                loss = self.criterion(pred, gt_6d)
            else:
                raise NotImplementedError
        elif '4D' in self.config.mode:
            pred_orth = tools.compute_rotation_matrix_from_quaternion(pred)
            if self.config.mode == '4D_norm':
                loss = self.criterion(pred_orth, gt)
            elif self.config.mode == '4D_inf':
                gt_q = tools.compute_quaternions_from_rotation_matrices(gt)  # (b, 4)
                loss = self.criterion(pred, gt_q)
            elif self.config.mode == '4D_Axis':
                pred_orth = tools.compute_rotation_matrix_from_axisAngle(pred)
                loss = self.criterion(pred_orth, gt)
            else:
                raise NotImplementedError
        elif self.config.mode == '3D_Euler':
            pred_orth = tools.compute_rotation_matrix_from_euler(pred)
            loss = self.criterion(pred_orth, gt)
        elif self.config.mode == '10D':
            pred_orth = tools.compute_rotation_matrix_from_10d(pred)
            loss = self.criterion(pred_orth, gt)
        else:
            raise NotImplementedError
        err_deg = torch.rad2deg(tools.compute_geodesic_distance_from_two_matrices(pred_orth, gt))  # batch
        return pred, loss, err_deg

    def train_func(self, real_data, syn_data):
        """one step of training on a mixed real+synthetic batch"""
        self.net.train()
        img = torch.cat((real_data[0], syn_data[0]), 0)
        gt = torch.cat((real_data[1], syn_data[1]), 0).squeeze(1)
        pred, loss, err_deg = self.forward(img, gt)
        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()
        return pred, loss, err_deg

    def val_func(self, data):
        """one step of validation"""
        self.net.eval()
        with torch.no_grad():
            pred, loss, err_deg = self.forward(data[0], data[1].squeeze(1))
        return pred, loss, err_deg
if __name__ == '__main__':
    # Sanity-print the step-decay schedule used by the agent:
    # the LR is divided by 10 every max_epoch // 3 epochs.
    total_epochs = 1000
    decay_every = total_epochs // 3
    for ep in range(total_epochs):
        lr = 1e-3 * (0.1 ** (max(ep, 0) // decay_every))
        if ep % 10 == 0:
            print(f'epoch {ep}: {lr}')
| 6,286 | 38.540881 | 106 | py |
RPMG | RPMG-main/Pascal3D_Img/S3.3D_Rotation/regQuatNet/regQuatNet.py | # coding: utf8
"""
@Author : Shuai Liao
"""
import torch.nn as nn
import torch.utils.model_zoo as model_zoo
from torch.autograd import Variable
import torch
import torch.nn.functional as F
from basic.common import rdict
import numpy as np
from easydict import EasyDict as edict
from collections import OrderedDict as odict
from itertools import product
from pytorch_util.netutil.common_v2.trunk_alexnet_bvlc import AlexNet_Trunk
from pytorch_util.netutil.common_v2.trunk_vgg import VGG16_Trunk
from pytorch_util.netutil.common_v2.trunk_resnet import ResNet101_Trunk, ResNet50_Trunk
net_arch2Trunk = dict(
alexnet=AlexNet_Trunk,
vgg16=VGG16_Trunk,
resnet101=ResNet101_Trunk,
resnet50=ResNet50_Trunk
)
from pytorch_util.libtrain import copy_weights, init_weights_by_filling
from pytorch_util.torch_v4_feature import LocalResponseNorm # *
from pytorch_util.torch_3rd_layers import Maskout
from pytorch_util.torch_3rd_funcs import norm2unit, exp_Normalization
from basic.common import env, add_path
from lib.helper import *
loss_balance = 4.
def cls_pred(output, topk=(1,)):
    """Return per-row indices of the max(topk) highest-scoring classes.

    output: (batch, num_classes) score tensor.
    Returns a LongTensor of shape (batch, max(topk)), sorted by score desc.
    """
    maxk = max(topk)
    # NOTE: removed an unused `batch_size = output.size(0)` local.
    _, pred = output.topk(maxk, 1, True, True)
    return pred
def reg2d_pred2tgt(pr_sin, pr_cos):
    """Recover an angle (radians) from its predicted sine/cosine pair."""
    return torch.atan2(pr_sin, pr_cos)
class _BaseReg_Net(nn.Module):
    """Shared base for quaternion-regression heads on a CNN trunk.

    BVLC alexnet architecture (Note: slightly different from pytorch
    implementation); the trunk is selected via ``net_arch2Trunk``.
    """

    @staticmethod
    def head_seq(in_size, reg_n_D, nr_cate=12, nr_fc8=334, init_weights=True):  # in_size=4096
        """Per-category regression head: Fc8 -> ReLU -> (nr_cate * reg_n_D) outputs."""
        seq = nn.Sequential(
            nn.Linear(in_size, nr_fc8),  # Fc8
            nn.ReLU(inplace=True),
            # nn.Dropout(),
            nn.Linear(nr_fc8, nr_cate * reg_n_D),  # Prob
        )
        if init_weights:
            init_weights_by_filling(seq, gaussian_std=0.005, kaiming_normal=True)  # fill weight with gaussian filler
        return seq

    def __init__(self, nr_cate=12, net_arch='alexnet', init_weights=True):  # AlexNet_Trunk
        super(_BaseReg_Net, self).__init__()
        _Trunk = net_arch2Trunk[net_arch]
        self.trunk = _Trunk(init_weights=init_weights)
        self.nr_cate = nr_cate
        # resnet trunks expose 2048-d features; alexnet/vgg expose 4096-d.
        self.top_size = 4096 if not self.trunk.net_arch.startswith('resnet') else 2048

    def forward(self, x, label):
        # BUGFIX: this abstract hook was misspelled ``forword`` and therefore
        # never shadowed nn.Module.forward; subclasses must override it.
        raise NotImplementedError
# ---------------------------------------------------------------------[reg_Direct]
class reg_Direct_Net(_BaseReg_Net):  # No L2 norm at all,
    """ No any L2 normalization to guarantee prediction is on n-sphere, smooth l1 loss is used. """
    """BVLC alexnet architecture (Note: slightly different from pytorch implementation.)"""
    def __init__(self, nr_cate=12, net_arch='alexnet', init_weights=True):
        _BaseReg_Net.__init__(self, nr_cate=nr_cate, net_arch=net_arch, init_weights=init_weights)
        self.nr_cate = nr_cate
        self.reg_n_D = 4  # a quaternion has 4 components
        # -- Head architecture
        # Note: for quaternion, there's only one regression head (instead of 3 Euler angles (a,e,t)).
        # Thus, nr_fc8=996 (see design.py)
        self.head_quat = self.head_seq(self.top_size, self.reg_n_D, nr_cate=nr_cate, nr_fc8=996,
                                       init_weights=init_weights)
        # for maskout specific category
        self.maskout = Maskout(nr_cate=nr_cate)
        # loss module
        self.loss_handler = Smooth_L1_Loss_Handler()
        self.targets = ['quat']
    def forward(self, x, label):
        """Predict a (batchsize, 4) raw quaternion per sample.

        label shape (batchsize, ) -- category index used to mask out the
        per-category head output.
        """
        x = self.trunk(x)  # Forward Conv and Fc6,Fc7
        #
        batchsize = x.size(0)  # .split(1, dim=1)
        # Note: quat(a,b,c,d) is on a 4d sphere and (x^2+y^2=1)
        x_quat = self.maskout(self.head_quat(x).view(batchsize, self.nr_cate, self.reg_n_D), label)
        # -- Normalize coordinate to a unit
        # x_quat = norm2unit(x_quat) #, nr_cate=self.nr_cate)
        Prob = edict(quat=x_quat)
        return Prob
    def compute_loss(self, Prob, GT):
        """Smooth-L1 loss between predicted and ground-truth quaternions."""
        Loss = self.loss_handler.compute_loss(self.targets, Prob, GT)
        return Loss
    @staticmethod
    def compute_pred(Prob):
        """Convert raw network output into a unit-quaternion prediction (numpy)."""
        x_quat = Prob['quat']
        # -- Normalize coordinate to a unit
        x_quat = norm2unit(x_quat)  # Note: here we do l2 normalization. Just to make predicted quaternion a unit norm.
        #
        batchsize = x_quat.size(0)
        # Get cpu data.
        batch_data = x_quat.data.cpu().numpy().copy()
        assert batch_data.shape == (batchsize, 4), batch_data.shape
        Pred = edict(quat=batch_data)
        return Pred
# ---------------------------------------------------------------------
# ---------------------------------------------------------------------[reg_Sflat]
class reg_Sflat_Net(_BaseReg_Net):
    """Quaternion regression with "spherical flat" activation: the head output
    is L2-normalized onto the unit 4-sphere and trained with a cosine-proximity
    loss."""

    def __init__(self, nr_cate=12, net_arch='alexnet', init_weights=True):
        _BaseReg_Net.__init__(self, nr_cate=nr_cate, net_arch=net_arch, init_weights=init_weights)
        self.nr_cate = nr_cate
        self.reg_n_D = 4
        # Single regression head for the whole quaternion (nr_fc8=996, see design.py).
        self.head_quat = self.head_seq(self.top_size, self.reg_n_D, nr_cate=nr_cate, nr_fc8=996,
                                       init_weights=init_weights)
        # Picks out the prediction belonging to the ground-truth category.
        self.maskout = Maskout(nr_cate=nr_cate)
        # Loss: mean(1 - cos(pred, gt)).
        self.loss_handler = Cos_Proximity_Loss_Handler()
        self.targets = ['quat']

    def forward(self, x, label):
        """label shape (batchsize, ) """
        feats = self.trunk(x)
        nr_sample = feats.size(0)
        per_cate = self.head_quat(feats).view(nr_sample, self.nr_cate, self.reg_n_D)
        picked = self.maskout(per_cate, label)
        # Project onto the unit sphere so the output is a valid quaternion.
        return edict(quat=norm2unit(picked))

    def compute_loss(self, Prob, GT):
        return self.loss_handler.compute_loss(self.targets, Prob, GT)

    @staticmethod
    def compute_pred(Prob):
        quat_var = Prob['quat']
        arr = quat_var.data.cpu().numpy().copy()
        assert arr.shape == (quat_var.size(0), 4), arr.shape
        return edict(quat=arr)
# ---------------------------------------------------------------------
# ---------------------------------------------------------------------[reg_Sexp]
class reg_Sexp_Net(_BaseReg_Net):
    """Spherical-exponential variant: one head regresses the squared magnitudes
    of the quaternion components (via softmax, so they sum to 1), and a second
    head classifies the sign pattern of (b,c,d); the first component is fixed
    positive. Trained with cosine proximity (magnitudes) + cross-entropy (signs).
    """
    def __init__(self, nr_cate=12, net_arch='alexnet', init_weights=True):
        _BaseReg_Net.__init__(self, nr_cate=nr_cate, net_arch=net_arch, init_weights=init_weights)
        self.nr_cate = nr_cate
        self.reg_n_D = 4
        # Note: for a quaternion q=(a,b,c,d), we always ensure a>0, that this cos(theta/2)>0 --> theta in [0,pi]
        # Thus only b,c,d need sign prediction.
        dim_need_sign = 3
        _signs = list(product(*([(-1, 1)] * dim_need_sign)))  # [(-1, -1, -1), (-1, -1, 1), ..., (1, 1, 1)], with len=8
        self.signs = [(1,) + x for x in _signs]  # [(1, -1, -1, -1), (1, -1, -1, 1), ..., (1, 1, 1, 1)], with len=8
        # Bidirectional mapping: sign tuple <-> class label (0..7).
        self.signs2label = odict(zip(self.signs, range(len(self.signs))))
        self.label2signs = Variable(torch.FloatTensor(self.signs)).cuda()  # make it as a Variable
        # -- Head architecture
        # Note: for quaternion, there's only one regression head (instead of 3 (for a,e,t)).
        # Thus, nr_fc8=996 (see design.py)
        self.head_sqrdprob_quat = self.head_seq(self.top_size, self.reg_n_D, nr_cate=nr_cate, nr_fc8=996,
                                                init_weights=init_weights)
        # each of 3 quaternion complex component can be + or -, that totally 2**3 possible sign categories.
        self.head_signcate_quat = self.head_seq(self.top_size, len(self.signs), nr_cate=nr_cate, nr_fc8=996,
                                                init_weights=init_weights)
        # for abs branch
        self.maskout = Maskout(nr_cate=nr_cate)
        self.softmax = nn.Softmax(dim=1).cuda()
        # for sgc branch
        self.maskout_sgc = Maskout(nr_cate=nr_cate)  # make a new layer to maskout sign classification only.
        # loss module
        self.loss_handler_abs_quat = Cos_Proximity_Loss_Handler()  # Neg_Dot_Loss_Handler() # Cos_Proximity_Loss_Handler() #
        self.loss_handler_sgc_quat = Cross_Entropy_Loss_Handler()
        self.targets = ['abs_quat', 'sgc_quat']
        self.gt_targets = ['quat']
    def forward(self, x, label):
        """label shape (batchsize, ) """
        x = self.trunk(x)  # Forward Conv and Fc6,Fc7
        #
        batchsize = x.size(0)
        # Note: squared probability
        x_sqr_quat = self.maskout(self.head_sqrdprob_quat(x).view(batchsize, self.nr_cate, self.reg_n_D),
                                  label)  # ========>>>>> Maskout output (B,4) hook gradient.
        # -- Exp and Normalize coordinate to a unit (softmax = exp + normalize)
        x_sqr_quat = self.softmax(x_sqr_quat)  # , nr_cate=self.nr_cate)
        # sign category head (totally 2^3=8 sign categories over (b,c,d))
        x_sgc_quat = self.maskout_sgc(self.head_signcate_quat(x).view(batchsize, self.nr_cate, len(self.signs)), label)
        # sqrt turns squared magnitudes back into component magnitudes.
        Prob = edict(abs_quat=torch.sqrt(x_sqr_quat), sgc_quat=x_sgc_quat)
        return Prob
    def compute_loss(self, Prob, GT):
        # First get sign label from GT
        # == Formulate absolute value of quaternion
        GT_abs_quat = torch.abs(GT.quat)
        # == Formulate signs label of quaternion
        GT_sign_quat = torch.sign(GT.quat)
        GT_sign_quat[GT_sign_quat == 0] = 1  # make sign of '0' as 1
        signs_tuples = [tuple(x) for x in GT_sign_quat.data.cpu().numpy().astype(np.int32).tolist()]
        for signs_tuple in signs_tuples:  # q and -q gives the same rotation.
            assert signs_tuple[
                       0] > 0, "Need GT to be all positive on first dim of quaternion: %s" % GT  # assert all quaternion first dim is positive.
        # signs label
        GT_sgc_quat = Variable(torch.LongTensor([self.signs2label[signs_tuple] for signs_tuple in signs_tuples]))
        if GT.quat.is_cuda:
            GT_sgc_quat = GT_sgc_quat.cuda()
        # here just because compute_loss need a same key from Prob and GT,
        # so we just give a fake name to GT.sqr_quat as '_GT.logsqr_quat'.
        _GT = edict(abs_quat=GT_abs_quat, sgc_quat=GT_sgc_quat)
        Loss_abs_quat = self.loss_handler_abs_quat.compute_loss(['abs_quat'], Prob, _GT)
        Loss_sgc_quat = self.loss_handler_sgc_quat.compute_loss(['sgc_quat'], Prob, _GT)
        # To add loss weights here. (abs branch weighted x10)
        Loss = edict(abs_quat=Loss_abs_quat['abs_quat'] * 10,  # / 5.
                     sgc_quat=Loss_sgc_quat['sgc_quat'], )
        return Loss
    def compute_pred(self, Prob):
        x_abs_quat = Prob['abs_quat']  # torch.sqrt(torch.exp(Prob['logsqr_quat']))
        x_sgc_quat = Prob['sgc_quat']
        batchsize = x_abs_quat.size(0)
        # Decode the predicted sign class back into a (+1/-1) sign vector,
        # then combine with the magnitudes to recover the signed quaternion.
        sign_ind = cls_pred(x_sgc_quat, topk=(1,)).data.view(-1, )
        item_inds = torch.from_numpy(np.arange(batchsize)).cuda()
        _label_shape = self.label2signs.size()
        x_sign_quat = self.label2signs.expand(batchsize, *_label_shape)[item_inds, sign_ind]
        x_quat = x_abs_quat * x_sign_quat
        # Get cpu data.
        batch_quat = x_quat.data.cpu().numpy().copy()
        batchsize = x_quat.size(0)
        assert batch_quat.shape == (batchsize, 4), batch_quat.shape
        #
        Pred = edict(quat=batch_quat)
        return Pred
# ---------------------------------------------------------------------
# ----------------------------------------------------------------------------------
if __name__ == '__main__':
    # Smoke test: forward a dummy batch through the network.
    # NOTE(review): `reg2D_Net` is not defined in this module — this block would
    # raise NameError if executed; presumably a stale class name. Confirm intent.
    model = reg2D_Net().copy_weights()
    # import numpy as np
    dummy_batch_data = np.zeros((2, 3, 227, 227), dtype=np.float32)
    dummy_batch_label = np.zeros((2, 1), dtype=np.int64)
    dummy_batch_data = torch.autograd.Variable(torch.from_numpy(dummy_batch_data))
    dummy_batch_label = torch.autograd.Variable(torch.from_numpy(dummy_batch_label))
    Pred = model(dummy_batch_data, dummy_batch_label)
    # print (Prob.a)
| 12,708 | 38.715625 | 143 | py |
RPMG | RPMG-main/Pascal3D_Img/S3.3D_Rotation/lib/helper.py | # coding: utf8
"""
@Author : Shuai Liao
"""
import torch
import torch.nn as nn
from torch.autograd import Variable
import numpy as np
from easydict import EasyDict as edict
from collections import OrderedDict as odict
from itertools import product
#
from collections import OrderedDict
class Cross_Entropy_Loss_Handler:
    """Applies nn.CrossEntropyLoss to each named prediction target."""

    def __init__(self):
        self.cross_entropy_loss = nn.CrossEntropyLoss().cuda()

    def compute_loss(self, tgts, Pred, GT):
        """tgts: list of target names, e.g. ['a', 'e', 't'];
        Pred/GT: dicts mapping each target name to logits / class labels.
        Returns an edict of per-target scalar losses."""
        per_target = {name: self.cross_entropy_loss(Pred[name], GT[name]) for name in tgts}
        return edict(per_target)
class Neg_Dot_Loss_Handler:
    """Per-target loss: mean over the batch of the negative inner product
    -<GT[tgt], Pred[tgt]> (row-wise dot product over dim=1)."""

    def __init__(self):
        # Bug fix: this was misspelled `__init_`, so it never ran as the
        # constructor (harmless here since it only did `pass`, but misleading).
        pass

    def compute_loss(self, tgts, Pred, GT):
        """tgts: list of target names; Pred/GT: dicts of (batch, dim) tensors.
        Returns an edict of per-target scalar losses."""
        Loss = edict()
        for tgt in tgts:
            # torch.dot only accepts 1-D tensors, so a batched inner product is
            # written out explicitly (original bug fixed on 22 Aug 2018).
            Loss[tgt] = torch.mean(-torch.sum(GT[tgt] * Pred[tgt], dim=1))
        return Loss
class Cos_Proximity_Loss_Handler:
    """Per-target loss: mean(1 - cosine_similarity(pred, gt)).
    The 1 - cos(theta) form keeps the loss non-negative."""

    def __init__(self):
        self.cos_sim = nn.CosineSimilarity().cuda()

    def compute_loss(self, tgts, Pred, GT):
        """tgts: list of target names (typically ['quat']);
        Pred/GT: dicts of (batch, dim) tensors. Returns an edict of losses."""
        out = edict()
        for name in tgts:
            similarity = self.cos_sim(Pred[name], GT[name])
            out[name] = torch.mean(1 - similarity)
        return out
class Smooth_L1_Loss_Handler:
    """Applies nn.SmoothL1Loss to each named prediction target."""

    def __init__(self):
        self.smooth_l1_loss = nn.SmoothL1Loss().cuda()

    def compute_loss(self, tgts, Pred, GT):
        """tgts: list of target names, e.g. ['a', 'e', 't'];
        Pred/GT: dicts of tensors. Returns an edict of per-target losses."""
        out = edict()
        for name in tgts:
            # Convention: prediction first, ground truth second.
            out[name] = self.smooth_l1_loss(Pred[name], GT[name])
        return out
| 2,518 | 30.4875 | 118 | py |
RPMG | RPMG-main/Pascal3D_Img/S3.3D_Rotation/lib/datasets/Dataset_Base.py | """
@Author : Shuai Liao
"""
import os, sys
import numpy as np
from math import ceil, floor, pi
import torch
from torch.utils.data import Dataset, DataLoader
from collections import OrderedDict as odict
import cv2
path = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
sys.path.append(os.path.join(path, 'pylibs'))
from basic.common import add_path, env, rdict, cv2_wait, cv2_putText, is_py3
if is_py3:
import pickle
else:
import cPickle as pickle
from lmdb_util import ImageData_lmdb
from numpy_db import npy_table, npy_db, dtype_summary, reorder_dtype
this_dir = os.path.dirname(os.path.abspath(__file__))
base_dir = this_dir + '/../../../dataset' # where the dataset directory is.
assert os.path.exists(base_dir)
cate10 = ['bathtub', 'bed', 'chair', 'desk', 'dresser', 'monitor', 'night_stand', 'sofa', 'table', 'toilet']
cate40 = ['airplane', 'bathtub', 'bed', 'bench', 'bookshelf', 'bottle', 'bowl',
'car', 'chair', 'cone', 'cup', 'curtain', 'desk', 'door', 'dresser',
'flower_pot', 'glass_box', 'guitar', 'keyboard', 'lamp', 'laptop',
'mantel', 'monitor', 'night_stand', 'person', 'piano', 'plant',
'radio', 'range_hood', 'sink', 'sofa', 'stairs', 'stool',
'table', 'tent', 'toilet', 'tv_stand', 'vase', 'wardrobe', 'xbox']
## Net configurations that are independent of task
netcfg = rdict( # configuration for alexnet
alexnet=rdict(TRAIN=rdict(BATCH_SIZE=200),
TEST=rdict(BATCH_SIZE=200),
INPUT_SHAPE=(227, 227), # resize_shape
PIXEL_MEANS=np.array([[[102.9801, 115.9465, 122.7717]]]),
RNG_SEED=3, ), # ignore_label=-1,
# configuration for vgg
vgg16=rdict(TRAIN=rdict(BATCH_SIZE=40), # 64 20
TEST=rdict(BATCH_SIZE=20),
INPUT_SHAPE=(224, 224),
PIXEL_MEANS=np.array([[[102.9801, 115.9465, 122.7717]]]),
RNG_SEED=3, ),
# configuration for resnet50
resnet50=rdict(TRAIN=rdict(BATCH_SIZE=100), # 128
TEST=rdict(BATCH_SIZE=64),
INPUT_SHAPE=(224, 224),
PIXEL_MEANS=np.array([[[102.9801, 115.9465, 122.7717]]]),
RNG_SEED=3, ),
# configuration for resnet101
resnet101=rdict(TRAIN=rdict(BATCH_SIZE=64),
TEST=rdict(BATCH_SIZE=20),
INPUT_SHAPE=(224, 224),
PIXEL_MEANS=np.array([[[102.9801, 115.9465, 122.7717]]]),
RNG_SEED=3, ),
# configuration for resnet152
resnet152=rdict(TRAIN=rdict(BATCH_SIZE=32),
TEST=rdict(BATCH_SIZE=10),
INPUT_SHAPE=(224, 224),
PIXEL_MEANS=np.array([[[102.9801, 115.9465, 122.7717]]]),
RNG_SEED=3, ),
)
def get_anno(db_path, category_target):  # target=''
    """Load per-view pose annotations from the dataset db and keep only the
    views of `category_target`.

    Returns (keys_cate, rcs_cate): the view ids and the matching recarray of
    `img_view_anno` records (quaternion + euler per view).
    """
    # TO Move to generation of data db.
    viewID2quat = pickle.load(open(os.path.join(db_path, 'viewID2quat.pkl'), 'rb'), encoding='latin1')
    viewID2euler = pickle.load(open(os.path.join(db_path, 'viewID2euler.pkl'), 'rb'), encoding='latin1')
    keys = np.array(list(viewID2quat.keys()))
    idx_target = []
    add_path(this_dir)
    from db_type import img_view_anno
    rcs = np.zeros((len(viewID2quat.keys()),), dtype=img_view_anno).view(np.recarray)
    for i, (key, quat) in enumerate(viewID2quat.items()):
        rc = rcs[i]
        rc.img_id = key  # bathtub_0107.v001
        # Key format assumed "<category>_<cadnum>.<viewid>" — e.g. "bathtub_0107.v001".
        cad_id, viewId = key.split('.')
        category = cad_id[:cad_id.rfind('_')]
        if category == category_target:
            idx_target.append(i)
        rc.category = category
        rc.cad_id = cad_id
        rc.so3.quaternion = quat if quat[0] > 0 else -quat  # q and -q give the same rotation matrix.
        # Make sure all q[0]>0, that is rotation angle in [0,pi]
        rc.so3.euler = viewID2euler[key]
    rcs_cate = rcs[idx_target]
    keys_cate = keys[idx_target]
    return keys_cate, rcs_cate
class Dataset_Base(Dataset):
    """Base dataset over the ModelNet10-SO3 lmdb image database.

    Loads annotations for one category, optionally subsamples the split, and
    provides image preprocessing; subclasses implement __getitem__.
    """
    # Map split name -> lmdb database directory name.
    collection2dbname = \
        dict(train='train_100V.Rawjpg.lmdb',  # 'train_20V.Rawjpg.lmdb'
             test='test_20V.Rawjpg.lmdb',
             )
    def __init__(self, category, collection='train', net_arch='alexnet', sampling=1.0):
        self.net_arch = net_arch
        self.cfg = netcfg[net_arch]
        self.collection = collection
        self.cates = cate10
        #
        self.cate2ind = odict(zip(self.cates, range(len(self.cates))))
        # get im_db
        self.db_path = os.path.join(base_dir, 'ModelNet10-SO3', self.collection2dbname[collection])
        assert self.db_path is not None, '%s is not exist.' % (self.db_path)
        self.datadb = ImageData_lmdb(self.db_path)
        # Get anno
        self.keys, self.recs = get_anno(self.db_path, category)
        assert sampling > 0 and sampling <= 1.0, sampling
        if sampling < 1.0:
            # Uniformly subsample the split (without replacement), keeping key order.
            print('Sampling dataset: %s' % sampling)
            _inds = np.arange(len(self.keys))
            sample_inds = np.random.choice(_inds, size=int(len(_inds) * sampling), replace=False)
            sample_inds.sort()
            self.keys, self.recs = [self.keys[x] for x in sample_inds], self.recs[sample_inds]
        self.key2ind = dict(zip(self.keys, range(len(self.keys))))
        # self.resize_shape = rsz_shape
        # BGR mean pixel used by caffe-style normalization.
        self.mean_pxl = np.array([102.9801, 115.9465, 122.7717], np.float32)
    #
    def _image2data(self, img, data_normal_type='caffe'):
        """HWC uint8 image -> CHW float32 network input (caffe-style only)."""
        if self.net_arch == 'alexnet':
            # Pad 224x224 images up to AlexNet's 227x227 input.
            img = np.pad(img, [(0, 3), (0, 3), (0, 0)], mode='edge')  # (0,0,3,3)
        # caffe-style
        if data_normal_type == 'caffe':
            # Subtract mean pixel
            data = (img - self.mean_pxl).astype(np.float32) / 255.
            # Transpose
            data = data.transpose((2, 0, 1))  # H,W,C -> C,H,W
        elif data_normal_type == 'pytorch':
            # -# img = cv2.cvtColor( img, cv2.COLOR_GRAY2RGB )
            # -# if self.transform is not None:
            # -#     img = self.transform(img)  # return (3,224,224)
            raise NotImplementedError
        else:
            raise NotImplementedError
        return data
    def _get_image(self, rc):
        """Fetch the record's image from lmdb and preprocess it."""
        img_id = rc.img_id
        img = self._image2data(self.datadb[img_id])
        return img  # if not flip else cv2.flip( img, 1 )
    def __len__(self):
        return len(self.recs)
    def __getitem__(self, idx):
        # Interface stub — subclasses must override.
        # NOTE(review): `rcobj.obj_id` / `rcobj.src_img.image_id` may not exist on
        # img_view_anno records; these lines could raise before the intended
        # NotImplementedError — confirm against db_type.
        rcobj = self.recs[idx]
        cate = rcobj.category
        obj_id = rcobj.obj_id
        image_id = rcobj.src_img.image_id
        """ To implement construction of sample dictionary.
            To get image data: call 'self.roiloader(rcobj)'
        """
        print('This is an interface method, and you need to implement it in inherited class.')
        raise NotImplementedError
    def get_recs(self, query_keys):
        """Return the records for the given view keys (in query order)."""
        inds = [self.key2ind[k] for k in query_keys]
        return self.recs[inds]
    # interface method
    def _vis_minibatch(self, sample_batched):
        """Visualize a mini-batch for debugging."""
        for i, (idx, label, quat, data) in enumerate(zip(sample_batched['idx'],  # note: these are tensors
                                                         sample_batched['label'],
                                                         sample_batched['quat'],
                                                         sample_batched['data'])):
            rc = self.recs[idx]
            # print idx
            # Undo the caffe-style normalization for display.
            im = data.numpy().transpose((1, 2, 0)).copy()
            im += self.cfg.PIXEL_MEANS
            im = im.astype(np.uint8)  # xmin, ymax
            a, b, c, d = quat
            cv2_putText(im, (0, 20), rc.category, bgcolor=(255, 255, 255))
            text = '%.1f %.1f %.1f %.1f' % (a, b, c, d)
            cv2_putText(im, (0, 40), text, bgcolor=(255, 255, 255))
            cv2.imshow('im', im)
            cv2_wait()
            # pass
class Dataset_Example(Dataset_Base):
    """Minimal concrete dataset: each sample carries the index, category label,
    ground-truth quaternion, and the preprocessed image."""

    def __getitem__(self, idx):
        record = self.recs[idx]
        return dict(
            idx=idx,
            label=self.cate2ind[record.category],
            quat=record.so3.quaternion,
            data=self._get_image(record),
        )
if __name__ == '__main__':
    # Smoke test: iterate a sampled test split and visualize each mini-batch.
    def test_dataloader():
        ds = Dataset_Example(collection='test', sampling=0.2)
        loader = DataLoader(ds, batch_size=4, shuffle=False, num_workers=1)
        for batch in loader:
            ds._vis_minibatch(batch)
    test_dataloader()
| 8,723 | 38.654545 | 108 | py |
RPMG | RPMG-main/Pascal3D_Img/S3.3D_Rotation/lib/datasets/dataset_regQuatNet.py | """
@Author : Shuai Liao
"""
import numpy as np
from Dataset_Base import Dataset_Base, netcfg
import torch
from torch.utils.data import Dataset, DataLoader
import cv2
from basic.common import add_path, env, rdict, cv2_wait, cv2_putText
# ===============================================================
def pred2angle(a, e, t):
    """Convert (azimuth, elevation, tilt) from radians to degrees;
    azimuth is wrapped into [0, 360)."""
    azimuth = (a * 180. / np.pi) % 360
    elevation = e * 180. / np.pi
    tilt = t * 180. / np.pi
    return azimuth, elevation, tilt
def pred2angle_shift45(a, e, t):
    """Same as pred2angle, but undoes a 45-degree azimuth shift before wrapping."""
    # shift 45 degree back
    azimuth = (a * 180. / np.pi - 45) % 360
    return azimuth, e * 180. / np.pi, t * 180. / np.pi
class Dataset_regQuatNet(Dataset_Base):
    """Dataset for the quaternion-regression networks: each sample carries the
    index, category label, ground-truth quaternion, and preprocessed image."""

    def __init__(self, *args, **kwargs):
        Dataset_Base.__init__(self, *args, **kwargs)

    def __getitem__(self, idx):
        record = self.recs[idx]
        return dict(
            idx=idx,
            label=self.cate2ind[record.category],
            quat=record.so3.quaternion,
            data=self._get_image(record),
        )
# build class alias
# All three regression variants (direct / spherical-exp / spherical-flat)
# consume identical samples, so they share one Dataset implementation.
Dataset_reg_Direct = Dataset_regQuatNet
Dataset_reg_Sexp = Dataset_regQuatNet
Dataset_reg_Sflat = Dataset_regQuatNet
if __name__ == '__main__':
    np.random.seed(3)
    # Smoke test: build a sampled split, dump the sampled keys to a text file,
    # then iterate and visualize batches.
    # NOTE(review): Dataset_reg_Sexp requires a positional `category` argument
    # (see Dataset_Base.__init__); this call omits it and would raise TypeError.
    def test_dataloader(collection='test', sampling=0.2):
        dataset = Dataset_reg_Sexp(collection=collection, sampling=sampling)
        print(len(dataset.keys))
        anno_path = dataset.db_path
        sampling_file = anno_path + '/%s_sampling%.2f.txt' % (collection, sampling)
        with open(sampling_file, 'w') as f:
            f.write('\n'.join(dataset.keys))
        print('sampling_file:', sampling_file)
        #
        dataloader = DataLoader(dataset, batch_size=50,
                                shuffle=False, num_workers=1, sampler=None)
        for i_batch, sample_batched in enumerate(dataloader):
            print(sample_batched)
            dataset._vis_minibatch(sample_batched)
    test_dataloader()
| 1,914 | 27.58209 | 83 | py |
BVFSM | BVFSM-main/BVFSM.py | import matplotlib
import matplotlib.pyplot as plt
import torch
import hypergrad as hg
import numpy as np
from sklearn.model_selection import train_test_split
import torch.nn.functional as F
import copy
import time
import csv
import math
import os
import psutil as psutil
import argparse
import utils
import function
def train(args, lf=function.lf, uF=function.uF):
    """BVFSM training loop (bilevel value-function sequential method).

    Optimizes the upper-level variable h (x) and lower-level variable w (y)
    under lower-level objective `lf` and upper-level objective `uF`, using an
    auxiliary variable w_z that tracks the L2-regularized lower-level optimum
    and a log-barrier term coupling the two levels. Per-iteration metrics are
    appended to a timestamped CSV. Requires CUDA.
    """
    args.y_size = args.y_size  # NOTE(review): no-op self-assignment — presumably leftover; confirm and remove.
    a = args.a
    b = args.b
    x0 = args.x0
    y0 = args.y0
    d = 1
    seed = 1
    np.random.seed(seed)
    # Inner/outer loop counts and learning rates from CLI.
    z_loop = args.z_loop
    y_loop = args.y_loop
    x_loop = args.x_loop
    z_L2 = args.z_L2_reg
    y_L2 = args.y_L2_reg
    y_ln = args.y_ln_reg
    z_lr = args.z_lr
    y_lr = args.y_lr
    x_lr = args.x_lr
    TK = 1
    z0 = y0
    dc = args.decay
    dcr = 1.1
    total_time = 0
    total_hyper_time = 0
    log_path = "result_{}.csv".format(time.strftime("%Y_%m_%d_%H_%M_%S"))
    with open(log_path, 'a', encoding='utf-8', newline='') as f:
        csv_writer = csv.writer(f)
        csv_writer.writerow(
            ['z_loop{}-y_loop{}-z_L2{}-y_L2{}-y_ln{}-z_lr{}-y_lr{}-x_lr{}'.format(z_loop, y_loop, z_L2,
                                                                                 y_L2, y_ln, z_lr,
                                                                                 y_lr, x_lr),
             'd', 'x_itr', 'test loss', 'h_norm', 'step_time', 'total_time', 'x', 'y'])
    # Variables: w = lower-level y, h = upper-level x, w_z = auxiliary y for the
    # regularized lower-level value function.
    w = (float(y0) * torch.ones(args.y_size)).cuda().requires_grad_(True)
    h = (float(x0) * torch.ones(args.x_size)).cuda().requires_grad_(True)
    w_z = (float(z0) * torch.ones(args.y_size)).cuda().requires_grad_(True)
    w_opt = torch.optim.SGD([w], lr=y_lr)
    h_opt = torch.optim.Adam([h], lr=x_lr)
    w_z_opt = torch.optim.SGD([w_z], lr=z_lr)
    for x_itr in range(x_loop * TK):
        h_opt.zero_grad()
        step_start_time = time.time()
        lr_decay_rate = 1 / (1 ** (math.floor(x_itr / TK)))
        # Regularization-decay schedule selected by --decay.
        if dc == 'log':
            reg_decay_rate = 1 / (math.log(dcr * math.floor((x_itr + 1) / TK)))
        elif dc == 'power1':
            reg_decay_rate = 1 / (dcr ** math.floor((x_itr + 1) / TK))
        elif dc == 'power2':
            reg_decay_rate = 1 / (math.floor((x_itr + 1) / TK) ** dcr)
        elif dc == 'linear':
            reg_decay_rate = 1 / (math.floor((x_itr + 1) / TK) * dcr)
        else:
            assert 1
        w_opt.param_groups[0]['lr'] = w_opt.defaults['lr'] * lr_decay_rate
        h_opt.param_groups[0]['lr'] = h_opt.defaults['lr'] * lr_decay_rate
        w_z_opt.param_groups[0]['lr'] = w_z_opt.defaults['lr'] * lr_decay_rate
        # Inner loop 1: drive w_z toward the L2-regularized lower-level optimum.
        for z_itr in range(z_loop):
            w_z_opt.zero_grad()
            loss_z = lf(h, w_z) + z_L2 * reg_decay_rate * torch.norm(
                w_z) ** 2
            loss_z.backward()
            w_z_opt.step()
        # Inner loop 2: update w on upper-level objective + L2 + log-barrier
        # (barrier keeps lf(h,w) below the value-function estimate from w_z).
        for y_itr in range(y_loop):
            w_opt.zero_grad()
            loss_w_f = lf(h, w)
            loss_z = lf(h, w_z) + z_L2 * reg_decay_rate * torch.norm(
                w_z) ** 2
            loss_w_L2 = y_L2 * reg_decay_rate * torch.norm(w) ** 2
            loss_w_ln = y_ln * reg_decay_rate * torch.log(
                loss_w_f.detach() + args.y_size + 1e-8 + loss_z.detach() - loss_w_f)
            loss_w_ = uF(h, w)
            loss_w = loss_w_ + loss_w_L2 - loss_w_ln
            loss_w.backward()
            w_opt.step()
        low_time = time.time() - step_start_time
        hyper_time = time.time()
        # Outer step: update h; here loss_z stays attached so gradients flow
        # to h through the barrier term.
        h_opt.zero_grad()
        loss_w_f = lf(h, w)
        loss_z = lf(h, w_z) + z_L2 * reg_decay_rate * torch.norm(
            w_z) ** 2
        loss_h_ = uF(h, w)
        loss_h_L2 = y_L2 * reg_decay_rate * torch.norm(w) ** 2
        loss_h_ln = y_ln * reg_decay_rate * torch.log(loss_w_f.detach() + args.y_size + 1e-8 + loss_z - loss_w_f)
        loss_h = loss_h_ + loss_h_L2 - loss_h_ln
        loss_h.backward()
        h_opt.step()
        step_time = time.time() - step_start_time
        total_time += step_time
        total_hyper_time += (time.time() - hyper_time)
        if x_itr % TK == 0:
            with torch.no_grad():
                loss_test = uF(h, w)
            hnp = h.detach().cpu().numpy()
            wnp = w.detach().cpu().numpy()
            print(
                'd={:d},x_itr={:d},test loss={:.4f}, h_norm={:.4f},step_time={:.4f},total_time={:.4f},x={:.4f}'.format(
                    d, x_itr, loss_test.data, h.norm() / d,
                    step_time,
                    total_time, hnp[0]))
            with open(log_path, 'a', encoding='utf-8', newline='') as f:
                csv_writer = csv.writer(f)
                csv_writer.writerow(
                    [d, x_itr, loss_test.data, h.norm() / d,
                     step_time,
                     total_time, hnp[0]] + [ws for ws in utils.np_to_list(wnp)])
| 4,871 | 34.562044 | 123 | py |
BVFSM | BVFSM-main/utils.py |
import matplotlib
import matplotlib.pyplot as plt
import torch
import hypergrad as hg
import numpy as np
from sklearn.model_selection import train_test_split
import torch.nn.functional as F
import copy
import time
import csv
import math
import os
import psutil as psutil
import argparse
def np_to_list(arr):
    """Coerce `arr` into a CSV-serializable value.

    - numpy arrays become (nested) Python lists,
    - plain / NumPy integers become a one-element list,
    - anything else is returned unchanged.
    """
    if isinstance(arr, np.ndarray):
        return arr.tolist()
    # `np.int` was an alias of builtin int and was REMOVED in NumPy 1.24, so the
    # original `type(arr) in [np.int, np.int32, np.int64]` check raised
    # AttributeError for any non-array input. isinstance over (int, np.integer)
    # covers both Python and NumPy integer types; bool is excluded to preserve
    # the original pass-through behavior for booleans.
    if isinstance(arr, (int, np.integer)) and not isinstance(arr, bool):
        return [int(arr)]
    return arr
def show_memory_info(hint):
    """Print the current process's USS memory usage in MB, prefixed by `hint`."""
    proc = psutil.Process(os.getpid())
    used_mb = proc.memory_full_info().uss / 1024. / 1024
    print(f"{hint} memory used: {used_mb} MB ")
def loss_L2(parameters):
    """Sum of squared L2 norms of the given tensors (0 for an empty iterable)."""
    return sum((torch.norm(p, 2) ** 2 for p in parameters), 0)
def penalty(x):
    """Natural-log barrier/penalty term."""
    return torch.log(x)
def tpenalty(x, d):
    # Thresholded log penalty: only defined for x > d.
    # NOTE(review): when x <= d this falls through and implicitly returns None,
    # which would crash any arithmetic on the result — confirm callers
    # guarantee x > d.
    if x > d:
        return torch.log(x)
| 952 | 17.326923 | 55 | py |
BVFSM | BVFSM-main/BVFSM_constraint.py | #!/usr/bin/env python
# coding: utf-8
# In[1]:
# get_ipython().run_line_magic('matplotlib', 'inline')
import matplotlib
import matplotlib.pyplot as plt
import torch
import hypergrad as hg
import numpy as np
from sklearn.model_selection import train_test_split
import torch.nn.functional as F
import copy
import time
import csv
import math
import os
import psutil as psutil
def np_to_list(arr):
    """Coerce `arr` into a CSV-serializable value.

    - numpy arrays become (nested) Python lists,
    - plain / NumPy integers become a one-element list,
    - anything else is returned unchanged.
    """
    if isinstance(arr, np.ndarray):
        return arr.tolist()
    # `np.int` was an alias of builtin int and was REMOVED in NumPy 1.24, so the
    # original `type(arr) in [np.int, np.int32, np.int64]` check raised
    # AttributeError for any non-array input. isinstance over (int, np.integer)
    # covers both Python and NumPy integer types; bool is excluded to preserve
    # the original pass-through behavior for booleans.
    if isinstance(arr, (int, np.integer)) and not isinstance(arr, bool):
        return [int(arr)]
    return arr
def show_memory_info(hint):
    """Print the current process's USS memory usage in MB, prefixed by `hint`."""
    # Get the PID of the current process.
    pid = os.getpid()
    # psutil is a library for querying system/process information.
    p = psutil.Process(pid)
    info = p.memory_full_info()
    # uss = "unique set size": memory unique to this process, converted bytes -> MB.
    memory = info.uss/1024./1024
    print(f"{hint} memory used: {memory} MB ")
# Global device/precision switches for this script.
cuda = False
double_precision = False
# Build the default tensor type string, e.g. 'torch.FloatTensor' or
# 'torch.cuda.DoubleTensor', and install it as torch's default.
default_tensor_str = 'torch.cuda' if cuda else 'torch'
default_tensor_str += '.DoubleTensor' if double_precision else '.FloatTensor'
torch.set_default_tensor_type(default_tensor_str)
def frnp(x):
    """numpy array -> torch tensor, moved to GPU / cast to float32 per the
    module-level `cuda` / `double_precision` flags."""
    tensor = torch.from_numpy(x)
    if cuda:
        tensor = tensor.cuda()
    return tensor if double_precision else tensor.float()
def tonp(x, cuda=cuda):
    """torch tensor -> numpy array, detaching from the graph (and moving off
    the GPU when `cuda`)."""
    if cuda:
        return x.detach().cpu().numpy()
    return x.detach().numpy()
def loss_L2(parameters):
    """Sum of squared L2 norms over an iterable of tensors (0 when empty)."""
    return sum((torch.norm(p, 2) ** 2 for p in parameters), 0)
def penalty(x):
    """Natural-log barrier/penalty term."""
    return torch.log(x)
def tpenalty(x,d):
    # Thresholded log penalty: only defined for x > d.
    # NOTE(review): implicitly returns None when x <= d — confirm callers
    # always pass x > d.
    if x>d:
        return torch.log(x)
# Grid of initial values for the upper/lower variables x0, y0.
initial=[0,2,4,6,8,10]
# In[3]:
# Sweep: barrier weight y_ln x initializations x constraint handling.
for y_ln in [0.01]:
    for x0,y0 in zip(initial,initial):
        for con1 in range(1):
            # --- experiment hyperparameters (inner/outer loop sizes, LRs, regs) ---
            z_loop = 50
            y_loop=25
            tSize=2
            a=2
            b=2
            # x0=2
            # y0=2
            # con1=0
            d=1
            iu=2
            # synthetic data generation
            seed = 1
            n = 100
            val_perc = 0.5
            np.random.seed(seed)
            # z_loop = 50
            # y_loop = 25
            x_loop = 500
            z_L2 = 0.01
            y_L2 = 0.01
            # y_ln = 0.01
            z_lr = 0.01
            y_lr = 0.01
            x_lr = 0.01
            TK=1
            # x0=3.
            # y0=3.
            # z0=3.
            # a=0.
            # b=0.
            z0=y0
            # Regularization-decay schedule family (selected by iu) and rate.
            decay=['log','power1','power2','poly','linear']
            dc=decay[iu]
            dcr=1.1
            # Constraint-handling mode: log barrier, quadratic penalty, or none.
            conlist=['log','quad','None']
            con=conlist[con1]
            # In[4]:
            total_time = 0
            total_hyper_time = 0
            # CSV log file named after the full hyperparameter configuration.
            log_path = "x1_constraint{}_yln{}_yzloop{}_{}_tSize{}_dc{}{}_TK{}_xyz{}{}{}_ab{}{}._{}.csv".format(con,y_ln,y_loop,z_loop, tSize,dc,dcr,TK,x0,y0,z0,a,b, time.strftime("%Y_%m_%d_%H_%M_%S"))
            with open(log_path, 'a', encoding='utf-8', newline='') as f:
                csv_writer = csv.writer(f)
                csv_writer.writerow(
                    ['z_loop{}-y_loop{}-z_L2{}-y_L2{}-y_ln{}-z_lr{}-y_lr{}-x_lr{}'.format(z_loop, y_loop, z_L2,
                                                                                         y_L2, y_ln, z_lr,
                                                                                         y_lr, x_lr),
                     'd','x_itr','test loss', 'h_norm','step_time','total_time','x','gx','y'])
            # problem definition
            use_gpu = False
            # tSize=2000
            # Analytic reference quantities for the synthetic problem.
            Fmin=0
            xmin=[]
            xmind=0
            for i in range(tSize):
                Fmin=Fmin+(-np.pi/4/(i+1)-a)**2+(-np.pi/4/(i+1)-b)**2
                xmin.append(-np.pi/4/(i+1))
                xmind=xmind+(-np.pi/4/(i+1))**2
            # Variables: w = lower-level y, h = upper-level x, w_z = auxiliary y.
            w = (float(0)*torch.ones(tSize)).cuda().requires_grad_(True)
            h =(float(0)*torch.ones(1)).cuda().requires_grad_(True)
            w_z = (float(0)*torch.ones(tSize)).cuda().requires_grad_(True)
            w_opt = torch.optim.Adam([w], lr=y_lr)
            h_opt = torch.optim.Adam([h], lr=x_lr)
            w_z_opt = torch.optim.SGD([w_z], lr=z_lr)
            C = (float(1) * torch.ones(tSize)).cuda().requires_grad_(False)
            # Lower-level objective: sum_i sin(x + y_i - C_i).
            def lf(x,y):
                out = 0
                for i in range(tSize):
                    out = out + torch.sin((x + y[i] - C[i]))
                return out
            # Upper-level objective: ||x - a||^2 + ||y - a||^2.
            def uF(x,y):
                return torch.norm(x - a) ** 2 + torch.norm(y - a ) ** 2
            for x_itr in range(x_loop*TK):
                print('-'*50)
                h_opt.zero_grad()
                step_start_time = time.time()
                yhis=[]
                lr_decay_rate = 1 / (1 ** (math.floor(x_itr / TK)))
                # Decay of the regularization weights per the chosen schedule.
                if dc=='log':
                    reg_decay_rate = 1 / (math.log(dcr*math.floor((x_itr+1) / TK)))
                elif dc=='power1':
                    reg_decay_rate = 1 / (dcr ** math.floor((x_itr + 1) / TK))
                elif dc=='power2':
                    reg_decay_rate = 1 / (math.floor((x_itr + 1) / TK)**dcr)
                elif dc == 'linear':
                    reg_decay_rate = 1 / (math.floor((x_itr + 1) / TK) * dcr)
                else:
                    assert 1
                w_opt.param_groups[0]['lr'] = w_opt.defaults['lr'] * lr_decay_rate
                h_opt.param_groups[0]['lr'] = h_opt.defaults['lr'] * lr_decay_rate
                w_z_opt.param_groups[0]['lr'] = w_z_opt.defaults['lr'] * lr_decay_rate
                loss_z_l = 0
                # Inner loop 1: drive w_z toward the regularized lower-level optimum.
                for z_itr in range(z_loop):
                    w_z_opt.zero_grad()
                    loss_z = lf(h,w_z) + z_L2 * reg_decay_rate * torch.norm(
                        w_z) ** 2
                    loss_z.backward()
                    w_z_opt.step()
                if x_itr==466:
                    print('y={}'.format(w_z[0].item()))
                loss_y_l = 0
                wl = w
                # Inner loop 2: update w with barrier + (optional) constraint penalty.
                for y_itr in range(y_loop):
                    w_opt.zero_grad()
                    loss_w_f = lf(h,w)
                    loss_z = lf(h,w_z) + z_L2 * reg_decay_rate * torch.norm(
                        w_z) ** 2
                    loss_w_L2 = y_L2 * reg_decay_rate * torch.norm(w) ** 2
                    loss_w_ln = y_ln * reg_decay_rate * torch.log(loss_w_f.detach()+tSize+1e-7+loss_z.detach() - loss_w_f)
                    loss_w_ =uF(h,w)
                    if con=='log':
                        # Constraint (h+w)(h+w-1) <= 0 handled via a log barrier.
                        ls=(h.detach()+w)*(h.detach()+w-1)
                        # print(-(h.detach()+w)*(h.detach()+w-1)+ls.detach()+1e-1)
                        loss_w = loss_w_ + loss_w_L2 - loss_w_ln- y_ln/ reg_decay_rate *torch.sum(torch.log(-(h.detach()+w)*(h.detach()+w-1)+ls.detach()+2e-1))
                    elif con=='quad':
                        # Same constraint via a squared-hinge (quadratic) penalty.
                        loss_w = loss_w_ + loss_w_L2 - loss_w_ln + y_ln / reg_decay_rate * torch.sum(
                            torch.relu((h.detach() + w) * (h.detach() + w - 1)) ** 2)
                    else:
                        loss_w = loss_w_ + loss_w_L2 - loss_w_ln
                    loss_w.backward()
                    w_opt.step()
                    wl = w
                low_time = time.time() - step_start_time
                hyper_time = time.time()
                # Outer step: update h (loss_z stays attached so gradients flow to h).
                h_opt.zero_grad()
                loss_w_f = lf(h,w)
                loss_z = lf(h,w_z) + z_L2 * reg_decay_rate * torch.norm(
                    w_z) ** 2
                loss_h_ = uF(h,w)
                loss_h_L2 = y_L2 * reg_decay_rate * torch.norm(w) ** 2
                loss_h_ln = y_ln * reg_decay_rate * torch.log(loss_w_f.detach() +tSize+1e-8+ loss_z - loss_w_f)
                if con=='log':
                    ls = (h.detach() + w) * (h.detach() + w - 1)
                    loss_h = loss_h_ + loss_h_L2 - loss_h_ln- y_ln/ reg_decay_rate *torch.sum(torch.log(-(h+w)*(h+w-1)+ls.detach()+2e-1))#+ y_ln/ reg_decay_rate *torch.relu((h)*(h-1))**2
                elif con=='quad':
                    loss_h = loss_h_ + loss_h_L2 - loss_h_ln+ y_ln/ reg_decay_rate *torch.sum(torch.relu((h+w)*(h+w-1)))**2#+ y_ln/ reg_decay_rate *torch.relu((h)*(h-1))**2
                else:
                    loss_h = loss_h_ + loss_h_L2 - loss_h_ln
                # Diagnostics: gradient of the full loss and of its parts w.r.t. h.
                grad_h=torch.autograd.grad(loss_h,[h],retain_graph=True)
                grad_h_ = torch.autograd.grad(loss_h_, [h], retain_graph=True, allow_unused=True)
                grad_h_L2 = torch.autograd.grad(loss_h_L2, [h], retain_graph=True, allow_unused=True)
                grad_h_ln = torch.autograd.grad(loss_h_ln, [h], retain_graph=True, allow_unused=True)
                loss_h.backward()
                h_opt.step()
                step_time = time.time() - step_start_time
                total_time += step_time
                total_hyper_time += (time.time() - hyper_time)
                if x_itr % TK == 0:
                    with torch.no_grad():
                        loss_test = uF(h,w)
                    for g in grad_h:
                        gnp=g.detach().cpu().numpy()
                    hnp=h.detach().cpu().numpy()
                    wnp=w.detach().cpu().numpy()
                    print(
                        'd={:d},x_itr={:d},test loss={:.4f}, h_norm={:.4f},step_time={:.4f},total_time={:.4f},x={:.4f}'.format(
                            d, x_itr, loss_test.data, h.norm() / d,
                            step_time,
                            total_time,hnp[0]))
                    with open(log_path, 'a', encoding='utf-8', newline='') as f:
                        csv_writer = csv.writer(f)
                        csv_writer.writerow(
                            [d, x_itr, loss_test.data, h.norm() / d,
                             step_time,
                             total_time,hnp[0],gnp[0]]+[ws for ws in np_to_list(wnp)])
| 9,895 | 36.06367 | 200 | py |
BVFSM | BVFSM-main/demo.py | import matplotlib
import matplotlib.pyplot as plt
import torch
import hypergrad as hg
import numpy as np
from sklearn.model_selection import train_test_split
import torch.nn.functional as F
import copy
import time
import csv
import math
import os
import psutil as psutil
import argparse
import utils
import BVFSM
# CLI configuration for the BVFSM demo problem.
parser = argparse.ArgumentParser()
parser.add_argument('--x_size', type=int, default=1)
parser.add_argument('--y_size', type=int, default=2)
parser.add_argument('--z_loop', type=int, default=50)
parser.add_argument('--y_loop', type=int, default=25)
parser.add_argument('--x_loop', type=int, default=500)
parser.add_argument('--z_lr', type=float, default=0.01)
parser.add_argument('--y_lr', type=float, default=0.01)
parser.add_argument('--x_lr', type=float, default=0.01)
parser.add_argument('--z_L2_reg', type=float, default=0.01)
parser.add_argument('--y_L2_reg', type=float, default=0.01)
parser.add_argument('--y_ln_reg', type=float, default=0.001)
parser.add_argument('--x0', type=float, default=0.)
parser.add_argument('--y0', type=float, default=0.)
parser.add_argument('--a', type=float, default=2.)
parser.add_argument('--b', type=float, default=2.)
parser.add_argument('--c', type=float, default=2.)
parser.add_argument('--decay', type=str, default='log', help='log, power1, power2, poly, linear')
args = parser.parse_args()
# Problem constants used by the objectives below; C lives on the GPU.
a = args.a
b = args.b
C = (float(args.c) * torch.ones(args.y_size)).cuda().requires_grad_(False)
def lf(x, y):
    """Lower-level objective: sum of sin(x + y_i - C_i) over the y components."""
    terms = (torch.sin(x + y[i] - C[i]) for i in range(args.y_size))
    return sum(terms)
def uF(x, y):
    """Upper-level objective: squared distance of x from a plus that of y from a + C."""
    x_term = torch.norm(x - a) ** 2
    y_term = torch.norm(y - a - C) ** 2
    return x_term + y_term
# Run BVFSM bi-level training with the configured objectives.
BVFSM.train(args,lf,uF)
| 1,684 | 26.177419 | 97 | py |
BVFSM | BVFSM-main/BVFSM_PBO.py |
import matplotlib
import matplotlib.pyplot as plt
import torch
import hypergrad as hg
import numpy as np
from sklearn.model_selection import train_test_split
import torch.nn.functional as F
import copy
import time
import csv
import math
import os
import psutil as psutil
def show_memory_info(hint):
    """Print this process's USS memory usage in MB, prefixed with *hint*."""
    process = psutil.Process(os.getpid())
    used_mb = process.memory_full_info().uss / 1024. / 1024
    print(f"{used_mb and hint} memory used: {used_mb} MB " if False else f"{hint} memory used: {used_mb} MB ")
# Global device/precision switches for this script.
cuda = False
double_precision = False
# Build the default tensor type string, e.g. 'torch.FloatTensor' or
# 'torch.cuda.DoubleTensor', and install it globally.
default_tensor_str = 'torch.cuda' if cuda else 'torch'
default_tensor_str += '.DoubleTensor' if double_precision else '.FloatTensor'
torch.set_default_tensor_type(default_tensor_str)
def frnp(x):
    """Convert a numpy array to a torch tensor on the configured device/precision."""
    tensor = torch.from_numpy(x)
    if cuda:
        tensor = tensor.cuda()
    return tensor if double_precision else tensor.float()
def tonp(x, cuda=cuda):
    """Detach a tensor and return it as a numpy array, moving off the GPU if needed."""
    detached = x.detach()
    return detached.cpu().numpy() if cuda else detached.numpy()
def loss_L2(parameters):
    """Sum of squared L2 norms over an iterable of tensors (0 for an empty iterable)."""
    return sum(torch.norm(w, 2) ** 2 for w in parameters)
def penalty(x):
    """Natural-log penalty value of x."""
    value = torch.log(x)
    return value
def tpenalty(x,d):
    # Truncated log penalty: log(x) only above the threshold d.
    # NOTE(review): implicitly returns None when x <= d — no caller is visible
    # in this file, confirm that is the intended contract.
    if x>d:
        return torch.log(x)
# Flat experiment driver: sweeps decay schedule (iu), objective constants
# (a, b) and initial points (x0, y0), then runs the value-function bi-level
# optimization, logging progress to a CSV file under PBO\.
save_log=True
initial=[0]
for iu in [2]:
    for a,b in zip([2],[2]):
        for x0,y0 in zip([1],[7]):
            d=1
            tSize=2
            # synthetic data generation
            seed = 1
            n = 100
            val_perc = 0.5
            np.random.seed(seed)
            # Inner/outer loop counts, regularization weights and step sizes.
            z_loop = 50
            y_loop = 25
            x_loop = 500
            z_L2 = 0.001
            y_L2 = 0.001
            y_ln = 0.7
            z_ln = 0.1
            z_lr = 0.01
            y_lr = 0.01
            x_lr = 0.01
            TK=1
            z0=y0
            decay=['log','power1','power2','poly','linear']
            dc=decay[iu]
            dcr=1.001
            # In[4]:
            total_time = 0
            total_hyper_time = 0
            if save_log:
                log_path = "PBO\\x1_nonconvex_dc{}{}_TK{}_xyz{}{}{}_ab{}{}._{}.csv".format( dc,dcr,TK,x0,y0,z0,a,b, time.strftime("%Y_%m_%d_%H_%M_%S"))
                with open(log_path, 'a', encoding='utf-8', newline='') as f:
                    csv_writer = csv.writer(f)
                    csv_writer.writerow(
                        ['z_loop{}-y_loop{}-z_L2{}-y_L2{}-y_ln{}-z_lr{}-y_lr{}-x_lr{}'.format(z_loop, y_loop, z_L2,
                                                                                             y_L2, y_ln, z_lr,
                                                                                             y_lr, x_lr),
                         'd','x_itr','test loss', 'h_norm','step_time','total_time','x','y','x+y'])
            use_gpu = False
            # Analytic reference minimum of the upper objective (per component).
            Fmin=0
            xmin=[]
            xmind=0
            for i in range(tSize):
                Fmin=Fmin+(-np.pi/4/(i+1)-a)**2+(-np.pi/4/(i+1)-b)**2
                xmin.append(-np.pi/4/(i+1))
                xmind=xmind+(-np.pi/4/(i+1))**2
            # w: lower-level variable y; h: upper-level variable x;
            # w_z: auxiliary variable for the value-function estimate.
            w = (float(y0)*torch.ones(tSize)).cuda().requires_grad_(True)
            h =(float(x0)*torch.ones(1)).cuda().requires_grad_(True)
            w_z = (float(y0)*torch.ones(tSize)).cuda().requires_grad_(True)
            w_opt = torch.optim.SGD([w], lr=y_lr)
            h_opt = torch.optim.Adam([h], lr=x_lr)
            w_z_opt = torch.optim.SGD([w_z], lr=z_lr)
            C = (float(2) * torch.ones(tSize)).cuda().requires_grad_(False)
            def lf(x,y):
                # Lower-level objective: sum of sin(x + y_i - C_i).
                out = 0
                for i in range(tSize):
                    out = out + torch.sin((x + y[i] - C[i]))
                # print(out)
                return out
            def uF(x,y):
                # NOTE(review): the y term is SUBTRACTED here while demo.py adds
                # it — confirm the sign is intended for this experiment.
                return torch.norm(x - a) ** 2 - torch.norm(y - a - C) ** 2
            print(uF(float(-0.42)*torch.ones(1).cuda(),float(7.14)*torch.ones(tSize).cuda()))
            print(lf(float(-0.42)*torch.ones(1).cuda(),float(7.14)*torch.ones(tSize).cuda()))
            # z_loss_min=1e-6
            for x_itr in range(x_loop*TK):
                h_opt.zero_grad()
                step_start_time = time.time()
                yhis=[]
                # lr decay is effectively 1 (base 1); reg decay follows `dc`.
                lr_decay_rate = 1 / (1 ** (math.floor(x_itr / TK)))
                if dc=='log':
                    reg_decay_rate = 1 / (math.log(dcr*math.floor((x_itr+1) / TK)))
                elif dc=='power1':
                    reg_decay_rate = 1 / (dcr ** math.floor((x_itr + 1) / TK))
                elif dc=='power2':
                    reg_decay_rate = 1 / (math.floor((x_itr + 1) / TK)**dcr)
                elif dc == 'linear':
                    reg_decay_rate = 1 / (math.floor((x_itr + 1) / TK) * dcr)
                else:
                    # NOTE(review): 'poly' falls through here; `assert 1` is a
                    # no-op and reg_decay_rate would be undefined on the first
                    # iteration — confirm 'poly' is intentionally unsupported.
                    assert 1
                w_opt.param_groups[0]['lr'] = w_opt.defaults['lr'] * lr_decay_rate
                h_opt.param_groups[0]['lr'] = h_opt.defaults['lr'] * lr_decay_rate
                w_z_opt.param_groups[0]['lr'] = w_z_opt.defaults['lr'] * lr_decay_rate
                loss_z_l = 0
                # Inner z-loop: approximate the lower-level value function.
                for z_itr in range(z_loop):
                    w_z_opt.zero_grad()
                    loss_z = lf(h,w_z) + z_L2 * reg_decay_rate * torch.norm(
                        w_z) ** 2
                    loss_z.backward()
                    # print(loss_z_l-loss_z.item())
                    w_z_opt.step()
                loss_y_l = 0
                wl = w
                # print(w_z.item())
                # Inner y-loop: update w against the barrier-regularized loss.
                for y_itr in range(y_loop):
                    w_opt.zero_grad()
                    loss_w_f = lf(h,w)
                    loss_z = lf(h,w_z) + z_L2 * reg_decay_rate * torch.norm(
                        w_z) ** 2
                    loss_w_L2 = y_L2 * reg_decay_rate * torch.norm(w) ** 2
                    # Log-barrier keeping lf(h,w) below the value-function estimate.
                    loss_w_ln = y_ln * torch.log(loss_w_f.detach()+tSize+1e-8+loss_z.detach() - loss_w_f)
                    loss_w_ =uF(h,w)
                    loss_w = -loss_w_ + loss_w_L2 - loss_w_ln
                    grad_w_ = torch.autograd.grad(loss_w_, [w], retain_graph=True, allow_unused=True)
                    grad_w_L2 = torch.autograd.grad(loss_w_L2, [w], retain_graph=True, allow_unused=True)
                    grad_w_ln = torch.autograd.grad(loss_w_ln, [w], retain_graph=True, allow_unused=True)
                    loss_w.backward()
                    torch.nn.utils.clip_grad_norm_([w],10)
                    w_opt.step()
                    wl = w
                    yhis.append(w[0].item())
                low_time = time.time() - step_start_time
                hyper_time = time.time()
                # Outer step: update the upper-level variable h.
                h_opt.zero_grad()
                loss_w_f = lf(h,w)
                loss_z = lf(h,w_z) + z_L2 * reg_decay_rate * torch.norm(
                    w_z) ** 2
                loss_h_ = uF(h,w)
                loss_h_L2 = y_L2 * reg_decay_rate * torch.norm(w) ** 2
                loss_h_ln = y_ln * torch.log(loss_w_f.detach()+tSize+1e-8+ loss_z - loss_w_f)
                loss_h = loss_h_ + loss_h_ln
                grad_h=torch.autograd.grad(loss_h,[h],retain_graph=True)
                grad_h_ = torch.autograd.grad(loss_h_, [h], retain_graph=True, allow_unused=True)
                grad_h_L2 = torch.autograd.grad(loss_h_L2, [h], retain_graph=True, allow_unused=True)
                grad_h_ln = torch.autograd.grad(loss_h_ln, [h], retain_graph=True, allow_unused=True)
                loss_h.backward()
                torch.nn.utils.clip_grad_norm_([h], 100)
                # print(loss_L2([grad_h]))
                h_opt.step()
                step_time = time.time() - step_start_time
                total_time += step_time
                total_hyper_time += (time.time() - hyper_time)
                # Periodic evaluation and CSV logging.
                if x_itr % TK == 0:
                    with torch.no_grad():
                        loss_test = uF(h,w)
                        loss_train = lf(h,w)
                    print(
                        'd={:d},x_itr={:d},F={:.2f},x={:.2f},y={:.2f},x+y={:.2f},gw_={:.2f}, gw_L2={:.2f}, gw_ln=={:.2f},gh={:.2f}, gh_={:.2f}, gh_ln=={:.2f}'.format(
                            d, x_itr, loss_test.data,
                            h.item(),w[0].item(),h.item()+w[0].item(),grad_w_[0][0].item(),grad_w_L2[0][0].item(),grad_w_ln[0][0].item(),grad_h[0].item(),grad_h_[0].item(),grad_h_ln[0].item()))
                    if save_log:
                        with open(log_path, 'a', encoding='utf-8', newline='') as f:
                            csv_writer = csv.writer(f)
                            csv_writer.writerow(
                                [d, x_itr, loss_test.data, h.norm() / d,
                                 step_time,
                                 total_time,h.item(),w[0].item(),h.item()+w[0].item()]+[yh for yh in yhis])
| 8,740 | 36.515021 | 197 | py |
deep-speaker | deep-speaker-master/setup.py | import os
import platform
from pathlib import Path
from setuptools import setup
# On Apple Silicon the stock 'tensorflow' wheel is unavailable; use the
# macOS-specific package and build grpc against system libraries.
tensorflow = 'tensorflow'
if platform.system() == 'Darwin' and platform.processor() == 'arm':
    tensorflow = 'tensorflow-macos'
    # https://github.com/grpc/grpc/issues/25082
    os.environ['GRPC_PYTHON_BUILD_SYSTEM_OPENSSL'] = '1'
    os.environ['GRPC_PYTHON_BUILD_SYSTEM_ZLIB'] = '1'
install_requires = [
    'numpy',
    tensorflow,
    'natsort',
    'librosa',
    'dill',
    'python_speech_features',
    'tqdm',
    'click',
    'pandas',
    'matplotlib',
    'keras',
    'numba'
]
setup(
    name='deep-speaker',
    version='1.0',
    description='Deep Speaker',
    author='Philippe Remy',
    license='MIT',
    long_description_content_type='text/markdown',
    # Path.read_text closes the file and pins the encoding; the previous
    # bare open('README.md').read() leaked the handle and used the locale
    # default encoding.
    long_description=Path('README.md').read_text(encoding='utf-8'),
    packages=['deep_speaker'],
    install_requires=install_requires
)
| 853 | 20.897436 | 67 | py |
deep-speaker | deep-speaker-master/cli.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import logging
import os
import click
from deep_speaker.audio import Audio
from deep_speaker.batcher import KerasFormatConverter
from deep_speaker.constants import SAMPLE_RATE, NUM_FRAMES
from deep_speaker.test import test
from deep_speaker.train import start_training
from deep_speaker.utils import ClickType as Ct, ensures_dir
from deep_speaker.utils import init_pandas
# Module-level logger; handlers are installed by the `cli` group callback.
logger = logging.getLogger(__name__)
# CLI version string reported by the `version` command.
VERSION = '3.0a'
@click.group()
def cli():
    # Root click group: configure logging and pandas display defaults before
    # any sub-command runs. (Deliberately no docstring: click would surface
    # it as --help text.)
    logging.basicConfig(format='%(asctime)12s - %(levelname)s - %(message)s', level=logging.INFO)
    init_pandas()
@cli.command('version', short_help='Prints the version.')
def version():
    # Print the CLI version string.
    message = f'Version is {VERSION}.'
    print(message)
@cli.command('build-mfcc-cache', short_help='Build audio cache.')
@click.option('--working_dir', required=True, type=Ct.output_dir())
@click.option('--audio_dir', default=None)
@click.option('--sample_rate', default=SAMPLE_RATE, show_default=True, type=int)
def build_audio_cache(working_dir, audio_dir, sample_rate):
    # Pre-compute the MFCC/audio cache under `working_dir`; the audio source
    # defaults to the LibriSpeech folder inside it.
    ensures_dir(working_dir)
    resolved_audio_dir = audio_dir if audio_dir is not None else os.path.join(working_dir, 'LibriSpeech')
    Audio(cache_dir=working_dir, audio_dir=resolved_audio_dir, sample_rate=sample_rate)
@cli.command('build-keras-inputs', short_help='Build inputs to Keras.')
@click.option('--working_dir', required=True, type=Ct.input_dir())
@click.option('--counts_per_speaker', default='600,100', show_default=True, type=str)  # train,test
def build_keras_inputs(working_dir, counts_per_speaker):
    # counts_per_speaker: e.g. '600,100' generates, per speaker, 600 training
    # samples and 100 test samples. One sample is 160 frames by default
    # (roughly 1.6 seconds).
    counts = list(map(int, counts_per_speaker.split(',')))
    converter = KerasFormatConverter(working_dir)
    converter.generate(max_length=NUM_FRAMES, counts_per_speaker=counts)
    converter.persist_to_disk()
@cli.command('test-model', short_help='Test a Keras model.')
@click.option('--working_dir', required=True, type=Ct.input_dir())
@click.option('--checkpoint_file', required=True, type=Ct.input_file())
def test_model(working_dir, checkpoint_file=None):
    # Evaluate a trained checkpoint on the speaker-verification test split.
    # Reference runs and their metrics:
    # export CUDA_VISIBLE_DEVICES=0; python cli.py test-model
    # --working_dir /home/philippe/ds-test/triplet-training/
    # --checkpoint_file ../ds-test/checkpoints-softmax/ResCNN_checkpoint_102.h5
    # f-measure = 0.789, true positive rate = 0.733, accuracy = 0.996, equal error rate = 0.043
    # export CUDA_VISIBLE_DEVICES=0; python cli.py test-model
    # --working_dir /home/philippe/ds-test/triplet-training/
    # --checkpoint_file ../ds-test/checkpoints-triplets/ResCNN_checkpoint_175.h5
    # f-measure = 0.849, true positive rate = 0.798, accuracy = 0.997, equal error rate = 0.025
    test(working_dir, checkpoint_file)
@cli.command('train-model', short_help='Train a Keras model.')
@click.option('--working_dir', required=True, type=Ct.input_dir())
@click.option('--pre_training_phase/--no_pre_training_phase', default=False, show_default=True)
def train_model(working_dir, pre_training_phase):
    # Train either the softmax pre-training phase or the triplet phase.
    # Historical reference results kept below.
    # PRE TRAINING
    # commit a5030dd7a1b53cd11d5ab7832fa2d43f2093a464
    # Merge: a11d13e b30e64e
    # Author: Philippe Remy <premy.enseirb@gmail.com>
    # Date: Fri Apr 10 10:37:59 2020 +0900
    # LibriSpeech train-clean-data360 (600, 100). 0.985 on test set (enough for pre-training).
    # TRIPLET TRAINING
    # [...]
    # Epoch 175/1000
    # 2000/2000 [==============================] - 919s 459ms/step - loss: 0.0077 - val_loss: 0.0058
    # Epoch 176/1000
    # 2000/2000 [==============================] - 917s 458ms/step - loss: 0.0075 - val_loss: 0.0059
    # Epoch 177/1000
    # 2000/2000 [==============================] - 927s 464ms/step - loss: 0.0075 - val_loss: 0.0059
    # Epoch 178/1000
    # 2000/2000 [==============================] - 948s 474ms/step - loss: 0.0073 - val_loss: 0.0058
    start_training(working_dir, pre_training_phase)
# Script entry point: dispatch to the click command group.
if __name__ == '__main__':
    cli()
| 4,089 | 39.9 | 101 | py |
deep-speaker | deep-speaker-master/deep_speaker/batcher.py | import json
import logging
import os
from collections import deque, Counter
from random import choice
from time import time
import dill
import numpy as np
from tqdm import tqdm
from deep_speaker.audio import pad_mfcc, Audio
from deep_speaker.constants import NUM_FRAMES, NUM_FBANKS
from deep_speaker.conv_models import DeepSpeakerModel
from deep_speaker.utils import ensures_dir, load_pickle, load_npy, train_test_sp_to_utt
# Module-level logger (configured by the application entry point).
logger = logging.getLogger(__name__)
def extract_speaker(utt_file):
    """Return the speaker id encoded in an utterance file path.

    Cached utterance files are named '<speaker>_<rest>'; the speaker id is
    the basename part before the first underscore.
    """
    # os.path.basename handles the OS path separator; the previous
    # split('/') returned the wrong value on Windows-style paths, while for
    # '/'-separated paths the result is unchanged.
    return os.path.basename(utt_file).split('_')[0]
def sample_from_mfcc(mfcc, max_length):
    """Crop (random start) or pad an MFCC matrix to `max_length` frames and
    append a trailing channel axis."""
    if mfcc.shape[0] < max_length:
        window = pad_mfcc(mfcc, max_length)
    else:
        start = choice(range(0, len(mfcc) - max_length + 1))
        window = mfcc[start:start + max_length]
    return np.expand_dims(window, axis=-1)
def sample_from_mfcc_file(utterance_file, max_length):
    """Load a saved MFCC (.npy) and crop/pad it to `max_length` frames."""
    return sample_from_mfcc(np.load(utterance_file), max_length)
class KerasFormatConverter:
    """Converts the cached MFCC utterances into Keras-ready numpy matrices.

    Reads/writes kx_*/ky_* arrays and the speaker index mapping under
    `<working_dir>/keras-inputs`.
    """
    def __init__(self, working_dir, load_test_only=False):
        self.working_dir = working_dir
        self.output_dir = os.path.join(self.working_dir, 'keras-inputs')
        ensures_dir(self.output_dir)
        # May be None on first run; rebuilt from the audio cache below.
        self.categorical_speakers = load_pickle(os.path.join(self.output_dir, 'categorical_speakers.pkl'))
        if not load_test_only:
            self.kx_train = load_npy(os.path.join(self.output_dir, 'kx_train.npy'))
            self.ky_train = load_npy(os.path.join(self.output_dir, 'ky_train.npy'))
        self.kx_test = load_npy(os.path.join(self.output_dir, 'kx_test.npy'))
        self.ky_test = load_npy(os.path.join(self.output_dir, 'ky_test.npy'))
        self.audio = Audio(cache_dir=self.working_dir, audio_dir=None)
        if self.categorical_speakers is None:
            self.categorical_speakers = SparseCategoricalSpeakers(self.audio.speaker_ids)
    def persist_to_disk(self):
        """Write the speaker mapping (dill) and the four numpy matrices to output_dir."""
        with open(os.path.join(self.output_dir, 'categorical_speakers.pkl'), 'wb') as w:
            dill.dump(self.categorical_speakers, w)
        np.save(os.path.join(self.output_dir, 'kx_train.npy'), self.kx_train)
        np.save(os.path.join(self.output_dir, 'kx_test.npy'), self.kx_test)
        np.save(os.path.join(self.output_dir, 'ky_train.npy'), self.ky_train)
        np.save(os.path.join(self.output_dir, 'ky_test.npy'), self.ky_test)
    def generate_per_phase(self, max_length=NUM_FRAMES, num_per_speaker=3000, is_test=False):
        """Build (kx, ky) for one phase by sampling utterances per speaker (with replacement)."""
        # train OR test.
        num_speakers = len(self.audio.speaker_ids)
        sp_to_utt = train_test_sp_to_utt(self.audio, is_test)
        # 64 fbanks 1 channel(s).
        # float32
        kx = np.zeros((num_speakers * num_per_speaker, max_length, NUM_FBANKS, 1), dtype=np.float32)
        ky = np.zeros((num_speakers * num_per_speaker, 1), dtype=np.float32)
        desc = f'Converting to Keras format [{"test" if is_test else "train"}]'
        for i, speaker_id in enumerate(tqdm(self.audio.speaker_ids, desc=desc)):
            utterances_files = sp_to_utt[speaker_id]
            for j, utterance_file in enumerate(np.random.choice(utterances_files, size=num_per_speaker, replace=True)):
                self.load_into_mat(utterance_file, self.categorical_speakers, speaker_id, max_length, kx, ky,
                                   i * num_per_speaker + j)
        return kx, ky
    def generate(self, max_length=NUM_FRAMES, counts_per_speaker=(3000, 500)):
        """Generate both train and test matrices and store them on the instance."""
        kx_train, ky_train = self.generate_per_phase(max_length, counts_per_speaker[0], is_test=False)
        kx_test, ky_test = self.generate_per_phase(max_length, counts_per_speaker[1], is_test=True)
        logger.info(f'kx_train.shape = {kx_train.shape}')
        logger.info(f'ky_train.shape = {ky_train.shape}')
        logger.info(f'kx_test.shape = {kx_test.shape}')
        logger.info(f'ky_test.shape = {ky_test.shape}')
        self.kx_train, self.ky_train, self.kx_test, self.ky_test = kx_train, ky_train, kx_test, ky_test
    @staticmethod
    def load_into_mat(utterance_file, categorical_speakers, speaker_id, max_length, kx, ky, i):
        """Fill row `i` of kx/ky with one sampled utterance and its speaker index."""
        kx[i] = sample_from_mfcc_file(utterance_file, max_length)
        ky[i] = categorical_speakers.get_index(speaker_id)
class SparseCategoricalSpeakers:
    """Maps each speaker id to a stable integer index (sorted order)."""
    def __init__(self, speakers_list):
        self.speaker_ids = sorted(speakers_list)
        assert len(set(self.speaker_ids)) == len(self.speaker_ids)  # all unique.
        self.map = {speaker: index for index, speaker in enumerate(self.speaker_ids)}
    def get_index(self, speaker_id):
        """Return the integer index assigned to `speaker_id` (KeyError if unknown)."""
        return self.map[speaker_id]
class OneHotSpeakers:
    """One-hot encoding of a fixed speaker list.

    Speakers are sorted so the index assignment is deterministic across runs.
    """
    def __init__(self, speakers_list):
        # pylint: disable=E0611,E0401
        from tensorflow.keras.utils import to_categorical
        self.speaker_ids = sorted(speakers_list)
        self.int_speaker_ids = list(range(len(self.speaker_ids)))
        # Bidirectional speaker-id <-> index lookup tables (dict(zip(...))
        # replaces the previous dict-of-tuples construction).
        self.map_speakers_to_index = dict(zip(self.speaker_ids, self.int_speaker_ids))
        self.map_index_to_speakers = dict(zip(self.int_speaker_ids, self.speaker_ids))
        self.speaker_categories = to_categorical(self.int_speaker_ids, num_classes=len(self.speaker_ids))
    def get_speaker_from_index(self, index):
        """Return the speaker id assigned to integer `index`."""
        return self.map_index_to_speakers[index]
    def get_one_hot(self, speaker_id):
        """Return the one-hot row vector for `speaker_id`."""
        index = self.map_speakers_to_index[speaker_id]
        return self.speaker_categories[index]
class LazyTripletBatcher:
    """Produces triplet batches on the fly from the audio cache.

    Keeps a rolling history of recent embeddings so that hard positives and
    hard negatives can be mined against the current model.
    """
    def __init__(self, working_dir: str, max_length: int, model: DeepSpeakerModel):
        self.working_dir = working_dir
        self.audio = Audio(cache_dir=working_dir)
        logger.info(f'Picking audio from {working_dir}.')
        self.sp_to_utt_train = train_test_sp_to_utt(self.audio, is_test=False)
        self.sp_to_utt_test = train_test_sp_to_utt(self.audio, is_test=True)
        self.max_length = max_length
        self.model = model
        # History sizing: nb_speakers speakers x nb_per_speaker utterances,
        # retained over history_length refreshes.
        self.nb_per_speaker = 2
        self.nb_speakers = 640
        self.history_length = 4
        self.history_every = 100  # batches.
        self.total_history_length = self.nb_speakers * self.nb_per_speaker * self.history_length  # 25,600
        self.metadata_train_speakers = Counter()
        self.metadata_output_file = os.path.join(self.working_dir, 'debug_batcher.json')
        self.history_embeddings_train = deque(maxlen=self.total_history_length)
        self.history_utterances_train = deque(maxlen=self.total_history_length)
        self.history_model_inputs_train = deque(maxlen=self.total_history_length)
        # Array snapshots of the deques above (refreshed in update_triplets_history).
        self.history_embeddings = None
        self.history_utterances = None
        self.history_model_inputs = None
        self.batch_count = 0
        for _ in tqdm(range(self.history_length), desc='Initializing the batcher'):  # init history.
            self.update_triplets_history()
    def update_triplets_history(self):
        """Embed a fresh random speaker sample with the current model and roll it into the history."""
        model_inputs = []
        speakers = list(self.audio.speakers_to_utterances.keys())
        np.random.shuffle(speakers)
        selected_speakers = speakers[: self.nb_speakers]
        embeddings_utterances = []
        for speaker_id in selected_speakers:
            train_utterances = self.sp_to_utt_train[speaker_id]
            for selected_utterance in np.random.choice(a=train_utterances, size=self.nb_per_speaker, replace=False):
                mfcc = sample_from_mfcc_file(selected_utterance, self.max_length)
                embeddings_utterances.append(selected_utterance)
                model_inputs.append(mfcc)
        embeddings = self.model.m.predict(np.array(model_inputs))
        assert embeddings.shape[-1] == 512
        embeddings = np.reshape(embeddings, (len(selected_speakers), self.nb_per_speaker, 512))
        self.history_embeddings_train.extend(list(embeddings.reshape((-1, 512))))
        self.history_utterances_train.extend(embeddings_utterances)
        self.history_model_inputs_train.extend(model_inputs)
        # reason: can't index a deque with a np.array.
        self.history_embeddings = np.array(self.history_embeddings_train)
        self.history_utterances = np.array(self.history_utterances_train)
        self.history_model_inputs = np.array(self.history_model_inputs_train)
        # Dump how often each speaker appeared in training batches (debugging aid).
        with open(self.metadata_output_file, 'w') as w:
            json.dump(obj=dict(self.metadata_train_speakers), fp=w, indent=2)
    def get_batch(self, batch_size, is_test=False):
        """Return a random triplet batch for the requested split."""
        return self.get_batch_test(batch_size) if is_test else self.get_random_batch(batch_size, is_test=False)
    def get_batch_test(self, batch_size):
        """Return a random triplet batch from the test split."""
        return self.get_random_batch(batch_size, is_test=True)
    def get_random_batch(self, batch_size, is_test=False):
        """Build batch_size//3 random (anchor, positive, negative) triplets."""
        sp_to_utt = self.sp_to_utt_test if is_test else self.sp_to_utt_train
        speakers = list(self.audio.speakers_to_utterances.keys())
        anchor_speakers = np.random.choice(speakers, size=batch_size // 3, replace=False)
        anchor_utterances = []
        positive_utterances = []
        negative_utterances = []
        for anchor_speaker in anchor_speakers:
            negative_speaker = np.random.choice(list(set(speakers) - {anchor_speaker}), size=1)[0]
            assert negative_speaker != anchor_speaker
            pos_utterances = np.random.choice(sp_to_utt[anchor_speaker], 2, replace=False)
            neg_utterance = np.random.choice(sp_to_utt[negative_speaker], 1, replace=True)[0]
            anchor_utterances.append(pos_utterances[0])
            positive_utterances.append(pos_utterances[1])
            negative_utterances.append(neg_utterance)
        # anchor and positive should have difference utterances (but same speaker!).
        anc_pos = np.array([positive_utterances, anchor_utterances])
        assert np.all(anc_pos[0, :] != anc_pos[1, :])
        assert np.all(np.array([extract_speaker(s) for s in anc_pos[0, :]]) == np.array(
            [extract_speaker(s) for s in anc_pos[1, :]]))
        pos_neg = np.array([positive_utterances, negative_utterances])
        assert np.all(pos_neg[0, :] != pos_neg[1, :])
        assert np.all(np.array([extract_speaker(s) for s in pos_neg[0, :]]) != np.array(
            [extract_speaker(s) for s in pos_neg[1, :]]))
        batch_x = np.vstack([
            [sample_from_mfcc_file(u, self.max_length) for u in anchor_utterances],
            [sample_from_mfcc_file(u, self.max_length) for u in positive_utterances],
            [sample_from_mfcc_file(u, self.max_length) for u in negative_utterances]
        ])
        batch_y = np.zeros(shape=(len(batch_x), 1))  # dummy. sparse softmax needs something.
        return batch_x, batch_y
    def get_batch_train(self, batch_size):
        """Mine a hard triplet batch from the embedding history.

        For each random anchor: pick the most-similar different-speaker
        utterance (hard negative) and the least-similar same-speaker
        utterance (hard positive).
        """
        from deep_speaker.test import batch_cosine_similarity
        # s1 = time()
        self.batch_count += 1
        if self.batch_count % self.history_every == 0:
            self.update_triplets_history()
        all_indexes = range(len(self.history_embeddings_train))
        anchor_indexes = np.random.choice(a=all_indexes, size=batch_size // 3, replace=False)
        # s2 = time()
        similar_negative_indexes = []
        dissimilar_positive_indexes = []
        # could be made parallel.
        for anchor_index in anchor_indexes:
            # s21 = time()
            anchor_embedding = self.history_embeddings[anchor_index]
            anchor_speaker = extract_speaker(self.history_utterances[anchor_index])
            # why self.nb_speakers // 2? just random. because it is fast. otherwise it's too much.
            negative_indexes = [j for (j, a) in enumerate(self.history_utterances)
                                if extract_speaker(a) != anchor_speaker]
            negative_indexes = np.random.choice(negative_indexes, size=self.nb_speakers // 2)
            # s22 = time()
            anchor_embedding_tile = [anchor_embedding] * len(negative_indexes)
            anchor_cos = batch_cosine_similarity(anchor_embedding_tile, self.history_embeddings[negative_indexes])
            # s23 = time()
            similar_negative_index = negative_indexes[np.argsort(anchor_cos)[-1]]  # [-1:]
            similar_negative_indexes.append(similar_negative_index)
            # s24 = time()
            positive_indexes = [j for (j, a) in enumerate(self.history_utterances) if
                                extract_speaker(a) == anchor_speaker and j != anchor_index]
            # s25 = time()
            anchor_embedding_tile = [anchor_embedding] * len(positive_indexes)
            # s26 = time()
            anchor_cos = batch_cosine_similarity(anchor_embedding_tile, self.history_embeddings[positive_indexes])
            dissimilar_positive_index = positive_indexes[np.argsort(anchor_cos)[0]]  # [:1]
            dissimilar_positive_indexes.append(dissimilar_positive_index)
            # s27 = time()
        # s3 = time()
        batch_x = np.vstack([
            self.history_model_inputs[anchor_indexes],
            self.history_model_inputs[dissimilar_positive_indexes],
            self.history_model_inputs[similar_negative_indexes]
        ])
        # s4 = time()
        # for anchor, positive, negative in zip(history_utterances[anchor_indexes],
        #                                       history_utterances[dissimilar_positive_indexes],
        #                                       history_utterances[similar_negative_indexes]):
        #     print('anchor', os.path.basename(anchor),
        #           'positive', os.path.basename(positive),
        #           'negative', os.path.basename(negative))
        # print('_' * 80)
        # assert utterances as well positive != anchor.
        anchor_speakers = [extract_speaker(a) for a in self.history_utterances[anchor_indexes]]
        positive_speakers = [extract_speaker(a) for a in self.history_utterances[dissimilar_positive_indexes]]
        negative_speakers = [extract_speaker(a) for a in self.history_utterances[similar_negative_indexes]]
        assert len(anchor_indexes) == len(dissimilar_positive_indexes)
        assert len(similar_negative_indexes) == len(dissimilar_positive_indexes)
        assert list(self.history_utterances[dissimilar_positive_indexes]) != list(
            self.history_utterances[anchor_indexes])
        assert anchor_speakers == positive_speakers
        assert negative_speakers != anchor_speakers
        batch_y = np.zeros(shape=(len(batch_x), 1))  # dummy. sparse softmax needs something.
        # Track speaker usage for the debug metadata dump.
        for a in anchor_speakers:
            self.metadata_train_speakers[a] += 1
        for a in positive_speakers:
            self.metadata_train_speakers[a] += 1
        for a in negative_speakers:
            self.metadata_train_speakers[a] += 1
        # s5 = time()
        # print('1-2', s2 - s1)
        # print('2-3', s3 - s2)
        # print('3-4', s4 - s3)
        # print('4-5', s5 - s4)
        # print('21-22', (s22 - s21) * (batch_size // 3))
        # print('22-23', (s23 - s22) * (batch_size // 3))
        # print('23-24', (s24 - s23) * (batch_size // 3))
        # print('24-25', (s25 - s24) * (batch_size // 3))
        # print('25-26', (s26 - s25) * (batch_size // 3))
        # print('26-27', (s27 - s26) * (batch_size // 3))
        return batch_x, batch_y
    def get_speaker_verification_data(self, anchor_speaker, num_different_speakers):
        """Build one verification batch: anchor + positive + N negative utterances."""
        speakers = list(self.audio.speakers_to_utterances.keys())
        anchor_utterances = []
        positive_utterances = []
        negative_utterances = []
        negative_speakers = np.random.choice(list(set(speakers) - {anchor_speaker}), size=num_different_speakers)
        assert [negative_speaker != anchor_speaker for negative_speaker in negative_speakers]
        pos_utterances = np.random.choice(self.sp_to_utt_test[anchor_speaker], 2, replace=False)
        neg_utterances = [np.random.choice(self.sp_to_utt_test[neg], 1, replace=True)[0] for neg in negative_speakers]
        anchor_utterances.append(pos_utterances[0])
        positive_utterances.append(pos_utterances[1])
        negative_utterances.extend(neg_utterances)
        # anchor and positive should have difference utterances (but same speaker!).
        anc_pos = np.array([positive_utterances, anchor_utterances])
        assert np.all(anc_pos[0, :] != anc_pos[1, :])
        assert np.all(np.array([extract_speaker(s) for s in anc_pos[0, :]]) == np.array(
            [extract_speaker(s) for s in anc_pos[1, :]]))
        batch_x = np.vstack([
            [sample_from_mfcc_file(u, self.max_length) for u in anchor_utterances],
            [sample_from_mfcc_file(u, self.max_length) for u in positive_utterances],
            [sample_from_mfcc_file(u, self.max_length) for u in negative_utterances]
        ])
        batch_y = np.zeros(shape=(len(batch_x), 1))  # dummy. sparse softmax needs something.
        return batch_x, batch_y
class TripletBatcher:
    """Builds (anchor, positive, negative) triplet batches from one-hot labelled matrices."""
    def __init__(self, kx_train, ky_train, kx_test, ky_test):
        self.kx_train = kx_train
        self.ky_train = ky_train
        self.kx_test = kx_test
        self.ky_test = ky_test
        train_labels = ky_train.argmax(axis=1)
        test_labels = ky_test.argmax(axis=1)
        speakers_list = sorted(set(train_labels))
        num_different_speakers = len(speakers_list)
        # Both splits must cover exactly the same, contiguous speaker ids.
        assert speakers_list == sorted(set(test_labels))  # train speakers = test speakers.
        assert speakers_list == list(range(num_different_speakers))
        self.train_indices_per_speaker = {
            spk: list(np.where(train_labels == spk)[0]) for spk in speakers_list
        }
        self.test_indices_per_speaker = {
            spk: list(np.where(test_labels == spk)[0]) for spk in speakers_list
        }
        # check: every sample index is assigned to exactly one speaker.
        assert sorted(sum([v for v in self.train_indices_per_speaker.values()], [])) == sorted(range(len(ky_train)))
        assert sorted(sum([v for v in self.test_indices_per_speaker.values()], [])) == sorted(range(len(ky_test)))
        self.speakers_list = speakers_list
    def select_speaker_data(self, speaker, n, is_test):
        """Return n randomly chosen samples (with replacement) of `speaker`."""
        source = self.kx_test if is_test else self.kx_train
        index_table = self.test_indices_per_speaker if is_test else self.train_indices_per_speaker
        picked = np.random.choice(index_table[speaker], size=n)
        return source[picked]
    def get_batch(self, batch_size, is_test=False):
        """Return a triplet batch: anchors and positives share a speaker, negatives do not."""
        two_different_speakers = np.random.choice(self.speakers_list, size=2, replace=False)
        anchor_positive_speaker, negative_speaker = two_different_speakers
        assert negative_speaker != anchor_positive_speaker
        batch_x = np.vstack([
            self.select_speaker_data(anchor_positive_speaker, batch_size // 3, is_test),
            self.select_speaker_data(anchor_positive_speaker, batch_size // 3, is_test),
            self.select_speaker_data(negative_speaker, batch_size // 3, is_test)
        ])
        batch_y = np.zeros(shape=(len(batch_x), len(self.speakers_list)))
        return batch_x, batch_y
class TripletBatcherMiner(TripletBatcher):
    """TripletBatcher variant that mines hard training batches.

    Several candidate batches are sampled and the one with the highest loss
    under the current model is returned.
    """
    def __init__(self, kx_train, ky_train, kx_test, ky_test, model: DeepSpeakerModel):
        super().__init__(kx_train, ky_train, kx_test, ky_test)
        self.model = model
        self.num_evaluations_to_find_best_batch = 10
    def get_batch(self, batch_size, is_test=False):
        if is_test:
            # No mining on the test side: fall back to a plain random batch.
            return super().get_batch(batch_size, is_test)
        hardest_loss = 0
        hardest_batch = None, None
        for _ in range(self.num_evaluations_to_find_best_batch):
            candidate_x, candidate_y = super().get_batch(batch_size, is_test=False)  # only train here.
            candidate_loss = self.model.m.evaluate(candidate_x, candidate_y, batch_size=batch_size, verbose=0)
            if candidate_loss > hardest_loss:
                hardest_loss = candidate_loss
                hardest_batch = candidate_x, candidate_y
        return hardest_batch
class TripletBatcherSelectHardNegatives(TripletBatcher):
    """TripletBatcher variant that picks hard negatives via embedding similarity."""
    def __init__(self, kx_train, ky_train, kx_test, ky_test, model: DeepSpeakerModel):
        super().__init__(kx_train, ky_train, kx_test, ky_test)
        self.model = model
    def get_batch(self, batch_size, is_test=False, predict=None):
        """Anchors are the speakers whose two samples are least self-similar;
        each gets the most-similar other speaker as its negative.

        `predict` can override the model's predict function (used in tests).
        """
        if predict is None:
            predict = self.model.m.predict
        from deep_speaker.test import batch_cosine_similarity
        num_triplets = batch_size // 3
        inputs = []
        k = 2  # do not change this.
        for speaker in self.speakers_list:
            inputs.append(self.select_speaker_data(speaker, n=k, is_test=is_test))
        inputs = np.array(inputs)  # num_speakers * [k, num_frames, num_fbanks, 1].
        embeddings = predict(np.vstack(inputs))
        assert embeddings.shape[-1] == 512
        # (speaker, utterance, 512)
        embeddings = np.reshape(embeddings, (len(self.speakers_list), k, 512))
        # Similarity between each speaker's two samples; lowest = hardest anchors.
        cs = batch_cosine_similarity(embeddings[:, 0], embeddings[:, 1])
        arg_sort = np.argsort(cs)
        assert len(arg_sort) > num_triplets
        anchor_speakers = arg_sort[0:num_triplets]
        anchor_embeddings = embeddings[anchor_speakers, 0]
        negative_speakers = sorted(set(self.speakers_list) - set(anchor_speakers))
        negative_embeddings = embeddings[negative_speakers, 0]
        selected_negative_speakers = []
        # For each anchor, the most similar non-anchor speaker is the hard negative.
        for anchor_embedding in anchor_embeddings:
            cs_negative = [batch_cosine_similarity([anchor_embedding], neg) for neg in negative_embeddings]
            selected_negative_speakers.append(negative_speakers[int(np.argmax(cs_negative))])
        # anchor with frame 0.
        # positive with frame 1.
        # negative with frame 0.
        assert len(set(selected_negative_speakers).intersection(anchor_speakers)) == 0
        negative = inputs[selected_negative_speakers, 0]
        positive = inputs[anchor_speakers, 1]
        anchor = inputs[anchor_speakers, 0]
        batch_x = np.vstack([anchor, positive, negative])
        batch_y = np.zeros(shape=(len(batch_x), len(self.speakers_list)))
        return batch_x, batch_y
class TripletEvaluator:
    """Draws speaker-verification evaluation data from a one-hot labelled test set."""
    def __init__(self, kx_test, ky_test):
        self.kx_test = kx_test
        self.ky_test = ky_test
        labels = ky_test.argmax(axis=1)
        speakers_list = sorted(set(labels))
        num_different_speakers = len(speakers_list)
        # Speaker ids must be contiguous starting at 0.
        assert speakers_list == list(range(num_different_speakers))
        self.test_indices_per_speaker = {
            spk: list(np.where(labels == spk)[0]) for spk in speakers_list
        }
        # Every sample index must belong to exactly one speaker.
        assert sorted(sum([v for v in self.test_indices_per_speaker.values()], [])) == sorted(range(len(ky_test)))
        self.speakers_list = speakers_list
    def _select_speaker_data(self, speaker):
        """Return one randomly chosen test sample of `speaker`."""
        picked = np.random.choice(self.test_indices_per_speaker[speaker], size=1)
        return self.kx_test[picked]
    def get_speaker_verification_data(self, positive_speaker, num_different_speakers):
        """Stack [anchor, positive, negatives...]: rows 0-1 are the positive speaker."""
        all_negative_speakers = list(set(self.speakers_list) - {positive_speaker})
        assert len(self.speakers_list) - 1 == len(all_negative_speakers)
        negative_speakers = np.random.choice(all_negative_speakers, size=num_different_speakers, replace=False)
        assert positive_speaker not in negative_speakers
        anchor = self._select_speaker_data(positive_speaker)
        positive = self._select_speaker_data(positive_speaker)
        data = [anchor, positive]
        data.extend([self._select_speaker_data(neg) for neg in negative_speakers])
        return np.vstack(data)
# Ad-hoc benchmark of the lazy batcher.
# NOTE(review): the working_dir is a hard-coded developer-local path — this
# block only runs on that machine; confirm before relying on it.
if __name__ == '__main__':
    np.random.seed(123)
    ltb = LazyTripletBatcher(working_dir='/Users/premy/deep-speaker/',
                             max_length=NUM_FRAMES,
                             model=DeepSpeakerModel())
    for i in range(1000):
        print(i)
        start = time()
        ltb.get_batch_train(batch_size=9)
        print(time() - start)
    # ltb.get_batch(batch_size=96)
| 23,984 | 46.401186 | 119 | py |
deep-speaker | deep-speaker-master/deep_speaker/triplet_loss.py | # pylint: disable=E0611,E0401
import tensorflow.keras.backend as K
# Triplet margin: the anchor-positive similarity must exceed the
# anchor-negative similarity by at least ALPHA for zero loss.
# ALPHA = 0.2 # used in FaceNet https://arxiv.org/pdf/1503.03832.pdf
ALPHA = 0.1  # used in Deep Speaker.
def batch_cosine_similarity(x1, x2):
    """Row-wise cosine similarity between two batches of embeddings.

    1 = equal direction; -1 = opposite direction.
    """
    # https://en.wikipedia.org/wiki/Cosine_similarity
    # as values have have length 1, we don't need to divide by norm (as it is 1)
    similarity = K.squeeze(K.batch_dot(x1, x2, axes=1), axis=1)
    return similarity
def deep_speaker_loss(y_true, y_pred, alpha=ALPHA):
    """Triplet (hinge) loss on cosine similarities, as in Deep Speaker.

    `y_true` is ignored (kept only to satisfy the Keras loss signature).
    `y_pred` has shape (batch, embedding) and stacks, in thirds along axis 0:
    anchors, then positives, then negatives.

    If all three thirds are identical the loss degenerates to `alpha`; a
    perfectly separated triplet (sim(a,p)=1, sim(a,n)=-1) gives 0.
    """
    third = K.shape(y_pred)[0] // 3
    anchors = y_pred[:third]
    positives = y_pred[third:2 * third]
    negatives = y_pred[2 * third:]
    # Cosine similarity of anchor/positive and anchor/negative pairs.
    sap = batch_cosine_similarity(anchors, positives)
    san = batch_cosine_similarity(anchors, negatives)
    # Hinge: penalize whenever the negative is not at least `alpha` less
    # similar to the anchor than the positive is.
    per_triplet = K.maximum(san - sap + alpha, 0.0)
    return K.mean(per_triplet)
if __name__ == '__main__':
    import numpy as np
    # Sanity checks with toy 1-d "embeddings": rows are [anchor, positive,
    # negative], one triplet per call. Larger alpha => larger hinge loss.
    print(deep_speaker_loss(alpha=0.1, y_true=0, y_pred=np.array([[0.9], [1.0], [-1.0]])))
    print(deep_speaker_loss(alpha=1, y_true=0, y_pred=np.array([[0.9], [1.0], [-1.0]])))
    print(deep_speaker_loss(alpha=2, y_true=0, y_pred=np.array([[0.9], [1.0], [-1.0]])))
    print('--------------')
    print(deep_speaker_loss(alpha=2, y_true=0, y_pred=np.array([[0.6], [1.0], [0.0]])))
    print(deep_speaker_loss(alpha=1, y_true=0, y_pred=np.array([[0.6], [1.0], [0.0]])))
    print(deep_speaker_loss(alpha=0.1, y_true=0, y_pred=np.array([[0.6], [1.0], [0.0]])))
    print(deep_speaker_loss(alpha=0.2, y_true=0, y_pred=np.array([[0.6], [1.0], [0.0]])))
    print('--------------')
    print(deep_speaker_loss(alpha=2, y_true=0, y_pred=np.array([[0.9], [1.0], [-1.0]])))
    print(deep_speaker_loss(alpha=1, y_true=0, y_pred=np.array([[0.9], [1.0], [-1.0]])))
    print(deep_speaker_loss(alpha=0.1, y_true=0, y_pred=np.array([[0.9], [1.0], [-1.0]])))
    print(deep_speaker_loss(alpha=0.2, y_true=0, y_pred=np.array([[0.9], [1.0], [-1.0]])))
| 2,782 | 42.484375 | 90 | py |
deep-speaker | deep-speaker-master/deep_speaker/conv_models.py | import logging
import os
import numpy as np
import tensorflow as tf
# pylint: disable=E0611,E0401
import tensorflow.keras.backend as K
# pylint: disable=E0611,E0401
from tensorflow.keras import layers, regularizers
# pylint: disable=E0611,E0401
from tensorflow.keras.layers import (
BatchNormalization,
Conv2D,
Dense,
Dropout,
Input,
Lambda,
Reshape,
)
# pylint: disable=E0611,E0401
from tensorflow.keras.models import Model
# pylint: disable=E0611,E0401
from tensorflow.keras.optimizers import Adam
from deep_speaker.constants import NUM_FBANKS, SAMPLE_RATE, NUM_FRAMES
from deep_speaker.triplet_loss import deep_speaker_loss
logger = logging.getLogger(__name__)
@tf.function
def tf_normalize(data, ndims, eps=0, adjusted=False):
    """Standardize `data` (subtract mean, divide by stddev) over its last
    `ndims` axes, per sample. Mirrors tf.image.per_image_standardization,
    generalized to N trailing dimensions.

    eps: floor applied to the stddev (guards against uniform inputs).
    adjusted: additionally floor eps at 1/sqrt(num_elements).
    """
    data = tf.convert_to_tensor(data, name='data')
    # Negative axis indices covering the trailing `ndims` dimensions.
    reduce_dims = [-i - 1 for i in range(ndims)]
    # pylint: disable=E1123,E1120
    data = tf.cast(data, dtype=tf.dtypes.float32)
    # Element count per sample; uses static shape — assumes the trailing dims
    # are statically known (TODO confirm for all call sites).
    data_num = tf.reduce_prod(data.shape[-ndims:])
    data_mean = tf.reduce_mean(data, axis=reduce_dims, keepdims=True)
    # Apply a minimum normalization that protects us against uniform images.
    stddev = tf.math.reduce_std(data, axis=reduce_dims, keepdims=True)
    adjusted_stddev = stddev
    if adjusted:
        # Same floor as per_image_standardization: 1/sqrt(N).
        min_stddev = tf.math.rsqrt(tf.cast(data_num, tf.dtypes.float32))
        eps = tf.maximum(eps, min_stddev)
    if eps > 0:
        adjusted_stddev = tf.maximum(adjusted_stddev, eps)
    return (data - data_mean) / adjusted_stddev
@tf.function
def tf_fbank(samples):
    """
    Compute Mel-filterbank energy features from an audio signal.
    See python_speech_features.fbank

    samples: batch of PCM waveforms, shape (batch, num_samples).
    Returns mel-filterbank energies, shape (batch, num_frames, NUM_FBANKS).
    """
    # 25 ms windows with a 10 ms hop, standard fbank framing.
    frame_length = int(0.025 * SAMPLE_RATE)
    frame_step = int(0.01 * SAMPLE_RATE)
    fft_length = 512
    fft_bins = fft_length // 2 + 1
    # First-order pre-emphasis filter (coefficient 0.97).
    pre_emphasis = samples[:, 1:] - 0.97 * samples[:, :-1]
    # Original implementation from python_speech_features
    # frames = tf.expand_dims(sigproc.framesig(preemphasis[0], frame_length,
    #                                          frame_step, winfunc=lambda x: np.ones((x,))), 0)
    # powspec = sigproc.powspec(frames, fft_length)
    # Tensorflow impl #1, using manually-split frames and rfft
    # spec = tf.abs(tf.signal.rfft(frames, [fft_length]))
    # powspec = tf.square(spec) / fft_length
    # Tensorflow impl #2, using stft to handle framing automatically
    # (There is a one-off mismatch on the number of frames on the resulting tensor, but I guess this is ok)
    spec = tf.abs(tf.signal.stft(pre_emphasis, frame_length, frame_step, fft_length, window_fn=tf.ones))
    powspec = tf.square(spec) / fft_length
    # Matrix to transform spectrum to mel-frequencies
    # Original implementation from python_speech_features
    # linear_to_mel_weight_matrix = get_filterbanks(NUM_FBANKS, fft_length,
    #                                               SAMPLE_RATE, 0, SAMPLE_RATE/2).astype(np.float32).T
    linear_to_mel_weight_matrix = tf.signal.linear_to_mel_weight_matrix(
        num_mel_bins=NUM_FBANKS,
        num_spectrogram_bins=fft_bins,
        sample_rate=SAMPLE_RATE,
        lower_edge_hertz=0,
        upper_edge_hertz=SAMPLE_RATE / 2,
    )
    # Project the power spectrum onto the mel filterbank.
    feat = tf.matmul(powspec, linear_to_mel_weight_matrix)
    # feat = tf.where(feat == 0, np.finfo(np.float32).eps, feat)
    return feat
class DeepSpeakerModel:
    """ResCNN speaker-embedding network (Deep Speaker).

    Wraps a Keras ``Model`` (``self.m``) mapping either filterbank frames or
    raw PCM samples to a 512-d output: an L2-normalized embedding, or, when
    ``include_softmax`` is set, per-speaker softmax probabilities used during
    pre-training.
    """
    # I thought it was 3 but maybe energy is added at a 4th dimension.
    # would be better to have 4 dimensions:
    # MFCC, DIFF(MFCC), DIFF(DIFF(MFCC)), ENERGIES (probably tiled across the frequency domain).
    # this seems to help match the parameter counts.
    def __init__(
            self,
            batch_input_shape=(None, NUM_FRAMES, NUM_FBANKS, 1),
            include_softmax=False,
            num_speakers_softmax=None,
            pcm_input=False
    ):
        # pcm_input=True ignores batch_input_shape and consumes raw waveforms,
        # computing fbank features in-graph (tf_fbank + tf_normalize).
        if pcm_input:
            batch_input_shape = None
        self.include_softmax = include_softmax
        if self.include_softmax:
            # Softmax head needs to know the number of classes (speakers).
            assert num_speakers_softmax > 0
        self.clipped_relu_count = 0
        # http://cs231n.github.io/convolutional-networks/
        # conv weights
        # #params = ks * ks * nb_filters * num_channels_input
        # Conv128-s
        # 5*5*128*128/2+128
        # ks*ks*nb_filters*channels/strides+bias(=nb_filters)
        # take 100 ms -> 4 frames.
        # if signal is 3 seconds, then take 100ms per 100ms and average out this network.
        # 8*8 = 64 features.
        # used to share all the layers across the inputs
        # num_frames = K.shape() - do it dynamically after.
        if pcm_input:
            batch_input_shape = batch_input_shape or (None, None)  # Batch-size, num-samples
            inputs = Input(batch_shape=batch_input_shape, name='raw_inputs')
            x = inputs
            # In-graph feature extraction: fbank -> per-sample standardization
            # -> add trailing channel axis.
            x = Lambda(tf_fbank)(x)
            x = Lambda(lambda x_: tf_normalize(x_, 1, 1e-12))(x)
            x = Lambda(lambda x_: tf.expand_dims(x_, axis=-1))(x)
        else:
            batch_input_shape = batch_input_shape or (None, None, NUM_FBANKS, 1)
            inputs = Input(batch_shape=batch_input_shape, name='input')
            x = inputs
        x = self.cnn_component(x)
        # Flatten frequency x channel dims; 2048 = 4 (fbanks after 4 stride-2
        # stages from 64) * 512 channels.
        x = Reshape((-1, 2048))(x)
        # Temporal average layer. axis=1 is time.
        x = Lambda(lambda y: K.mean(y, axis=1), name='average')(x)
        if include_softmax:
            logger.info('Including a Dropout layer to reduce overfitting.')
            # used for softmax because the dataset we pre-train on might be too small. easy to overfit.
            x = Dropout(0.5)(x)
        x = Dense(512, name='affine')(x)
        if include_softmax:
            # Those weights are just when we train on softmax.
            x = Dense(num_speakers_softmax, activation='softmax')(x)
        else:
            # Does not contain any weights.
            x = Lambda(lambda y: K.l2_normalize(y, axis=1), name='ln')(x)
        self.m = Model(inputs, x, name='ResCNN')
    def keras_model(self):
        """Return the underlying Keras Model."""
        return self.m
    def get_weights(self):
        """Return all weights, excluding the softmax head when present."""
        w = self.m.get_weights()
        if self.include_softmax:
            w.pop()  # last 2 are the W_softmax and b_softmax.
            w.pop()
        return w
    def clipped_relu(self, inputs):
        """ReLU clipped at 20 (min(max(x, 0), 20)), each with a unique layer name."""
        relu = Lambda(lambda y: K.minimum(K.maximum(y, 0), 20), name=f'clipped_relu_{self.clipped_relu_count}')(inputs)
        self.clipped_relu_count += 1
        return relu
    def identity_block(self, input_tensor, kernel_size, filters, stage, block):
        """Residual block: two conv+BN+clipped-ReLU, then add the skip connection."""
        conv_name_base = f'res{stage}_{block}_branch'
        x = Conv2D(filters,
                   kernel_size=kernel_size,
                   strides=1,
                   activation=None,
                   padding='same',
                   kernel_initializer='glorot_uniform',
                   kernel_regularizer=regularizers.l2(l=0.0001),
                   name=conv_name_base + '_2a')(input_tensor)
        x = BatchNormalization(name=conv_name_base + '_2a_bn')(x)
        x = self.clipped_relu(x)
        x = Conv2D(
            filters,
            kernel_size=kernel_size,
            strides=1,
            activation=None,
            padding='same',
            kernel_initializer='glorot_uniform',
            kernel_regularizer=regularizers.l2(l=0.0001),
            name=conv_name_base + '_2b',
        )(x)
        x = BatchNormalization(name=conv_name_base + '_2b_bn')(x)
        x = self.clipped_relu(x)
        # Skip connection (same shape since strides=1 and padding='same').
        x = layers.add([x, input_tensor])
        x = self.clipped_relu(x)
        return x
    def conv_and_res_block(self, inp, filters, stage):
        """One ResCNN stage: a stride-2 5x5 conv followed by 3 identity blocks."""
        conv_name = 'conv{}-s'.format(filters)
        # TODO: why kernel_regularizer?
        o = Conv2D(filters,
                   kernel_size=5,
                   strides=2,
                   activation=None,
                   padding='same',
                   kernel_initializer='glorot_uniform',
                   kernel_regularizer=regularizers.l2(l=0.0001), name=conv_name)(inp)
        o = BatchNormalization(name=conv_name + '_bn')(o)
        o = self.clipped_relu(o)
        for i in range(3):
            o = self.identity_block(o, kernel_size=3, filters=filters, stage=stage, block=i)
        return o
    def cnn_component(self, inp):
        """Stack the four ResCNN stages (64 -> 128 -> 256 -> 512 filters)."""
        x = self.conv_and_res_block(inp, 64, stage=1)
        x = self.conv_and_res_block(x, 128, stage=2)
        x = self.conv_and_res_block(x, 256, stage=3)
        x = self.conv_and_res_block(x, 512, stage=4)
        return x
    def set_weights(self, w):
        """Assign per-layer weights in order; NOTE zip stops at the shorter list,
        which is what allows loading softmax-trained weights minus the head."""
        for layer, layer_w in zip(self.m.layers, w):
            layer.set_weights(layer_w)
            logger.info(f'Setting weights for [{layer.name}]...')
def main():
    """Build the default ResCNN model and print its layer summary.

    Serves as a parameter-count sanity check: this build has ~37K parameters
    where the paper reports ~41K — close enough.
    """
    model_wrapper = DeepSpeakerModel()
    model_wrapper.m.summary()
    # With NUM_FRAMES=32 and NUM_FBANKS=64, the flattened feature size per
    # frame window is 32 * 64 = 2048.
    # plot_model(model_wrapper.m, to_file='model.png', dpi=300, show_shapes=True, expand_nested=True)
def _train():
    """Ad-hoc experiment: overfit the triplet loss on synthetic batches.

    The commented-out sections document earlier experiments (softmax
    pre-training, and an anchor/positive/negative setup that converges).
    The live code feeds three identical thirds, which should NOT converge
    below alpha — a sanity check on the loss. Runs forever; kill manually.
    """
    # x = np.random.uniform(size=(6, 32, 64, 4))  # 6 is multiple of 3.
    # y_softmax = np.random.uniform(size=(6, 100))
    # dsm = DeepSpeakerModel(batch_input_shape=(None, 32, 64, 4), include_softmax=True, num_speakers_softmax=100)
    # dsm.m.compile(optimizer=Adam(lr=0.01), loss='categorical_crossentropy')
    # print(dsm.m.predict(x).shape)
    # print(dsm.m.evaluate(x, y_softmax))
    # w = dsm.get_weights()
    dsm = DeepSpeakerModel(batch_input_shape=(None, 32, 64, 4), include_softmax=False)
    # dsm.m.set_weights(w)
    dsm.m.compile(optimizer=Adam(lr=0.01), loss=deep_speaker_loss)
    # it works!!!!!!!!!!!!!!!!!!!!
    # unit_batch_size = 20
    # anchor = np.ones(shape=(unit_batch_size, 32, 64, 4))
    # positive = np.array(anchor)
    # negative = np.ones(shape=(unit_batch_size, 32, 64, 4)) * (-1)
    # batch = np.vstack((anchor, positive, negative))
    # x = batch
    # y = np.zeros(shape=(len(batch), 512))  # not important.
    # print('Starting to fit...')
    # while True:
    #     print(dsm.m.train_on_batch(x, y))
    # should not work... and it does not work!
    unit_batch_size = 20
    negative = np.ones(shape=(unit_batch_size, 32, 64, 4)) * (-1)
    # All three thirds identical -> loss should stay pinned at alpha.
    batch = np.vstack((negative, negative, negative))
    x = batch
    y = np.zeros(shape=(len(batch), 512))  # not important.
    print('Starting to fit...')
    while True:
        print(dsm.m.train_on_batch(x, y))
def _test_checkpoint_compatibility():
    """Check that softmax-trained weights load into the triplet (no-softmax) model.

    Saves weights from a softmax model and reloads them with by_name=True into
    a model without the softmax head; matching layers load, the head is skipped.

    Improvement over the original: weights go to a unique temporary file
    instead of './test.h5', so concurrent runs cannot clobber each other and
    the file is removed even when load_weights raises.
    """
    import tempfile
    dsm = DeepSpeakerModel(batch_input_shape=(None, 32, 64, 4), include_softmax=True, num_speakers_softmax=10)
    fd, weights_path = tempfile.mkstemp(suffix='.h5')
    os.close(fd)  # h5py reopens the path itself.
    try:
        dsm.m.save_weights(weights_path)
        dsm = DeepSpeakerModel(batch_input_shape=(None, 32, 64, 4), include_softmax=False)
        dsm.m.load_weights(weights_path, by_name=True)
    finally:
        if os.path.exists(weights_path):
            os.remove(weights_path)
if __name__ == '__main__':
    # Smoke test: softmax checkpoints must remain loadable by the triplet model.
    _test_checkpoint_compatibility()
| 10,741 | 35.16835 | 119 | py |
deep-speaker | deep-speaker-master/deep_speaker/train.py | import logging
import os
# pylint: disable=E0611,E0401
from tensorflow.keras.callbacks import ReduceLROnPlateau, EarlyStopping, ModelCheckpoint
# pylint: disable=E0611,E0401
from tensorflow.keras.optimizers import SGD
from tqdm import tqdm
from deep_speaker.batcher import KerasFormatConverter, LazyTripletBatcher
from deep_speaker.constants import BATCH_SIZE, CHECKPOINTS_SOFTMAX_DIR, CHECKPOINTS_TRIPLET_DIR, NUM_FRAMES, NUM_FBANKS
from deep_speaker.conv_models import DeepSpeakerModel
from deep_speaker.triplet_loss import deep_speaker_loss
from deep_speaker.utils import load_best_checkpoint, ensures_dir
logger = logging.getLogger(__name__)
# Otherwise it's just too much logging from Tensorflow...
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
def fit_model(dsm: DeepSpeakerModel, working_dir: str, max_length: int = NUM_FRAMES, batch_size=BATCH_SIZE):
    """Train `dsm` with the triplet loss using lazily-mined triplet batches.

    A fixed validation set of 200 batches is pre-built so val_loss is computed
    on the same data every epoch (required for save_best_only checkpointing).
    """
    batcher = LazyTripletBatcher(working_dir, max_length, dsm)
    # build small test set.
    test_batches = []
    for _ in tqdm(range(200), desc='Build test set'):
        test_batches.append(batcher.get_batch_test(batch_size))
    def test_generator():
        # Cycle endlessly over the frozen validation batches.
        while True:
            for bb in test_batches:
                yield bb
    def train_generator():
        # Fresh randomly-mined batch each step.
        while True:
            yield batcher.get_random_batch(batch_size, is_test=False)
    checkpoint_name = dsm.m.name + '_checkpoint'
    checkpoint_filename = os.path.join(CHECKPOINTS_TRIPLET_DIR, checkpoint_name + '_{epoch}.h5')
    checkpoint = ModelCheckpoint(monitor='val_loss', filepath=checkpoint_filename, save_best_only=True)
    # y=None: the generators presumably yield full (inputs, targets) pairs —
    # TODO confirm against LazyTripletBatcher.
    dsm.m.fit(x=train_generator(), y=None, steps_per_epoch=2000, shuffle=False,
              epochs=1000, validation_data=test_generator(), validation_steps=len(test_batches),
              callbacks=[checkpoint])
def fit_model_softmax(dsm: DeepSpeakerModel, kx_train, ky_train, kx_test, ky_test,
                      batch_size=BATCH_SIZE, max_epochs=1000, initial_epoch=0):
    """Softmax pre-training: fit the classifier head on pre-extracted features.

    Arrays are truncated to a whole number of batches. Training stops early if
    val_accuracy plateaus (20 epochs), with LR halved after 10 stagnant epochs.
    """
    checkpoint_path = os.path.join(
        CHECKPOINTS_SOFTMAX_DIR, dsm.m.name + '_checkpoint' + '_{epoch}.h5')
    best_checkpoint = ModelCheckpoint(monitor='val_accuracy', filepath=checkpoint_path, save_best_only=True)
    # if the accuracy does not increase by 0.1% over 20 epochs, we stop the training.
    stop_on_plateau = EarlyStopping(monitor='val_accuracy', min_delta=0.001, patience=20, verbose=1, mode='max')
    # if the accuracy does not increase over 10 epochs, we reduce the learning rate by half.
    halve_lr = ReduceLROnPlateau(monitor='val_accuracy', factor=0.5, patience=10, min_lr=0.0001, verbose=1)

    def _whole_batches(a):
        # Drop the trailing partial batch, if any.
        return a[0:len(a) - len(a) % batch_size]

    kx_train, ky_train = _whole_batches(kx_train), _whole_batches(ky_train)
    kx_test, ky_test = _whole_batches(kx_test), _whole_batches(ky_test)
    dsm.m.fit(x=kx_train,
              y=ky_train,
              batch_size=batch_size,
              epochs=initial_epoch + max_epochs,
              initial_epoch=initial_epoch,
              verbose=1,
              shuffle=True,
              validation_data=(kx_test, ky_test),
              callbacks=[stop_on_plateau, halve_lr, best_checkpoint])
def start_training(working_dir, pre_training_phase=True):
    """Run one training phase of the Deep Speaker model.

    Args:
        working_dir: dataset/cache directory (consumed by KerasFormatConverter
            and LazyTripletBatcher).
        pre_training_phase: when True, run softmax pre-training (speaker
            classification); when False, train with the triplet loss,
            warm-starting from the best softmax checkpoint if one exists.
    """
    ensures_dir(CHECKPOINTS_SOFTMAX_DIR)
    ensures_dir(CHECKPOINTS_TRIPLET_DIR)
    batch_input_shape = [None, NUM_FRAMES, NUM_FBANKS, 1]
    if pre_training_phase:
        logger.info('Softmax pre-training.')
        kc = KerasFormatConverter(working_dir)
        num_speakers_softmax = len(kc.categorical_speakers.speaker_ids)
        dsm = DeepSpeakerModel(batch_input_shape, include_softmax=True, num_speakers_softmax=num_speakers_softmax)
        dsm.m.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
        pre_training_checkpoint = load_best_checkpoint(CHECKPOINTS_SOFTMAX_DIR)
        if pre_training_checkpoint is not None:
            # Checkpoints are named '<model>_checkpoint_<epoch>.h5'; recover the
            # epoch so training resumes where it stopped. os.path.basename is
            # used instead of splitting on '/' so this also works on Windows.
            initial_epoch = int(os.path.basename(pre_training_checkpoint).split('.')[0].split('_')[-1])
            logger.info(f'Initial epoch is {initial_epoch}.')
            logger.info(f'Loading softmax checkpoint: {pre_training_checkpoint}.')
            dsm.m.load_weights(pre_training_checkpoint)  # latest one.
        else:
            initial_epoch = 0
        fit_model_softmax(dsm, kc.kx_train, kc.ky_train, kc.kx_test, kc.ky_test, initial_epoch=initial_epoch)
    else:
        logger.info('Training with the triplet loss.')
        dsm = DeepSpeakerModel(batch_input_shape, include_softmax=False)
        triplet_checkpoint = load_best_checkpoint(CHECKPOINTS_TRIPLET_DIR)
        pre_training_checkpoint = load_best_checkpoint(CHECKPOINTS_SOFTMAX_DIR)
        if triplet_checkpoint is not None:
            logger.info(f'Loading triplet checkpoint: {triplet_checkpoint}.')
            dsm.m.load_weights(triplet_checkpoint)
        elif pre_training_checkpoint is not None:
            logger.info(f'Loading pre-training checkpoint: {pre_training_checkpoint}.')
            # If `by_name` is True, weights are loaded into layers only if they share the
            # same name. This is useful for fine-tuning or transfer-learning models where
            # some of the layers have changed (here: the softmax head is simply skipped).
            dsm.m.load_weights(pre_training_checkpoint, by_name=True)
        dsm.m.compile(optimizer=SGD(), loss=deep_speaker_loss)
        fit_model(dsm, working_dir, NUM_FRAMES)
| 5,455 | 47.714286 | 119 | py |
samurai | samurai-master/doc/source/conf.py | # -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
import sphinx_rtd_theme
# -- Project information -----------------------------------------------------
project = 'Samurai'
author = 'samurai developers'
# The short X.Y version
version = '0.0'
# The full version, including alpha/beta/rc tags
release = '0.0.0'
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.todo',
'sphinx.ext.mathjax',
'breathe',
'sphinx_rtd_theme',
]
breathe_projects = { 'samurai': '../xml' }
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
rst_epilog = f"""
.. |project| replace:: {project}
.. role:: cpp_code(code)
:language: c++
"""
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = None
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
html_theme_options = {
'collapse_navigation': False,
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
html_css_files = [
'css/custom.css',
]
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'Samuraidoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
'pointsize': '11pt',
# Additional stuff for the LaTeX preamble.
#
'preamble': '''
\\fvset{fontsize=\\scriptsize}
''',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'samurai.tex', 'Samurai Documentation',
author, 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'samurai', 'Samurai Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'Samurai', 'Samurai Documentation',
author, 'Samurai developers', 'One line description of project.',
'Miscellaneous'),
]
# -- Options for Epub output -------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# -- Extension configuration -------------------------------------------------
# -- Options for todo extension ----------------------------------------------
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Breathe configuration for readthedocs -----------------------------------
import subprocess
import os

# -- Breathe configuration for readthedocs -----------------------------------
# On Read the Docs the normal build pipeline does not run doxygen for us, so
# generate the XML that breathe consumes before Sphinx starts.
read_the_docs_build = os.environ.get('READTHEDOCS', None) == 'True'
if read_the_docs_build:
    # Run doxygen from the parent directory (where the Doxyfile lives).
    # cwd= replaces the previous shell string 'cd ..; doxygen' and avoids
    # shell quoting/injection pitfalls; check=False mirrors subprocess.call,
    # which ignored the exit status.
    subprocess.run(['doxygen'], cwd='..', check=False)
| 5,971 | 27.037559 | 79 | py |
LAMP | LAMP-main/loop_closure/src/loop_closure_batcher.py | from gnn.gnn_model import LoopClosureGNN
import torch
from pose_graph_msgs.msg import PoseGraph, LoopCandidateArray, LoopComputationStatus
import rospy
from copy import deepcopy
import traceback
import os
from subset_algorithms.heuristic import MaximallySeperatedNodes, MaximumCovarianceNodes, Random
from subset_algorithms.gnn import BinomialCEM, SimulatedAnnealing, DiscretePSO, DiscreteTabuSearch
import threading
from concurrent.futures import ThreadPoolExecutor
import rospkg
def edge_selection_algorithm_dispatch(algorithm_name, lock):
    """Map a configured algorithm name to a loop-closure subset selector.

    Args:
        algorithm_name: one of "cem", "random",
            "heuristic_maximally_separated_nodes", "heuristic_maximum_covariance",
            "simulated_annealing", "pso", "tabu".
        lock: re-entrant lock shared with the batcher, handed to the selector.

    Returns:
        A constructed selector instance.

    Raises:
        Exception: if `algorithm_name` is not one of the names above.
    """
    if algorithm_name == "cem":
        return BinomialCEM(lock)
    elif algorithm_name == "random":
        return Random(lock)
    elif algorithm_name == "heuristic_maximally_separated_nodes":
        return MaximallySeperatedNodes(lock)
    elif algorithm_name == "heuristic_maximum_covariance":
        return MaximumCovarianceNodes(lock)
    elif algorithm_name == "simulated_annealing":
        return SimulatedAnnealing(lock)
    elif algorithm_name == "pso":
        return DiscretePSO(lock)
    elif algorithm_name == "tabu":
        return DiscreteTabuSearch(lock)
    else:
        # Bug fix: the f-prefix was missing, so the message used to contain
        # the literal text "{algorithm_name}" instead of the offending value.
        raise Exception(f"Edge subset selection algorithm unknown {algorithm_name}")
class LoopClosureBatcher:
    """ROS node logic that queues loop-closure candidates, scores subsets with
    a GNN-driven selector, and publishes prioritized batches.

    Thread-safety: `queue_update_lock` guards the candidate queue;
    `solution_lock` is shared with the subset-selection algorithm.
    """
    def __init__(self, params):
        # Resolve this package's install path to locate the bundled GNN weights.
        rospack = rospkg.RosPack()
        cur_path = rospack.get_path("loop_closure")
        self.loop_closures_to_choose = params["number_of_loop_closures"]
        self.min_queue_size = params["min_queue_size"]
        self.min_processing_time_seconds = params["min_processing_time_seconds"]
        self.verbose = params["verbose"]
        try:
            with open(os.path.join(cur_path, "model", "current_gnn_model.pkl"), "rb") as f:
                mdict = torch.load(f)
        except Exception as e:
            # Checkpoint was presumably saved on GPU; retry mapping tensors to CPU.
            self.log_warn("Couldn't load GPU model due to " + str(e) + ", loading on CPU")
            with open(os.path.join(cur_path, "model", "current_gnn_model.pkl"), "rb") as f:
                mdict = torch.load(f, map_location=torch.device('cpu'))
        self.model = LoopClosureGNN(64, 7, 4, 1)
        self.model.load_state_dict(mdict)
        self.model.eval()  # inference only; disables dropout/batchnorm updates
        self.pg_raw = None
        self.current_pose_graph_nodes = None
        self.current_pose_graph_edges = None
        self.current_pose_graph_edge_atrrs = None
        self.solution_lock = threading.RLock()
        self.queue_update_lock = threading.RLock()
        self.edge_subset_algorithm = edge_selection_algorithm_dispatch(params["algorithm"], self.solution_lock)
        self.edge_subset_algorithm.set_min_processing_seconds(self.min_processing_time_seconds)
        self.edge_subset_algorithm.set_verbose(self.verbose)
        self.edge_subset_algorithm.set_params(params)
        # Dedup map: (key_to, key_from, type) -> most recent LoopCandidate.
        self.keys_to_loop_candidates = dict()
        self.thread_pool = ThreadPoolExecutor(max_workers=2)
        self.current_edge_future = None
        self.previous_batch = None
        self.current_pose_graph = None
    def log_debug(self, s):
        """Log at verbosity >= 3."""
        if self.verbose >= 3:
            rospy.loginfo("Batcher Node: " + s)
    def log_info(self, s):
        """Log at verbosity >= 2."""
        if self.verbose >= 2:
            rospy.loginfo("Batcher Node: " + s)
    def log_warn(self, s):
        """Warn at verbosity >= 2."""
        if self.verbose >= 2:
            rospy.logwarn("Batcher Node: " + s)
    def log_error(self, s):
        """Error at verbosity >= 1."""
        if self.verbose >= 1:
            rospy.logerr("Batcher Node: " + s)
    def create_publishers(self):
        """Create the publisher for prioritized loop-closure batches."""
        self.loop_publisher = rospy.Publisher(rospy.resolve_name("~prioritized_loop_candidates"), LoopCandidateArray, queue_size=100)
    def register_callbacks(self):
        """
        Handle setting up ROS callbacks and services
        """
        rospy.Subscriber(rospy.resolve_name("~loop_candidates"), LoopCandidateArray,
                         self.add_loop_closure_to_queue)
        rospy.Subscriber(rospy.resolve_name("~pose_graph"), PoseGraph,
                         self.handle_pose_graph_message)
        rospy.Subscriber(rospy.resolve_name("~loop_computation_status"), LoopComputationStatus, self.handle_status)
        rospy.Subscriber(rospy.resolve_name("~output_loop_closures"), LoopCandidateArray, self.remove_loop_closures_from_queue)
    def make_key(self, loop_candidate):
        """Dedup key for a candidate: (key_to, key_from, type)."""
        return (loop_candidate.key_to, loop_candidate.key_from, loop_candidate.type)
    def remove_loop_closures_from_queue(self, loop_candidate_array):
        """Drop already-processed candidates from the queue (missing keys ignored)."""
        with self.queue_update_lock:
            size_before = len(self.keys_to_loop_candidates)
            for loop_candidate in loop_candidate_array.candidates:
                key= self.make_key(loop_candidate)
                try:
                    del self.keys_to_loop_candidates[key]
                except KeyError:
                    pass
            size_after = len(self.keys_to_loop_candidates)
            if size_before - size_after != 0:
                self.log_debug("Batcher Removed " + str(size_before - size_after) + " elements.")
    def add_loop_closure_to_queue(self, loop_candidate_array):
        """Queue incoming candidates and (re)start subset selection if warranted."""
        with self.queue_update_lock:
            for loop_candidate in loop_candidate_array.candidates:
                key = self.make_key(loop_candidate)
                self.keys_to_loop_candidates[key] = loop_candidate
            self.log_info("Batcher received :" + str(len(loop_candidate_array.candidates)) + " Queue Size: " + str(
                len(self.keys_to_loop_candidates)))
        # NOTE(review): a blocking RLock.acquire() always returns True, so this
        # branch is unconditional; paired with the explicit release() below.
        if self.solution_lock.acquire():
            have_enough_candidates = len(self.keys_to_loop_candidates) > self.min_queue_size
            # If we have enough loop closures then start processing
            if self.current_edge_future is None and have_enough_candidates:
                self.start_finding_new_batch()
                self.current_edge_future = self.thread_pool.submit(self.edge_subset_algorithm.run)
            # If there were previously not enough edges to do selection but now there are, restart processing
            elif not self.edge_subset_algorithm.is_processing and len(self.keys_to_loop_candidates) > self.min_queue_size:
                self.start_finding_new_batch()
            elif have_enough_candidates and self.edge_subset_algorithm.stale_solution:
                self.start_finding_new_batch()
            self.solution_lock.release()
    def handle_pose_graph_message(self, pose_graph):
        """
        update internal pose graph representation when a new pose graph is published
        (keeps only the newest graph by header timestamp)
        :return:
        """
        if self.current_pose_graph is None or pose_graph.header.stamp >= self.current_pose_graph.header.stamp:
            self.current_pose_graph = pose_graph
    def start_finding_new_batch(self):
        """Snapshot the queue and (re)initialize the subset-selection algorithm."""
        # NOTE(review): copy.deepcopy of a dict_values view raises TypeError on
        # Python 3; this presumably targeted Python 2 rospy — confirm runtime.
        working_loop_candidates = deepcopy(self.keys_to_loop_candidates.values())
        # loop_closures_to_choose < 1 is interpreted as a fraction of the queue.
        if (self.loop_closures_to_choose < 1):
            number_of_loop_closures_to_choose = max(1, int(
                float(len(working_loop_candidates)) * self.loop_closures_to_choose))
        else:
            number_of_loop_closures_to_choose = min(len(working_loop_candidates), self.loop_closures_to_choose)
        if len(working_loop_candidates) < self.min_queue_size:
            self.edge_subset_algorithm.stop_processing()
        else:
            self.log_debug("Going to find " + str(number_of_loop_closures_to_choose) + ", Queue Size: " + str(len(working_loop_candidates)) + ", # to choose " + str(self.loop_closures_to_choose))
            self.edge_subset_algorithm.initialize(self.model, number_of_loop_closures_to_choose,
                                                  self.current_pose_graph, working_loop_candidates)
    def handle_status(self, status):
        """
        Handle the loop closure computation status: on COMPLETED_ALL, publish
        the current best subset, prune it from the queue, and restart selection.
        """
        if (status.type == status.COMPLETED_ALL):
            with self.solution_lock:
                if self.edge_subset_algorithm.stale_solution:
                    #self.log_debug("Stale Solution")
                    return None
                outbound_edge_indexes = self.edge_subset_algorithm.get_current_solution()
                if outbound_edge_indexes is None:
                    return None
                else:
                    self.previous_batch = outbound_edge_indexes
                try:
                    self.edge_subset_algorithm.stop_processing()
                    out_edges = []
                    out_bundle = LoopCandidateArray()
                    out_bundle.originator = 1
                    for outbound_edge_index in outbound_edge_indexes:
                        try:
                            edge = self.edge_subset_algorithm.loop_candidates[outbound_edge_index]
                            out_edges.append(edge)
                        except IndexError:
                            self.log_warn("Outbound edge index is not in loop candidates")
                    out_bundle.candidates = out_edges
                    # Selected candidates leave the queue before publishing.
                    for edge in out_edges:
                        key = self.make_key(edge)
                        try:
                            del self.keys_to_loop_candidates[key]
                        except KeyError:
                            pass
                    self.log_info("Out " + str(len(out_edges)) + ", Queue Size: " + str(len(self.keys_to_loop_candidates)))
                    self.start_finding_new_batch()
                    self.loop_publisher.publish(out_bundle)
                except Exception as e:
                    self.log_error("Failed to make batch {}".format(traceback.format_exc()))
        else:
            self.log_error("Unkown status {}".format(status))
| 9,763 | 44.840376 | 196 | py |
LAMP | LAMP-main/loop_closure/src/subset_algorithms/gnn.py | from __future__ import division
from copy import deepcopy
import math
import time
from subset_algorithms.util import constuct_pytorch_geometric_graph, run_model_on_list_of_graphs, choices, fast_deepcopy
import rospy
import numpy as np
from numpy.random import binomial
import random
from subset_algorithms.heuristic import MaximallySeperatedNodes, MaximumCovarianceNodes, Random
from subset_algorithms.base import GNNBasedLoopClosureBatchSelector
class BinomialCEM(GNNBasedLoopClosureBatchSelector):
    def __init__(self, r_lock, min_processing_seconds=30):
        """Cross-entropy-method selector over binomial edge-inclusion variables.

        Args:
            r_lock: re-entrant lock shared with the batcher node.
            min_processing_seconds: minimum time to spend optimizing a batch.
        """
        super(BinomialCEM, self).__init__(r_lock, min_processing_seconds)
        self.initialized = False  # flipped to True by initialize_method()
def make_histogram_string(self, probabilities, max_bins=10):
hist, bin_edges = np.histogram(probabilities, bins=min(probabilities.shape[0], max_bins))
s = ""
for value, left_edge, right_edge in zip(hist, bin_edges, bin_edges[1:]):
s += "%.2f : %d " % (((right_edge + left_edge) / 2.0 * 100), value)
return s
def set_params(self, parameters):
self.samples = parameters["cem"]["samples"]
self.alpha = parameters["cem"]["alpha"]
self.ptile = parameters["cem"]["ptile"]
def initialize_method(self):
self.probabilities = np.ones(len(self.possible_new_edges)) * .5
self.p_quantile_error = float("inf")
self.running_iterations = 0
self.initialized = True
def step(self):
# Sample graphs
cur_samples = []
for _ in range(self.samples):
cur_samples.append(binomial(1, self.probabilities))
# Make sure samples have exactly num_to_select entries which are 1
for i in range(self.samples):
flip_direction = 0
if np.sum(cur_samples[i]) < self.number_of_loop_closures_to_select:
flip_direction = 1
num_samples_to_make_1 = self.number_of_loop_closures_to_select - np.sum(cur_samples[i])
idxs_which_are_zero = [idx for idx in range(cur_samples[i].shape[0]) if cur_samples[i][idx] == 0]
probs_for_idxs = [self.probabilities[idx] for idx in idxs_which_are_zero]
idxs_to_flip_to_one = np.random.choice(idxs_which_are_zero, num_samples_to_make_1, replace=False,
p=probs_for_idxs / np.sum(probs_for_idxs))
for idx in idxs_to_flip_to_one:
cur_samples[i][idx] = 1
if np.sum(cur_samples[i]) > self.number_of_loop_closures_to_select:
flip_direction = -1
num_samples_to_make_0 = np.sum(cur_samples[i]) - self.number_of_loop_closures_to_select
idxs_which_are_one = [idx for idx in range(cur_samples[i].shape[0]) if cur_samples[i][idx] == 1]
probs_for_idxs = [1 - self.probabilities[idx] for idx in idxs_which_are_one]
idxs_to_flip_to_zero = np.random.choice(idxs_which_are_one, num_samples_to_make_0, replace=False,
p=probs_for_idxs / np.sum(probs_for_idxs))
for idx in idxs_to_flip_to_zero:
cur_samples[i][idx] = 0
assert np.sum(
cur_samples[i]) == self.number_of_loop_closures_to_select, "Sum: %d Should be %d Flip Direction %d" % (np.sum(cur_samples[i]), self.number_of_loop_closures_to_select, flip_direction)
# Compute fitness
graphs_to_run = []
for cur_sample in cur_samples:
edges = []
edge_attrs = []
included = 0
for edge_idx,include in enumerate(cur_sample):
if include == 1:
try:
edges.append(self.possible_new_edges[edge_idx])
edge_attrs.append(self.possible_edge_attrs[edge_idx])
included += 1
except IndexError:
self.log_debug("IndexError")
edges = np.concatenate((self.current_pose_graph_edges,np.array(edges)))
edge_attrs = np.concatenate((self.current_pose_graph_edge_atrrs,np.array(edge_attrs)))
assert included == self.number_of_loop_closures_to_select, "Included {}, Required {}".format(included,self.number_of_loop_closures_to_select)
graphs_to_run.append(
constuct_pytorch_geometric_graph(self.current_pose_graph_nodes, edges,
edge_attrs))
errors = run_model_on_list_of_graphs(self.gnn_model, graphs_to_run)
new_probabilities = np.zeros(len(self.possible_new_edges))
self.p_quantile_error = min(np.percentile(errors, self.ptile), self.p_quantile_error)
number_of_included_samples = 0
for fitness, sample_idx in zip(errors, list(range(self.samples))):
if fitness <= self.p_quantile_error:
new_probabilities += np.array(cur_samples[sample_idx])
number_of_included_samples += 1
if number_of_included_samples != 0:
new_probabilities /= number_of_included_samples
new_probabilities = (1 - self.alpha) * self.probabilities + self.alpha * new_probabilities
self.probabilities = new_probabilities
hist_string = self.make_histogram_string(self.probabilities)
self.log_debug(
"CEM: {} bound {} effectivity {}, Errors N({:e},{:e}) Range: {:e} - {}".format(
self.running_iterations,self.p_quantile_error, number_of_included_samples / self.samples,np.mean(errors),np.std(errors, dtype=np.float64), np.max(errors)-np.min(errors),hist_string))
self.running_iterations += 1
def currently_computed_solution_(self):
if not self.initialized and self.running_iterations == 0:
return None
else:
# Sample final output (ML Sample)
best_edges = sorted(zip(self.probabilities, list(range(len(self.possible_new_edges)))), reverse=True)[
:self.number_of_loop_closures_to_select]
out_edges = []
for p, new_edge_idx in best_edges:
out_edges.append(new_edge_idx)
return out_edges
class SimulatedAnnealing(GNNBasedLoopClosureBatchSelector):
    """Selects a loop-closure batch via simulated annealing over edge subsets.

    The state is a fixed-size list of candidate-edge indices; a move swaps one
    edge in the solution for one outside it (biased toward spatially nearby
    candidates), and the energy is the GNN-predicted error of the resulting
    pose graph.
    """
    def __init__(self, r_lock, min_processing_seconds=30):
        super(SimulatedAnnealing, self).__init__(r_lock, min_processing_seconds)
        # This fallback is done because convex hulling can take a long time for a lot of closures
        self.initial_solution_finder = MaximallySeperatedNodes(r_lock, use_hull=True)
        self.fallback_initial_solution_finder = MaximallySeperatedNodes(r_lock, use_hull=False)
        self.max_number_of_loop_closures_to_use_initial_solution = 200
        self.initial_solution_finder_iterations = 30
        self.fallback_solution_finder_iterations = 100
        self.steps = 0
        self.min_temp = 1e-4
    def set_params(self, params):
        """Load annealing hyper-parameters from the ``simulated_annealing`` config section."""
        self.initial_solution_finder_iterations = params["simulated_annealing"]["initial_solution_finder_iterations"]
        self.fallback_solution_finder_iterations = params["simulated_annealing"]["fallback_solution_finder_iterations"]
        self.min_temp = params["simulated_annealing"]["min_temp"]
        self.Tmax = params["simulated_annealing"]["max_temp"]
        self.cooling = params["simulated_annealing"]["cooling"]
        self.max_number_of_loop_closures_to_use_initial_solution = params["simulated_annealing"]["max_number_of_loop_closures_to_use_initial_solution"]
    def initialize_method(self):
        """Prime both initial-solution heuristics; the annealing state itself is built lazily on the first step()."""
        self.initial_solution_finder.initialize(self.gnn_model, self.number_of_loop_closures_to_select, self.pose_graph,
                                                self.loop_candidates)
        self.fallback_initial_solution_finder.initialize(self.gnn_model, self.number_of_loop_closures_to_select,
                                                         self.pose_graph,
                                                         self.loop_candidates)
        self.found_initial_solution = False
        self.steps = 0
    def set_verbose(self, verbose):
        # Propagate verbosity to the nested heuristic finders as well.
        super(SimulatedAnnealing, self).set_verbose(verbose)
        self.initial_solution_finder.set_verbose(verbose)
        self.fallback_initial_solution_finder.set_verbose(verbose)
    def find_initial_solution(self):
        """Build the starting state, candidate midpoints, and annealing bookkeeping.

        Tries the hull-based heuristic for small candidate sets, the non-hull
        heuristic for large ones, and falls back to random indices if neither
        produced a solution.
        """
        self.log_debug("Finding initial solution")
        if len(self.loop_candidates) <= self.max_number_of_loop_closures_to_use_initial_solution:
            for i in range(self.initial_solution_finder_iterations):
                self.initial_solution_finder.step()
            starting_solution = self.initial_solution_finder.currently_computed_solution_()
            method = "Base"
        else:
            for i in range(self.fallback_solution_finder_iterations):
                self.fallback_initial_solution_finder.step()
            starting_solution = self.fallback_initial_solution_finder.currently_computed_solution_()
            method = "Fallback"
        if starting_solution is None:
            self.log_warn("Couldn't find initial solution, falling back")
            # NOTE(review): np.random.randint's upper bound is exclusive, so
            # the last candidate index can never be drawn here (and indices
            # may repeat) — confirm whether that is intended.
            starting_solution = np.random.randint(0, len(self.loop_candidates)-1, size=self.number_of_loop_closures_to_select).tolist()
            method = "Random"
        self.log_debug("%s : Starting solution length: %d, Supposed to be %d" % (method, len(starting_solution),self.number_of_loop_closures_to_select))
        # Midpoint of each candidate edge (average of its two endpoint poses);
        # used by move() to bias swaps toward spatially close candidates.
        self.all_points = []
        keys_to_nodes = dict()
        for node in self.pose_graph.nodes:
            keys_to_nodes[node.key] = node
        for idx, edge in enumerate(self.loop_candidates):
            to_node = keys_to_nodes[edge.key_to]
            from_node = keys_to_nodes[edge.key_from]
            average_x = (to_node.pose.position.x + from_node.pose.position.x) / 2
            average_y = (to_node.pose.position.y + from_node.pose.position.y) / 2
            average_z = (to_node.pose.position.z + from_node.pose.position.z) / 2
            self.all_points.append(np.array([average_x, average_y, average_z]))
        self.all_points = np.array(self.all_points)
        # self.distances = np.zeros((len(self.all_points), len(self.all_points)))
        # for i in range(len(self.all_points)):
        #     for j in range(len(self.all_points)):
        #         if i != j:
        #             self.distances[i, j] = 1 / (np.linalg.norm(self.all_points[i] - self.all_points[j]) + 1e-4)
        self.log_debug("Found initial solution")
        self.state = starting_solution
        self.T = self.Tmax
        self.E = self.energy()
        self.prevState = fast_deepcopy(self.state)
        self.prevEnergy = self.E
        self.best_state = fast_deepcopy(self.state)
        self.best_energy = self.E
        self.edge_idxs_not_in_solution = list(set(range(len(self.possible_new_edges))).difference(self.state))
        self.steps = 1
        self.accepts = 0
        self.improves = 0
    def step(self):
        """Lazily build the initial solution, then take one annealing step."""
        if not self.found_initial_solution:
            self.find_initial_solution()
        self.take_annealing_step()
        self.found_initial_solution = True
    def take_annealing_step(self):
        """One Metropolis step with exponential cooling floored at min_temp."""
        self.steps += 1
        T = max(self.Tmax * ((1 - self.cooling) ** self.steps), self.min_temp)
        self.move()
        self.E = self.energy()
        dE = self.E - self.prevEnergy
        # Metropolis criterion: always accept improvements; accept a worse
        # state with probability exp(-dE/T).
        if dE > 0.0 and math.exp(-dE / T) < random.random():
            # Restore previous state
            self.state = fast_deepcopy(self.prevState)
            self.E = self.prevEnergy
        else:
            # Accept new state and compare to best state
            self.accepts += 1
            if dE < 0.0:
                self.improves += 1
            self.prevState = fast_deepcopy(self.state)
            self.prevEnergy = self.E
            if self.E < self.best_energy:
                self.best_state = fast_deepcopy(self.state)
                self.best_energy = self.E
        # Report acceptance/improvement rates over each 100-step window.
        if (self.steps % 100) == 0:
            self.log_progress(
                self.steps, T, self.E, self.accepts / 100.0, self.improves / 100.0)
            self.accepts, self.improves = 0, 0
    def currently_computed_solution_(self):
        """Return the best state found so far, or None before the first step."""
        if self.found_initial_solution and self.steps >= 1:
            T = max(self.Tmax * ((1 - self.cooling) ** self.steps), self.min_temp)
            self.log_debug("Final Step: {}, E: {}, T:{}".format(self.steps,self.best_energy,T))
            return self.best_state
        else:
            return None
    def move(self, pow=1):
        """Swap a random in-solution edge for a nearby out-of-solution edge.

        Returns (new_edge, old_edge) so callers can undo the swap.
        NOTE(review): the ``pow`` parameter is unused (and shadows the
        builtin); DiscreteTabuSearch passes pow=3, which therefore has no
        effect.
        """
        swapped_out_idx = random.randint(0, len(self.state) - 1)
        old_edge = self.state[swapped_out_idx]
        # Inverse distance weighted
        if len(self.edge_idxs_not_in_solution) < 500:
            jump_idxs = self.edge_idxs_not_in_solution
        else:
            jump_idxs = random.sample(self.edge_idxs_not_in_solution,500)
        distance_weights = 1/ (np.linalg.norm(self.all_points[jump_idxs,:] - self.all_points[old_edge,:],ord=2,axis=1) + 1e-7)
        new_edge = choices(jump_idxs, distance_weights, k=1)[0]
        # Perform the swap
        self.state.pop(swapped_out_idx)
        self.edge_idxs_not_in_solution.remove(new_edge)
        self.state.append(new_edge)
        self.edge_idxs_not_in_solution.append(old_edge)
        #assert len(self.state) == self.num_to_select, f"State size didn't change, M {len(self.state) }, # choose {self.num_to_select}"
        return (new_edge, old_edge)
    def energy(self):
        """GNN-predicted error of the pose graph with the current state's edges appended."""
        edges = []
        edge_attrs = []
        for edge_idx in self.state:
            try:
                edges.append(self.possible_new_edges[edge_idx])
                edge_attrs.append(self.possible_edge_attrs[edge_idx])
            except IndexError:
                self.log_debug("IndexError")
        edges = np.concatenate((self.current_pose_graph_edges,np.array(edges)))
        edge_attrs = np.concatenate((self.current_pose_graph_edge_atrrs,np.array(edge_attrs)))
        return run_model_on_list_of_graphs(self.gnn_model,
                                           [constuct_pytorch_geometric_graph(self.current_pose_graph_nodes, edges, edge_attrs)])[0]
    def log_progress(self, step, T, E, acceptance, improvement):
        """Log one annealing progress line (temperature, energy, rates)."""
        self.log_debug(
            'Temp: {Temp:12.5f} Energy: {Energy:12.2f} Accept: {Accept:7.2%} Improve: {Improve:7.2%} S: {Steps:f} '
            .format(Temp=T,
                    Energy=E,
                    Accept=acceptance,
                    Improve=improvement,
                    Steps=step,
                    ))
class DiscretePSO(GNNBasedLoopClosureBatchSelector):
    """
    Discrete Particle Swarm Optimization from "A New Discrete Particle Swarm Optimization Algorithm" by Strasser, Sheppard, and Butcher

    Each particle is a matrix of shape (num_output_edges, num_candidates)
    holding one categorical distribution per output slot; fitness is the
    GNN-predicted error of a batch sampled from those distributions.
    """

    def set_params(self, params):
        """Load PSO hyper-parameters from the ``pso`` config section."""
        self.number_of_particles = params["pso"]["number_of_particles"]
        self.max_velocity = params["pso"]["max_velocity"]
        self.scaling_factor = params["pso"]["scaling_factor"]
        self.interia = params["pso"]["inertia"]  # (sic) inertia weight
        self.cognitive = params["pso"]["cognitive"]
        self.social = params["pso"]["social"]
        self.topology = params["pso"]["topology"]
        assert self.topology in ["global", "ring"]

    def initialize_method(self):
        """Initialize particles, velocities, and personal/global bests."""
        self.initialize_particles_and_velocities_and_bests()
        self.i = 0

    def step(self):
        """One PSO iteration: update velocities/positions, sample, and refresh bests."""
        # Calculate new velocities and particles
        for p, particle in enumerate(self.particles):
            cognitive_mix = random.uniform(0, self.cognitive)
            social_mix = random.uniform(0, self.social)
            if self.topology == "global":
                topological_best = self.global_best_position
            elif self.topology == "ring":
                # Ring topology: pull toward the better of the two neighbors.
                next_idx = (p + 1) % self.number_of_particles
                prev_idx = (p - 1) % self.number_of_particles
                if self.particle_best_fitnesses[next_idx] < self.particle_best_fitnesses[prev_idx]:
                    topological_best = self.particles[next_idx]
                else:
                    topological_best = self.particles[prev_idx]
            else:
                raise Exception()
            self.velocities[p] = np.clip(self.velocities[p] * self.interia + cognitive_mix * (self.particle_best_position[p] - particle) + social_mix * (topological_best - particle), -self.max_velocity, self.max_velocity)
            self.particles[p] = self.particles[p] + self.velocities[p]
        self.particles = self.normalize_particles(self.particles)
        # get samples
        samples = self.sample(self.particles)
        # calculate the fitnesses
        fitnesses = self.evaluate_fitness(samples)
        # Find new global best (if it exists)
        current_best = np.argmin(fitnesses)
        if fitnesses[current_best] < self.global_best_fitness:
            new_global_best_found = True
            self.global_best_position = self.particles[current_best]
            self.global_best_fitness = fitnesses[current_best]
            self.global_best_sample = samples[current_best]
        else:
            new_global_best_found = False
        personal_bests_found = 0
        # Find new personal bests (if they exist)
        for i, particle in enumerate(self.particles):
            if fitnesses[i] < self.particle_best_fitnesses[i]:
                self.particle_best_position[i] = self.calculate_new_best(self.particles[i], samples[i], scaling_factor=self.scaling_factor)
                # Bug fix: the personal-best fitness was never updated, so
                # every later comparison was made against the initial fitness
                # and the ring topology ranked neighbors on stale values.
                self.particle_best_fitnesses[i] = fitnesses[i]
                personal_bests_found += 1
        self.i += 1
        self.log_debug("I: {} Global Best: {} New Global {} New Personals {}/{}, Fitnesses N({},{}), Velocity Mag: {}".format(self.i, self.global_best_fitness, new_global_best_found, personal_bests_found, self.number_of_particles, np.mean(fitnesses), np.std(fitnesses), np.linalg.norm(self.velocities)))

    def currently_computed_solution_(self):
        """Return the best sampled batch so far, or None before initialization."""
        if self.global_best_sample is not None:
            return self.global_best_sample
        else:
            return None

    def initialize_particles_and_velocities_and_bests(self):
        """Randomly initialize the swarm and evaluate the initial bests."""
        self.global_best_sample = None
        # Particles dimensions are particles x output edge dimension x edge probability
        # for example, self.particles[0,1,2] is the probability of edge 2 being the first output edge for particle 0
        # Velocities are the same dimensions
        self.particles = np.random.rand(self.number_of_particles, self.number_of_loop_closures_to_select, len(self.possible_new_edges))
        self.velocities = np.clip(np.random.normal(0, scale=1.0, size=self.particles.shape), a_min=-self.max_velocity, a_max=self.max_velocity)
        self.particles = self.normalize_particles(self.particles)
        self.particle_best_position = self.particles.copy()
        initial_samples = self.sample(self.particle_best_position)
        self.particle_best_fitnesses = self.evaluate_fitness(initial_samples)
        best_particle_idx = np.argmin(self.particle_best_fitnesses)
        self.global_best_position = self.particle_best_position[best_particle_idx].copy()
        self.global_best_fitness = self.particle_best_fitnesses[best_particle_idx]
        self.global_best_sample = initial_samples[best_particle_idx]

    def normalize_particles(self, particles):
        """Clip each per-slot distribution to [0, 1] and renormalize it to sum to 1."""
        for i, particle in enumerate(particles):
            for j in range(particle.shape[0]):
                # TODO may not need to clip
                particles[i, j] = np.clip(particles[i, j], 0, 1)
                particles[i, j] = particles[i, j] / np.sum(particles[i, j])
                # assert np.sum(particles[i,j]) == 1.0, "Particles should be normalized to 1, they are {}".format(np.sum(particles[i,j]))
        return particles

    def sample(self, particles):
        """Draw one concrete batch (list of edge indices) per particle."""
        sampled_solutions = []
        for i, particle in enumerate(particles):
            solution = []
            for j in range(particle.shape[0]):
                chosen_edge = choices(range(particle.shape[1]), weights=particles[i, j], k=1)[0]
                solution.append(chosen_edge)
            assert len(solution) == self.number_of_loop_closures_to_select
            sampled_solutions.append(solution)
        return np.array(sampled_solutions)

    def evaluate_fitness(self, samples):
        """Score each sampled batch with the GNN error model (lower is better)."""
        assert len(samples.shape) == 2, "Samples should be 2d but it's {}, samples: {}".format(samples.shape, samples)
        graphs_to_run = []
        for cur_sample in samples:
            edges = []
            edge_attrs = []
            for edge_idx in cur_sample:
                try:
                    edges.append(self.possible_new_edges[edge_idx])
                    edge_attrs.append(self.possible_edge_attrs[edge_idx])
                except IndexError:
                    self.log_debug("IndexError")
            edges = np.concatenate((self.current_pose_graph_edges, np.array(edges)))
            edge_attrs = np.concatenate((self.current_pose_graph_edge_atrrs, np.array(edge_attrs)))
            graphs_to_run.append(
                constuct_pytorch_geometric_graph(self.current_pose_graph_nodes, edges,
                                                 edge_attrs))
        return run_model_on_list_of_graphs(self.gnn_model, graphs_to_run)

    def calculate_new_best(self, particle, sample, scaling_factor):
        """Reinforce the probability mass of the chosen edge in each slot.

        Implements the personal-best update of the Strasser et al. discrete
        PSO: the sampled edge's probability is scaled by ``scaling_factor``
        and the remaining mass is redistributed over the other edges.
        """
        out_best = np.zeros(shape=particle.shape)
        for d, dimension in enumerate(particle):
            dimension_sum = np.sum(dimension)
            for j, probability in enumerate(dimension):
                if sample[d] == j:
                    out_best[d, j] = scaling_factor * probability
                else:
                    out_best[d, j] = probability + (1 - scaling_factor) * (dimension_sum - probability)
                assert 0 <= out_best[d, j] < 1
        return out_best
class DiscreteTabuSearch(SimulatedAnnealing):
    """Tabu search over loop-closure batches.

    Reuses SimulatedAnnealing's state/move/energy machinery but, instead of a
    Metropolis acceptance rule, evaluates ``num_neighbors`` candidate moves
    per step and takes the best non-tabu one.  Recently applied moves are kept
    in ``tabu_list`` for ``max_tabu_length`` steps.
    """

    def set_params(self, params):
        """Load tabu-search hyper-parameters from the ``tabu`` config section."""
        self.initial_solution_finder_iterations = params["tabu"]["initial_solution_finder_iterations"]
        self.fallback_solution_finder_iterations = params["tabu"]["fallback_solution_finder_iterations"]
        self.max_number_of_loop_closures_to_use_initial_solution = params["tabu"]["max_number_of_loop_closures_to_use_initial_solution"]
        self.num_neighbors = params["tabu"]["num_neighbors"]
        self.max_tabu_length = params["tabu"]["max_tabu_length"]

    def initialize_method(self):
        """Initialize the parent machinery and eagerly build the initial solution."""
        super(DiscreteTabuSearch, self).initialize_method()
        self.tabu_list = dict()  # move -> age in steps
        # Not used (only needed by the annealing acceptance rule).
        self.Tmax = 0
        self.find_initial_solution()
        self.found_initial_solution = True

    def undo_move(self, move):
        """Reverse a (new_edge, old_edge) swap produced by move()."""
        new_edge, old_edge = move
        self.state.remove(new_edge)
        self.state.append(old_edge)
        self.edge_idxs_not_in_solution.append(new_edge)
        self.edge_idxs_not_in_solution.remove(old_edge)

    def reapply_move(self, move):
        """Re-apply a previously undone (new_edge, old_edge) swap."""
        new_edge, old_edge = move
        self.state.remove(old_edge)
        self.state.append(new_edge)
        self.edge_idxs_not_in_solution.append(old_edge)
        self.edge_idxs_not_in_solution.remove(new_edge)

    def step(self):
        """Evaluate num_neighbors candidate moves and apply the best non-tabu one."""
        best_candidate_move = None
        best_candidate_fitness = float("inf")
        tabu_rejections = 0
        for i in range(self.num_neighbors):
            # Trial swap: apply, score, then undo so every candidate is
            # evaluated from the same base state.
            cur_move = self.move(pow=3)
            cur_energy = self.energy()
            if (cur_move not in self.tabu_list):
                if (cur_energy < best_candidate_fitness):
                    best_candidate_fitness = cur_energy
                    best_candidate_move = cur_move
            else:
                tabu_rejections += 1
            self.undo_move(cur_move)
        if best_candidate_move is None:
            self.log_warn("Tabu Search Couldn't Make Move")
            return None
        self.reapply_move(best_candidate_move)
        found_new_best = False
        if best_candidate_fitness <= self.best_energy:
            self.best_state = deepcopy(self.state)
            self.best_energy = best_candidate_fitness
            found_new_best = True
        self.tabu_list[best_candidate_move] = 0
        # Age every tabu entry and evict expired ones.  Bug fix: iterate over
        # a snapshot of the keys — deleting from a dict while iterating its
        # live items() view raises RuntimeError on Python 3.
        for move in list(self.tabu_list.keys()):
            self.tabu_list[move] += 1
            if self.tabu_list[move] > self.max_tabu_length:
                del self.tabu_list[move]
        self.steps += 1
        self.log_debug("S: {} Best: {} Bound: {} Rejected: {}".format(self.steps, found_new_best, self.best_energy, tabu_rejections))
| 24,494 | 45.043233 | 296 | py |
LAMP | LAMP-main/loop_closure/src/subset_algorithms/util.py | import random
import torch
import numpy as np
from torch_geometric.data import Data, DataLoader
import rospy
import operator as op
from functools import reduce
from itertools import repeat as _repeat
from bisect import bisect as _bisect
import operator
import json
def constuct_pytorch_geometric_graph(nodes, edges, edge_attrs=None):
    """Pack node/edge arrays into a torch_geometric ``Data`` object.

    ``nodes`` become float32 features, ``edges`` a transposed int64 index
    tensor (shape [2, num_edges]), and ``edge_attrs`` float32 attributes.
    (Function name keeps its historical spelling — callers use it as-is.)
    """
    node_tensor = torch.from_numpy(np.array(nodes, dtype=np.float32))
    index_tensor = torch.from_numpy(np.array(edges, dtype=np.int64).T)
    attr_tensor = torch.from_numpy(np.array(edge_attrs, dtype=np.float32))
    return Data(x=node_tensor, edge_index=index_tensor, edge_attr=attr_tensor)
def run_model_on_list_of_graphs(model, graphs):
    """Evaluate ``model`` on each graph independently.

    Every graph is treated as its own batch (an all-zeros batch vector), the
    model outputs are summed to a scalar, and the scalars are returned as a
    numpy array with one entry per graph.
    """
    scores = []
    for graph in graphs:
        batch_vector = torch.zeros(graph.x.shape[0], dtype=int)
        output = model(graph.x, graph.edge_index, graph.edge_attr, batch_vector)
        scores.append(torch.sum(output).cpu().detach().numpy())
    return np.array(scores)
def ncr(n, r):
    """Binomial coefficient C(n, r) ("n choose r") as an exact integer.

    Bug fix: out-of-range arguments (r < 0 or r > n) previously fell through
    to two empty reduce() ranges and returned 1; mathematically there are 0
    such combinations, so return 0 instead.
    """
    if r < 0 or r > n:
        return 0
    r = min(r, n - r)  # exploit symmetry to shorten the products
    numer = reduce(op.mul, range(n, n - r, -1), 1)
    denom = reduce(op.mul, range(1, r + 1), 1)
    return numer // denom  # or / in Python 2
# Backport of Python 3.8's itertools.accumulate (adds the `initial` keyword,
# which is missing from the Python 2 / early-3.x stdlib).
def accumulate(iterable, func=operator.add, initial=None):
    """Yield running totals of *iterable* combined with *func*.

    >>> list(accumulate([1, 2, 3, 4, 5]))        # 1 3 6 10 15
    >>> list(accumulate([1, 2, 3], initial=100)) # 100 101 103 106
    >>> list(accumulate([1, 2, 3], operator.mul))  # 1 2 6
    """
    iterator = iter(iterable)
    if initial is None:
        # No seed: the first element is the first total; an empty input
        # yields nothing at all.
        try:
            running = next(iterator)
        except StopIteration:
            return
    else:
        running = initial
    yield running
    for element in iterator:
        running = func(running, element)
        yield running
# Backport of Python 3.6+'s random.choices for older interpreters.
def choices(population, weights=None, cum_weights=None, k=1):
    """Return a k sized list of population elements chosen with replacement.

    If neither relative ``weights`` nor ``cum_weights`` are given, selections
    are made with equal probability.  Supplying both raises TypeError; a
    weight list whose length differs from the population raises ValueError.
    """
    size = len(population)
    if cum_weights is None:
        if weights is None:
            # Uniform fast path: index by a scaled uniform draw.
            scale = size + 0.0  # convert to float for a small speed improvement
            return [population[int(random.random() * scale)] for _ in range(k)]
        cum_weights = list(accumulate(weights))
    elif weights is not None:
        raise TypeError('Cannot specify both weights and cumulative weights')
    if len(cum_weights) != size:
        raise ValueError('The number of weights does not match the population')
    total = cum_weights[-1] + 0.0  # convert to float
    upper = size - 1
    return [population[_bisect(cum_weights, random.random() * total, 0, upper)]
            for _ in range(k)]
def fast_deepcopy(a):
    """Deep-copy *a* via a JSON round-trip.

    Faster than copy.deepcopy for plain nested lists/dicts, but only valid
    for JSON-serializable values (tuples come back as lists, non-string dict
    keys are stringified).
    """
    serialized = json.dumps(a)
    return json.loads(serialized)
LAMP | LAMP-main/loop_closure/src/gnn/gnn_model.py | import torch
from torch.nn import Linear, ModuleList, BatchNorm1d
from torch.nn import ModuleList, Embedding, Sequential, ReLU
from torch.nn import Sequential as Seq, Linear as Lin, ReLU
import torch.nn.functional as F
from torch_geometric.nn import GCNConv, GraphConv, GATConv, SAGEConv, GMMConv, NNConv
from torch_geometric.nn import GENConv, GraphUNet, PNAConv, DeepGCNLayer, LayerNorm
from torch_geometric.nn import global_mean_pool, global_add_pool, GlobalAttention
class LoopClosureGNN(torch.nn.Module):
    """Graph-level regressor: GMMConv stack -> add-pool -> MLP head.

    Takes per-node features, an edge index, and (pseudo-coordinate) edge
    attributes; outputs one vector per graph, optionally squashed through a
    sigmoid.
    """
    def __init__(self, hidden_channels, num_input_features, num_edge_attr, num_output_features, ends_in_sigmoid=False):
        super(LoopClosureGNN, self).__init__()
        #torch.manual_seed(12345)
        self.node_encoder = Linear(num_input_features, hidden_channels)
        # NOTE(review): edge_encoder is constructed but its use in forward()
        # is commented out — it only adds (unused) parameters.
        self.edge_encoder = Linear(num_edge_attr, hidden_channels)
        # GMMConv(in, out, dim=4, kernel_size=5): edge_attr is used as a
        # 4-dimensional pseudo-coordinate for the Gaussian mixture kernels.
        self.conv1 = GMMConv(hidden_channels, hidden_channels, 4,5)
        self.gcs = ModuleList([GMMConv(hidden_channels,hidden_channels,4, 5) for i in range(2)])
        #self.conv1 = SAGEConv(num_input_features, hidden_channels)
        #self.gcs = ModuleList([SAGEConv(hidden_channels, hidden_channels) for i in range(4)])
        num_linear = 3
        self.lins = ModuleList([Linear(hidden_channels, hidden_channels) for i in range(num_linear)])
        # NOTE(review): num_linear + 2 norms are created but forward() uses
        # batch_norms[1:] in the MLP loop plus batch_norms[-1] at the end, so
        # batch_norms[0] is never used.
        self.batch_norms = ModuleList([BatchNorm1d(hidden_channels) for i in range(num_linear + 2)])
        self.lin = Linear(hidden_channels, num_output_features)
        self.ends_in_sigmoid = ends_in_sigmoid
    def forward(self, x, edge_index,edge_attr, batch):
        """Return one output vector per graph in the batch."""
        # 1. Obtain node embeddings
        #x = self.input_norm(x)
        x = self.node_encoder(x)
        #edge_attr = self.edge_encoder(edge_attr)
        x = self.conv1(x, edge_index,edge_attr)
        for gc in self.gcs:
            x = x.tanh()
            x = gc(x, edge_index,edge_attr)
        # 2. Readout layer
        x = global_add_pool(x, batch)  # [batch_size, hidden_channels]
        # 3. Apply a final classifier
        for lin,batch_norm in zip(self.lins,self.batch_norms[1:]):
            #x = F.dropout(x, p=0.5, training=self.training)
            x = batch_norm(x)
            x = lin(x)
            x = torch.tanh(x)
        #x = F.dropout(x, p=0.5, training=self.training)
        x = self.batch_norms[-1](x)
        x = self.lin(x)
        if self.ends_in_sigmoid:
            x = torch.sigmoid(x)
        return x
class LoopClosureGNNGlobalAttention(torch.nn.Module):
    """Graph-level regressor: GENConv stack -> GlobalAttention pool -> MLP head.

    NOTE(review): ``GENConv`` is not among this file's imports (only
    GCNConv/GraphConv/GATConv/SAGEConv/GMMConv/NNConv are imported), so
    constructing this class raises NameError unless GENConv is imported from
    torch_geometric.nn.
    """
    def __init__(self, hidden_channels, num_input_features, num_output_features, ends_in_sigmoid=False):
        super(LoopClosureGNNGlobalAttention, self).__init__()
        #torch.manual_seed(12345)
        #self.input_norm = BatchNorm1d(num_input_features)
        self.conv1 = GENConv(num_input_features, hidden_channels,msg_norm=True,learn_t=True,learn_p=True,learn_msg_scale=True)
        #self.gcs = ModuleList([GraphConv(hidden_channels, hidden_channels) for i in range(4)])
        self.gcs = ModuleList([GENConv(hidden_channels,hidden_channels,msg_norm=True,learn_t=True,learn_p=True,learn_msg_scale=True) for i in range(2)])
        #self.conv1 = SAGEConv(num_input_features, hidden_channels)
        #self.gcs = ModuleList([SAGEConv(hidden_channels, hidden_channels) for i in range(4)])
        num_linear = 3
        self.lins = ModuleList([Linear(hidden_channels, hidden_channels) for i in range(num_linear)])
        # NOTE(review): as in LoopClosureGNN, batch_norms[0] is never used by
        # forward() (the loop uses batch_norms[1:] and the tail uses [-1]).
        self.batch_norms = ModuleList([BatchNorm1d(hidden_channels) for i in range(num_linear + 2)])
        self.lin = Linear(hidden_channels, num_output_features)
        self.ends_in_sigmoid = ends_in_sigmoid
        # Attention gate producing one score per node for the pooled readout.
        gate_nn = Seq(Lin(hidden_channels, hidden_channels), ReLU(), Lin(hidden_channels, 1))
        self.glob = GlobalAttention(gate_nn)
    def forward(self, x, edge_index, batch):
        """Return one output vector per graph in the batch."""
        # 1. Obtain node embeddings
        #x = self.input_norm(x)
        x = self.conv1(x, edge_index)
        for gc in self.gcs:
            x = x.tanh()
            x = gc(x, edge_index)
        # 2. Readout layer
        #x = global_add_pool(x, batch) # [batch_size, hidden_channels]
        x = self.glob(x,batch)
        # 3. Apply a final classifier
        for lin,batch_norm in zip(self.lins,self.batch_norms[1:]):
            #x = F.dropout(x, p=0.5, training=self.training)
            x = batch_norm(x)
            x = lin(x)
            x = torch.tanh(x)
        #x = F.dropout(x, p=0.5, training=self.training)
        x = self.batch_norms[-1](x)
        x = self.lin(x)
        if self.ends_in_sigmoid:
            x = torch.sigmoid(x)
        return x
class LoopClosureUNet(torch.nn.Module):
    """Graph-level regressor built on a Graph U-Net encoder.

    NOTE(review): ``GraphUNet`` is not among this file's imports, so
    constructing this class raises NameError unless it is imported from
    torch_geometric.nn.
    """
    def __init__(self,hidden_channels,num_features,num_output_features,unet_depth=3):
        super(LoopClosureUNet, self).__init__()
        #pool_ratios = [2000 / data.num_nodes, 0.5]
        self.unet = GraphUNet(num_features, hidden_channels, hidden_channels,
                              depth=unet_depth)
        num_linear = 3
        self.lins = ModuleList([Linear(hidden_channels, hidden_channels) for i in range(num_linear)])
        # num_linear + 1 norms: one per Linear in the loop plus a final one.
        self.batch_norms = ModuleList([BatchNorm1d(hidden_channels) for i in range(num_linear + 1)])
        self.lin = Linear(hidden_channels, num_output_features)
    def forward(self,x,edge_index,batch):
        """Return one output vector per graph in the batch."""
        # edge_index, _ = dropout_adj(data.edge_index, p=0.2,
        #                             force_undirected=True,
        #                             num_nodes=data.num_nodes,
        #                             training=self.training)
        # NOTE(review): p=0.92 drops 92% of input features — unusually
        # aggressive; confirm this is intentional.
        x = F.dropout(x, p=0.92, training=self.training)
        x = self.unet(x, edge_index)
        # 2. Readout layer
        x = global_add_pool(x, batch)  # [batch_size, hidden_channels]
        # 3. Apply a final classifier
        # NOTE(review): this zip pairs lins with batch_norms[0:3] and the tail
        # reuses batch_norms[-1] (index 3), so the last norm is applied twice
        # in effect if num_linear == 3 — confirm the intended indexing.
        for lin,batch_norm in zip(self.lins,self.batch_norms):
            #x = F.dropout(x, p=0.5, training=self.training)
            x = batch_norm(x)
            x = lin(x)
            x = torch.tanh(x)
        #x = F.dropout(x, p=0.5, training=self.training)
        x = self.batch_norms[-1](x)
        x = self.lin(x)
        return x
class LoopClosurePNA(torch.nn.Module):
    """Graph-level regressor using Principal Neighbourhood Aggregation convs.

    ``deg`` is the in-degree histogram tensor PNAConv requires for its
    scalers.  Requires ``PNAConv`` from torch_geometric.nn to be imported at
    the top of this file.
    """

    def __init__(self, node_features, output_features, deg):
        super(LoopClosurePNA, self).__init__()
        hidden_dim = 32
        self.node_emb = Linear(node_features, hidden_dim)
        aggregators = ['mean', 'min', 'max', 'std']
        scalers = ['identity', 'amplification', 'attenuation']
        self.convs = []
        self.batch_norms = []
        for _ in range(2):
            conv = PNAConv(in_channels=hidden_dim, out_channels=hidden_dim,
                           aggregators=aggregators, scalers=scalers, deg=deg,
                           towers=4, pre_layers=1, post_layers=1,
                           divide_input=False)
            self.convs.append(conv)
            self.batch_norms.append(BatchNorm1d(hidden_dim))
        self.convs = ModuleList(self.convs)
        # Bug fix: this previously read ModuleList(self.convs), which silently
        # replaced the batch-norm layers with the conv layers themselves (and
        # forward() then had to pass edge_index into the "norm").
        self.batch_norms = ModuleList(self.batch_norms)
        # self.mlp = Sequential(Linear(hidden_dim, int(hidden_dim/2)), ReLU(),Linear(int(hidden_dim/2),int(hidden_dim/4)), ReLU(),
        #                       Linear(int(hidden_dim/4), output_features))
        self.mlp = Sequential(Linear(hidden_dim, int(hidden_dim/2)), ReLU(), Linear(int(hidden_dim/2), output_features))

    def forward(self, x, edge_index, batch):
        """Embed nodes, run the PNA conv stack, add-pool per graph, and regress."""
        x = self.node_emb(x)
        #edge_attr = self.edge_emb(edge_attr)
        for conv, batch_norm in zip(self.convs, self.batch_norms):
            # Bug fix: BatchNorm1d takes only the features; edge_index was
            # previously (and wrongly) passed to the batch norm.
            x = F.relu(batch_norm(conv(x, edge_index)))
        x = global_add_pool(x, batch)
        return self.mlp(x)
class DeeperGCN(torch.nn.Module):
    """Graph-level regressor using residual DeepGCN (res+) layers.

    NOTE(review): ``GENConv``, ``LayerNorm``, and ``DeepGCNLayer`` are not
    among this file's imports, so constructing this class raises NameError
    unless they are imported (torch_geometric.nn provides all three).
    """
    def __init__(self, hidden_channels, input_features,output_features,num_layers=2):
        super(DeeperGCN, self).__init__()
        self.node_encoder = Linear(input_features, hidden_channels)
        self.layers = torch.nn.ModuleList()
        for i in range(1, num_layers + 1):
            conv = GENConv(hidden_channels, hidden_channels, aggr='softmax',
                           t=1.0, learn_t=True, num_layers=2, norm='layer')
            norm = LayerNorm(hidden_channels)
            act = ReLU(inplace=True)
            # NOTE(review): ckpt_grad=i % 3 is 0 (falsy → no gradient
            # checkpointing) whenever i is a multiple of 3 — confirm intended.
            layer = DeepGCNLayer(conv, norm, act, block='res+', dropout=0.1,
                                 ckpt_grad=i % 3)
            self.layers.append(layer)
        self.mlp = Sequential(Linear(hidden_channels, int(hidden_channels/2)), ReLU(),
                              Linear(int(hidden_channels/2), output_features))
    def forward(self, x, edge_index,batch ):
        """Return one output vector per graph in the batch."""
        x = self.node_encoder(x)
        #edge_attr = self.edge_encoder(edge_attr)
        # res+ pattern: raw conv first, residual layers after, then the first
        # layer's norm/act applied to the final representation.
        x = self.layers[0].conv(x, edge_index)
        for layer in self.layers[1:]:
            x = layer(x, edge_index)
        x = self.layers[0].act(self.layers[0].norm(x))
        x = F.dropout(x, p=0.1, training=self.training)
        x = global_add_pool(x, batch)
        return self.mlp(x)
| 9,031 | 43.27451 | 152 | py |
LAMP | LAMP-main/loop_closure/script/dataset.py | import itertools
import os
import numpy as np
import torch
from torch_geometric.data import InMemoryDataset, Data
from tqdm import tqdm
import random
class LoopClosureDataset(InMemoryDataset):
    """In-memory PyG dataset built from pre/post pose-graph CSV dumps.

    Layout under ``root``: one directory per run, each with ``pre/`` and
    ``post/`` subdirectories containing per-epoch node and edge CSV files
    (file names encode the epoch before the first '-'; "node" in the name
    marks a node file).  One Data object is built per (pre, post) epoch pair;
    the regression target is the summed x+y+z covariance accumulated over the
    post-solve odometry nodes.
    """
    def __init__(self, root, transform=None, pre_transform=None, delete_bad_files=True):
        super(LoopClosureDataset, self).__init__(root, transform, pre_transform)
        self.data, self.slices = torch.load(self.processed_paths[0])
    @property
    def raw_file_names(self):
        # No raw download step — data is expected to already be under root.
        return []
    @property
    def processed_file_names(self):
        return ['closures.dataset']
    def download(self):
        # Intentionally a no-op; see raw_file_names.
        pass
    def process(self):
        """Parse every run under root and collate the graphs into one file."""
        # Pass 1: collect the set of node type strings across all runs so the
        # one-hot type encoding has a stable, global index.
        node_types = set()
        bad_files = []
        for run in os.listdir(self.root):
            if not run in ["raw", "processed"]:
                for file in os.listdir(os.path.join(self.root, run, "post")):
                    if "node" in file:
                        with open(os.path.join(self.root, run, "post", file)) as f:
                            for line in f.readlines()[1:]:
                                try:
                                    node_id, node_type, x, y, z, qx, qy, qz, qw, cx, cy, cz, cRX, cRY, cRZ, newline = line.split(
                                        ",")
                                    node_types.add(node_type)
                                except Exception as e:
                                    print(e)
                                    print(os.path.join(self.root, run, file))
        data_list = []
        # Feature layout per node: [cx, cy, cz, x, y, z, one-hot node type...]
        num_distinct_features = 6
        num_node_features = (len(node_types) + num_distinct_features)
        node_types = sorted(list(node_types))
        g_nodes = []
        g_edges = []
        g_targets = []
        # Pass 2: build one graph per (pre, post) epoch pair in each run.
        for run in tqdm(os.listdir(self.root), "Loading Graphs"):
            if run in ["raw", "processed"]:
                continue
            # Map epoch -> file name for pre/post node and edge files.
            nodes_pre = dict()
            edges_pre = dict()
            nodes_post = dict()
            edges_post = dict()
            for file in os.listdir(os.path.join(self.root, run, "pre")):
                epoch = file.split("-")[0]
                if "node" in file:
                    nodes_pre[epoch] = file
                else:
                    edges_pre[epoch] = file
            for file in os.listdir(os.path.join(self.root, run, "post")):
                epoch = file.split("-")[0]
                if "node" in file:
                    nodes_post[epoch] = file
                else:
                    edges_post[epoch] = file
            # Pair pre and post epochs positionally after sorting their keys.
            for graph_epoch, predicted_epoch in tqdm(list(zip(sorted(nodes_pre.keys()), sorted(nodes_post.keys()))),
                                                     desc="Loading graphs from run %s" % (run)):
                # Target: sum of odometry-node covariances after the solve.
                cx_after_solve = 0
                cy_after_solve = 0
                cz_after_solve = 0
                with open(os.path.join(self.root, run, "post", nodes_post[predicted_epoch])) as f:
                    for line in f.readlines():
                        if "odom_node" in line:
                            # TODO this should be the sum of all the errors
                            node_id, node_type, x, y, z, qx, qy, qz, qw, cx, cy, cz, cRX, cRY, cRZ, newline = line.split(
                                ",")
                            cx = float(cx)
                            cy = float(cy)
                            cz = float(cz)
                            cx_after_solve += cx
                            cy_after_solve += cy
                            cz_after_solve += cz
                            target_node = node_id
                # assert all(map(lambda x: not math.isnan(x), target))
                # target = [cx_after_solve, cy_after_solve,
                #           cz_after_solve]
                target = [cx_after_solve + cy_after_solve + cz_after_solve]
                g_targets.append(target)
                # Node features come from the pre-solve file.
                cur_nodes = []
                node_ids_to_seq_id = dict()
                with open(os.path.join(self.root, run, "pre", nodes_pre[graph_epoch])) as f:
                    for i, line in enumerate(f.readlines()[1:]):
                        # try:
                        node_id, node_type, x, y, z, qx, qy, qz, qw, cx, cy, cz, cRX, cRY, cRZ, newline = line.split(
                            ",")
                        if "an" in node_type:
                            continue
                        # NOTE(review): `i` keeps counting over skipped "an"
                        # nodes, so node_ids_to_seq_id values can have gaps and
                        # may exceed len(cur_nodes) - 1; edge indices built
                        # below inherit that — confirm intended.
                        # Number of node types + covariance
                        # node_features = np.zeros((len(node_types) + 6))
                        node_features = np.zeros((len(node_types) + num_distinct_features))
                        # node_features = np.zeros((3))
                        # node_features[6 + node_types.index(node_type)] = 1
                        node_features[num_distinct_features + node_types.index(node_type)] = 1
                        # cx = (float(cx)-empirical_node_mean[0])/empirical_node_std[0]
                        # cy = (float(cy)-empirical_node_mean[1])/empirical_node_std[1]
                        # cz = (float(cz)-empirical_node_mean[2])/empirical_node_std[2]
                        cx = float(cx)
                        cy = float(cy)
                        cz = float(cz)
                        node_features[0] = float(cx)
                        node_features[1] = float(cy)
                        node_features[2] = float(cz)
                        node_features[3] = float(x)
                        node_features[4] = float(y)
                        node_features[5] = float(z)
                        cur_nodes.append(node_features)
                        node_ids_to_seq_id[node_id] = i
                        # except Exception as e:
                        #     print(f"Node Couldn't parse line {line} due to {e}")
                        #     pass
                g_nodes += cur_nodes
                # Edges come from the post-solve file; edge_type is one-hot
                # encoded over 4 categories.
                cur_edges = []
                edge_attrs = []
                with open(os.path.join(self.root, run, "post", edges_post[predicted_epoch])) as f:
                    try:
                        for line in f.readlines()[1:]:
                            from_key, to_key, edge_type = line.split(",")
                            edge_1 = node_ids_to_seq_id[from_key]
                            edge_2 = node_ids_to_seq_id[to_key]
                            cur_edges.append((edge_1, edge_2))
                            edge_attr = np.zeros(4)
                            edge_attr[int(edge_type) - 1] = 1
                            edge_attrs.append(edge_attr)
                    except:
                        print(run)
                g_edges.append(cur_edges)
                cur_nodes = torch.from_numpy(np.array(cur_nodes, dtype=np.float32))
                cur_edges = torch.from_numpy(np.array(cur_edges, dtype=np.int64).T)
                target = torch.from_numpy(np.array([target], dtype=np.float32))
                edge_attrs = torch.from_numpy(np.array(edge_attrs, dtype=np.float32))
                try:
                    data = Data(x=cur_nodes, edge_index=cur_edges, y=target, edge_attr=edge_attrs)
                    #data.run_name = run
                    # debug() validates the Data object (raises on malformed
                    # graphs, e.g. edge indices out of range).
                    data.debug()
                    data_list.append(data)
                except RuntimeError as e:
                    #print("Skipping dataset {run},{graph_epoch} due to {e}")
                    print("Skipping %s" % (run))
        random.shuffle(data_list)
        data, slices = self.collate(data_list)
        torch.save((data, slices), self.processed_paths[0])
class LoopClosureNodeLabelDataset(InMemoryDataset):
    """In-memory PyG dataset of pose graphs with *per-node* regression labels.

    Each sample pairs a pre-optimization pose graph (node/edge CSVs under
    "<run>/pre/") with targets read from the matching post-optimization graph
    ("<run>/post/"): `y` is one [cov_x, cov_y, cov_z] row per line containing
    "odom_node". Expected layout under `root`: one directory per run, each
    with "pre/" and "post/" subdirectories of "<epoch>-nodes.csv" /
    "<epoch>-edges.csv" files.
    """
    def __init__(self, root, transform=None, pre_transform=None, delete_bad_files=True):
        # NOTE(review): `delete_bad_files` is accepted but never used.
        super(LoopClosureNodeLabelDataset, self).__init__(root, transform, pre_transform)
        # InMemoryDataset convention: load the collated tensors written by process().
        self.data, self.slices = torch.load(self.processed_paths[0])
    @property
    def raw_file_names(self):
        # No raw-file tracking; process() scans `self.root` directly.
        return []
    @property
    def processed_file_names(self):
        return ['closures_nodelevel.dataset']
    def download(self):
        # Data is expected to already be on disk; nothing to download.
        pass
    def process(self):
        """Parse every run's CSVs into `Data` objects and save the collation.

        Pass 1 collects the set of node-type strings (for one-hot encoding);
        pass 2 builds one graph per paired (pre, post) epoch of each run.
        """
        # ---- Pass 1: discover all node types across every run ----
        node_types = set()
        bad_files = []  # NOTE(review): never populated/used.
        for run in os.listdir(self.root):
            if not run in ["raw", "processed"]:
                for file in os.listdir(os.path.join(self.root, run, "post")):
                    if "node" in file:
                        with open(os.path.join(self.root, run, "post", file)) as f:
                            for line in f.readlines()[1:]:  # skip the CSV header row
                                try:
                                    # Rows carry 16 comma-separated fields; the
                                    # trailing "newline" column is a sentinel
                                    # written by the exporter.
                                    node_id, node_type, x, y, z, qx, qy, qz, qw, cx, cy, cz, cRX, cRY, cRZ, newline = line.split(
                                        ",")
                                    node_types.add(node_type)
                                except Exception as e:
                                    print(e)
                                    print(os.path.join(self.root, run, file))
        data_list = []
        # Feature layout per node: slots 0-5 = [cov_x, cov_y, cov_z, x, y, z],
        # remaining slots = one-hot node type.
        num_distinct_features = 6
        num_node_features = (len(node_types) + num_distinct_features)  # NOTE(review): shadows the InMemoryDataset property name and is unused below.
        node_types = sorted(list(node_types))  # stable one-hot index order
        g_nodes = []
        g_edges = []
        g_targets = []
        # ---- Pass 2: build one Data object per epoch of each run ----
        for run in tqdm(os.listdir(self.root), "Loading Graphs"):
            if run in ["raw", "processed"]:
                continue
            nodes_pre = dict()
            edges_pre = dict()
            nodes_post = dict()
            edges_post = dict()
            # Index files by epoch (filename prefix before the first "-").
            for file in os.listdir(os.path.join(self.root, run, "pre")):
                epoch = file.split("-")[0]
                if "node" in file:
                    nodes_pre[epoch] = file
                else:
                    edges_pre[epoch] = file
            for file in os.listdir(os.path.join(self.root, run, "post")):
                epoch = file.split("-")[0]
                if "node" in file:
                    nodes_post[epoch] = file
                else:
                    edges_post[epoch] = file
            # Pair pre/post epochs positionally after sorting their keys.
            for graph_epoch, predicted_epoch in tqdm(list(zip(sorted(nodes_pre.keys()), sorted(nodes_post.keys()))),
                                                     desc="Loading graphs from run %s" % (run)):
                # Targets: post-optimization translation covariance of each odom node.
                target = []
                with open(os.path.join(self.root, run, "post", nodes_post[predicted_epoch])) as f:
                    for line in f.readlines():
                        if "odom_node" in line:
                            # TODO this should be the sum of all the errors
                            node_id, node_type, x, y, z, qx, qy, qz, qw, cx, cy, cz, cRX, cRY, cRZ, newline = line.split(
                                ",")
                            cx = float(cx)
                            cy = float(cy)
                            cz = float(cz)
                            target.append([cx,cy,cz])
                #target = [cx_after_solve + cy_after_solve + cz_after_solve]
                g_targets.append(target)
                # Node features come from the *pre*-optimization graph.
                cur_nodes = []
                node_ids_to_seq_id = dict()
                with open(os.path.join(self.root, run, "pre", nodes_pre[graph_epoch])) as f:
                    for i, line in enumerate(f.readlines()[1:]):
                        # try:
                        node_id, node_type, x, y, z, qx, qy, qz, qw, cx, cy, cz, cRX, cRY, cRZ, newline = line.split(
                            ",")
                        if "an" in node_type:
                            continue
                        # NOTE(review): `i` keeps counting rows skipped by the
                        # filter above, so stored sequence ids can point past
                        # the end of `cur_nodes` when any node is dropped —
                        # verify intended.
                        # Number of node types + covariance
                        # node_features = np.zeros((len(node_types) + 6))
                        node_features = np.zeros((len(node_types) + num_distinct_features))
                        # node_features = np.zeros((3))
                        # node_features[6 + node_types.index(node_type)] = 1
                        node_features[num_distinct_features + node_types.index(node_type)] = 1
                        # cx = (float(cx)-empirical_node_mean[0])/empirical_node_std[0]
                        # cy = (float(cy)-empirical_node_mean[1])/empirical_node_std[1]
                        # cz = (float(cz)-empirical_node_mean[2])/empirical_node_std[2]
                        cx = float(cx)
                        cy = float(cy)
                        cz = float(cz)
                        node_features[0] = float(cx)
                        node_features[1] = float(cy)
                        node_features[2] = float(cz)
                        node_features[3] = float(x)
                        node_features[4] = float(y)
                        node_features[5] = float(z)
                        cur_nodes.append(node_features)
                        node_ids_to_seq_id[node_id] = i
                        # except Exception as e:
                        # print(f"Node Couldn't parse line {line} due to {e}")
                        # pass
                g_nodes += cur_nodes
                # Edges (with one-hot edge-type attributes) from the post graph.
                cur_edges = []
                edge_attrs = []
                with open(os.path.join(self.root, run, "post", edges_post[predicted_epoch])) as f:
                    try:
                        for line in f.readlines()[1:]:
                            from_key, to_key, edge_type = line.split(",")
                            edge_1 = node_ids_to_seq_id[from_key]
                            edge_2 = node_ids_to_seq_id[to_key]
                            cur_edges.append((edge_1, edge_2))
                            edge_attr = np.zeros(4)
                            edge_attr[int(edge_type) - 1] = 1
                            edge_attrs.append(edge_attr)
                    except:
                        # NOTE(review): bare except silently abandons the rest of
                        # the edge file (e.g. on a KeyError for a filtered node)
                        # and swallows every exception type — consider narrowing.
                        print(run)
                g_edges.append(cur_edges)
                # Convert to tensors; edge_index is the transposed (2, E) layout.
                cur_nodes = torch.from_numpy(np.array(cur_nodes, dtype=np.float32))
                cur_edges = torch.from_numpy(np.array(cur_edges, dtype=np.int64).T)
                target = torch.from_numpy(np.array(target, dtype=np.float32))
                edge_attrs = torch.from_numpy(np.array(edge_attrs, dtype=np.float32))
                try:
                    data = Data(x=cur_nodes, edge_index=cur_edges, y=target, edge_attr=edge_attrs)
                    data.run_name = run
                    data.debug()  # raises RuntimeError on inconsistent graphs
                    data_list.append(data)
                except RuntimeError as e:
                    #print(f"Skipping dataset {run},{graph_epoch} due to {e}")
                    print("Skipping dataset %s" % (run))
        random.shuffle(data_list)
        data, slices = self.collate(data_list)
        torch.save((data, slices), self.processed_paths[0])
| 14,544 | 44.311526 | 129 | py |
LAMP | LAMP-main/loop_closure/script/convert_rosbags_to_gnn_dataset.py | #!/usr/bin/env python
# This script converts bagfiles stored at fname to readable datasets for pytorch geometric in out_location
# Need pose_graph_opt.bag
import csv
import rosbag
import os
import yaml
import tqdm
fname = "/media/chris/hdd3/more_bags"
out_location = "/media/chris/hdd3/more_training_data"
def dump_edges(edges, filename):
    """Write pose-graph edges to a CSV file.

    Emits a header row followed by one (key_from, key_to, type) row per edge.
    """
    with open(filename, 'w') as csvfile:
        writer = csv.writer(csvfile)
        writer.writerow(["key_from", "key_to", "type"])
        for e in edges:
            writer.writerow([e.key_from, e.key_to, e.type])
def dump_nodes(nodes, filename):
    """Write pose-graph nodes to a CSV file.

    Each data row holds key, id, pose translation, pose quaternion, the six
    diagonal covariance entries, and a trailing sentinel column ("aaaaaaaa").
    The sentinel gives data rows 16 fields even though the header names only
    15 — downstream readers unpack 16 comma-separated values per line, so it
    must be preserved.
    """
    with open(filename, 'w') as csvfile:
        writer = csv.writer(csvfile)
        writer.writerow(['key', 'id',
                         'pose_x', 'pose_y', 'pose_z',
                         'pose_q_x', 'pose_q_y', 'pose_q_z', 'pose_q_w',
                         'cov_x', 'cov_y', 'cov_z',
                         'cov_rX', 'cov_rY', 'cov_rZ'])
        for node in nodes:
            pos = node.pose.position
            quat = node.pose.orientation
            cov = node.covariance
            writer.writerow([node.key, node.ID,
                             pos.x, pos.y, pos.z,
                             quat.x, quat.y, quat.z, quat.w,
                             cov[0], cov[7], cov[14],
                             cov[21], cov[28], cov[35], "aaaaaaaa"])
def makedirs(path):
    """Create *path* and all missing parents, tolerating existing components.

    Each component is created in turn; an OSError (e.g. "already exists") on
    any component is printed and skipped, so the call is effectively
    idempotent — unlike a bare os.makedirs, which raises when the target
    already exists.

    Bug fix: the original iterated over path.split("/")[1:] starting from an
    empty cur_path, which dropped the leading "/" of absolute paths (creating
    a *relative* copy of the tree) and dropped the first component of
    relative paths entirely.
    """
    # Seed with "/" so absolute paths stay absolute.
    cur_path = "/" if path.startswith("/") else ""
    for directory in path.split("/"):
        if not directory:
            # Skip empty segments from leading or doubled slashes.
            continue
        cur_path = os.path.join(cur_path, directory)
        print(cur_path)
        try:
            os.makedirs(cur_path)
        except OSError as e:
            # Best effort: report (typically "file exists") and keep going.
            print(e)
def closest_time_before(dict, t):
    """Return the message whose timestamp is closest to, but strictly before, *t*.

    Parameters
    ----------
    dict : mapping of timestamp -> message (name kept for callers; note it
        shadows the builtin)
    t : timestamp to search before

    Returns
    -------
    The message stored under the greatest key < t, or None when no key
    precedes t (including an empty mapping).

    Bug fix: the original stored `t` instead of the entry's own time and then
    compared `t < t`, so it always returned whatever item happened to come
    first in iteration order.
    """
    best_time = None
    best_msg = None
    for time, msg in dict.items():
        # Keep the latest entry that is still strictly before t.
        if time < t and (best_time is None or time > best_time):
            best_time = time
            best_msg = msg
    return best_msg
# Main conversion loop (Python 2 script — note the statement-form prints):
# for every run directory under `fname`, export each optimized pose graph and
# the last unoptimized graph that preceded it as CSV files under
# `out_location/<run>/{pre,post}/`.
for run in tqdm.tqdm(os.listdir(fname), "Bags"):
    bags = list(os.listdir(os.path.join(fname,run)))
    # A run holds either one combined bag, or a pose_graph.bag +
    # pose_graph_opt.bag pair.
    if len(bags) == 1:
        split_bags = False
    elif len(bags) == 2:
        split_bags = True
    else:
        print(run)
        raise Exception()
    # Make file locations
    pre_files_location = os.path.join(out_location, run,"pre")
    post_files_location = os.path.join(out_location, run,"post")
    if os.path.exists(pre_files_location):
        # Already converted on a previous invocation.
        # NOTE(review): this prints the dataset root `fname`, not the skipped
        # `run` — presumably unintended; confirm.
        print("Skipping: " + fname)
        continue
    try:
        os.removedirs(pre_files_location)
    except OSError:
        pass
    os.makedirs(pre_files_location)
    try:
        os.removedirs(post_files_location)
    except:  # NOTE(review): bare except here vs. `except OSError` above — inconsistent.
        pass
    os.makedirs(post_files_location)
    if not split_bags:
        # Single bag: stream both topics; each optimized message is paired
        # with the most recent unoptimized graph seen before it.
        last_unoptimized_message = None
        optimized_topic = "/base1/lamp_pgo/optimized_values"
        unoptimized_topic = "/base1/lamp/pose_graph_to_optimize"
        bag_location = os.path.join(fname,run,bags[0])
        try:
            # _get_yaml_info() gives message counts / index status.
            pgo_info_dict = yaml.load(rosbag.Bag(bag_location, 'r')._get_yaml_info())
        except rosbag.bag.ROSBagUnindexedException:
            continue
        if not pgo_info_dict['indexed']:
            continue
        print pgo_info_dict
        for topic, msg, t in tqdm.tqdm(rosbag.Bag(bag_location).read_messages(topics=[unoptimized_topic, optimized_topic]), desc="Messages",total=pgo_info_dict["messages"]):
            if topic == unoptimized_topic:
                last_unoptimized_message = msg
            if topic == optimized_topic:
                # Dump pre message
                dump_edges(last_unoptimized_message.edges, os.path.join(pre_files_location, str(t) + "-edges.csv"))
                dump_nodes(last_unoptimized_message.nodes, os.path.join(pre_files_location, str(t) + "-nodes.csv"))
                # Dump post messages
                dump_edges(msg.edges, os.path.join(post_files_location, str(t) + "-edges.csv"))
                dump_nodes(msg.nodes, os.path.join(post_files_location, str(t) + "-nodes.csv"))
    else:
        # Split bags: for each optimized message, scan forward through the
        # unoptimized bag (resuming from last_time) to find the latest pose
        # graph with t_pg <= t.
        pose_graph_bag = os.path.join(fname,run,"pose_graph.bag")
        pose_graph_opt_bag = os.path.join(fname,run,"pose_graph_opt.bag")
        pg_info_dict = yaml.load(rosbag.Bag(pose_graph_bag, 'r')._get_yaml_info())
        print pg_info_dict
        pgo_info_dict = yaml.load(rosbag.Bag(pose_graph_opt_bag, 'r')._get_yaml_info())
        print pgo_info_dict
        last_time = None
        for topic,msg,t in tqdm.tqdm(rosbag.Bag(pose_graph_opt_bag).read_messages(),desc="Reading Messages", total=pgo_info_dict["messages"]):
            pose_graph_msg = None
            for topic_pg,msg_pg,t_pg in rosbag.Bag(pose_graph_bag).read_messages(start_time=last_time):
                if t_pg <= t:
                    pose_graph_msg = msg_pg
                    last_time = t_pg
                else:
                    break
            # NOTE(review): if no pose-graph message precedes t,
            # pose_graph_msg stays None and the dumps below will raise.
            dump_edges(pose_graph_msg.edges, os.path.join(pre_files_location, str(t) + "-edges.csv"))
            dump_nodes(pose_graph_msg.nodes, os.path.join(pre_files_location, str(t) + "-nodes.csv"))
            # Dump post messages
            dump_edges(msg.edges, os.path.join(post_files_location, str(t) + "-edges.csv"))
            dump_nodes(msg.nodes, os.path.join(post_files_location, str(t) + "-nodes.csv"))
| 5,335 | 36.577465 | 173 | py |
LAMP | LAMP-main/loop_closure/script/gnn_model.py | import torch
from torch.nn import Linear, ModuleList, BatchNorm1d
from torch.nn import ModuleList, Embedding,Sequential, ReLU
from torch.nn import Sequential as Seq, Linear as Lin, ReLU
from torch.nn import LayerNorm
import torch.nn.functional as F
from torch_geometric.nn import GCNConv, GraphConv, GATConv, SAGEConv, GMMConv
from torch_geometric.nn import global_mean_pool, global_add_pool, GlobalAttention
from torch_geometric.nn import GENConv, GraphUNet, PNAConv, DeepGCNLayer
class LoopClosureGNN(torch.nn.Module):
    """Graph-level regressor for loop-closure quality prediction.

    Pipeline: linear node embedding -> a stack of GMM (MoNet) convolutions
    with tanh between layers -> sum-pooled graph readout -> a small
    batch-normalized MLP head, optionally squashed by a sigmoid.
    """

    def __init__(self, hidden_channels, num_input_features, num_edge_attr, num_output_features, ends_in_sigmoid=False):
        super(LoopClosureGNN, self).__init__()
        self.node_encoder = Linear(num_input_features, hidden_channels)
        # Registered (and kept for checkpoint compatibility) but not applied
        # in forward().
        self.edge_encoder = Linear(num_edge_attr, hidden_channels)
        self.conv1 = GMMConv(hidden_channels, hidden_channels, 4, 5)
        self.gcs = ModuleList([GMMConv(hidden_channels, hidden_channels, 4, 5) for _ in range(2)])
        num_linear = 3
        self.lins = ModuleList([Linear(hidden_channels, hidden_channels) for _ in range(num_linear)])
        # One norm per MLP layer plus one final norm before the output layer;
        # index 0 is unused by forward().
        self.batch_norms = ModuleList([BatchNorm1d(hidden_channels) for _ in range(num_linear + 2)])
        self.lin = Linear(hidden_channels, num_output_features)
        self.ends_in_sigmoid = ends_in_sigmoid

    def forward(self, x, edge_index, edge_attr, batch):
        # Embed node features and run the message-passing stack.
        x = self.conv1(self.node_encoder(x), edge_index, edge_attr)
        for conv in self.gcs:
            x = conv(torch.tanh(x), edge_index, edge_attr)
        # Sum-pool node states into one vector per graph in the batch.
        x = global_add_pool(x, batch)
        # MLP head: batchnorm -> linear -> tanh for each hidden layer.
        for dense, norm in zip(self.lins, self.batch_norms[1:]):
            x = torch.tanh(dense(norm(x)))
        x = self.lin(self.batch_norms[-1](x))
        if self.ends_in_sigmoid:
            x = torch.sigmoid(x)
        return x
class LoopClosureGNNGlobalAttention(torch.nn.Module):
    """Variant of LoopClosureGNN that uses GENConv message passing and a
    learned global-attention readout instead of sum pooling.

    NOTE(review): `GENConv` is not among the names imported at the top of
    this file — constructing this class raises NameError unless the import
    is added.
    """
    def __init__(self, hidden_channels, num_input_features, num_output_features, ends_in_sigmoid=False):
        super(LoopClosureGNNGlobalAttention, self).__init__()
        #torch.manual_seed(12345)
        #self.input_norm = BatchNorm1d(num_input_features)
        self.conv1 = GENConv(num_input_features, hidden_channels,msg_norm=True,learn_t=True,learn_p=True,learn_msg_scale=True)
        #self.gcs = ModuleList([GraphConv(hidden_channels, hidden_channels) for i in range(4)])
        self.gcs = ModuleList([GENConv(hidden_channels,hidden_channels,msg_norm=True,learn_t=True,learn_p=True,learn_msg_scale=True) for i in range(2)])
        #self.conv1 = SAGEConv(num_input_features, hidden_channels)
        #self.gcs = ModuleList([SAGEConv(hidden_channels, hidden_channels) for i in range(4)])
        num_linear = 3
        self.lins = ModuleList([Linear(hidden_channels, hidden_channels) for i in range(num_linear)])
        # num_linear + 2 norms: forward() skips index 0 and applies the last
        # one just before the output layer.
        self.batch_norms = ModuleList([BatchNorm1d(hidden_channels) for i in range(num_linear + 2)])
        self.lin = Linear(hidden_channels, num_output_features)
        self.ends_in_sigmoid = ends_in_sigmoid
        # Attention gate: per-node scalar score from a 2-layer MLP.
        gate_nn = Seq(Lin(hidden_channels, hidden_channels), ReLU(), Lin(hidden_channels, 1))
        self.glob = GlobalAttention(gate_nn)
    def forward(self, x, edge_index, batch):
        # 1. Obtain node embeddings
        #x = self.input_norm(x)
        x = self.conv1(x, edge_index)
        for gc in self.gcs:
            x = x.tanh()
            x = gc(x, edge_index)
        # 2. Readout layer: attention-weighted sum over each graph's nodes.
        #x = global_add_pool(x, batch) # [batch_size, hidden_channels]
        x = self.glob(x,batch)
        # 3. Apply a final classifier
        for lin,batch_norm in zip(self.lins,self.batch_norms[1:]):
            #x = F.dropout(x, p=0.5, training=self.training)
            x = batch_norm(x)
            x = lin(x)
            x = torch.tanh(x)
        #x = F.dropout(x, p=0.5, training=self.training)
        x = self.batch_norms[-1](x)
        x = self.lin(x)
        if self.ends_in_sigmoid:
            x = torch.sigmoid(x)
        return x
class LoopClosureUNet(torch.nn.Module):
    """Graph-UNet backbone with a sum-pool readout and batch-normalized MLP head.

    NOTE(review): `GraphUNet` is not imported at the top of this file —
    constructing this class raises NameError unless the import is added.
    """
    def __init__(self,hidden_channels,num_features,num_output_features,unet_depth=3):
        super(LoopClosureUNet, self).__init__()
        #pool_ratios = [2000 / data.num_nodes, 0.5]
        self.unet = GraphUNet(num_features, hidden_channels, hidden_channels,
                              depth=unet_depth)
        num_linear = 3
        self.lins = ModuleList([Linear(hidden_channels, hidden_channels) for i in range(num_linear)])
        # num_linear + 1 norms: one per MLP layer plus a final pre-output norm.
        self.batch_norms = ModuleList([BatchNorm1d(hidden_channels) for i in range(num_linear + 1)])
        self.lin = Linear(hidden_channels, num_output_features)
    def forward(self,x,edge_index,batch):
        # edge_index, _ = dropout_adj(data.edge_index, p=0.2,
        #                             force_undirected=True,
        #                             num_nodes=data.num_nodes,
        #                             training=self.training)
        # NOTE(review): p=0.92 drops almost all input features — presumably a
        # deliberate heavy-regularization experiment; confirm.
        x = F.dropout(x, p=0.92, training=self.training)
        x = self.unet(x, edge_index)
        # 2. Readout layer
        x = global_add_pool(x, batch) # [batch_size, hidden_channels]
        # 3. Apply a final classifier
        for lin,batch_norm in zip(self.lins,self.batch_norms):
            #x = F.dropout(x, p=0.5, training=self.training)
            x = batch_norm(x)
            x = lin(x)
            x = torch.tanh(x)
        #x = F.dropout(x, p=0.5, training=self.training)
        x = self.batch_norms[-1](x)
        x = self.lin(x)
        return x
class LoopClosurePNA(torch.nn.Module):
    """PNA-based graph regressor: linear node embedding, two PNA convolution
    layers each followed by batch norm + ReLU, sum-pool readout, and a
    two-layer MLP head.

    Parameters
    ----------
    node_features : size of the input node-feature vector
    output_features : size of the per-graph output
    deg : in-degree histogram tensor required by PNAConv's scalers
    """

    def __init__(self, node_features, output_features, deg):
        super(LoopClosurePNA, self).__init__()
        hidden_dim = 32
        self.node_emb = Linear(node_features, hidden_dim)
        aggregators = ['mean', 'min', 'max', 'std']
        scalers = ['identity', 'amplification', 'attenuation']
        self.convs = []
        self.batch_norms = []
        for _ in range(2):
            conv = PNAConv(in_channels=hidden_dim, out_channels=hidden_dim,
                           aggregators=aggregators, scalers=scalers, deg=deg,
                           towers=4, pre_layers=1, post_layers=1,
                           divide_input=False)
            self.convs.append(conv)
            self.batch_norms.append(BatchNorm1d(hidden_dim))
        self.convs = ModuleList(self.convs)
        # BUG FIX: this previously wrapped self.convs a second time
        # (ModuleList(self.convs)), discarding the BatchNorm1d layers built
        # above, so "batch norm" in forward() was actually a duplicate conv.
        self.batch_norms = ModuleList(self.batch_norms)
        self.mlp = Sequential(Linear(hidden_dim, int(hidden_dim/2)), ReLU(),
                              Linear(int(hidden_dim/2), output_features))

    def forward(self, x, edge_index, batch):
        x = self.node_emb(x)
        for conv, batch_norm in zip(self.convs, self.batch_norms):
            # BUG FIX: edge_index was previously passed to the batch-norm
            # layer as well (only meaningful while batch_norms held convs).
            x = F.relu(batch_norm(conv(x, edge_index)))
        # Sum-pool node states into one vector per graph.
        x = global_add_pool(x, batch)
        return self.mlp(x)
class DeeperGCN(torch.nn.Module):
    """DeeperGCN-style regressor: residual GENConv blocks (res+ ordering)
    followed by sum pooling and an MLP head, mirroring the PyG DeeperGCN
    example.

    NOTE(review): `GENConv`, `LayerNorm` and `DeepGCNLayer` are not imported
    at the top of this file — constructing this class raises NameError unless
    the imports are added.
    """
    def __init__(self, hidden_channels, input_features,output_features,num_layers=2):
        super(DeeperGCN, self).__init__()
        self.node_encoder = Linear(input_features, hidden_channels)
        self.layers = torch.nn.ModuleList()
        for i in range(1, num_layers + 1):
            conv = GENConv(hidden_channels, hidden_channels, aggr='softmax',
                           t=1.0, learn_t=True, num_layers=2, norm='layer')
            norm = LayerNorm(hidden_channels)
            act = ReLU(inplace=True)
            # 'res+' block: norm -> act -> conv with a residual connection;
            # gradient checkpointing on layers where i % 3 != 0.
            layer = DeepGCNLayer(conv, norm, act, block='res+', dropout=0.1,
                                 ckpt_grad=i % 3)
            self.layers.append(layer)
        self.mlp = Sequential(Linear(hidden_channels, int(hidden_channels/2)), ReLU(),
                              Linear(int(hidden_channels/2), output_features))
    def forward(self, x, edge_index,batch ):
        x = self.node_encoder(x)
        #edge_attr = self.edge_encoder(edge_attr)
        # res+ convention: the first layer applies only its conv; later layers
        # run the full norm/act/conv residual block.
        x = self.layers[0].conv(x, edge_index)
        for layer in self.layers[1:]:
            x = layer(x, edge_index)
        # Final norm + activation from the first layer's components.
        x = self.layers[0].act(self.layers[0].norm(x))
        x = F.dropout(x, p=0.1, training=self.training)
        x = global_add_pool(x, batch)
        return self.mlp(x)
| 9,517 | 44.54067 | 153 | py |
LAMP | LAMP-main/loop_closure/script/offline_refit.py | #!/usr/bin/env python
# This script fits the gnn model used in batching to new training data
from __future__ import division
import pickle
import os
import math
import torch
from torch_geometric.datasets import TUDataset
from torch_geometric.data import InMemoryDataset, Data, DataLoader
import numpy as np
import torch
from torch_geometric.transforms import Compose, LocalDegreeProfile, OneHotDegree, Cartesian, Distance
from tqdm import tqdm
import matplotlib.pyplot as plt
from dataset import LoopClosureDataset, LoopClosureNodeLabelDataset
from gnn_model import LoopClosureGNN
#from torch.utils.tensorboard import SummaryWriter
from torch_geometric.utils import degree
from sklearn.metrics import mean_squared_error
# Module-level configuration and dataset/loader construction (runs on import).
root = "/home/chris/current_training_data/"  # NOTE(review): unused below; the dataset path is hard-coded.
save_name = "./model.pkl"
cpu_save_name = "./model-cpu.pkl"
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
# device = "cpu"
dataset = LoopClosureDataset("/media/chris/hdd3/more_training_data")
# Hold out the last quarter of the dataset for validation.
val_holdout = int(len(dataset) / 4)
train_dataset = dataset  # NOTE(review): dead assignment — overwritten on the next line.
train_dataset = dataset[:-val_holdout]
validation_dataset = dataset[-val_holdout:]
train_loader = DataLoader(train_dataset, batch_size=32, shuffle=True)
validation_loader = DataLoader(validation_dataset, batch_size=32, shuffle=True)
print("Dataset Size: %d, Train Size %d, Val Size %d" % (len(dataset), len(train_dataset), len(validation_dataset)))
def train(model):
    """Run one training epoch of *model* over the global `train_loader`.

    Relies on module-level globals: `train_loader`, `device`, and — defined
    only inside the `__main__` guard when `should_train` is True —
    `criterion` and `optimizer`.
    """
    model.train()
    for data in train_loader:  # Iterate in batches over the training dataset.
        data.to(device)
        out = model(data.x, data.edge_index,data.edge_attr, data.batch)  # Perform a single forward pass.
        loss = criterion(out, data.y)  # Compute the loss.
        loss.backward()  # Derive gradients.
        optimizer.step()  # Update parameters based on gradients.
        # Gradients are cleared *after* stepping, so each backward() starts
        # from zeroed grads on the next iteration.
        optimizer.zero_grad()  # Clear gradients.
def test(loader, model):
    """Return the per-output RMSE of *model* over every batch in *loader*.

    Uses the module-level `device`. Despite being computed via MSE
    internally, the returned array is the square root — i.e. RMSE per
    output column.
    """
    model.eval()
    pred_batches = []
    true_batches = []
    for batch_data in loader:
        batch_data.to(device)
        # Forward pass, then move predictions and targets to host numpy.
        out = model(batch_data.x, batch_data.edge_index, batch_data.edge_attr, batch_data.batch)
        pred_batches.append(out.cpu().detach().numpy())
        true_batches.append(batch_data.y.cpu().detach().numpy())
    preds = np.vstack(pred_batches)
    truths = np.vstack(true_batches)
    # Per-column mean squared error (equivalent to sklearn's
    # mean_squared_error with multioutput='raw_values'), then RMSE.
    return np.sqrt(np.mean((truths - preds) ** 2, axis=0))
if __name__ == "__main__":
    # Build the model, optionally resume/refit, then evaluate how well the
    # model's predicted score orders validation graphs relative to targets.
    model = LoopClosureGNN(64, dataset.num_node_features,dataset.num_edge_features,dataset.num_classes).to(device)
    continue_training = True
    should_train = False
    val_skip = 1  # stride when sub-sampling graph_2 candidates below
    try:
        if continue_training:
            model.load_state_dict(torch.load(save_name))
    except Exception as e:
        # Best effort: fall back to fresh weights when no checkpoint exists.
        print(e)
    if should_train:
        optimizer = torch.optim.Adam(model.parameters(), lr=0.0005, weight_decay=0.005)
        criterion = torch.nn.MSELoss()
        pbar = tqdm(range(1, 5 * (10 ** 2)), "Training Epochs")
        #writer = SummaryWriter()
        best_test_accuracy = float("inf")
        saving = False
        for epoch in pbar:
            train(model)
            # NOTE: test() returns per-output RMSE arrays, not accuracies.
            train_acc = test(train_loader, model)
            test_acc = test(validation_loader, model)
            pbar.set_postfix({"Training Accuracy": train_acc, "Testing Accuracy": test_acc, "Saving": saving, "Best Test Accuracy": best_test_accuracy})
            # for x in range(test_acc.shape[0]):
            #     writer.add_scalar("Accuracy/train" + str(x), train_acc[x], epoch)
            #     writer.add_scalar("Accuracy/test" + str(x), test_acc[x], epoch)
            # Checkpoint whenever the summed validation RMSE improves.
            if sum(test_acc) < best_test_accuracy:
                saving = True
                best_test_accuracy = sum(test_acc)
                with open(save_name, "wb") as f:
                    torch.save(model.state_dict(), f)
            else:
                saving = False
    # Reload the best checkpoint and score pairwise ordering on validation.
    model.load_state_dict(torch.load(save_name))
    model.eval()
    correct = 0
    total = 0
    differences = []
    predictions = []  # NOTE(review): never appended to — the histogram below plots an empty array; confirm intended.
    for i, graph_1 in tqdm(list(enumerate(validation_dataset)),
                           desc="Compute Ordering"):  # Iterate in batches over the training
        for graph_2 in validation_dataset[i::val_skip]:
            #if graph_1.run_name == graph_2.run_name:
            target_sum_1 = torch.sum(graph_1.y)
            target_sum_2 = torch.sum(graph_2.y)
            #graphs that are super close who cares
            if np.abs((target_sum_1 - target_sum_2).cpu().detach().numpy()) < 0.0001:
                continue
            # Single-graph batch vectors (all zeros) for each forward pass.
            graph_1.to(device)
            batch = torch.zeros(graph_1.x.shape[0], dtype=int).to(device)
            pred_sum_1 = torch.sum(model(graph_1.x, graph_1.edge_index, graph_1.edge_attr,batch))
            prediction = pred_sum_1.cpu().detach().numpy()
            graph_2.to(device)
            batch = torch.zeros(graph_2.x.shape[0], dtype=int).to(device)
            pred_sum_2 = torch.sum(model(graph_2.x, graph_2.edge_index,graph_2.edge_attr,batch))
            # Residual between the true gap and the predicted gap.
            diff = (target_sum_1 - target_sum_2) - (pred_sum_1 - pred_sum_2)
            differences.append(diff.cpu().detach().numpy())
            # Count the pair as correct when predictions order the two graphs
            # the same way as the targets do.
            if (target_sum_1 <= target_sum_2 and pred_sum_1 <= pred_sum_2) or (
                    target_sum_1 > target_sum_2 and pred_sum_1 > pred_sum_2):
                correct += 1
            total += 1
    print("Ordering: Correct %f Total %f Accuracy %f" % (correct, total, correct/total))
    # NOTE(review): test() returns an array; "%f" only works when it has a
    # single element (num_classes == 1) — and the value is RMSE, not MSE.
    print("MSE: %f" % (test(validation_loader,model)))
    plt.figure()
    plt.hist(np.array(differences),bins=50)
    plt.title("Difference histogram")
    plt.figure()
    plt.hist(np.array(predictions),bins=50)
    plt.title("Predictions")
plt.show() | 5,887 | 36.987097 | 152 | py |
CMUDeepLens | CMUDeepLens-master/deeplens/base.py | # This file contains the base class for a Lasagne model
from sklearn.base import BaseEstimator, ClassifierMixin
from sklearn.utils.validation import check_is_fitted
from random import shuffle
import time
import numpy as np
import cPickle as pickle
import theano
import theano.tensor as T
import lasagne
from keras.preprocessing.image import ImageDataGenerator
from lasagne.utils import floatX
from lasagne.layers import InputLayer, NonlinearityLayer, DenseLayer
from lasagne.layers import get_output, get_all_params, get_output_shape
from lasagne.nonlinearities import sigmoid, rectify, elu, tanh, identity
from lasagne.updates import adam, total_norm_constraint
from lasagne.objectives import binary_accuracy, binary_crossentropy
from .objectives import weighted_sigmoid_binary_crossentropy
class BaseLasagneClassifier(BaseEstimator, ClassifierMixin):
    """
    Base class for a Lasagne binary classifier, exposing the scikit-learn
    estimator interface (fit / predict / predict_proba).

    Subclasses customize the architecture by overriding
    `_model_definition(net)`, which maps the input layer to the last hidden
    layer; this base class appends the dense output layer, sigmoid, the
    weighted-crossentropy training function, and the deterministic inference
    function.

    Note: this code targets Python 2 (integer division in `fit`, `file()`
    and `cPickle` in `save`/`load`, statement-form print in `eval_tpr_fpr`)
    and the Theano/Lasagne/Keras-1 stack.
    """
    def __init__(self, pos_weight=1,
                 n_epochs=100,
                 batch_size=32,
                 learning_rate=0.001,
                 learning_rate_drop=0.1,
                 learning_rate_steps=3,
                 output_nbatch=100,
                 val_nepoch=5):
        """
        Initialisation.

        pos_weight: weight applied to positive examples in the loss.
        learning_rate_drop/steps: the LR is multiplied by `drop` at
            `steps` evenly-spaced points during training.
        output_nbatch: print the running loss every this many batches.
        val_nepoch: evaluate purity/completeness every this many epochs.
        """
        self.batch_size = batch_size
        self.n_epochs = n_epochs
        self.learning_rate = learning_rate
        self.output_nbatch = output_nbatch
        self.pos_weight = pos_weight
        self.val_nepoch = val_nepoch
        self.learning_rate_drop = learning_rate_drop
        self.learning_rate_steps = learning_rate_steps
        # Symbolic Theano inputs shared by the training/inference functions.
        self._x = T.tensor4('x')
        self._y = T.matrix('y')
        self._lr = T.scalar(name='learning_rate')
        self._network = None
    def _model_definition(self, net):
        """
        Function which defines the model from the provided source layer
        to the output of the DenseLayer, before the dense layer !
        (Identity here; subclasses override.)
        """
        return net
    def _build(self, X, y):
        """
        Builds the network and associated training functions, for the specific
        shapes of the inputs (X: NCHW images with square spatial dims).
        """
        n_x = X.shape[-1]
        n_y = y.shape[-1]
        n_c = X.shape[1]
        # Defining input layers
        self.l_x = InputLayer(shape=(self.batch_size, n_c, n_x, n_x),
                              input_var=self._x, name='x')
        self.l_y = InputLayer(shape=(self.batch_size, n_y),
                              input_var=self._y, name='y')
        net = self._model_definition(self.l_x)
        # Output classifier
        out = DenseLayer(net, num_units=n_y, nonlinearity=identity)
        self._network = NonlinearityLayer(out, nonlinearity=sigmoid)
        # Compute network loss — note the loss uses the pre-sigmoid logits
        # `out` (the weighted crossentropy applies the sigmoid internally).
        q, p = get_output([out, self.l_y], inputs={self.l_x:self._x, self.l_y:self._y})
        # Define loss function
        loss = weighted_sigmoid_binary_crossentropy(q, p, self.pos_weight)
        # Average over batch
        loss = loss.mean()
        # Get trainable parameters and generate updates
        params = get_all_params([self._network], trainable=True)
        grads = T.grad(loss, params)
        updates = adam(grads, params, learning_rate=self._lr)
        self._trainer = theano.function([self._x, self._y, self._lr], [loss], updates=updates)
        # Get detection probability from the network (deterministic pass:
        # dropout/noise layers disabled).
        qdet = get_output(self._network, inputs={self.l_x: self._x}, deterministic=True)
        self._output = theano.function([self._x], qdet)
    def fit(self, X, y, Xval=None, yval=None):
        """
        Fit the model to the data.
        Parameters
        ----------
        X: array_like of shape (n_samples, n_channels, n_x, n_x)
            Training data.
        y: array_like (n_samples, n_conditional_features), optional
            Conditional data.
        Xval, yval: optional held-out set; purity/completeness is reported
            on it every `val_nepoch` epochs.
        Returns
        -------
        self : Generator
            The fitted model
        """
        # Creates a new network and associated functions if not incremental
        if self._network is None:
            self._build(X, y)
        niter = 0
        train_err = 0
        # Defines a preprocessing step: random rotations/zooms/flips as
        # data augmentation (Keras-1 API, Theano 'th' channel ordering).
        datagen = ImageDataGenerator(rotation_range=90,
                                     zoom_range=[0.9,1],
                                     horizontal_flip=True,
                                     vertical_flip=True,
                                     fill_mode='wrap',
                                     dim_ordering='th')
        lr = self.learning_rate
        # Loop over training epochs
        for i in range(self.n_epochs):
            print("Starting Epoch : %d"%i)
            if (Xval is not None) and (yval is not None) and (i % self.val_nepoch == 0) and (i > 0):
                pur, comp = self.eval_purity_completeness(Xval, yval)
                print("Iteration : %d -> [Validation] Purity: %f ; Completeness: %f"%(niter, pur, comp))
                # Compare against a same-sized slice of the training set.
                nval = Xval.shape[0]
                pur, comp = self.eval_purity_completeness(X[0:nval], y[0:nval])
                print("Iteration : %d -> [Training] Purity: %f ; Completeness: %f"%(niter, pur, comp))
            start_time = time.time()
            batches = datagen.flow(X, y,
                                   batch_size=self.batch_size,
                                   shuffle=True)
            # Loop over batches (Python 2 integer division)
            for b in range(X.shape[0] / self.batch_size):
                xdata, ydata = batches.next()
                # One iteration of training
                err, = self._trainer(floatX(xdata), floatX(ydata), floatX(lr))
                train_err += err
                niter += 1
                if (niter % self.output_nbatch) == 0:
                    print("Iteration : %d -> Training loss: %f"%(niter, train_err / (self.output_nbatch)))
                    train_err = 0
            print("Epoch took %f s"%(time.time() - start_time))
            start_time = time.time()
            # Lower the learning rate if required
            if i % (self.n_epochs / self.learning_rate_steps) == 0 and i > 0:
                lr *= self.learning_rate_drop
                print("Decreasing learning rate to:" + str(lr))
        return self
    def save(self, filename):
        """
        Exports the model parameters to file
        (hyper-parameters + all layer weights, pickled together).
        """
        check_is_fitted(self, "_network")
        all_values = lasagne.layers.get_all_param_values(self._network)
        params =[self.batch_size,
                 self.n_epochs,
                 self.learning_rate,
                 self.pos_weight]
        all_params = [params, all_values]
        f = file(filename, 'wb')
        print("saving to " + filename + "...")
        pickle.dump(all_params, f, protocol=pickle.HIGHEST_PROTOCOL)
        f.close()
    def load(self, filename, X,y ):
        """
        Load the network parameter from file.
        X and y are needed (shapes only) to rebuild the symbolic graph
        before the saved weights can be restored.
        """
        print("loading from " + filename + "...")
        f = file(filename, 'rb')
        all_params = pickle.load(f)
        f.close()
        p, all_values = all_params
        # Extracts parameters
        self.batch_size, self.n_epochs, self.learning_rate, self.pos_weight = p
        self._build(X,y)
        # Rebuild the network and set the weights
        lasagne.layers.set_all_param_values(self._network, all_values)
        print("Model loaded")
    def predict_proba(self, X):
        """
        Returns probability estimates for X
        """
        check_is_fitted(self, "_network")
        res = []
        nsamples = X.shape[0]
        # Process data using batches, for optimisation and memory constraints
        for i in range(int(nsamples/self.batch_size)):
            q = self._output(floatX(X[i*self.batch_size:(i+1)*self.batch_size]))
            res.append(q)
        if nsamples % (self.batch_size) > 0 :
            # Last partial batch: zero-pad up to batch_size, then keep only
            # the valid predictions.
            i = int(nsamples/self.batch_size)
            ni = nsamples % (self.batch_size)
            xdata = np.zeros((self.batch_size,) + X.shape[1:])
            xdata[:ni] = X[i*self.batch_size:]
            q = self._output(floatX(xdata))
            res.append(q[:ni])
        # Concatenate processed data
        q = np.concatenate(res)
        return q
    def predict(self, X, threshold=0.5):
        """
        Predict class of X (1 where P(positive) > threshold, else 0).
        """
        check_is_fitted(self, "_network")
        q = self.predict_proba(X)
        upper, lower = 1, 0
        return np.where(q > threshold, upper, lower)
    def eval_purity_completeness(self, X, y, threshold=0.5):
        """
        Evaluate the model purity and completeness using the following definitions
        Purity = N(true positive) / [N(true positive) + N(false positive)]
        Compl. = N(true positive) / [N(true positive) + N(false negative)]
        """
        check_is_fitted(self, "_network")
        p = self.predict(X, threshold)
        n_fp = np.sum(p * (y == 0)).astype('float32')
        n_tp = np.sum(p * y).astype('float32')
        n_fn = np.sum((p == 0) * (y == 1)).astype('float32')
        pur = n_tp / ( n_tp + n_fp )
        comp= n_tp / ( n_tp + n_fn )
        return pur, comp
    def eval_tpr_fpr(self, X, y, threshold=0.5):
        """
        Evaluates the performance of the model using the true and false positive
        rates as defined by the challenge
        TPR = N(true positive) / [N(true positive) + N(false negative)]
        FPR = N(false positive) / [N(false positive) + N(true negative)]
        """
        check_is_fitted(self, "_network")
        p = self.predict(X, threshold)
        n_fp = np.sum(p * (y == 0)).astype('float32')
        n_tp = np.sum(p * y).astype('float32')
        # NOTE(review): n_tn and n_fn are computed but unused below.
        n_tn = np.sum((p == 0) * (y == 0)).astype('float32')
        n_fn = np.sum((p == 0) * (y == 1)).astype('float32')
        n_p = np.sum(y).astype('float32')
        n_f = np.sum(y == 0).astype('float32')
        tpr = n_tp / n_p
        fpr = n_fp / n_f
        # Python 2 debug print of the raw counts.
        print n_fp, n_tp, n_p, n_f
        return tpr, fpr
    def eval_ROC(self, X, y):
        """
        Computes the ROC curve of model: (tpr, fpr, thresholds) sampled at
        1000 evenly-spaced thresholds in [0, 1], via broadcasting the
        (n_samples, 1) probabilities against the threshold vector.
        """
        check_is_fitted(self, "_network")
        q = np.reshape(self.predict_proba(X), (-1, 1))
        t = np.linspace(0,1,1000)
        upper, lower = 1, 0
        p = np.where(q > t, upper, lower)
        n_fp = np.sum(p * (y == 0), axis=0).astype('float32')
        n_tp = np.sum(p * y, axis=0).astype('float32')
        tpr = n_tp / np.sum(y).astype('float32')
        fpr = n_fp / np.sum( y == 0).astype('float32')
        return tpr, fpr, t
| 10,704 | 31.34139 | 106 | py |
xcit | xcit-main/main.py | # Copyright (c) 2015-present, Facebook, Inc.
# All rights reserved.
"""
The main training/evaluation loop
Modified from: https://github.com/facebookresearch/deit
"""
import argparse
import datetime
import numpy as np
import time
import torch
import torch.backends.cudnn as cudnn
import json
import os
from pathlib import Path
from timm.data import Mixup
from timm.models import create_model
from timm.loss import LabelSmoothingCrossEntropy, SoftTargetCrossEntropy
from timm.scheduler import create_scheduler
from timm.optim import create_optimizer
from timm.utils import NativeScaler, get_state_dict, ModelEma
from datasets import build_dataset
from engine import train_one_epoch, evaluate
from losses import DistillationLoss
from samplers import RASampler
import utils
import xcit
def get_args_parser():
    """Build the argparse parser holding every XCiT training/eval option.

    Returned with add_help=False so it can be used as a parent parser by the
    __main__ entry point. Defaults reproduce the paper's training recipe.
    """
    parser = argparse.ArgumentParser('XCiT training and evaluation script', add_help=False)
    parser.add_argument('--batch-size', default=64, type=int)
    parser.add_argument('--epochs', default=400, type=int)

    # Model parameters
    parser.add_argument('--model', default='xcit_s_12', type=str, metavar='MODEL',
                        help='Name of model to train')
    parser.add_argument('--input-size', default=224, type=int, help='images input size')
    parser.add_argument('--drop', type=float, default=0.0, metavar='PCT',
                        help='Dropout rate (default: 0.)')
    parser.add_argument('--drop-path', type=float, default=0.1, metavar='PCT',
                        help='Drop path rate (default: 0.1)')
    parser.add_argument('--model-ema', action='store_true')
    parser.add_argument('--no-model-ema', action='store_false', dest='model_ema')
    parser.set_defaults(model_ema=True)
    parser.add_argument('--model-ema-decay', type=float, default=0.99996, help='')
    parser.add_argument('--model-ema-force-cpu', action='store_true', default=False, help='')

    # Optimizer parameters
    parser.add_argument('--opt', default='adamw', type=str, metavar='OPTIMIZER',
                        help='Optimizer (default: "adamw")')
    parser.add_argument('--opt-eps', default=1e-8, type=float, metavar='EPSILON',
                        help='Optimizer Epsilon (default: 1e-8)')
    parser.add_argument('--opt-betas', default=None, type=float, nargs='+', metavar='BETA',
                        help='Optimizer Betas (default: None, use opt default)')
    parser.add_argument('--clip-grad', type=float, default=None, metavar='NORM',
                        help='Clip gradient norm (default: None, no clipping)')
    parser.add_argument('--momentum', type=float, default=0.9, metavar='M',
                        help='SGD momentum (default: 0.9)')
    parser.add_argument('--weight-decay', type=float, default=0.05,
                        help='weight decay (default: 0.05)')

    # Learning rate schedule parameters
    parser.add_argument('--sched', default='cosine', type=str, metavar='SCHEDULER',
                        help='LR scheduler (default: "cosine")')
    parser.add_argument('--lr', type=float, default=5e-4, metavar='LR',
                        help='learning rate (default: 5e-4)')
    parser.add_argument('--lr-noise', type=float, nargs='+', default=None, metavar='pct, pct',
                        help='learning rate noise on/off epoch percentages')
    parser.add_argument('--lr-noise-pct', type=float, default=0.67, metavar='PERCENT',
                        help='learning rate noise limit percent (default: 0.67)')
    parser.add_argument('--lr-noise-std', type=float, default=1.0, metavar='STDDEV',
                        help='learning rate noise std-dev (default: 1.0)')
    parser.add_argument('--warmup-lr', type=float, default=1e-6, metavar='LR',
                        help='warmup learning rate (default: 1e-6)')
    parser.add_argument('--min-lr', type=float, default=1e-5, metavar='LR',
                        help='lower lr bound for cyclic schedulers that hit 0 (1e-5)')
    parser.add_argument('--decay-epochs', type=float, default=30, metavar='N',
                        help='epoch interval to decay LR')
    parser.add_argument('--warmup-epochs', type=int, default=5, metavar='N',
                        help='epochs to warmup LR, if scheduler supports')
    parser.add_argument('--cooldown-epochs', type=int, default=10, metavar='N',
                        help='epochs to cooldown LR at min_lr, after cyclic schedule ends')
    parser.add_argument('--patience-epochs', type=int, default=10, metavar='N',
                        help='patience epochs for Plateau LR scheduler (default: 10)')
    parser.add_argument('--decay-rate', '--dr', type=float, default=0.1, metavar='RATE',
                        help='LR decay rate (default: 0.1)')

    # Augmentation parameters
    parser.add_argument('--color-jitter', type=float, default=0.4, metavar='PCT',
                        help='Color jitter factor (default: 0.4)')
    # BUG FIX: the original help string contained leftover `" + \` junk from a
    # broken concatenation, and the statement ended with a stray comma that
    # turned it into a 1-tuple expression.
    parser.add_argument('--aa', type=str, default='rand-m9-mstd0.5-inc1', metavar='NAME',
                        help='Use AutoAugment policy. "v0" or "original" '
                             '(default: rand-m9-mstd0.5-inc1)')
    parser.add_argument('--smoothing', type=float, default=0.1, help='Label smoothing (default: 0.1)')
    parser.add_argument('--train-interpolation', type=str, default='bicubic',
                        help='Training interpolation (random, bilinear, bicubic default: "bicubic")')
    parser.add_argument('--repeated-aug', action='store_true')
    parser.add_argument('--no-repeated-aug', action='store_false', dest='repeated_aug')
    parser.set_defaults(repeated_aug=True)

    # * Random Erase params
    parser.add_argument('--reprob', type=float, default=0.25, metavar='PCT',
                        help='Random erase prob (default: 0.25)')
    parser.add_argument('--remode', type=str, default='pixel',
                        help='Random erase mode (default: "pixel")')
    parser.add_argument('--recount', type=int, default=1,
                        help='Random erase count (default: 1)')
    parser.add_argument('--resplit', action='store_true', default=False,
                        help='Do not random erase first (clean) augmentation split')

    # * Mixup params
    parser.add_argument('--mixup', type=float, default=0.8,
                        help='mixup alpha, mixup enabled if > 0. (default: 0.8)')
    parser.add_argument('--cutmix', type=float, default=1.0,
                        help='cutmix alpha, cutmix enabled if > 0. (default: 1.0)')
    parser.add_argument('--cutmix-minmax', type=float, nargs='+', default=None,
                        help='cutmix min/max ratio, overrides alpha and enables cutmix if set (default: None)')
    parser.add_argument('--mixup-prob', type=float, default=1.0,
                        help='Probability of performing mixup or cutmix when either/both is enabled')
    parser.add_argument('--mixup-switch-prob', type=float, default=0.5,
                        help='Probability of switching to cutmix when both mixup and cutmix enabled')
    parser.add_argument('--mixup-mode', type=str, default='batch',
                        help='How to apply mixup/cutmix params. Per "batch", "pair", or "elem"')

    # Distillation parameters
    parser.add_argument('--teacher-model', default='regnety_160', type=str, metavar='MODEL',
                        help='Name of teacher model to train (default: "regnety_160")')
    parser.add_argument('--teacher-path', type=str, default='')
    parser.add_argument('--distillation-type', default='none', choices=['none', 'soft', 'hard'], type=str, help="")
    parser.add_argument('--distillation-alpha', default=0.5, type=float, help="")
    parser.add_argument('--distillation-tau', default=1.0, type=float, help="")

    # Dataset parameters
    parser.add_argument('--data-path', default='/datasets01/imagenet_full_size/061417/', type=str,
                        help='dataset path')
    parser.add_argument('--data-set', default='IMNET', choices=['CIFAR10', 'CIFAR100', 'IMNET',
                                                                'INAT', 'INAT19', 'CARS', 'FLOWERS',
                                                                'IMNET22k'],
                        type=str, help='Image Net dataset path')
    parser.add_argument('--inat-category', default='name',
                        choices=['kingdom', 'phylum', 'class', 'order', 'supercategory', 'family', 'genus', 'name'],
                        type=str, help='semantic granularity')
    parser.add_argument('--output_dir', default='',
                        help='path where to save, empty for no saving')
    parser.add_argument('--device', default='cuda',
                        help='device to use for training / testing')
    parser.add_argument('--seed', default=0, type=int)
    parser.add_argument('--resume', default='', help='resume from checkpoint')
    parser.add_argument('--start_epoch', default=0, type=int, metavar='N',
                        help='start epoch')
    parser.add_argument('--eval', action='store_true', help='Perform evaluation only')
    parser.add_argument('--dist-eval', action='store_true', default=False, help='Enabling distributed evaluation')
    parser.add_argument('--num_workers', default=10, type=int)
    parser.add_argument('--pin-mem', action='store_true',
                        help='Pin CPU memory in DataLoader for more efficient (sometimes) transfer to GPU.')
    parser.add_argument('--no-pin-mem', action='store_false', dest='pin_mem',
                        help='')
    parser.set_defaults(pin_mem=True)

    # distributed training parameters
    parser.add_argument('--world_size', default=1, type=int,
                        help='number of distributed processes')
    parser.add_argument('--dist_url', default='env://', help='url used to set up distributed training')
    parser.add_argument('--test-freq', default=1, type=int, help='Number of epochs between \
                        validation runs.')
    parser.add_argument('--full_crop', action='store_true', help='use crop_ratio=1.0 instead of the\
                        default 0.875 (Used by CaiT).')
    parser.add_argument("--pretrained", default=None, type=str, help='Path to pre-trained checkpoint')
    parser.add_argument('--surgery', default=None, type=str, help='Path to checkpoint to copy the \
                        patch projection from. \
                        Can improve stability for very \
                        large models.')
    return parser
def main(args):
    """Full training/evaluation entry point for XCiT.

    Builds datasets, samplers and loaders, creates the (optionally pretrained)
    model, optimizer, LR schedule, loss (with optional mixup/distillation),
    then runs the epoch loop with checkpointing and periodic evaluation.
    Mutates `args` (nb_classes, lr, distributed fields) as a side effect.
    """
    utils.init_distributed_mode(args)
    print(args)
    device = torch.device(args.device)

    # fix the seed for reproducibility (per-rank offset so workers differ)
    seed = args.seed + utils.get_rank()
    torch.manual_seed(seed)
    np.random.seed(seed)
    cudnn.benchmark = True

    dataset_train, args.nb_classes = build_dataset(is_train=True, args=args)
    dataset_val, _ = build_dataset(is_train=False, args=args)

    if True:  # distributed-style samplers are always used, even single-process
        num_tasks = utils.get_world_size()
        global_rank = utils.get_rank()
        if args.repeated_aug:
            sampler_train = RASampler(
                dataset_train, num_replicas=num_tasks, rank=global_rank, shuffle=True
            )
        else:
            sampler_train = torch.utils.data.DistributedSampler(
                dataset_train, num_replicas=num_tasks, rank=global_rank, shuffle=True
            )
        if args.dist_eval:
            if len(dataset_val) % num_tasks != 0:
                print('Warning: Enabling distributed evaluation with an eval dataset not divisible by process number. '
                      'This will slightly alter validation results as extra duplicate entries are added to achieve '
                      'equal num of samples per-process.')
            sampler_val = torch.utils.data.DistributedSampler(
                dataset_val, num_replicas=num_tasks, rank=global_rank, shuffle=False)
        else:
            sampler_val = torch.utils.data.SequentialSampler(dataset_val)
    else:  # dead branch kept from upstream DeiT code
        sampler_train = torch.utils.data.RandomSampler(dataset_train)
        sampler_val = torch.utils.data.SequentialSampler(dataset_val)

    data_loader_train = torch.utils.data.DataLoader(
        dataset_train, sampler=sampler_train,
        batch_size=args.batch_size,
        num_workers=args.num_workers,
        pin_memory=args.pin_mem,
        drop_last=True,
    )

    # eval has no backward pass, so a larger batch fits in memory
    data_loader_val = torch.utils.data.DataLoader(
        dataset_val, sampler=sampler_val,
        batch_size=int(1.5 * args.batch_size),
        num_workers=args.num_workers,
        pin_memory=args.pin_mem,
        drop_last=False
    )

    mixup_fn = None
    mixup_active = args.mixup > 0 or args.cutmix > 0. or args.cutmix_minmax is not None
    if mixup_active:
        mixup_fn = Mixup(
            mixup_alpha=args.mixup, cutmix_alpha=args.cutmix, cutmix_minmax=args.cutmix_minmax,
            prob=args.mixup_prob, switch_prob=args.mixup_switch_prob, mode=args.mixup_mode,
            label_smoothing=args.smoothing, num_classes=args.nb_classes)

    print(f"Creating model: {args.model}")
    model = create_model(
        args.model,
        pretrained=False,
        num_classes=args.nb_classes,
        drop_rate=args.drop,
        drop_path_rate=args.drop_path,
        drop_block_rate=None
    )

    if args.pretrained:
        if args.pretrained.startswith('https'):
            checkpoint = torch.hub.load_state_dict_from_url(
                args.pretrained, map_location='cpu', check_hash=True)
        else:
            checkpoint = torch.load(args.pretrained, map_location='cpu')
        checkpoint_model = checkpoint['model']
        state_dict = model.state_dict()
        # drop the classification head when the class count differs
        for k in ['head.weight', 'head.bias']:
            if k in checkpoint_model and checkpoint_model[k].shape != state_dict[k].shape:
                print(f"Removing key {k} from pretrained checkpoint")
                del checkpoint_model[k]
        model.load_state_dict(checkpoint_model, strict=True)
    model.to(device)

    if args.surgery:
        # copy and freeze the patch projection from another checkpoint
        checkpoint = torch.load(args.surgery, map_location='cpu')
        checkpoint_model = checkpoint['model']
        patch_embed_weights = {key.replace("patch_embed.", ""): value for key,
                               value in checkpoint['model'].items() if 'patch_embed' in key}
        model.patch_embed.load_state_dict(patch_embed_weights)
        for p in model.patch_embed.parameters():
            p.requires_grad = False

    model_ema = None
    if args.model_ema:
        # Important to create EMA model after cuda(), DP wrapper, and AMP but before SyncBN and DDP wrapper
        model_ema = ModelEma(
            model,
            decay=args.model_ema_decay,
            device='cpu' if args.model_ema_force_cpu else '',
            resume='')

    model_without_ddp = model
    if args.distributed:
        model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])
        model_without_ddp = model.module
    n_parameters = sum(p.numel() for p in model.parameters() if p.requires_grad)
    print('number of params:', n_parameters)

    # linear LR scaling rule: lr is specified per 512 samples of global batch
    linear_scaled_lr = args.lr * args.batch_size * utils.get_world_size() / 512.0
    args.lr = linear_scaled_lr
    optimizer = create_optimizer(args, model_without_ddp)
    loss_scaler = NativeScaler()

    lr_scheduler, _ = create_scheduler(args, optimizer)

    criterion = LabelSmoothingCrossEntropy()

    if args.mixup > 0.:
        # smoothing is handled with mixup label transform
        criterion = SoftTargetCrossEntropy()
    elif args.smoothing:
        criterion = LabelSmoothingCrossEntropy(smoothing=args.smoothing)
    else:
        criterion = torch.nn.CrossEntropyLoss()

    teacher_model = None
    if args.distillation_type != 'none':
        assert args.teacher_path, 'need to specify teacher-path when using distillation'
        print(f"Creating teacher model: {args.teacher_model}")
        teacher_model = create_model(
            args.teacher_model,
            pretrained=False,
            num_classes=args.nb_classes,
            global_pool='avg',
        )
        if args.teacher_path.startswith('https'):
            checkpoint = torch.hub.load_state_dict_from_url(
                args.teacher_path, map_location='cpu', check_hash=True)
        else:
            checkpoint = torch.load(args.teacher_path, map_location='cpu')
        teacher_model.load_state_dict(checkpoint['model'])
        teacher_model.to(device)
        teacher_model.eval()

    # wrap the criterion in our custom DistillationLoss, which
    # just dispatches to the original criterion if args.distillation_type is 'none'
    criterion = DistillationLoss(
        criterion, teacher_model, args.distillation_type, args.distillation_alpha, args.distillation_tau
    )

    output_dir = Path(args.output_dir)
    # NOTE(review): with the default --output_dir '' this os.mkdir('') raises
    # FileNotFoundError — confirm callers always pass a real output dir.
    if not os.path.exists(output_dir):
        os.mkdir(output_dir)

    resume_path = os.path.join(output_dir, 'checkpoint.pth')
    # NOTE(review): an https --resume URL is only consulted when a local
    # checkpoint.pth already exists in output_dir — confirm this is intended.
    if args.resume and os.path.exists(resume_path):
        if args.resume.startswith('https'):
            checkpoint = torch.hub.load_state_dict_from_url(
                args.resume, map_location='cpu', check_hash=True)
        else:
            print("Loading from checkpoint ...")
            checkpoint = torch.load(resume_path, map_location='cpu')
        model_without_ddp.load_state_dict(checkpoint['model'])
        if not args.eval and 'optimizer' in checkpoint and 'lr_scheduler' in checkpoint and 'epoch' in checkpoint:
            optimizer.load_state_dict(checkpoint['optimizer'])
            lr_scheduler.load_state_dict(checkpoint['lr_scheduler'])
            args.start_epoch = checkpoint['epoch'] + 1
            if args.model_ema:
                utils._load_checkpoint_for_ema(model_ema, checkpoint['model_ema'])
            if 'scaler' in checkpoint:
                loss_scaler.load_state_dict(checkpoint['scaler'])

    if args.eval:
        test_stats = evaluate(data_loader_val, model, device)
        print(f"Accuracy of the network on the {len(dataset_val)} test images: {test_stats['acc1']:.1f}%")
        return

    print(f"Start training for {args.epochs} epochs")
    start_time = time.time()
    max_accuracy = 0.0
    for epoch in range(args.start_epoch, args.epochs):
        if args.distributed:
            data_loader_train.sampler.set_epoch(epoch)

        train_stats = train_one_epoch(
            model, criterion, data_loader_train,
            optimizer, device, epoch, loss_scaler,
            args.clip_grad, model_ema, mixup_fn,
            surgery=args.surgery
        )

        lr_scheduler.step(epoch)
        if args.output_dir:
            # rolling checkpoint, overwritten every epoch
            checkpoint_paths = [output_dir / 'checkpoint.pth']
            for checkpoint_path in checkpoint_paths:
                utils.save_on_master({
                    'model': model_without_ddp.state_dict(),
                    'optimizer': optimizer.state_dict(),
                    'lr_scheduler': lr_scheduler.state_dict(),
                    'epoch': epoch,
                    'model_ema': get_state_dict(model_ema),
                    'scaler': loss_scaler.state_dict(),
                    'args': args,
                }, checkpoint_path)

        if (epoch % args.test_freq == 0) or (epoch == args.epochs - 1):
            test_stats = evaluate(data_loader_val, model, device)
            if test_stats["acc1"] >= max_accuracy:
                utils.save_on_master({
                    'model': model_without_ddp.state_dict(),
                    'optimizer': optimizer.state_dict(),
                    'lr_scheduler': lr_scheduler.state_dict(),
                    'epoch': epoch,
                    'model_ema': get_state_dict(model_ema),
                    'args': args,
                }, os.path.join(output_dir, 'best_model.pth'))
            print(f"Accuracy of the network on the {len(dataset_val)} test images: {test_stats['acc1']:.1f}%")
            max_accuracy = max(max_accuracy, test_stats["acc1"])
            print(f'Max accuracy: {max_accuracy:.2f}%')

        # NOTE(review): on non-eval epochs this reuses the most recent
        # test_stats; relies on the first loop iteration evaluating
        # (start_epoch % test_freq == 0) — confirm when resuming mid-schedule.
        log_stats = {**{f'train_{k}': v for k, v in train_stats.items()},
                     **{f'test_{k}': v for k, v in test_stats.items()},
                     'epoch': epoch,
                     'n_parameters': n_parameters}

        if args.output_dir and utils.is_main_process():
            with (output_dir / "log.txt").open("a") as f:
                f.write(json.dumps(log_stats) + "\n")

    total_time = time.time() - start_time
    total_time_str = str(datetime.timedelta(seconds=int(total_time)))
    print('Training time {}'.format(total_time_str))
if __name__ == '__main__':
    # CLI entry point: wrap get_args_parser() as a parent so -h shows all options.
    parser = argparse.ArgumentParser('XCiT training and evaluation script', parents=[get_args_parser()])
    args = parser.parse_args()
    # Create the output directory up front; main() assumes it can write there.
    if args.output_dir:
        Path(args.output_dir).mkdir(parents=True, exist_ok=True)
    main(args)
| 21,140 | 46.507865 | 119 | py |
xcit | xcit-main/losses.py | # Copyright (c) 2015-present, Facebook, Inc.
# All rights reserved.
"""
Implements the knowledge distillation loss
Modified from: https://github.com/facebookresearch/deit
"""
import torch
from torch.nn import functional as F
import torch.nn as nn
class DistillationLoss(torch.nn.Module):
    """
    Wraps a base criterion and adds a knowledge-distillation term computed
    against the predictions of a (frozen) teacher model. With
    distillation_type 'none' it simply defers to the base criterion.
    """
    def __init__(self, base_criterion: torch.nn.Module, teacher_model: torch.nn.Module,
                 distillation_type: str, alpha: float, tau: float):
        super().__init__()
        assert distillation_type in ['none', 'soft', 'hard']
        self.base_criterion = base_criterion
        self.teacher_model = teacher_model
        self.distillation_type = distillation_type
        self.alpha = alpha
        self.tau = tau

    def forward(self, inputs, outputs, labels):
        """
        Args:
            inputs: original inputs, forwarded through the teacher model
            outputs: student outputs; either a Tensor, or a pair with the
                class-token output first and the distillation-token output second
            labels: targets for the base criterion
        """
        outputs_kd = None
        if not isinstance(outputs, torch.Tensor):
            # model returned (outputs, outputs_kd)
            outputs, outputs_kd = outputs
        base_loss = (self.base_criterion(outputs, labels, inputs)
                     if isinstance(outputs, tuple)
                     else self.base_criterion(outputs, labels))
        if self.distillation_type == 'none':
            return base_loss

        if outputs_kd is None:
            raise ValueError("When knowledge distillation is enabled, the model is "
                             "expected to return a Tuple[Tensor, Tensor] with the output of the "
                             "class_token and the dist_token")
        # the teacher is frozen: no gradients flow through it
        with torch.no_grad():
            teacher_outputs = self.teacher_model(inputs)

        if self.distillation_type == 'soft':
            T = self.tau
            # Tempered KL divergence between student and teacher, following
            # https://github.com/peterliht/knowledge-distillation-pytorch/blob/master/model/net.py#L100
            distillation_loss = F.kl_div(
                F.log_softmax(outputs_kd / T, dim=1),
                F.log_softmax(teacher_outputs / T, dim=1),
                reduction='sum',
                log_target=True
            ) * (T * T) / outputs_kd.numel()
        else:  # 'hard': cross-entropy against the teacher's argmax labels
            distillation_loss = F.cross_entropy(outputs_kd, teacher_outputs.argmax(dim=1))

        return base_loss * (1 - self.alpha) + distillation_loss * self.alpha
| 2,977 | 40.361111 | 114 | py |
xcit | xcit-main/engine.py | # Copyright (c) 2015-present, Facebook, Inc.
# All rights reserved.
"""
Train and eval functions used in main.py
Modified from: https://github.com/facebookresearch/deit
"""
import math
import sys
from typing import Iterable, Optional
import torch
from timm.data import Mixup
from timm.utils import accuracy, ModelEma
from losses import DistillationLoss
import utils
def train_one_epoch(model: torch.nn.Module, criterion: DistillationLoss,
                    data_loader: Iterable, optimizer: torch.optim.Optimizer,
                    device: torch.device, epoch: int, loss_scaler, max_norm: float = 0,
                    model_ema: Optional[ModelEma] = None, mixup_fn: Optional[Mixup] = None,
                    set_training_mode=True, surgery=None):
    """Run one training epoch under AMP and return averaged meter values.

    Aborts the whole process (sys.exit) on a non-finite loss. When `surgery`
    is set, the (frozen) patch embedding is kept in eval mode.
    """
    model.train(set_training_mode)
    if surgery:
        # patch projection was transplanted and frozen; keep its BN stats fixed
        model.module.patch_embed.eval()
    metric_logger = utils.MetricLogger(delimiter="  ")
    metric_logger.add_meter('lr', utils.SmoothedValue(window_size=1, fmt='{value:.6f}'))
    header = 'Epoch: [{}]'.format(epoch)
    print_freq = 10
    for batch in metric_logger.log_every(data_loader, print_freq, header):
        samples, targets = batch[0], batch[1]
        samples = samples.to(device, non_blocking=True)
        targets = targets.to(device, non_blocking=True)

        if mixup_fn is not None:
            samples, targets = mixup_fn(samples, targets)

        # forward + loss under mixed precision
        with torch.cuda.amp.autocast():
            outputs = model(samples)
            loss = criterion(samples, outputs, targets)

        loss_value = loss.item()

        if not math.isfinite(loss_value):
            print("Loss is {}, stopping training".format(loss_value))
            sys.exit(1)

        optimizer.zero_grad()

        # this attribute is added by timm on one optimizer (adahessian)
        is_second_order = hasattr(optimizer, 'is_second_order') and optimizer.is_second_order
        # loss_scaler handles backward, unscaling, optional clipping and step
        loss_scaler(loss, optimizer, clip_grad=max_norm,
                    parameters=model.parameters(), create_graph=is_second_order)

        torch.cuda.synchronize()
        if model_ema is not None:
            model_ema.update(model)

        metric_logger.update(loss=loss_value)
        metric_logger.update(lr=optimizer.param_groups[0]["lr"])
    # gather the stats from all processes
    metric_logger.synchronize_between_processes()
    print("Averaged stats:", metric_logger)
    return {k: meter.global_avg for k, meter in metric_logger.meters.items()}
@torch.no_grad()
def evaluate(data_loader, model, device):
    """Evaluate `model` on `data_loader`; return averaged loss/acc1/acc5.

    Uses plain cross-entropy (independent of the training criterion) and AMP
    for the forward pass. Stats are synchronized across processes at the end.
    """
    criterion = torch.nn.CrossEntropyLoss()

    metric_logger = utils.MetricLogger(delimiter="  ")
    header = 'Test:'

    # switch to evaluation mode
    model.eval()

    for batch in metric_logger.log_every(data_loader, 10, header):
        images, target = batch[0], batch[1]
        images = images.to(device, non_blocking=True)
        target = target.to(device, non_blocking=True)

        # compute output
        with torch.cuda.amp.autocast():
            output = model(images)
            loss = criterion(output, target)

        acc1, acc5 = accuracy(output, target, topk=(1, 5))

        # weight accuracy meters by the (possibly smaller last) batch size
        batch_size = images.shape[0]
        metric_logger.update(loss=loss.item())
        metric_logger.meters['acc1'].update(acc1.item(), n=batch_size)
        metric_logger.meters['acc5'].update(acc5.item(), n=batch_size)
    # gather the stats from all processes
    metric_logger.synchronize_between_processes()
    print('* Acc@1 {top1.global_avg:.3f} Acc@5 {top5.global_avg:.3f} loss {losses.global_avg:.3f}'
          .format(top1=metric_logger.acc1, top5=metric_logger.acc5, losses=metric_logger.loss))

    return {k: meter.global_avg for k, meter in metric_logger.meters.items()}
| 3,705 | 33.635514 | 98 | py |
xcit | xcit-main/hubconf.py | # Copyright (c) 2015-present, Facebook, Inc.
# All rights reserved.
from xcit import *
# torch.hub contract: packages that must be installed before loading entrypoints.
dependencies = ["torch", "torchvision", "timm"]
| 136 | 21.833333 | 47 | py |
xcit | xcit-main/utils.py | # Copyright (c) 2015-present, Facebook, Inc.
# All rights reserved.
"""
Misc functions, including distributed helpers.
Mostly copy-paste from torchvision references.
"""
import io
import os
import time
from collections import defaultdict, deque
import datetime
import torch
import torch.distributed as dist
class SmoothedValue(object):
    """Track a stream of scalar values, exposing windowed statistics
    (median/avg/max over the last `window_size` updates) alongside the running
    global average of everything seen so far.
    """

    def __init__(self, window_size=20, fmt=None):
        self.deque = deque(maxlen=window_size)
        self.total = 0.0
        self.count = 0
        # display template used by __str__
        self.fmt = "{median:.4f} ({global_avg:.4f})" if fmt is None else fmt

    def update(self, value, n=1):
        """Record `value`, weighted by `n` occurrences."""
        self.count += n
        self.total += value * n
        self.deque.append(value)

    def synchronize_between_processes(self):
        """Reduce count/total across workers.

        Warning: does not synchronize the deque!
        """
        if not is_dist_avail_and_initialized():
            return
        packed = torch.tensor([self.count, self.total], dtype=torch.float64, device='cuda')
        dist.barrier()
        dist.all_reduce(packed)
        count, total = packed.tolist()
        self.count = int(count)
        self.total = total

    @property
    def median(self):
        window = torch.tensor(list(self.deque))
        return window.median().item()

    @property
    def avg(self):
        window = torch.tensor(list(self.deque), dtype=torch.float32)
        return window.mean().item()

    @property
    def global_avg(self):
        return self.total / self.count

    @property
    def max(self):
        return max(self.deque)

    @property
    def value(self):
        return self.deque[-1]

    def __str__(self):
        stats = dict(median=self.median, avg=self.avg, global_avg=self.global_avg,
                     max=self.max, value=self.value)
        return self.fmt.format(**stats)
class MetricLogger(object):
    """Collect named SmoothedValue meters and periodically log them while
    iterating a data loader (via the log_every generator)."""

    def __init__(self, delimiter="\t"):
        # unknown meter names get a default-configured SmoothedValue on first use
        self.meters = defaultdict(SmoothedValue)
        self.delimiter = delimiter

    def update(self, **kwargs):
        """Feed one scalar (float/int or 0-dim Tensor) per keyword meter."""
        for k, v in kwargs.items():
            if isinstance(v, torch.Tensor):
                v = v.item()
            assert isinstance(v, (float, int))
            self.meters[k].update(v)

    def __getattr__(self, attr):
        # allow `logger.loss`-style access to meters (only called when normal
        # attribute lookup fails)
        if attr in self.meters:
            return self.meters[attr]
        if attr in self.__dict__:
            return self.__dict__[attr]
        raise AttributeError("'{}' object has no attribute '{}'".format(
            type(self).__name__, attr))

    def __str__(self):
        loss_str = []
        for name, meter in self.meters.items():
            loss_str.append(
                "{}: {}".format(name, str(meter))
            )
        return self.delimiter.join(loss_str)

    def synchronize_between_processes(self):
        for meter in self.meters.values():
            meter.synchronize_between_processes()

    def add_meter(self, name, meter):
        """Register a meter with custom window/format (e.g. for lr)."""
        self.meters[name] = meter

    def log_every(self, iterable, print_freq, header=None):
        """Generator: yield items from `iterable`, printing progress, ETA,
        meter values and data/iter timing every `print_freq` iterations.

        NOTE(review): requires len(iterable); presumably always a sized
        DataLoader here.
        """
        i = 0
        if not header:
            header = ''
        start_time = time.time()
        end = time.time()
        iter_time = SmoothedValue(fmt='{avg:.4f}')
        data_time = SmoothedValue(fmt='{avg:.4f}')
        # pad the iteration counter to the width of the total count
        space_fmt = ':' + str(len(str(len(iterable)))) + 'd'
        log_msg = [
            header,
            '[{0' + space_fmt + '}/{1}]',
            'eta: {eta}',
            '{meters}',
            'time: {time}',
            'data: {data}'
        ]
        if torch.cuda.is_available():
            log_msg.append('max mem: {memory:.0f}')
        log_msg = self.delimiter.join(log_msg)
        MB = 1024.0 * 1024.0
        for obj in iterable:
            # time spent waiting for data vs. total per-iteration time
            data_time.update(time.time() - end)
            yield obj
            iter_time.update(time.time() - end)
            if i % print_freq == 0 or i == len(iterable) - 1:
                eta_seconds = iter_time.global_avg * (len(iterable) - i)
                eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))
                if torch.cuda.is_available():
                    print(log_msg.format(
                        i, len(iterable), eta=eta_string,
                        meters=str(self),
                        time=str(iter_time), data=str(data_time),
                        memory=torch.cuda.max_memory_allocated() / MB))
                else:
                    print(log_msg.format(
                        i, len(iterable), eta=eta_string,
                        meters=str(self),
                        time=str(iter_time), data=str(data_time)))
            i += 1
            end = time.time()
        total_time = time.time() - start_time
        total_time_str = str(datetime.timedelta(seconds=int(total_time)))
        print('{} Total time: {} ({:.4f} s / it)'.format(
            header, total_time_str, total_time / len(iterable)))
def _load_checkpoint_for_ema(model_ema, checkpoint):
"""
Workaround for ModelEma._load_checkpoint to accept an already-loaded object
"""
mem_file = io.BytesIO()
torch.save(checkpoint, mem_file)
mem_file.seek(0)
model_ema._load_checkpoint(mem_file)
def setup_for_distributed(is_master):
    """
    Replace the builtin print so that only the master process prints;
    any process can still force output with print(..., force=True).
    """
    import builtins
    original_print = builtins.print

    def _filtered_print(*args, **kwargs):
        force = kwargs.pop('force', False)
        if force or is_master:
            original_print(*args, **kwargs)

    builtins.print = _filtered_print
def is_dist_avail_and_initialized():
    """True iff torch.distributed is both compiled in and initialized."""
    if not dist.is_available():
        return False
    return dist.is_initialized()
def get_world_size():
    """Number of distributed processes (1 when not running distributed)."""
    return dist.get_world_size() if is_dist_avail_and_initialized() else 1
def get_rank():
    """Rank of the current process (0 when not running distributed)."""
    return dist.get_rank() if is_dist_avail_and_initialized() else 0
def is_main_process():
    """True only on the rank-0 process (always True when not distributed)."""
    return get_rank() == 0
def save_on_master(*args, **kwargs):
    """torch.save that is a no-op on non-master processes."""
    if not is_main_process():
        return
    torch.save(*args, **kwargs)
def _find_free_port():
import socket
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# Binding to port 0 will cause the OS to find an available port for us
sock.bind(("", 0))
port = sock.getsockname()[1]
sock.close()
# NOTE: there is still a chance the port could be taken by other processes.
return port
def init_distributed_mode(args):
    """Initialize torch.distributed from torchrun/SLURM environment variables.

    Mutates `args` in place (rank, world_size, gpu, distributed, dist_backend).
    Falls back to single-process mode when no launcher env vars are present.
    """
    # torchrun / torch.distributed.launch style environment
    if 'RANK' in os.environ and 'WORLD_SIZE' in os.environ:
        args.rank = int(os.environ["RANK"])
        args.world_size = int(os.environ['WORLD_SIZE'])
        args.gpu = int(os.environ['LOCAL_RANK'])
    # SLURM launcher
    # NOTE(review): this branch does not set args.world_size from SLURM —
    # presumably the --world_size flag is expected to be correct; confirm.
    elif 'SLURM_PROCID' in os.environ:
        args.rank = int(os.environ['SLURM_PROCID'])
        args.gpu = args.rank % torch.cuda.device_count()
    else:
        print('Not using distributed mode')
        args.distributed = False
        return

    args.distributed = True

    torch.cuda.set_device(args.gpu)
    args.dist_backend = 'nccl'
    print('| distributed init (rank {}): {}'.format(
        args.rank, args.dist_url), flush=True)
    torch.distributed.init_process_group(backend=args.dist_backend, init_method=args.dist_url,
                                         world_size=args.world_size, rank=args.rank)
    torch.distributed.barrier()
    # silence print() on all non-master ranks
    setup_for_distributed(args.rank == 0)
| 7,417 | 28.436508 | 94 | py |
xcit | xcit-main/xcit.py | # Copyright (c) 2015-present, Facebook, Inc.
# All rights reserved.
"""
Implementation of Cross-Covariance Image Transformer (XCiT)
Based on timm and DeiT code bases
https://github.com/rwightman/pytorch-image-models/tree/master/timm
https://github.com/facebookresearch/deit/
"""
import math
import torch
import torch.nn as nn
from functools import partial
from timm.models.vision_transformer import _cfg, Mlp
from timm.models.registry import register_model
from timm.models.layers import DropPath, trunc_normal_, to_2tuple
class PositionalEncodingFourier(nn.Module):
    """
    Sine/cosine positional encoding in the style of "Attention is All You
    Need", generated on the fly for a (B, H, W) token grid and projected to
    `dim` channels by a 1x1 convolution. Follows the DETR implementation:
    https://github.com/facebookresearch/detr/blob/master/models/position_encoding.py
    """

    def __init__(self, hidden_dim=32, dim=768, temperature=10000):
        super().__init__()
        self.token_projection = nn.Conv2d(hidden_dim * 2, dim, kernel_size=1)
        self.scale = 2 * math.pi
        self.temperature = temperature
        self.hidden_dim = hidden_dim
        self.dim = dim

    def forward(self, B, H, W):
        device = self.token_projection.weight.device
        mask = torch.zeros(B, H, W).bool().to(device)
        not_mask = ~mask
        # cumulative row/column indices, normalised to (0, 2*pi]
        y_pos = not_mask.cumsum(1, dtype=torch.float32)
        x_pos = not_mask.cumsum(2, dtype=torch.float32)
        eps = 1e-6
        y_pos = y_pos / (y_pos[:, -1:, :] + eps) * self.scale
        x_pos = x_pos / (x_pos[:, :, -1:] + eps) * self.scale
        # one frequency per channel pair (sin/cos share a frequency)
        freqs = torch.arange(self.hidden_dim, dtype=torch.float32, device=device)
        freqs = self.temperature ** (2 * (freqs // 2) / self.hidden_dim)
        x_feat = x_pos[:, :, :, None] / freqs
        y_feat = y_pos[:, :, :, None] / freqs
        # interleave sin on even channels with cos on odd channels
        x_feat = torch.stack((x_feat[:, :, :, 0::2].sin(),
                              x_feat[:, :, :, 1::2].cos()), dim=4).flatten(3)
        y_feat = torch.stack((y_feat[:, :, :, 0::2].sin(),
                              y_feat[:, :, :, 1::2].cos()), dim=4).flatten(3)
        encoding = torch.cat((y_feat, x_feat), dim=3).permute(0, 3, 1, 2)
        return self.token_projection(encoding)
def conv3x3(in_planes, out_planes, stride=1):
    """3x3 convolution (padding 1, no bias) followed by SyncBatchNorm."""
    conv = nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
                     padding=1, bias=False)
    return torch.nn.Sequential(conv, nn.SyncBatchNorm(out_planes))
class ConvPatchEmbed(nn.Module):
    """ Image to Patch Embedding using multiple convolutional layers

    A stack of stride-2 conv3x3/GELU stages downsamples the image by the
    patch size (16x or 8x) and projects it to `embed_dim` channels.
    """

    def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768):
        super().__init__()
        img_size = to_2tuple(img_size)
        patch_size = to_2tuple(patch_size)
        num_patches = (img_size[1] // patch_size[1]) * (img_size[0] // patch_size[0])
        self.img_size = img_size
        self.patch_size = patch_size
        self.num_patches = num_patches

        if patch_size[0] == 16:
            # four stride-2 stages: 224 -> 14 (16x downsampling)
            self.proj = torch.nn.Sequential(
                conv3x3(3, embed_dim // 8, 2),
                nn.GELU(),
                conv3x3(embed_dim // 8, embed_dim // 4, 2),
                nn.GELU(),
                conv3x3(embed_dim // 4, embed_dim // 2, 2),
                nn.GELU(),
                conv3x3(embed_dim // 2, embed_dim, 2),
            )
        elif patch_size[0] == 8:
            # three stride-2 stages: 8x downsampling
            self.proj = torch.nn.Sequential(
                conv3x3(3, embed_dim // 4, 2),
                nn.GELU(),
                conv3x3(embed_dim // 4, embed_dim // 2, 2),
                nn.GELU(),
                conv3x3(embed_dim // 2, embed_dim, 2),
            )
        else:
            # BUG FIX: the original `raise("...")` raised a bare string, which
            # produces "TypeError: exceptions must derive from BaseException"
            # instead of the intended message; raise a proper ValueError.
            raise ValueError("For convolutional projection, patch size has to be in [8, 16]")

    def forward(self, x, padding_size=None):
        """Return (tokens, (Hp, Wp)): flattened patch tokens and the token
        grid size. `padding_size` is accepted for interface compatibility
        but unused here.
        """
        B, C, H, W = x.shape
        x = self.proj(x)
        Hp, Wp = x.shape[2], x.shape[3]
        # (B, embed_dim, Hp, Wp) -> (B, Hp*Wp, embed_dim)
        x = x.flatten(2).transpose(1, 2)
        return x, (Hp, Wp)
class LPI(nn.Module):
    """
    Local Patch Interaction: two depth-wise 3x3 convolutions (GELU + BatchNorm
    in between) that let tokens in a 3x3 spatial window communicate explicitly,
    complementing the implicit channel-wise mixing of the attention blocks.
    """

    def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU,
                 drop=0., kernel_size=3):
        super().__init__()
        out_features = out_features or in_features

        pad = kernel_size // 2
        # groups=out_features makes both convolutions depth-wise (separable)
        self.conv1 = torch.nn.Conv2d(in_features, out_features, kernel_size=kernel_size,
                                     padding=pad, groups=out_features)
        self.act = act_layer()
        self.bn = nn.SyncBatchNorm(in_features)
        self.conv2 = torch.nn.Conv2d(in_features, out_features, kernel_size=kernel_size,
                                     padding=pad, groups=out_features)

    def forward(self, x, H, W):
        B, N, C = x.shape
        # (B, N, C) -> (B, C, H, W) so spatial convolutions can be applied
        feat = x.permute(0, 2, 1).reshape(B, C, H, W)
        feat = self.bn(self.act(self.conv1(feat)))
        feat = self.conv2(feat)
        # back to the token layout (B, N, C)
        return feat.reshape(B, C, N).permute(0, 2, 1)
class ClassAttention(nn.Module):
    """Class Attention layer (CaiT, https://arxiv.org/abs/2103.17239).

    Only the CLS token attends over all tokens; patch tokens pass through
    unchanged, making the cost linear in sequence length.
    """

    def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0.):
        super().__init__()
        self.num_heads = num_heads
        head_dim = dim // num_heads
        self.scale = qk_scale or head_dim ** -0.5
        self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(dim, dim)
        self.proj_drop = nn.Dropout(proj_drop)

    def forward(self, x):
        B, N, C = x.shape
        heads = self.num_heads
        # Project to q/k/v and split heads: each is (B, heads, N, C // heads).
        q, k, v = self.qkv(x).reshape(B, N, 3, heads, C // heads).permute(2, 0, 3, 1, 4).unbind(0)
        # Attention scores of the CLS-token query against every key.
        cls_q = q[:, :, 0:1]
        attn = (cls_q * k).sum(dim=-1) * self.scale
        attn = self.attn_drop(attn.softmax(dim=-1))
        # Weighted sum of values produces the updated CLS token only.
        cls_out = (attn.unsqueeze(2) @ v).transpose(1, 2).reshape(B, 1, C)
        cls_out = self.proj(cls_out)
        # Replace the CLS token; patch tokens are returned untouched.
        return torch.cat([self.proj_drop(cls_out), x[:, 1:]], dim=1)
class ClassAttentionBlock(nn.Module):
    """Class Attention Layer as in CaiT https://arxiv.org/abs/2103.17239

    Pre-norm block of class-attention followed by an MLP applied to the CLS
    token only, with optional LayerScale (enabled when ``eta`` is not None).
    """
    def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0.,
                 attn_drop=0., drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm, eta=None,
                 tokens_norm=False):
        super().__init__()
        self.norm1 = norm_layer(dim)
        self.attn = ClassAttention(
            dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop,
            proj_drop=drop
        )
        # Stochastic depth on the residual branches (identity when rate is 0).
        self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
        self.norm2 = norm_layer(dim)
        mlp_hidden_dim = int(dim * mlp_ratio)
        self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer,
                       drop=drop)
        if eta is not None:     # LayerScale Initialization (no layerscale when None)
            self.gamma1 = nn.Parameter(eta * torch.ones(dim), requires_grad=True)
            self.gamma2 = nn.Parameter(eta * torch.ones(dim), requires_grad=True)
        else:
            # Plain floats so the gamma* multiplications become no-op scalings.
            self.gamma1, self.gamma2 = 1.0, 1.0
        # FIXME: A hack for models pre-trained with layernorm over all the tokens not just the CLS
        self.tokens_norm = tokens_norm
    def forward(self, x, H, W, mask=None):
        # H, W and mask are unused; kept so all blocks share one call signature.
        x = x + self.drop_path(self.gamma1 * self.attn(self.norm1(x)))
        if self.tokens_norm:
            x = self.norm2(x)
        else:
            # In-place normalization of the CLS slice only (see FIXME above).
            x[:, 0:1] = self.norm2(x[:, 0:1])
        x_res = x
        cls_token = x[:, 0:1]
        cls_token = self.gamma2 * self.mlp(cls_token)
        x = torch.cat([cls_token, x[:, 1:]], dim=1)
        # NOTE(review): this residual re-adds the untouched patch tokens (they
        # are effectively doubled when drop_path is identity); only the CLS
        # token is consumed downstream, so presumably benign -- confirm.
        x = x_res + self.drop_path(x)
        return x
class XCA(nn.Module):
    """ Cross-Covariance Attention (XCA).

    Attention is computed between channels rather than tokens: the
    d_h x d_h cross-covariance matrix of L2-normalised queries and keys,
    scaled by a learnable per-head temperature and softmax-normalised,
    re-weights the value channels.
    """

    def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0.):
        super().__init__()
        self.num_heads = num_heads
        # Learnable softmax temperature, one scalar per head.
        self.temperature = nn.Parameter(torch.ones(num_heads, 1, 1))
        self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(dim, dim)
        self.proj_drop = nn.Dropout(proj_drop)

    def forward(self, x):
        B, N, C = x.shape
        projected = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads)
        q, k, v = projected.permute(2, 0, 3, 1, 4).unbind(0)
        # Swap token/channel axes: shapes become (B, heads, head_dim, N).
        q = torch.nn.functional.normalize(q.transpose(-2, -1), dim=-1)
        k = torch.nn.functional.normalize(k.transpose(-2, -1), dim=-1)
        v = v.transpose(-2, -1)
        # (head_dim x head_dim) cross-covariance attention map.
        attn = (q @ k.transpose(-2, -1)) * self.temperature
        attn = self.attn_drop(attn.softmax(dim=-1))
        out = (attn @ v).permute(0, 3, 1, 2).reshape(B, N, C)
        return self.proj_drop(self.proj(out))

    @torch.jit.ignore
    def no_weight_decay(self):
        # Keep the learnable temperature out of weight decay.
        return {'temperature'}
class XCABlock(nn.Module):
    """XCiT encoder block: XCA attention, Local Patch Interaction, then MLP.

    Each sub-layer is pre-normalised, residual, scaled by a learnable
    per-channel LayerScale parameter (gamma*, initialised to ``eta``) and
    wrapped in stochastic depth (DropPath).
    """

    def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0.,
                 attn_drop=0., drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm,
                 num_tokens=196, eta=None):
        super().__init__()
        self.norm1 = norm_layer(dim)
        self.attn = XCA(dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale,
                        attn_drop=attn_drop, proj_drop=drop)
        self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
        self.norm2 = norm_layer(dim)
        self.mlp = Mlp(in_features=dim, hidden_features=int(dim * mlp_ratio),
                       act_layer=act_layer, drop=drop)
        self.norm3 = norm_layer(dim)
        self.local_mp = LPI(in_features=dim, act_layer=act_layer)
        # LayerScale: one learnable per-channel scale per residual branch.
        self.gamma1 = nn.Parameter(eta * torch.ones(dim), requires_grad=True)
        self.gamma2 = nn.Parameter(eta * torch.ones(dim), requires_grad=True)
        self.gamma3 = nn.Parameter(eta * torch.ones(dim), requires_grad=True)

    def forward(self, x, H, W):
        # Order: channel attention, then local spatial mixing, then MLP.
        x = x + self.drop_path(self.gamma1 * self.attn(self.norm1(x)))
        x = x + self.drop_path(self.gamma3 * self.local_mp(self.norm3(x), H, W))
        x = x + self.drop_path(self.gamma2 * self.mlp(self.norm2(x)))
        return x
class XCiT(nn.Module):
    """
    Based on timm and DeiT code bases
    https://github.com/rwightman/pytorch-image-models/tree/master/timm
    https://github.com/facebookresearch/deit/

    Cross-Covariance Image Transformer for classification: conv patch stem,
    Fourier positional encoding, a trunk of XCA blocks over patch tokens,
    then CaiT-style class-attention blocks and a linear head on the CLS token.
    """
    def __init__(self, img_size=224, patch_size=16, in_chans=3, num_classes=1000, embed_dim=768,
                 depth=12, num_heads=12, mlp_ratio=4., qkv_bias=True, qk_scale=None,
                 drop_rate=0., attn_drop_rate=0., drop_path_rate=0., norm_layer=None,
                 cls_attn_layers=2, use_pos=True, patch_proj='linear', eta=None, tokens_norm=False):
        """
        Args:
            img_size (int, tuple): input image size
            patch_size (int, tuple): patch size
            in_chans (int): number of input channels
            num_classes (int): number of classes for classification head
            embed_dim (int): embedding dimension
            depth (int): depth of transformer
            num_heads (int): number of attention heads
            mlp_ratio (int): ratio of mlp hidden dim to embedding dim
            qkv_bias (bool): enable bias for qkv if True
            qk_scale (float): override default qk scale of head_dim ** -0.5 if set
            drop_rate (float): dropout rate
            attn_drop_rate (float): attention dropout rate
            drop_path_rate (float): stochastic depth rate
            norm_layer: (nn.Module): normalization layer
            cls_attn_layers: (int) Depth of Class attention layers
            use_pos: (bool) whether to use positional encoding
            eta: (float) layerscale initialization value
            tokens_norm: (bool) Whether to normalize all tokens or just the cls_token in the CA
        """
        super().__init__()
        # NOTE(review): `in_chans` and `patch_proj` are accepted but not used
        # in this constructor (the conv stem is always built for 3 channels).
        self.num_classes = num_classes
        self.num_features = self.embed_dim = embed_dim
        norm_layer = norm_layer or partial(nn.LayerNorm, eps=1e-6)
        # Convolutional patch-embedding stem (stack of stride-2 convs).
        self.patch_embed = ConvPatchEmbed(img_size=img_size, embed_dim=embed_dim,
                                          patch_size=patch_size)
        num_patches = self.patch_embed.num_patches
        self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
        self.pos_drop = nn.Dropout(p=drop_rate)
        # Constant stochastic-depth rate for every block (no linear decay).
        dpr = [drop_path_rate for i in range(depth)]
        # Main trunk: XCA blocks operating on patch tokens only.
        self.blocks = nn.ModuleList([
            XCABlock(
                dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias,
                qk_scale=qk_scale, drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i],
                norm_layer=norm_layer, num_tokens=num_patches, eta=eta)
            for i in range(depth)])
        # CaiT-style class-attention blocks: only the CLS token is updated.
        self.cls_attn_blocks = nn.ModuleList([
            ClassAttentionBlock(
                dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias,
                qk_scale=qk_scale, drop=drop_rate, attn_drop=attn_drop_rate, norm_layer=norm_layer,
                eta=eta, tokens_norm=tokens_norm)
            for i in range(cls_attn_layers)])
        self.norm = norm_layer(embed_dim)
        self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity()
        # Fourier positional encoding, recomputed for the actual token grid.
        self.pos_embeder = PositionalEncodingFourier(dim=embed_dim)
        self.use_pos = use_pos
        # Classifier head
        trunc_normal_(self.cls_token, std=.02)
        self.apply(self._init_weights)
    def _init_weights(self, m):
        # Truncated-normal init for Linear layers; unit/zero init for LayerNorm.
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=.02)
            if isinstance(m, nn.Linear) and m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)
    @torch.jit.ignore
    def no_weight_decay(self):
        # Parameter names to be excluded from weight decay by the optimizer.
        return {'pos_embed', 'cls_token', 'dist_token'}
    def forward_features(self, x):
        B, C, H, W = x.shape
        x, (Hp, Wp) = self.patch_embed(x)
        if self.use_pos:
            # Positional encoding is generated for the actual (Hp, Wp) grid,
            # so variable input resolutions are supported.
            pos_encoding = self.pos_embeder(B, Hp, Wp).reshape(B, -1, x.shape[1]).permute(0, 2, 1)
            x = x + pos_encoding
        x = self.pos_drop(x)
        for blk in self.blocks:
            x = blk(x, Hp, Wp)
        # The CLS token is appended only for the class-attention stage.
        cls_tokens = self.cls_token.expand(B, -1, -1)
        x = torch.cat((cls_tokens, x), dim=1)
        for blk in self.cls_attn_blocks:
            x = blk(x, Hp, Wp)
        # Return the normalised CLS token only.
        x = self.norm(x)[:, 0]
        return x
    def forward(self, x):
        x = self.forward_features(x)
        x = self.head(x)
        if self.training:
            # NOTE(review): the logits are returned twice during training --
            # presumably to match a two-output (e.g. distillation-style)
            # training interface; confirm against the training loop.
            return x, x
        else:
            return x
# Model factories. All variants share one construction recipe and differ only
# in patch size, width, depth, number of heads, LayerScale init (eta) and the
# CaiT-style tokens_norm flag.


def _create_xcit(patch_size, embed_dim, depth, num_heads, eta, tokens_norm, **kwargs):
    """Build an XCiT variant and attach the default pretrained-config."""
    model = XCiT(
        patch_size=patch_size, embed_dim=embed_dim, depth=depth, num_heads=num_heads,
        mlp_ratio=4, qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6),
        eta=eta, tokens_norm=tokens_norm, **kwargs)
    model.default_cfg = _cfg()
    return model


# Patch size 16x16 models

@register_model
def xcit_nano_12_p16(pretrained=False, **kwargs):
    return _create_xcit(16, 128, 12, 4, eta=1.0, tokens_norm=False, **kwargs)


@register_model
def xcit_tiny_12_p16(pretrained=False, **kwargs):
    return _create_xcit(16, 192, 12, 4, eta=1.0, tokens_norm=True, **kwargs)


@register_model
def xcit_small_12_p16(pretrained=False, **kwargs):
    return _create_xcit(16, 384, 12, 8, eta=1.0, tokens_norm=True, **kwargs)


@register_model
def xcit_tiny_24_p16(pretrained=False, **kwargs):
    return _create_xcit(16, 192, 24, 4, eta=1e-5, tokens_norm=True, **kwargs)


@register_model
def xcit_small_24_p16(pretrained=False, **kwargs):
    return _create_xcit(16, 384, 24, 8, eta=1e-5, tokens_norm=True, **kwargs)


@register_model
def xcit_medium_24_p16(pretrained=False, **kwargs):
    return _create_xcit(16, 512, 24, 8, eta=1e-5, tokens_norm=True, **kwargs)


@register_model
def xcit_large_24_p16(pretrained=False, **kwargs):
    return _create_xcit(16, 768, 24, 16, eta=1e-5, tokens_norm=True, **kwargs)


# Patch size 8x8 models

@register_model
def xcit_nano_12_p8(pretrained=False, **kwargs):
    return _create_xcit(8, 128, 12, 4, eta=1.0, tokens_norm=False, **kwargs)


@register_model
def xcit_tiny_12_p8(pretrained=False, **kwargs):
    return _create_xcit(8, 192, 12, 4, eta=1.0, tokens_norm=True, **kwargs)


@register_model
def xcit_small_12_p8(pretrained=False, **kwargs):
    return _create_xcit(8, 384, 12, 8, eta=1.0, tokens_norm=True, **kwargs)


@register_model
def xcit_tiny_24_p8(pretrained=False, **kwargs):
    return _create_xcit(8, 192, 24, 4, eta=1e-5, tokens_norm=True, **kwargs)


@register_model
def xcit_small_24_p8(pretrained=False, **kwargs):
    return _create_xcit(8, 384, 24, 8, eta=1e-5, tokens_norm=True, **kwargs)


@register_model
def xcit_medium_24_p8(pretrained=False, **kwargs):
    return _create_xcit(8, 512, 24, 8, eta=1e-5, tokens_norm=True, **kwargs)


@register_model
def xcit_large_24_p8(pretrained=False, **kwargs):
    return _create_xcit(8, 768, 24, 16, eta=1e-5, tokens_norm=True, **kwargs)
| 20,085 | 36.473881 | 100 | py |
xcit | xcit-main/datasets.py | # Copyright (c) 2015-present, Facebook, Inc.
# All rights reserved.
"""
Modified from: https://github.com/facebookresearch/deit
"""
import os
import json
from torchvision import datasets, transforms
from torchvision.datasets.folder import ImageFolder, default_loader
from timm.data.constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.data import create_transform
class INatDataset(ImageFolder):
    """iNaturalist dataset wrapper.

    Builds the sample list from the official iNaturalist json annotation
    files (instead of scanning directories) and remaps raw category ids to
    contiguous targets at the requested taxonomic ``category`` level.
    """

    def __init__(self, root, train=True, year=2018, transform=None, target_transform=None,
                 category='name', loader=default_loader):
        # ImageFolder.__init__ is deliberately not called: samples come from
        # the json annotations, not from a directory scan.
        self.transform = transform
        self.loader = loader
        self.target_transform = target_transform
        self.year = year
        # assert category in ['kingdom','phylum','class','order','supercategory','family','genus','name']

        split = "train" if train else "val"
        with open(os.path.join(root, f'{split}{year}.json')) as json_file:
            data = json.load(json_file)
        with open(os.path.join(root, 'categories.json')) as json_file:
            data_catg = json.load(json_file)
        # Targets are always indexed from the *train* annotations so that the
        # train and val splits share one label mapping.
        with open(os.path.join(root, f"train{year}.json")) as json_file:
            data_for_targeter = json.load(json_file)

        targeter = {}
        for elem in data_for_targeter['annotations']:
            name = data_catg[int(elem['category_id'])][category]
            if name not in targeter:
                targeter[name] = len(targeter)
        self.nb_classes = len(targeter)

        self.samples = []
        for elem in data['images']:
            cut = elem['file_name'].split('/')
            target_current = int(cut[2])
            path_current = os.path.join(root, cut[0], cut[2], cut[3])
            self.samples.append((path_current, targeter[data_catg[target_current][category]]))

    # __getitem__ and __len__ inherited from ImageFolder
def build_dataset(is_train, args):
    """Build the dataset selected by ``args.data_set``.

    Args:
        is_train (bool): build the training split if True, else the eval split.
        args: parsed command-line arguments; reads ``data_set``, ``data_path``
            and (for iNaturalist) ``inat_category``.

    Returns:
        tuple: (dataset, nb_classes).

    Raises:
        ValueError: if ``args.data_set`` is not a recognised dataset name.
    """
    transform = build_transform(is_train, args)

    if args.data_set == 'CIFAR100':
        dataset = datasets.CIFAR100(args.data_path, train=is_train, transform=transform)
        nb_classes = 100
    elif args.data_set == 'CIFAR10':
        dataset = datasets.CIFAR10(args.data_path, train=is_train, transform=transform)
        nb_classes = 10
    elif args.data_set == 'IMNET':
        root = os.path.join(args.data_path, 'train' if is_train else 'val')
        dataset = datasets.ImageFolder(root, transform=transform)
        nb_classes = 1000
    elif args.data_set == 'INAT':
        dataset = INatDataset(args.data_path, train=is_train, year=2018,
                              category=args.inat_category, transform=transform)
        nb_classes = dataset.nb_classes
    elif args.data_set == 'INAT19':
        dataset = INatDataset(args.data_path, train=is_train, year=2019,
                              category=args.inat_category, transform=transform)
        nb_classes = dataset.nb_classes
    elif args.data_set == 'CARS':
        # Stanford Cars uses a 'test' (not 'val') directory for evaluation.
        root = os.path.join(args.data_path, 'train' if is_train else 'test')
        dataset = datasets.ImageFolder(root, transform=transform)
        nb_classes = 196
    elif args.data_set == 'FLOWERS':
        root = os.path.join(args.data_path, 'train' if is_train else 'test')
        dataset = datasets.ImageFolder(root, transform=transform)
        nb_classes = 102
    else:
        # Fix: an unknown name previously fell through to an opaque
        # NameError on `dataset`; fail with an explicit message instead.
        raise ValueError(f"Unknown dataset: {args.data_set}")

    return dataset, nb_classes
def build_transform(is_train, args):
    """Build the torchvision transform pipeline for training or evaluation.

    Training uses timm's ``create_transform`` (random resized crop, color
    jitter, AutoAugment, random erasing). Evaluation uses resize + center
    crop, or a direct full-image resize when ``args.full_crop`` is set.

    Args:
        is_train (bool): build the training pipeline if True.
        args: parsed arguments; reads ``input_size``, ``full_crop`` and the
            augmentation hyper-parameters.

    Returns:
        A callable transform.
    """
    resize_im = args.input_size > 32
    if is_train:
        # this should always dispatch to transforms_imagenet_train
        transform = create_transform(
            input_size=args.input_size,
            is_training=True,
            color_jitter=args.color_jitter,
            auto_augment=args.aa,
            interpolation=args.train_interpolation,
            re_prob=args.reprob,
            re_mode=args.remode,
            re_count=args.recount,
        )
        if not resize_im:
            # For small images (e.g. CIFAR), replace
            # RandomResizedCropAndInterpolation with a padded RandomCrop.
            transform.transforms[0] = transforms.RandomCrop(
                args.input_size, padding=4)
        return transform

    if args.full_crop:
        # Resize directly to the target size without the 256/224-style margin.
        # (Fix: removed a dead `transformations = {}` assignment that was
        # immediately overwritten.)
        mean, std = [0.485, 0.456, 0.406], [0.229, 0.224, 0.225]
        return transforms.Compose(
            [transforms.Resize(args.input_size, interpolation=3),  # 3 == bicubic
             transforms.CenterCrop(args.input_size),
             transforms.ToTensor(),
             transforms.Normalize(mean, std)])

    t = []
    if resize_im:
        size = int((256 / 224) * args.input_size)
        t.append(
            transforms.Resize(size, interpolation=3),  # to maintain same ratio w.r.t. 224 images
        )
    t.append(transforms.CenterCrop(args.input_size))
    t.append(transforms.ToTensor())
    t.append(transforms.Normalize(IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD))
    return transforms.Compose(t)
| 5,145 | 36.838235 | 105 | py |
xcit | xcit-main/samplers.py | # Copyright (c) 2015-present, Facebook, Inc.
# All rights reserved.
"""
Copied from: https://github.com/facebookresearch/deit
"""
import torch
import torch.distributed as dist
import math
class RASampler(torch.utils.data.Sampler):
    """Distributed sampler with Repeated Augmentation.

    Every dataset index is repeated 3 times and the copies are dealt out
    round-robin across processes, so each process (GPU) sees a different
    augmented instance of the same sample within an epoch.
    Heavily based on torch.utils.data.DistributedSampler.
    """

    def __init__(self, dataset, num_replicas=None, rank=None, shuffle=True):
        if num_replicas is None:
            if not dist.is_available():
                raise RuntimeError("Requires distributed package to be available")
            num_replicas = dist.get_world_size()
        if rank is None:
            if not dist.is_available():
                raise RuntimeError("Requires distributed package to be available")
            rank = dist.get_rank()
        self.dataset = dataset
        self.num_replicas = num_replicas
        self.rank = rank
        self.epoch = 0
        # Each sample appears 3x; pad so every replica receives the same count.
        self.num_samples = int(math.ceil(len(self.dataset) * 3.0 / self.num_replicas))
        self.total_size = self.num_samples * self.num_replicas
        # self.num_selected_samples = int(math.ceil(len(self.dataset) / self.num_replicas))
        self.num_selected_samples = int(math.floor(len(self.dataset) // 256 * 256 / self.num_replicas))
        self.shuffle = shuffle

    def __iter__(self):
        # Deterministic shuffle keyed on the current epoch.
        g = torch.Generator()
        g.manual_seed(self.epoch)
        if self.shuffle:
            order = torch.randperm(len(self.dataset), generator=g).tolist()
        else:
            order = list(range(len(self.dataset)))

        # Triple every index, then pad to a multiple of the replica count.
        repeated = [idx for idx in order for _ in range(3)]
        repeated += repeated[:(self.total_size - len(repeated))]
        assert len(repeated) == self.total_size

        # Round-robin split across replicas.
        own = repeated[self.rank:self.total_size:self.num_replicas]
        assert len(own) == self.num_samples

        return iter(own[:self.num_selected_samples])

    def __len__(self):
        return self.num_selected_samples

    def set_epoch(self, epoch):
        self.epoch = epoch
| 2,354 | 36.380952 | 103 | py |
xcit | xcit-main/semantic_segmentation/backbone/xcit.py | # Copyright (c) 2015-present, Facebook, Inc.
# All rights reserved.
"""
Object detection and instance segmentation with XCiT backbone
Based on mmseg, timm and DeiT code bases
https://github.com/open-mmlab/mmsegmentation
https://github.com/rwightman/pytorch-image-models/tree/master/timm
https://github.com/facebookresearch/deit/
"""
import math
from functools import partial
import torch
import torch.nn as nn
import torch.nn.functional as F
from timm.models.vision_transformer import _cfg, Mlp
from timm.models.registry import register_model
from timm.models.layers import DropPath, trunc_normal_, to_2tuple
from mmcv.runner import load_checkpoint
from mmseg.utils import get_root_logger
from mmseg.models.builder import BACKBONES
class PositionalEncodingFourier(nn.Module):
    """
    Positional encoding relying on a fourier kernel matching the one used in
    the "Attention is all of Need" paper. The implementation builds on DeTR code
    https://github.com/facebookresearch/detr/blob/master/models/position_encoding.py
    Produces a (B, dim, H, W) sinusoidal map projected through a 1x1 conv.
    """

    def __init__(self, hidden_dim=32, dim=768, temperature=10000):
        super().__init__()
        self.token_projection = nn.Conv2d(hidden_dim * 2, dim, kernel_size=1)
        self.scale = 2 * math.pi
        self.temperature = temperature
        self.hidden_dim = hidden_dim
        self.dim = dim

    def forward(self, B, H, W):
        device = self.token_projection.weight.device
        not_mask = ~torch.zeros(B, H, W).bool().to(device)
        eps = 1e-6
        # Cumulative coordinates, normalised into [0, 2*pi].
        y_embed = not_mask.cumsum(1, dtype=torch.float32)
        x_embed = not_mask.cumsum(2, dtype=torch.float32)
        y_embed = y_embed / (y_embed[:, -1:, :] + eps) * self.scale
        x_embed = x_embed / (x_embed[:, :, -1:] + eps) * self.scale
        # Geometric frequency ladder across the hidden dimension.
        dim_t = torch.arange(self.hidden_dim, dtype=torch.float32, device=not_mask.device)
        dim_t = self.temperature ** (2 * (dim_t // 2) / self.hidden_dim)
        pos_x = x_embed[:, :, :, None] / dim_t
        pos_y = y_embed[:, :, :, None] / dim_t
        # Interleave sin/cos pairs along the feature axis.
        pos_x = torch.stack((pos_x[:, :, :, 0::2].sin(),
                             pos_x[:, :, :, 1::2].cos()), dim=4).flatten(3)
        pos_y = torch.stack((pos_y[:, :, :, 0::2].sin(),
                             pos_y[:, :, :, 1::2].cos()), dim=4).flatten(3)
        pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2)
        return self.token_projection(pos)
def conv3x3(in_planes, out_planes, stride=1):
    """3x3 convolution with padding 1 (no bias) followed by SyncBatchNorm."""
    convolution = nn.Conv2d(in_planes, out_planes, kernel_size=3,
                            stride=stride, padding=1, bias=False)
    return torch.nn.Sequential(convolution, nn.SyncBatchNorm(out_planes))
class ConvPatchEmbed(nn.Module):
    """Image-to-patch embedding implemented as a small convolutional stem.

    Instead of a single large-stride projection, the image is embedded with a
    stack of stride-2 ``conv3x3`` (conv + SyncBatchNorm) stages interleaved
    with GELU, reducing spatial resolution by ``patch_size`` overall.
    """

    def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768):
        """
        Args:
            img_size (int or tuple): input image size.
            patch_size (int or tuple): total downsampling factor; must be 8 or 16.
            in_chans (int): number of input image channels.
            embed_dim (int): embedding dimension of the output patch tokens.

        Raises:
            ValueError: if ``patch_size`` is not 8 or 16.
        """
        super().__init__()
        img_size = to_2tuple(img_size)
        patch_size = to_2tuple(patch_size)
        num_patches = (img_size[1] // patch_size[1]) * (img_size[0] // patch_size[0])
        self.img_size = img_size
        self.patch_size = patch_size
        self.num_patches = num_patches

        if patch_size[0] == 16:
            # Four stride-2 stages: 16x total downsampling.
            # Fix: use `in_chans` instead of a hardcoded 3 so the parameter is honored
            # (backward-compatible, since the default is 3).
            self.proj = torch.nn.Sequential(
                conv3x3(in_chans, embed_dim // 8, 2),
                nn.GELU(),
                conv3x3(embed_dim // 8, embed_dim // 4, 2),
                nn.GELU(),
                conv3x3(embed_dim // 4, embed_dim // 2, 2),
                nn.GELU(),
                conv3x3(embed_dim // 2, embed_dim, 2),
            )
        elif patch_size[0] == 8:
            # Three stride-2 stages: 8x total downsampling.
            self.proj = torch.nn.Sequential(
                conv3x3(in_chans, embed_dim // 4, 2),
                nn.GELU(),
                conv3x3(embed_dim // 4, embed_dim // 2, 2),
                nn.GELU(),
                conv3x3(embed_dim // 2, embed_dim, 2),
            )
        else:
            # Fix: the original `raise("...")` raised a bare TypeError ("exceptions
            # must derive from BaseException") and lost the message entirely.
            raise ValueError("For convolutional projection, patch size has to be in [8, 16]")

    def forward(self, x, padding_size=None):
        """Embed an image batch.

        Args:
            x: input tensor of shape (B, C, H, W).
            padding_size: unused; kept for interface compatibility.

        Returns:
            tuple: (tokens of shape (B, Hp*Wp, embed_dim), (Hp, Wp) token grid size).
        """
        B, C, H, W = x.shape
        x = self.proj(x)
        Hp, Wp = x.shape[2], x.shape[3]
        # Flatten the spatial grid into a token sequence.
        x = x.flatten(2).transpose(1, 2)
        return x, (Hp, Wp)
class LPI(nn.Module):
    """Local Patch Interaction (LPI).

    Lets tokens in a 3x3 spatial neighbourhood exchange information
    explicitly, complementing the channel-wise mixing performed by the XCA
    attention. Implemented as two depth-wise 3x3 convolutions with a GELU and
    a SyncBatchNorm in between.
    """

    def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU,
                 drop=0., kernel_size=3):
        super().__init__()
        out_features = out_features or in_features
        pad = kernel_size // 2
        # Depth-wise convolutions: groups == channels, so each channel is
        # filtered independently over its spatial neighbourhood.
        self.conv1 = torch.nn.Conv2d(in_features, out_features, kernel_size=kernel_size,
                                     padding=pad, groups=out_features)
        self.act = act_layer()
        self.bn = nn.SyncBatchNorm(in_features)
        self.conv2 = torch.nn.Conv2d(in_features, out_features, kernel_size=kernel_size,
                                     padding=pad, groups=out_features)

    def forward(self, x, H, W):
        B, N, C = x.shape
        # Token sequence -> spatial feature map.
        feat = x.permute(0, 2, 1).reshape(B, C, H, W)
        feat = self.bn(self.act(self.conv1(feat)))
        feat = self.conv2(feat)
        # Spatial map -> token sequence.
        return feat.reshape(B, C, N).permute(0, 2, 1)
class ClassAttention(nn.Module):
    """Class Attention layer (CaiT, https://arxiv.org/abs/2103.17239).

    Only the CLS token attends over all tokens; patch tokens pass through
    unchanged, making the cost linear in sequence length.
    """

    def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0.):
        super().__init__()
        self.num_heads = num_heads
        head_dim = dim // num_heads
        self.scale = qk_scale or head_dim ** -0.5
        self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(dim, dim)
        self.proj_drop = nn.Dropout(proj_drop)

    def forward(self, x):
        B, N, C = x.shape
        heads = self.num_heads
        # Project to q/k/v and split heads: each is (B, heads, N, C // heads).
        q, k, v = self.qkv(x).reshape(B, N, 3, heads, C // heads).permute(2, 0, 3, 1, 4).unbind(0)
        # Attention scores of the CLS-token query against every key.
        cls_q = q[:, :, 0:1]
        attn = (cls_q * k).sum(dim=-1) * self.scale
        attn = self.attn_drop(attn.softmax(dim=-1))
        # Weighted sum of values produces the updated CLS token only.
        cls_out = (attn.unsqueeze(2) @ v).transpose(1, 2).reshape(B, 1, C)
        cls_out = self.proj(cls_out)
        # Replace the CLS token; patch tokens are returned untouched.
        return torch.cat([self.proj_drop(cls_out), x[:, 1:]], dim=1)
class ClassAttentionBlock(nn.Module):
    """Class Attention Layer as in CaiT https://arxiv.org/abs/2103.17239

    Pre-norm block of class-attention followed by an MLP applied to the CLS
    token only, with optional LayerScale (enabled when ``eta`` is not None).
    """
    def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0.,
                 attn_drop=0., drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm, eta=None,
                 tokens_norm=False):
        super().__init__()
        self.norm1 = norm_layer(dim)
        self.attn = ClassAttention(
            dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop,
            proj_drop=drop
        )
        # Stochastic depth on the residual branches (identity when rate is 0).
        self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
        self.norm2 = norm_layer(dim)
        mlp_hidden_dim = int(dim * mlp_ratio)
        self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer,
                       drop=drop)
        if eta is not None:     # LayerScale Initialization (no layerscale when None)
            self.gamma1 = nn.Parameter(eta * torch.ones(dim), requires_grad=True)
            self.gamma2 = nn.Parameter(eta * torch.ones(dim), requires_grad=True)
        else:
            # Plain floats so the gamma* multiplications become no-op scalings.
            self.gamma1, self.gamma2 = 1.0, 1.0
        # FIXME: A hack for models pre-trained with layernorm over all the tokens not just the CLS
        self.tokens_norm = tokens_norm
    def forward(self, x, H, W, mask=None):
        # H, W and mask are unused; kept so all blocks share one call signature.
        x = x + self.drop_path(self.gamma1 * self.attn(self.norm1(x)))
        if self.tokens_norm:
            x = self.norm2(x)
        else:
            # In-place normalization of the CLS slice only (see FIXME above).
            x[:, 0:1] = self.norm2(x[:, 0:1])
        x_res = x
        cls_token = x[:, 0:1]
        cls_token = self.gamma2 * self.mlp(cls_token)
        x = torch.cat([cls_token, x[:, 1:]], dim=1)
        # NOTE(review): this residual re-adds the untouched patch tokens (they
        # are effectively doubled when drop_path is identity); only the CLS
        # token is consumed downstream, so presumably benign -- confirm.
        x = x_res + self.drop_path(x)
        return x
class XCA(nn.Module):
    """ Cross-Covariance Attention (XCA).

    Attention is computed between channels rather than tokens: the
    d_h x d_h cross-covariance matrix of L2-normalised queries and keys,
    scaled by a learnable per-head temperature and softmax-normalised,
    re-weights the value channels.
    """

    def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0.):
        super().__init__()
        self.num_heads = num_heads
        # Learnable softmax temperature, one scalar per head.
        self.temperature = nn.Parameter(torch.ones(num_heads, 1, 1))
        self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(dim, dim)
        self.proj_drop = nn.Dropout(proj_drop)

    def forward(self, x):
        B, N, C = x.shape
        projected = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads)
        q, k, v = projected.permute(2, 0, 3, 1, 4).unbind(0)
        # Swap token/channel axes: shapes become (B, heads, head_dim, N).
        q = torch.nn.functional.normalize(q.transpose(-2, -1), dim=-1)
        k = torch.nn.functional.normalize(k.transpose(-2, -1), dim=-1)
        v = v.transpose(-2, -1)
        # (head_dim x head_dim) cross-covariance attention map.
        attn = (q @ k.transpose(-2, -1)) * self.temperature
        attn = self.attn_drop(attn.softmax(dim=-1))
        out = (attn @ v).permute(0, 3, 1, 2).reshape(B, N, C)
        return self.proj_drop(self.proj(out))

    @torch.jit.ignore
    def no_weight_decay(self):
        # Keep the learnable temperature out of weight decay.
        return {'temperature'}
class XCABlock(nn.Module):
    """XCiT encoder block: XCA attention, Local Patch Interaction, then MLP.

    Each sub-layer is pre-normalised, residual, scaled by a learnable
    per-channel LayerScale parameter (gamma*, initialised to ``eta``) and
    wrapped in stochastic depth (DropPath).
    """

    def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0.,
                 attn_drop=0., drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm,
                 num_tokens=196, eta=None):
        super().__init__()
        self.norm1 = norm_layer(dim)
        self.attn = XCA(dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale,
                        attn_drop=attn_drop, proj_drop=drop)
        self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
        self.norm2 = norm_layer(dim)
        self.mlp = Mlp(in_features=dim, hidden_features=int(dim * mlp_ratio),
                       act_layer=act_layer, drop=drop)
        self.norm3 = norm_layer(dim)
        self.local_mp = LPI(in_features=dim, act_layer=act_layer)
        # LayerScale: one learnable per-channel scale per residual branch.
        self.gamma1 = nn.Parameter(eta * torch.ones(dim), requires_grad=True)
        self.gamma2 = nn.Parameter(eta * torch.ones(dim), requires_grad=True)
        self.gamma3 = nn.Parameter(eta * torch.ones(dim), requires_grad=True)

    def forward(self, x, H, W):
        # Order: channel attention, then local spatial mixing, then MLP.
        x = x + self.drop_path(self.gamma1 * self.attn(self.norm1(x)))
        x = x + self.drop_path(self.gamma3 * self.local_mp(self.norm3(x), H, W))
        x = x + self.drop_path(self.gamma2 * self.mlp(self.norm2(x)))
        return x
@BACKBONES.register_module()
class XCiT(nn.Module):
    """
    Based on timm and DeiT code bases
    https://github.com/rwightman/pytorch-image-models/tree/master/timm
    https://github.com/facebookresearch/deit/

    XCiT backbone for dense prediction: single-scale ViT-style trunk whose
    intermediate feature maps (at ``out_indices``) are resampled into a
    4-level FPN pyramid.
    """
    def __init__(self, img_size=224, patch_size=16, in_chans=3, num_classes=80, embed_dim=768,
                 depth=12, num_heads=12, mlp_ratio=4., qkv_bias=True, qk_scale=None,
                 drop_rate=0., attn_drop_rate=0., drop_path_rate=0., norm_layer=None,
                 cls_attn_layers=2, use_pos=True, eta=None, tokens_norm=False,
                 out_indices=[3, 5, 7, 11]):
        """
        Args:
            img_size (int, tuple): input image size
            patch_size (int, tuple): patch size
            in_chans (int): number of input channels
            num_classes (int): number of classes for classification head
            embed_dim (int): embedding dimension
            depth (int): depth of transformer
            num_heads (int): number of attention heads
            mlp_ratio (int): ratio of mlp hidden dim to embedding dim
            qkv_bias (bool): enable bias for qkv if True
            qk_scale (float): override default qk scale of head_dim ** -0.5 if set
            drop_rate (float): dropout rate
            attn_drop_rate (float): attention dropout rate
            drop_path_rate (float): stochastic depth rate
            norm_layer: (nn.Module): normalization layer
            cls_attn_layers: (int) Depth of Class attention layers
            use_pos: (bool) whether to use positional encoding
            eta: (float) layerscale initialization value
            tokens_norm: (bool) Whether to normalize all tokens or just the cls_token in the CA
            out_indices: (list) Indices of layers from which FPN features are extracted
        """
        super().__init__()
        # NOTE(review): `out_indices` uses a mutable default list; it is never
        # mutated below, so the shared default is harmless here.
        self.num_classes = num_classes
        self.num_features = self.embed_dim = embed_dim
        norm_layer = norm_layer or partial(nn.LayerNorm, eps=1e-6)
        # Convolutional patch-embedding stem (stack of stride-2 convs).
        self.patch_embed = ConvPatchEmbed(img_size=img_size, embed_dim=embed_dim,
                                          patch_size=patch_size)
        num_patches = self.patch_embed.num_patches
        self.pos_drop = nn.Dropout(p=drop_rate)
        # Constant stochastic-depth rate for every block (no linear decay).
        dpr = [drop_path_rate for i in range(depth)]
        self.blocks = nn.ModuleList([
            XCABlock(
                dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias,
                qk_scale=qk_scale, drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i],
                norm_layer=norm_layer, num_tokens=num_patches, eta=eta)
            for i in range(depth)])
        self.pos_embeder = PositionalEncodingFourier(dim=embed_dim)
        self.use_pos = use_pos
        self.out_indices = out_indices
        # Resample the single-scale feature maps into a 4-level pyramid.
        # NOTE(review): no fpn layers are created for patch sizes other than
        # 8 or 16, so forward() would fail with AttributeError in that case.
        if patch_size == 16:
            # Strides relative to the 1/16 feature map: 4x up, 2x up, 1x, 2x down.
            self.fpn1 = nn.Sequential(
                nn.ConvTranspose2d(embed_dim, embed_dim, kernel_size=2, stride=2),
                nn.SyncBatchNorm(embed_dim),
                nn.GELU(),
                nn.ConvTranspose2d(embed_dim, embed_dim, kernel_size=2, stride=2),
            )
            self.fpn2 = nn.Sequential(
                nn.ConvTranspose2d(embed_dim, embed_dim, kernel_size=2, stride=2),
            )
            self.fpn3 = nn.Identity()
            self.fpn4 = nn.MaxPool2d(kernel_size=2, stride=2)
        elif patch_size == 8:
            # Strides relative to the 1/8 feature map: 2x up, 1x, 2x down, 4x down.
            self.fpn1 = nn.Sequential(
                nn.ConvTranspose2d(embed_dim, embed_dim, kernel_size=2, stride=2),
            )
            self.fpn2 = nn.Identity()
            self.fpn3 = nn.Sequential(
                nn.MaxPool2d(kernel_size=2, stride=2),
            )
            self.fpn4 = nn.Sequential(
                nn.MaxPool2d(kernel_size=4, stride=4),
            )
    @torch.jit.ignore
    def no_weight_decay(self):
        # Parameter names to be excluded from weight decay by the optimizer.
        return {'pos_embed', 'cls_token', 'dist_token'}
    def init_weights(self, pretrained=None):
        """Initialize the weights in backbone.
        Args:
            pretrained (str, optional): Path to pre-trained weights.
                Defaults to None.
        """
        def _init_weights(m):
            # Truncated-normal init for Linear; unit/zero init for LayerNorm.
            if isinstance(m, nn.Linear):
                trunc_normal_(m.weight, std=.02)
                if isinstance(m, nn.Linear) and m.bias is not None:
                    nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.LayerNorm):
                nn.init.constant_(m.bias, 0)
                nn.init.constant_(m.weight, 1.0)
        if isinstance(pretrained, str):
            # Random init first, then overwrite with the checkpoint weights.
            self.apply(_init_weights)
            logger = get_root_logger()
            load_checkpoint(self, pretrained, strict=False, logger=logger)
        elif pretrained is None:
            self.apply(_init_weights)
        else:
            raise TypeError('pretrained must be a str or None')
    def forward_features(self, x):
        B, C, H, W = x.shape
        x, (Hp, Wp) = self.patch_embed(x)
        # Positional encoding is always computed but only added when use_pos.
        pos_encoding = self.pos_embeder(B, Hp, Wp).reshape(B, -1, x.shape[1]).permute(0, 2, 1)
        if self.use_pos:
            x = x + pos_encoding
        x = self.pos_drop(x)
        features = []
        for i, blk in enumerate(self.blocks):
            x = blk(x, Hp, Wp)
            if i in self.out_indices:
                # Token sequence -> (B, C, Hp, Wp) feature map.
                xp = x.permute(0, 2, 1).reshape(B, -1, Hp, Wp)
                features.append(xp)
        # One FPN op per collected level; assumes len(out_indices) <= 4.
        ops = [self.fpn1, self.fpn2, self.fpn3, self.fpn4]
        for i in range(len(features)):
            features[i] = ops[i](features[i])
        return tuple(features)
    def forward(self, x):
        x = self.forward_features(x)
        return x
| 16,958 | 36.854911 | 100 | py |
xcit | xcit-main/semantic_segmentation/tools/test.py | # Copyright (c) 2015-present, Facebook, Inc.
# All rights reserved.
"""
Testing script modified from
https://github.com/open-mmlab/mmsegmentation
"""
import argparse
import os
import mmcv
import torch
from mmcv.parallel import MMDataParallel, MMDistributedDataParallel
from mmcv.runner import get_dist_info, init_dist, load_checkpoint
from mmcv.utils import DictAction
from mmseg.apis import multi_gpu_test, single_gpu_test
from mmseg.datasets import build_dataloader, build_dataset
from mmseg.models import build_segmentor
from backbone import xcit
def parse_args():
    """Parse the command-line options for mmseg testing/evaluation.

    Also mirrors ``--local_rank`` into the ``LOCAL_RANK`` environment
    variable so torch.distributed launchers keep working.
    """
    parser = argparse.ArgumentParser(
        description='mmseg test (and eval) a model')
    add = parser.add_argument
    add('config', help='test config file path')
    add('checkpoint', help='checkpoint file')
    add('--aug-test', action='store_true', help='Use Flip and Multi scale aug')
    add('--out', help='output result file in pickle format')
    add('--format-only', action='store_true',
        help='Format the output results without perform evaluation. It is'
        'useful when you want to format the result to a specific format and '
        'submit it to the test server')
    add('--eval', type=str, nargs='+',
        help='evaluation metrics, which depends on the dataset, e.g., "mIoU"'
        ' for generic datasets, and "cityscapes" for Cityscapes')
    add('--show', action='store_true', help='show results')
    add('--show-dir', help='directory where painted images will be saved')
    add('--gpu-collect', action='store_true',
        help='whether to use gpu to collect results.')
    add('--tmpdir',
        help='tmp directory used for collecting results from multiple '
        'workers, available when gpu_collect is not specified')
    add('--options', nargs='+', action=DictAction, help='custom options')
    add('--eval-options', nargs='+', action=DictAction,
        help='custom options for evaluation')
    add('--launcher', choices=['none', 'pytorch', 'slurm', 'mpi'],
        default='none', help='job launcher')
    add('--local_rank', type=int, default=0)
    args = parser.parse_args()
    # Propagate the launcher-provided rank if the env var is not already set.
    os.environ.setdefault('LOCAL_RANK', str(args.local_rank))
    return args
def main():
    """Build the test dataset/model from the config, run single- or
    multi-GPU inference, then save/format/evaluate the results.
    """
    args = parse_args()
    # At least one output action must be requested, otherwise the run is a no-op.
    assert args.out or args.eval or args.format_only or args.show \
        or args.show_dir, \
        ('Please specify at least one operation (save/eval/format/show the '
         'results / save the results) with the argument "--out", "--eval"'
         ', "--format-only", "--show" or "--show-dir"')
    if args.eval and args.format_only:
        raise ValueError('--eval and --format_only cannot be both specified')
    if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
        raise ValueError('The output file must be a pkl file.')
    cfg = mmcv.Config.fromfile(args.config)
    if args.options is not None:
        cfg.merge_from_dict(args.options)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    if args.aug_test:
        # hard code index
        # Multi-scale + flip test-time augmentation on pipeline step 1.
        cfg.data.test.pipeline[1].img_ratios = [
            0.5, 0.75, 1.0, 1.25, 1.5, 1.75
        ]
        cfg.data.test.pipeline[1].flip = True
    cfg.model.pretrained = None
    cfg.data.test.test_mode = True
    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)
    # build the dataloader
    # TODO: support multiple images per gpu (only minor changes are needed)
    dataset = build_dataset(cfg.data.test)
    data_loader = build_dataloader(
        dataset,
        samples_per_gpu=1,
        workers_per_gpu=cfg.data.workers_per_gpu,
        dist=distributed,
        shuffle=False)
    # build the model and load checkpoint
    cfg.model.train_cfg = None
    model = build_segmentor(cfg.model, test_cfg=cfg.get('test_cfg'))
    checkpoint = load_checkpoint(model, args.checkpoint, map_location='cpu')
    # Class names / palette come from the checkpoint metadata.
    model.CLASSES = checkpoint['meta']['CLASSES']
    model.PALETTE = checkpoint['meta']['PALETTE']
    efficient_test = False
    if args.eval_options is not None:
        efficient_test = args.eval_options.get('efficient_test', False)
    if not distributed:
        model = MMDataParallel(model, device_ids=[0])
        outputs = single_gpu_test(model, data_loader, args.show, args.show_dir,
                                  efficient_test)
    else:
        model = MMDistributedDataParallel(
            model.cuda(),
            device_ids=[torch.cuda.current_device()],
            broadcast_buffers=False)
        outputs = multi_gpu_test(model, data_loader, args.tmpdir,
                                 args.gpu_collect, efficient_test)
    # Only rank 0 writes/evaluates results.
    rank, _ = get_dist_info()
    if rank == 0:
        if args.out:
            print(f'\nwriting results to {args.out}')
            mmcv.dump(outputs, args.out)
        kwargs = {} if args.eval_options is None else args.eval_options
        if args.format_only:
            dataset.format_results(outputs, **kwargs)
        if args.eval:
            dataset.evaluate(outputs, args.eval, **kwargs)
# Script entry point.
if __name__ == '__main__':
    main()
| 5,618 | 34.789809 | 79 | py |
xcit | xcit-main/semantic_segmentation/tools/train.py | # Copyright (c) 2015-present, Facebook, Inc.
# All rights reserved.
"""
Training script modified from
https://github.com/open-mmlab/mmsegmentation
"""
import argparse
import copy
import os
import os.path as osp
import time
import mmcv
import torch
from mmcv.runner import init_dist
from mmcv.utils import Config, DictAction, get_git_hash
from mmseg import __version__
from mmseg.apis import set_random_seed, train_segmentor
from mmseg.datasets import build_dataset
from mmseg.models import build_segmentor
from mmseg.utils import collect_env, get_root_logger
from backbone import xcit
def parse_args():
    """Parse the command-line options for mmseg training.

    ``--gpus`` and ``--gpu-ids`` are mutually exclusive. Also mirrors
    ``--local_rank`` into the ``LOCAL_RANK`` environment variable.
    """
    parser = argparse.ArgumentParser(description='Train a segmentor')
    add = parser.add_argument
    add('config', help='train config file path')
    add('--work-dir', help='the dir to save logs and models')
    add('--load-from', help='the checkpoint file to load weights from')
    add('--resume-from', help='the checkpoint file to resume from')
    add('--no-validate', action='store_true',
        help='whether not to evaluate the checkpoint during training')
    group_gpus = parser.add_mutually_exclusive_group()
    group_gpus.add_argument(
        '--gpus', type=int,
        help='number of gpus to use '
        '(only applicable to non-distributed training)')
    group_gpus.add_argument(
        '--gpu-ids', type=int, nargs='+',
        help='ids of gpus to use '
        '(only applicable to non-distributed training)')
    add('--seed', type=int, default=None, help='random seed')
    add('--deterministic', action='store_true',
        help='whether to set deterministic options for CUDNN backend.')
    add('--options', nargs='+', action=DictAction, help='custom options')
    add('--launcher', choices=['none', 'pytorch', 'slurm', 'mpi'],
        default='none', help='job launcher')
    add('--local_rank', type=int, default=0)
    args = parser.parse_args()
    # Propagate the launcher-provided rank if the env var is not already set.
    os.environ.setdefault('LOCAL_RANK', str(args.local_rank))
    return args
def main():
    """Assemble the training config, datasets and segmentor, then launch
    (optionally distributed) training via ``train_segmentor``.
    """
    args = parse_args()
    cfg = Config.fromfile(args.config)
    if args.options is not None:
        cfg.merge_from_dict(args.options)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    # work_dir is determined in this priority: CLI > segment in file > filename
    if args.work_dir is not None:
        # update configs according to CLI args if args.work_dir is not None
        cfg.work_dir = args.work_dir
    elif cfg.get('work_dir', None) is None:
        # use config filename as default work_dir if cfg.work_dir is None
        cfg.work_dir = osp.join('./work_dirs',
                                osp.splitext(osp.basename(args.config))[0])
    if args.load_from is not None:
        cfg.load_from = args.load_from
    if args.resume_from is not None:
        cfg.resume_from = args.resume_from
    if args.gpu_ids is not None:
        cfg.gpu_ids = args.gpu_ids
    else:
        cfg.gpu_ids = range(1) if args.gpus is None else range(args.gpus)
    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)
    # create work_dir
    mmcv.mkdir_or_exist(osp.abspath(cfg.work_dir))
    # dump config
    cfg.dump(osp.join(cfg.work_dir, osp.basename(args.config)))
    # init the logger before other steps
    timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    log_file = osp.join(cfg.work_dir, f'{timestamp}.log')
    logger = get_root_logger(log_file=log_file, log_level=cfg.log_level)
    # init the meta dict to record some important information such as
    # environment info and seed, which will be logged
    meta = dict()
    # log env info
    env_info_dict = collect_env()
    env_info = '\n'.join([f'{k}: {v}' for k, v in env_info_dict.items()])
    dash_line = '-' * 60 + '\n'
    logger.info('Environment info:\n' + dash_line + env_info + '\n' +
                dash_line)
    meta['env_info'] = env_info
    # log some basic info
    logger.info(f'Distributed training: {distributed}')
    logger.info(f'Config:\n{cfg.pretty_text}')
    # set random seeds
    if args.seed is not None:
        logger.info(f'Set random seed to {args.seed}, deterministic: '
                    f'{args.deterministic}')
        set_random_seed(args.seed, deterministic=args.deterministic)
    cfg.seed = args.seed
    meta['seed'] = args.seed
    meta['exp_name'] = osp.basename(args.config)
    model = build_segmentor(
        cfg.model,
        train_cfg=cfg.get('train_cfg'),
        test_cfg=cfg.get('test_cfg'))
    logger.info(model)
    datasets = [build_dataset(cfg.data.train)]
    # A 2-stage workflow (train + val) needs a second dataset that reuses
    # the training pipeline.
    if len(cfg.workflow) == 2:
        val_dataset = copy.deepcopy(cfg.data.val)
        val_dataset.pipeline = cfg.data.train.pipeline
        datasets.append(build_dataset(val_dataset))
    if cfg.checkpoint_config is not None:
        # save mmseg version, config file content and class names in
        # checkpoints as meta data
        cfg.checkpoint_config.meta = dict(
            mmseg_version=f'{__version__}+{get_git_hash()[:7]}',
            config=cfg.pretty_text,
            CLASSES=datasets[0].CLASSES,
            PALETTE=datasets[0].PALETTE)
    # add an attribute for visualization convenience
    model.CLASSES = datasets[0].CLASSES
    train_segmentor(
        model,
        datasets,
        cfg,
        distributed=distributed,
        validate=(not args.no_validate),
        timestamp=timestamp,
        meta=meta)
# Script entry point.
if __name__ == '__main__':
    main()
| 5,885 | 33.22093 | 79 | py |
xcit | xcit-main/detection/backbone/xcit.py | # Copyright (c) 2015-present, Facebook, Inc.
# All rights reserved.
"""
Object detection and instance segmentation with XCiT backbone
Based on mmdet, timm and DeiT code bases
https://github.com/open-mmlab/mmdetection
https://github.com/rwightman/pytorch-image-models/tree/master/timm
https://github.com/facebookresearch/deit/
"""
import math
from functools import partial
import torch
import torch.nn as nn
import torch.nn.functional as F
from timm.models.vision_transformer import _cfg, Mlp
from timm.models.registry import register_model
from timm.models.layers import DropPath, trunc_normal_, to_2tuple
from mmcv.runner import load_checkpoint
from mmdet.utils import get_root_logger
from mmdet.models.builder import BACKBONES
class PositionalEncodingFourier(nn.Module):
    """Fourier (sine/cosine) positional encoding in the style of
    "Attention is all you Need", following the DeTR implementation
    https://github.com/facebookresearch/detr/blob/master/models/position_encoding.py
    """
    def __init__(self, hidden_dim=32, dim=768, temperature=10000):
        super().__init__()
        self.token_projection = nn.Conv2d(hidden_dim * 2, dim, kernel_size=1)
        self.scale = 2 * math.pi
        self.temperature = temperature
        self.hidden_dim = hidden_dim
        self.dim = dim
    def forward(self, B, H, W):
        """Return a (B, dim, H, W) positional-encoding tensor."""
        device = self.token_projection.weight.device
        # Every position is valid, so the cumulative sums yield 1-based
        # row/column coordinates.
        not_mask = torch.ones(B, H, W, dtype=torch.bool, device=device)
        y_embed = not_mask.cumsum(1, dtype=torch.float32)
        x_embed = not_mask.cumsum(2, dtype=torch.float32)
        eps = 1e-6
        # Normalize coordinates into [0, 2*pi].
        y_embed = y_embed / (y_embed[:, -1:, :] + eps) * self.scale
        x_embed = x_embed / (x_embed[:, :, -1:] + eps) * self.scale
        dim_t = torch.arange(self.hidden_dim, dtype=torch.float32, device=device)
        dim_t = self.temperature ** (2 * (dim_t // 2) / self.hidden_dim)
        pos_x = x_embed[:, :, :, None] / dim_t
        pos_y = y_embed[:, :, :, None] / dim_t
        # Interleave sin/cos over the frequency axis, then flatten it.
        pos_x = torch.stack((pos_x[:, :, :, 0::2].sin(),
                             pos_x[:, :, :, 1::2].cos()), dim=4).flatten(3)
        pos_y = torch.stack((pos_y[:, :, :, 0::2].sin(),
                             pos_y[:, :, :, 1::2].cos()), dim=4).flatten(3)
        pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2)
        # Project 2*hidden_dim channels up to the model dimension.
        return self.token_projection(pos)
def conv3x3(in_planes, out_planes, stride=1):
    """3x3 padded convolution (no bias) followed by SyncBatchNorm."""
    conv = nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
                     padding=1, bias=False)
    return torch.nn.Sequential(conv, nn.SyncBatchNorm(out_planes))
class ConvPatchEmbed(nn.Module):
    """Image to Patch Embedding using a stack of stride-2 3x3 convolutions.

    Supported patch sizes are 16 (four conv stages) and 8 (three conv
    stages); each stage halves the spatial resolution.
    """
    def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768):
        super().__init__()
        img_size = to_2tuple(img_size)
        patch_size = to_2tuple(patch_size)
        num_patches = (img_size[1] // patch_size[1]) * (img_size[0] // patch_size[0])
        self.img_size = img_size
        self.patch_size = patch_size
        self.num_patches = num_patches
        if patch_size[0] == 16:
            # 4 stride-2 stages -> 16x downsampling; GELU between stages.
            self.proj = torch.nn.Sequential(
                conv3x3(3, embed_dim // 8, 2),
                nn.GELU(),
                conv3x3(embed_dim // 8, embed_dim // 4, 2),
                nn.GELU(),
                conv3x3(embed_dim // 4, embed_dim // 2, 2),
                nn.GELU(),
                conv3x3(embed_dim // 2, embed_dim, 2),
            )
        elif patch_size[0] == 8:
            # 3 stride-2 stages -> 8x downsampling.
            self.proj = torch.nn.Sequential(
                conv3x3(3, embed_dim // 4, 2),
                nn.GELU(),
                conv3x3(embed_dim // 4, embed_dim // 2, 2),
                nn.GELU(),
                conv3x3(embed_dim // 2, embed_dim, 2),
            )
        else:
            # BUGFIX: the original `raise("...")` raised a bare string, which
            # triggers "TypeError: exceptions must derive from BaseException"
            # instead of showing the intended message.
            raise ValueError("For convolutional projection, patch size has to be in [8, 16]")
    def forward(self, x, padding_size=None):
        """Project an image batch to a token sequence.

        Returns:
            tuple: ``(tokens, (Hp, Wp))`` where tokens has shape
            (B, Hp*Wp, embed_dim) and (Hp, Wp) is the post-conv grid size.
        """
        B, C, H, W = x.shape
        x = self.proj(x)
        Hp, Wp = x.shape[2], x.shape[3]
        # (B, C, Hp, Wp) -> (B, Hp*Wp, C) token layout.
        x = x.flatten(2).transpose(1, 2)
        return x, (Hp, Wp)
class LPI(nn.Module):
    """Local Patch Interaction: explicit communication between tokens in
    3x3 neighbourhoods, complementing the channel-wise XCA attention.
    Two depthwise 3x3 convolutions with GELU and SyncBatchNorm in between.
    """
    def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU,
                 drop=0., kernel_size=3):
        super().__init__()
        out_features = out_features or in_features
        pad = kernel_size // 2
        # groups=out_features makes both convolutions depthwise (separable).
        self.conv1 = torch.nn.Conv2d(in_features, out_features, kernel_size=kernel_size,
                                     padding=pad, groups=out_features)
        self.act = act_layer()
        self.bn = nn.SyncBatchNorm(in_features)
        self.conv2 = torch.nn.Conv2d(in_features, out_features, kernel_size=kernel_size,
                                     padding=pad, groups=out_features)
    def forward(self, x, H, W):
        """Apply local mixing to a (B, N, C) token sequence laid out on an
        H x W grid; returns a tensor of the same shape."""
        B, N, C = x.shape
        # Tokens -> spatial feature map.
        feat = x.permute(0, 2, 1).reshape(B, C, H, W)
        feat = self.conv2(self.bn(self.act(self.conv1(feat))))
        # Spatial feature map -> tokens.
        return feat.reshape(B, C, N).permute(0, 2, 1)
class ClassAttention(nn.Module):
    """Class Attention Layer as in CaiT https://arxiv.org/abs/2103.17239

    Only the CLS token (position 0) attends to the patch tokens; all other
    tokens pass through unchanged.
    """
    def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0.):
        super().__init__()
        self.num_heads = num_heads
        head_dim = dim // num_heads
        self.scale = qk_scale or head_dim ** -0.5
        self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(dim, dim)
        self.proj_drop = nn.Dropout(proj_drop)
    def forward(self, x):
        B, N, C = x.shape
        head_dim = C // self.num_heads
        qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, head_dim).permute(2, 0, 3, 1, 4)
        q, k, v = qkv.unbind(0)
        # Attention scores of the CLS query against all keys.
        cls_q = q[:, :, 0:1]
        attn = (cls_q * k).sum(dim=-1) * self.scale
        attn = self.attn_drop(attn.softmax(dim=-1))
        # Weighted sum of values -> updated CLS token only.
        cls_tok = (attn.unsqueeze(2) @ v).transpose(1, 2).reshape(B, 1, C)
        cls_tok = self.proj_drop(self.proj(cls_tok))
        # Patch tokens are returned untouched.
        return torch.cat([cls_tok, x[:, 1:]], dim=1)
class ClassAttentionBlock(nn.Module):
    """Class Attention Layer as in CaiT https://arxiv.org/abs/2103.17239

    Residual block wrapping :class:`ClassAttention`: attention over all
    tokens followed by an MLP applied to the CLS token only, with optional
    LayerScale (``eta``) scaling on both residual branches.
    """
    def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0.,
                 attn_drop=0., drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm, eta=None,
                 tokens_norm=False):
        super().__init__()
        self.norm1 = norm_layer(dim)
        self.attn = ClassAttention(
            dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop,
            proj_drop=drop
        )
        self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
        self.norm2 = norm_layer(dim)
        mlp_hidden_dim = int(dim * mlp_ratio)
        self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer,
                       drop=drop)
        if eta is not None:     # LayerScale Initialization (no layerscale when None)
            self.gamma1 = nn.Parameter(eta * torch.ones(dim), requires_grad=True)
            self.gamma2 = nn.Parameter(eta * torch.ones(dim), requires_grad=True)
        else:
            # Plain scalars: multiplying by 1.0 is a no-op (no LayerScale).
            self.gamma1, self.gamma2 = 1.0, 1.0
        # FIXME: A hack for models pre-trained with layernorm over all the tokens not just the CLS
        self.tokens_norm = tokens_norm
    def forward(self, x, H, W, mask=None):
        x = x + self.drop_path(self.gamma1 * self.attn(self.norm1(x)))
        if self.tokens_norm:
            # Normalize every token (compatibility with some pre-trained models).
            x = self.norm2(x)
        else:
            # Normalize only the CLS token, in place.
            x[:, 0:1] = self.norm2(x[:, 0:1])
        x_res = x
        cls_token = x[:, 0:1]
        # The MLP branch acts on the CLS token only; patch tokens get a
        # zero update through the residual below.
        cls_token = self.gamma2 * self.mlp(cls_token)
        x = torch.cat([cls_token, x[:, 1:]], dim=1)
        x = x_res + self.drop_path(x)
        return x
class XCA(nn.Module):
    """Cross-Covariance Attention (XCA).

    Channels (rather than tokens) are updated by a weighted sum whose
    weights come from the softmax-normalized cross-covariance matrix
    Q^T K (a d_h x d_h matrix per head), scaled by a learnable
    per-head temperature.
    """
    def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0.):
        super().__init__()
        self.num_heads = num_heads
        self.temperature = nn.Parameter(torch.ones(num_heads, 1, 1))
        self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(dim, dim)
        self.proj_drop = nn.Dropout(proj_drop)
    def forward(self, x):
        B, N, C = x.shape
        qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads)
        q, k, v = qkv.permute(2, 0, 3, 1, 4).unbind(0)
        # Work channel-major: (B, heads, head_dim, N); L2-normalize q and k
        # along the token axis so the covariance entries are cosine-like.
        q = torch.nn.functional.normalize(q.transpose(-2, -1), dim=-1)
        k = torch.nn.functional.normalize(k.transpose(-2, -1), dim=-1)
        v = v.transpose(-2, -1)
        attn = (q @ k.transpose(-2, -1)) * self.temperature
        attn = self.attn_drop(attn.softmax(dim=-1))
        out = (attn @ v).permute(0, 3, 1, 2).reshape(B, N, C)
        return self.proj_drop(self.proj(out))
    @torch.jit.ignore
    def no_weight_decay(self):
        """The per-head temperature is exempt from weight decay."""
        return {'temperature'}
class XCABlock(nn.Module):
    """XCiT transformer block: XCA attention, Local Patch Interaction (LPI)
    and an MLP, each on its own pre-normed residual branch with LayerScale
    (``eta`` is the LayerScale init value and must not be None).
    """
    def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0.,
                 attn_drop=0., drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm,
                 num_tokens=196, eta=None):
        super().__init__()
        self.norm1 = norm_layer(dim)
        self.attn = XCA(dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale,
                        attn_drop=attn_drop, proj_drop=drop)
        self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
        self.norm2 = norm_layer(dim)
        self.mlp = Mlp(in_features=dim, hidden_features=int(dim * mlp_ratio),
                       act_layer=act_layer, drop=drop)
        self.norm3 = norm_layer(dim)
        self.local_mp = LPI(in_features=dim, act_layer=act_layer)
        # Per-branch LayerScale parameters.
        self.gamma1 = nn.Parameter(eta * torch.ones(dim), requires_grad=True)
        self.gamma2 = nn.Parameter(eta * torch.ones(dim), requires_grad=True)
        self.gamma3 = nn.Parameter(eta * torch.ones(dim), requires_grad=True)
    def forward(self, x, H, W):
        # Branch order: XCA -> LPI -> MLP.
        x = x + self.drop_path(self.gamma1 * self.attn(self.norm1(x)))
        x = x + self.drop_path(self.gamma3 * self.local_mp(self.norm3(x), H, W))
        x = x + self.drop_path(self.gamma2 * self.mlp(self.norm2(x)))
        return x
@BACKBONES.register_module()
class XCiT(nn.Module):
    """XCiT backbone for detection, emitting multi-scale FPN features.

    Based on timm and DeiT code bases
    https://github.com/rwightman/pytorch-image-models/tree/master/timm
    https://github.com/facebookresearch/deit/
    """
    def __init__(self, img_size=224, patch_size=16, in_chans=3, num_classes=80, embed_dim=768,
                 depth=12, num_heads=12, mlp_ratio=4., qkv_bias=True, qk_scale=None,
                 drop_rate=0., attn_drop_rate=0., drop_path_rate=0., norm_layer=None,
                 cls_attn_layers=2, use_pos=True, eta=None, tokens_norm=False,
                 out_indices=[3, 5, 7, 11]):
        """
        Args:
            img_size (int, tuple): input image size
            patch_size (int, tuple): patch size
            in_chans (int): number of input channels
            num_classes (int): number of classes for classification head
            embed_dim (int): embedding dimension
            depth (int): depth of transformer
            num_heads (int): number of attention heads
            mlp_ratio (int): ratio of mlp hidden dim to embedding dim
            qkv_bias (bool): enable bias for qkv if True
            qk_scale (float): override default qk scale of head_dim ** -0.5 if set
            drop_rate (float): dropout rate
            attn_drop_rate (float): attention dropout rate
            drop_path_rate (float): stochastic depth rate
            norm_layer: (nn.Module): normalization layer
            cls_attn_layers: (int) Depth of Class attention layers
            use_pos: (bool) whether to use positional encoding
            eta: (float) layerscale initialization value
            tokens_norm: (bool) Whether to normalize all tokens or just the cls_token in the CA
            out_indices: (list) Indices of layers from which FPN features are extracted
        """
        super().__init__()
        self.num_classes = num_classes
        self.num_features = self.embed_dim = embed_dim
        norm_layer = norm_layer or partial(nn.LayerNorm, eps=1e-6)
        self.patch_embed = ConvPatchEmbed(img_size=img_size, embed_dim=embed_dim,
                                          patch_size=patch_size)
        num_patches = self.patch_embed.num_patches
        self.pos_drop = nn.Dropout(p=drop_rate)
        # Constant (non-linearly-scaled) stochastic-depth rate per block.
        dpr = [drop_path_rate for i in range(depth)]
        self.blocks = nn.ModuleList([
            XCABlock(
                dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias,
                qk_scale=qk_scale, drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i],
                norm_layer=norm_layer, num_tokens=num_patches, eta=eta)
            for i in range(depth)])
        self.pos_embeder = PositionalEncodingFourier(dim=embed_dim)
        self.use_pos = use_pos
        self.out_indices = out_indices
        # FPN heads resample the single-stride trunk output to 4 scales:
        # patch_size 16 -> strides 4, 8, 16, 32; patch_size 8 -> 4, 8, 16, 32.
        if patch_size == 16:
            self.fpn1 = nn.Sequential(
                nn.ConvTranspose2d(embed_dim, embed_dim, kernel_size=2, stride=2),
                nn.SyncBatchNorm(embed_dim),
                nn.GELU(),
                nn.ConvTranspose2d(embed_dim, embed_dim, kernel_size=2, stride=2),
            )
            self.fpn2 = nn.Sequential(
                nn.ConvTranspose2d(embed_dim, embed_dim, kernel_size=2, stride=2),
            )
            self.fpn3 = nn.Identity()
            self.fpn4 = nn.MaxPool2d(kernel_size=2, stride=2)
        elif patch_size == 8:
            self.fpn1 = nn.Sequential(
                nn.ConvTranspose2d(embed_dim, embed_dim, kernel_size=2, stride=2),
            )
            self.fpn2 = nn.Identity()
            self.fpn3 = nn.Sequential(
                nn.MaxPool2d(kernel_size=2, stride=2),
            )
            self.fpn4 = nn.Sequential(
                nn.MaxPool2d(kernel_size=4, stride=4),
            )
    @torch.jit.ignore
    def no_weight_decay(self):
        # Parameter names the optimizer should exempt from weight decay.
        return {'pos_embed', 'cls_token', 'dist_token'}
    def init_weights(self, pretrained=None):
        """Initialize the weights in backbone.
        Args:
            pretrained (str, optional): Path to pre-trained weights.
                Defaults to None.
        """
        def _init_weights(m):
            # Truncated-normal Linear weights, zero biases; unit LayerNorm.
            if isinstance(m, nn.Linear):
                trunc_normal_(m.weight, std=.02)
                if isinstance(m, nn.Linear) and m.bias is not None:
                    nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.LayerNorm):
                nn.init.constant_(m.bias, 0)
                nn.init.constant_(m.weight, 1.0)
        if isinstance(pretrained, str):
            self.apply(_init_weights)
            logger = get_root_logger()
            load_checkpoint(self, pretrained, strict=False, logger=logger)
        elif pretrained is None:
            self.apply(_init_weights)
        else:
            raise TypeError('pretrained must be a str or None')
    def forward_features(self, x):
        # Tokenize, add Fourier positional encoding, run all XCA blocks and
        # collect intermediate maps at self.out_indices for the FPN heads.
        B, C, H, W = x.shape
        x, (Hp, Wp) = self.patch_embed(x)
        pos_encoding = self.pos_embeder(B, Hp, Wp).reshape(B, -1, x.shape[1]).permute(0, 2, 1)
        if self.use_pos:
            x = x + pos_encoding
        x = self.pos_drop(x)
        features = []
        for i, blk in enumerate(self.blocks):
            x = blk(x, Hp, Wp)
            if i in self.out_indices:
                # Tokens back to a (B, C, Hp, Wp) spatial map.
                xp = x.permute(0, 2, 1).reshape(B, -1, Hp, Wp)
                features.append(xp)
        ops = [self.fpn1, self.fpn2, self.fpn3, self.fpn4]
        for i in range(len(features)):
            features[i] = ops[i](features[i])
        return tuple(features)
    def forward(self, x):
        x = self.forward_features(x)
        return x
| 16,955 | 36.848214 | 100 | py |
xcit | xcit-main/detection/tools/test.py | # Copyright (c) 2015-present, Facebook, Inc.
# All rights reserved.
"""
Testing script modified from
https://github.com/open-mmlab/mmdetection
"""
import argparse
import os
import warnings
import mmcv
import torch
from mmcv import Config, DictAction
from mmcv.cnn import fuse_conv_bn
from mmcv.parallel import MMDataParallel, MMDistributedDataParallel
from mmcv.runner import (get_dist_info, init_dist, load_checkpoint,
wrap_fp16_model)
from mmdet.apis import multi_gpu_test, single_gpu_test
from mmdet.datasets import (build_dataloader, build_dataset,
replace_ImageToTensor)
from mmdet.models import build_detector, build_backbone
from backbone import xcit
def parse_args():
    """Parse command-line options for MMDet testing/evaluation.

    Mirrors ``--local_rank`` into ``LOCAL_RANK`` and migrates the
    deprecated ``--options`` flag to ``--eval-options``.
    """
    parser = argparse.ArgumentParser(
        description='MMDet test (and eval) a model')
    add = parser.add_argument
    add('config', help='test config file path')
    add('checkpoint', help='checkpoint file')
    add('--out', help='output result file in pickle format')
    add('--fuse-conv-bn', action='store_true',
        help='Whether to fuse conv and bn, this will slightly increase'
        'the inference speed')
    add('--format-only', action='store_true',
        help='Format the output results without perform evaluation. It is'
        'useful when you want to format the result to a specific format and '
        'submit it to the test server')
    add('--eval', type=str, nargs='+',
        help='evaluation metrics, which depends on the dataset, e.g., "bbox",'
        ' "segm", "proposal" for COCO, and "mAP", "recall" for PASCAL VOC')
    add('--show', action='store_true', help='show results')
    add('--show-dir', help='directory where painted images will be saved')
    add('--show-score-thr', type=float, default=0.3,
        help='score threshold (default: 0.3)')
    add('--gpu-collect', action='store_true',
        help='whether to use gpu to collect results.')
    add('--tmpdir',
        help='tmp directory used for collecting results from multiple '
        'workers, available when gpu-collect is not specified')
    add('--cfg-options', nargs='+', action=DictAction,
        help='override some settings in the used config, the key-value pair '
        'in xxx=yyy format will be merged into config file. If the value to '
        'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
        'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
        'Note that the quotation marks are necessary and that no white space '
        'is allowed.')
    add('--options', nargs='+', action=DictAction,
        help='custom options for evaluation, the key-value pair in xxx=yyy '
        'format will be kwargs for dataset.evaluate() function (deprecate), '
        'change to --eval-options instead.')
    add('--eval-options', nargs='+', action=DictAction,
        help='custom options for evaluation, the key-value pair in xxx=yyy '
        'format will be kwargs for dataset.evaluate() function')
    add('--launcher', choices=['none', 'pytorch', 'slurm', 'mpi'],
        default='none', help='job launcher')
    add('--local_rank', type=int, default=0)
    args = parser.parse_args()
    # Propagate the launcher-provided rank if the env var is not already set.
    os.environ.setdefault('LOCAL_RANK', str(args.local_rank))
    if args.options and args.eval_options:
        raise ValueError(
            '--options and --eval-options cannot be both '
            'specified, --options is deprecated in favor of --eval-options')
    if args.options:
        # Deprecated flag: warn and fold into --eval-options.
        warnings.warn('--options is deprecated in favor of --eval-options')
        args.eval_options = args.options
    return args
def main():
    """Build the test dataset/detector from the config, run single- or
    multi-GPU inference, then save/format/evaluate the results.
    """
    args = parse_args()
    # At least one output action must be requested, otherwise the run is a no-op.
    assert args.out or args.eval or args.format_only or args.show \
        or args.show_dir, \
        ('Please specify at least one operation (save/eval/format/show the '
         'results / save the results) with the argument "--out", "--eval"'
         ', "--format-only", "--show" or "--show-dir"')
    if args.eval and args.format_only:
        raise ValueError('--eval and --format_only cannot be both specified')
    if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
        raise ValueError('The output file must be a pkl file.')
    cfg = Config.fromfile(args.config)
    if args.cfg_options is not None:
        cfg.merge_from_dict(args.cfg_options)
    # import modules from string list.
    if cfg.get('custom_imports', None):
        from mmcv.utils import import_modules_from_strings
        import_modules_from_strings(**cfg['custom_imports'])
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    cfg.model.pretrained = None
    # Strip pretrained paths from any RFP backbone inside the neck(s) so
    # nothing is re-downloaded at test time.
    if cfg.model.get('neck'):
        if isinstance(cfg.model.neck, list):
            for neck_cfg in cfg.model.neck:
                if neck_cfg.get('rfp_backbone'):
                    if neck_cfg.rfp_backbone.get('pretrained'):
                        neck_cfg.rfp_backbone.pretrained = None
        elif cfg.model.neck.get('rfp_backbone'):
            if cfg.model.neck.rfp_backbone.get('pretrained'):
                cfg.model.neck.rfp_backbone.pretrained = None
    # in case the test dataset is concatenated
    samples_per_gpu = 1
    if isinstance(cfg.data.test, dict):
        cfg.data.test.test_mode = True
        samples_per_gpu = cfg.data.test.pop('samples_per_gpu', 1)
        if samples_per_gpu > 1:
            # Replace 'ImageToTensor' to 'DefaultFormatBundle'
            cfg.data.test.pipeline = replace_ImageToTensor(
                cfg.data.test.pipeline)
    elif isinstance(cfg.data.test, list):
        for ds_cfg in cfg.data.test:
            ds_cfg.test_mode = True
        samples_per_gpu = max(
            [ds_cfg.pop('samples_per_gpu', 1) for ds_cfg in cfg.data.test])
        if samples_per_gpu > 1:
            for ds_cfg in cfg.data.test:
                ds_cfg.pipeline = replace_ImageToTensor(ds_cfg.pipeline)
    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)
    # build the dataloader
    dataset = build_dataset(cfg.data.test)
    data_loader = build_dataloader(
        dataset,
        samples_per_gpu=samples_per_gpu,
        workers_per_gpu=cfg.data.workers_per_gpu,
        dist=distributed,
        shuffle=False)
    # build the model and load checkpoint
    cfg.model.train_cfg = None
    model = build_detector(cfg.model, test_cfg=cfg.get('test_cfg'))
    fp16_cfg = cfg.get('fp16', None)
    if fp16_cfg is not None:
        wrap_fp16_model(model)
    checkpoint = load_checkpoint(model, args.checkpoint, map_location='cpu')
    if args.fuse_conv_bn:
        model = fuse_conv_bn(model)
    # old versions did not save class info in checkpoints, this walkaround is
    # for backward compatibility
    if 'CLASSES' in checkpoint.get('meta', {}):
        model.CLASSES = checkpoint['meta']['CLASSES']
    else:
        model.CLASSES = dataset.CLASSES
    if not distributed:
        model = MMDataParallel(model, device_ids=[0])
        outputs = single_gpu_test(model, data_loader, args.show, args.show_dir,
                                  args.show_score_thr)
    else:
        model = MMDistributedDataParallel(
            model.cuda(),
            device_ids=[torch.cuda.current_device()],
            broadcast_buffers=False)
        outputs = multi_gpu_test(model, data_loader, args.tmpdir,
                                 args.gpu_collect)
    # Only rank 0 writes/evaluates results.
    rank, _ = get_dist_info()
    if rank == 0:
        if args.out:
            print(f'\nwriting results to {args.out}')
            mmcv.dump(outputs, args.out)
        kwargs = {} if args.eval_options is None else args.eval_options
        if args.format_only:
            dataset.format_results(outputs, **kwargs)
        if args.eval:
            eval_kwargs = cfg.get('evaluation', {}).copy()
            # hard-code way to remove EvalHook args
            for key in [
                    'interval', 'tmpdir', 'start', 'gpu_collect', 'save_best',
                    'rule'
            ]:
                eval_kwargs.pop(key, None)
            eval_kwargs.update(dict(metric=args.eval, **kwargs))
            print(dataset.evaluate(outputs, **eval_kwargs))
# Script entry point.
if __name__ == '__main__':
    main()
| 8,846 | 37.633188 | 79 | py |
xcit | xcit-main/detection/tools/train.py | # Copyright (c) 2015-present, Facebook, Inc.
# All rights reserved.
"""
Training script modified from
https://github.com/open-mmlab/mmdetection
"""
import argparse
import copy
import os
import os.path as osp
import time
import warnings
import mmcv
import torch
from mmcv import Config, DictAction
from mmcv.runner import get_dist_info, init_dist
from mmcv.utils import get_git_hash
from mmdet import __version__
from mmdet.apis import set_random_seed, train_detector
from mmdet.datasets import build_dataset
from mmdet.models import build_detector
from mmdet.utils import collect_env, get_root_logger
from backbone import xcit
def parse_args():
    """Parse command-line options for the detector training entry point.

    Returns the parsed ``argparse.Namespace``. Side effects: makes sure the
    ``LOCAL_RANK`` environment variable is populated, and folds the
    deprecated ``--options`` flag into ``--cfg-options`` (raising if both
    were supplied).
    """
    ap = argparse.ArgumentParser(description='Train a detector')
    ap.add_argument('config', help='train config file path')
    ap.add_argument('--work-dir', help='the dir to save logs and models')
    ap.add_argument(
        '--resume-from', help='the checkpoint file to resume from')
    ap.add_argument(
        '--no-validate',
        action='store_true',
        help='whether not to evaluate the checkpoint during training')
    # --gpus and --gpu-ids are two mutually exclusive ways of picking devices.
    gpu_group = ap.add_mutually_exclusive_group()
    gpu_group.add_argument(
        '--gpus',
        type=int,
        help='number of gpus to use '
        '(only applicable to non-distributed training)')
    gpu_group.add_argument(
        '--gpu-ids',
        type=int,
        nargs='+',
        help='ids of gpus to use '
        '(only applicable to non-distributed training)')
    ap.add_argument('--seed', type=int, default=None, help='random seed')
    ap.add_argument(
        '--deterministic',
        action='store_true',
        help='whether to set deterministic options for CUDNN backend.')
    ap.add_argument(
        '--options',
        nargs='+',
        action=DictAction,
        help='override some settings in the used config, the key-value pair '
        'in xxx=yyy format will be merged into config file (deprecate), '
        'change to --cfg-options instead.')
    ap.add_argument(
        '--cfg-options',
        nargs='+',
        action=DictAction,
        help='override some settings in the used config, the key-value pair '
        'in xxx=yyy format will be merged into config file. If the value to '
        'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
        'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
        'Note that the quotation marks are necessary and that no white space '
        'is allowed.')
    ap.add_argument(
        '--launcher',
        choices=['none', 'pytorch', 'slurm', 'mpi'],
        default='none',
        help='job launcher')
    ap.add_argument('--local_rank', type=int, default=0)
    args = ap.parse_args()
    # torch.distributed launchers communicate the rank via this env variable.
    os.environ.setdefault('LOCAL_RANK', str(args.local_rank))
    if args.options and args.cfg_options:
        raise ValueError(
            '--options and --cfg-options cannot be both '
            'specified, --options is deprecated in favor of --cfg-options')
    if args.options:
        warnings.warn('--options is deprecated in favor of --cfg-options')
        args.cfg_options = args.options
    return args
def main():
    """Training entry point.

    Order matters here: the distributed environment must be initialised
    before the logger (rank info), and the work dir must exist before the
    config dump and log file are written.
    """
    args = parse_args()
    cfg = Config.fromfile(args.config)
    if args.cfg_options is not None:
        cfg.merge_from_dict(args.cfg_options)
    # import modules from string list.
    if cfg.get('custom_imports', None):
        from mmcv.utils import import_modules_from_strings
        import_modules_from_strings(**cfg['custom_imports'])
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    # work_dir is determined in this priority: CLI > segment in file > filename
    if args.work_dir is not None:
        # update configs according to CLI args if args.work_dir is not None
        cfg.work_dir = args.work_dir
    elif cfg.get('work_dir', None) is None:
        # use config filename as default work_dir if cfg.work_dir is None
        cfg.work_dir = osp.join('./work_dirs',
                                osp.splitext(osp.basename(args.config))[0])
    if args.resume_from is not None:
        cfg.resume_from = args.resume_from
    if args.gpu_ids is not None:
        cfg.gpu_ids = args.gpu_ids
    else:
        # default to a single GPU when neither --gpus nor --gpu-ids is given
        cfg.gpu_ids = range(1) if args.gpus is None else range(args.gpus)
    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)
        # re-set gpu_ids with distributed training mode
        _, world_size = get_dist_info()
        cfg.gpu_ids = range(world_size)
    # create work_dir
    mmcv.mkdir_or_exist(osp.abspath(cfg.work_dir))
    # dump config
    cfg.dump(osp.join(cfg.work_dir, osp.basename(args.config)))
    # init the logger before other steps
    timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    log_file = osp.join(cfg.work_dir, f'{timestamp}.log')
    logger = get_root_logger(log_file=log_file, log_level=cfg.log_level)
    # init the meta dict to record some important information such as
    # environment info and seed, which will be logged
    meta = dict()
    # log env info
    env_info_dict = collect_env()
    env_info = '\n'.join([(f'{k}: {v}') for k, v in env_info_dict.items()])
    dash_line = '-' * 60 + '\n'
    logger.info('Environment info:\n' + dash_line + env_info + '\n' +
                dash_line)
    meta['env_info'] = env_info
    meta['config'] = cfg.pretty_text
    # log some basic info
    logger.info(f'Distributed training: {distributed}')
    logger.info(f'Config:\n{cfg.pretty_text}')
    # set random seeds
    if args.seed is not None:
        logger.info(f'Set random seed to {args.seed}, '
                    f'deterministic: {args.deterministic}')
        set_random_seed(args.seed, deterministic=args.deterministic)
    cfg.seed = args.seed
    meta['seed'] = args.seed
    meta['exp_name'] = osp.basename(args.config)
    model = build_detector(
        cfg.model,
        train_cfg=cfg.get('train_cfg'),
        test_cfg=cfg.get('test_cfg'))
    # when workflow is [train, val], validation runs with the train pipeline
    datasets = [build_dataset(cfg.data.train)]
    if len(cfg.workflow) == 2:
        val_dataset = copy.deepcopy(cfg.data.val)
        val_dataset.pipeline = cfg.data.train.pipeline
        datasets.append(build_dataset(val_dataset))
    if cfg.checkpoint_config is not None:
        # save mmdet version, config file content and class names in
        # checkpoints as meta data
        cfg.checkpoint_config.meta = dict(
            mmdet_version=__version__ + get_git_hash()[:7],
            CLASSES=datasets[0].CLASSES)
    # add an attribute for visualization convenience
    model.CLASSES = datasets[0].CLASSES
    train_detector(
        model,
        datasets,
        cfg,
        distributed=distributed,
        validate=(not args.no_validate),
        timestamp=timestamp,
        meta=meta)
# Script entry point: delegates everything to main() above.
if __name__ == '__main__':
    main()
| 7,064 | 35.045918 | 79 | py |
psi_2022 | psi_2022-main/plotting.py | """
Use with saved model.
Produces the current sweep and latent space plots
"""
from src.models import DIVA
from src.data import MemMapDataset_O
from src.data._utils import get_dataloaders
from src.common.utils import load_model
from src.common.physics_approximations import *
import torch
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
MP_names_JET = ['BTF', 'D_tot', 'IpiFP', 'PNBI_TOT', 'P_OH', 'PICR_TOT', 'k', 'delRoben', 'delRuntn', 'ahor', 'Rgeo', 'q95', 'Vol', 'elm_timings']
def main(model_name):
    """Rebuild the saved DIVA model from a checkpoint and produce both
    diagnostic figures (current sweep and latent-space map)."""
    state_dict, hparams, dataset = load_model(model_name)
    diva = DIVA(**hparams)
    diva.load_state_dict(state_dict)
    diva.double()
    current_sweep_plot(diva, dataset)
    latent_space_plot(diva, dataset)
def latent_space_plot(model, dataset):
    """Render a 2-D sweep of the machine latent space, coloured by the
    inferred separatrix density n_e^sep.

    Half of the dataset's pulses are encoded to estimate the mean latent
    vector; latent dims ``ld_1``/``ld_2`` are then varied over a regular
    grid (all other dims held at their mean), each grid point is decoded
    to a profile, and the resulting n_e^sep map is shown.

    Cleanup vs. the previous revision (figure is unchanged): removed dead
    locals (`a, b`, the unused nearest-grid-point search / `sample_1`),
    a duplicate `image_array` allocation, and the unused `z2mp` decode.
    """
    z_mach_all, z_stoch_all = [], []
    # Encode the first half of the pulses to estimate mean latent vectors.
    for idx in range(dataset.total_num_pulses // 2):
        sample_profs = torch.from_numpy(dataset.data['profs'][idx].copy())
        sample_profs_norm = dataset.norm_profiles(sample_profs)
        with torch.no_grad():
            _, z_mach, z_stoch, *_ = model.prof2z(sample_profs_norm)
        z_mach_all.extend(z_mach)
        z_stoch_all.extend(z_stoch)
    z_mach_all, z_stoch_all = torch.vstack(z_mach_all), torch.vstack(z_stoch_all)
    image_res = 512
    sample_size = image_res ** 2  # one decoded profile per pixel (2-D grid)
    r1, r2 = -5, 5  # latent range swept along both axes
    ld_1, ld_2 = 0, 2  # machine-latent dims varied over the grid
    axis = torch.linspace(start=r1, end=r2, steps=image_res)
    range_xy = torch.cartesian_prod(axis, axis)
    # integer pixel coordinates so we can easily go back to image space
    coords = torch.linspace(0, image_res - 1, steps=image_res, dtype=torch.int32)
    range_imagecoord = torch.cartesian_prod(coords, coords)
    z_mach_mean, z_stoch_mean = z_mach_all.mean(0), z_stoch_all.mean(0)
    z_mach_sample = torch.tile(z_mach_mean, (sample_size, 1))
    z_stoch_sample = torch.tile(z_stoch_mean, (sample_size, 1))
    z_mach_sample[:, ld_1] = range_xy[:, 0]
    z_mach_sample[:, ld_2] = range_xy[:, 1]
    with torch.no_grad():
        z_conditional = torch.cat((z_mach_sample, z_stoch_sample), 1)
        out_profs = model.z2prof(z_conditional)
    sample_profs = dataset.denorm_profiles(out_profs)
    # Only n_e^sep is used for the colour map. NOTE(review): slices where
    # find_tesep detects no Te=100eV crossing carry undefined values.
    _, sample_neseps, _ = find_tesep(sample_profs)
    image_array = np.zeros((image_res, image_res))
    for i in range(range_imagecoord.shape[0]):
        _x, _y = range_imagecoord[i]
        _y = image_res - 1 - _y  # (0, 0) for img are on top left so reverse
        image_array[_y, _x] = sample_neseps[i]
    fig, ls_ax = plt.subplots(constrained_layout=True)
    cmap = mpl.cm.plasma
    norm = mpl.colors.Normalize(vmin=0, vmax=1e20)
    # --- latent space plot ---
    cax = ls_ax.imshow(np.rot90(image_array, 3), extent=[r1, r2, r1, r2], cmap=cmap, norm=norm, interpolation='spline36')
    fig.colorbar(cax, ax=ls_ax, label='Inferred $n_e^{sep}$ [m$^{-3}$]', location='left')
    # NOTE(review): labels name dims 4 and 6 but ld_1/ld_2 are 0 and 2 —
    # confirm which indexing convention the published figure uses.
    ls_ax.set_xlabel('Latent Dimension 4')
    ls_ax.set_ylabel('Latent Dimension 6')
    plt.show()
def current_sweep_plot(model, dataset):
    """Plot decoded density profiles while sweeping the plasma current from
    1 MA to 5 MA, with every other machine parameter held at its mean over
    the first half of the dataset's pulses."""
    mp_rows = []
    for pulse_idx in range(dataset.total_num_pulses // 2):
        mp_rows.append(torch.from_numpy(dataset.data['mps'][pulse_idx].copy()))
    mean_mp = torch.vstack(mp_rows).mean(0)
    n_samples = 1000
    current_sweep = torch.linspace(1e6, 5e6, n_samples)
    mp_batch = torch.tile(mean_mp, (n_samples, 1))
    # column 2 receives the swept current (presumably IpiFP per
    # MP_names_JET — confirm against the dataset ordering)
    mp_batch[:, 2] = current_sweep
    cmap = mpl.cm.viridis
    norm = mpl.colors.Normalize(vmin=0, vmax=6e6)
    with torch.no_grad():
        out_profs_norm, _, _ = model.inference(dataset.norm_mps(mp_batch), from_mean=False)
    out_profs = dataset.denorm_profiles(out_profs_norm)
    fig = plt.figure()
    for sample_idx, sample in enumerate(out_profs):
        plt.plot(sample[0], color=cmap(norm(current_sweep[sample_idx])))
    fig.colorbar(mpl.cm.ScalarMappable(norm=norm, cmap=cmap), orientation='horizontal', label='$I_P$ [MA]')
    plt.show()
# Script entry point: plots from the checkpoint written by train.py's
# save_model (MODEL_NAME + '.pth').
if __name__ == '__main__':
    model_name = 'DIVA.pth'
    main(model_name)
psi_2022 | psi_2022-main/train.py | from src.models import DIVA
from src.data import MemMapDataset_O
from src.data._utils import initialize_dataloaders
from src.common.utils import save_model
from typing import List
import os
import timeit
import torch
import numpy as np
from tqdm import tqdm
""" Training Paramters """
EPOCHS: int = 100 # Around 50 it starts getting okay
BATCH_SIZE: int = 512 # 512
LR: float = 0.01 # 0.01
""" Model Paramters """
MACH_LATENT_DIM:int = 10 # 8
STOCH_LATENT_DIM:int = 3 # 3
CONV_FILTER_SIZES: List[int] = [2, 4, 6] # [2, 4, 6]
MP_REG_LAYER_WIDTH: int = 20 # 40
MP_REG_LAYER_DEPTH: int = 4 # 6
MP_REG_LAYER_SIZES: List[int] = [MP_REG_LAYER_WIDTH]*MP_REG_LAYER_DEPTH
"""
DIVA Specific KL-divergences
_STOCH : applied to Z_stoch
_COND_SUP : applied to mach_cond vs mach_enc
_COND_UNSUP : applied to mach_enc when unsupervised
"""
BETA_STOCH: float = 0.5 # 0.8
BETA_COND_SUP: float = 1.0 # 1.0
BETA_COND_UNSUP: float = 0.001 # 0.0086
"""
Loss function parameterization
GAMMA : applied to reconstructions of profiles and machine parameters
LAMBDA : applied to the physics losses
"""
PHYSICS: bool = True
GAMMA_PROF: float = 1200.0 # 550
GAMMA_MP: float = 20.0 # 2.6618
LAMBDA_PRESSURE: float = 500.0 # 55.0
LAMBDA_BETA: float= 1000.0 # 10.0
LAMDBA_BPOL: float = 100.0 # 31.0
""" Scheduler paramterizations """
SCHEDULER_STEP_SIZE: int = 10 # 10
SCHEDULER_GAMMA: int = 0.9 # 0.99
""" Save Information """
MODEL_NAME = 'DIVA'
# ['BTF', 'D_tot', 'N_tot', 'IpiFP', 'PNBI_TOT', 'PICR_TOT', 'PECR_TOT', 'k', 'delRoben', 'delRuntn', 'ahor', 'Rgeo', 'q95', 'Vol']
hparams = dict(mach_latent_dim=MACH_LATENT_DIM, stoch_latent_dim=STOCH_LATENT_DIM,
conv_filter_sizes=CONV_FILTER_SIZES, mp_layer_sizes=MP_REG_LAYER_SIZES,
GAMMA_PROF=GAMMA_PROF, GAMMA_MP=GAMMA_MP,
BETA_KLD_COND=BETA_COND_SUP, BETA_KLD_STOCH=BETA_STOCH, BETA_KLD_MACH=BETA_COND_UNSUP,
LAMBDA_PRESSURE=LAMBDA_PRESSURE, LAMBDA_BETA=LAMBDA_BETA, LAMDBA_BPOL=LAMDBA_BPOL,
physics=PHYSICS, model_name=MODEL_NAME)
def main(base_data_path: str = None):
    """Train DIVA on the memmapped pulse dataset, checkpointing whenever
    the validation loss improves.

    Parameters
    ----------
    base_data_path : str, optional
        Root directory of the JET pulse arrays handed to MemMapDataset_O.
    """
    dataset = MemMapDataset_O(data_path=base_data_path)
    hparams['out_length'] = dataset.prof_length
    hparams['action_dim'] = dataset.mp_dim
    model = DIVA(**hparams)
    model.double()
    # Bug fix: `model.parameters(9)` passed 9 as the `recurse` flag; the
    # default call hands all (recursive) parameters to the optimizer.
    optimizer = torch.optim.Adam(model.parameters(), lr=LR)
    train_dl, val_dl, test_dl = initialize_dataloaders(dataset, batch_size=BATCH_SIZE)
    training_loss, best_val_loss = [], np.inf
    for epoch in range(EPOCHS):
        epoch_loss, epoch_loss_dict = train_epoch(model, optimizer, train_dl, dataset)
        training_loss.append(epoch_loss)
        if epoch % 5 == 0:
            val_loss, _ = test_epoch(model, val_dl, dataset)
            if val_loss < best_val_loss:
                # Bug fix: best_val_loss was never updated, so every 5th
                # epoch overwrote the checkpoint regardless of quality.
                best_val_loss = val_loss
                print(f'Epoch: {epoch}\nNew best val loss: {val_loss:.4}, saving model')
                save_model(model, hparams, dataset)
def train_epoch(model, optimizer, loader, dataset):
    """Run one optimisation pass over `loader`.

    Returns (epoch_loss, loss_dict): the sum over batches of
    loss / len(loader), and the (already `loss`-popped) loss dictionary
    from the final batch.
    """
    running_loss = 0.0
    num_batches = len(loader)
    for step_idx, batch in enumerate(loader):
        profs, mps = batch
        predictions = model.forward(profs, mps)
        loss_dict = model.loss_function(batch, predictions, dataset, step=step_idx)
        batch_loss = loss_dict.pop('loss') / num_batches
        batch_loss.backward()
        optimizer.step()
        optimizer.zero_grad()
        running_loss += batch_loss.item()
    return running_loss, loss_dict
def test_epoch(model, loader, dataset):
epoch_loss = 0.0
with torch.no_grad():
for steps, batch in enumerate(loader):
batch_profs, batch_mps = batch
preds = model.forward(batch_profs, batch_mps)
loss_dict = model.loss_function(batch, preds, dataset, step=steps)
loss = loss_dict.pop('loss') / len(loader)
epoch_loss += loss.item()
return epoch_loss, loss_dict
# Script entry point. NOTE(review): requires the PROC_DIR environment
# variable — os.getenv returns None when unset and os.path.join then raises.
if __name__ == '__main__':
    data_path: str = os.path.join(os.getenv('PROC_DIR'), 'JET_PDB_PULSE_ARRAYS')
    main(data_path)
psi_2022 | psi_2022-main/src/common/utils.py | import torch
def save_model(model, hparams, dataset):
    """Serialise weights, hyperparameters and the dataset object to
    ./<model_name>.pth in the current working directory."""
    checkpoint = {
        'state_dict': model.state_dict(),
        'hparams': hparams,
        'dataset': dataset,
    }
    target = './' + hparams['model_name'] + '.pth'
    torch.save(checkpoint, target)
def load_model(model_name):
    """Load a checkpoint written by `save_model`.

    Returns the tuple (state_dict, hparams, dataset).
    """
    checkpoint = torch.load(f'./{model_name}')
    return checkpoint['state_dict'], checkpoint['hparams'], checkpoint['dataset']
psi_2022 | psi_2022-main/src/common/physics_approximations.py | import torch
import numpy as np
boltzmann_constant = 1.380e-23
mu_0 = 1.256e-6
def find_tesep(profs):
    """Estimate separatrix quantities by locating the Te = 100 eV crossing.

    For each profile slice, walks outward until Te drops to/below 100 eV and
    linearly interpolates ne and Te around that point.

    Parameters
    ----------
    profs : array-like of shape (N, 2, L) — channel 0 is ne, channel 1 is Te.
        (assumed from the indexing below — confirm against callers)

    Returns
    -------
    (teseps, neseps, rseps) : three float arrays of length N. Slices where
    no crossing is found within the first 50 points are left at 0.
    """
    ne, te = profs[:, 0], profs[:, 1]
    n_slices = te.shape[0]
    # Bug fix: np.empty left uninitialised garbage in every slot the loop
    # skipped (no-crossing slices); np.zeros makes those entries a
    # deterministic 0 instead.
    teseps = np.zeros(n_slices)
    neseps = np.zeros(n_slices)
    rseps = np.zeros(n_slices)
    for k, (ne_slice, te_slice) in enumerate(zip(ne, te)):
        l_idx, r_idx = 0, 1
        while te_slice[r_idx] > 100:
            l_idx += 1
            r_idx += 1
            if r_idx == 50:
                break
        if r_idx == 50:
            # No crossing within the first 50 points: leave zeros.
            continue
        weights_r, weights_l = get_weights(te_slice, l_idx, r_idx)
        teseps[k] = weights_l*te_slice[l_idx] + weights_r*te_slice[r_idx]
        neseps[k] = weights_l*ne_slice[l_idx] + weights_r*ne_slice[r_idx]
        # NOTE(review): stores the raw left index, not an interpolated
        # radius — confirm this is what callers expect.
        rseps[k] = l_idx
    return teseps, neseps, rseps
def get_weights(te, idx_l, idx_r, query=100):
    """Linear-interpolation weights placing `query` between te[idx_l] and
    te[idx_r].

    Returns (weight_right, weight_left): the weight for the value at
    idx_r followed by the weight for the value at idx_l, summing to 1.
    """
    span = (te[idx_r] - query) + (query - te[idx_l])
    weight_right = 1 - (te[idx_r] - query) / span
    weight_left = 1 - (query - te[idx_l]) / span
    return weight_right, weight_left
def static_pressure_stored_energy_approximation(profs):
    """Approximate stored energy per slice as k_B * sum_r(ne * te).

    Accepts a torch tensor or a numpy array of shape (N, channels, L);
    the product collapses the channel axis, the sum the radial axis.
    """
    if not isinstance(profs, torch.Tensor):
        profs = torch.from_numpy(profs)
    pressure_per_point = torch.prod(profs, 1)
    return boltzmann_constant * pressure_per_point.sum(1)
def torch_shaping_approx(minor_radius, tri_u, tri_l, elongation):
    """Arc-length-style shaping integral of a parabolic-boundary plasma
    cross-section, built from minor radius, average triangularity and
    elongation. Used by bpol_approx as an effective circumference."""
    avg_triangularity = (tri_u + tri_l) / 2.0
    half_height = elongation * minor_radius
    gamma_top = -(minor_radius + avg_triangularity)
    gamma_bot = minor_radius - avg_triangularity
    alpha_top = -gamma_top / (half_height * half_height)
    alpha_bot = -gamma_bot / (half_height * half_height)

    def _arc_length(alpha):
        # closed form of the arc length of y = alpha * x^2 over the half height
        two_ab = 2 * torch.abs(alpha) * half_height
        return (torch.arcsinh(two_ab) + two_ab * torch.sqrt(two_ab * two_ab + 1)) / (2 * torch.abs(alpha))

    return _arc_length(alpha_bot) + _arc_length(alpha_top)
def bpol_approx(minor_radius, tri_u, tri_l, elongation, current):
    """Poloidal field estimate B_pol = mu_0 * I_p / (effective boundary
    length from torch_shaping_approx)."""
    boundary_length = torch_shaping_approx(minor_radius, tri_u, tri_l, elongation)
    return (mu_0 * current) / boundary_length
def calculate_peped(profs):
    """Estimate the pedestal electron pressure and its radial index.

    Pressure p = k_B * ne * te is computed per radial point; the pedestal
    position is taken where the second difference of p is most negative
    (the profile 'knee'), shifted by 2 — presumably to compensate the
    index offset of the double diff (confirm).

    Returns (peped, ped_loc): per-slice pedestal pressure and the index
    tensor used to extract it.
    """
    if not isinstance(profs, torch.Tensor):
        profs = torch.from_numpy(profs)
    # channel 0 is ne, channel 1 is te (kept as size-1 dims for broadcasting)
    ne = profs[:, 0:1, :]
    te = profs[:, 1:, :]
    p = boltzmann_constant*ne*te
    second_diff = torch.diff(p, n=2, dim=-1)
    min_diff_val, min_diff_idx = torch.min(second_diff, dim=-1)
    # NOTE(review): if the minimum falls at index < 2, ped_loc goes negative
    # and torch.select then indexes from the profile's end — confirm intended.
    ped_loc = min_diff_idx -2
    peped = torch.zeros((len(p)))
    for k in range(len(peped)):
        peped[k] = torch.select(p[k], dim=-1, index=ped_loc[k].item())
    return peped, ped_loc
def beta_approximation(profiles_tensors, minor_radius, tri_u, tri_l, elongation, current, bt, beta_pol=False):
    """
    To approximate beta!
    The factor of 2 at the front is to compensate the ions which are nowhere to be found in this analysis.
    The additional factor of 100 is to get it in percent form.

    When ``beta_pol`` is True, returns the poloidal beta built from the
    pedestal pressure (2 * mu_0 * p_ped / Bpol^2) instead of the total beta.
    """
    # elementary charge [C]; presumably converts eV-based te*ne into Pascals
    # in the total-beta branch — confirm units.
    e_c = 1.602e-19
    bpol = bpol_approx(minor_radius, tri_u, tri_l, elongation, current)
    if beta_pol:
        pressure_ped, _ = calculate_peped(profiles_tensors)
        beta_pol_approx = 2*mu_0*pressure_ped / (bpol*bpol)
        return beta_pol_approx
    density, temperature = profiles_tensors[:, 0, :], profiles_tensors[:, 1, :]
    pressure_prof = density*temperature
    pressure_average = pressure_prof.mean(-1)
    # TODO: This beta average is not really realistic I find... but am interested to see how it impacts
    return (100*2)*e_c*2*mu_0 * pressure_average / (bt*bt + bpol*bpol)
def pressure_calculation(profs: torch.Tensor, dataset=None, normalize=True):
    """Return the pressure profile p = k_B * ne * te per radial point.

    If ``normalize`` is True and a dataset is supplied, the profiles are
    first denormalised (on a copy, so the caller's tensor is untouched).
    """
    if normalize and dataset is not None:
        # Bug fix: torch.Tensor has no `.copy()` (that is the numpy API);
        # `.clone()` is the torch equivalent, so this branch no longer
        # raises AttributeError.
        profs = dataset.denorm_profiles(profs.clone())
    return boltzmann_constant*torch.prod(profs, 1)
def calculate_physics_constraints(profiles_og, mps_og, train_set):
    """Denormalise profiles/machine parameters and compute the physics
    quantities used as loss constraints.

    Returns (stored_energy, beta, bpol, pressure); inputs are cloned so
    the caller's tensors are never modified.
    """
    profiles = train_set.denorm_profiles(torch.clone(profiles_og), to_torch=True)
    mps = train_set.denorm_mps(torch.clone(mps_og), to_torch=True)
    stored_energy = static_pressure_stored_energy_approximation(profiles)
    # machine-parameter columns used by the geometry approximations
    minor_radius, tri_u, tri_l = mps[:, 2], mps[:, 4], mps[:, 5]
    elongation, current, bt = mps[:, 6], mps[:, 8], mps[:, 9]
    bpol = bpol_approx(minor_radius, tri_u, tri_l, elongation, current)
    beta = beta_approximation(profiles, minor_radius, tri_u, tri_l, elongation, current, bt)
    pressure = pressure_calculation(profiles, normalize=False)
    return stored_energy, beta, bpol, pressure
| 4,682 | 44.028846 | 149 | py |
psi_2022 | psi_2022-main/src/models/DIVA.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from typing import Dict, List, Tuple
from .model_utils import PRIORREG, AUXREG, ENCODER, DECODER
from src.common.physics_approximations import *
MIN_STDDEV = 1e-15
class DIVA(nn.Module):
    """DIVA-style variational autoencoder over plasma profiles with a
    machine-parameter-conditioned prior.

    The latent space is partitioned into:
      * ``z_mach`` (``mach_latent_dim``): machine-driven variation. A
        conditional Gaussian prior over it is regressed from the machine
        parameters (``mp_encoder``), and it is decoded back to machine
        parameters (``mp_decoder``).
      * ``z_stoch`` (``stoch_latent_dim``): residual stochastic variation,
        regularised towards N(0, 1).

    Profiles are mapped to/from the latent space with 1-D convolutional
    stacks (``prof_encoder`` / ``prof_decoder``).
    """
    def __init__(self,
                mach_latent_dim: int, stoch_latent_dim: int,
                conv_filter_sizes: List[int], mp_layer_sizes: List[int],
                input_dim: int = 2, out_length: int = 50, action_dim: int = 15,
                clamping_zero_tensor: torch.Tensor = None,
                BETA_KLD_COND: float=0.001, BETA_KLD_STOCH: float=0.001, BETA_KLD_MACH: float=0.001,
                GAMMA_PROF: float=100.0, GAMMA_MP: float=100.0,
                LAMBDA_PRESSURE: float=10.0, LAMBDA_BETA: float=1.0, LAMDBA_BPOL: float = 1.0,
                physics: bool = True, **kwargs):
        super(DIVA, self).__init__()
        self.physics = physics
        self.mach_latent_dim, self.stoch_latent_dim = mach_latent_dim, stoch_latent_dim
        self.conv_filter_sizes, self.mp_encoder_layer_sizes = conv_filter_sizes, mp_layer_sizes
        # Decoder side mirrors the encoder: reversed filter/width lists.
        self.trans_conv_filter_sizes, self.mp_decoder_layer_sizes = conv_filter_sizes[::-1], mp_layer_sizes[::-1]
        self.prof_len, self.mp_dim = out_length, action_dim
        self.prof_encoder = ENCODER(filter_sizes=self.conv_filter_sizes, in_ch=input_dim, in_length=self.prof_len)
        self.prof_decoder = DECODER(filter_sizes=self.trans_conv_filter_sizes, end_conv_size=self.prof_encoder.end_conv_size, clamping_zero_tensor=clamping_zero_tensor)
        self.mp_encoder = PRIORREG(in_dim=self.mp_dim, out_dim=self.mach_latent_dim, hidden_dims=self.mp_encoder_layer_sizes, make_prior=True)
        self.mp_decoder = AUXREG(in_dim = self.mach_latent_dim, out_dim=self.mp_dim, hidden_dims=self.mp_decoder_layer_sizes)
        in_prior_size = self.prof_encoder.end_conv_size*self.conv_filter_sizes[-1]
        # Heads producing the encoder-side latent Gaussian parameters.
        self.z_mu_mach = nn.Linear(in_prior_size, self.mach_latent_dim)
        self.z_var_mach = nn.Linear(in_prior_size, self.mach_latent_dim)
        self.z_mu_stoch = nn.Linear(in_prior_size, self.stoch_latent_dim)
        self.z_var_stoch = nn.Linear(in_prior_size, self.stoch_latent_dim)
        self.decoder_input = nn.Linear(self.stoch_latent_dim + self.mach_latent_dim, self.trans_conv_filter_sizes[0]*self.prof_encoder.end_conv_size)
        self.output_layer = nn.Linear(self.prof_decoder.final_size, out_length)
        # Loss weights (see loss_function for how each is applied).
        self.GAMMA_PROF, self.GAMMA_MP = GAMMA_PROF, GAMMA_MP
        self.BETA_KLD_COND, self.BETA_KLD_STOCH , self.BETA_KLD_MACH = BETA_KLD_COND, BETA_KLD_STOCH, BETA_KLD_MACH
        self.LAMBDA_PRESSURE, self.LAMBDA_BETA, self.LAMDBA_BPOL = LAMBDA_PRESSURE, LAMBDA_BETA, LAMDBA_BPOL

    def forward(self, prof_t: torch.Tensor, mp_t: torch.Tensor, **kwargs) -> List[torch.Tensor]:
        """Full pass: encode profiles, regress the conditional prior from the
        machine parameters, and decode both profiles and machine parameters.

        Returns the 12-element list consumed by loss_function.
        """
        z_enc, z_mach_enc, z_stoch_enc, mu_mach_enc, var_mach_enc, mu_stoch_enc, var_stoch_enc = self.prof2z(prof_t)
        z_cond, mu_mach_cond, var_mach_cond = self.mp2z(mp_t)
        prof_out, mp_out = self.z2prof(z_enc), self.z2mp(z_mach_enc)
        return [z_enc, z_mach_enc, z_stoch_enc, mu_mach_enc, var_mach_enc, mu_stoch_enc, var_stoch_enc,
                z_cond, mu_mach_cond, var_mach_cond,
                prof_out, mp_out]

    def prof2z(self, prof):
        """ Encode the profile to z_stoch and z_mach"""
        enc = self.prof_encoder(prof)
        # Variance heads go through a softplus floored at MIN_STDDEV.
        # NOTE(review): reparameterize then treats these as log-variances
        # (std = exp(0.5*var)) — confirm the intended parameterisation.
        mu_mach, var_mach, mu_stoch, var_stoch = self.z_mu_mach(enc), torch.clamp(torch.nn.functional.softplus(self.z_var_mach(enc)), min=MIN_STDDEV), self.z_mu_stoch(enc), torch.clamp(torch.nn.functional.softplus(self.z_var_stoch(enc)), min=MIN_STDDEV)
        z_mach, z_stoch = self.reparameterize(mu_mach, var_mach), self.reparameterize(mu_stoch, var_stoch)
        z = torch.cat([z_mach, z_stoch], 1)
        return [z, z_mach, z_stoch, mu_mach, var_mach, mu_stoch, var_stoch]

    def mp2z(self, mp):
        """ Conditional prior on z_mach via machine parameters """
        mu_mach_cond, var_mach_cond = self.mp_encoder(mp)
        z_cond = self.reparameterize(mu_mach_cond, var_mach_cond)
        return [z_cond, mu_mach_cond, var_mach_cond]

    def z2prof(self, z):
        """ Decode z_mach and z_stoch to profile """
        z = self.decoder_input(z)
        dec = self.prof_decoder(z)
        prof = self.output_layer(dec)
        return prof

    def z2mp(self, z_mach):
        """ Decode z_mach to machine parameters """
        mp_out = self.mp_decoder(z_mach)
        return mp_out

    def reparameterize(self, mu: torch.Tensor, var: torch.Tensor) -> torch.Tensor:
        """Reparameterization trick: z = mu + exp(0.5 * var) * eps, eps ~ N(0, I).

        Parameters
        ----------
        mu : torch.Tensor
            Mean of the latent distribution.
        var : torch.Tensor
            Treated as the *log*-variance of the latent distribution
            (std = exp(0.5 * var)).

        Returns
        -------
        torch.Tensor
            A differentiable sample from N(mu, exp(var)).
        """
        std = torch.exp(0.5 * var)
        eps = torch.randn_like(std)
        z = mu + eps*std
        return z

    def inference(self, mp_in, prof_in=None, from_mean: bool = True):
        """Conditional inference: generate profiles from machine parameters.

        z_mach comes from the conditional prior (its mean when
        ``from_mean``); z_stoch is encoded from ``prof_in`` when given,
        otherwise sampled from N(0, I).
        Returns (profiles, machine parameters, prior mean).
        """
        z_mach_cond, mu_mach_cond, var_mach_cond = self.mp2z(mp_in)
        if prof_in is not None:
            _, _, z_stoch_rand, _, _, _, _ = self.prof2z(prof_in)
        else:
            z_stoch_rand = torch.normal(0, 1, size=(len(z_mach_cond), self.stoch_latent_dim))
        if not from_mean:
            z = torch.cat([z_mach_cond, z_stoch_rand], 1)
            out_mps = self.z2mp(z_mach_cond)
        else:
            z = torch.cat([mu_mach_cond, z_stoch_rand], 1)
            out_mps = self.z2mp(mu_mach_cond)
        out_profs = self.z2prof(z)
        return out_profs, out_mps, mu_mach_cond

    def loss_function(self, inputs, outputs, train_set=None, step=0):
        """DIVA loss: weighted reconstructions + KL terms (+ optional
        physics-consistency terms).

        Returns a dict whose 'loss' entry is the total; the remaining
        entries are the individual (unweighted) components for logging.
        """
        prof_in, mp_in = inputs
        _, _, _, mu_mach_enc, var_mach_enc, mu_stoch_enc, var_stoch_enc, _, mu_mach_cond, var_mach_cond, prof_out, mp_out = outputs
        recon_prof, recon_mp = F.mse_loss(prof_in, prof_out), F.mse_loss(mp_in, mp_out)
        recon_loss = self.GAMMA_MP*recon_mp + self.GAMMA_PROF*recon_prof
        physics, sp_loss, beta_loss, bpol_loss = torch.Tensor([0]), torch.Tensor([0]), torch.Tensor([0]), torch.Tensor([0])
        # kld_stoch: KL between the stochastic latent posterior and N(0, 1).
        kld_stoch = torch.distributions.kl.kl_divergence(torch.distributions.normal.Normal(mu_stoch_enc, torch.exp(0.5*var_stoch_enc)), torch.distributions.normal.Normal(0, 1)).mean(0).sum()
        # kld_mach_sup: KL between the machine latent posterior (profile
        # encoder) and the machine-parameter-conditioned prior.
        kld_mach_sup = torch.distributions.kl.kl_divergence(torch.distributions.normal.Normal(mu_mach_enc, torch.exp(0.5*var_mach_enc)), torch.distributions.normal.Normal(mu_mach_cond, torch.exp(0.5*var_mach_cond))).mean(0).sum()
        # kld_mach_unsup: KL between the machine latent posterior and N(0, 1).
        kld_mach_unsup = torch.distributions.kl.kl_divergence(torch.distributions.normal.Normal(mu_mach_enc, torch.exp(0.5*var_mach_enc)), torch.distributions.normal.Normal(0, 1)).mean(0).sum()
        # Alternate the machine-latent regulariser between the supervised
        # (conditional) and unsupervised (standard-normal) target per step.
        if step % 2 == 0:
            kld_loss = self.BETA_KLD_STOCH*kld_stoch + self.BETA_KLD_COND*kld_mach_sup
        else:
            kld_loss = self.BETA_KLD_STOCH*kld_stoch + self.BETA_KLD_MACH*kld_mach_unsup
        # Bug fix: the physics losses ran unconditionally, ignoring the
        # `physics` switch (and crashing when train_set is None). Guard them
        # so the PHYSICS hparam actually disables them.
        if train_set is not None and self.physics:
            sp_in, beta_in, bpol_in, pressure_in = calculate_physics_constraints(prof_in, mp_in, train_set)
            sp_out, beta_out, bpol_out, pressure_out = calculate_physics_constraints(prof_out, mp_out, train_set)
            # NOTE(review): beta_loss compares the pressure profiles, not the
            # beta values — confirm this substitution is intentional.
            sp_loss, beta_loss, bpol_loss = F.mse_loss(sp_in, sp_out), F.mse_loss(pressure_in, pressure_out), F.mse_loss(bpol_in, bpol_out)
            physics = self.LAMBDA_PRESSURE*sp_loss + self.LAMBDA_BETA*beta_loss + self.LAMDBA_BPOL*bpol_loss
        loss = recon_loss + kld_loss + physics
        return dict(loss=loss,
                    recon_loss=recon_loss, recon_mp=recon_mp, recon_prof=recon_prof,
                    kld_loss=kld_loss, kld_cond=kld_mach_sup, kld_mach=kld_mach_unsup, kld_stoch=kld_stoch,
                    physics=physics, sp_loss=sp_loss, beta_loss=beta_loss, bpol_loss=bpol_loss)
"""
def loss_function(self, inputs, outputs, train_set=None, step=0):
prof_in, mp_in = inputs
_, _, _, mu_mach_enc, var_mach_enc, mu_stoch_enc, var_stoch_enc, _, mu_mach_cond, var_mach_cond, prof_out, mp_out = outputs
recon_prof, recon_mp = F.mse_loss(prof_in, prof_out), F.mse_loss(mp_in, mp_out)
recon_loss = self.GAMMA_MP*recon_mp + self.GAMMA_PROF*recon_prof
physics, sp_loss, beta_loss, bpol_loss = torch.Tensor([0]), torch.Tensor([0]), torch.Tensor([0]), torch.Tensor([0])
"""
# kld_stoch measures the KL-div between the stochastic latent space priors given by the profile encoding against a normal distribution
# kld_cond measures KL-div between the machine latent space priors given by machine parameters against that given by profile encoding
# TODO: kld_unsup measures KL-div between mach latent space priors given by profile encoding against a normal distribution
"""
kld_stoch = torch.distributions.kl.kl_divergence(
torch.distributions.normal.Normal(mu_stoch_enc, torch.exp(0.5*var_stoch_enc)),
torch.distributions.normal.Normal(0, 1)
).mean(0).sum()
kld_mach_sup = torch.distributions.kl.kl_divergence(
torch.distributions.normal.Normal(mu_mach_enc, torch.exp(0.5*var_mach_enc)),
torch.distributions.normal.Normal(mu_mach_cond, torch.exp(0.5*var_mach_cond)),
).mean(0).sum()
kld_mach_unsup = torch.distributions.kl.kl_divergence(
torch.distributions.normal.Normal(mu_mach_enc, torch.exp(0.5*var_mach_enc)),
torch.distributions.normal.Normal(0, 1)
).mean(0).sum()
# add all_kld together
if step % 2 == 0:
kld_loss = self.BETA_KLD_STOCH*kld_stoch + self.BETA_KLD_COND*kld_mach_sup
else:
kld_loss = self.BETA_KLD_STOCH*kld_stoch + self.BETA_KLD_MACH*kld_mach_unsup
# add together
if train_set is not None and self.physics:
sp_in, beta_in, bpol_in, pressure_in = calculate_physics_constraints(prof_in, mp_in, train_set)
sp_out, beta_out, bpol_out, pressure_out = calculate_physics_constraints(prof_out, mp_out, train_set)
sp_loss, beta_loss, bpol_loss = F.mse_loss(sp_in, sp_out), F.mse_loss(pressure_in, pressure_out), F.mse_loss(bpol_in, bpol_out)
physics = self.LAMBDA_SP*sp_loss + self.LAMBDA_BETA*beta_loss + self.LAMDBA_BPOL*bpol_loss
loss = recon_loss + kld_loss + physics
return dict(loss=loss,
recon_loss=recon_loss, recon_mp=recon_mp, recon_prof=recon_prof,
kld_loss=kld_loss, kld_cond=kld_mach_sup, kld_mach=kld_mach_unsup, kld_stoch=kld_stoch,
physics=physics, sp_loss=sp_loss, beta_loss=beta_loss, bpol_loss=bpol_loss)
""" | 11,083 | 53.871287 | 253 | py |
psi_2022 | psi_2022-main/src/models/model_utils.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from typing import List
import numpy as np
def get_conv_output_size(initial_input_size, number_blocks, max_pool=True):
    """Length after `number_blocks` conv blocks.

    The conv blocks used here are size-preserving, so only the max pooling
    (halving per block) changes the length; with max_pool disabled the
    input length is returned unchanged.
    """
    if max_pool == False:
        return initial_input_size
    size = initial_input_size
    for _ in range(number_blocks):
        size = int(size / 2)
    return size
def get_trans_output_size(input_size, stride, padding, kernel_size):
    """Output length of one ConvTranspose1d layer:
    (L - 1) * stride - 2 * padding + kernel_size."""
    expanded = (input_size - 1) * stride
    return expanded - 2 * padding + kernel_size
def get_final_output(initial_input_size, number_blocks, number_trans_per_block, stride, padding, kernel_size):
    """Final length after number_blocks * number_trans_per_block transposed
    convolutions (per-layer formula: (L-1)*stride - 2*padding + kernel)."""
    size = initial_input_size
    for _ in range(number_blocks * number_trans_per_block):
        size = (size - 1) * stride - 2 * padding + kernel_size
    return size
class Flatten(nn.Module):
    """Collapse every non-batch dimension: (N, C, L) -> (N, C*L)."""
    def forward(self, tensor_in):
        batch = tensor_in.size(0)
        return tensor_in.view(batch, -1)
class UnFlatten(nn.Module):
    """Inverse of Flatten: reshape (N, size*length) back to (N, size, length)."""
    def __init__(self, size, length):
        super(UnFlatten, self).__init__()
        self.size = size
        self.length = length

    def forward(self, tensor_in):
        batch = tensor_in.size(0)
        return tensor_in.view(batch, self.size, self.length)
class AUXREG(nn.Module):
    """MLP regressor mapping the machine latent vector back to machine
    parameters: Linear+ReLU per hidden width, then a final Linear head."""
    def __init__(self, in_dim: int, out_dim: int, hidden_dims: List[int], ):
        super().__init__()
        self.in_dim = in_dim  # latent-space width
        self.out_dim = out_dim  # machine-parameter width
        self.hidden_dims = hidden_dims
        layers = nn.ModuleList()
        width = in_dim
        for hidden_width in self.hidden_dims:
            layers.append(nn.Linear(width, hidden_width))
            layers.append(nn.ReLU())
            width = hidden_width
        layers.append(nn.Linear(width, out_dim))
        self.block = layers

    def forward(self, z):
        out = z
        for layer in self.block:
            out = layer(out)
        return out
class PRIORREG(nn.Module):
    """MLP mapping machine parameters into the machine latent space.

    With make_prior=True the trunk feeds two parallel Linear heads that
    emit the (mu, var) of a conditional Gaussian prior; otherwise a single
    Linear head produces a plain regression output.
    """
    def __init__(self, in_dim: int, hidden_dims: List[int], out_dim: int = None, make_prior=False):
        super().__init__()
        self.in_dim = in_dim  # machine-parameter width
        self.out_dim = out_dim  # latent-space width
        self.hidden_dims = hidden_dims
        self.make_prior = make_prior
        trunk = nn.ModuleList()
        width = in_dim
        for hidden_width in self.hidden_dims:
            trunk.append(nn.Linear(width, hidden_width))
            trunk.append(nn.ReLU())
            width = hidden_width
        self.block = trunk
        if self.make_prior:
            # Parallel heads produce the Gaussian prior parameters.
            self.fc_mu = nn.Linear(width, self.out_dim)
            self.fc_var = nn.Linear(width, self.out_dim)
        else:
            self.block.append(nn.Linear(width, self.out_dim))

    def forward(self, z):
        out = z
        for layer in self.block:
            out = layer(out)
        if self.make_prior:
            return self.fc_mu(out), self.fc_var(out)
        return out
class ENCODER(nn.Module):
    """1-D convolutional profile encoder.

    Per entry in filter_sizes: Conv1d(k=5) -> ReLU -> MaxPool1d(k=2),
    followed by a Flatten. `end_conv_size` tracks the resulting sequence
    length so downstream Linear layers can be sized.
    """
    def __init__(self, filter_sizes: List[int], in_length: int = 75, in_ch: int = 2):
        super().__init__()
        # conv hyperparameters
        self.kernel_size = 5
        self.padding = 0
        self.stride = 1
        # pooling hyperparameters (stride defaults to the kernel size)
        self.pool_padding = 0
        self.pool_dilation = 1
        self.pool_kernel_size = 2
        self.pool_stride = self.pool_kernel_size
        layers = nn.ModuleList()
        length = in_length
        channels = in_ch
        for n_filters in filter_sizes:
            layers.append(nn.Conv1d(channels, n_filters, kernel_size=self.kernel_size, padding=self.padding))
            # standard conv-length formula: [(W - K + 2P) / S] + 1
            length = ((length - self.kernel_size + 2 * self.padding) / self.stride) + 1
            layers.append(nn.ReLU())
            layers.append(nn.MaxPool1d(self.pool_kernel_size, padding=self.pool_padding, dilation=self.pool_dilation, ))
            length = ((length + 2 * self.pool_padding - self.pool_dilation * (self.pool_kernel_size - 1) - 1) / self.pool_stride) + 1
            channels = n_filters
        layers.append(Flatten())
        self.block = layers
        self.end_conv_size = int(length)

    def forward(self, x):
        out = x
        for layer in self.block:
            out = layer(out)
        return out
class DECODER(nn.Module):
    """1-D transposed-convolution decoder mirroring ENCODER.

    Parameters
    ----------
    filter_sizes : List[int]
        Channel widths of the transposed-conv stack (an output channel of 2 is
        appended automatically when not already last).
    end_conv_size : int
        Per-channel length produced by the matching encoder.
    clamping_zero_tensor : torch.Tensor
        When given, reconstructions are clamped element-wise from below by
        this tensor so only non-negative profiles are produced.
    """
    def __init__(self, filter_sizes: List[int], end_conv_size: int, clamping_zero_tensor: torch.Tensor):
        super().__init__()
        in_ch = 2
        # BUG FIX: copy the list so appending the output channel below does not
        # mutate the caller's ``filter_sizes``.
        self.hidden_channels = list(filter_sizes)
        self.end_conv_size = end_conv_size
        self.num_trans_conv_blocks = 1
        self.trans_stride = 1
        self.trans_padding = 0
        self.trans_kernel_size = 2
        self.block = nn.ModuleList()
        if self.hidden_channels[-1] != in_ch:
            self.hidden_channels.append(in_ch)
        self.block.append(UnFlatten(size=self.hidden_channels[0], length=self.end_conv_size))
        # Transpose kernels upsample back toward the input length.
        for i in range(len(self.hidden_channels) - 1):
            self.block.append(nn.ConvTranspose1d(self.hidden_channels[i], self.hidden_channels[i+1], kernel_size=self.trans_kernel_size))
            self.block.append(nn.ReLU())
        self.final_size = get_final_output(end_conv_size, len(self.hidden_channels) - 1, self.num_trans_conv_blocks, self.trans_stride, self.trans_padding, self.trans_kernel_size)
        self.clamping_zero_tensor = clamping_zero_tensor
        if self.clamping_zero_tensor is not None:
            print('Applying a clamping tensor to reconstruct only non-negative profiles!')
    def forward(self, x):
        for lay in self.block:
            x = lay(x)
        if self.clamping_zero_tensor is not None:
            # BUG FIX: torch.clamp is out-of-place; the result must be
            # reassigned, otherwise the clamp silently does nothing.
            x = torch.clamp(x, min=self.clamping_zero_tensor)
        return x
| 6,482 | 38.530488 | 178 | py |
psi_2022 | psi_2022-main/src/data/_utils.py | import numpy as np
import torch
def initialize_dataloaders(dataset, val_split: float = 0.3, test_split: float = 0.1, batch_size: int = 256, nw: int = 4):
    """Split ``dataset`` by *pulse* into train/val/test and build DataLoaders.

    Pulses (not individual slices) are shuffled and partitioned so that slices
    from one pulse never leak across splits.  Normalisation statistics are
    computed from the training pulses only.  The chosen slice indices are
    recorded on ``dataset`` (``train/val/test_slice_idxs``) so the split can
    be reproduced later with ``get_dataloaders``.

    Assumes ``val_split`` and ``test_split`` are strictly positive.

    Returns
    -------
    (train_loader, valid_loader, test_loader)
    """
    pulse_idxs = np.arange(dataset.total_num_pulses)
    # Pulses are stored in order (30000 -> onward), so shuffle them first.
    np.random.shuffle(pulse_idxs)
    num_val_pulses = int(val_split * dataset.total_num_pulses)
    num_test_pulses = int(test_split * dataset.total_num_pulses)
    train_pulse_idxs = pulse_idxs[:-(num_val_pulses + num_test_pulses)]
    val_pulse_idxs = pulse_idxs[-(num_val_pulses + num_test_pulses):-num_test_pulses]
    test_pulse_idxs = pulse_idxs[-num_test_pulses:]
    # Side effect: computes and stores normalisation stats from training pulses.
    dataset.get_norms(train_pulse_idxs, return_norms=True)
    # Map each split's pulse indices to the corresponding global slice indices.
    train_slice_idxs, val_slice_idxs, test_slice_idxs = [], [], []
    for set_pulse_idxs, set_slice_idxs in zip([train_pulse_idxs, val_pulse_idxs, test_pulse_idxs], [train_slice_idxs, val_slice_idxs, test_slice_idxs]):
        for num in set_pulse_idxs:
            if num == 0:
                start_slice_idx = 0
            else:
                # +1 because cumsum_num_slices is offset by -1 at construction.
                start_slice_idx = dataset.cumsum_num_slices[num-1] + 1
            end_slice_idx = dataset.cumsum_num_slices[num] + 1
            set_slice_idxs.extend(np.arange(start_slice_idx, end_slice_idx))
    dataset.train_slice_idxs = train_slice_idxs
    # BUG FIX: the val/test attributes previously recorded train_slice_idxs.
    dataset.val_slice_idxs = val_slice_idxs
    dataset.test_slice_idxs = test_slice_idxs
    train_dataset = torch.utils.data.Subset(dataset, train_slice_idxs)
    val_dataset = torch.utils.data.Subset(dataset, val_slice_idxs)
    test_dataset = torch.utils.data.Subset(dataset, test_slice_idxs)
    dataloader_kwargs = dict(
        batch_size=batch_size,
        shuffle=True,
        num_workers=nw,
        pin_memory=True
    )
    train_loader = torch.utils.data.DataLoader(train_dataset, **dataloader_kwargs)
    valid_loader = torch.utils.data.DataLoader(val_dataset, **dataloader_kwargs)
    test_loader = torch.utils.data.DataLoader(test_dataset, **dataloader_kwargs)
    return train_loader, valid_loader, test_loader
def get_dataloaders(dataset, batch_size: int = 256):
    """Rebuild (non-shuffling) DataLoaders from the split indices already
    stored on ``dataset`` by ``initialize_dataloaders``."""
    loader_opts = dict(
        batch_size=batch_size,
        shuffle=False,
    )
    subsets = (
        torch.utils.data.Subset(dataset, dataset.train_slice_idxs),
        torch.utils.data.Subset(dataset, dataset.val_slice_idxs),
        torch.utils.data.Subset(dataset, dataset.test_slice_idxs),
    )
    train_loader, valid_loader, test_loader = (
        torch.utils.data.DataLoader(subset, **loader_opts) for subset in subsets
    )
    return train_loader, valid_loader, test_loader
| 3,007 | 47.516129 | 153 | py |
psi_2022 | psi_2022-main/src/data/memmap_dataset_observational.py | from torch.utils.data import Dataset
from mmap_ninja.ragged import RaggedMmap
import os
import numpy as np
from typing import Tuple
import torch
def compute_raggedmmap(data_dir: str, data_string: str, batch_size: int = 128) -> RaggedMmap:
    """Load a cached RaggedMmap for ``data_string`` or build it from .npy files.

    Parameters
    ----------
    data_dir : str
        Base dir where arrays are stored and where mmaps will be stored
    data_string : str
        PROFS, MP, RADII, etc.
    batch_size : int, optional
        Generator batch size forwarded to mmap_ninja, by default 128

    Returns
    -------
    RaggedMmap
        Shout out to mmap_ninja
    """
    save_name = os.path.join(data_dir, f'{data_string}_MMAP')
    if os.path.exists(save_name):
        # Cache hit: reuse the mmap that was built on a previous run.
        ragged_mmap = RaggedMmap(save_name)
        print(f'A ragged map for {data_string} exists at {save_name} with length: {len(ragged_mmap)}')
        return ragged_mmap
    suffix = f"_{data_string}.npy"
    relevant_paths = sorted(
        os.path.join(data_dir, fname)
        for fname in os.listdir(data_dir)
        if fname.endswith(suffix)
    )
    # Lazily stream the arrays so they are never all resident at once.
    ragged_mmap = RaggedMmap.from_generator(out_dir=save_name,
                                            sample_generator=(np.load(path) for path in relevant_paths),
                                            batch_size=batch_size,
                                            verbose=True)
    print(f'Ragged mmap for {data_string} saved to {save_name}')
    return ragged_mmap
class MemMapDataset_O(Dataset):
    """A memory-mapped dataset of (profile, machine-parameter) time slices.

    Pulses are stored as ragged memory maps; items are addressed by a global
    slice index which is translated to a (pulse, slice-within-pulse) pair.
    """
    def __init__(self, data_path: str, device: str = 'cpu', filter_mps = None):
        # NOTE(review): the `device` constructor argument is never used --
        # `self.device` is hard-set to None below; confirm intended behaviour.
        self.data = {}
        self.data['profs'] = compute_raggedmmap(data_path, data_string='PROFS')
        self.data['mps'] = compute_raggedmmap(data_path, data_string='MP')
        self.total_num_pulses = len(self.data['profs'])
        # Slices per pulse and a (-1 offset) running total used by
        # get_pulse_idx to map global slice index -> pulse index.
        self.list_num_slices: list = [len(pulse) for pulse in self.data['profs']]
        self.cumsum_num_slices: np.ndarray = np.cumsum(self.list_num_slices) - 1
        self.total_num_slices = sum(self.list_num_slices)
        self.prof_length = self.data['profs'][0].shape[-1]
        self.mp_dim = self.data['mps'][0].shape[-1]
        # Per-channel rescaling applied before normalisation; channel 0 is
        # scaled by 1e-19 (presumably a density channel -- TODO confirm).
        self.factor = np.ones((2,self.prof_length))
        self.factor[0, :] *= 1e-19
        self.device = None
        self.filter_mps = filter_mps
    def get_pulse_idx(self, idx: int) -> Tuple[int, int]:
        """A fun function that calculates which slice to take from the mmap
        Since the mmap is (imo) a list of pulses, we need to find which pulse the queried idx is coming from.
        This is calculated by looking at the,m minimum value of the cumulative sum of all the slices across pulses that are >= idx
        Then the internal pulse slice idx is just queried idx subtracted by the cumulative sum up to that pulse ( + 1)
        Parameters
        ----------
        idx : int
            a given slice, which can take on values of [0 -> total_num_slices]
        Returns
        -------
        Tuple[int, int] : (pulse_idx, slice_idx)
        """
        # First pulse whose cumulative slice count reaches idx.
        pulse_idx: int = np.where(self.cumsum_num_slices >= idx)[0][0]
        # Negative offset from the end of that pulse (used to index the pulse).
        slice_idx: int = (idx - (self.cumsum_num_slices[pulse_idx] + 1))
        return pulse_idx, slice_idx
    def get_norms(self, relevant_idxs, return_norms=True) -> dict:
        """Compute normalisation statistics from the given pulses.

        Means/stds are taken per pulse first and then averaged across pulses.
        NOTE(review): the `return_norms` flag is ignored -- the stats dict is
        always returned via ``return_norms()``.
        """
        profs = self.data['profs'][relevant_idxs]
        mps = self.data['mps'][relevant_idxs]
        self.prof_means = np.mean(np.stack([np.mean(prof*self.factor, axis=0) for prof in profs], 0), 0)
        self.prof_stds = np.mean(np.stack([np.std(prof*self.factor, axis=0) for prof in profs], 0), 0)
        self.mp_means = np.mean(np.stack([np.mean(mp, axis=0) for mp in mps], 0), 0)
        self.mp_stds = np.mean(np.stack([np.std(mp, axis=0) for mp in mps], 0), 0)
        if self.device is not None:
            # NOTE(review): dead branch -- self.device is always None here, and
            # these are numpy arrays with no `.to()` method; this line would
            # raise AttributeError if the branch were ever enabled.
            self.prof_means, self.prof_stds, self.mp_means, self.mp_stds = self.prof_means.to(self.device), self.prof_stds.to(self.device), self.mp_means.to(self.device), self.mp_stds.to(self.device)
        return self.return_norms()
    def set_norms(self, prof_means, prof_stds, mp_means, mp_stds) -> None:
        """Install externally computed normalisation statistics."""
        self.prof_means, self.prof_stds, self.mp_means, self.mp_stds = prof_means, prof_stds, mp_means, mp_stds
    def return_norms(self) -> dict:
        """Return the current normalisation statistics as a dict."""
        return dict(prof_means=self.prof_means, prof_stds=self.prof_stds, mp_means=self.mp_means, mp_stds=self.mp_stds)
    def norm_profiles(self, profiles):
        # Rescale by self.factor, then standardise.
        return (profiles*self.factor - self.prof_means) / self.prof_stds
    def denorm_profiles(self, profiles, to_torch=False):
        """Invert norm_profiles; ``to_torch=True`` keeps torch tensors throughout."""
        if not to_torch:
            return ((profiles*self.prof_stds) + self.prof_means) / self.factor
        else:
            return ((profiles*torch.from_numpy(self.prof_stds)) + torch.from_numpy(self.prof_means)) / torch.from_numpy(self.factor)
    def norm_mps(self, mps):
        # Standardise machine parameters.
        return (mps - self.mp_means) / self.mp_stds
    def denorm_mps(self, mps, to_torch=False):
        """Invert norm_mps.

        NOTE(review): when ``to_torch`` is True the ``filter_mps`` reduction is
        skipped (early return) -- confirm this asymmetry is intentional.
        """
        if to_torch:
            return (mps*torch.from_numpy(self.mp_stds)) + torch.from_numpy(self.mp_means)
        if self.filter_mps is not None:
            # mps = self.filter_mps(mps)
            # Stats must be filtered the same way the mps were at __getitem__.
            return (mps*self.filter_mps(self.mp_stds)) + self.filter_mps(self.mp_means)
        else:
            return (mps*self.mp_stds) + self.mp_means
    def __len__(self):
        return self.total_num_slices
    def __getitem__(self, idx):
        # Translate the global slice index, load the pulse, take the slice,
        # then normalise (and optionally filter the machine parameters).
        pulse_idx_to_take_from, slice_idx_to_take_from = self.get_pulse_idx(idx)
        sample_pulse_profs = self.data['profs'][pulse_idx_to_take_from]
        sample_pulse_mps = self.data['mps'][pulse_idx_to_take_from]
        profs = sample_pulse_profs[slice_idx_to_take_from]
        mps = sample_pulse_mps[slice_idx_to_take_from]
        profs, mps = self.norm_profiles(profs), self.norm_mps(mps)
        if self.filter_mps is not None:
            mps = self.filter_mps(mps)
        return profs, mps
| 6,190 | 43.221429 | 199 | py |
LLPFC | LLPFC-main/main.py | import json
import sys
import numpy as np # set the random seed for torchvision
import logging
import torch
from torch.utils.data.sampler import SubsetRandomSampler
from llpfc import llpfc
from kl import kl
from llpvat import llpvat
from llpgan import llpgan
from utils import set_optimizer, set_device, set_reproducibility, set_data_and_model, set_dataset_class, get_args, set_generator
def main(args):
    """Train a model on label-proportion (LLP) data with the chosen algorithm.

    Dispatches to one of llpfc / kl / llpvat / llpgan, logs progress to
    ``args.logging_filename``, and optionally saves the trained model and a
    JSON record of per-epoch test accuracy.
    """
    set_reproducibility(args)
    logging.basicConfig(level=logging.INFO, filename=args.logging_filename, filemode="a+",
                        format="%(asctime)-15s %(levelname)-8s %(message)s")
    logger = logging.getLogger()
    device = set_device(args)
    logger.info("\n\n")
    logger.info("program starts")
    logger.info("running arguments %s" % sys.argv)
    # Run metadata collected for the optional JSON output.
    json_data = dict()
    json_data['args'] = sys.argv
    json_data['epoch_vs_test_accuracy'] = []
    llp_data, transform_train, num_classes, model, test_loader = set_data_and_model(args)
    model = model.to(device)
    total_epochs = args.total_epochs
    optimizer, scheduler = set_optimizer(args, model, total_epochs)
    if args.algorithm == "llpfc":
        # llpfc builds (and periodically rebuilds) its own dataset/loaders.
        dataset_class = set_dataset_class(args)
        llpfc(llp_data,
              transform_train,
              scheduler,
              model,
              optimizer,
              test_loader,
              dataset_class,
              device,
              args,
              logger,
              json_data)
    elif args.algorithm == "kl":
        dataset_class = set_dataset_class(args)
        training_data, bag2indices, bag2size, bag2prop = llp_data
        kl_train_dataset = dataset_class(training_data, bag2indices, bag2prop, transform_train)
        train_loader = torch.utils.data.DataLoader(dataset=kl_train_dataset,
                                                   batch_size=args.train_batch_size,
                                                   shuffle=True)
        val_loader = None
        kl(model, optimizer, train_loader, scheduler, total_epochs, val_loader, test_loader, device, logger, json_data)
    elif args.algorithm == "llpvat":
        dataset_class = set_dataset_class(args)
        training_data, bag2indices, bag2size, bag2prop = llp_data
        kl_train_dataset = dataset_class(training_data, bag2indices, bag2prop, transform_train)
        llpvat(kl_train_dataset, scheduler, model, optimizer, test_loader, device, args, logger, json_data)
    elif args.algorithm == "llpgan":
        # llpgan additionally needs a generator with its own optimizer/scheduler.
        dataset_class = set_dataset_class(args)
        training_data, bag2indices, bag2size, bag2prop = llp_data
        kl_train_dataset = dataset_class(training_data, bag2indices, bag2prop, transform_train)
        gen = set_generator(args)
        gen = gen.to(device)
        gen_opt, gen_sch = set_optimizer(args, gen, total_epochs)
        llpgan(kl_train_dataset,
               model,
               gen,
               optimizer,
               gen_opt,
               scheduler,
               gen_sch,
               test_loader,
               device,
               args,
               logger,
               json_data)
    if args.save_path is not None:
        torch.save(model.state_dict(), args.save_path)
    if args.path_to_json is not None:
        with open(args.path_to_json, 'w') as f:
            json.dump(json_data, f)
    logger.info("training completed")
    logger.info("")
if __name__ == "__main__":
    # Parse command-line arguments and launch training.
    parsed_args = get_args()
    main(parsed_args)
| 3,444 | 35.648936 | 128 | py |
LLPFC | LLPFC-main/llpvat.py | import torch
import torch.nn as nn
from llpvatlib.train_fun import llpvat_train_by_bag
from llpfclib.train_fun import test_model
from llpvatlib.utils import VATLoss
def loss_f_test(x, y, device, epsilon=1e-8):
    """Summed NLL of predicted class probabilities ``x`` against labels ``y``.

    Probabilities are clamped to [epsilon, 1 - epsilon] before the log for
    numerical stability.  ``device`` is accepted for interface compatibility
    but unused.
    """
    probs = x.clamp(epsilon, 1 - epsilon)
    return nn.functional.nll_loss(probs.log(), y, reduction='sum')
def llpvat(kl_train_dataset, scheduler, model, optimizer, test_loader, device, args, logger, json_data):
    """Train ``model`` with the LLP-VAT baseline (proportion loss + VAT).

    Per epoch: one bag-level training pass with the virtual-adversarial
    regulariser, then (optionally) test metrics appended to
    ``json_data['epoch_vs_test_accuracy']``.
    """
    train_loader = torch.utils.data.DataLoader(dataset=kl_train_dataset, batch_size=args.train_batch_size, shuffle=True)
    # VAT regulariser; xi/eps/ip come from the command line.
    vat_loss_f = VATLoss(xi=args.vat_xi, eps=args.vat_eps, ip=args.vat_ip).to(device)
    for epoch in range(args.total_epochs):
        logger.info(f"Epoch-{epoch}")
        logger.info(f"	lr: {optimizer.param_groups[0]['lr']}")
        llpvat_train_by_bag(model, optimizer, train_loader, vat_loss_f, epoch, device, scheduler, logger)
        if test_loader is not None:
            acc, test_error = test_model(model, test_loader, loss_f_test, device)
            logger.info(f"	test_error = {test_error}, accuracy = {100 * acc}%")
            json_data['epoch_vs_test_accuracy'].append({'epoch': epoch, 'test_acc': acc, 'test_error': test_error})
| 1,215 | 49.666667 | 120 | py |
LLPFC | LLPFC-main/llpfc.py | import torch
import torch.nn as nn
from torch.distributions.constraints import simplex
from torch.utils.data import SubsetRandomSampler
import numpy as np
from llpfclib.make_groups import make_groups_forward
from llpfclib.train_fun import train_model_forward_one_epoch, test_model, validate_model_forward
def loss_f(x, y, weights, device, epsilon=1e-8):
    """Weighted NLL with weights normalised to sum to 1.

    ``x`` must be rows on the probability simplex; it is clamped to
    [epsilon, 1 - epsilon] before the log.  ``device`` is unused but kept for
    a uniform loss-function interface.
    """
    assert torch.all(simplex.check(x))
    x = torch.clamp(x, epsilon, 1 - epsilon)
    unweighted = nn.functional.nll_loss(torch.log(x), y, reduction='none')
    # BUG FIX: normalise out-of-place; `weights /= ...` mutated the caller's
    # tensor in place as a side effect.
    weights = weights / weights.sum()
    return (unweighted * weights).sum()
def loss_f_val(x, y, weights, device, epsilon=1e-8):
    """Weighted (un-normalised) NLL used for validation.

    Unlike the training loss, the weights are applied as given, without
    renormalisation.  ``device`` is unused but kept for interface symmetry.
    """
    assert torch.all(simplex.check(x))
    clamped = torch.clamp(x, epsilon, 1 - epsilon)
    per_sample = nn.functional.nll_loss(torch.log(clamped), y, reduction='none')
    return (per_sample * weights).sum()
def loss_f_test(x, y, device, epsilon=1e-8):
    """Summed negative log-likelihood for evaluation; ``device`` is unused."""
    # Clamp away exact 0/1 probabilities before taking the log.
    safe = torch.clamp(x, epsilon, 1 - epsilon)
    return nn.functional.nll_loss(safe.log(), y, reduction='sum')
def llpfc(llp_data,
          transform_train,
          scheduler,
          model,
          optimizer,
          test_loader,
          dataset_class,
          device,
          args,
          logger,
          json_data):
    """Train with the LLPFC algorithm (forward correction over noisy groups).

    Every ``args.num_epoch_regroup`` epochs the bags are re-partitioned into
    groups with estimated label-transition matrices and per-instance weights;
    training then proceeds on the resulting noisy-label dataset with a
    forward-corrected loss.  Test/validation metrics are logged per epoch.
    """
    training_data, bag2indices, bag2size, bag2prop = llp_data
    num_regroup = -1
    train_sampler = None
    valid_sampler = None
    llp_valid_loader = None
    for epoch in range(args.total_epochs):
        if epoch % args.num_epoch_regroup == 0:
            # Re-estimate groups, transition matrices, weights and noisy labels.
            instance2group, group2transition, instance2weight, noisy_y = make_groups_forward(args.num_classes,
                                                                                             bag2indices,
                                                                                             bag2size,
                                                                                             bag2prop,
                                                                                             args.noisy_prior_choice,
                                                                                             args.weights,
                                                                                             logger)
            fc_train_dataset = dataset_class(training_data,
                                             noisy_y,
                                             group2transition,
                                             instance2weight,
                                             instance2group,
                                             transform_train)
            if (llp_valid_loader is None) and args.validate:  # always use the first group assigment to validate
                # Hold out 10% of the first grouping as a fixed validation set.
                VAL_PROP = 0.1
                num_data_points = len(fc_train_dataset)
                split = int(np.floor(VAL_PROP * num_data_points))
                indices = list(range(num_data_points))
                np.random.shuffle(indices)
                train_indices, val_indices = indices[split:], indices[:split]
                train_sampler = SubsetRandomSampler(train_indices)
                valid_sampler = SubsetRandomSampler(val_indices)
                llp_valid_loader = torch.utils.data.DataLoader(dataset=fc_train_dataset, sampler=valid_sampler,
                                                               batch_size=args.train_batch_size)
            if train_sampler is None:
                llp_train_loader = torch.utils.data.DataLoader(dataset=fc_train_dataset, shuffle=True,
                                                               batch_size=args.train_batch_size)
            else:
                llp_train_loader = torch.utils.data.DataLoader(dataset=fc_train_dataset, sampler=train_sampler,
                                                               batch_size=args.train_batch_size)
            num_regroup += 1
        logger.info(f"Regroup-{num_regroup} Epoch-{epoch}")
        logger.info(f"	lr: {optimizer.param_groups[0]['lr']}")
        train_model_forward_one_epoch(model, loss_f, optimizer, llp_train_loader, device, epoch, scheduler, logger)
        if test_loader is not None:
            acc, test_error = test_model(model, test_loader, loss_f_test, device)
            logger.info(f"	test_error = {test_error}, accuracy = {100 * acc}%")
            json_data['epoch_vs_test_accuracy'].append({'epoch': epoch, 'test_acc': acc, 'test_error': test_error})
        if args.validate:
            assert llp_valid_loader is not None
            val_loss = validate_model_forward(model, loss_f_val, llp_valid_loader, device)
            logger.info(f"	valid_loss = {val_loss}")
| 4,654 | 48.521277 | 117 | py |
LLPFC | LLPFC-main/llpgan.py | import torch
import torch.nn as nn
from llpganlib.train_fun import llpgan_train_by_bag, test_llpgan
def loss_f_test(x, y, device, epsilon=1e-8):
    """Summed NLL of probabilities ``x`` w.r.t. labels ``y``; ``device`` unused."""
    x = torch.clamp(x, min=epsilon, max=1 - epsilon)
    log_probs = torch.log(x)
    return nn.functional.nll_loss(log_probs, y, reduction='sum')
def llpgan(kl_train_dataset,
           dis,
           gen,
           dis_opt,
           gen_opt,
           dis_sch,
           gen_sch,
           test_loader,
           device,
           args,
           logger,
           json_data):
    """Train with the LLP-GAN baseline: generator ``gen`` vs discriminator ``dis``.

    Each epoch runs one bag-level adversarial training pass, then (optionally)
    records discriminator test metrics in ``json_data``.
    """
    train_loader = torch.utils.data.DataLoader(dataset=kl_train_dataset, batch_size=args.train_batch_size, shuffle=True)
    for epoch in range(args.total_epochs):
        logger.info(f"Epoch-{epoch}")
        logger.info(f"	dis lr: {dis_opt.param_groups[0]['lr']}")
        logger.info(f"	gen lr: {gen_opt.param_groups[0]['lr']}")
        llpgan_train_by_bag(gen,
                            dis,
                            gen_opt,
                            dis_opt,
                            dis_sch,
                            gen_sch,
                            args.noise_dim,
                            train_loader,
                            epoch,
                            device,
                            logger)
        if test_loader is not None:
            acc, test_error = test_llpgan(dis, test_loader, loss_f_test, device)
            logger.info(f"	test_error = {test_error}, accuracy = {100 * acc}%")
            json_data['epoch_vs_test_accuracy'].append({'epoch': epoch, 'test_acc': acc, 'test_error': test_error})
| 1,574 | 35.627907 | 120 | py |
LLPFC | LLPFC-main/utils.py | import argparse
import pickle
import random # set the random seed for torchvision
import numpy as np
import torch
from models.NIN import NIN
from models.WideRes import wide_resnet_d_w
from models.ResNet import resnet18
from models.vgg import vgg19_bn, vgg16_bn
from models.densenet import densenet121
from models.LLPGAN_GEN import LLPGAN_GEN_MNIST, LLPGAN_GEN_COLOR
from llpfclib.utils import FORWARD_CORRECT_MNIST, FORWARD_CORRECT_CIFAR10, FORWARD_CORRECT_SVHN
from kllib.utils import KL_CIFAR10, KL_SVHN, KL_EMNIST
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms
class InvalidArguments(Exception):
    """Raised when an unsupported combination of CLI options is supplied."""
def get_args():
    """Build and parse the command-line arguments for LLP training.

    Returns the parsed ``argparse.Namespace``; see each argument's ``help``
    string for its meaning.
    """
    parser = argparse.ArgumentParser(description="train a model on LLP data")
    # required:
    parser.add_argument("-d",
                        "--dataset",
                        nargs='?',
                        choices=["cifar10", "svhn", "emnist_letters"],
                        required=True,
                        help="name of the dataset, the program uses torchvision.datasets")
    parser.add_argument("-p",
                        "--path_lp",
                        nargs='?',
                        required=True,
                        help="path to the label proportion dataset generated by make_data.py")
    parser.add_argument("-c", "--num_classes", nargs='?', type=int, required=True, help="number of classes")
    parser.add_argument("-f",
                        "--data_folder_labeled",
                        nargs='?',
                        required=True,
                        help="path to the folder of labeled test data, if not exists, the dataset will be downloaded")
    parser.add_argument("-log", "--logging_filename", nargs='?', required=True, help="path to save the log file")
    # optional:
    parser.add_argument("-a",
                        "--algorithm",
                        nargs='?',
                        choices=["llpfc", "kl", "llpvat", "llpgan"],
                        default="llpfc",
                        help="choose a training algorithm")
    parser.add_argument("-n",
                        "--network",
                        nargs='?',
                        choices=["wide_resnet_d_w", "nin", "ResNet18", "vgg19_bn", "vgg16_bn", "densenet121"],
                        default="wide_resnet_d_w",
                        help="the neural network model")
    parser.add_argument("-wrnd", "--WideResNet_depth", nargs='?', type=int, default=28)
    parser.add_argument("-wrnw", "--WideResNet_width", nargs='?', type=int, default=2)
    parser.add_argument("-dr",
                        "--drop_rate",
                        nargs="?",
                        type=float,
                        default=0.3,
                        help="the drop rate in dropout layers, for wide resnet")  # add more to this
    parser.add_argument("-o",
                        "--optimizer",
                        nargs="?",
                        default="Adamax",
                        choices=["Adamax", "LBFGS", "Adagrad", "nesterov", "AdamW", "SGD"],
                        help="optimizer of the neural network")
    parser.add_argument("-ams",
                        "--amsgrad",
                        nargs="?",
                        type=int,
                        default=0,
                        choices=[0, 1],
                        help="whether to use the AMSGrad variant of this algorithm")
    parser.add_argument("-l", "--lr", nargs="?", type=float, default=1e-3, help="learning rate")
    parser.add_argument("-m", "--momentum", nargs="?", type=float, default=0.9, help="momentum")
    parser.add_argument("-wd", "--weight_decay", nargs="?", type=float, default=0, help="weight decay")
    parser.add_argument("-e",
                        "--total_epochs",
                        nargs="?",
                        type=int,
                        default=200,
                        help="total number of epochs to train")
    parser.add_argument("-r",
                        "--num_epoch_regroup",
                        nargs="?",
                        type=int,
                        default=20,
                        help="groups will be regenerated every this number of epochs, "
                             "only effective if the algorithm is llpfc")
    parser.add_argument("-np",
                        "--noisy_prior_choice",
                        nargs="?",
                        type=str,
                        default="approx",
                        choices=["approx", "uniform", "merge"],
                        help="the heuristics to estimate the noisy prior for each group, "
                             "approx solves the constrained optimization and uniform assigns uniform noisy priors")
    parser.add_argument("-v",
                        "--validate",
                        nargs='?',
                        type=int,
                        default=0,
                        choices=[0, 1],
                        help="if True, then validate on 10%% of the training data set; "
                             "if False, output testing loss and accuracy while training"
                        )
    parser.add_argument("-b", "--train_batch_size", nargs='?', type=int, default=128, help="training batch size")
    parser.add_argument("-t", "--test_batch_size", nargs="?", type=int, default=256, help="test batch size")
    parser.add_argument("-s",
                        "--save_path",
                        nargs='?',
                        default=None,
                        help="path to save the trained model, model will not be saved if the path is None")
    parser.add_argument("-dv",
                        "--device",
                        nargs='?',
                        default="check",
                        choices=["cuda", "cpu", "check"],
                        help="device to train network; if it's check, use cuda whenever it's available")
    parser.add_argument("-w",
                        "--weights",
                        nargs='?',
                        choices=["uniform", "ch_vol"],
                        default="uniform",
                        help="set the weights for each group in llpfc")
    parser.add_argument("-sc",
                        "--scheduler",
                        nargs='?',
                        choices=["drop", "CAWR"],
                        default="drop",
                        help="set the scheduler of training lr")
    parser.add_argument("-ms",
                        "--milestones",
                        nargs='+',
                        type=int,
                        default=[],
                        help="number of epochs to drop lr if --scheduler is set to be 'drop'")
    parser.add_argument("-ga",
                        "--gamma",
                        nargs='?',
                        type=float,
                        default=0.1,
                        help="drop the learning rate by this factor if --scheduler is set to be 'drop'")
    parser.add_argument("-T0",
                        "--T_0",
                        nargs='?',
                        type=int,
                        default=10,
                        help="parameter of the CAWR scheduler")
    parser.add_argument("-Tm", "--T_mult", nargs='?', type=int, default=1, help="parameter of the CAWR scheduler")
    parser.add_argument("--seed", nargs='?', type=int, help="seed for all RNG")
    parser.add_argument("-fr",
                        "--full_reproducibility",
                        nargs='?',
                        type=int,
                        default=0,
                        choices=[0, 1],
                        help="choose to disable all nondeterministic algorithms, may at the cost of performance, "
                             " decrypted from now")
    # VAT hyper-parameters (defaults: xi=1e-6, eps=6.0, ip=1).
    parser.add_argument("-xi",
                        "--vat_xi",
                        nargs='?',
                        type=float,
                        default=1e-6,
                        help="parameter for vat loss, effective only algorithm=llpvat")
    parser.add_argument("-eps",
                        "--vat_eps",
                        nargs='?',
                        type=float,
                        default=6.0,
                        help="parameter for vat loss, effective only algorithm=llpvat")
    # NOTE(review): ip is a power-iteration count; type=float looks suspect --
    # confirm VATLoss accepts a float here.
    parser.add_argument("-ip",
                        "--vat_ip",
                        nargs='?',
                        type=float,
                        default=1,
                        help="parameter for vat loss, effective only algorithm=llpvat")
    parser.add_argument("-nd",
                        "--noise_dim",
                        nargs='?',
                        type=int,
                        default=500,
                        help="parameter for llpgan, the input dimension of the generator")
    parser.add_argument("-js",
                        "--path_to_json",
                        nargs='?',
                        type=str,
                        default=None,
                        help="will write the training results to this path if provided, write nothing if is none")
    return parser.parse_args()
def set_reproducibility(args):
    """Seed every RNG in use and optionally force deterministic algorithms."""
    if args.seed is not None:
        # Seed numpy, torch and the stdlib RNG with the same value.
        np.random.seed(args.seed)
        torch.random.manual_seed(args.seed)
        random.seed(args.seed)
    if args.full_reproducibility:
        # See https://pytorch.org/docs/stable/notes/randomness.html
        torch.backends.cudnn.benchmark = False
        torch.use_deterministic_algorithms(True)
def set_device(args):
    """Resolve the torch device; 'check' picks CUDA when available."""
    if args.device != "check":
        return torch.device(args.device)
    return torch.device('cuda' if torch.cuda.is_available() else 'cpu')
def set_data_and_model(args):
    """Load the LLP training data, transforms, network and test loader.

    Returns
    -------
    (llp_data, transform_train, num_classes, model, test_loader)
        ``llp_data`` is the pickled (training_data, bag2indices, bag2size,
        bag2prop) tuple produced by make_data.py; ``test_loader`` serves the
        labeled test split of the chosen torchvision dataset.
    """
    # read the training data
    llp_data = pickle.load(open(args.path_lp, "rb"))
    if args.dataset == "cifar10":
        transform_train = transforms.Compose([
            transforms.RandomCrop(32, padding=4),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2470, 0.2435, 0.2616)),  # mean-std of cifar10
        ])
        transform_test = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2470, 0.2435, 0.2616)),  # mean-std of cifar10
        ])
        test_dataset = torchvision.datasets.CIFAR10(root=args.data_folder_labeled, train=False,
                                                    transform=transform_test, download=True)
        image_size = 32
        in_channel = 3
    elif args.dataset == "svhn":
        transform_train = transforms.Compose([
            transforms.RandomCrop(32, padding=4),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize((0.4376821, 0.4437697, 0.47280442), (0.19803012, 0.20101562, 0.19703614)),  # mean-std of svhn
        ])
        transform_test = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.4376821, 0.4437697, 0.47280442), (0.19803012, 0.20101562, 0.19703614)),  # mean-std of svhn
        ])
        test_dataset = torchvision.datasets.SVHN(root=args.data_folder_labeled, split='test',
                                                 transform=transform_test, download=True)
        image_size = 32
        in_channel = 3
    elif args.dataset == "emnist_letters":
        image_size = 28
        in_channel = 1
        transform_train = transforms.Compose([
            transforms.RandomCrop(28, padding=4),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize((0.1736, ), (0.3317, )),  # mean-std of emnist
        ])
        transform_test = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.1736, ), (0.3317, )),  # mean-std of emnist
        ])
        if (args.network == "densenet121") or (len(args.network) >=3 and args.network[:3] == "vgg"):
            # densenet/vgg have too many pooling stages for 28x28 inputs, so
            # EMNIST is resized to 32x32 for those backbones.
            transform_train = transforms.Compose([
                transforms.Resize(32),  # resize the image for dense net as it has too many pool layers
                transforms.RandomCrop(32, padding=4),
                transforms.RandomHorizontalFlip(),
                transforms.ToTensor(),
                transforms.Normalize((0.1736,), (0.3317,)),  # mean-std of emnist
            ])
            transform_test = transforms.Compose([
                transforms.Resize(32),
                transforms.ToTensor(),
                transforms.Normalize((0.1736,), (0.3317,)),  # mean-std of emnist
            ])
            image_size = 32
        test_dataset = torchvision.datasets.EMNIST(root=args.data_folder_labeled, split="letters", train=False,
                                                   transform=transform_test, download=True)
        test_dataset.targets = test_dataset.targets - 1  # the labels range originally from 1 to 26
    else:
        raise InvalidArguments("Unknown dataset name: ", args.dataset)
    test_loader = torch.utils.data.DataLoader(dataset=test_dataset, batch_size=args.test_batch_size, shuffle=False)
    num_classes = args.num_classes
    # llpgan's discriminator must also expose penultimate features.
    return_features = False
    if args.algorithm == "llpgan":
        return_features = True
    if args.network == "wide_resnet_d_w":
        model = wide_resnet_d_w(d=args.WideResNet_depth,
                                w=args.WideResNet_width,
                                dropout_rate=args.drop_rate,
                                num_classes=num_classes,
                                in_channel=in_channel,
                                image_size=image_size,
                                return_features=return_features
                                )
    elif args.network == "nin":
        model = NIN(num_classes=num_classes,
                    image_size=image_size,
                    in_channel=in_channel,
                    )
        if args.algorithm == "llpgan":
            raise InvalidArguments("NIN is not compatible with LLPGAN as it has no fully connected layer")
    elif args.network == "ResNet18":
        model = resnet18(num_classes, in_channel, return_features=return_features)
    elif args.network == "vgg19_bn":
        model = vgg19_bn(num_classes, in_channel, return_features=return_features)
    elif args.network == "vgg16_bn":
        model = vgg16_bn(num_classes, in_channel, return_features=return_features)
    elif args.network == "densenet121":
        model = densenet121(num_classes, in_channel, memory_efficient=False, return_features=return_features)
    else:
        raise InvalidArguments("Unknown selection of network: ", args.network)
    return llp_data, transform_train, num_classes, model, test_loader
def set_optimizer(args, model, total_epochs):
    """Build the optimizer and LR scheduler requested on the command line.

    ``total_epochs`` is accepted for interface compatibility but is not used
    by the currently supported schedulers.  Raises ``InvalidArguments`` for an
    unknown optimizer or scheduler name.
    """
    name = args.optimizer
    if name == "Adamax":
        optimizer = optim.Adamax(model.parameters(), lr=args.lr, betas=(0.9, 0.999), eps=1e-08,
                                 weight_decay=args.weight_decay)
    elif name == "Adagrad":
        optimizer = optim.Adagrad(model.parameters(), lr=args.lr, weight_decay=args.weight_decay)
    elif name == "LBFGS":
        # ToDo: implement the closure function and test
        optimizer = optim.LBFGS(model.parameters(), lr=args.lr)
    elif name in ("nesterov", "SGD"):
        optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum,
                              weight_decay=args.weight_decay, nesterov=(name == "nesterov"))
    elif name == "AdamW":
        optimizer = optim.AdamW(model.parameters(), lr=args.lr, betas=(0.9, 0.999), eps=1e-08,
                                weight_decay=args.weight_decay, amsgrad=bool(args.amsgrad))
    else:
        raise InvalidArguments("Unknown selection of optimizer: ", name)
    if args.scheduler == "drop":
        scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=args.milestones, gamma=args.gamma)
    elif args.scheduler == "CAWR":
        scheduler = optim.lr_scheduler.CosineAnnealingWarmRestarts(optimizer, T_0=args.T_0, T_mult=args.T_mult)
    else:
        raise InvalidArguments("Unknown selection of scheduler: ", args.scheduler)
    return optimizer, scheduler
def set_dataset_class(args):
    """Pick the Dataset wrapper matching the (algorithm, dataset) pair.

    llpfc uses the forward-correction datasets; the kl/llpvat/llpgan baselines
    share the KL-style datasets.  Raises ``InvalidArguments`` otherwise.
    """
    competitors = ("kl", "llpvat", "llpgan")
    algo, data = args.algorithm, args.dataset
    if algo == "llpfc":
        if data == "cifar10":
            return FORWARD_CORRECT_CIFAR10
        if data == "svhn":
            return FORWARD_CORRECT_SVHN
        if data == "emnist_letters":
            return FORWARD_CORRECT_MNIST
    elif algo in competitors:
        if data == "cifar10":
            return KL_CIFAR10
        if data == "svhn":
            return KL_SVHN
        if data == "emnist_letters":
            return KL_EMNIST
    raise InvalidArguments("Unknown combination of llp algorithm and dataset"
                           ": (%s, %s)" % (algo, data))
def set_generator(args):
    """Return the LLPGAN generator suited to the dataset's image geometry."""
    if args.dataset in ["cifar10", "svhn"]:
        return LLPGAN_GEN_COLOR(args.noise_dim)
    if args.dataset == "emnist_letters":
        # densenet/vgg pipelines resize EMNIST to 32x32; all others stay 28x28.
        needs_resize = (args.network == "densenet121") or (len(args.network) >= 3 and args.network[:3] == "vgg")
        if needs_resize:
            return LLPGAN_GEN_MNIST(args.noise_dim, 32, 32)
        return LLPGAN_GEN_MNIST(args.noise_dim, 28, 28)
    raise InvalidArguments("Unknown choice of dataset: %s" % args.dataset)
| 18,095 | 46.248042 | 127 | py |
LLPFC | LLPFC-main/kl.py | import torch
import torch.nn as nn
from kllib.train_fun import kl_train_by_bag, validate_model_kl
from llpfclib.train_fun import test_model
def loss_f_test(x, y, device, epsilon=1e-8):
    # device is unused; kept for a uniform loss-function interface.
    bounded = x.clamp(min=epsilon, max=1 - epsilon)
    return nn.functional.nll_loss(torch.log(bounded), y, reduction='sum')
def kl(model, optimizer, train_loader, scheduler, num_epochs, val_loader, test_loader, device, logger, json_data):
    """Train with the KL-divergence proportion-matching baseline.

    Per epoch: one bag-level training pass, then (optionally) test metrics
    appended to ``json_data`` and a validation error logged.
    """
    for epoch in range(num_epochs):
        logger.info(f"Epoch-{epoch}")
        logger.info(f"	lr: {optimizer.param_groups[0]['lr']}")
        kl_train_by_bag(model, optimizer, train_loader, epoch, device, scheduler, logger)
        if test_loader is not None:
            acc, test_error = test_model(model, test_loader, loss_f_test, device)
            logger.info(f"	test_error = {test_error}, accuracy = {100 * acc}%")
            json_data['epoch_vs_test_accuracy'].append({'epoch': epoch, 'test_acc': acc, 'test_error': test_error})
        if val_loader is not None:
            val_error = validate_model_kl(model, val_loader, device)
            logger.info(f"	val_error = {val_error}")
| 1,134 | 46.291667 | 115 | py |
LLPFC | LLPFC-main/make_data.py | import argparse
from llpfclib.make_bags import make_bags_dirichlet, InsufficientDataPoints, make_bags_uniform, truncate_data
import torchvision
import pickle
import os
import random # set random seed
import numpy as np # set random seed
class InvalidArguments(Exception):
    """Raised when command-line arguments are missing or form an unsupported
    combination."""
    pass
def get_args():
    """Parse the command-line arguments for the bag-generation script.

    Returns an ``argparse.Namespace`` with dataset selection, bag-generation
    method/parameters, input/output paths and an optional RNG seed.
    """
    parser = argparse.ArgumentParser(description="Partition data into bags for LLP")
    # required:
    parser.add_argument("-d", "--dataset", nargs='?', choices=["cifar10", "svhn", "emnist_letters"], required=True,
                        help="name of the dataset, the program uses torchvision.datasets")  # ToDo: add more data sets later
    parser.add_argument("-c", "--num_classes", nargs='?', type=int, required=True, metavar="10",
                        help="number of classes")
    parser.add_argument("-s", "--data_save_name", nargs='?', required=True, metavar="cifar10_1024_0",
                        help="name of the file to save")
    # optional:
    parser.add_argument("-l", "--data_folder_labeled", nargs='?', default="../data/labeled_data/",
                        metavar="../data/labeled_data/",
                        help="path to the folder of original data, if not exists, the dataset will be downloaded")
    parser.add_argument("-p", "--data_folder_llp", nargs='?', default="../data/llp_data/", metavar="../data/llp_data/",
                        help="path to save the training data for llp")
    parser.add_argument("-m", "--method", nargs='?', default="dirichlet", choices=["dirichlet", "uniform"],
                        help="method to generate bags")  # dirichlet, uniform
    parser.add_argument("-a", "--alpha", nargs="?", default="equal", choices=["equal"],
                        help="parameter of dirichlet distribution; required if use dirichlet to generate bags")  # Todo: add more to this
    parser.add_argument("-b", "--bag_size", nargs='?', type=int, metavar="1024",
                        help="size of bag; note not all bag sizes will equal to this number if use dirichlet")
    parser.add_argument("-n", "--num_bags", nargs='?', type=int, metavar="100",
                        help="number of bags to generate; it too large, the dataset may have insufficient data points")
    parser.add_argument("-r", "--seed", nargs='?', type=int, metavar="0", help="seed for all RNG")  # both random and numpy.random will use this seed
    return parser.parse_args()
def main(args):
    """Load the labeled dataset, partition it into LLP bags, and pickle the result.

    The pickle contains ``[training_data, bag2indices, bag2size, bag2prop]``.

    Raises:
        InvalidArguments: on an unknown dataset / alpha / method.
        InsufficientDataPoints: if dirichlet bag generation fails 100 times.
    """
    # --- load the underlying supervised dataset and normalize its labels ----
    if args.dataset == "cifar10":
        train_dataset = torchvision.datasets.CIFAR10(root=args.data_folder_labeled, train=True, download=True)
        labels = train_dataset.targets
    elif args.dataset == "svhn":
        train_dataset = torchvision.datasets.SVHN(root=args.data_folder_labeled, split="train", download=True)
        labels = train_dataset.labels
    elif args.dataset == "emnist_letters":
        train_dataset = torchvision.datasets.EMNIST(root=args.data_folder_labeled, split="letters", train=True,
                                                    download=True)
        labels = train_dataset.targets - 1  # the labels range originally from 1 to 26
    else:
        raise InvalidArguments("Unknown dataset name: ", args.dataset)

    # --- partition instance indices into bags --------------------------------
    if args.method == "dirichlet":
        if args.alpha == "equal":
            alpha = tuple([1 for _ in range(args.num_classes)])
        else:
            raise InvalidArguments("Unknown choice of alpha: ", args.alpha)
        # Dirichlet sampling may request more points of one class than the
        # dataset holds; retry a bounded number of times before giving up.
        fail_counter = 0
        while True:
            try:
                bag2indices, bag2size, bag2prop = make_bags_dirichlet(labels, num_classes=args.num_classes,
                                                                      bag_size=args.bag_size, num_bags=args.num_bags,
                                                                      alpha=alpha)
                break
            except InsufficientDataPoints:
                fail_counter += 1
                if fail_counter >= 100:
                    raise InsufficientDataPoints("THE DATA GENERATION PROCESS FAILS FOR 100 TIMES CONSECUTIVELY. "
                                                 "PLEASE CHECK ARGUMENTS OF --alpha %s, --bag_size %d, --num_bags %d"
                                                 % (args.alpha, args.bag_size, args.num_bags))
    elif args.method == "uniform":
        # BUG FIX: use the normalized ``labels`` computed above. SVHN exposes
        # ``.labels`` (not ``.targets``), and EMNIST-letters labels were
        # shifted to start at 0, so reading ``train_dataset.targets`` here
        # was wrong for those datasets.
        bag2indices, bag2size, bag2prop = make_bags_uniform(labels, args.num_classes, args.bag_size,
                                                            args.num_bags)
    else:
        raise InvalidArguments("Unknown method to generate bags: ", args.method)

    print("%d of bags generated, each bag has size %d, the random seed is %d, data is saved as %s" %
          (len(bag2indices.keys()), len(bag2indices[0]), args.seed, args.data_save_name))
    # Drop data points that ended up in no bag, then persist everything the
    # LLP training script needs.
    training_data, bag2indices = truncate_data(train_dataset.data, bag2indices)
    to_save = [training_data, bag2indices, bag2size, bag2prop]
    with open(os.path.join(args.data_folder_llp, args.data_save_name), 'wb') as f:
        pickle.dump(to_save, f)
if __name__ == "__main__":
    args = get_args()
    # Seed both RNG sources used by the bag generators so runs are reproducible.
    if args.seed is not None:
        random.seed(args.seed)
        np.random.seed(args.seed)
    main(args)
| 5,251 | 52.050505 | 149 | py |
LLPFC | LLPFC-main/llpfclib/utils.py | import torch
from PIL import Image
from torch.utils.data import Sampler
import numpy as np
def truncate_data_group(x, y, instance2group):
    """Drop instances mapped to group -1 (i.e. assigned to no group).

    Returns the filtered data and labels plus two bookkeeping dicts: the group
    assignment re-keyed by the new (compacted) indices, and a map from each
    new index back to the original one.
    """
    kept = [i for i in range(x.shape[0]) if instance2group[i] != -1]
    x_kept = x[kept]
    y_kept = y[kept]
    # new index -> group id of the surviving instance
    group_by_new = {new: instance2group[old] for new, old in enumerate(kept)}
    # new index -> original index
    new2idx = dict(enumerate(kept))
    return x_kept, y_kept, group_by_new, new2idx
class LLPFC_DATASET_BASE(torch.utils.data.Dataset):
    """Base dataset for LLPFC forward-correction training.

    ``truncate_data_group`` drops every instance whose group is -1 (unassigned)
    and compacts indices, therefore:
      * ``self.instance2group`` is keyed by the new (compacted) index,
      * ``self.instance2weight`` stays keyed by the ORIGINAL index and is
        looked up through ``self.new2idx`` in the subclasses' ``__getitem__``,
      * ``self.group2transition`` maps a group id to its transition matrix.
    """

    def __init__(self, data, noisy_y, group2transition, instance2weight, instance2group, transform):
        self.data, self.noisy_y, self.instance2group, self.new2idx = truncate_data_group(data, noisy_y, instance2group)
        self.group2transition = group2transition
        self.instance2weight = instance2weight
        self.transform = transform

    def __len__(self):
        return len(self.data)
class FORWARD_CORRECT_CIFAR10(LLPFC_DATASET_BASE):
    """CIFAR10 variant: images are arrays accepted directly by
    ``Image.fromarray``; each item is (image, noisy label, transition matrix,
    weight)."""

    def __getitem__(self, index):
        img, y_ = self.data[index], self.noisy_y[index]
        trans_m = self.group2transition[self.instance2group[index]]
        # Weights are keyed by the original (pre-truncation) index.
        weight = self.instance2weight[self.new2idx[index]]
        img = Image.fromarray(img)
        if self.transform is not None:
            img = self.transform(img)
        return img, int(y_), torch.tensor(trans_m, dtype=None), weight
class FORWARD_CORRECT_SVHN(LLPFC_DATASET_BASE):
    """SVHN variant: raw images are stored channel-first and are transposed to
    HWC before the PIL conversion."""

    def __getitem__(self, index):
        img, y_ = self.data[index], self.noisy_y[index]
        img = Image.fromarray(np.transpose(img, (1, 2, 0)))
        trans_m = self.group2transition[self.instance2group[index]]
        # Weights are keyed by the original (pre-truncation) index.
        weight = self.instance2weight[self.new2idx[index]]
        if self.transform is not None:
            img = self.transform(img)
        return img, int(y_), torch.tensor(trans_m, dtype=None), weight
class FORWARD_CORRECT_MNIST(LLPFC_DATASET_BASE):  # this should work for both EMNIST and MNIST
    """(E)MNIST variant: images are torch tensors, converted to grayscale
    ('L' mode) PIL images."""

    def __getitem__(self, index):
        img, y_ = self.data[index], self.noisy_y[index]
        trans_m = self.group2transition[self.instance2group[index]]
        # Weights are keyed by the original (pre-truncation) index.
        weight = self.instance2weight[self.new2idx[index]]
        img = Image.fromarray(img.numpy(), mode='L')
        if self.transform is not None:
            img = self.transform(img)
        return img, int(y_), torch.tensor(trans_m, dtype=None), weight
| 2,296 | 34.890625 | 113 | py |
LLPFC | LLPFC-main/llpfclib/train_fun.py | import torch
import torch.nn as nn
def test_model(model, test_loader, criterion, device):
# test a model with fully label dataset
model.eval()
with torch.no_grad():
correct = 0
total = 0
total_loss = 0
for images, labels in test_loader:
images = images.to(device)
labels = labels.to(device)
outputs = model(images)
_, predicted = torch.max(outputs.data, 1)
total += labels.size(0)
correct += (predicted == labels).sum().item()
prob = nn.functional.softmax(outputs, dim=1)
loss = criterion(prob, labels, device)
total_loss += loss.item()
return correct / total, total_loss / total
def validate_model_forward(model, loss_f_val, val_loader, device):
    """Average forward-corrected loss of ``model`` over ``val_loader``.

    Returns the summed minibatch losses divided by the number of examples.
    """
    model.eval()
    total_loss = 0
    total = 0
    # Fix: run validation under no_grad so it does not build the autograd
    # graph (saves memory/time; the returned value is unchanged).
    with torch.no_grad():
        for i, (images, noisy_y, trans_m, weights) in enumerate(val_loader):
            total_loss += compute_forward_loss_on_minibatch(model, loss_f_val, images, noisy_y, trans_m, weights,
                                                            device).item()
            total += noisy_y.size(0)
    return total_loss / total
def train_model_forward_one_epoch(model, loss_f, optimizer, train_loader, device, epoch, scheduler, logger):
    """One epoch of forward-corrected (LLPFC) training.

    ``loss_f`` is called as ``loss_f(corrected_probs, noisy_y, weights, device)``.
    How ``scheduler`` is stepped depends on its type (see comments below).
    """
    # train the model one epoch with forward correction
    # label input of loss_f must be an integer
    model.train()
    total_step = len(train_loader)
    for i, (images, noisy_y, trans_m, weights) in enumerate(train_loader):
        loss = compute_forward_loss_on_minibatch(model, loss_f, images, noisy_y, trans_m, weights, device)

        # Backward pass
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        if (i + 1) % 100 == 0:
            logger.info('    Step [{}/{}], Loss: {:.4f}'.format(i + 1, total_step, loss.item()))
        # Cosine warm-restart schedules are stepped fractionally every batch.
        if type(scheduler) == torch.optim.lr_scheduler.CosineAnnealingWarmRestarts:
            scheduler.step(epoch + i / total_step)
    # The remaining schedulers are stepped once per epoch: MultiStepLR
    # unconditionally, ReduceLROnPlateau on the loss re-measured over the
    # *training* loader.
    if type(scheduler) == torch.optim.lr_scheduler.MultiStepLR:
        scheduler.step()
    elif type(scheduler) == torch.optim.lr_scheduler.ReduceLROnPlateau:
        scheduler.step(validate_model_forward(model, loss_f, train_loader, device))
def compute_forward_loss_on_minibatch(model, loss_f, images, noisy_y, trans_m, weights, device):
    """Forward-corrected loss for one minibatch.

    The clean-class posterior ``softmax(model(images))`` is multiplied by each
    example's transition matrix before being scored by ``loss_f`` against the
    noisy labels.
    """
    # Move tensors to the configured device
    images = images.to(device)
    noisy_y = noisy_y.to(device)
    trans_m = trans_m.to(device)
    weights = weights.to(device)

    logits = model(images)
    clean_prob = nn.functional.softmax(logits, dim=1)
    # (B, C, C) @ (B, C, 1) -> (B, C, 1): apply the per-example transition.
    batch = clean_prob.shape[0]
    noisy_prob = torch.bmm(trans_m.float(), clean_prob.reshape(batch, -1, 1)).reshape(batch, -1)
    return loss_f(noisy_prob, noisy_y, weights, device)
| 2,465 | 34.73913 | 118 | py |
LLPFC | LLPFC-main/models/ResNet.py | # code in this file is modified from https://github.com/pytorch/vision/blob/master/torchvision/models/resnet.py
import torch
from torch import Tensor
import torch.nn as nn
from typing import Type, Any, Callable, Union, List, Optional
def conv3x3(in_planes: int, out_planes: int, stride: int = 1, groups: int = 1, dilation: int = 1) -> nn.Conv2d:
    """3x3 convolution with padding"""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=dilation,  # padding == dilation keeps the spatial size at stride 1
        groups=groups,
        dilation=dilation,
        bias=False,
    )
def conv1x1(in_planes: int, out_planes: int, stride: int = 1) -> nn.Conv2d:
    """1x1 convolution"""
    conv = nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
    return conv
class BasicBlock(nn.Module):
    """Two-conv residual block used by the shallower ResNets.

    NOTE: the submodule attribute names (conv1, bn1, conv2, bn2, downsample)
    are part of the state_dict keys and must not be renamed.
    """
    expansion: int = 1

    def __init__(
        self,
        inplanes: int,
        planes: int,
        stride: int = 1,
        downsample: Optional[nn.Module] = None,
        groups: int = 1,
        base_width: int = 64,
        dilation: int = 1,
        norm_layer: Optional[Callable[..., nn.Module]] = None
    ) -> None:
        super(BasicBlock, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        if groups != 1 or base_width != 64:
            raise ValueError('BasicBlock only supports groups=1 and base_width=64')
        if dilation > 1:
            raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
        # Both self.conv1 and self.downsample layers downsample the input when stride != 1
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = norm_layer(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = norm_layer(planes)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x: Tensor) -> Tensor:
        """conv-bn-relu, conv-bn, add the (possibly downsampled) identity, relu."""
        identity = x

        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)

        out = self.conv2(out)
        out = self.bn2(out)

        # Project the identity when the main path changed shape (stride/channels).
        if self.downsample is not None:
            identity = self.downsample(x)

        out += identity
        out = self.relu(out)

        return out
class Bottleneck(nn.Module):
    # Bottleneck in torchvision places the stride for downsampling at 3x3 convolution(self.conv2)
    # while original implementation places the stride at the first 1x1 convolution(self.conv1)
    # according to "Deep residual learning for image recognition"https://arxiv.org/abs/1512.03385.
    # This variant is also known as ResNet V1.5 and improves accuracy according to
    # https://ngc.nvidia.com/catalog/model-scripts/nvidia:resnet_50_v1_5_for_pytorch.
    # NOTE: submodule attribute names (conv1..conv3, bn1..bn3, downsample) are
    # part of the state_dict keys and must not be renamed.

    expansion: int = 4

    def __init__(
        self,
        inplanes: int,
        planes: int,
        stride: int = 1,
        downsample: Optional[nn.Module] = None,
        groups: int = 1,
        base_width: int = 64,
        dilation: int = 1,
        norm_layer: Optional[Callable[..., nn.Module]] = None
    ) -> None:
        super(Bottleneck, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        # Internal width scales with base_width and groups (ResNeXt-style).
        width = int(planes * (base_width / 64.)) * groups
        # Both self.conv2 and self.downsample layers downsample the input when stride != 1
        self.conv1 = conv1x1(inplanes, width)
        self.bn1 = norm_layer(width)
        self.conv2 = conv3x3(width, width, stride, groups, dilation)
        self.bn2 = norm_layer(width)
        self.conv3 = conv1x1(width, planes * self.expansion)
        self.bn3 = norm_layer(planes * self.expansion)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x: Tensor) -> Tensor:
        """1x1 reduce, 3x3 (strided), 1x1 expand, add identity, relu."""
        identity = x

        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)

        out = self.conv2(out)
        out = self.bn2(out)
        out = self.relu(out)

        out = self.conv3(out)
        out = self.bn3(out)

        # Project the identity when the main path changed shape (stride/channels).
        if self.downsample is not None:
            identity = self.downsample(x)

        out += identity
        out = self.relu(out)

        return out
class ResNet(nn.Module):
    """ResNet backbone with a configurable input channel count.

    Differences from the torchvision original: ``num_classes`` and
    ``in_channel`` are required positional arguments (``in_channel`` supports
    grayscale datasets), and ``return_features`` makes ``forward`` also return
    the flattened pre-classifier features.
    """

    def __init__(self,
                 block: Type[Union[BasicBlock, Bottleneck]],
                 layers: List[int],
                 num_classes: int,
                 in_channel: int,  # specify color/gray image
                 zero_init_residual: bool = False,
                 groups: int = 1,
                 width_per_group: int = 64,
                 replace_stride_with_dilation: Optional[List[bool]] = None,
                 norm_layer: Optional[Callable[..., nn.Module]] = None,
                 return_features: bool = False,
                 ) -> None:
        super(ResNet, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        self._norm_layer = norm_layer

        self.inplanes = 64
        self.dilation = 1
        if replace_stride_with_dilation is None:
            # each element in the tuple indicates if we should replace
            # the 2x2 stride with a dilated convolution instead
            replace_stride_with_dilation = [False, False, False]
        if len(replace_stride_with_dilation) != 3:
            raise ValueError("replace_stride_with_dilation should be None "
                             "or a 3-element tuple, got {}".format(replace_stride_with_dilation))
        self.groups = groups
        self.base_width = width_per_group
        # Stem: 7x7 stride-2 conv + 3x3 stride-2 max-pool (4x downsampling).
        self.conv1 = nn.Conv2d(in_channel, self.inplanes, kernel_size=7, stride=2, padding=3,
                               bias=False)
        self.bn1 = norm_layer(self.inplanes)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2, dilate=replace_stride_with_dilation[0])
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2, dilate=replace_stride_with_dilation[1])
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2, dilate=replace_stride_with_dilation[2])
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.fc = nn.Linear(512 * block.expansion, num_classes)

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)

        # Zero-initialize the last BN in each residual branch,
        # so that the residual branch starts with zeros, and each residual block behaves like an identity.
        # This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
        if zero_init_residual:
            for m in self.modules():
                if isinstance(m, Bottleneck):
                    nn.init.constant_(m.bn3.weight, 0)  # type: ignore[arg-type]
                elif isinstance(m, BasicBlock):
                    nn.init.constant_(m.bn2.weight, 0)  # type: ignore[arg-type]
        self.return_features = return_features

    def _make_layer(self, block: Type[Union[BasicBlock, Bottleneck]], planes: int, blocks: int,
                    stride: int = 1, dilate: bool = False) -> nn.Sequential:
        """Stack ``blocks`` residual blocks; only the first block may
        downsample (or dilate instead when ``dilate`` is True)."""
        norm_layer = self._norm_layer
        downsample = None
        previous_dilation = self.dilation
        if dilate:
            self.dilation *= stride
            stride = 1
        if stride != 1 or self.inplanes != planes * block.expansion:
            # Project the identity path to the new shape.
            downsample = nn.Sequential(
                conv1x1(self.inplanes, planes * block.expansion, stride),
                norm_layer(planes * block.expansion),
            )

        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample, self.groups,
                            self.base_width, previous_dilation, norm_layer))
        self.inplanes = planes * block.expansion
        for _ in range(1, blocks):
            layers.append(block(self.inplanes, planes, groups=self.groups,
                                base_width=self.base_width, dilation=self.dilation,
                                norm_layer=norm_layer))

        return nn.Sequential(*layers)

    def _forward_impl(self, x: Tensor):
        """Stem -> 4 stages -> global average pool -> linear classifier."""
        # See note [TorchScript super()]
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)

        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)

        x = self.avgpool(x)
        features = torch.flatten(x, 1)
        out = self.fc(features)
        if self.return_features:
            return out, features
        return out

    def forward(self, x: Tensor):
        """Return logits, or ``(logits, flattened_features)`` when
        ``return_features`` is set."""
        return self._forward_impl(x)
def _resnet(
    arch: str,
    block: Type[Union[BasicBlock, Bottleneck]],
    layers: List[int],
    num_classes: int, in_channel: int, **kwargs: Any
) -> ResNet:
    """Instantiate a ResNet; ``arch`` is accepted for API symmetry with
    torchvision but unused (no pretrained-weight lookup is performed)."""
    return ResNet(block, layers, num_classes, in_channel, **kwargs)
def resnet18(num_classes: int, in_channel: int, **kwargs: Any) -> ResNet:
    r"""ResNet-18 model from
    `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_.
    """
    blocks_per_stage = [2, 2, 2, 2]
    return _resnet('resnet18', BasicBlock, blocks_per_stage, num_classes, in_channel, **kwargs)
| 9,492 | 36.820717 | 111 | py |
LLPFC | LLPFC-main/models/LLPGAN_DIS.py | import torch.nn as nn
class LLPGAN_DIS(nn.Module):
    # use the same discriminator as LLP-GAN paper
    # Conv stack: three dropout-separated groups; the two stride-2 convs
    # reduce H and W by a factor of ~4, then a global average pool collapses
    # the remaining spatial extent before the 64 -> num_class linear head.

    def __init__(self, num_class, image_size, in_channel=3, return_features=False):
        super(LLPGAN_DIS, self).__init__()
        self.conv_layers = nn.Sequential(
            nn.Dropout(p=0.2, ),
            nn.Conv2d(in_channel, 64, 3, padding=1, stride=1),
            nn.ReLU(),
            nn.Conv2d(64, 64, 3, padding=1, stride=1),
            nn.ReLU(),
            nn.Conv2d(64, 64, 3, padding=1, stride=2),
            nn.ReLU(),
            nn.Dropout(p=0.5, ),
            nn.Conv2d(64, 128, 3, padding=1, stride=1),
            nn.ReLU(),
            nn.Conv2d(128, 128, 3, padding=1, stride=1),
            nn.ReLU(),
            nn.Conv2d(128, 128, 3, padding=1, stride=2),
            nn.ReLU(),
            nn.Dropout(p=0.5, ),
            nn.Conv2d(128, 256, 3, padding=1, stride=1),
            nn.ReLU(),
            nn.Conv2d(256, 128, 1, padding=0, stride=1),
            nn.ReLU(),
            nn.Conv2d(128, 64, 1, padding=0, stride=1),
            nn.ReLU(),
        )
        # Pool window = input size divided by 4 (the two stride-2 convs);
        # ``round`` is used rather than ceil/floor, so odd sizes are rounded
        # to the nearest even quotient. Accepts an int or an (H, W) pair.
        if isinstance(image_size, int):
            pool_size = round(round(image_size/2.0)/2.0)
        else:
            pool_size = (round(round(image_size[0]/2.0)/2.0), round(round(image_size[1]/2.0)/2.0))
        self.pool_layer = nn.AvgPool2d(pool_size, stride=pool_size, )
        self.fc_layer = nn.Linear(64, num_class, bias=True)
        self.return_features = return_features

    def forward(self, x):
        """Return class logits, plus the pooled 64-d features when
        ``return_features`` is set."""
        x = self.conv_layers(x)
        features = self.pool_layer(x).reshape(-1, 64)
        out = self.fc_layer(features)
        if self.return_features:
            return out, features
        return out
| 1,421 | 29.913043 | 89 | py |
LLPFC | LLPFC-main/models/vgg.py | # code in this file is modified from https://github.com/pytorch/vision/blob/master/torchvision/models/vgg.py
import torch
import torch.nn as nn
from typing import Union, List, Dict, Any, cast
class VGG(nn.Module):
    """VGG classifier head on top of a supplied feature extractor.

    ``features`` is the conv stack (see ``make_layers``); an adaptive pool
    fixes the feature map at 7x7 before the fully connected classifier.
    With ``return_features`` set, ``forward`` also returns the flattened
    512*7*7 feature vector.
    """

    def __init__(self,
                 features: nn.Module,
                 num_classes: int = 1000,
                 init_weights: bool = True,
                 return_features: bool = False
                 ) -> None:
        super(VGG, self).__init__()
        self.features = features
        self.avgpool = nn.AdaptiveAvgPool2d((7, 7))
        self.classifier = nn.Sequential(
            nn.Linear(512 * 7 * 7, 4096),
            nn.ReLU(True),
            nn.Dropout(),
            nn.Linear(4096, 4096),
            nn.ReLU(True),
            nn.Dropout(),
            nn.Linear(4096, num_classes),
        )
        if init_weights:
            self._initialize_weights()
        self.return_features = return_features

    def forward(self, x: torch.Tensor):
        """Return logits, or ``(logits, flattened_features)`` when
        ``return_features`` is set."""
        x = self.features(x)
        x = self.avgpool(x)
        features = torch.flatten(x, 1)
        out = self.classifier(features)
        if self.return_features:
            return out, features
        return out

    def _initialize_weights(self) -> None:
        """Kaiming init for convs, constant init for BN, small-normal init for
        linear layers (torchvision's standard VGG initialization)."""
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear):
                nn.init.normal_(m.weight, 0, 0.01)
                nn.init.constant_(m.bias, 0)
def make_layers(in_channels: int, cfg: List[Union[str, int]], batch_norm: bool = False) -> nn.Sequential:
    """Build the VGG feature extractor from a configuration list.

    Each int entry adds a 3x3 conv (+ optional BatchNorm) followed by ReLU;
    the string 'M' adds a 2x2 max-pool. ``in_channels`` is the number of
    channels of the network input (3 for RGB, 1 for grayscale).

    Fix: removed the no-op self-assignment ``in_channels = in_channels``.
    """
    layers: List[nn.Module] = []
    for v in cfg:
        if v == 'M':
            layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
        else:
            v = cast(int, v)
            conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1)
            if batch_norm:
                layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)]
            else:
                layers += [conv2d, nn.ReLU(inplace=True)]
            in_channels = v  # the next conv consumes this layer's output channels
    return nn.Sequential(*layers)
# VGG feature-extractor configurations: ints are conv output channels, 'M'
# inserts a 2x2 max-pool. 'D' is consumed by vgg16_bn and 'E' by vgg19_bn.
cfgs: Dict[str, List[Union[str, int]]] = {
    'A': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
    'B': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
    'D': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],
    'E': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'],
}
def _vgg(num_classes: int, in_channels: int, cfg: str, batch_norm: bool, **kwargs: Any) -> VGG:
    """Assemble a VGG from the named configuration in ``cfgs``."""
    feature_extractor = make_layers(in_channels, cfgs[cfg], batch_norm=batch_norm)
    return VGG(feature_extractor, num_classes=num_classes, **kwargs)
def vgg19_bn(num_classes: int, in_channels: int, **kwargs: Any) -> VGG:
    r"""VGG 19-layer model (configuration 'E') with batch normalization
    `"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_.
    """
    return _vgg(num_classes, in_channels, cfg='E', batch_norm=True, **kwargs)
def vgg16_bn(num_classes: int, in_channels: int, **kwargs: Any) -> VGG:
    r"""VGG 16-layer model (configuration "D") with batch normalization
    `"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_.
    """
    return _vgg(num_classes, in_channels, cfg='D', batch_norm=True, **kwargs)
| 3,722 | 37.78125 | 114 | py |
LLPFC | LLPFC-main/models/WideRes.py | # The code in this file is modified from https://github.com/kevinorjohn/LLP-VAT
# MIT License
#
# Copyright (c) 2020 Kuen-Han Tsai
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
def wide_resnet_d_w(d, w, **kwargs):
    """Construct a WideResNet of depth ``d`` and widen factor ``w`` and apply
    the Xavier/constant initialization defined in ``conv_init``."""
    model = WideResNet(d, w, **kwargs)
    model.apply(conv_init)
    return model
class GaussianNoise(nn.Module):
    """Additive zero-mean Gaussian noise with a fixed standard deviation.

    Note: the noise is applied in both train and eval modes.
    """

    def __init__(self, std):
        super(GaussianNoise, self).__init__()
        self.std = std

    def forward(self, x):
        noise = torch.normal(torch.zeros_like(x), std=self.std)
        return x + noise
def conv3x3(in_planes, out_planes, stride=1):
    """3x3 convolution with padding 1 and a bias term (WideResNet style)."""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=1,
        bias=True,
    )
def conv_init(m):
    """Weight-init hook for ``Module.apply``: Xavier-uniform (gain sqrt(2))
    for conv weights, constants (1, 0) for batch-norm; biases zeroed."""
    name = m.__class__.__name__
    if 'Conv' in name:
        nn.init.xavier_uniform_(m.weight, gain=np.sqrt(2))
        nn.init.constant_(m.bias, 0)
    elif 'BatchNorm' in name:
        nn.init.constant_(m.weight, 1)
        nn.init.constant_(m.bias, 0)
class WideBasic(nn.Module):
    """Pre-activation wide residual block: BN-ReLU-conv, dropout,
    BN-ReLU-(strided)conv, plus a 1x1 shortcut when the shape changes.

    NOTE: submodule attribute names are part of the state_dict keys.
    """

    def __init__(self, in_planes, planes, dropout_rate, stride=1):
        super(WideBasic, self).__init__()
        self.bn1 = nn.BatchNorm2d(in_planes)
        self.conv1 = nn.Conv2d(in_planes,
                               planes,
                               kernel_size=3,
                               padding=1,
                               bias=True)
        self.dropout = nn.Dropout(p=dropout_rate)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes,
                               planes,
                               kernel_size=3,
                               stride=stride,
                               padding=1,
                               bias=True)

        # Identity shortcut, replaced by a 1x1 projection when the main path
        # changes resolution or channel count.
        self.shortcut = nn.Sequential()
        if stride != 1 or in_planes != planes:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes,
                          planes,
                          kernel_size=1,
                          stride=stride,
                          bias=True), )

    def forward(self, x):
        out = self.dropout(self.conv1(F.relu(self.bn1(x))))
        out = self.conv2(F.relu(self.bn2(out)))
        out += self.shortcut(x)

        return out
class WideResNet(nn.Module):
    """Wide residual network (WRN-depth-widen_factor) for small images.

    Args:
        depth: total depth; must satisfy ``depth = 6n + 4``.
        widen_factor: channel multiplier ``k``.
        dropout_rate: dropout probability inside each WideBasic block.
        num_classes: size of the final linear layer.
        in_channel: input channels (3 for color, 1 for grayscale).
        image_size: input side length; only 32 (CIFAR10/SVHN) and 28 (EMNIST)
            are supported.
        return_features: when True, ``forward`` also returns the pooled
            feature vector.
    """

    def __init__(self, depth, widen_factor, dropout_rate, num_classes, in_channel, image_size, return_features=False):
        super(WideResNet, self).__init__()
        self.in_planes = 16

        # The final average pool must collapse the 4x-downsampled feature map
        # to 1x1, so only the two supported input sizes are accepted.
        if image_size == 32:  # CIFAR10, SVHN
            self.pool_size = 8
        elif image_size == 28:  # EMNIST
            self.pool_size = 7
        else:
            # Fix: previously an unsupported size left ``pool_size`` unset and
            # surfaced much later as an AttributeError in forward(); fail fast.
            raise ValueError("unsupported image_size %r; expected 32 or 28" % (image_size,))
        assert ((depth - 4) % 6 == 0), 'Wide-resnet depth should be 6n+4'
        n = (depth - 4) // 6
        k = widen_factor

        print('| Wide-Resnet %dx%d' % (depth, k))
        nStages = [16, 16 * k, 32 * k, 64 * k]

        self.conv1 = conv3x3(in_channel, nStages[0])
        self.layer1 = self._wide_layer(WideBasic,
                                       nStages[1],
                                       n,
                                       dropout_rate,
                                       stride=1)
        self.layer2 = self._wide_layer(WideBasic,
                                       nStages[2],
                                       n,
                                       dropout_rate,
                                       stride=2)
        self.layer3 = self._wide_layer(WideBasic,
                                       nStages[3],
                                       n,
                                       dropout_rate,
                                       stride=2)
        self.bn1 = nn.BatchNorm2d(nStages[3], momentum=0.9)
        self.linear = nn.Linear(nStages[3], num_classes)
        self.return_features = return_features

    def _wide_layer(self, block, planes, num_blocks, dropout_rate, stride):
        """Stack ``num_blocks`` blocks; only the first may downsample."""
        strides = [stride] + [1] * (num_blocks - 1)
        layers = []

        for stride in strides:
            layers.append(block(self.in_planes, planes, dropout_rate, stride))
            self.in_planes = planes

        return nn.Sequential(*layers)

    def forward(self, x):
        """Return logits, or ``(logits, pooled_features)`` when
        ``return_features`` is set."""
        out = self.conv1(x)
        out = self.layer1(out)
        out = self.layer2(out)
        out = self.layer3(out)
        out = F.relu(self.bn1(out))
        out = F.avg_pool2d(out, self.pool_size)
        features = out.view(out.size(0), -1)
        out = self.linear(features)
        if self.return_features:
            return out, features
        return out
| 5,794 | 35.21875 | 118 | py |
LLPFC | LLPFC-main/models/densenet.py | # code in this file is modified from https://github.com/pytorch/vision/blob/master/torchvision/models/densenet.py
import re
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.checkpoint as cp
from collections import OrderedDict
from torch import Tensor
from typing import Any, List, Tuple
class _DenseLayer(nn.Module):
    """One dense layer: BN-ReLU-1x1 conv (bottleneck) then BN-ReLU-3x3 conv,
    consuming the concatenation of all previous feature maps and emitting
    ``growth_rate`` new channels.

    NOTE: submodule names registered via ``add_module`` are part of the
    state_dict keys; the torchscript overload machinery below is
    order-sensitive — do not restructure.
    """

    def __init__(self,
                 num_input_features: int,
                 growth_rate: int,
                 bn_size: int,
                 drop_rate: float,
                 memory_efficient: bool = False
                 ) -> None:
        super(_DenseLayer, self).__init__()
        self.norm1: nn.BatchNorm2d
        self.add_module('norm1', nn.BatchNorm2d(num_input_features))
        self.relu1: nn.ReLU
        self.add_module('relu1', nn.ReLU(inplace=True))
        self.conv1: nn.Conv2d
        self.add_module('conv1', nn.Conv2d(num_input_features, bn_size *
                                           growth_rate, kernel_size=1, stride=1,
                                           bias=False))
        self.norm2: nn.BatchNorm2d
        self.add_module('norm2', nn.BatchNorm2d(bn_size * growth_rate))
        self.relu2: nn.ReLU
        self.add_module('relu2', nn.ReLU(inplace=True))
        self.conv2: nn.Conv2d
        self.add_module('conv2', nn.Conv2d(bn_size * growth_rate, growth_rate,
                                           kernel_size=3, stride=1, padding=1,
                                           bias=False))
        self.drop_rate = float(drop_rate)
        self.memory_efficient = memory_efficient

    def bn_function(self, inputs: List[Tensor]) -> Tensor:
        # Concatenate all previous feature maps, then apply the bottleneck.
        concated_features = torch.cat(inputs, 1)
        bottleneck_output = self.conv1(self.relu1(self.norm1(concated_features)))  # noqa: T484
        return bottleneck_output

    # todo: rewrite when torchscript supports any
    def any_requires_grad(self, input: List[Tensor]) -> bool:
        for tensor in input:
            if tensor.requires_grad:
                return True
        return False

    @torch.jit.unused  # noqa: T484
    def call_checkpoint_bottleneck(self, input: List[Tensor]) -> Tensor:
        # Recompute the bottleneck in the backward pass instead of storing
        # its activations (memory-efficient DenseNet).
        def closure(*inputs):
            return self.bn_function(inputs)

        return cp.checkpoint(closure, *input)

    @torch.jit._overload_method  # noqa: F811
    def forward(self, input: List[Tensor]) -> Tensor:
        pass

    @torch.jit._overload_method  # noqa: F811
    def forward(self, input: Tensor) -> Tensor:
        pass

    # torchscript does not yet support *args, so we overload method
    # allowing it to take either a List[Tensor] or single Tensor
    def forward(self, input: Tensor) -> Tensor:  # noqa: F811
        if isinstance(input, Tensor):
            prev_features = [input]
        else:
            prev_features = input

        if self.memory_efficient and self.any_requires_grad(prev_features):
            if torch.jit.is_scripting():
                raise Exception("Memory Efficient not supported in JIT")

            bottleneck_output = self.call_checkpoint_bottleneck(prev_features)
        else:
            bottleneck_output = self.bn_function(prev_features)

        new_features = self.conv2(self.relu2(self.norm2(bottleneck_output)))
        if self.drop_rate > 0:
            new_features = F.dropout(new_features, p=self.drop_rate,
                                     training=self.training)
        return new_features
class _DenseBlock(nn.ModuleDict):
    """A stack of ``num_layers`` _DenseLayers whose outputs are concatenated:
    layer ``i`` consumes ``num_input_features + i * growth_rate`` channels."""
    _version = 2

    def __init__(
        self,
        num_layers: int,
        num_input_features: int,
        bn_size: int,
        growth_rate: int,
        drop_rate: float,
        memory_efficient: bool = False
    ) -> None:
        super(_DenseBlock, self).__init__()
        for i in range(num_layers):
            layer = _DenseLayer(
                num_input_features + i * growth_rate,
                growth_rate=growth_rate,
                bn_size=bn_size,
                drop_rate=drop_rate,
                memory_efficient=memory_efficient,
            )
            self.add_module('denselayer%d' % (i + 1), layer)

    def forward(self, init_features: Tensor) -> Tensor:
        # Each layer receives the list of ALL previous feature maps; the
        # block returns their channel-wise concatenation.
        features = [init_features]
        for name, layer in self.items():
            new_features = layer(features)
            features.append(new_features)
        return torch.cat(features, 1)
class _Transition(nn.Sequential):
def __init__(self, num_input_features: int, num_output_features: int) -> None:
super(_Transition, self).__init__()
self.add_module('norm', nn.BatchNorm2d(num_input_features))
self.add_module('relu', nn.ReLU(inplace=True))
self.add_module('conv', nn.Conv2d(num_input_features, num_output_features,
kernel_size=1, stride=1, bias=False))
self.add_module('pool', nn.AvgPool2d(kernel_size=2, stride=2))
class DenseNet(nn.Module):
    r"""Densenet-BC model class, based on
    `"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_.

    Args:
        num_classes (int) - number of classification classes
        in_channels (int) - number of input channels (3 color / 1 grayscale)
        growth_rate (int) - how many filters to add each layer (`k` in paper)
        block_config (list of 4 ints) - how many layers in each pooling block
        num_init_features (int) - the number of filters to learn in the first convolution layer
        bn_size (int) - multiplicative factor for number of bottle neck layers
          (i.e. bn_size * k features in the bottleneck layer)
        drop_rate (float) - dropout rate after each dense layer
        memory_efficient (bool) - If True, uses checkpointing. Much more memory efficient,
          but slower. Default: *False*. See `"paper" <https://arxiv.org/pdf/1707.06990.pdf>`_.
        return_features (bool) - If True, forward also returns the pre-pool
          feature map (see note in ``forward``).
    """

    def __init__(self,
                 num_classes: int,
                 in_channels: int,
                 growth_rate: int = 32,
                 block_config: Tuple[int, int, int, int] = (6, 12, 24, 16),
                 num_init_features: int = 64,
                 bn_size: int = 4,
                 drop_rate: float = 0,
                 memory_efficient: bool = False,
                 return_features: bool = False,
                 ) -> None:

        super(DenseNet, self).__init__()

        # First convolution
        self.features = nn.Sequential(OrderedDict([
            ('conv0', nn.Conv2d(in_channels, num_init_features, kernel_size=7, stride=2,
                                padding=3, bias=False)),
            ('norm0', nn.BatchNorm2d(num_init_features)),
            ('relu0', nn.ReLU(inplace=True)),
            ('pool0', nn.MaxPool2d(kernel_size=3, stride=2, padding=1)),
        ]))

        # Each denseblock
        num_features = num_init_features
        for i, num_layers in enumerate(block_config):
            block = _DenseBlock(
                num_layers=num_layers,
                num_input_features=num_features,
                bn_size=bn_size,
                growth_rate=growth_rate,
                drop_rate=drop_rate,
                memory_efficient=memory_efficient
            )
            self.features.add_module('denseblock%d' % (i + 1), block)
            num_features = num_features + num_layers * growth_rate
            # A transition (channel compression + 2x pool) follows every dense
            # block except the last.
            if i != len(block_config) - 1:
                trans = _Transition(num_input_features=num_features,
                                    num_output_features=num_features // 2)
                self.features.add_module('transition%d' % (i + 1), trans)
                num_features = num_features // 2

        # Final batch norm
        self.features.add_module('norm5', nn.BatchNorm2d(num_features))

        # Linear layer
        self.classifier = nn.Linear(num_features, num_classes)

        # Official init from torch repo.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear):
                nn.init.constant_(m.bias, 0)
        self.return_features = return_features

    def forward(self, x: Tensor) -> Tensor:
        features = self.features(x)
        out = F.relu(features, inplace=True)
        out = F.adaptive_avg_pool2d(out, (1, 1))
        out = torch.flatten(out, 1)
        out = self.classifier(out)
        if self.return_features:
            # NOTE(review): unlike the ResNet/VGG/WideResNet models in this
            # repo, this returns the pre-ReLU, pre-pool feature MAP rather
            # than the flattened pooled vector — confirm this is intended.
            return out, features
        return out
def _densenet(num_classes: int, in_channels: int, growth_rate: int, block_config: Tuple[int, int, int, int],
              num_init_features: int, **kwargs: Any) -> DenseNet:
    """Instantiate a :class:`DenseNet` with the given architecture layout."""
    return DenseNet(num_classes, in_channels, growth_rate,
                    block_config, num_init_features, **kwargs)
def densenet121(num_classes: int, in_channels: int, **kwargs: Any) -> DenseNet:
    r"""Densenet-121 model from
    `"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_.

    memory_efficient (bool) - If True, uses checkpointing. Much more memory efficient,
    but slower. Default: *False*. See `"paper" <https://arxiv.org/pdf/1707.06990.pdf>`_.
    """
    # DenseNet-121 layout: growth rate 32, blocks of (6, 12, 24, 16) layers,
    # 64 stem features.
    growth_rate, block_config, num_init_features = 32, (6, 12, 24, 16), 64
    return _densenet(num_classes, in_channels, growth_rate, block_config,
                     num_init_features, **kwargs)
LLPFC | LLPFC-main/models/NIN.py | # code in this file is modified from https://github.com/yangqiongyongyu/Network-In-Network-Pytorch/blob/master/models/nin.py
# hyperparameters selected based on https://worksheets.codalab.org/worksheets/0x7b8f6fbc6b5c49c18ac7ca94aafaa1a7
import torch.nn as nn
import math
class NIN(nn.Module):
    """Network-in-Network classifier (Lin et al., 2014).

    Three "mlpconv" stages (a spatial conv followed by 1x1 convs) separated
    by stride-2 ceil-mode pooling and dropout; the head is a 1x1 conv down
    to ``num_classes`` channels followed by global average pooling.
    """

    def __init__(self, num_classes, image_size, in_channel=3):
        super(NIN, self).__init__()
        self.num_classes = num_classes

        # Spatial side length remaining after the two stride-2 ceil-mode pools.
        def _after_pools(side):
            return math.ceil(math.ceil(side / 2.0 - 0.5) / 2.0 - 0.5)

        if isinstance(image_size, int):
            pool_size = _after_pools(image_size)
        else:
            pool_size = (_after_pools(image_size[0]), _after_pools(image_size[1]))

        stage1 = [
            nn.Conv2d(in_channel, 192, 5, padding=2), nn.ReLU(inplace=True),
            nn.Conv2d(192, 160, 1), nn.ReLU(inplace=True),
            nn.Conv2d(160, 96, 1), nn.ReLU(inplace=True),
            nn.MaxPool2d(3, stride=2, ceil_mode=True),
            nn.Dropout(inplace=True),
        ]
        stage2 = [
            nn.Conv2d(96, 192, 5, padding=2), nn.ReLU(inplace=True),
            nn.Conv2d(192, 192, 1), nn.ReLU(inplace=True),
            nn.Conv2d(192, 192, 1), nn.ReLU(inplace=True),
            nn.AvgPool2d(3, stride=2, ceil_mode=True),
            nn.Dropout(inplace=True),
        ]
        stage3 = [
            nn.Conv2d(192, 192, 3, padding=1), nn.ReLU(inplace=True),
            nn.Conv2d(192, 192, 1), nn.ReLU(inplace=True),
            nn.Conv2d(192, self.num_classes, 1), nn.ReLU(inplace=True),
            nn.AvgPool2d(pool_size, stride=1),
        ]
        self.features = nn.Sequential(*stage1, *stage2, *stage3)
        self._initialize_weights()

    def forward(self, x):
        # Global average pooling leaves a 1x1 map per class channel.
        return self.features(x).view(x.size(0), self.num_classes)

    def _initialize_weights(self):
        # Gaussian init (std 0.05) for conv weights, zero biases,
        # per the reference implementation.
        for module in self.modules():
            if isinstance(module, nn.Conv2d):
                module.weight.data.normal_(0, 0.05)
                if module.bias is not None:
                    module.bias.data.zero_()
LLPFC | LLPFC-main/models/LLPGAN_GEN.py | import torch.nn as nn
class LLPGAN_GEN_MNIST(nn.Module):
    """LLP-GAN generator for MNIST: an MLP mapping noise to a 1-channel image."""

    def __init__(self, noise_size=100, out_h=28, out_w=28):
        self.out_h, self.out_w = out_h, out_w
        super(LLPGAN_GEN_MNIST, self).__init__()
        n_pixels = self.out_h * self.out_w
        # Two hidden FC layers of width 500, then a projection to the flat
        # image; each step is followed by ReLU + BatchNorm1d.
        layers = []
        for in_dim, out_dim in ((noise_size, 500), (500, 500), (500, n_pixels)):
            layers.append(nn.Linear(in_dim, out_dim))
            layers.append(nn.ReLU())
            layers.append(nn.BatchNorm1d(out_dim, eps=1e-05, momentum=0.1, ))
        self.model = nn.Sequential(*layers)

    def forward(self, noise):
        # Reshape the flat pixel vector into a (N, 1, H, W) image batch.
        flat = self.model(noise)
        return flat.reshape(-1, 1, self.out_h, self.out_w)
class LLPGAN_GEN_COLOR(nn.Module):
    """LLP-GAN generator for color data.

    A fully-connected stem expands the noise to a 512x4x4 tensor, which
    three 5x5 transposed convolutions (stride 1) grow to a 3-channel
    16x16 image.
    """

    def __init__(self, noise_size=32*32):
        super(LLPGAN_GEN_COLOR, self).__init__()
        self.linear = nn.Sequential(
            nn.Linear(noise_size, 4*4*512),
            nn.ReLU(),
            nn.BatchNorm1d(4*4*512, eps=1e-05, momentum=0.1, )
        )
        blocks = []
        for c_in, c_out in ((512, 256), (256, 128), (128, 3)):
            blocks += [
                nn.ConvTranspose2d(c_in, c_out, (5, 5)),
                nn.ReLU(),
                nn.BatchNorm2d(c_out, eps=1e-05, momentum=0.1, ),
            ]
        self.trans_conv = nn.Sequential(*blocks)

    def forward(self, noise):
        stem = self.linear(noise)
        # (N, 4*4*512) -> (N, 512, 4, 4) before the transposed convolutions.
        return self.trans_conv(stem.reshape((-1, 512, 4, 4)))
| 1,640 | 29.962264 | 79 | py |
LLPFC | LLPFC-main/llpvatlib/utils.py | # code in this file is modified from https://github.com/kevinorjohn/LLP-VAT/blob/a111d6785e8b0b79761c4d68c5b96288048594d6/llp_vat/
import contextlib
import torch
import torch.nn as nn
import torch.nn.functional as F
@contextlib.contextmanager
def _disable_tracking_bn_stats(model):
def switch_attr(m):
if hasattr(m, 'track_running_stats'):
m.track_running_stats ^= True
model.apply(switch_attr)
yield
model.apply(switch_attr)
def _l2_normalize(d):
d_reshaped = d.view(d.shape[0], -1, *(1 for _ in range(d.dim() - 2)))
d /= torch.norm(d_reshaped, dim=1, keepdim=True) + 1e-8
return d
class VATLoss(nn.Module):
    # Virtual Adversarial Training loss (Miyato et al.): the KL divergence
    # between the model's prediction at x and at x plus a small adversarial
    # perturbation found by power iteration on the KL gradient.

    def __init__(self, xi, eps, ip):
        """VAT loss
        :param xi: hyperparameter of VAT (default: 10.0)
        :param eps: hyperparameter of VAT (default: 1.0)
        :param ip: iteration times of computing adv noise (default: 1)
        """
        super(VATLoss, self).__init__()
        self.xi = xi
        self.eps = eps
        self.ip = ip

    def forward(self, model, x):
        # Reference prediction is treated as a constant target (no grad).
        with torch.no_grad():
            pred = F.softmax(model(x), dim=1)

        # prepare random unit tensor
        # d = torch.rand(x.shape).sub(0.5).to(x.device)
        d = torch.randn_like(x)
        d = _l2_normalize(d)

        with _disable_tracking_bn_stats(model):
            # calc adversarial direction
            for _ in range(self.ip):
                d.requires_grad_()
                pred_hat = model(x + self.xi * d)
                logp_hat = F.log_softmax(pred_hat, dim=1)
                adv_distance = F.kl_div(logp_hat, pred, reduction='batchmean')
                adv_distance.backward()
                # Power-iteration step: follow the gradient of the KL w.r.t. d.
                # (_l2_normalize mutates d.grad in place and rebinds d to it.)
                d = _l2_normalize(d.grad)
                # Discard gradients the backward pass accumulated in the model.
                model.zero_grad()

            # calc LDS (local distributional smoothness) at the found direction
            r_adv = d * self.eps
            pred_hat = model(x + r_adv)
            logp_hat = F.log_softmax(pred_hat, dim=1)
            lds = F.kl_div(logp_hat, pred, reduction='batchmean')

        return lds
| 1,989 | 30.587302 | 130 | py |
LLPFC | LLPFC-main/llpvatlib/train_fun.py | import torch
from kllib.train_fun import compute_kl_loss_on_bagbatch
import numpy as np
def sigmoid_rampup(current, rampup_length):
    # modified from https://github.com/kevinorjohn/LLP-VAT/blob/a111d6785e8b0b79761c4d68c5b96288048594d6/llp_vat/
    """Exponential rampup from https://arxiv.org/abs/1610.02242"""
    if rampup_length == 0:
        return 1.0
    clipped = np.clip(current, 0.0, rampup_length)
    phase = 1.0 - clipped / rampup_length
    return float(np.exp(-5.0 * phase * phase))
def get_rampup_weight(weight, iteration, rampup):
    # modified from https://github.com/kevinorjohn/LLP-VAT/blob/a111d6785e8b0b79761c4d68c5b96288048594d6/llp_vat/
    """Scale ``weight`` by the sigmoid rampup factor at ``iteration``."""
    return weight * sigmoid_rampup(iteration, rampup)
def llp_loss_f(model, images, props, vat_loss_f, iteration, device):
    """Return the LLP-VAT loss parts: (proportion loss, rampup alpha, VAT loss)."""
    prop_loss = compute_kl_loss_on_bagbatch(model, images, props, device)
    # hard-coded based on tsai and lin's implementation
    alpha = get_rampup_weight(0.05, iteration, -1)
    # Flatten bags to a plain image batch for the VAT regularizer.
    flat_images = torch.reshape(images, (-1, images.shape[-3], images.shape[-2], images.shape[-1]))
    vat_loss = vat_loss_f(model, flat_images.to(device))
    return prop_loss, alpha, vat_loss
def llpvat_train_by_bag(model, optimizer, train_loader, vat_loss_f, epoch, device, scheduler, logger):
    """Train ``model`` for one epoch on bag-level batches with the LLP-VAT loss.

    ``train_loader`` yields ``(images, props)`` per bag batch; the loss is the
    bag-proportion KL term plus the ramped VAT regularizer.
    """
    model.train()
    total_step = len(train_loader)
    for i, (images, props) in enumerate(train_loader):
        prop_loss, alpha, vat_loss = llp_loss_f(model, images, props, vat_loss_f, i, device)
        loss = prop_loss + alpha * vat_loss
        # Backward pass
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        if (i + 1) % 100 == 0:
            logger.info(' Step [{}/{}], Loss: {:.4f}'.format(i + 1, total_step, loss.item()))
            logger.info(' VAT Loss: {:.4f}'.format(vat_loss.item()))
            logger.info(' KL Loss: {:.4f}'.format(prop_loss.item()))
            logger.info(' alpha = {:.4f}'.format(alpha))
        # Warm-restart cosine schedules advance fractionally every iteration.
        if type(scheduler) == torch.optim.lr_scheduler.CosineAnnealingWarmRestarts:
            scheduler.step(epoch + i / total_step)
        # NOTE(review): MultiStepLR is stepped once per *batch* here, not per
        # epoch — presumably milestones are counted in iterations; confirm
        # against the caller's scheduler configuration.
        if type(scheduler) == torch.optim.lr_scheduler.MultiStepLR:
            scheduler.step()
| 2,296 | 44.94 | 119 | py |
LLPFC | LLPFC-main/kllib/utils.py | from PIL import Image
import torch
import numpy as np
class KL_DATASET_BASE(torch.utils.data.Dataset):
    """Base dataset for bag-level (LLP) training.

    Each dataset item is a whole bag: ``bag2indices`` maps a bag id to the
    indices of its member samples in ``data``, and ``bag2prop`` maps a bag
    id to its label-proportion vector.
    """

    def __init__(self, data, bag2indices, bag2prop, transform):
        super().__init__()
        self.data = data
        self.bag2indices = bag2indices
        self.bag2prop = bag2prop
        self.transform = transform

    def __len__(self):
        # One dataset item per bag.
        return len(self.bag2indices)
class KL_CIFAR10(KL_DATASET_BASE):
    """CIFAR-10 bags: ``data`` holds HWC arrays; items are (bag tensor, proportions)."""

    def __getitem__(self, bag_index):
        members = self.bag2indices[bag_index]
        sample = self.data[0]
        # Bag tensor is (bag_size, C, H, W); the stored arrays are HWC.
        images = torch.zeros((len(members), sample.shape[2], sample.shape[0], sample.shape[1]),
                             dtype=torch.float32)
        for slot, idx in enumerate(members):
            pil_img = Image.fromarray(self.data[idx])
            if self.transform is not None:
                pil_img = self.transform(pil_img)
            images[slot] = pil_img
        return images, self.bag2prop[bag_index]
class KL_SVHN(KL_DATASET_BASE):
    """SVHN bags: ``data`` holds CHW arrays, transposed to HWC before PIL conversion."""

    def __getitem__(self, bag_index):
        members = self.bag2indices[bag_index]
        sample = self.data[0]
        images = torch.zeros((len(members), sample.shape[0], sample.shape[1], sample.shape[2],),
                             dtype=torch.float32)
        for slot, idx in enumerate(members):
            pil_img = Image.fromarray(np.transpose(self.data[idx], (1, 2, 0)))
            if self.transform is not None:
                pil_img = self.transform(pil_img)
            images[slot] = pil_img
        return images, self.bag2prop[bag_index]
class KL_EMNIST(KL_DATASET_BASE):
    """EMNIST bags: grayscale tensors; the transform may resize, so the
    output size is probed once on the first sample."""

    def __init__(self, data, bag2indices, bag2prop, transform):
        super(KL_EMNIST, self).__init__(data, bag2indices, bag2prop, transform)
        probe = self.transform(Image.fromarray(self.data[0].numpy(), mode='L'))
        # need this for resized emnist
        self.new_h, self.new_w = probe.shape[1], probe.shape[2]

    def __getitem__(self, bag_index):
        members = self.bag2indices[bag_index]
        images = torch.zeros((len(members), 1, self.new_h, self.new_w,), dtype=torch.float32)
        for slot, idx in enumerate(members):
            pil_img = Image.fromarray(self.data[idx].numpy(), mode='L')
            if self.transform is not None:
                pil_img = self.transform(pil_img)
            images[slot] = pil_img
        return images, self.bag2prop[bag_index]
| 2,417 | 36.2 | 114 | py |
LLPFC | LLPFC-main/kllib/train_fun.py | import torch
import torch.nn as nn
def compute_kl_loss_on_bagbatch(model, images, props, device, epsilon=1e-8):
    """Cross-entropy between bag label proportions and the model's mean
    predicted class distribution over each bag.

    ``images`` is (batch, bag_size, C, H, W); ``props`` is (batch, n_classes).
    ``epsilon`` keeps the log finite by clamping the mean probabilities.
    """
    # Move tensors to the configured device.
    images, props = images.to(device), props.to(device)

    # Forward pass on all bag members at once.
    n_bags, bag_size = images.shape[0], images.shape[1]
    logits = model(images.reshape((n_bags * bag_size,) + images.shape[2:]))
    member_probs = nn.functional.softmax(logits, dim=-1).reshape((n_bags, bag_size, -1))

    # Mean prediction per bag, clamped away from 0/1 before the log.
    bag_probs = torch.clamp(member_probs.mean(dim=1), epsilon, 1 - epsilon)
    return torch.sum(-props * torch.log(bag_probs), dim=-1).mean()
def validate_model_kl(model, val_loader, device):
    """Return the mean bag-level proportion loss of ``model`` over ``val_loader``.

    Note: this puts the model in eval mode and does not restore the previous
    mode; the training loops switch back to train() themselves.
    """
    model.eval()
    total_loss = 0
    total = 0
    # Validation needs no autograd graph; no_grad saves memory and time
    # (the original built the full graph and never used it).
    with torch.no_grad():
        for images, props in val_loader:
            total_loss += compute_kl_loss_on_bagbatch(model, images, props, device).item()
            total += 1
    return total_loss / total
def kl_train_by_bag(model, optimizer, train_loader, epoch, device, scheduler, logger):
    """Train ``model`` for one epoch on bag batches with the proportion loss."""
    model.train()
    total_step = len(train_loader)
    for i, (images, props) in enumerate(train_loader):
        loss = compute_kl_loss_on_bagbatch(model, images, props, device)
        # Backward pass
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        if (i + 1) % 100 == 0:
            logger.info(' Step [{}/{}], Loss: {:.4f}'.format(i + 1, total_step, loss.item()))
        # Warm-restart cosine schedules advance fractionally every iteration.
        if type(scheduler) == torch.optim.lr_scheduler.CosineAnnealingWarmRestarts:
            scheduler.step(epoch + i / total_step)
        # NOTE(review): both branches below step once per *batch*.  The
        # ReduceLROnPlateau branch re-evaluates on the full training loader
        # every iteration (expensive), and validate_model_kl leaves the model
        # in eval mode for the remaining iterations of this epoch — confirm
        # this is intended.
        if type(scheduler) == torch.optim.lr_scheduler.MultiStepLR:
            scheduler.step()
        elif type(scheduler) == torch.optim.lr_scheduler.ReduceLROnPlateau:
            scheduler.step(validate_model_kl(model, train_loader, device))
| 1,840 | 38.170213 | 107 | py |
LLPFC | LLPFC-main/llpganlib/train_fun.py | import numpy as np
import torch
import torch.nn as nn
from torch.nn.functional import mse_loss
from torch.autograd import Variable
def test_llpgan(model, test_loader, criterion, device):
# test a model with fully label dataset
model.eval()
with torch.no_grad():
correct = 0
total = 0
total_loss = 0
for images, labels in test_loader:
images = images.to(device)
labels = labels.to(device)
outputs, _ = model(images)
_, predicted = torch.max(outputs.data, 1)
total += labels.size(0)
correct += (predicted == labels).sum().item()
prob = nn.functional.softmax(outputs, dim=1)
loss = criterion(prob, labels, device)
total_loss += loss.item()
return correct / total, total_loss / total
def compute_dis_loss(dis, true_images, fake_images, props, device, lambd=1, epsilon=1e-8):
    """Discriminator loss for LLP-GAN.

    Sum of (a) ``lambd`` times a lower bound on the bag-proportion term,
    (b) the "real" log-likelihood term, and (c) the "fake" log-likelihood
    term.  ``dis`` returns ``(class_logits, feature_map)``; an appended
    zero logit plays the role of the fake class.
    """
    # Flatten bags and run the discriminator on real and generated images.
    n_bags, bag_size = true_images.shape[0], true_images.shape[1]
    real_logits, _ = dis(true_images.reshape((n_bags * bag_size,) + true_images.shape[2:]))
    fake_logits, _ = dis(fake_images)

    def _safe_log(t):
        # Clamp away from 0/1 so the log stays finite.
        return torch.log(torch.clamp(t, epsilon, 1 - epsilon))

    # (a) lower bound of the KL between bag proportions and mean prediction:
    # average the per-member log-probabilities instead of the probabilities.
    member_log_probs = _safe_log(nn.functional.softmax(real_logits, dim=-1).reshape((n_bags, bag_size, -1)))
    bag_mean_log_prob = member_log_probs.mean(dim=1)
    lower_kl_loss = -torch.sum(-props * bag_mean_log_prob, dim=-1).mean() * lambd

    # (b) real images should NOT be assigned to the appended fake class.
    real_aug = torch.cat((real_logits, torch.zeros(real_logits.shape[0], 1).to(device)), dim=1)
    p_real = 1 - nn.functional.softmax(real_aug, dim=1)[:, -1]
    real_term = -torch.mean(_safe_log(p_real))

    # (c) fake images SHOULD be assigned to the appended fake class.
    fake_aug = torch.cat((fake_logits, torch.zeros(fake_logits.shape[0], 1).to(device)), dim=1)
    p_fake = nn.functional.softmax(fake_aug, dim=1)[:, -1]
    fake_term = -torch.mean(_safe_log(p_fake))

    return lower_kl_loss + real_term + fake_term
def compute_gen_loss(dis, true_images, fake_images):
    """Generator loss: feature matching — MSE between the discriminator's
    feature maps for real and generated batches."""
    n_bags, bag_size = true_images.shape[0], true_images.shape[1]
    flat_real = true_images.reshape((n_bags * bag_size,) + true_images.shape[2:])
    _, real_features = dis(flat_real)
    _, fake_features = dis(fake_images)
    return mse_loss(fake_features, real_features)
def llpgan_train_by_bag(gen,
                        dis,
                        gen_opt,
                        dis_opt,
                        dis_sch,
                        gen_sch,
                        noise_dim,
                        train_loader,
                        epoch,
                        device,
                        logger
                        ):
    """One epoch of alternating LLP-GAN updates.

    For each bag batch: sample one noise vector per data point, update the
    discriminator on (real bags, detached fakes), then update the generator
    with feature matching against fresh fakes from the same noise.
    """
    gen.train()
    dis.train()
    total_step = len(train_loader)
    for i, (images, props) in enumerate(train_loader):
        true_images = images.to(device)
        props = props.to(device)
        batch_data_points = true_images.shape[0] * true_images.shape[1]
        noise = Variable(torch.FloatTensor(np.random.normal(0, 1, (batch_data_points, noise_dim))).to(device))
        # Discriminator step: fakes are detached so no gradient reaches gen.
        dis_opt.zero_grad()
        dis_loss = compute_dis_loss(dis, true_images, gen(noise).detach(), props, device, lambd=1)
        dis_loss.backward()
        dis_opt.step()
        # Generator step: regenerate from the same noise with the graph intact.
        gen_opt.zero_grad()
        gen_loss = compute_gen_loss(dis, true_images, gen(noise))
        gen_loss.backward()
        gen_opt.step()
        if (i + 1) % 100 == 0:
            logger.info(' Step [{}/{}], dis Loss: {:.4f}'.format(i + 1, total_step, dis_loss.item()))
            logger.info(' gen Loss: {:.4f}'.format(gen_loss.item()))
        # Warm-restart schedules advance fractionally per iteration; the
        # MultiStepLR schedules are stepped once per batch.
        if type(dis_sch) == torch.optim.lr_scheduler.CosineAnnealingWarmRestarts:
            dis_sch.step(epoch + i / total_step)
        if type(gen_sch) == torch.optim.lr_scheduler.CosineAnnealingWarmRestarts:
            gen_sch.step(epoch + i / total_step)
        if type(dis_sch) == torch.optim.lr_scheduler.MultiStepLR:
            dis_sch.step()
        if type(gen_sch) == torch.optim.lr_scheduler.MultiStepLR:
            gen_sch.step()
| 4,536 | 40.245455 | 115 | py |
DehazeFormer | DehazeFormer-main/test.py | import os
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
from pytorch_msssim import ssim
from torch.utils.data import DataLoader
from collections import OrderedDict
from utils import AverageMeter, write_img, chw_to_hwc
from datasets.loader import PairLoader
from models import *
# CLI options: model/dataset selection and I/O directories for evaluation.
parser = argparse.ArgumentParser()
parser.add_argument('--model', default='dehazeformer-s', type=str, help='model name')
parser.add_argument('--num_workers', default=16, type=int, help='number of workers')
parser.add_argument('--data_dir', default='./data/', type=str, help='path to dataset')
parser.add_argument('--save_dir', default='./saved_models/', type=str, help='path to models saving')
parser.add_argument('--result_dir', default='./results/', type=str, help='path to results saving')
parser.add_argument('--dataset', default='RESIDE-IN', type=str, help='dataset name')
parser.add_argument('--exp', default='indoor', type=str, help='experiment setting')
args = parser.parse_args()
def single(save_dir):
    """Load a checkpoint saved from ``nn.DataParallel`` and strip the
    ``module.`` prefix from every key so it fits a single-GPU model."""
    wrapped = torch.load(save_dir)['state_dict']
    # Each key looks like 'module.<param>'; drop the first 7 characters.
    return OrderedDict((key[7:], value) for key, value in wrapped.items())
def test(test_loader, network, result_dir):
    """Evaluate ``network`` on ``test_loader``: log per-image PSNR/SSIM,
    dump restored images, and write a CSV whose final name encodes the
    mean scores."""
    PSNR = AverageMeter()
    SSIM = AverageMeter()
    torch.cuda.empty_cache()
    network.eval()
    os.makedirs(os.path.join(result_dir, 'imgs'), exist_ok=True)
    f_result = open(os.path.join(result_dir, 'results.csv'), 'w')
    for idx, batch in enumerate(test_loader):
        # NOTE(review): 'input' shadows the builtin of the same name.
        input = batch['source'].cuda()
        target = batch['target'].cuda()
        filename = batch['filename'][0]
        with torch.no_grad():
            output = network(input).clamp_(-1, 1)
            # [-1, 1] to [0, 1]
            output = output * 0.5 + 0.5
            target = target * 0.5 + 0.5
            psnr_val = 10 * torch.log10(1 / F.mse_loss(output, target)).item()
            _, _, H, W = output.size()
            # Downsample so the smaller side is ~256 px before SSIM.
            down_ratio = max(1, round(min(H, W) / 256)) # Zhou Wang
            ssim_val = ssim(F.adaptive_avg_pool2d(output, (int(H / down_ratio), int(W / down_ratio))),
                            F.adaptive_avg_pool2d(target, (int(H / down_ratio), int(W / down_ratio))),
                            data_range=1, size_average=False).item()
        PSNR.update(psnr_val)
        SSIM.update(ssim_val)
        print('Test: [{0}]\t'
              'PSNR: {psnr.val:.02f} ({psnr.avg:.02f})\t'
              'SSIM: {ssim.val:.03f} ({ssim.avg:.03f})'
              .format(idx, psnr=PSNR, ssim=SSIM))
        f_result.write('%s,%.02f,%.03f\n'%(filename, psnr_val, ssim_val))
        out_img = chw_to_hwc(output.detach().cpu().squeeze(0).numpy())
        write_img(os.path.join(result_dir, 'imgs', filename), out_img)
    f_result.close()
    # Encode the aggregate scores in the results filename.
    os.rename(os.path.join(result_dir, 'results.csv'),
              os.path.join(result_dir, '%.02f | %.04f.csv'%(PSNR.avg, SSIM.avg)))
if __name__ == '__main__':
    # Build the network from its name (e.g. 'dehazeformer-s' -> dehazeformer_s()).
    network = eval(args.model.replace('-', '_'))()
    network.cuda()
    saved_model_dir = os.path.join(args.save_dir, args.exp, args.model+'.pth')
    if os.path.exists(saved_model_dir):
        print('==> Start testing, current model name: ' + args.model)
        # 'single' strips the DataParallel 'module.' prefix from checkpoint keys.
        network.load_state_dict(single(saved_model_dir))
    else:
        print('==> No existing trained model!')
        exit(0)
    dataset_dir = os.path.join(args.data_dir, args.dataset)
    test_dataset = PairLoader(dataset_dir, 'test', 'test')
    # batch_size=1: metrics and image dumps are per single image.
    test_loader = DataLoader(test_dataset,
                             batch_size=1,
                             num_workers=args.num_workers,
                             pin_memory=True)
    result_dir = os.path.join(args.result_dir, args.dataset, args.model)
test(test_loader, network, result_dir) | 3,451 | 30.962963 | 100 | py |
DehazeFormer | DehazeFormer-main/train.py | import os
import argparse
import json
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.cuda.amp import autocast, GradScaler
from torch.utils.data import DataLoader
from tensorboardX import SummaryWriter
from tqdm import tqdm
from utils import AverageMeter
from datasets.loader import PairLoader
from models import *
# CLI options: model/dataset selection, AMP toggle, output dirs and GPU list.
parser = argparse.ArgumentParser()
parser.add_argument('--model', default='dehazeformer-s', type=str, help='model name')
parser.add_argument('--num_workers', default=16, type=int, help='number of workers')
# NOTE(review): store_false with default=True means passing --no_autocast
# sets the flag to False; train() feeds this value straight to autocast(...).
parser.add_argument('--no_autocast', action='store_false', default=True, help='disable autocast')
parser.add_argument('--save_dir', default='./saved_models/', type=str, help='path to models saving')
parser.add_argument('--data_dir', default='./data/', type=str, help='path to dataset')
parser.add_argument('--log_dir', default='./logs/', type=str, help='path to logs')
parser.add_argument('--dataset', default='RESIDE-IN', type=str, help='dataset name')
parser.add_argument('--exp', default='indoor', type=str, help='experiment setting')
parser.add_argument('--gpu', default='0,1,2,3', type=str, help='GPUs used for training')
args = parser.parse_args()
# Restrict visible GPUs before any CUDA initialization.
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
def train(train_loader, network, criterion, optimizer, scaler):
    """One training epoch under automatic mixed precision; returns mean loss."""
    losses = AverageMeter()
    torch.cuda.empty_cache()
    network.train()
    for batch in train_loader:
        hazy = batch['source'].cuda()
        clear = batch['target'].cuda()
        with autocast(args.no_autocast):
            restored = network(hazy)
            loss = criterion(restored, clear)
        losses.update(loss.item())
        optimizer.zero_grad()
        # AMP: scale the loss, step through the GradScaler, then update it.
        scaler.scale(loss).backward()
        scaler.step(optimizer)
        scaler.update()
    return losses.avg
def valid(val_loader, network):
    """Compute mean PSNR of ``network`` over ``val_loader`` (outputs in [-1, 1])."""
    PSNR = AverageMeter()
    torch.cuda.empty_cache()
    network.eval()
    for batch in val_loader:
        hazy = batch['source'].cuda()
        clear = batch['target'].cuda()
        with torch.no_grad():  # torch.no_grad() may cause warning
            restored = network(hazy).clamp_(-1, 1)
        # Map [-1, 1] -> [0, 1], then per-image MSE over (C, H, W).
        mse_loss = F.mse_loss(restored * 0.5 + 0.5, clear * 0.5 + 0.5,
                              reduction='none').mean((1, 2, 3))
        psnr = 10 * torch.log10(1 / mse_loss).mean()
        PSNR.update(psnr.item(), hazy.size(0))
    return PSNR.avg
if __name__ == '__main__':
    # Load the per-experiment hyperparameter file; fall back to default.json.
    setting_filename = os.path.join('configs', args.exp, args.model+'.json')
    if not os.path.exists(setting_filename):
        setting_filename = os.path.join('configs', args.exp, 'default.json')
    with open(setting_filename, 'r') as f:
        setting = json.load(f)
    # Instantiate the model by name and wrap it for multi-GPU training.
    network = eval(args.model.replace('-', '_'))()
    network = nn.DataParallel(network).cuda()
    criterion = nn.L1Loss()
    if setting['optimizer'] == 'adam':
        optimizer = torch.optim.Adam(network.parameters(), lr=setting['lr'])
    elif setting['optimizer'] == 'adamw':
        optimizer = torch.optim.AdamW(network.parameters(), lr=setting['lr'])
    else:
        raise Exception("ERROR: unsupported optimizer")
    # Cosine decay down to 1% of the initial LR over the full run.
    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=setting['epochs'], eta_min=setting['lr'] * 1e-2)
    scaler = GradScaler()
    dataset_dir = os.path.join(args.data_dir, args.dataset)
    train_dataset = PairLoader(dataset_dir, 'train', 'train',
                               setting['patch_size'], setting['edge_decay'], setting['only_h_flip'])
    train_loader = DataLoader(train_dataset,
                              batch_size=setting['batch_size'],
                              shuffle=True,
                              num_workers=args.num_workers,
                              pin_memory=True,
                              drop_last=True)
    val_dataset = PairLoader(dataset_dir, 'test', setting['valid_mode'],
                             setting['patch_size'])
    val_loader = DataLoader(val_dataset,
                            batch_size=setting['batch_size'],
                            num_workers=args.num_workers,
                            pin_memory=True)
    save_dir = os.path.join(args.save_dir, args.exp)
    os.makedirs(save_dir, exist_ok=True)
    # Refuse to overwrite an existing checkpoint for this model/experiment.
    if not os.path.exists(os.path.join(save_dir, args.model+'.pth')):
        print('==> Start training, current model name: ' + args.model)
        # print(network)
        writer = SummaryWriter(log_dir=os.path.join(args.log_dir, args.exp, args.model))
        best_psnr = 0
        for epoch in tqdm(range(setting['epochs'] + 1)):
            loss = train(train_loader, network, criterion, optimizer, scaler)
            writer.add_scalar('train_loss', loss, epoch)
            scheduler.step()
            # Periodically validate; checkpoint only on a PSNR improvement.
            if epoch % setting['eval_freq'] == 0:
                avg_psnr = valid(val_loader, network)
                writer.add_scalar('valid_psnr', avg_psnr, epoch)
                if avg_psnr > best_psnr:
                    best_psnr = avg_psnr
                    torch.save({'state_dict': network.state_dict()},
                               os.path.join(save_dir, args.model+'.pth'))
                writer.add_scalar('best_psnr', best_psnr, epoch)
    else:
        print('==> Existing trained model')
        exit(1)
| 4,879 | 31.972973 | 121 | py |
DehazeFormer | DehazeFormer-main/models/dehazeformer.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import math
import numpy as np
from torch.nn.init import _calculate_fan_in_and_fan_out
from timm.models.layers import to_2tuple, trunc_normal_
class RLN(nn.Module):
    r"""Revised LayerNorm: per-sample normalization over (C, H, W) plus
    learned 1x1-conv maps that turn the removed mean/std back into
    per-channel rescale/rebias terms for later restoration."""

    def __init__(self, dim, eps=1e-5, detach_grad=False):
        super(RLN, self).__init__()
        self.eps = eps
        self.detach_grad = detach_grad

        # Per-channel affine applied to the normalized input.
        self.weight = nn.Parameter(torch.ones((1, dim, 1, 1)))
        self.bias = nn.Parameter(torch.zeros((1, dim, 1, 1)))

        # meta1 maps std -> rescale, meta2 maps mean -> rebias.
        self.meta1 = nn.Conv2d(1, dim, 1)
        self.meta2 = nn.Conv2d(1, dim, 1)

        trunc_normal_(self.meta1.weight, std=.02)
        nn.init.constant_(self.meta1.bias, 1)
        trunc_normal_(self.meta2.weight, std=.02)
        nn.init.constant_(self.meta2.bias, 0)

    def forward(self, input):
        # Normalize over all non-batch dimensions.
        mu = torch.mean(input, dim=(1, 2, 3), keepdim=True)
        sigma = torch.sqrt((input - mu).pow(2).mean(dim=(1, 2, 3), keepdim=True) + self.eps)
        normalized = (input - mu) / sigma

        if self.detach_grad:
            rescale, rebias = self.meta1(sigma.detach()), self.meta2(mu.detach())
        else:
            rescale, rebias = self.meta1(sigma), self.meta2(mu)

        return normalized * self.weight + self.bias, rescale, rebias
class Mlp(nn.Module):
    """Two-layer MLP implemented with 1x1 convolutions and ReLU, using a
    depth-scaled truncated-normal weight init."""

    def __init__(self, network_depth, in_features, hidden_features=None, out_features=None):
        super().__init__()
        hidden_features = hidden_features or in_features
        out_features = out_features or in_features
        self.network_depth = network_depth

        self.mlp = nn.Sequential(
            nn.Conv2d(in_features, hidden_features, 1),
            nn.ReLU(True),
            nn.Conv2d(hidden_features, out_features, 1)
        )

        self.apply(self._init_weights)

    def _init_weights(self, m):
        if isinstance(m, nn.Conv2d):
            # Scale the init std down with total network depth.
            gain = (8 * self.network_depth) ** (-1 / 4)
            fan_in, fan_out = _calculate_fan_in_and_fan_out(m.weight)
            trunc_normal_(m.weight, std=gain * math.sqrt(2.0 / float(fan_in + fan_out)))
            if m.bias is not None:
                nn.init.constant_(m.bias, 0)

    def forward(self, x):
        return self.mlp(x)
def window_partition(x, window_size):
    """Split a (B, H, W, C) tensor into non-overlapping windows, returning
    (num_windows * B, window_size**2, C)."""
    B, H, W, C = x.shape
    tiles = x.view(B, H // window_size, window_size, W // window_size, window_size, C)
    tiles = tiles.permute(0, 1, 3, 2, 4, 5).contiguous()
    return tiles.view(-1, window_size ** 2, C)
def window_reverse(windows, window_size, H, W):
    """Inverse of ``window_partition``: reassemble windows into (B, H, W, C)."""
    B = int(windows.shape[0] / (H * W / window_size / window_size))
    grid = windows.view(B, H // window_size, W // window_size, window_size, window_size, -1)
    grid = grid.permute(0, 1, 3, 2, 4, 5).contiguous()
    return grid.view(B, H, W, -1)
def get_relative_positions(window_size):
    """Log-scaled relative-coordinate table of shape (ws**2, ws**2, 2)."""
    axis = torch.arange(window_size)
    coords = torch.stack(torch.meshgrid([axis, axis]))  # 2, Wh, Ww
    flat = torch.flatten(coords, 1)  # 2, Wh*Ww

    # Pairwise offsets between all positions: one (dy, dx) per pair.
    rel = (flat[:, :, None] - flat[:, None, :]).permute(1, 2, 0).contiguous()

    # Sign-preserving logarithmic compression of the offsets.
    return torch.sign(rel) * torch.log(1. + rel.abs())
class WindowAttention(nn.Module):
    """Multi-head self-attention within a window, using a continuous relative
    position bias produced by a small MLP over log-scaled offsets."""

    def __init__(self, dim, window_size, num_heads):
        super().__init__()
        self.dim = dim
        self.window_size = window_size  # Wh, Ww
        self.num_heads = num_heads
        self.scale = (dim // num_heads) ** -0.5

        self.register_buffer("relative_positions", get_relative_positions(self.window_size))
        # Maps one (dy, dx) offset to one bias value per head.
        self.meta = nn.Sequential(
            nn.Linear(2, 256, bias=True),
            nn.ReLU(True),
            nn.Linear(256, num_heads, bias=True)
        )

        self.softmax = nn.Softmax(dim=-1)

    def forward(self, qkv):
        B_, N, _ = qkv.shape

        qkv = qkv.reshape(B_, N, 3, self.num_heads, self.dim // self.num_heads).permute(2, 0, 3, 1, 4)
        q, k, v = qkv[0], qkv[1], qkv[2]  # make torchscript happy (cannot use tensor as tuple)

        scores = (q * self.scale) @ k.transpose(-2, -1)

        bias = self.meta(self.relative_positions)
        scores = scores + bias.permute(2, 0, 1).contiguous().unsqueeze(0)  # nH, Wh*Ww, Wh*Ww

        weights = self.softmax(scores)
        return (weights @ v).transpose(1, 2).reshape(B_, N, self.dim)
class Attention(nn.Module):
    # Hybrid token mixer: optional shifted-window attention and/or a
    # convolution branch, fused through a shared 1x1 output projection.

    def __init__(self, network_depth, dim, num_heads, window_size, shift_size, use_attn=False, conv_type=None):
        super().__init__()
        self.dim = dim
        self.head_dim = int(dim // num_heads)
        self.num_heads = num_heads

        self.window_size = window_size
        self.shift_size = shift_size

        self.network_depth = network_depth
        self.use_attn = use_attn
        self.conv_type = conv_type

        if self.conv_type == 'Conv':
            # Plain 3x3 conv branch.
            self.conv = nn.Sequential(
                nn.Conv2d(dim, dim, kernel_size=3, padding=1, padding_mode='reflect'),
                nn.ReLU(True),
                nn.Conv2d(dim, dim, kernel_size=3, padding=1, padding_mode='reflect')
            )

        if self.conv_type == 'DWConv':
            # Depthwise 5x5 conv branch (can run in parallel with attention).
            self.conv = nn.Conv2d(dim, dim, kernel_size=5, padding=2, groups=dim, padding_mode='reflect')

        if self.conv_type == 'DWConv' or self.use_attn:
            self.V = nn.Conv2d(dim, dim, 1)
            self.proj = nn.Conv2d(dim, dim, 1)

        if self.use_attn:
            self.QK = nn.Conv2d(dim, dim * 2, 1)
            self.attn = WindowAttention(dim, window_size, num_heads)

        self.apply(self._init_weights)

    def _init_weights(self, m):
        if isinstance(m, nn.Conv2d):
            w_shape = m.weight.shape
            if w_shape[0] == self.dim * 2: # QK
                # QK projection gets a plain Xavier-style trunc-normal init.
                fan_in, fan_out = _calculate_fan_in_and_fan_out(m.weight)
                std = math.sqrt(2.0 / float(fan_in + fan_out))
                trunc_normal_(m.weight, std=std)
            else:
                # All other convs are down-scaled by total network depth.
                gain = (8 * self.network_depth) ** (-1/4)
                fan_in, fan_out = _calculate_fan_in_and_fan_out(m.weight)
                std = gain * math.sqrt(2.0 / float(fan_in + fan_out))
                trunc_normal_(m.weight, std=std)

            if m.bias is not None:
                nn.init.constant_(m.bias, 0)

    def check_size(self, x, shift=False):
        # Reflect-pad so H and W become multiples of window_size; when
        # shifting, also pad the top/left by shift_size so the shifted
        # window grid stays aligned.
        _, _, h, w = x.size()
        mod_pad_h = (self.window_size - h % self.window_size) % self.window_size
        mod_pad_w = (self.window_size - w % self.window_size) % self.window_size

        if shift:
            x = F.pad(x, (self.shift_size, (self.window_size-self.shift_size+mod_pad_w) % self.window_size,
                          self.shift_size, (self.window_size-self.shift_size+mod_pad_h) % self.window_size), mode='reflect')
        else:
            x = F.pad(x, (0, mod_pad_w, 0, mod_pad_h), 'reflect')
        return x

    def forward(self, X):
        B, C, H, W = X.shape

        if self.conv_type == 'DWConv' or self.use_attn:
            V = self.V(X)

        if self.use_attn:
            QK = self.QK(X)
            QKV = torch.cat([QK, V], dim=1)

            # shift
            shifted_QKV = self.check_size(QKV, self.shift_size > 0)
            Ht, Wt = shifted_QKV.shape[2:]

            # partition windows
            shifted_QKV = shifted_QKV.permute(0, 2, 3, 1)
            qkv = window_partition(shifted_QKV, self.window_size)  # nW*B, window_size**2, C

            attn_windows = self.attn(qkv)

            # merge windows
            shifted_out = window_reverse(attn_windows, self.window_size, Ht, Wt)  # B H' W' C

            # reverse cyclic shift (crop back to the original H x W)
            out = shifted_out[:, self.shift_size:(self.shift_size+H), self.shift_size:(self.shift_size+W), :]
            attn_out = out.permute(0, 3, 1, 2)

            if self.conv_type in ['Conv', 'DWConv']:
                # Fuse conv and attention branches through the projection.
                conv_out = self.conv(V)
                out = self.proj(conv_out + attn_out)
            else:
                out = self.proj(attn_out)

        else:
            if self.conv_type == 'Conv':
                out = self.conv(X) # no attention and use conv, no projection
            elif self.conv_type == 'DWConv':
                out = self.proj(self.conv(V))

        return out
class TransformerBlock(nn.Module):
    """DehazeFormer block: (optional) windowed attention then an MLP, each
    with a residual connection; the RLN norm returns rescale/rebias terms
    that are re-applied after the sub-module."""

    def __init__(self, network_depth, dim, num_heads, mlp_ratio=4.,
                 norm_layer=nn.LayerNorm, mlp_norm=False,
                 window_size=8, shift_size=0, use_attn=True, conv_type=None):
        super().__init__()
        self.use_attn = use_attn
        self.mlp_norm = mlp_norm

        # Norms degenerate to identity when attention (or mlp_norm) is off.
        self.norm1 = norm_layer(dim) if use_attn else nn.Identity()
        self.attn = Attention(network_depth, dim, num_heads=num_heads, window_size=window_size,
                              shift_size=shift_size, use_attn=use_attn, conv_type=conv_type)

        self.norm2 = norm_layer(dim) if use_attn and mlp_norm else nn.Identity()
        self.mlp = Mlp(network_depth, dim, hidden_features=int(dim * mlp_ratio))

    def forward(self, x):
        # Attention sub-block with residual.
        residual = x
        if self.use_attn:
            x, rescale, rebias = self.norm1(x)
        x = self.attn(x)
        if self.use_attn:
            x = x * rescale + rebias
        x = residual + x

        # MLP sub-block with residual (normalized only when mlp_norm is set).
        residual = x
        if self.use_attn and self.mlp_norm:
            x, rescale, rebias = self.norm2(x)
        x = self.mlp(x)
        if self.use_attn and self.mlp_norm:
            x = x * rescale + rebias
        x = residual + x
        return x
class BasicLayer(nn.Module):
    """A stage of ``depth`` TransformerBlocks.

    ``attn_ratio`` controls the fraction of blocks that use window
    attention and ``attn_loc`` ('last' | 'first' | 'middle') where those
    attention blocks sit within the stage; the rest are conv-only.
    """

    def __init__(self, network_depth, dim, depth, num_heads, mlp_ratio=4.,
                 norm_layer=nn.LayerNorm, window_size=8,
                 attn_ratio=0., attn_loc='last', conv_type=None):

        super().__init__()
        self.dim = dim
        self.depth = depth

        attn_depth = attn_ratio * depth

        # Decide which block indices get attention.
        if attn_loc == 'last':
            use_attns = [i >= depth-attn_depth for i in range(depth)]
        elif attn_loc == 'first':
            use_attns = [i < attn_depth for i in range(depth)]
        elif attn_loc == 'middle':
            use_attns = [i >= (depth-attn_depth)//2 and i < (depth+attn_depth)//2 for i in range(depth)]
        else:
            # Previously an invalid value fell through to an opaque
            # NameError on `use_attns` below; fail loudly instead.
            raise ValueError("attn_loc must be 'last', 'first' or 'middle', got %r" % (attn_loc,))

        # build blocks; odd-indexed blocks use a shifted window.
        self.blocks = nn.ModuleList([
            TransformerBlock(network_depth=network_depth,
                             dim=dim,
                             num_heads=num_heads,
                             mlp_ratio=mlp_ratio,
                             norm_layer=norm_layer,
                             window_size=window_size,
                             shift_size=0 if (i % 2 == 0) else window_size // 2,
                             use_attn=use_attns[i], conv_type=conv_type)
            for i in range(depth)])

    def forward(self, x):
        for blk in self.blocks:
            x = blk(x)
        return x
class PatchEmbed(nn.Module):
    """Strided-conv patch embedding: (B, in_chans, H, W) -> (B, embed_dim, H/p, W/p)."""

    def __init__(self, patch_size=4, in_chans=3, embed_dim=96, kernel_size=None):
        super().__init__()
        self.in_chans = in_chans
        self.embed_dim = embed_dim

        # Kernel defaults to the patch size (non-overlapping patches); the
        # reflect padding keeps the output exactly H/patch x W/patch.
        kernel_size = patch_size if kernel_size is None else kernel_size
        self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=kernel_size, stride=patch_size,
                              padding=(kernel_size - patch_size + 1) // 2, padding_mode='reflect')

    def forward(self, x):
        return self.proj(x)
class PatchUnEmbed(nn.Module):
	"""Patch un-embedding: a conv expands channels by patch_size**2, then
	PixelShuffle rearranges them into a patch_size-times larger feature map.
	"""
	def __init__(self, patch_size=4, out_chans=3, embed_dim=96, kernel_size=None):
		super().__init__()
		self.out_chans = out_chans
		self.embed_dim = embed_dim

		kernel_size = 1 if kernel_size is None else kernel_size

		self.proj = nn.Sequential(
			nn.Conv2d(embed_dim, out_chans*patch_size**2, kernel_size=kernel_size,
					  padding=kernel_size//2, padding_mode='reflect'),
			nn.PixelShuffle(patch_size)
		)

	def forward(self, x):
		"""Map (B, embed_dim, H, W) -> (B, out_chans, H*ps, W*ps)."""
		return self.proj(x)
class SKFusion(nn.Module):
	"""Selective-kernel style fusion of `height` feature maps.

	A channel-attention MLP on the globally pooled branch sum produces one
	softmax weight per (branch, channel); branches are combined as their
	attention-weighted sum.
	"""
	def __init__(self, dim, height=2, reduction=8):
		super(SKFusion, self).__init__()

		self.height = height
		d = max(int(dim/reduction), 4)

		self.avg_pool = nn.AdaptiveAvgPool2d(1)
		self.mlp = nn.Sequential(
			nn.Conv2d(dim, d, 1, bias=False),
			nn.ReLU(),
			nn.Conv2d(d, dim*height, 1, bias=False)
		)

		self.softmax = nn.Softmax(dim=1)

	def forward(self, in_feats):
		"""Fuse a list of `height` tensors of shape (B, C, H, W) into one."""
		B, C, H, W = in_feats[0].shape

		stacked = torch.cat(in_feats, dim=1).view(B, self.height, C, H, W)

		# channel attention from the summed branches; softmax over branches
		attn = self.mlp(self.avg_pool(stacked.sum(dim=1)))
		attn = self.softmax(attn.view(B, self.height, C, 1, 1))

		return (stacked * attn).sum(dim=1)
class DehazeFormer(nn.Module):
	"""U-shaped dehazing network: 3 encoder stages, 2 decoder stages, with
	SK-fusion skip connections and a soft-reconstruction output head.

	`out_chans` = 4 is split in `forward` into a 1-channel K map and a
	3-channel B map, producing the output `K * x - B + x`.
	"""
	def __init__(self, in_chans=3, out_chans=4, window_size=8,
				 embed_dims=[24, 48, 96, 48, 24],
				 mlp_ratios=[2., 4., 4., 2., 2.],
				 depths=[16, 16, 16, 8, 8],
				 num_heads=[2, 4, 6, 1, 1],
				 attn_ratio=[1/4, 1/2, 3/4, 0, 0],
				 conv_type=['DWConv', 'DWConv', 'DWConv', 'DWConv', 'DWConv'],
				 norm_layer=[RLN, RLN, RLN, RLN, RLN]):
		super(DehazeFormer, self).__init__()
		# setting
		self.patch_size = 4
		self.window_size = window_size
		self.mlp_ratios = mlp_ratios
		# split image into non-overlapping patches
		self.patch_embed = PatchEmbed(
			patch_size=1, in_chans=in_chans, embed_dim=embed_dims[0], kernel_size=3)
		# backbone
		self.layer1 = BasicLayer(network_depth=sum(depths), dim=embed_dims[0], depth=depths[0],
								 num_heads=num_heads[0], mlp_ratio=mlp_ratios[0],
								 norm_layer=norm_layer[0], window_size=window_size,
								 attn_ratio=attn_ratio[0], attn_loc='last', conv_type=conv_type[0])
		# 2x downsample between stage 1 and 2
		self.patch_merge1 = PatchEmbed(
			patch_size=2, in_chans=embed_dims[0], embed_dim=embed_dims[1])
		# 1x1 conv applied to the stage-1 skip before fusion
		self.skip1 = nn.Conv2d(embed_dims[0], embed_dims[0], 1)
		self.layer2 = BasicLayer(network_depth=sum(depths), dim=embed_dims[1], depth=depths[1],
								 num_heads=num_heads[1], mlp_ratio=mlp_ratios[1],
								 norm_layer=norm_layer[1], window_size=window_size,
								 attn_ratio=attn_ratio[1], attn_loc='last', conv_type=conv_type[1])
		self.patch_merge2 = PatchEmbed(
			patch_size=2, in_chans=embed_dims[1], embed_dim=embed_dims[2])
		self.skip2 = nn.Conv2d(embed_dims[1], embed_dims[1], 1)
		# bottleneck stage at 1/4 resolution
		self.layer3 = BasicLayer(network_depth=sum(depths), dim=embed_dims[2], depth=depths[2],
								 num_heads=num_heads[2], mlp_ratio=mlp_ratios[2],
								 norm_layer=norm_layer[2], window_size=window_size,
								 attn_ratio=attn_ratio[2], attn_loc='last', conv_type=conv_type[2])
		# 2x upsample back towards stage-2 resolution
		self.patch_split1 = PatchUnEmbed(
			patch_size=2, out_chans=embed_dims[3], embed_dim=embed_dims[2])
		assert embed_dims[1] == embed_dims[3]
		self.fusion1 = SKFusion(embed_dims[3])
		self.layer4 = BasicLayer(network_depth=sum(depths), dim=embed_dims[3], depth=depths[3],
								 num_heads=num_heads[3], mlp_ratio=mlp_ratios[3],
								 norm_layer=norm_layer[3], window_size=window_size,
								 attn_ratio=attn_ratio[3], attn_loc='last', conv_type=conv_type[3])
		self.patch_split2 = PatchUnEmbed(
			patch_size=2, out_chans=embed_dims[4], embed_dim=embed_dims[3])
		assert embed_dims[0] == embed_dims[4]
		self.fusion2 = SKFusion(embed_dims[4])
		self.layer5 = BasicLayer(network_depth=sum(depths), dim=embed_dims[4], depth=depths[4],
								 num_heads=num_heads[4], mlp_ratio=mlp_ratios[4],
								 norm_layer=norm_layer[4], window_size=window_size,
								 attn_ratio=attn_ratio[4], attn_loc='last', conv_type=conv_type[4])
		# merge non-overlapping patches into image
		self.patch_unembed = PatchUnEmbed(
			patch_size=1, out_chans=out_chans, embed_dim=embed_dims[4], kernel_size=3)
	def check_image_size(self, x):
		"""Reflect-pad H and W up to the next multiple of `patch_size`."""
		# NOTE: for I2I test
		_, _, h, w = x.size()
		mod_pad_h = (self.patch_size - h % self.patch_size) % self.patch_size
		mod_pad_w = (self.patch_size - w % self.patch_size) % self.patch_size
		x = F.pad(x, (0, mod_pad_w, 0, mod_pad_h), 'reflect')
		return x
	def forward_features(self, x):
		"""Encoder-decoder trunk; returns the out_chans-channel feature map."""
		x = self.patch_embed(x)
		x = self.layer1(x)
		skip1 = x
		x = self.patch_merge1(x)
		x = self.layer2(x)
		skip2 = x
		x = self.patch_merge2(x)
		x = self.layer3(x)
		x = self.patch_split1(x)
		# SK-fuse with the (1x1-projected) skip, plus a plain residual
		x = self.fusion1([x, self.skip2(skip2)]) + x
		x = self.layer4(x)
		x = self.patch_split2(x)
		x = self.fusion2([x, self.skip1(skip1)]) + x
		x = self.layer5(x)
		x = self.patch_unembed(x)
		return x
	def forward(self, x):
		"""Dehaze x of shape (B, 3, H, W); output keeps the input spatial size."""
		H, W = x.shape[2:]
		x = self.check_image_size(x)
		feat = self.forward_features(x)
		# soft reconstruction: per-pixel gain K (1 ch) and bias B (3 ch)
		K, B = torch.split(feat, (1, 3), dim=1)
		x = K * x - B + x
		# crop away the padding added in check_image_size
		x = x[:, :, :H, :W]
		return x
def _make_dehazeformer(embed_dims, depths, attn_ratio, conv):
	"""Shared builder for the DehazeFormer variants below; all variants use
	the same head counts and MLP ratios and a homogeneous conv type."""
	return DehazeFormer(
		embed_dims=embed_dims,
		mlp_ratios=[2., 4., 4., 2., 2.],
		depths=depths,
		num_heads=[2, 4, 6, 1, 1],
		attn_ratio=attn_ratio,
		conv_type=[conv] * 5)

def dehazeformer_t():
	"""Tiny variant: shallow stages, no attention in stage 1."""
	return _make_dehazeformer([24, 48, 96, 48, 24], [4, 4, 4, 2, 2], [0, 1/2, 1, 0, 0], 'DWConv')

def dehazeformer_s():
	"""Small variant."""
	return _make_dehazeformer([24, 48, 96, 48, 24], [8, 8, 8, 4, 4], [1/4, 1/2, 3/4, 0, 0], 'DWConv')

def dehazeformer_b():
	"""Base variant."""
	return _make_dehazeformer([24, 48, 96, 48, 24], [16, 16, 16, 8, 8], [1/4, 1/2, 3/4, 0, 0], 'DWConv')

def dehazeformer_d():
	"""Deep variant: doubled stage depths relative to base."""
	return _make_dehazeformer([24, 48, 96, 48, 24], [32, 32, 32, 16, 16], [1/4, 1/2, 3/4, 0, 0], 'DWConv')

def dehazeformer_w():
	"""Wide variant: doubled embedding dims relative to base."""
	return _make_dehazeformer([48, 96, 192, 96, 48], [16, 16, 16, 8, 8], [1/4, 1/2, 3/4, 0, 0], 'DWConv')

def dehazeformer_m():
	"""Medium variant using plain (non-depthwise) convolutions."""
	return _make_dehazeformer([24, 48, 96, 48, 24], [12, 12, 12, 6, 6], [1/4, 1/2, 3/4, 0, 0], 'Conv')

def dehazeformer_l():
	"""Large variant: wide, deeper decoder, plain convolutions."""
	return _make_dehazeformer([48, 96, 192, 96, 48], [16, 16, 16, 12, 12], [1/4, 1/2, 3/4, 0, 0], 'Conv')
# --- DehazeFormer-main/datasets/loader.py ---
import os
import random
import numpy as np
import cv2
from torch.utils.data import Dataset
from utils import hwc_to_chw, read_img
def augment(imgs=None, size=256, edge_decay=0., only_h_flip=False):
	"""Apply the same random crop (and flip/rotation) to every image in `imgs`.

	All images must be HWC arrays of identical spatial size. The crop window
	is `size` x `size`; `edge_decay` > 0 biases the crop toward the borders.
	`only_h_flip` disables the random 90-degree rotations. The input list is
	modified in place and also returned.
	"""
	# fix: the original used a mutable default (imgs=[]) that is shared
	# across calls and mutated by the indexed assignments below
	imgs = [] if imgs is None else imgs
	H, W, _ = imgs[0].shape
	Hc, Wc = [size, size]

	# simple re-weight for the edge
	if random.random() < Hc / H * edge_decay:
		Hs = 0 if random.randint(0, 1) == 0 else H - Hc
	else:
		Hs = random.randint(0, H-Hc)

	if random.random() < Wc / W * edge_decay:
		Ws = 0 if random.randint(0, 1) == 0 else W - Wc
	else:
		Ws = random.randint(0, W-Wc)

	for i in range(len(imgs)):
		imgs[i] = imgs[i][Hs:(Hs+Hc), Ws:(Ws+Wc), :]

	# horizontal flip
	if random.randint(0, 1) == 1:
		for i in range(len(imgs)):
			imgs[i] = np.flip(imgs[i], axis=1)

	if not only_h_flip:
		# bad data augmentations for outdoor
		rot_deg = random.randint(0, 3)
		for i in range(len(imgs)):
			imgs[i] = np.rot90(imgs[i], rot_deg, (0, 1))

	return imgs
def align(imgs=None, size=256):
	"""Center-crop every image in `imgs` (HWC arrays) to `size` x `size`.

	The input list is modified in place and also returned.
	"""
	# fix: avoid the shared mutable default (imgs=[]) of the original
	imgs = [] if imgs is None else imgs
	H, W, _ = imgs[0].shape
	Hc, Wc = [size, size]

	Hs = (H - Hc) // 2
	Ws = (W - Wc) // 2
	for i in range(len(imgs)):
		imgs[i] = imgs[i][Hs:(Hs+Hc), Ws:(Ws+Wc), :]

	return imgs
class PairLoader(Dataset):
	"""Paired hazy/ground-truth dataset, scaled to [-1, 1].

	Expects `<data_dir>/<sub_dir>/hazy` and `<data_dir>/<sub_dir>/GT` to
	contain identically named files. Training pairs are randomly augmented,
	validation pairs center-cropped, test pairs returned untouched.
	"""
	def __init__(self, data_dir, sub_dir, mode, size=256, edge_decay=0, only_h_flip=False):
		assert mode in ['train', 'valid', 'test']

		self.mode = mode
		self.size = size
		self.edge_decay = edge_decay
		self.only_h_flip = only_h_flip

		self.root_dir = os.path.join(data_dir, sub_dir)
		self.img_names = sorted(os.listdir(os.path.join(self.root_dir, 'GT')))
		self.img_num = len(self.img_names)

	def __len__(self):
		return self.img_num

	def __getitem__(self, idx):
		# keep OpenCV single-threaded inside DataLoader workers
		cv2.setNumThreads(0)
		cv2.ocl.setUseOpenCL(False)

		# read image, and scale [0, 1] to [-1, 1]
		img_name = self.img_names[idx]
		source_img = read_img(os.path.join(self.root_dir, 'hazy', img_name)) * 2 - 1
		target_img = read_img(os.path.join(self.root_dir, 'GT', img_name)) * 2 - 1

		if self.mode == 'train':
			source_img, target_img = augment([source_img, target_img], self.size,
											 self.edge_decay, self.only_h_flip)
		elif self.mode == 'valid':
			source_img, target_img = align([source_img, target_img], self.size)

		return {'source': hwc_to_chw(source_img), 'target': hwc_to_chw(target_img), 'filename': img_name}
class SingleLoader(Dataset):
	"""Unpaired image-folder dataset; yields single images scaled to [-1, 1]."""
	def __init__(self, root_dir):
		self.root_dir = root_dir
		self.img_names = sorted(os.listdir(root_dir))
		self.img_num = len(self.img_names)

	def __len__(self):
		return self.img_num

	def __getitem__(self, idx):
		# keep OpenCV single-threaded inside DataLoader workers
		cv2.setNumThreads(0)
		cv2.ocl.setUseOpenCL(False)

		# read image, and scale [0, 1] to [-1, 1]
		img_name = self.img_names[idx]
		path = os.path.join(self.root_dir, img_name)
		img = read_img(path) * 2 - 1

		return {'img': hwc_to_chw(img), 'filename': img_name}
# (end of datasets/loader.py)
# --- DehazeFormer-main/utils/data_parallel.py ---
from torch.nn.parallel import DataParallel
import torch
from torch.nn.parallel._functions import Scatter
from torch.nn.parallel.parallel_apply import parallel_apply
def scatter(inputs, target_gpus, chunk_sizes, dim=0):
    r"""
    Slices tensors into approximately equal chunks and
    distributes them across given GPUs. Duplicates
    references to objects that are not tensors.
    """
    def scatter_map(obj):
        # recursively descend tuples/lists/dicts; split tensors at the leaves
        if isinstance(obj, torch.Tensor):
            try:
                return Scatter.apply(target_gpus, chunk_sizes, dim, obj)
            except:
                # NOTE(review): bare except + quit() aborts the whole process
                # after dumping debug info on any scatter failure; consider
                # re-raising the original exception instead
                print('obj', obj.size())
                print('dim', dim)
                print('chunk_sizes', chunk_sizes)
                quit()
        if isinstance(obj, tuple) and len(obj) > 0:
            return list(zip(*map(scatter_map, obj)))
        if isinstance(obj, list) and len(obj) > 0:
            return list(map(list, zip(*map(scatter_map, obj))))
        if isinstance(obj, dict) and len(obj) > 0:
            return list(map(type(obj), zip(*map(scatter_map, obj.items()))))
        # non-collection objects are shared by reference with every device
        return [obj for targets in target_gpus]
    # After scatter_map is called, a scatter_map cell will exist. This cell
    # has a reference to the actual function scatter_map, which has references
    # to a closure that has a reference to the scatter_map cell (because the
    # fn is recursive). To avoid this reference cycle, we set the function to
    # None, clearing the cell
    try:
        return scatter_map(inputs)
    finally:
        scatter_map = None
def scatter_kwargs(inputs, kwargs, target_gpus, chunk_sizes, dim=0):
    r"""Scatter with support for kwargs dictionary"""
    scattered_inputs = scatter(inputs, target_gpus, chunk_sizes, dim) if inputs else []
    scattered_kwargs = scatter(kwargs, target_gpus, chunk_sizes, dim) if kwargs else []
    # pad the shorter list with empty containers so both stay aligned per device
    while len(scattered_inputs) < len(scattered_kwargs):
        scattered_inputs.append(())
    while len(scattered_kwargs) < len(scattered_inputs):
        scattered_kwargs.append({})
    return tuple(scattered_inputs), tuple(scattered_kwargs)
class BalancedDataParallel(DataParallel):
    """DataParallel variant that lets the first GPU take a smaller batch slice.

    `gpu0_bsz` is the number of samples placed on device 0; the remainder of
    the batch is split evenly over the other devices. With `gpu0_bsz == 0`,
    device 0 is excluded from computation entirely. Useful when device 0 also
    holds optimizer state and gathered outputs.
    """
    def __init__(self, gpu0_bsz, *args, **kwargs):
        self.gpu0_bsz = gpu0_bsz
        super().__init__(*args, **kwargs)

    def forward(self, *inputs, **kwargs):
        if not self.device_ids:
            # CPU fallback: run the wrapped module directly
            return self.module(*inputs, **kwargs)
        if len(self.device_ids) == 1:
            inputs, kwargs = super().scatter(inputs, kwargs, self.device_ids)
            return self.module(*inputs[0], **kwargs[0])
        if self.gpu0_bsz == 0:
            device_ids = self.device_ids[1:]
        else:
            device_ids = self.device_ids
        inputs, kwargs = self.scatter(inputs, kwargs, device_ids)

        if self.gpu0_bsz == 0:
            replicas = self.replicate(self.module, self.device_ids)
        else:
            replicas = self.replicate(self.module, self.device_ids[:len(inputs)])

        if self.gpu0_bsz == 0:
            # drop the replica on device 0, which receives no inputs
            replicas = replicas[1:]

        outputs = self.parallel_apply(replicas, device_ids, inputs, kwargs)
        return self.gather(outputs, self.output_device)

    def parallel_apply(self, replicas, device_ids, inputs, kwargs):
        return parallel_apply(replicas, inputs, kwargs, device_ids[:len(inputs)])

    def scatter(self, inputs, kwargs, device_ids):
        """Split the batch: `gpu0_bsz` samples to device 0, the rest evenly."""
        bsz = inputs[0].size(self.dim)
        num_dev = len(self.device_ids)
        gpu0_bsz = self.gpu0_bsz
        bsz_unit = (bsz - gpu0_bsz) // (num_dev - 1)
        if gpu0_bsz < bsz_unit:
            chunk_sizes = [gpu0_bsz] + [bsz_unit] * (num_dev - 1)
            delta = bsz - sum(chunk_sizes)
            # distribute the remainder over the non-first devices
            for i in range(delta):
                chunk_sizes[i + 1] += 1
            if gpu0_bsz == 0:
                chunk_sizes = chunk_sizes[1:]
        else:
            # device 0's share is not smaller than the others: even split
            return super().scatter(inputs, kwargs, device_ids)
        # fix: trailing dataset-dump artifact removed from the original line
        return scatter_kwargs(inputs, kwargs, device_ids, chunk_sizes, dim=self.dim)
# --- vector-quantize-pytorch-master/setup.py ---
from setuptools import setup, find_packages
# package metadata for vector-quantize-pytorch
setup(
  name = 'vector_quantize_pytorch',
  packages = find_packages(),
  version = '1.6.30',
  license='MIT',
  description = 'Vector Quantization - Pytorch',
  long_description_content_type = 'text/markdown',
  author = 'Phil Wang',
  author_email = 'lucidrains@gmail.com',
  # fix: URL previously said 'vector-quantizer-pytorch'; the repository
  # is 'vector-quantize-pytorch'
  url = 'https://github.com/lucidrains/vector-quantize-pytorch',
  keywords = [
    'artificial intelligence',
    'deep learning',
    'pytorch',
    'quantization'
  ],
  install_requires=[
    'einops>=0.6.1',
    'torch'
  ],
  classifiers=[
    'Development Status :: 4 - Beta',
    'Intended Audience :: Developers',
    'Topic :: Scientific/Engineering :: Artificial Intelligence',
    'License :: OSI Approved :: MIT License',
    'Programming Language :: Python :: 3.6',
  ],
)
# (end of setup.py)
# --- vector-quantize-pytorch-master/examples/autoencoder.py ---
# FashionMnist VQ experiment with various settings.
# From https://github.com/minyoungg/vqtorch/blob/main/examples/autoencoder.py
from tqdm.auto import trange
import torch
import torch.nn as nn
from torchvision import datasets, transforms
from torch.utils.data import DataLoader
from vector_quantize_pytorch import VectorQuantize
# experiment hyperparameters
lr = 3e-4          # AdamW learning rate
train_iter = 1000  # number of optimization steps
num_codes = 256    # VQ codebook size
seed = 1234        # RNG seed for reproducible weight init
device = "cuda" if torch.cuda.is_available() else "cpu"
class SimpleVQAutoEncoder(nn.Module):
    """Small conv autoencoder for FashionMNIST with a VectorQuantize bottleneck.

    `forward` returns (reconstruction clamped to [-1, 1], code indices,
    commitment loss); the last two come from the VQ layer inside `layers`.
    """
    def __init__(self, **vq_kwargs):
        super().__init__()
        # encoder -> VQ bottleneck -> decoder, applied sequentially in forward
        self.layers = nn.ModuleList(
            [
                nn.Conv2d(1, 16, kernel_size=3, stride=1, padding=1),
                nn.MaxPool2d(kernel_size=2, stride=2),
                nn.GELU(),
                nn.Conv2d(16, 32, kernel_size=3, stride=1, padding=1),
                nn.MaxPool2d(kernel_size=2, stride=2),
                VectorQuantize(dim=32, **vq_kwargs),
                nn.Upsample(scale_factor=2, mode="nearest"),
                nn.Conv2d(32, 16, kernel_size=3, stride=1, padding=1),
                nn.GELU(),
                nn.Upsample(scale_factor=2, mode="nearest"),
                nn.Conv2d(16, 1, kernel_size=3, stride=1, padding=1),
            ]
        )
        return
    def forward(self, x):
        for layer in self.layers:
            if isinstance(layer, VectorQuantize):
                # NOTE(review): this `view` folds (C, H, W) into (N, C)
                # without a permute, so "tokens" mix channel/spatial layout;
                # this mirrors the upstream example — confirm intent
                x_shape = x.shape[:-1]
                x_flat = x.view(x.size(0), -1, x.size(1))
                x_flat, indices, commit_loss = layer(x_flat)
                x = x_flat.view(*x_shape, -1)
            else:
                x = layer(x)
        # indices/commit_loss are bound when the VQ layer runs (always present
        # in the layer list above)
        return x.clamp(-1, 1), indices, commit_loss
def train(model, train_loader, train_iterations=1000, alpha=10):
    """Run `train_iterations` steps of L1 reconstruction + `alpha`-weighted
    VQ commitment loss.

    Relies on the module-level globals `opt` (optimizer), `device` and
    `num_codes` defined in this script.
    """
    def iterate_dataset(data_loader):
        # endless stream of batches; restarts the loader when exhausted
        data_iter = iter(data_loader)
        while True:
            try:
                x, y = next(data_iter)
            except StopIteration:
                data_iter = iter(data_loader)
                x, y = next(data_iter)
            yield x.to(device), y.to(device)
    # fix: create the stream once — the original called
    # next(iterate_dataset(train_loader)) inside the loop, constructing a
    # fresh DataLoader iterator (and its worker processes) on every step
    batches = iterate_dataset(train_loader)
    for _ in (pbar := trange(train_iterations)):
        opt.zero_grad()
        x, _ = next(batches)
        out, indices, cmt_loss = model(x)
        rec_loss = (out - x).abs().mean()
        (rec_loss + alpha * cmt_loss).backward()
        opt.step()
        pbar.set_description(
            f"rec loss: {rec_loss.item():.3f} | "
            + f"cmt loss: {cmt_loss.item():.3f} | "
            + f"active %: {indices.unique().numel() / num_codes * 100:.3f}"
        )
    return
# scale FashionMNIST pixels from [0, 1] to [-1, 1]
transform = transforms.Compose(
    [transforms.ToTensor(), transforms.Normalize((0.5,), (0.5,))]
)
# NOTE: downloads the dataset on first run
train_dataset = DataLoader(
    datasets.FashionMNIST(
        root="~/data/fashion_mnist", train=True, download=True, transform=transform
    ),
    batch_size=256,
    shuffle=True,
)
print("baseline")
# seed for reproducible model initialization
torch.random.manual_seed(seed)
model = SimpleVQAutoEncoder(codebook_size=num_codes).to(device)
opt = torch.optim.AdamW(model.parameters(), lr=lr)
train(model, train_dataset, train_iterations=train_iter)
# (end of examples/autoencoder.py)
# --- vector-quantize-pytorch-master/vector_quantize_pytorch/vector_quantize_pytorch.py ---
from functools import partial
import torch
from torch import nn, einsum
import torch.nn.functional as F
import torch.distributed as distributed
from torch.optim import Optimizer
from torch.cuda.amp import autocast
from einops import rearrange, repeat, reduce, pack, unpack
from typing import Callable
def exists(val):
    """True when val is not None."""
    return val is not None
def default(val, d):
    """val if it is not None, otherwise the fallback d."""
    return val if exists(val) else d
def noop(*args, **kwargs):
    """Do nothing (placeholder for optional callbacks such as all-reduce)."""
    pass
def identity(t):
    """Return t unchanged."""
    return t
def l2norm(t):
    """L2-normalize t along its last dimension."""
    return F.normalize(t, p = 2, dim = -1)
def cdist(x, y):
    """Batched pairwise Euclidean distances: x (b, i, d), y (b, j, d) -> (b, i, j).

    Uses the expansion ||x||^2 + ||y||^2 - 2<x, y>. The squared distances are
    clamped at zero before the sqrt: floating-point cancellation can make them
    slightly negative, which previously produced NaNs.
    """
    x2 = (x * x).sum(dim = -1)                        # (b, i)
    y2 = (y * y).sum(dim = -1)                        # (b, j)
    xy = einsum('b i d, b j d -> b i j', x, y) * -2   # (b, i, j)
    sq = x2.unsqueeze(-1) + y2.unsqueeze(-2) + xy
    return sq.clamp(min = 0).sqrt()
def log(t, eps = 1e-20):
    """Numerically safe log: clamps the input to at least eps first."""
    return t.clamp(min = eps).log()
def ema_inplace(old, new, decay):
    """In-place EMA update: old <- decay * old + (1 - decay) * new."""
    if str(old.device).startswith('mps:'):
        # the MPS backend path avoids lerp_; explicit mul/add instead
        old.mul_(decay).add_(new * (1 - decay))
    else:
        old.lerp_(new, 1 - decay)
def pack_one(t, pattern):
    """einops.pack for a single tensor; returns (packed, packed_shapes)."""
    return pack([t], pattern)
def unpack_one(t, ps, pattern):
    """Inverse of pack_one; returns the single unpacked tensor."""
    return unpack(t, ps, pattern)[0]
def uniform_init(*shape):
    """New tensor of the given shape with Kaiming-uniform initialization."""
    # kaiming_uniform_ fills in place and returns the same tensor
    return nn.init.kaiming_uniform_(torch.empty(shape))
def gumbel_noise(t):
    """Gumbel(0, 1) noise with the same shape/device/dtype as t."""
    uniform = torch.zeros_like(t).uniform_(0, 1)
    # -log(-log(U)) transforms uniform samples into Gumbel samples;
    # the clamped `log` helper guards against U == 0
    return -log(-log(uniform))
def gumbel_sample(
    logits,
    temperature = 1.,
    stochastic = False,
    straight_through = False,
    reinmax = False,
    dim = -1,
    training = True
):
    """Sample an index from logits, optionally with Gumbel noise and a
    straight-through (or ReinMax) gradient estimator.

    Returns (indices, one_hot); the one-hot carries gradients when
    straight_through is enabled during training with temperature > 0.
    """
    dtype, size = logits.dtype, logits.shape[dim]
    # add Gumbel noise only for stochastic sampling during training
    if training and stochastic and temperature > 0:
        sampling_logits = (logits / temperature) + gumbel_noise(logits)
    else:
        sampling_logits = logits
    ind = sampling_logits.argmax(dim = dim)
    one_hot = F.one_hot(ind, size).type(dtype)
    assert not (reinmax and not straight_through), 'reinmax can only be turned on if using straight through gumbel softmax'
    # hard (non-differentiable) path: eval mode, temperature 0, or no ST
    if not straight_through or temperature <= 0. or not training:
        return ind, one_hot
    # use reinmax for better second-order accuracy - https://arxiv.org/abs/2304.08612
    # algorithm 2
    if reinmax:
        π0 = logits.softmax(dim = dim)
        π1 = (one_hot + (logits / temperature).softmax(dim = dim)) / 2
        π1 = ((log(π1) - logits).detach() + logits).softmax(dim = 1)
        π2 = 2 * π1 - 0.5 * π0
        one_hot = π2 - π2.detach() + one_hot
    else:
        # classic straight-through: forward value is the hard one-hot,
        # gradient flows through the softmax
        π1 = (logits / temperature).softmax(dim = dim)
        one_hot = one_hot + π1 - π1.detach()
    return ind, one_hot
def laplace_smoothing(x, n_categories, eps = 1e-5, dim = -1):
    """Additively smooth counts x along dim so probabilities never hit zero."""
    total = x.sum(dim = dim, keepdim = True)
    return (x + eps) / (total + n_categories * eps)
def sample_vectors(samples, num):
    """Draw num rows from samples: without replacement when enough rows
    exist, otherwise with replacement."""
    n, device = samples.shape[0], samples.device
    if n >= num:
        idx = torch.randperm(n, device = device)[:num]
    else:
        idx = torch.randint(0, n, (num,), device = device)
    return samples[idx]
def batched_sample_vectors(samples, num):
    """Apply sample_vectors independently to each codebook in the batch."""
    per_codebook = [sample_vectors(s, num) for s in samples.unbind(dim = 0)]
    return torch.stack(per_codebook, dim = 0)
def pad_shape(shape, size, dim = 0):
    """Copy of shape (as a list) with the entry at index dim set to size.

    dim is matched against non-negative positions only, mirroring the
    original enumerate comparison (a negative dim leaves shape unchanged).
    """
    out = []
    for i, s in enumerate(shape):
        out.append(size if i == dim else s)
    return out
def sample_multinomial(total_count, probs):
    """Draw one multinomial sample of total_count over probs, built
    sequentially from conditional binomials (runs on CPU)."""
    device = probs.device
    probs = probs.cpu()
    remaining = probs.new_full((), total_count)
    prob_mass = probs.new_ones(())
    out = torch.empty_like(probs, dtype = torch.long)
    for i, p in enumerate(probs):
        # binomial draw conditioned on what is left to allocate
        drawn = torch.binomial(remaining, p / prob_mass)
        out[i] = drawn
        remaining -= drawn
        prob_mass -= p
    return out.to(device)
def all_gather_sizes(x, dim):
    """Gather x's size along dim from every rank; returns a stacked tensor."""
    size = torch.tensor(x.shape[dim], dtype = torch.long, device = x.device)
    all_sizes = [torch.empty_like(size) for _ in range(distributed.get_world_size())]
    distributed.all_gather(all_sizes, size)
    return torch.stack(all_sizes)
def all_gather_variably_sized(x, sizes, dim = 0):
    """All-gather tensors whose size along dim differs per rank."""
    rank = distributed.get_rank()
    all_x = []
    for i, size in enumerate(sizes):
        # reuse the local tensor on our own rank; receive into fresh buffers otherwise
        t = x if i == rank else x.new_empty(pad_shape(x.shape, size, dim))
        distributed.broadcast(t, src = i, async_op = True)
        all_x.append(t)
    distributed.barrier()
    return all_x
def sample_vectors_distributed(local_samples, num):
    """Sample num vectors across all ranks, allocating each rank a share
    proportional to how many samples it holds (decided on rank 0)."""
    local_samples = rearrange(local_samples, '1 ... -> ...')
    rank = distributed.get_rank()
    all_num_samples = all_gather_sizes(local_samples, dim = 0)
    if rank == 0:
        samples_per_rank = sample_multinomial(num, all_num_samples / all_num_samples.sum())
    else:
        samples_per_rank = torch.empty_like(all_num_samples)
    distributed.broadcast(samples_per_rank, src = 0)
    samples_per_rank = samples_per_rank.tolist()
    local_samples = sample_vectors(local_samples, samples_per_rank[rank])
    all_samples = all_gather_variably_sized(local_samples, samples_per_rank, dim = 0)
    out = torch.cat(all_samples, dim = 0)
    return rearrange(out, '... -> 1 ...')
def batched_bincount(x, *, minlength):
    """Per-row bincount of integer codes: x (batch, n) -> (batch, minlength)."""
    counts = torch.zeros(x.shape[0], minlength, dtype = x.dtype, device = x.device)
    counts.scatter_add_(-1, x, torch.ones_like(x))
    return counts
def kmeans(
    samples,
    num_clusters,
    num_iters = 10,
    use_cosine_sim = False,
    sample_fn = batched_sample_vectors,
    all_reduce_fn = noop
):
    """Batched (per-codebook) k-means.

    samples: (num_codebooks, n, d). Returns (means (h, k, d), bins (h, k)),
    where bins are the final cluster counts. `all_reduce_fn` hooks in
    distributed synchronization of counts and sums.
    """
    num_codebooks, dim, dtype, device = samples.shape[0], samples.shape[-1], samples.dtype, samples.device
    # initialize the means from randomly drawn samples
    means = sample_fn(samples, num_clusters)
    for _ in range(num_iters):
        if use_cosine_sim:
            dists = samples @ rearrange(means, 'h n d -> h d n')
        else:
            dists = -torch.cdist(samples, means, p = 2)
        buckets = torch.argmax(dists, dim = -1)
        bins = batched_bincount(buckets, minlength = num_clusters)
        all_reduce_fn(bins)
        zero_mask = bins == 0
        # avoid division by zero for empty clusters
        bins_min_clamped = bins.masked_fill(zero_mask, 1)
        new_means = buckets.new_zeros(num_codebooks, num_clusters, dim, dtype = dtype)
        new_means.scatter_add_(1, repeat(buckets, 'h n -> h n d', d = dim), samples)
        new_means = new_means / rearrange(bins_min_clamped, '... -> ... 1')
        all_reduce_fn(new_means)
        if use_cosine_sim:
            new_means = l2norm(new_means)
        # keep the previous mean for clusters that received no samples
        means = torch.where(
            rearrange(zero_mask, '... -> ... 1'),
            means,
            new_means
        )
    return means, bins
def batched_embedding(indices, embeds):
    """Gather code vectors: indices (h, b, n), embeds (h, c, d) -> (h, b, n, d)."""
    batch, dim = indices.shape[1], embeds.shape[-1]
    gather_index = indices.unsqueeze(-1).expand(-1, -1, -1, dim)
    expanded = embeds.unsqueeze(1).expand(-1, batch, -1, -1)
    return expanded.gather(2, gather_index)
# regularization losses
def orthogonal_loss_fn(t):
    """Codebook orthogonality regularizer.

    eq (2) from https://arxiv.org/abs/2112.00384: mean squared cosine
    similarity between codes, offset so a perfectly orthogonal codebook
    scores zero.
    """
    h, n = t.shape[:2]
    normed = F.normalize(t, p = 2, dim = -1)
    sim = einsum('h i d, h j d -> h i j', normed, normed)
    return (sim ** 2).sum() / (h * n ** 2) - (1 / n)
# distance types
class EuclideanCodebook(nn.Module):
    """VQ codebook using (negative) Euclidean distance for code lookup.

    Codes are maintained either by EMA updates (default) or as learnable
    parameters. Supports k-means initialization on first forward, expiry of
    rarely-used codes, optional distributed synchronization, and an optional
    affine re-parameterization of the codebook against batch statistics.
    """
    def __init__(
        self,
        dim,
        codebook_size,
        num_codebooks = 1,
        kmeans_init = False,
        kmeans_iters = 10,
        sync_kmeans = True,
        decay = 0.8,
        eps = 1e-5,
        threshold_ema_dead_code = 2,
        reset_cluster_size = None,
        use_ddp = False,
        learnable_codebook = False,
        gumbel_sample = gumbel_sample,
        sample_codebook_temp = 1.,
        ema_update = True,
        affine_param = False,
        sync_affine_param = False,
        affine_param_batch_decay = 0.99,
        affine_param_codebook_decay = 0.9
    ):
        super().__init__()
        self.transform_input = identity
        self.decay = decay
        self.ema_update = ema_update
        # with kmeans_init the embedding starts at zeros and is filled lazily
        init_fn = uniform_init if not kmeans_init else torch.zeros
        embed = init_fn(num_codebooks, codebook_size, dim)
        self.codebook_size = codebook_size
        self.num_codebooks = num_codebooks
        self.kmeans_iters = kmeans_iters
        self.eps = eps
        self.threshold_ema_dead_code = threshold_ema_dead_code
        self.reset_cluster_size = default(reset_cluster_size, threshold_ema_dead_code)
        assert callable(gumbel_sample)
        self.gumbel_sample = gumbel_sample
        self.sample_codebook_temp = sample_codebook_temp
        assert not (use_ddp and num_codebooks > 1 and kmeans_init), 'kmeans init is not compatible with multiple codebooks in distributed environment for now'
        # distributed variants of sampling/reduction when DDP is enabled
        self.sample_fn = sample_vectors_distributed if use_ddp and sync_kmeans else batched_sample_vectors
        self.kmeans_all_reduce_fn = distributed.all_reduce if use_ddp and sync_kmeans else noop
        self.all_reduce_fn = distributed.all_reduce if use_ddp else noop
        self.register_buffer('initted', torch.Tensor([not kmeans_init]))
        self.register_buffer('cluster_size', torch.zeros(num_codebooks, codebook_size))
        self.register_buffer('embed_avg', embed.clone())
        self.learnable_codebook = learnable_codebook
        if learnable_codebook:
            self.embed = nn.Parameter(embed)
        else:
            self.register_buffer('embed', embed)
        # affine related params
        self.affine_param = affine_param
        self.sync_affine_param = sync_affine_param
        if not affine_param:
            return
        self.affine_param_batch_decay = affine_param_batch_decay
        self.affine_param_codebook_decay = affine_param_codebook_decay
        # batch statistics are created lazily by update_with_decay
        self.register_buffer('batch_mean', None)
        self.register_buffer('batch_variance', None)
        self.register_buffer('codebook_mean_needs_init', torch.Tensor([True]))
        self.register_buffer('codebook_mean', torch.empty(num_codebooks, 1, dim))
        self.register_buffer('codebook_variance_needs_init', torch.Tensor([True]))
        self.register_buffer('codebook_variance', torch.empty(num_codebooks, 1, dim))
    @torch.jit.ignore
    def init_embed_(self, data, mask = None):
        """One-time k-means initialization of the codebook from `data`."""
        if self.initted:
            return
        if exists(mask):
            c = data.shape[0]
            data = rearrange(data[mask], '(c n) d -> c n d', c = c)
        embed, cluster_size = kmeans(
            data,
            self.codebook_size,
            self.kmeans_iters,
            sample_fn = self.sample_fn,
            all_reduce_fn = self.kmeans_all_reduce_fn
        )
        self.embed.data.copy_(embed)
        self.embed_avg.data.copy_(embed.clone())
        self.cluster_size.data.copy_(cluster_size)
        self.initted.data.copy_(torch.Tensor([True]))
    @torch.jit.ignore
    def update_with_decay(self, buffer_name, new_value, decay):
        """EMA-update (or lazily create) the named statistics buffer."""
        old_value = getattr(self, buffer_name)
        needs_init = getattr(self, buffer_name + "_needs_init", False)
        if needs_init:
            self.register_buffer(buffer_name + "_needs_init", torch.Tensor([False]))
        if not exists(old_value) or needs_init:
            # first observation: store it directly
            self.register_buffer(buffer_name, new_value.detach())
            return
        value = old_value * decay + new_value.detach() * (1 - decay)
        self.register_buffer(buffer_name, value)
    @torch.jit.ignore
    def update_affine(self, data, embed, mask = None):
        """Track running mean/variance of both the codebook and the batch."""
        assert self.affine_param
        var_fn = partial(torch.var, unbiased = False)
        # calculate codebook mean and variance
        embed = rearrange(embed, 'h ... d -> h (...) d')
        if self.training:
            self.update_with_decay('codebook_mean', reduce(embed, 'h n d -> h 1 d', 'mean'), self.affine_param_codebook_decay)
            self.update_with_decay('codebook_variance', reduce(embed, 'h n d -> h 1 d', var_fn), self.affine_param_codebook_decay)
        # prepare batch data, which depends on whether it has masking
        data = rearrange(data, 'h ... d -> h (...) d')
        if exists(mask):
            c = data.shape[0]
            data = rearrange(data[mask], '(c n) d -> c n d', c = c)
        # calculate batch mean and variance
        if not self.sync_affine_param:
            self.update_with_decay('batch_mean', reduce(data, 'h n d -> h 1 d', 'mean'), self.affine_param_batch_decay)
            self.update_with_decay('batch_variance', reduce(data, 'h n d -> h 1 d', var_fn), self.affine_param_batch_decay)
            return
        num_vectors, device, dtype = data.shape[-2], data.device, data.dtype
        # number of vectors, for denominator
        num_vectors = torch.tensor([num_vectors], device = device, dtype = dtype)
        distributed.all_reduce(num_vectors)
        # calculate distributed mean
        batch_sum = reduce(data, 'h n d -> h 1 d', 'sum')
        distributed.all_reduce(batch_sum)
        batch_mean = batch_sum / num_vectors
        self.update_with_decay('batch_mean', batch_mean, self.affine_param_batch_decay)
        # calculate distributed variance
        variance_numer = reduce((data - batch_mean) ** 2, 'h n d -> h 1 d', 'sum')
        distributed.all_reduce(variance_numer)
        batch_variance = variance_numer / num_vectors
        self.update_with_decay('batch_variance', batch_variance, self.affine_param_batch_decay)
    def replace(self, batch_samples, batch_mask):
        """Replace masked codes with freshly drawn sample vectors."""
        for ind, (samples, mask) in enumerate(zip(batch_samples.unbind(dim = 0), batch_mask.unbind(dim = 0))):
            if not torch.any(mask):
                continue
            sampled = self.sample_fn(rearrange(samples, '... -> 1 ...'), mask.sum().item())
            sampled = rearrange(sampled, '1 ... -> ...')
            self.embed.data[ind][mask] = sampled
            self.cluster_size.data[ind][mask] = self.reset_cluster_size
            self.embed_avg.data[ind][mask] = sampled * self.reset_cluster_size
    def expire_codes_(self, batch_samples):
        """Re-seed codes whose EMA usage fell below the dead-code threshold."""
        if self.threshold_ema_dead_code == 0:
            return
        expired_codes = self.cluster_size < self.threshold_ema_dead_code
        if not torch.any(expired_codes):
            return
        batch_samples = rearrange(batch_samples, 'h ... d -> h (...) d')
        self.replace(batch_samples, batch_mask = expired_codes)
    @autocast(enabled = False)
    def forward(
        self,
        x,
        sample_codebook_temp = None,
        mask = None
    ):
        """Quantize x against the codebook.

        Returns (quantize, embed_ind, dist): the quantized vectors, the
        chosen code indices, and the (negative-distance) logits. During
        training with ema_update, also updates the codebook in place.
        """
        needs_codebook_dim = x.ndim < 4
        sample_codebook_temp = default(sample_codebook_temp, self.sample_codebook_temp)
        x = x.float()
        if needs_codebook_dim:
            x = rearrange(x, '... -> 1 ...')
        dtype = x.dtype
        flatten, ps = pack_one(x, 'h * d')
        if exists(mask):
            mask = repeat(mask, 'b n -> c (b h n)', c = flatten.shape[0], h = flatten.shape[-2] // (mask.shape[0] * mask.shape[1]))
        self.init_embed_(flatten, mask = mask)
        if self.affine_param:
            self.update_affine(flatten, self.embed, mask = mask)
        embed = self.embed if self.learnable_codebook else self.embed.detach()
        if self.affine_param:
            # re-scale the codebook from codebook statistics to batch statistics
            codebook_std = self.codebook_variance.clamp(min = 1e-5).sqrt()
            batch_std = self.batch_variance.clamp(min = 1e-5).sqrt()
            embed = (embed - self.codebook_mean) * (batch_std / codebook_std) + self.batch_mean
        # logits are negated distances: closer code -> larger logit
        dist = -cdist(flatten, embed)
        embed_ind, embed_onehot = self.gumbel_sample(dist, dim = -1, temperature = sample_codebook_temp, training = self.training)
        embed_ind = unpack_one(embed_ind, ps, 'h *')
        if self.training:
            # soft path keeps gradients flowing through the one-hot
            unpacked_onehot = unpack_one(embed_onehot, ps, 'h * c')
            quantize = einsum('h b n c, h c d -> h b n d', unpacked_onehot, embed)
        else:
            quantize = batched_embedding(embed_ind, embed)
        if self.training and self.ema_update:
            if self.affine_param:
                # map the batch back into codebook space before accumulating
                flatten = (flatten - self.batch_mean) * (codebook_std / batch_std) + self.codebook_mean
            if exists(mask):
                embed_onehot[~mask] = 0.
            cluster_size = embed_onehot.sum(dim = 1)
            self.all_reduce_fn(cluster_size)
            ema_inplace(self.cluster_size.data, cluster_size, self.decay)
            embed_sum = einsum('h n d, h n c -> h c d', flatten, embed_onehot)
            self.all_reduce_fn(embed_sum.contiguous())
            ema_inplace(self.embed_avg.data, embed_sum, self.decay)
            # normalize the running sums into new code positions
            cluster_size = laplace_smoothing(self.cluster_size, self.codebook_size, self.eps) * self.cluster_size.sum(dim = -1, keepdim = True)
            embed_normalized = self.embed_avg / rearrange(cluster_size, '... -> ... 1')
            self.embed.data.copy_(embed_normalized)
            self.expire_codes_(x)
        if needs_codebook_dim:
            quantize, embed_ind = map(lambda t: rearrange(t, '1 ... -> ...'), (quantize, embed_ind))
        dist = unpack_one(dist, ps, 'h * d')
        return quantize, embed_ind, dist
class CosineSimCodebook(nn.Module):
    """Codebook that matches inputs to codes by cosine similarity.

    Inputs and codebook entries are kept l2-normalized, so the inner product
    used for the distance matrix is the cosine similarity. Codes are updated
    either by an exponential moving average (EMA) of their assigned vectors
    or, when ``learnable_codebook`` is set, by gradient descent.
    """
    def __init__(
        self,
        dim,
        codebook_size,
        num_codebooks = 1,
        kmeans_init = False,
        kmeans_iters = 10,
        sync_kmeans = True,
        decay = 0.8,
        eps = 1e-5,
        threshold_ema_dead_code = 2,
        reset_cluster_size = None,
        use_ddp = False,
        learnable_codebook = False,
        gumbel_sample = gumbel_sample,
        sample_codebook_temp = 1.,
        ema_update = True
    ):
        super().__init__()
        # inputs are l2-normalized before quantization (cosine similarity)
        self.transform_input = l2norm
        self.ema_update = ema_update
        self.decay = decay
        if not kmeans_init:
            embed = l2norm(uniform_init(num_codebooks, codebook_size, dim))
        else:
            # placeholder; real initialization happens lazily via kmeans in init_embed_
            embed = torch.zeros(num_codebooks, codebook_size, dim)
        self.codebook_size = codebook_size
        self.num_codebooks = num_codebooks
        self.kmeans_iters = kmeans_iters
        self.eps = eps
        # codes whose EMA cluster size falls below this threshold get resampled
        self.threshold_ema_dead_code = threshold_ema_dead_code
        self.reset_cluster_size = default(reset_cluster_size, threshold_ema_dead_code)
        assert callable(gumbel_sample)
        self.gumbel_sample = gumbel_sample
        self.sample_codebook_temp = sample_codebook_temp
        # distributed-aware sampling / reduction when running under DDP
        self.sample_fn = sample_vectors_distributed if use_ddp and sync_kmeans else batched_sample_vectors
        self.kmeans_all_reduce_fn = distributed.all_reduce if use_ddp and sync_kmeans else noop
        self.all_reduce_fn = distributed.all_reduce if use_ddp else noop
        self.register_buffer('initted', torch.Tensor([not kmeans_init]))
        self.register_buffer('cluster_size', torch.zeros(num_codebooks, codebook_size))
        self.register_buffer('embed_avg', embed.clone())
        self.learnable_codebook = learnable_codebook
        if learnable_codebook:
            self.embed = nn.Parameter(embed)
        else:
            self.register_buffer('embed', embed)
    @torch.jit.ignore
    def init_embed_(self, data, mask = None):
        # lazily initialize the codebook with kmeans over the first batch seen
        if self.initted:
            return
        if exists(mask):
            c = data.shape[0]
            data = rearrange(data[mask], '(c n) d -> c n d', c = c)
        embed, cluster_size = kmeans(
            data,
            self.codebook_size,
            self.kmeans_iters,
            use_cosine_sim = True,
            sample_fn = self.sample_fn,
            all_reduce_fn = self.kmeans_all_reduce_fn
        )
        self.embed.data.copy_(embed)
        self.embed_avg.data.copy_(embed.clone())
        self.cluster_size.data.copy_(cluster_size)
        self.initted.data.copy_(torch.Tensor([True]))
    def replace(self, batch_samples, batch_mask):
        # replace masked (dead) codes with vectors sampled from the batch
        batch_samples = l2norm(batch_samples)
        for ind, (samples, mask) in enumerate(zip(batch_samples.unbind(dim = 0), batch_mask.unbind(dim = 0))):
            if not torch.any(mask):
                continue
            sampled = self.sample_fn(rearrange(samples, '... -> 1 ...'), mask.sum().item())
            sampled = rearrange(sampled, '1 ... -> ...')
            self.embed.data[ind][mask] = sampled
            # seed the EMA statistics so the new code is not instantly expired again
            self.embed_avg.data[ind][mask] = sampled * self.reset_cluster_size
            self.cluster_size.data[ind][mask] = self.reset_cluster_size
    def expire_codes_(self, batch_samples):
        # resample codes whose EMA usage dropped below the dead-code threshold
        if self.threshold_ema_dead_code == 0:
            return
        expired_codes = self.cluster_size < self.threshold_ema_dead_code
        if not torch.any(expired_codes):
            return
        batch_samples = rearrange(batch_samples, 'h ... d -> h (...) d')
        self.replace(batch_samples, batch_mask = expired_codes)
    @autocast(enabled = False)
    def forward(
        self,
        x,
        sample_codebook_temp = None,
        mask = None
    ):
        """Quantize *x* against the cosine-similarity codebook.

        Returns ``(quantize, embed_ind, dist)`` where *dist* is the cosine
        similarity matrix between inputs and codes. When training with EMA
        updates enabled, also updates the codebook buffers in place.
        """
        needs_codebook_dim = x.ndim < 4
        sample_codebook_temp = default(sample_codebook_temp, self.sample_codebook_temp)
        # codebook math is done in full precision (autocast disabled above)
        x = x.float()
        if needs_codebook_dim:
            x = rearrange(x, '... -> 1 ...')
        flatten, ps = pack_one(x, 'h * d')
        if exists(mask):
            mask = repeat(mask, 'b n -> c (b h n)', c = flatten.shape[0], h = flatten.shape[-2] // (mask.shape[0] * mask.shape[1]))
        self.init_embed_(flatten, mask = mask)
        embed = self.embed if self.learnable_codebook else self.embed.detach()
        # inner product equals cosine similarity since both sides are l2-normalized
        dist = einsum('h n d, h c d -> h n c', flatten, embed)
        embed_ind, embed_onehot = self.gumbel_sample(dist, dim = -1, temperature = sample_codebook_temp, training = self.training)
        embed_ind = unpack_one(embed_ind, ps, 'h *')
        if self.training:
            unpacked_onehot = unpack_one(embed_onehot, ps, 'h * c')
            quantize = einsum('h b n c, h c d -> h b n d', unpacked_onehot, embed)
        else:
            quantize = batched_embedding(embed_ind, embed)
        if self.training and self.ema_update:
            if exists(mask):
                embed_onehot[~mask] = 0.
            bins = embed_onehot.sum(dim = 1)
            self.all_reduce_fn(bins)
            ema_inplace(self.cluster_size.data, bins, self.decay)
            embed_sum = einsum('h n d, h n c -> h c d', flatten, embed_onehot)
            self.all_reduce_fn(embed_sum.contiguous())
            ema_inplace(self.embed_avg.data, embed_sum, self.decay)
            cluster_size = laplace_smoothing(self.cluster_size, self.codebook_size, self.eps) * self.cluster_size.sum(dim = -1, keepdim = True)
            embed_normalized = self.embed_avg / rearrange(cluster_size, '... -> ... 1')
            embed_normalized = l2norm(embed_normalized)
            # fix: previous code applied l2norm a second time inside copy_();
            # l2norm is idempotent, so normalizing once is sufficient
            self.embed.data.copy_(embed_normalized)
            self.expire_codes_(x)
        if needs_codebook_dim:
            quantize, embed_ind = map(lambda t: rearrange(t, '1 ... -> ...'), (quantize, embed_ind))
        dist = unpack_one(dist, ps, 'h * d')
        return quantize, embed_ind, dist
# main class
class VectorQuantize(nn.Module):
    """Vector quantization layer (VQ-VAE style) with many training variants.

    Maps each input vector to a codebook entry (euclidean or cosine-similarity
    codebook), passes gradients through with the straight-through estimator,
    and returns the quantized output, the code indices and an auxiliary loss
    (commitment plus optional orthogonal regularization). Supports multi-headed
    codes, EMA or gradient-based codebook updates, dead-code expiry, and an
    optional in-place codebook optimizer.
    """
    def __init__(
        self,
        dim,
        codebook_size,
        codebook_dim = None,
        heads = 1,
        separate_codebook_per_head = False,
        decay = 0.8,
        eps = 1e-5,
        kmeans_init = False,
        kmeans_iters = 10,
        sync_kmeans = True,
        use_cosine_sim = False,
        threshold_ema_dead_code = 0,
        channel_last = True,
        accept_image_fmap = False,
        commitment_weight = 1.,
        commitment_use_cross_entropy_loss = False,
        orthogonal_reg_weight = 0.,
        orthogonal_reg_active_codes_only = False,
        orthogonal_reg_max_codes = None,
        stochastic_sample_codes = False,
        sample_codebook_temp = 1.,
        straight_through = False,
        reinmax = False, # using reinmax for improved straight-through, assuming straight through helps at all
        sync_codebook = False,
        sync_affine_param = False,
        ema_update = True,
        learnable_codebook = False,
        in_place_codebook_optimizer: Callable[..., Optimizer] = None, # Optimizer used to update the codebook embedding if using learnable_codebook
        affine_param = False,
        affine_param_batch_decay = 0.99,
        affine_param_codebook_decay = 0.9,
        sync_update_v = 0. # the v that controls optimistic vs pessimistic update for synchronous update rule (21) https://minyoungg.github.io/vqtorch/assets/draft_050523.pdf
    ):
        super().__init__()
        self.dim = dim
        self.heads = heads
        self.separate_codebook_per_head = separate_codebook_per_head
        # optionally project into a (lower-dimensional) codebook space and back
        codebook_dim = default(codebook_dim, dim)
        codebook_input_dim = codebook_dim * heads
        requires_projection = codebook_input_dim != dim
        self.project_in = nn.Linear(dim, codebook_input_dim) if requires_projection else nn.Identity()
        self.project_out = nn.Linear(codebook_input_dim, dim) if requires_projection else nn.Identity()
        self.eps = eps
        self.commitment_weight = commitment_weight
        self.commitment_use_cross_entropy_loss = commitment_use_cross_entropy_loss # whether to use cross entropy loss to codebook as commitment loss
        self.learnable_codebook = learnable_codebook
        has_codebook_orthogonal_loss = orthogonal_reg_weight > 0
        self.has_codebook_orthogonal_loss = has_codebook_orthogonal_loss
        self.orthogonal_reg_weight = orthogonal_reg_weight
        self.orthogonal_reg_active_codes_only = orthogonal_reg_active_codes_only
        self.orthogonal_reg_max_codes = orthogonal_reg_max_codes
        assert not (ema_update and learnable_codebook), 'learnable codebook not compatible with EMA update'
        assert 0 <= sync_update_v <= 1.
        assert not (sync_update_v > 0. and not learnable_codebook), 'learnable codebook must be turned on'
        self.sync_update_v = sync_update_v
        codebook_class = EuclideanCodebook if not use_cosine_sim else CosineSimCodebook
        gumbel_sample_fn = partial(
            gumbel_sample,
            stochastic = stochastic_sample_codes,
            reinmax = reinmax,
            straight_through = straight_through
        )
        codebook_kwargs = dict(
            dim = codebook_dim,
            num_codebooks = heads if separate_codebook_per_head else 1,
            codebook_size = codebook_size,
            kmeans_init = kmeans_init,
            kmeans_iters = kmeans_iters,
            sync_kmeans = sync_kmeans,
            decay = decay,
            eps = eps,
            threshold_ema_dead_code = threshold_ema_dead_code,
            use_ddp = sync_codebook,
            learnable_codebook = has_codebook_orthogonal_loss or learnable_codebook,
            sample_codebook_temp = sample_codebook_temp,
            gumbel_sample = gumbel_sample_fn,
            ema_update = ema_update
        )
        if affine_param:
            assert not use_cosine_sim, 'affine param is only compatible with euclidean codebook'
            codebook_kwargs = dict(
                **codebook_kwargs,
                affine_param = True,
                sync_affine_param = sync_affine_param,
                affine_param_batch_decay = affine_param_batch_decay,
                affine_param_codebook_decay = affine_param_codebook_decay,
            )
        self._codebook = codebook_class(**codebook_kwargs)
        self.in_place_codebook_optimizer = in_place_codebook_optimizer(self._codebook.parameters()) if exists(in_place_codebook_optimizer) else None
        self.codebook_size = codebook_size
        self.accept_image_fmap = accept_image_fmap
        self.channel_last = channel_last
    @property
    def codebook(self):
        # (codebook_size, dim) for a single codebook, (heads, codebook_size, dim)
        # when each head has its own codebook
        codebook = self._codebook.embed
        if self.separate_codebook_per_head:
            return codebook
        return rearrange(codebook, '1 ... -> ...')
    def get_codes_from_indices(self, indices):
        """Look up the code vectors for *indices* (inverse of forward's index output)."""
        codebook = self.codebook
        is_multiheaded = codebook.ndim > 2
        if not is_multiheaded:
            codes = codebook[indices]
            return rearrange(codes, '... h d -> ... (h d)')
        indices, ps = pack_one(indices, 'b * h')
        indices = rearrange(indices, 'b n h -> b h n')
        indices = repeat(indices, 'b h n -> b h n d', d = codebook.shape[-1])
        codebook = repeat(codebook, 'h n d -> b h n d', b = indices.shape[0])
        codes = codebook.gather(2, indices)
        codes = rearrange(codes, 'b h n d -> b n (h d)')
        codes = unpack_one(codes, ps, 'b * d')
        return codes
    def forward(
        self,
        x,
        indices = None,
        mask = None,
        sample_codebook_temp = None
    ):
        """Quantize *x*.

        Returns ``(quantize, embed_ind, loss)`` normally, or
        ``(quantize, ce_loss)`` when target *indices* are passed in
        (cross entropy of the distance matrix to the given codes).
        *mask* marks valid positions for variable-length sequences.
        """
        orig_input = x
        # (batch, dim) inputs are temporarily given a singleton sequence axis
        only_one = x.ndim == 2
        if only_one:
            assert not exists(mask)
            x = rearrange(x, 'b d -> b 1 d')
        shape, device, heads, is_multiheaded, codebook_size, return_loss = x.shape, x.device, self.heads, self.heads > 1, self.codebook_size, exists(indices)
        need_transpose = not self.channel_last and not self.accept_image_fmap
        should_inplace_optimize = exists(self.in_place_codebook_optimizer)
        # rearrange inputs
        if self.accept_image_fmap:
            height, width = x.shape[-2:]
            x = rearrange(x, 'b c h w -> b (h w) c')
        if need_transpose:
            x = rearrange(x, 'b d n -> b n d')
        # project input
        x = self.project_in(x)
        # handle multi-headed separate codebooks
        if is_multiheaded:
            ein_rhs_eq = 'h b n d' if self.separate_codebook_per_head else '1 (b h) n d'
            x = rearrange(x, f'b n (h d) -> {ein_rhs_eq}', h = heads)
        # l2norm for cosine sim, otherwise identity
        x = self._codebook.transform_input(x)
        # codebook forward kwargs
        codebook_forward_kwargs = dict(
            sample_codebook_temp = sample_codebook_temp,
            mask = mask
        )
        # quantize
        quantize, embed_ind, distances = self._codebook(x, **codebook_forward_kwargs)
        # one step in-place update
        if should_inplace_optimize and self.training:
            if exists(mask):
                loss = F.mse_loss(quantize, x.detach(), reduction = 'none')
                loss_mask = mask
                if is_multiheaded:
                    loss_mask = repeat(mask, 'b n -> c (b h) n', c = loss.shape[0], h = loss.shape[1] // mask.shape[0])
                loss = loss[loss_mask].mean()
            else:
                loss = F.mse_loss(quantize, x.detach())
            loss.backward()
            self.in_place_codebook_optimizer.step()
            self.in_place_codebook_optimizer.zero_grad()
            # quantize again
            quantize, embed_ind, distances = self._codebook(x, **codebook_forward_kwargs)
        if self.training:
            # determine code to use for commitment loss
            maybe_detach = torch.detach if not self.learnable_codebook else identity
            commit_quantize = maybe_detach(quantize)
            # straight through
            quantize = x + (quantize - x).detach()
            if self.sync_update_v > 0.:
                # (21) in https://minyoungg.github.io/vqtorch/assets/draft_050523.pdf
                quantize = quantize + self.sync_update_v * (quantize - quantize.detach())
        # function for calculating cross entropy loss to distance matrix
        # used for (1) naturalspeech2 training residual vq latents to be close to the correct codes and (2) cross-entropy based commitment loss
        def calculate_ce_loss(codes):
            if not is_multiheaded:
                dist_einops_eq = '1 b n l -> b l n'
            elif self.separate_codebook_per_head:
                dist_einops_eq = 'c b n l -> b l n c'
            else:
                dist_einops_eq = '1 (b h) n l -> b l n h'
            ce_loss = F.cross_entropy(
                rearrange(distances, dist_einops_eq, b = shape[0]),
                codes,
                ignore_index = -1
            )
            return ce_loss
        # if returning cross entropy loss on codes that were passed in
        if return_loss:
            return quantize, calculate_ce_loss(indices)
        # transform embedding indices
        if is_multiheaded:
            if self.separate_codebook_per_head:
                embed_ind = rearrange(embed_ind, 'h b n -> b n h', h = heads)
            else:
                embed_ind = rearrange(embed_ind, '1 (b h) n -> b n h', h = heads)
        if self.accept_image_fmap:
            embed_ind = rearrange(embed_ind, 'b (h w) ... -> b h w ...', h = height, w = width)
        if only_one:
            embed_ind = rearrange(embed_ind, 'b 1 -> b')
        # aggregate loss
        loss = torch.tensor([0.], device = device, requires_grad = self.training)
        if self.training:
            if self.commitment_weight > 0:
                if self.commitment_use_cross_entropy_loss:
                    if exists(mask):
                        ce_loss_mask = mask
                        if is_multiheaded:
                            ce_loss_mask = repeat(ce_loss_mask, 'b n -> b n h', h = heads)
                        # masked-out positions are ignored by cross entropy (ignore_index = -1)
                        embed_ind.masked_fill_(~ce_loss_mask, -1)
                    commit_loss = calculate_ce_loss(embed_ind)
                else:
                    if exists(mask):
                        # with variable lengthed sequences
                        commit_loss = F.mse_loss(commit_quantize, x, reduction = 'none')
                        loss_mask = mask
                        if is_multiheaded:
                            loss_mask = repeat(loss_mask, 'b n -> c (b h) n', c = commit_loss.shape[0], h = commit_loss.shape[1] // mask.shape[0])
                        commit_loss = commit_loss[loss_mask].mean()
                    else:
                        commit_loss = F.mse_loss(commit_quantize, x)
                loss = loss + commit_loss * self.commitment_weight
            if self.has_codebook_orthogonal_loss:
                codebook = self._codebook.embed
                # only calculate orthogonal loss for the activated codes for this batch
                if self.orthogonal_reg_active_codes_only:
                    assert not (is_multiheaded and self.separate_codebook_per_head), 'orthogonal regularization for only active codes not compatible with multi-headed with separate codebooks yet'
                    unique_code_ids = torch.unique(embed_ind)
                    codebook = codebook[:, unique_code_ids]
                num_codes = codebook.shape[-2]
                if exists(self.orthogonal_reg_max_codes) and num_codes > self.orthogonal_reg_max_codes:
                    rand_ids = torch.randperm(num_codes, device = device)[:self.orthogonal_reg_max_codes]
                    codebook = codebook[:, rand_ids]
                orthogonal_reg_loss = orthogonal_loss_fn(codebook)
                loss = loss + orthogonal_reg_loss * self.orthogonal_reg_weight
        # handle multi-headed quantized embeddings
        if is_multiheaded:
            if self.separate_codebook_per_head:
                quantize = rearrange(quantize, 'h b n d -> b n (h d)', h = heads)
            else:
                quantize = rearrange(quantize, '1 (b h) n d -> b n (h d)', h = heads)
        # project out
        quantize = self.project_out(quantize)
        # rearrange quantized embeddings
        if need_transpose:
            quantize = rearrange(quantize, 'b n d -> b d n')
        if self.accept_image_fmap:
            quantize = rearrange(quantize, 'b (h w) c -> b c h w', h = height, w = width)
        if only_one:
            quantize = rearrange(quantize, 'b 1 d -> b d')
        # if masking, only return quantized for where mask has True
        if exists(mask):
            quantize = torch.where(
                rearrange(mask, '... -> ... 1'),
                quantize,
                orig_input
            )
        return quantize, embed_ind, loss
| 35,993 | 33.911736 | 195 | py |
vector-quantize-pytorch | vector-quantize-pytorch-master/vector_quantize_pytorch/random_projection_quantizer.py | import torch
from torch import nn, einsum
import torch.nn.functional as F
from vector_quantize_pytorch.vector_quantize_pytorch import VectorQuantize
from einops import rearrange, repeat, pack, unpack
def exists(val):
    """Return True when *val* carries a value, False when it is None."""
    if val is None:
        return False
    return True
class RandomProjectionQuantizer(nn.Module):
    """Random-projection quantizer — see https://arxiv.org/abs/2202.01855.

    Inputs are normalized, projected by fixed (untrained) random matrices,
    then looked up in a frozen cosine-similarity codebook.
    """
    def __init__(
        self,
        *,
        dim,
        codebook_size,
        codebook_dim,
        num_codebooks = 1,
        norm = True,
        **kwargs
    ):
        super().__init__()
        self.num_codebooks = num_codebooks

        # one fixed random projection matrix per codebook; registered as a
        # buffer since it is never trained
        projection = torch.empty(num_codebooks, dim, codebook_dim)
        nn.init.xavier_normal_(projection)
        self.register_buffer('rand_projs', projection)

        # in section 3 of https://arxiv.org/abs/2202.01855
        # "The input data is normalized to have 0 mean and standard deviation of 1 ... to prevent collapse"
        if norm:
            self.norm = nn.LayerNorm(dim, elementwise_affine = False)
        else:
            self.norm = nn.Identity()

        self.vq = VectorQuantize(
            dim = codebook_dim * num_codebooks,
            codebook_size = codebook_size,
            heads = num_codebooks,
            separate_codebook_per_head = True,
            use_cosine_sim = True,
            **kwargs
        )

    def forward(
        self,
        x,
        indices = None
    ):
        """Return code indices for *x*, or a cross-entropy loss when target
        *indices* are supplied."""
        wants_ce_loss = exists(indices)

        normed = self.norm(x)
        projected = einsum('b n d, h d e -> b n h e', normed, self.rand_projs)
        packed, _ = pack([projected], 'b n *')

        # the quantizer (random codebook) is intentionally kept frozen
        self.vq.eval()
        vq_out = self.vq(packed, indices = indices)

        if wants_ce_loss:
            _, ce_loss = vq_out
            return ce_loss

        _, codes, _ = vq_out
        return codes
| 1,723 | 24.731343 | 107 | py |
vector-quantize-pytorch | vector-quantize-pytorch-master/vector_quantize_pytorch/residual_vq.py | from math import ceil
from functools import partial
from itertools import zip_longest
from random import randrange
import torch
from torch import nn
import torch.nn.functional as F
from vector_quantize_pytorch.vector_quantize_pytorch import VectorQuantize
from einops import rearrange, repeat, pack, unpack
# helper functions
# value-or-sentinel helpers
def exists(val):
    """True when *val* is anything other than None."""
    if val is None:
        return False
    return True

def default(val, d):
    """Return *val* when provided, otherwise the fallback *d*."""
    if exists(val):
        return val
    return d
def round_up_multiple(num, mult):
    """Round integer *num* up to the nearest multiple of *mult*.

    Uses exact integer ceiling division (``-(-num // mult)``) instead of
    ``ceil(num / mult)``: the latter goes through float division and returns
    wrong results for integers at or above 2**53, where floats lose integer
    precision. Callers in this file pass small non-negative ints.
    """
    return -(-num // mult) * mult
# main class
class ResidualVQ(nn.Module):
    """ Follows Algorithm 1. in https://arxiv.org/pdf/2107.03312.pdf """
    def __init__(
        self,
        *,
        num_quantizers,
        shared_codebook = False,
        heads = 1,
        quantize_dropout = False,
        quantize_dropout_cutoff_index = 0,
        quantize_dropout_multiple_of = 1,
        accept_image_fmap = False,
        **kwargs
    ):
        # num_quantizers stacked VectorQuantize layers; each layer quantizes
        # the residual left over by the previous one. Extra kwargs are
        # forwarded unchanged to every layer.
        super().__init__()
        assert heads == 1, 'residual vq is not compatible with multi-headed codes'
        self.num_quantizers = num_quantizers
        self.accept_image_fmap = accept_image_fmap
        self.layers = nn.ModuleList([VectorQuantize(accept_image_fmap = accept_image_fmap, **kwargs) for _ in range(num_quantizers)])
        # quantize dropout randomly truncates how many quantizers are used
        # per training step (only meaningful with more than one quantizer)
        self.quantize_dropout = quantize_dropout and num_quantizers > 1
        assert quantize_dropout_cutoff_index >= 0
        self.quantize_dropout_cutoff_index = quantize_dropout_cutoff_index
        self.quantize_dropout_multiple_of = quantize_dropout_multiple_of # encodec paper proposes structured dropout, believe this was set to 4
        if not shared_codebook:
            return
        # share the first layer's codebook object across all layers
        first_vq, *rest_vq = self.layers
        codebook = first_vq._codebook
        for vq in rest_vq:
            vq._codebook = codebook
    @property
    def codebooks(self):
        # stacked codebooks of all layers, shape (num_quantizers, codebook_size, dim)
        codebooks = [layer._codebook.embed for layer in self.layers]
        codebooks = torch.stack(codebooks, dim = 0)
        codebooks = rearrange(codebooks, 'q 1 c d -> q c d')
        return codebooks
    def get_codes_from_indices(self, indices):
        """Gather the code vectors addressed by *indices* from every layer.

        Indices equal to -1 (dropped-out quantizers) yield zero vectors.
        """
        batch, quantize_dim = indices.shape[0], indices.shape[-1]
        # may also receive indices in the shape of 'b h w q' (accept_image_fmap)
        indices, ps = pack_one(indices, 'b * q')
        # because of quantize dropout, one can pass in indices that are coarse
        # and the network should be able to reconstruct
        if quantize_dim < self.num_quantizers:
            assert self.quantize_dropout > 0., 'quantize dropout must be greater than 0 if you wish to reconstruct from a signal with less fine quantizations'
            indices = F.pad(indices, (0, self.num_quantizers - quantize_dim), value = -1)
        # get ready for gathering
        codebooks = repeat(self.codebooks, 'q c d -> q b c d', b = batch)
        gather_indices = repeat(indices, 'b n q -> q b n d', d = codebooks.shape[-1])
        # take care of quantizer dropout
        mask = gather_indices == -1.
        gather_indices = gather_indices.masked_fill(mask, 0) # have it fetch a dummy code to be masked out later
        all_codes = codebooks.gather(2, gather_indices) # gather all codes
        # mask out any codes that were dropout-ed
        all_codes = all_codes.masked_fill(mask, 0.)
        # if (accept_image_fmap = True) then return shape (quantize, batch, height, width, dimension)
        all_codes, = unpack(all_codes, ps, 'q b * d')
        return all_codes
    def forward(
        self,
        x,
        indices = None,
        return_all_codes = False,
        sample_codebook_temp = None
    ):
        """Residual-quantize *x* through all layers.

        Returns ``(quantized, all_indices, all_losses)`` (plus all codes when
        *return_all_codes* is set), or ``(quantized, ce_loss)`` when target
        *indices* are given.
        """
        num_quant, quant_dropout_multiple_of, return_loss, device = self.num_quantizers, self.quantize_dropout_multiple_of, exists(indices), x.device
        assert not (self.accept_image_fmap and exists(indices))
        quantized_out = 0.
        residual = x
        all_losses = []
        all_indices = []
        if return_loss:
            assert not torch.any(indices == -1), 'some of the residual vq indices were dropped out. please use indices derived when the module is in eval mode to derive cross entropy loss'
            ce_losses = []
        should_quantize_dropout = self.training and self.quantize_dropout and not return_loss
        # sample a layer index at which to dropout further residual quantization
        # also prepare null indices and loss
        if should_quantize_dropout:
            rand_quantize_dropout_index = randrange(self.quantize_dropout_cutoff_index, num_quant)
            if quant_dropout_multiple_of != 1:
                rand_quantize_dropout_index = round_up_multiple(rand_quantize_dropout_index + 1, quant_dropout_multiple_of) - 1
            null_indices_shape = (x.shape[0], *x.shape[-2:]) if self.accept_image_fmap else tuple(x.shape[:2])
            null_indices = torch.full(null_indices_shape, -1., device = device, dtype = torch.long)
            null_loss = torch.full((1,), 0., device = device, dtype = x.dtype)
        # go through the layers
        for quantizer_index, layer in enumerate(self.layers):
            if should_quantize_dropout and quantizer_index > rand_quantize_dropout_index:
                all_indices.append(null_indices)
                all_losses.append(null_loss)
                continue
            layer_indices = None
            if return_loss:
                layer_indices = indices[..., quantizer_index]
            quantized, *rest = layer(residual, indices = layer_indices, sample_codebook_temp = sample_codebook_temp)
            # subtract the (detached) quantized output to form the next residual
            residual = residual - quantized.detach()
            quantized_out = quantized_out + quantized
            if return_loss:
                ce_loss = rest[0]
                ce_losses.append(ce_loss)
                continue
            embed_indices, loss = rest
            all_indices.append(embed_indices)
            all_losses.append(loss)
        # whether to early return the cross entropy loss
        if return_loss:
            return quantized_out, sum(ce_losses)
        # stack all losses and indices
        all_losses, all_indices = map(partial(torch.stack, dim = -1), (all_losses, all_indices))
        ret = (quantized_out, all_indices, all_losses)
        if return_all_codes:
            # whether to return all codes from all codebooks across layers
            all_codes = self.get_codes_from_indices(all_indices)
            # will return all codes in shape (quantizer, batch, sequence length, codebook dimension)
            ret = (*ret, all_codes)
        return ret
# grouped residual vq
class GroupedResidualVQ(nn.Module):
    """Split the feature dimension into *groups* and run an independent
    ResidualVQ on each group, concatenating the results back together."""
    def __init__(
        self,
        *,
        dim,
        groups = 1,
        accept_image_fmap = False,
        **kwargs
    ):
        super().__init__()
        self.dim = dim
        self.groups = groups
        assert (dim % groups) == 0
        dim_per_group = dim // groups
        self.accept_image_fmap = accept_image_fmap
        # one residual vq per feature group; extra kwargs forwarded to each
        self.rvqs = nn.ModuleList([])
        for _ in range(groups):
            self.rvqs.append(ResidualVQ(
                dim = dim_per_group,
                accept_image_fmap = accept_image_fmap,
                **kwargs
            ))
    @property
    def codebooks(self):
        # (groups, num_quantizers, codebook_size, dim_per_group)
        return torch.stack(tuple(rvq.codebooks for rvq in self.rvqs))
    def get_codes_from_indices(self, indices):
        # *indices* holds one indices tensor per group
        codes = tuple(rvq.get_codes_from_indices(chunk_indices) for rvq, chunk_indices in zip(self.rvqs, indices))
        return torch.stack(codes)
    def forward(
        self,
        x,
        indices = None,
        return_all_codes = False,
        sample_codebook_temp = None
    ):
        """Quantize each feature group with its own ResidualVQ and recombine."""
        shape = x.shape
        # channels-first for image feature maps, channels-last otherwise
        split_dim = 1 if self.accept_image_fmap else -1
        assert shape[split_dim] == self.dim
        # split the feature dimension into groups
        x = x.chunk(self.groups, dim = split_dim)
        indices = default(indices, tuple())
        return_ce_loss = len(indices) > 0
        assert len(indices) == 0 or len(indices) == self.groups
        forward_kwargs = dict(
            return_all_codes = return_all_codes,
            sample_codebook_temp = sample_codebook_temp
        )
        # invoke residual vq on each group
        out = tuple(rvq(chunk, indices = chunk_indices, **forward_kwargs) for rvq, chunk, chunk_indices in zip_longest(self.rvqs, x, indices))
        out = tuple(zip(*out))
        # if returning cross entropy loss to rvq codebooks
        if return_ce_loss:
            quantized, ce_losses = out
            return torch.cat(quantized, dim = split_dim), sum(ce_losses)
        # otherwise, get all the zipped outputs and combine them
        quantized, all_indices, commit_losses, *maybe_all_codes = out
        quantized = torch.cat(quantized, dim = split_dim)
        all_indices = torch.stack(all_indices)
        commit_losses = torch.stack(commit_losses)
        ret = (quantized, all_indices, commit_losses, *maybe_all_codes)
        return ret
| 8,989 | 31.930403 | 188 | py |
vector-quantize-pytorch | vector-quantize-pytorch-master/vector_quantize_pytorch/__init__.py | from vector_quantize_pytorch.vector_quantize_pytorch import VectorQuantize
from vector_quantize_pytorch.residual_vq import ResidualVQ, GroupedResidualVQ
from vector_quantize_pytorch.random_projection_quantizer import RandomProjectionQuantizer | 242 | 80 | 89 | py |
smt | smt-master/doc/conf.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# SMT documentation build configuration file, created by
# sphinx-quickstart on Sun Aug 6 19:36:14 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
from smt import __version__

# -- General configuration ------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    "sphinx.ext.autodoc",
    "sphinx.ext.mathjax",
    "sphinx.ext.viewcode",
    "matplotlib.sphinxext.plot_directive",
    "numpydoc",
]

# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]

# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = ".rst"

# The master toctree document.
master_doc = "index"

# General information about the project.
project = "SMT"
copyright = "2017, John Hwang"
author = "John Hwang, Mohamed Amine Bouhlel, Remi Lafage"

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
# Take the full version when no need to distinguish version and release
version = __version__
# The full version, including alpha/beta/rc tags.
release = version

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
# NOTE: Sphinx >= 5.0 no longer accepts ``language = None`` (it raises an
# "invalid configuration value" warning and falls back to 'en'); declare
# the content language explicitly.
language = "en"

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"

# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False

# -- Options for HTML output ----------------------------------------------

# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "bizstyle"

# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
html_theme_options = {
    "rightsidebar": False,
    "sidebarwidth": 250,
    "body_min_width": 1100,
    "body_max_width": 1100,
}
html_logo = "smt_logo.png"

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]

# -- Options for HTMLHelp output ------------------------------------------

# Output file base name for HTML help builder.
htmlhelp_basename = "SMTdoc"

# -- Options for LaTeX output ---------------------------------------------

latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #
    # 'papersize': 'letterpaper',
    # The font size ('10pt', '11pt' or '12pt').
    #
    # 'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.
    #
    # 'preamble': '',
    # Latex figure (float) alignment
    #
    # 'figure_align': 'htbp',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (
        master_doc,
        "SMT.tex",
        "SMT Documentation",
        "John Hwang, Mohamed Amine Bouhlel, Remi Lafage",
        "manual",
    )
]

# -- Options for manual page output ---------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [(master_doc, "smt", "SMT Documentation", [author], 1)]

# -- Options for Texinfo output -------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (
        master_doc,
        "SMT",
        "SMT Documentation",
        author,
        "SMT",
        "One line description of project.",
        "Miscellaneous",
    )
]
LongTailCXR | LongTailCXR-main/src/main.py | import os
import shutil
import argparse
import numpy as np
import pandas as pd
import torch
import torchvision
from sklearn.utils import class_weight
from datasets import *
from utils import *
from losses import *
def main(args):
    """Train and evaluate a ResNet-50 long-tailed CXR classifier.

    Builds a run directory whose name encodes the chosen options, constructs
    the train/val/test datasets and loaders (optionally with class-balanced
    sampling), applies the selected loss / re-weighting / decoupling method,
    trains with early stopping on balanced-validation accuracy, and finally
    evaluates the best weights on both the balanced and imbalanced test splits.

    Parameters
    ----------
    args : argparse.Namespace
        Parsed command-line options (dataset, loss, re-weighting, mixup,
        decoupling, optimization hyperparameters, seed, paths).
    """
    # Set model/output directory name
    # (each enabled option appends a tag so runs are self-describing on disk)
    MODEL_NAME = args.dataset
    MODEL_NAME += f'_{args.model_name}'
    MODEL_NAME += f'_rand' if args.rand_init else ''
    MODEL_NAME += f'_bal-mixup-{args.mixup_alpha}' if args.bal_mixup else ''
    MODEL_NAME += f'_mixup-{args.mixup_alpha}' if args.mixup else ''
    MODEL_NAME += f'_decoupling-{args.decoupling_method}' if args.decoupling_method != '' else ''
    MODEL_NAME += f'_rw-{args.rw_method}' if args.rw_method != '' else ''
    MODEL_NAME += f'_{args.loss}'
    MODEL_NAME += '-drw' if args.drw else ''
    MODEL_NAME += f'_cb-beta-{args.cb_beta}' if args.rw_method == 'cb' else ''
    MODEL_NAME += f'_fl-gamma-{args.fl_gamma}' if args.loss == 'focal' else ''
    MODEL_NAME += f'_lr-{args.lr}'
    MODEL_NAME += f'_bs-{args.batch_size}'
    # Create output directory for model (and delete if already exists)
    # WARNING: re-running with identical options wipes the previous results.
    if not os.path.isdir(args.out_dir):
        os.mkdir(args.out_dir)
    model_dir = os.path.join(args.out_dir, MODEL_NAME)
    if os.path.isdir(model_dir):
        shutil.rmtree(model_dir)
    os.mkdir(model_dir)
    # Set all seeds for reproducibility
    set_seed(args.seed)
    # Create datasets + loaders
    # NOTE(review): confirm this string matches the --dataset choices accepted
    # by the CLI parser at the bottom of this file.
    if args.dataset == 'nih-cxr-lt':
        dataset = NIH_CXR_Dataset
        N_CLASSES = 20
    else:
        dataset = MIMIC_CXR_Dataset
        N_CLASSES = 19
    train_dataset = dataset(data_dir=args.data_dir, label_dir=args.label_dir, split='train')
    val_dataset = dataset(data_dir=args.data_dir, label_dir=args.label_dir, split='balanced-val')
    bal_test_dataset = dataset(data_dir=args.data_dir, label_dir=args.label_dir, split='balanced-test')
    test_dataset = dataset(data_dir=args.data_dir, label_dir=args.label_dir, split='test')
    if args.bal_mixup:
        # Balanced-mixup: combine a standard shuffled loader with a
        # class-balanced (inverse-frequency weighted) sampler via ComboLoader.
        cls_weights = [len(train_dataset) / cls_count for cls_count in train_dataset.cls_num_list]
        instance_weights = [cls_weights[label] for label in train_dataset.labels]
        sampler = torch.utils.data.WeightedRandomSampler(torch.Tensor(instance_weights), len(train_dataset))
        bal_train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=args.batch_size, shuffle=False, num_workers=8, pin_memory=True, worker_init_fn=worker_init_fn, sampler=sampler)
        imbal_train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True, num_workers=8, pin_memory=True, worker_init_fn=worker_init_fn)
        train_loader = ComboLoader([imbal_train_loader, bal_train_loader])
    elif args.decoupling_method == 'cRT':
        # cRT retrains the classifier head under class-balanced sampling.
        cls_weights = [len(train_dataset) / cls_count for cls_count in train_dataset.cls_num_list]
        instance_weights = [cls_weights[label] for label in train_dataset.labels]
        sampler = torch.utils.data.WeightedRandomSampler(torch.Tensor(instance_weights), len(train_dataset))
        train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=args.batch_size, shuffle=False, num_workers=8, pin_memory=True, worker_init_fn=worker_init_fn, sampler=sampler)
    else:
        train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True, num_workers=8, pin_memory=True, worker_init_fn=worker_init_fn)
    val_loader = torch.utils.data.DataLoader(val_dataset, batch_size=args.batch_size, shuffle=False, num_workers=2, pin_memory=True, worker_init_fn=val_worker_init_fn)
    # Create csv documenting training history
    history = pd.DataFrame(columns=['epoch', 'phase', 'loss', 'balanced_acc', 'mcc', 'auroc'])
    history.to_csv(os.path.join(model_dir, 'history.csv'), index=False)
    # Set device
    device = torch.device('cuda:0')
    # Instantiate model (ImageNet-pretrained unless --rand_init)
    model = torchvision.models.resnet50(pretrained=(not args.rand_init))
    model.fc = torch.nn.Linear(model.fc.in_features, N_CLASSES)
    if args.decoupling_method == 'tau_norm':
        # tau-normalisation: rescale each classifier row by its L2 norm and
        # zero the biases; no further training of the backbone is needed.
        msg = model.load_state_dict(torch.load(args.decoupling_weights, map_location='cpu')['weights'])
        print(f'Loaded weights from {args.decoupling_weights} with message: {msg}')
        model.fc.bias.data = torch.zeros_like(model.fc.bias.data)
        fc_weights = model.fc.weight.data.clone()
        weight_norms = torch.norm(fc_weights, 2, 1)
        # NOTE(review): dividing by norm**-4 multiplies each row by its
        # norm**4 — confirm the intended tau exponent for tau-normalisation.
        model.fc.weight.data = torch.stack([fc_weights[i] / torch.pow(weight_norms[i], -4) for i in range(N_CLASSES)], dim=0)
    elif args.decoupling_method == 'cRT':
        msg = model.load_state_dict(torch.load(args.decoupling_weights, map_location='cpu')['weights'])
        print(f'Loaded weights from {args.decoupling_weights} with message: {msg}')
        model.fc = torch.nn.Linear(model.fc.in_features, N_CLASSES)  # re-initialize classifier head
    model = model.to(device)
    # Set loss and weighting method
    if args.rw_method == 'sklearn':
        weights = class_weight.compute_class_weight(class_weight='balanced', classes=np.unique(train_dataset.labels), y=np.array(train_dataset.labels))
        weights = torch.Tensor(weights).to(device)
    elif args.rw_method == 'cb':
        weights = get_CB_weights(samples_per_cls=train_dataset.cls_num_list, beta=args.cb_beta)
        weights = torch.Tensor(weights).to(device)
    else:
        weights = None
    if weights is None:
        print('No class reweighting')
    else:
        print(f'Class weights with rw_method {args.rw_method}:')
        for i, c in enumerate(train_dataset.CLASSES):
            print(f'\t{c}: {weights[i]}')
    # With deferred re-weighting (DRW), start unweighted; weights kick in later.
    loss_fxn = get_loss(args, None if args.drw else weights, train_dataset)
    # Set optimizer (decoupling methods only fine-tune the classifier head)
    if args.decoupling_method != '':
        optimizer = torch.optim.Adam(model.fc.parameters(), lr=args.lr)
    else:
        optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
    # Train with early stopping (tau_norm needs no training at all)
    if args.decoupling_method != 'tau_norm':
        epoch = 1
        early_stopping_dict = {'best_acc': 0., 'epochs_no_improve': 0}
        best_model_wts = None
        while epoch <= args.max_epochs and early_stopping_dict['epochs_no_improve'] <= args.patience:
            if args.bal_mixup:
                history = bal_mixup_train(model=model, device=device, loss_fxn=loss_fxn, optimizer=optimizer, data_loader=train_loader, history=history, epoch=epoch, model_dir=model_dir, classes=train_dataset.CLASSES, mixup_alpha=args.mixup_alpha)
            else:
                history = train(model=model, device=device, loss_fxn=loss_fxn, optimizer=optimizer, data_loader=train_loader, history=history, epoch=epoch, model_dir=model_dir, classes=train_dataset.CLASSES, mixup=args.mixup, mixup_alpha=args.mixup_alpha)
            history, early_stopping_dict, best_model_wts = validate(model=model, device=device, loss_fxn=loss_fxn, optimizer=optimizer, data_loader=val_loader, history=history, epoch=epoch, model_dir=model_dir, early_stopping_dict=early_stopping_dict, best_model_wts=best_model_wts, classes=val_dataset.CLASSES)
            # Deferred re-weighting: at epoch 10, anneal the LR, switch to the
            # class-weighted loss, and reset the early-stopping patience.
            if args.drw and epoch == 10:
                for g in optimizer.param_groups:
                    g['lr'] *= 0.1  # anneal LR
                loss_fxn = get_loss(args, weights, train_dataset)  # get class-weighted loss
                early_stopping_dict['epochs_no_improve'] = 0  # reset patience
            epoch += 1
    else:
        best_model_wts = model.state_dict()
    # Evaluate on balanced test set
    evaluate(model=model, device=device, loss_fxn=loss_fxn, dataset=bal_test_dataset, split='balanced-test', batch_size=args.batch_size, history=history, model_dir=model_dir, weights=best_model_wts)
    # Evaluate on imbalanced test set
    evaluate(model=model, device=device, loss_fxn=loss_fxn, dataset=test_dataset, split='test', batch_size=args.batch_size, history=history, model_dir=model_dir, weights=best_model_wts)
if __name__ == '__main__':
    # Command-line arguments
    parser = argparse.ArgumentParser()
    parser.add_argument('--data_dir', default='/ssd1/greg/NIH_CXR/images', type=str)
    parser.add_argument('--label_dir', default='labels/', type=str)
    parser.add_argument('--out_dir', default='results/', type=str, help="path to directory where results and model weights will be saved")
    # Fixed: choices previously listed 'nih-lt', but main() selects the NIH
    # dataset by comparing args.dataset == 'nih-cxr-lt', so the NIH branch was
    # unreachable and every run silently fell through to MIMIC.
    parser.add_argument('--dataset', required=True, type=str, choices=['nih-cxr-lt', 'mimic-cxr-lt'])
    parser.add_argument('--loss', default='ce', type=str, choices=['ce', 'focal', 'ldam'])
    parser.add_argument('--drw', action='store_true', default=False)
    parser.add_argument('--rw_method', default='', choices=['', 'sklearn', 'cb'])
    parser.add_argument('--cb_beta', default=0.9999, type=float)
    parser.add_argument('--fl_gamma', default=2., type=float)
    parser.add_argument('--bal_mixup', action='store_true', default=False)
    parser.add_argument('--mixup', action='store_true', default=False)
    parser.add_argument('--mixup_alpha', default=0.2, type=float)
    parser.add_argument('--decoupling_method', default='', choices=['', 'cRT', 'tau_norm'], type=str)
    parser.add_argument('--decoupling_weights', type=str)
    parser.add_argument('--model_name', default='resnet50', type=str, help="CNN backbone to use")
    parser.add_argument('--max_epochs', default=60, type=int, help="maximum number of epochs to train")
    parser.add_argument('--batch_size', default=256, type=int, help="batch size for training, validation, and testing (will be lowered if TTA used)")
    parser.add_argument('--lr', default=1e-4, type=float)
    parser.add_argument('--patience', default=15, type=int, help="early stopping 'patience' during training")
    parser.add_argument('--rand_init', action='store_true', default=False)
    parser.add_argument('--n_TTA', default=0, type=int, help="number of augmented copies to use during test-time augmentation (TTA), default 0")
    parser.add_argument('--seed', default=0, type=int, help="set random seed")
    args = parser.parse_args()
    print(args)
main(args) | 10,072 | 52.86631 | 311 | py |
LongTailCXR | LongTailCXR-main/src/losses.py | import numpy as np
import torch
import torch.nn.functional as F
def get_loss(args, weights, train_dataset):
    """Return the training criterion selected by ``args.loss``.

    Parameters
    ----------
    args : argparse.Namespace
        Must provide ``loss`` ('ce', 'focal', or 'ldam') and, for focal
        loss, ``fl_gamma``.
    weights : torch.Tensor or None
        Optional per-class weights (used as alpha for focal loss).
    train_dataset : Dataset
        Only consulted for 'ldam' (needs ``cls_num_list``).

    Returns
    -------
    A callable PyTorch loss module.

    Raises
    ------
    ValueError
        If ``args.loss`` is not one of the supported values.
    """
    if args.loss == 'ce':
        loss_fxn = torch.nn.CrossEntropyLoss(weight=weights, reduction='mean')
    elif args.loss == 'focal':
        # NOTE: fetches the implementation via torch.hub (requires network
        # access on first use).
        loss_fxn = torch.hub.load('adeelh/pytorch-multi-class-focal-loss', model='FocalLoss', alpha=weights, gamma=args.fl_gamma, reduction='mean')
    elif args.loss == 'ldam':
        loss_fxn = LDAMLoss(cls_num_list=train_dataset.cls_num_list, weight=weights)
    else:
        # Fixed: an unrecognized value previously fell through to the return
        # statement and raised a confusing NameError on loss_fxn.
        raise ValueError(f"Unknown loss: {args.loss!r} (expected 'ce', 'focal', or 'ldam')")
    return loss_fxn
def get_CB_weights(samples_per_cls, beta):
    """Compute class-balanced loss weights from per-class sample counts.

    Implements the "effective number of samples" re-weighting of Cui et al.
    (CVPR 2019): w_c = (1 - beta) / (1 - beta ** n_c), normalised so the
    weights sum to the number of classes.

    Parameters
    ----------
    samples_per_cls : sequence of int
        Number of training samples for each class.
    beta : float
        Effective-number hyperparameter in [0, 1).

    Returns
    -------
    numpy.ndarray of per-class weights, summing to ``len(samples_per_cls)``.
    """
    counts = np.asarray(samples_per_cls)
    effective_num = 1.0 - np.power(beta, counts)
    raw = (1.0 - beta) / effective_num
    return raw / raw.sum() * len(samples_per_cls)
## CREDIT TO https://github.com/kaidic/LDAM-DRW ##
class LDAMLoss(torch.nn.Module):
    """Label-Distribution-Aware Margin loss (Cao et al., NeurIPS 2019).

    Enforces a per-class margin m_c proportional to n_c**(-1/4), rescaled so
    the largest margin (rarest class) equals ``max_m``, then applies a scaled
    cross-entropy to the margin-adjusted logits.

    Parameters
    ----------
    cls_num_list : sequence of int
        Number of training samples per class.
    max_m : float
        Maximum margin, assigned to the rarest class.
    weight : torch.Tensor or None
        Optional per-class weights for the cross-entropy term.
    s : float
        Logit scale factor (must be positive).
    """

    def __init__(self, cls_num_list, max_m=0.5, weight=None, s=30):
        super(LDAMLoss, self).__init__()
        m_list = 1.0 / np.sqrt(np.sqrt(cls_num_list))  # n_c ** (-1/4)
        m_list = m_list * (max_m / np.max(m_list))
        # Fixed: was torch.cuda.FloatTensor, which crashed on CPU-only
        # machines; the margin vector is now moved to the input's device
        # lazily in forward(). (Debug print of self.weight also removed.)
        self.m_list = torch.tensor(m_list, dtype=torch.float32)
        assert s > 0
        self.s = s
        self.weight = weight

    def forward(self, x, target):
        """Compute the LDAM loss for logits ``x`` (N, C) and int labels ``target`` (N,)."""
        # Boolean one-hot mask of each row's target class.
        # Fixed: was a uint8 mask, which newer torch rejects in torch.where.
        index = torch.zeros_like(x, dtype=torch.bool)
        index.scatter_(1, target.data.view(-1, 1), True)
        index_float = index.float()
        m_list = self.m_list.to(x.device)
        # Per-sample margin = margin of that sample's target class.
        batch_m = torch.matmul(m_list[None, :], index_float.transpose(0, 1))
        batch_m = batch_m.view((-1, 1))
        x_m = x - batch_m
        # Subtract the margin from the target logit only.
        output = torch.where(index, x_m, x)
        return F.cross_entropy(self.s * output, target, weight=self.weight)
| 1,722 | 34.163265 | 147 | py |
LongTailCXR | LongTailCXR-main/src/utils.py | import os
import random
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import torch
import tqdm
from copy import deepcopy
from mlxtend.plotting import plot_confusion_matrix
from sklearn.metrics import roc_auc_score, balanced_accuracy_score, classification_report, confusion_matrix, matthews_corrcoef
from sklearn.preprocessing import LabelBinarizer
def set_seed(seed):
    """Set all random seeds and settings for reproducibility (deterministic behavior).

    Seeds torch (CPU + all CUDA devices), numpy, the stdlib ``random`` module,
    and the Python hash seed, and configures cuDNN for deterministic kernels.
    """
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    torch.backends.cudnn.deterministic = True
    # Fixed: benchmark was True, which lets cuDNN autotune convolution
    # algorithms and makes runs nondeterministic — contradicting both
    # cudnn.deterministic above and this function's stated purpose.
    torch.backends.cudnn.benchmark = False
    np.random.seed(seed)
    random.seed(seed)
    os.environ['PYTHONHASHSEED'] = str(seed)
def worker_init_fn(worker_id):
    """Seed numpy and stdlib random inside a DataLoader worker.

    Derives the seed from torch's per-worker initial seed (reduced mod 2**32
    to fit numpy's seed range) so workers stay decorrelated but reproducible.
    """
    seed = torch.initial_seed() % 2 ** 32
    np.random.seed(seed)
    random.seed(seed)
def val_worker_init_fn(worker_id):
    """Deterministically seed numpy and stdlib random from the worker id.

    Used for validation/test loaders so evaluation is identical across runs.
    """
    random.seed(worker_id)
    np.random.seed(worker_id)
def mixup_data(x, y, alpha=1.0, use_cuda=True):
    """Apply mixup to a batch: convex-combine ``x`` with a permuted copy of itself.

    The mixing coefficient ``lam`` is drawn from Beta(alpha, alpha) when
    ``alpha > 0``, otherwise 1 (no mixing).

    Returns
    -------
    (mixed_x, y_a, y_b, lam) where ``y_a`` are the original labels and
    ``y_b`` the labels of the permuted mixing partners.
    """
    lam = np.random.beta(alpha, alpha) if alpha > 0 else 1
    n = x.size()[0]
    perm = torch.randperm(n).cuda() if use_cuda else torch.randperm(n)
    mixed_x = lam * x + (1 - lam) * x[perm, :]
    return mixed_x, y, y[perm], lam
def mixup_criterion(criterion, pred, y_a, y_b, lam):
    """Mixup loss: lam-weighted combination of the criterion on both label sets."""
    loss_a = criterion(pred, y_a)
    loss_b = criterion(pred, y_b)
    return lam * loss_a + (1 - lam) * loss_b
def bal_mixup_train(model, device, loss_fxn, optimizer, data_loader, history, epoch, model_dir, classes, mixup_alpha):
    """Train the model for one epoch using balanced mixup.

    Each step mixes a batch from the standard (instance-sampled) loader with a
    batch from the class-balanced loader, and mixes the labels with matching
    weights.

    Parameters
    ----------
    model : PyTorch model
    device : PyTorch device
    loss_fxn : PyTorch loss function
    optimizer : PyTorch optimizer
    data_loader : ComboLoader
        Yields paired batches from [imbalanced loader, balanced loader].
    history : pandas DataFrame
        Running history of training metrics
    epoch : int
        Current epoch number (1-K)
    model_dir : str
        Output directory for metrics, model weights, etc.
    classes : list[str]
        Ordered list of names of output classes
    mixup_alpha : float
        Beta-distribution parameter for the mixing coefficient

    Returns
    -------
    history : pandas DataFrame
        Updated history with metrics from the completed training epoch
    """
    # Fixed: validate() puts the model in eval mode and never resets it, so
    # without this call every epoch after the first trained with
    # BatchNorm/Dropout stuck in eval mode.
    model.train()

    pbar = tqdm.tqdm(enumerate(data_loader), total=len(data_loader), desc=f'Epoch {epoch}')
    running_loss = 0.
    y_true, y_hat = [], []
    for i, batch in pbar:
        # Fixed: the original read batch[0] for BOTH halves, mixing each batch
        # with itself — the balanced sampler was never actually used.
        # NOTE(review): assumes ComboLoader yields (imbalanced_batch,
        # balanced_batch) in that order — confirm against ComboLoader.
        x = batch[0][0].to(device)
        y = batch[0][1].to(device)
        bal_x = batch[1][0].to(device)
        bal_y = batch[1][1].to(device)

        lam = np.random.beta(mixup_alpha, mixup_alpha)
        mixed_x = (1 - lam) * x + lam * bal_x

        out = model(mixed_x)
        # Fixed: label weights now match the pixel weights — bal_x carries
        # weight lam in mixed_x, so bal_y takes the lam-weighted slot in
        # mixup_criterion (the original paired weight lam with y instead).
        loss = mixup_criterion(loss_fxn, out, bal_y, y, lam)

        # Zero gradients (setting .grad to None avoids an extra memset).
        for param in model.parameters():
            param.grad = None
        loss.backward()
        optimizer.step()

        running_loss += loss.item()
        y_hat.append(out.softmax(dim=1).detach().cpu().numpy())
        y_true.append(y.detach().cpu().numpy())

        pbar.set_postfix({'loss': running_loss / (i + 1)})

    # Collect true and predicted labels into flat numpy arrays
    y_true, y_hat = np.concatenate(y_true), np.concatenate(y_hat)

    # Compute metrics
    auc = roc_auc_score(y_true, y_hat, average='macro', multi_class='ovr')
    b_acc = balanced_accuracy_score(y_true, y_hat.argmax(axis=1))
    mcc = matthews_corrcoef(y_true, y_hat.argmax(axis=1))
    print('Balanced Accuracy:', round(b_acc, 3), '|', 'MCC:', round(mcc, 3), '|', 'AUC:', round(auc, 3))

    current_metrics = pd.DataFrame([[epoch, 'train', running_loss / (i + 1), b_acc, mcc, auc]], columns=history.columns)
    current_metrics.to_csv(os.path.join(model_dir, 'history.csv'), mode='a', header=False, index=False)

    # Fixed: DataFrame.append was removed in pandas 2.0; pd.concat is the
    # supported equivalent.
    return pd.concat([history, current_metrics])
def train(model, device, loss_fxn, optimizer, data_loader, history, epoch, model_dir, classes, mixup, mixup_alpha):
    """Train the model for one epoch (optionally with standard mixup).

    Parameters
    ----------
    model : PyTorch model
    device : PyTorch device
    loss_fxn : PyTorch loss function
    optimizer : PyTorch optimizer
    data_loader : PyTorch data loader over (image, label) batches
    history : pandas DataFrame
        Running history of training metrics
    epoch : int
        Current epoch number (1-K)
    model_dir : str
        Output directory where metrics, model weights, etc. are stored
    classes : list[str]
        Ordered list of names of output classes
    mixup : bool
        Whether to apply standard mixup augmentation
    mixup_alpha : float
        Beta-distribution parameter for mixup

    Returns
    -------
    history : pandas DataFrame
        Updated history with metrics from the completed training epoch
    """
    # Fixed: validate() puts the model in eval mode and never resets it, so
    # without this call every epoch after the first trained with
    # BatchNorm/Dropout stuck in eval mode.
    model.train()

    pbar = tqdm.tqdm(enumerate(data_loader), total=len(data_loader), desc=f'Epoch {epoch}')
    running_loss = 0.
    y_true, y_hat = [], []
    for i, (x, y) in pbar:
        x = x.to(device)
        y = y.to(device)

        if mixup:
            x, y_a, y_b, lam = mixup_data(x, y, mixup_alpha, True)

        out = model(x)

        if mixup:
            loss = mixup_criterion(loss_fxn, out, y_a, y_b, lam)
        else:
            loss = loss_fxn(out, y)

        # Zero gradients (setting .grad to None avoids an extra memset).
        for param in model.parameters():
            param.grad = None
        loss.backward()
        optimizer.step()

        running_loss += loss.item()
        y_hat.append(out.softmax(dim=1).detach().cpu().numpy())
        y_true.append(y.detach().cpu().numpy())

        pbar.set_postfix({'loss': running_loss / (i + 1)})

    # Collect true and predicted labels into flat numpy arrays
    y_true, y_hat = np.concatenate(y_true), np.concatenate(y_hat)

    # Compute metrics
    auc = roc_auc_score(y_true, y_hat, average='macro', multi_class='ovr')
    b_acc = balanced_accuracy_score(y_true, y_hat.argmax(axis=1))
    mcc = matthews_corrcoef(y_true, y_hat.argmax(axis=1))
    print('Balanced Accuracy:', round(b_acc, 3), '|', 'MCC:', round(mcc, 3), '|', 'AUC:', round(auc, 3))

    current_metrics = pd.DataFrame([[epoch, 'train', running_loss / (i + 1), b_acc, mcc, auc]], columns=history.columns)
    current_metrics.to_csv(os.path.join(model_dir, 'history.csv'), mode='a', header=False, index=False)

    # Fixed: DataFrame.append was removed in pandas 2.0; pd.concat is the
    # supported equivalent.
    return pd.concat([history, current_metrics])
def validate(model, device, loss_fxn, optimizer, data_loader, history, epoch, model_dir, early_stopping_dict, best_model_wts, classes):
    """Evaluate the model on the validation set and update early stopping.

    Saves a checkpoint (weights + optimizer state) whenever balanced accuracy
    improves over the best seen so far.

    Parameters
    ----------
    model : PyTorch model
    device : PyTorch device
    loss_fxn : PyTorch loss function
    optimizer : PyTorch optimizer
        Its state is saved alongside the weights on improvement.
    data_loader : PyTorch data loader over the validation split
    history : pandas DataFrame
        Running history of training metrics
    epoch : int
        Current epoch number (1-K)
    model_dir : str
        Output directory where metrics, model weights, etc. are stored
    early_stopping_dict : dict
        {'best_acc': <float>, 'epochs_no_improve': <int>}
    best_model_wts : PyTorch state_dict or None
        Model weights from the best epoch so far
    classes : list[str]
        Ordered list of names of output classes

    Returns
    -------
    (history, early_stopping_dict, best_model_wts) with history extended by
    this epoch's metrics and the early-stopping state updated.
    """
    model.eval()
    pbar = tqdm.tqdm(enumerate(data_loader), total=len(data_loader), desc=f'[VAL] Epoch {epoch}')
    running_loss = 0.
    y_true, y_hat = [], []
    with torch.no_grad():
        for i, (x, y) in pbar:
            x = x.to(device)
            y = y.to(device)

            out = model(x)
            loss = loss_fxn(out, y)

            running_loss += loss.item()
            y_hat.append(out.softmax(dim=1).detach().cpu().numpy())
            y_true.append(y.detach().cpu().numpy())

            pbar.set_postfix({'loss': running_loss / (i + 1)})

    # Collect true and predicted labels into flat numpy arrays
    y_true, y_hat = np.concatenate(y_true), np.concatenate(y_hat)

    # Compute metrics
    auc = roc_auc_score(y_true, y_hat, average='macro', multi_class='ovr')
    b_acc = balanced_accuracy_score(y_true, y_hat.argmax(axis=1))
    mcc = matthews_corrcoef(y_true, y_hat.argmax(axis=1))
    print('[VAL] Balanced Accuracy:', round(b_acc, 3), '|', 'MCC:', round(mcc, 3), '|', 'AUC:', round(auc, 3))

    current_metrics = pd.DataFrame([[epoch, 'val', running_loss / (i + 1), b_acc, mcc, auc]], columns=history.columns)
    current_metrics.to_csv(os.path.join(model_dir, 'history.csv'), mode='a', header=False, index=False)

    # Early stopping: save model weights only when val (balanced) accuracy has improved
    if b_acc > early_stopping_dict['best_acc']:
        print(f'--- EARLY STOPPING: Accuracy has improved from {round(early_stopping_dict["best_acc"], 3)} to {round(b_acc, 3)}! Saving weights. ---')
        early_stopping_dict['epochs_no_improve'] = 0
        early_stopping_dict['best_acc'] = b_acc
        best_model_wts = deepcopy(model.state_dict())
        torch.save({'weights': best_model_wts, 'optimizer': optimizer.state_dict()}, os.path.join(model_dir, f'chkpt_epoch-{epoch}.pt'))
    else:
        print(f'--- EARLY STOPPING: Accuracy has not improved from {round(early_stopping_dict["best_acc"], 3)} ---')
        early_stopping_dict['epochs_no_improve'] += 1

    # Fixed: DataFrame.append was removed in pandas 2.0; pd.concat is the
    # supported equivalent. (Docstring also no longer documents parameters
    # that do not exist in the signature.)
    return pd.concat([history, current_metrics]), early_stopping_dict, best_model_wts
def evaluate(model, device, loss_fxn, dataset, split, batch_size, history, model_dir, weights):
    """Run final evaluation on a test split and save all artifacts.

    Loads the best weights, runs inference over ``dataset``, then writes
    per-class predictions/labels, a confusion matrix, training curves, and a
    text summary into ``model_dir``.

    Parameters
    ----------
    model : PyTorch model
    device : PyTorch device
    loss_fxn : PyTorch loss function
    dataset : PyTorch Dataset
        The split to evaluate (balanced or imbalanced test set)
    split : str
        Split name used in output filenames (e.g. 'test', 'balanced-test')
    batch_size : int
    history : pandas DataFrame
        Full training history (used for the learning-curve plots)
    model_dir : str
        Output directory where metrics, plots, and summaries are stored
    weights : PyTorch state_dict
        Model weights from the best epoch
    """
    model.load_state_dict(weights)  # load best weights
    model.eval()

    ## INFERENCE
    data_loader = torch.utils.data.DataLoader(dataset, batch_size=batch_size, shuffle=False, num_workers=8 if split == 'test' else 2, pin_memory=True, worker_init_fn=val_worker_init_fn)

    pbar = tqdm.tqdm(enumerate(data_loader), total=len(data_loader), desc=f'[{split.upper()}] EVALUATION')
    running_loss = 0.
    y_true, y_hat = [], []
    with torch.no_grad():
        for i, (x, y) in pbar:
            x = x.to(device)
            y = y.to(device)

            out = model(x)
            loss = loss_fxn(out, y)

            running_loss += loss.item()
            y_hat.append(out.softmax(dim=1).detach().cpu().numpy())
            y_true.append(y.detach().cpu().numpy())

            pbar.set_postfix({'loss': running_loss / (i + 1)})

    # Collect true and predicted labels into flat numpy arrays
    y_true, y_hat = np.concatenate(y_true), np.concatenate(y_hat)

    # Compute metrics
    auc = roc_auc_score(y_true, y_hat, average='macro', multi_class='ovr')
    b_acc = balanced_accuracy_score(y_true, y_hat.argmax(axis=1))
    conf_mat = confusion_matrix(y_true, y_hat.argmax(axis=1))
    # Per-class recall: diagonal (correct) over row sums (true counts per class)
    accuracies = conf_mat.diagonal() / conf_mat.sum(axis=1)
    mcc = matthews_corrcoef(y_true, y_hat.argmax(axis=1))
    cls_report = classification_report(y_true, y_hat.argmax(axis=1), target_names=dataset.CLASSES, digits=3)
    print(f'[{split.upper()}] Balanced Accuracy: {round(b_acc, 3)} | MCC: {round(mcc, 3)} | AUC: {round(auc, 3)}')

    # Collect and save true and predicted disease labels for test set
    pred_df = pd.DataFrame(y_hat, columns=dataset.CLASSES)
    # One-hot encode true labels so both csv files share the same columns
    true_df = pd.DataFrame(LabelBinarizer().fit(range(len(dataset.CLASSES))).transform(y_true), columns=dataset.CLASSES)

    pred_df.to_csv(os.path.join(model_dir, f'{split}_pred.csv'), index=False)
    true_df.to_csv(os.path.join(model_dir, f'{split}_true.csv'), index=False)

    # Plot confusion matrix
    fig, ax = plot_confusion_matrix(conf_mat, figsize=(24, 24), colorbar=True, show_absolute=True, show_normed=True, class_names=dataset.CLASSES)
    fig.savefig(os.path.join(model_dir, f'{split}_cm.png'), dpi=300, bbox_inches='tight')

    # Plot loss curves
    fig, ax = plt.subplots(1, 1, figsize=(6, 6))
    ax.plot(history.loc[history['phase'] == 'train', 'epoch'], history.loc[history['phase'] == 'train', 'loss'], label='train')
    ax.plot(history.loc[history['phase'] == 'val', 'epoch'], history.loc[history['phase'] == 'val', 'loss'], label='val')
    ax.set_xlabel('Epoch')
    ax.set_ylabel('Loss')
    ax.legend()
    fig.savefig(os.path.join(model_dir, 'loss.png'), dpi=300, bbox_inches='tight')

    # Plot accuracy curves
    fig, ax = plt.subplots(1, 1, figsize=(6, 6))
    ax.plot(history.loc[history['phase'] == 'train', 'epoch'], history.loc[history['phase'] == 'train', 'balanced_acc'], label='train')
    ax.plot(history.loc[history['phase'] == 'val', 'epoch'], history.loc[history['phase'] == 'val', 'balanced_acc'], label='val')
    ax.set_xlabel('Epoch')
    ax.set_ylabel('Balanced Accuracy')
    ax.legend()
    fig.savefig(os.path.join(model_dir, 'balanced_acc.png'), dpi=300, bbox_inches='tight')

    # Plot AUROC learning curves
    fig, ax = plt.subplots(1, 1, figsize=(6, 6))
    ax.plot(history.loc[history['phase'] == 'train', 'epoch'], history.loc[history['phase'] == 'train', 'auroc'], label='train')
    ax.plot(history.loc[history['phase'] == 'val', 'epoch'], history.loc[history['phase'] == 'val', 'auroc'], label='val')
    ax.set_xlabel('Epoch')
    ax.set_ylabel('AUROC')
    ax.legend()
    fig.savefig(os.path.join(model_dir, 'auc.png'), dpi=300, bbox_inches='tight')

    # Create summary text file describing final performance
    summary = f'Balanced Accuracy: {round(b_acc, 3)}\n'
    summary += f'Matthews Correlation Coefficient: {round(mcc, 3)}\n'
    summary += f'Mean AUC: {round(auc, 3)}\n\n'

    summary += 'Class:| Accuracy\n'
    for i, c in enumerate(dataset.CLASSES):
        summary += f'{c}:| {round(accuracies[i], 3)}\n'
    summary += '\n'
    summary += cls_report

    f = open(os.path.join(model_dir, f'{split}_summary.txt'), 'w')
    f.write(summary)
f.close() | 15,395 | 38.88601 | 186 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.