repo stringlengths 1 99 | file stringlengths 13 215 | code stringlengths 12 59.2M | file_length int64 12 59.2M | avg_line_length float64 3.82 1.48M | max_line_length int64 12 2.51M | extension_type stringclasses 1
value |
|---|---|---|---|---|---|---|
Smile-Pruning | Smile-Pruning-master/src/dataset/cifar100.py | from torchvision.datasets import CIFAR100
import torchvision.transforms as transforms
def get_dataset(data_path, batch_size):
transform_train = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.5071, 0.4867, 0.4408), (0.2675, 0.2565, 0.2761)), # ref to EigenDamage code
# transforms.Normalize((0.4914, 0.4822, 0.4465),
# (0.2023, 0.1994, 0.2010)),
])
transform_test = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.5071, 0.4867, 0.4408), (0.2675, 0.2565, 0.2761)),
# transforms.Normalize((0.4914, 0.4822, 0.4465),
# (0.2023, 0.1994, 0.2010)),
])
train_set = CIFAR100(data_path,
train=True,
download=True,
transform=transform_train)
test_set = CIFAR100(data_path,
train=False,
download=True,
transform=transform_test)
return train_set, test_set
num_classes = 100
input_shape = (3, 32, 32) | 1,203 | 33.4 | 107 | py |
Smile-Pruning | Smile-Pruning-master/src/dataset/fmnist.py | from torchvision.datasets import FashionMNIST
from torch.utils.data import DataLoader
import torchvision.transforms as transforms
def get_dataset(data_path, batch_size):
transform = transforms.Compose([
transforms.Resize((32, 32)),
transforms.ToTensor(),
transforms.Normalize(
(0.1307,), (0.3081,))
])
train_set = FashionMNIST(data_path,
train=True,
download=True,
transform=transform)
test_set = FashionMNIST(data_path,
train=False,
download=True,
transform=transform)
return train_set, test_set
num_classes = 10
input_shape = (1, 32, 32) | 728 | 28.16 | 45 | py |
Smile-Pruning | Smile-Pruning-master/src/dataset/cifar10.py | from torchvision.datasets import CIFAR10
from torch.utils.data import DataLoader
import torchvision.transforms as transforms
def get_dataset(data_path, batch_size):
transform_train = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
# transforms.Normalize((0.4914, 0.4822, 0.4465),
# (0.2023, 0.1994, 0.2010)), # ref to: https://github.com/kuangliu/pytorch-cifar/blob/master/main.py
transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]) # these mean and var are from official PyTorch ImageNet example
])
transform_test = transforms.Compose([
transforms.ToTensor(),
# transforms.Normalize((0.4914, 0.4822, 0.4465),
# (0.2023, 0.1994, 0.2010)),
transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
])
train_set = CIFAR10(data_path,
train=True,
download=True,
transform=transform_train)
test_set = CIFAR10(data_path,
train=False,
download=True,
transform=transform_test)
return train_set, test_set
num_classes = 10
input_shape = (3, 32, 32) | 1,406 | 37.027027 | 129 | py |
Smile-Pruning | Smile-Pruning-master/src/dataset/__init__.py |
from importlib import import_module
import os
import numpy as np
import torch
from torch.utils import data
from torch.utils.data import DataLoader
class Data(object):
def __init__(self, args):
self.args = args
dataset = import_module("dataset.%s" % args.dataset)
path = os.path.join(args.data_path, args.dataset)
train_set, test_set = dataset.get_dataset(path, args.batch_size)
self.train_loader = DataLoader(train_set,
batch_size=args.batch_size,
num_workers=args.workers,
shuffle=True,
pin_memory=True)
self.train_loader_prune = DataLoader(train_set,
batch_size=args.batch_size_prune,
num_workers=args.workers,
shuffle=True,
pin_memory=True)
self.test_loader = DataLoader(test_set,
batch_size=256,
num_workers=args.workers,
shuffle=False,
pin_memory=True) | 1,294 | 39.46875 | 72 | py |
Smile-Pruning | Smile-Pruning-master/src/dataset/celeba.py | import numpy as np
import os
import torch
import torch.utils.data as data
from torch.utils.data import DataLoader
import torchvision.transforms as transforms
from PIL import Image
Image.MAX_IMAGE_PIXELS = None
pjoin = os.path.join
def is_img(x):
_, ext = os.path.splitext(x)
return ext.lower() in ['.jpg', '.png', '.bmp', '.jpeg']
class CelebA(data.Dataset):
'''
Only for the most balanced attribute "Attractive".
Deprecated. This class is not fully worked through. Be careful.
'''
def __init__(self, img_dir, label_file, transform):
self.img_list = [os.path.join(img_dir, i) for i in os.listdir(
img_dir) if i.endswith(".npy")]
self.transform = transform
if label_file.endswith(".npy"):
self.label = np.load(label_file) # label file is npy
else:
self.label = {}
for line in open(label_file): # label file is txt
if ".jpg" not in line:
continue
img_name, *attr = line.strip().split()
# "Attractive" is at the 3rd position of all attrs
self.label[img_name] = int(attr[2] == "1")
def __getitem__(self, index):
img_path = self.img_list[index]
img_name = img_path.split("/")[-1]
img = Image.open(img_path).convert("RGB")
img = img.resize((224, 224)) # for alexnet
img = self.transform(img)
return img.squeeze(0), self.label[img_name]
def __len__(self):
return len(self.img_list)
class CelebA_npy(data.Dataset):
def __init__(self, npy_dir, label_file, transform):
self.npy_list = [os.path.join(npy_dir, i) for i in os.listdir(
npy_dir) if i.endswith(".npy") and i != "batch.npy"]
self.transform = transform
# label_file should be an npy
self.label = torch.from_numpy(np.load(label_file)).long()
def __getitem__(self, index):
npy = self.npy_list[index]
img = np.load(npy)
img = Image.fromarray(img)
img = self.transform(img)
return img.squeeze(0), self.label[int(npy.split("/")[-1].split(".")[0])]
def __len__(self):
return len(self.npy_list)
class Dataset_npy_batch(data.Dataset):
def __init__(self, npy_dir, transform):
self.data = np.load(os.path.join(npy_dir, "batch.npy"))
self.transform = transform
def __getitem__(self, index):
img = Image.fromarray(self.data[index][0])
img = self.transform(img)
label = self.data[index][1]
label = torch.LongTensor([label])[0]
return img.squeeze(0), label
def __len__(self):
return len(self.data)
def get_dataset(data_path, batch_size):
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
transform_train = transforms.Compose([
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize,
])
transform_test = transforms.Compose([
transforms.ToTensor(),
normalize,
])
train_data_path = pjoin(data_path, "train_npy")
train_label_path = pjoin(data_path, "CelebA_Attractive_label.npy")
test_path = pjoin(data_path, "test_npy")
assert(os.path.exists(train_data_path))
assert(os.path.exists(train_label_path))
assert(os.path.exists(test_path))
train_set = CelebA_npy(
train_data_path, train_label_path, transform=transform_train)
test_set = Dataset_npy_batch(test_path, transform=transform_test)
return train_set, test_set | 3,610 | 31.827273 | 80 | py |
Smile-Pruning | Smile-Pruning-master/src/dataset/tiny_imagenet.py | import numpy as np
import torch
import torch.utils.data as data
import torch.utils.data.distributed
import torchvision.transforms as transforms
import torchvision.datasets as datasets
from option import args
from PIL import Image
import os
from utils import Dataset_npy_batch
# refer to: https://github.com/pytorch/examples/blob/master/imagenet/main.py
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
transform_train = transforms.Compose([
transforms.RandomCrop(64, padding=8), # refer to the cifar case
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize,
])
transform_test = transforms.Compose([
transforms.ToTensor(),
normalize,
])
def get_dataset(data_path, batch_size):
train_set = Dataset_npy_batch(
data_path + "/train",
transform=transform_train,
)
test_set = Dataset_npy_batch(
data_path + "/val",
transform=transform_test,
)
return train_set, test_set
num_classes = 200
input_shape = (3, 64, 64) | 1,075 | 26.589744 | 76 | py |
Smile-Pruning | Smile-Pruning-master/src/dataset/mnist.py | from torchvision.datasets.mnist import MNIST
from torch.utils.data import DataLoader
import torchvision.transforms as transforms
def get_dataset(data_path, batch_size):
transform = transforms.Compose([
transforms.Resize((32, 32)),
transforms.ToTensor(),
transforms.Normalize(
(0.1307,), (0.3081,))
])
train_set = MNIST(data_path,
train=True,
download=True,
transform=transform)
test_set = MNIST(data_path,
train=False,
download=True,
transform=transform)
return train_set, test_set
num_classes = 10
input_shape = (1, 32, 32) | 713 | 27.56 | 44 | py |
Smile-Pruning | Smile-Pruning-master/src/method_modules/prune.py |
from .pruner import pruner_dict
def prune(model, loader, args, logger):
# Read config file, get pruner name
pruner = pruner_dict[args.pruner].Pruner(model, loader, args, logger)
pruner.prune()
print(f'==> Prune is done.')
# -- Debug to check if the pruned weights are really zero. Confirmed!
# import torch.nn as nn
# for name, m in pruner.model.named_modules():
# if isinstance(m, (nn.Conv2d, nn.Linear)):
# print(m.weight.data.abs().min())
# --
return pruner.model
| 526 | 30 | 73 | py |
Smile-Pruning | Smile-Pruning-master/src/method_modules/train.py |
import torch
import torch.nn as nn
from utils import PresetLRScheduler, adjust_learning_rate, AverageMeter, ProgressMeter, accuracy
from utils import get_n_params, get_n_flops, get_n_params_, get_n_flops_
from utils import add_noise_to_model, compute_jacobian, _weights_init_orthogonal, get_jacobian_singular_values
from utils import Timer
import shutil, time, os
import numpy as np
pjoin = os.path.join
def save_ckpt(save_dir, ckpt, is_best=False, mark=''):
out = pjoin(save_dir, "ckpt_last.pth")
torch.save(ckpt, out)
if is_best:
out_best = pjoin(save_dir, "ckpt_best.pth")
torch.save(ckpt, out_best)
if mark:
out_mark = pjoin(save_dir, "ckpt_{}.pth".format(mark))
torch.save(ckpt, out_mark)
def one_epoch_train(train_loader, model, criterion, optimizer, epoch, args, print_log=True):
batch_time = AverageMeter('Time', ':6.3f')
data_time = AverageMeter('Data', ':6.3f')
losses = AverageMeter('Loss', ':.4e')
top1 = AverageMeter('Acc@1', ':6.2f')
top5 = AverageMeter('Acc@5', ':6.2f')
progress = ProgressMeter(
len(train_loader),
[batch_time, data_time, losses, top1, top5],
prefix="Epoch: [{}]".format(epoch))
# Switch to train mode
model.train()
end = time.time()
for i, (images, target) in enumerate(train_loader):
# Measure data loading time
data_time.update(time.time() - end)
if args.gpu is not None:
images = images.cuda(args.gpu, non_blocking=True)
target = target.cuda(args.gpu, non_blocking=True)
if hasattr(args, 'advanced_lr'):
lr = adjust_learning_rate_v2(optimizer, epoch, i, len(train_loader))
args.advanced_lr.lr = lr
if i == 10: print(f'==> Set LR to {lr:.6f} Epoch {epoch} Iter {i}')
# Compute output
output = model(images)
loss = criterion(output, target)
# Measure accuracy and record loss
acc1, acc5 = accuracy(output, target, topk=(1, 5))
losses.update(loss.item(), images.size(0))
top1.update(acc1[0], images.size(0))
top5.update(acc5[0], images.size(0))
# Orth regularization
if args.orth_reg_iter_ft:
loss_orth_reg, cnt = 0, -1
for name, module in model.named_modules():
if isinstance(module, (nn.Conv2d, nn.Linear)):
cnt += 1
if args.orth_reg_method in ['CVPR20']:
if cnt != 0: # per the CVPR20 paper, do not reg the 1st conv
shape = module.weight.shape
if len(shape) == 2 or shape[-1] == 1: # FC and 1x1 conv
loss_orth_reg += orth_dist(module.weight)
else:
loss_orth_reg += deconv_orth_dist(module.weight)
elif args.orth_reg_method in ['CVPR17']:
loss_orth_reg += orth_dist(module.weight)
else:
raise NotImplementedError
loss += loss_orth_reg * args.lw_orth_reg
if i % args.print_interval == 0:
print(f'loss_orth_reg (*{args.lw_orth_reg}) {loss_orth_reg:.10f} Epoch {epoch} Iter {i}')
# Compute gradient and SGD step
optimizer.zero_grad()
loss.backward()
optimizer.step()
# After update, zero out pruned weights
if args.wg == 'weight' and hasattr(model, 'mask'):
apply_mask_forward(model, model.mask)
# Util functionality, check the gradient norm of params
if hasattr(args, 'utils') and args.utils.check_grad_norm:
from utils import check_grad_norm
if i % args.print_interval == 0:
print(''); print(f'(** Start check_grad_norm. Epoch {epoch} Step {i} **)')
check_grad_norm(model)
print(f'(** End check_grad_norm **)'); print('')
# Util functionality, check the gradient norm of params
if hasattr(args, 'utils') and args.utils.check_weight_stats:
from utils import check_weight_stats
if i % args.print_interval == 0:
print(''); print(f'(** Start check_weight_stats. Epoch {epoch} Step {i} **)')
check_weight_stats(model)
print(f'(** End check_weight_stats **)'); print('')
# Check Jacobian singular value (JSV) # TODO-@mst: move to utility
if args.jsv_interval == -1:
args.jsv_interval = len(train_loader) # default: check jsv at the last iteration
if args.jsv_loop and (i + 1) % args.jsv_interval == 0:
jsv, jsv_diff, cn = get_jacobian_singular_values(model, train_loader, num_classes=args.passer['num_classes'], n_loop=args.jsv_loop, print_func=print, rand_data=args.jsv_rand_data)
print('JSV_mean %.4f JSV_std %.4f JSV_max %.4f JSV_min %.4f Condition_Number_mean %.4f JSV_diff_mean %.4f JSV_diff_std %.4f -- Epoch %d Iter %d' %
(np.mean(jsv), np.std(jsv), np.max(jsv), np.min(jsv), np.mean(cn), np.mean(jsv_diff), np.std(jsv_diff), epoch, i))
# Measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if print_log and i % args.print_freq == 0:
progress.display(i)
def validate(val_loader, model, criterion, args, noisy_model_ensemble=False):
batch_time = AverageMeter('Time', ':6.3f')
losses = AverageMeter('Loss', ':.4e')
top1 = AverageMeter('Acc@1', ':6.2f')
top5 = AverageMeter('Acc@5', ':6.2f')
progress = ProgressMeter(
len(val_loader),
[batch_time, losses, top1, top5],
prefix='Test: ')
train_state = model.training
# switch to evaluate mode
model.eval()
# @mst: add noise to model
model_ensemble = []
if noisy_model_ensemble:
for i in range(args.model_noise_num):
noisy_model = add_noise_to_model(model, std=args.model_noise_std)
model_ensemble.append(noisy_model)
print('==> added Gaussian noise to model weights (std=%s, num=%d)' % (args.model_noise_std, args.model_noise_num))
else:
model_ensemble.append(model)
time_compute = []
with torch.no_grad():
end = time.time()
for i, (images, target) in enumerate(val_loader):
if args.gpu is not None:
images = images.cuda(args.gpu, non_blocking=True)
target = target.cuda(args.gpu, non_blocking=True)
# compute output
t1 = time.time()
output = 0
for model in model_ensemble: # @mst: test model ensemble
output += model(images)
output /= len(model_ensemble)
time_compute.append((time.time() - t1) / images.size(0))
loss = criterion(output, target)
# measure accuracy and record loss
acc1, acc5 = accuracy(output, target, topk=(1, 5))
losses.update(loss.item(), images.size(0))
top1.update(acc1[0], images.size(0))
top5.update(acc5[0], images.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
progress.display(i)
# TODO: this should also be done with the ProgressMeter
# print(' * Acc@1 {top1.avg:.3f} Acc@5 {top5.avg:.3f}'
# .format(top1=top1, top5=top5))
# @mst: commented because we will use another print outside 'validate'
# print("time compute: %.4f ms" % (np.mean(time_compute)*1000))
# change back to original model state if necessary
if train_state:
model.train()
return top1.avg.item(), top5.avg.item(), losses.avg # @mst: added returning top5 acc and loss
def adjust_learning_rate_v2(optimizer, epoch, iteration, num_iter):
'''More advanced LR scheduling. Refers to d-li14 MobileNetV2 ImageNet implementation:
https://github.com/d-li14/mobilenetv2.pytorch/blob/1733532bd43743442077326e1efc556d7cfd025d/imagenet.py#L374
'''
assert hasattr(args, 'advanced_lr')
warmup_iter = args.advanced_lr.warmup_epoch * num_iter # num_iter: num_iter_per_epoch
current_iter = iteration + epoch * num_iter
max_iter = args.epochs * num_iter
if epoch < args.advanced_lr.warmup_epoch:
lr = args.lr * current_iter / warmup_iter
else:
if args.advanced_lr.lr_decay == 'step':
lr = args.lr * (args.advanced_lr.gamma ** ((current_iter - warmup_iter) / (max_iter - warmup_iter)))
elif args.advanced_lr.lr_decay == 'cos':
lr = args.lr * (1 + math.cos(math.pi * (current_iter - warmup_iter) / (max_iter - warmup_iter))) / 2
elif args.advanced_lr.lr_decay == 'linear':
lr = args.lr * (1 - (current_iter - warmup_iter) / (max_iter - warmup_iter))
elif args.advanced_lr.lr_decay == 'schedule':
count = sum([1 for s in args.advanced_lr.schedule if s <= epoch])
lr = args.lr * pow(args.advanced_lr.gamma, count)
else:
raise ValueError('Unknown lr mode {}'.format(args.advanced_lr.lr_decay))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
return lr
def apply_mask_forward(model, mask):
for name, m in model.named_modules():
if name in mask:
m.weight.data.mul_(mask[name])
def train(model, loader, args, logger):
train_loader = loader.train_loader
val_loader = loader.test_loader
best_acc1, best_acc1_epoch = 0, 0
print_log = True
accprint = logger.accprint
criterion = logger.passer['criterion']
# Save the model after initialization (useful for LTH)
if args.save_init_model:
ckpt = {
'arch': args.arch,
'model': model,
'state_dict': model.state_dict(),
'ExpID': logger.ExpID,
}
save_path = f'{logger.weights_path}/ckpt_init.pth'
torch.save(ckpt, save_path)
logger.passer['ckpt_init'] = save_path
print(f'==> Save initial weights at "{save_path}"')
# since model is new, we need a new optimizer
if args.solver == 'Adam':
print('==> Start to finetune: using Adam optimizer')
optimizer = torch.optim.Adam(model.parameters(), args.lr)
else:
print('==> Start to finetune: using SGD optimizer')
optimizer = torch.optim.SGD(model.parameters(), args.lr,
momentum=args.momentum,
weight_decay=args.weight_decay)
# set lr finetune schduler for finetune
if args.pipeline:
assert args.lr_ft is not None
lr_scheduler = PresetLRScheduler(args.lr_ft)
acc1_list, loss_train_list, loss_test_list, last_lr = [], [], [], 0
timer = Timer(args.epochs - args.start_epoch)
for epoch in range(args.start_epoch, args.epochs):
if args.distributed:
train_sampler.set_epoch(epoch)
# @mst: use our own lr scheduler
if not hasattr(args, 'advanced_lr'): # 'advanced_lr' can override 'lr_scheduler' and 'adjust_learning_rate'
lr = lr_scheduler(optimizer, epoch) if args.pipeline else adjust_learning_rate(optimizer, epoch, args)
if print_log:
print("==> Set lr = %s @ Epoch %d begins" % (lr, epoch))
# save model if LR just changed
if last_lr !=0 and lr != last_lr:
state = {'epoch': epoch, # this is to save the model of last epoch
'arch': args.arch,
'model': model,
'state_dict': model.state_dict(),
'acc1': acc1,
'acc5': acc5,
'optimizer': optimizer.state_dict(),
'ExpID': logger.ExpID,
'prune_state': 'finetune',
}
if args.wg == 'weight':
state['mask'] = mask
save_ckpt(save_dir, state, mark=f'lr{last_lr}_epoch{epoch}')
print(f'==> Save ckpt at the last epoch ({epoch}) of LR {last_lr}')
# train for one epoch
one_epoch_train(train_loader, model, criterion, optimizer, epoch, args, print_log=print_log)
if hasattr(args, 'advanced_lr'): # advanced_lr will adjust lr inside the train fn
lr = args.advanced_lr.lr
last_lr = lr
# @mst: check weights magnitude during finetune
if args.pipeline in ['GReg-1', 'GReg-2'] and not isinstance(pruner, type(None)):
for name, m in model.named_modules():
if name in pruner.reg:
ix = pruner.layers[name].layer_index
mag_now = m.weight.data.abs().mean()
mag_old = pruner.original_w_mag[name]
ratio = mag_now / mag_old
tmp = '[%2d] %25s -- mag_old = %.4f, mag_now = %.4f (%.2f)' % (ix, name, mag_old, mag_now, ratio)
original_print(tmp, file=logger.logtxt, flush=True)
if args.screen_print:
print(tmp)
# evaluate on validation set
acc1, acc5, loss_test = validate(val_loader, model, criterion, args) # @mst: added acc5
if args.dataset not in ['imagenet'] and args.test_trainset: # too costly, not test for now
acc1_train, acc5_train, loss_train = validate(train_loader, model, criterion, args)
else:
acc1_train, acc5_train, loss_train = -1, -1, -1
acc1_list.append(acc1)
loss_train_list.append(loss_train)
loss_test_list.append(loss_test)
# remember best acc@1 and save checkpoint
is_best = acc1 > best_acc1
best_acc1 = max(acc1, best_acc1)
if is_best:
best_acc1_epoch = epoch
best_loss_train = loss_train
best_loss_test = loss_test
if print_log:
accprint("Acc1 %.4f Acc5 %.4f Loss_test %.4f | Acc1_train %.4f Acc5_train %.4f Loss_train %.4f | Epoch %d (Best_Acc1 %.4f @ Best_Acc1_Epoch %d) lr %s" %
(acc1, acc5, loss_test, acc1_train, acc5_train, loss_train, epoch, best_acc1, best_acc1_epoch, lr))
print('predicted finish time: %s' % timer())
ngpus_per_node = torch.cuda.device_count()
if not args.multiprocessing_distributed or (args.multiprocessing_distributed
and args.rank % ngpus_per_node == 0):
ckpt = {'epoch': epoch + 1,
'arch': args.arch,
'model': model,
'state_dict': model.state_dict(),
'acc1': acc1,
'acc5': acc5,
'optimizer': optimizer.state_dict(),
'ExpID': logger.ExpID,
'prune_state': 'finetune',
}
if args.wg == 'weight' and hasattr(model, 'mask'):
ckpt['mask'] = model.mask
save_ckpt(logger.weights_path, ckpt, is_best)
print(f'==> Train is done.')
return model
| 15,266 | 42.87069 | 191 | py |
Smile-Pruning | Smile-Pruning-master/src/method_modules/pruner/layer.py | import torch.nn as nn
import torch
class Layer():
'''A neat class to maintain network layer structure'''
def __init__(self, name, size, layer_index, layer_type=None, res=False, last=None):
self.name = name
self.size = []
for x in size:
self.size.append(x)
self.layer_index = layer_index # layer id
self.layer_type = layer_type
self.last = last # last layer
self.is_shortcut = True if "downsample" in name else False
if res:
self.stage, self.seq_index, self.block_index = self._get_various_index_by_name(name)
def _get_various_index_by_name(self, name):
'''Get the indeces including stage, seq_ix, blk_ix.
Same stage means the same feature map size.
'''
global lastest_stage # an awkward impel, just for now
if name.startswith('module.'):
name = name[7:] # remove the prefix caused by pytorch data parallel
if "conv1" == name: # TODO: this might not be so safe
lastest_stage = 0
return 0, None, None
if "linear" in name or 'fc' in name: # Note: this can be risky. Check it fully. TODO: @mingsun-tse
return lastest_stage + 1, None, None # fc layer should always be the last layer
else:
try:
stage = int(name.split(".")[0][-1]) # ONLY work for standard resnets. name example: layer2.2.conv1, layer4.0.downsample.0
seq_ix = int(name.split(".")[1])
if 'conv' in name.split(".")[-1]:
blk_ix = int(name[-1]) - 1
else:
blk_ix = -1 # shortcut layer
lastest_stage = stage
return stage, seq_ix, blk_ix
except:
print('!Parsing the layer name failed: %s. Please check.' % name)
def register_modulename(model):
for name, module in model.named_modules():
module.name = name
def register_hook(model, **kwargs):
""" 'learnable_layers' is a constant """
last_module = [None]
def hook(module, input, output):
kwargs['max_len_name'] = max(kwargs['max_len_name'], len(module.name))
kwargs['layers'][module.name] = Layer(name=module.name,
size=module.weight.size(),
layer_index=len(kwargs['layers']),
layer_type=module.__class__.__name__,
res=kwargs['res'],
last=last_module[0])
last_module[0] = module.name
def register(module, handles):
children = list(module.children())
if len(children) == 0:
if isinstance(module, kwargs['learnable_layers']):
handles += [module.register_forward_hook(hook)]
else:
for c in children:
register(c, handles)
handles = [] # used for remove hooks
register(model, handles)
return handles
def rm_hook(handles):
[x.remove() for x in handles] | 3,125 | 40.68 | 138 | py |
Smile-Pruning | Smile-Pruning-master/src/method_modules/pruner/l1_pruner.py | import torch
import torch.nn as nn
import copy
import time
import numpy as np
import torch.optim as optim
from .meta_pruner import MetaPruner
# from .reinit_model import orth_dist, deconv_orth_dist
from utils import Timer
class Pruner(MetaPruner):
def __init__(self, model, loader, args, logger):
super(Pruner, self).__init__(model, loader, args, logger)
def prune(self):
if self.args.orth_reg_iter > 0:
self.netprint('\n--> Start orthogonal regularization training.')
self.model = self._orth_reg_train(self.model) # update self.model
self.netprint('<-- End orthogonal regularization training.\n')
self._get_kept_wg_L1()
self._prune_and_build_new_model()
return self.model
def _orth_reg_train(self, model):
optimizer = optim.SGD(model.parameters(),
lr=self.args.lr_prune,
momentum=self.args.momentum,
weight_decay=self.args.weight_decay)
acc1 = acc5 = 0
epoch = -1
timer = Timer(self.args.orth_reg_iter / self.args.print_interval)
self.total_iter = -1
self.prune_state = 'orth_reg'
while True:
epoch += 1
for _, (inputs, targets) in enumerate(self.train_loader):
inputs, targets = inputs.cuda(), targets.cuda()
self.total_iter += 1
total_iter = self.total_iter
if total_iter % self.args.print_interval == 0:
print("")
print("Iter = %d [prune_state = %s, method = %s] "
% (total_iter, self.prune_state, self.args.method) + "-"*40)
# forward
model.train()
y_ = model(inputs)
# normal training forward
loss = self.criterion(y_, targets)
logtmp = f'loss_cls {loss:.4f}'
# Orth reg
loss_orth_reg = 0
for name, module in model.named_modules():
if isinstance(module, self.learnable_layers):
if self.args.orth_reg_method in ['CVPR20']:
if self.layers[name].layer_index != 0: # per the CVPR20 paper, do not reg the 1st conv
shape = self.layers[name].size
if len(shape) == 2 or shape[-1] == 1: # FC and 1x1 conv
loss_orth_reg += orth_dist(module.weight)
else:
loss_orth_reg += deconv_orth_dist(module.weight)
elif self.args.orth_reg_method in ['CVPR17']:
loss_orth_reg += orth_dist(module.weight)
else:
raise NotImplementedError
loss += loss_orth_reg * self.args.lw_orth_reg
# print loss
if self.total_iter % self.args.print_interval == 0:
logtmp += f' loss_orth_reg (*{self.args.lw_orth_reg}) {loss_orth_reg:.10f} Iter {self.total_iter}'
print(logtmp)
print(f"predicted_finish_time of orth_reg: {timer()}")
optimizer.zero_grad()
loss.backward()
optimizer.step()
# test
if total_iter % self.args.test_interval == 0:
acc1, acc5, *_ = self.test(model)
self.accprint("Acc1 = %.4f Acc5 = %.4f Iter = %d (after update) [prune_state = %s, method = %s]" %
(acc1, acc5, total_iter, self.prune_state, self.args.method))
# save model (save model before a batch starts)
if total_iter % self.args.save_interval == 0:
self._save_model(model, optimizer, acc1, acc5)
print('Periodically save model done. Iter = {}'.format(total_iter))
# return
if total_iter > self.args.orth_reg_iter:
return copy.deepcopy(model)
def _save_model(self, model, optimizer, acc1=0, acc5=0, mark=''):
state = {'iter': self.total_iter,
'arch': self.args.arch,
'model': model,
'state_dict': model.state_dict(),
'acc1': acc1,
'acc5': acc5,
'optimizer': optimizer.state_dict(),
'ExpID': self.logger.ExpID,
}
self.save(state, is_best=False, mark=mark) | 4,717 | 43.093458 | 119 | py |
Smile-Pruning | Smile-Pruning-master/src/method_modules/pruner/reg_pruner.py | import torch
import torch.nn as nn
import torch.optim as optim
import os, copy, time, pickle, numpy as np, math
from .meta_pruner import MetaPruner
from utils import plot_weights_heatmap, Timer, AverageMeter, ProgressMeter, accuracy
import matplotlib.pyplot as plt
pjoin = os.path.join
tensor2list = lambda x: x.cpu().data.numpy().tolist()
tensor2array = lambda x: x.cpu().data.numpy()
class Pruner(MetaPruner):
def __init__(self, model, loader, args, logger, passer):
super(Pruner, self).__init__(model, loader, args, logger, passer)
# Reg related variables
self.reg = {}
self.delta_reg = {}
self.hist_mag_ratio = {}
self.n_update_reg = {}
self.iter_update_reg_finished = {}
self.iter_finish_pick = {}
self.iter_stabilize_reg = math.inf
self.original_w_mag = {}
self.original_kept_w_mag = {}
self.ranking = {}
self.pruned_wg_L1 = {}
self.all_layer_finish_pick = False
self.w_abs = {}
self.mag_reg_log = {}
if self.args.__dict__.get('AdaReg_only_picking'): # AdaReg is the old name for greg2
self.original_model = copy.deepcopy(self.model)
# prune_init, to determine the pruned weights
# this will update the 'self.kept_wg' and 'self.pruned_wg'
if self.args.pruner in ['greg1', 'greg2']:
self._get_kept_wg_L1()
for k, v in self.pruned_wg.items():
self.pruned_wg_L1[k] = v
if self.args.pruner == 'greg2': # greg2 will determine which wgs to prune later, so clear it here
self.kept_wg = {}
self.pruned_wg = {}
self.prune_state = "update_reg"
for name, m in self.model.named_modules():
if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear):
shape = m.weight.data.shape
# initialize reg
if self.args.wg == 'weight':
self.reg[name] = torch.zeros_like(m.weight.data).flatten().cuda()
else:
self.reg[name] = torch.zeros(shape[0], shape[1]).cuda()
# get original weight magnitude
w_abs = self._get_score(m)
n_wg = len(w_abs)
self.ranking[name] = []
for _ in range(n_wg):
self.ranking[name].append([])
self.original_w_mag[name] = m.weight.abs().mean().item()
kept_wg_L1 = [i for i in range(n_wg) if i not in self.pruned_wg_L1[name]]
self.original_kept_w_mag[name] = w_abs[kept_wg_L1].mean().item()
def _pick_pruned_wg(self, w, pr):
'''return a list of indices of pruned weight groups'''
if pr == 0:
return []
elif pr > 0:
w = w.flatten()
n_pruned = min(math.ceil(pr * w.size(0)), w.size(0) - 1) # do not prune all
return tensor2list(w.sort()[1][:n_pruned])
elif pr == -1: # automatically decide lr by each layer itself
tmp = w.flatten().sort()[0]
n_not_consider = int(len(tmp) * 0.02)
w = tmp[n_not_consider:-n_not_consider]
sorted_w, sorted_index = w.flatten().sort()
max_gap = 0
max_index = 0
for i in range(len(sorted_w) - 1):
# gap = sorted_w[i+1:].mean() - sorted_w[:i+1].mean()
gap = sorted_w[i+1] - sorted_w[i]
if gap > max_gap:
max_gap = gap
max_index = i
max_index += n_not_consider
return tensor2list(sorted_index[:max_index + 1])
else:
print('Wrong pruning ratio. Please check.')
exit(1)
def _update_mag_ratio(self, m, name, w_abs, pruned=None):
if pruned is None:
pruned = self.pruned_wg[name]
kept = list(set(range(len(w_abs))) - set(pruned))
ave_mag_pruned = w_abs[pruned].mean()
ave_mag_kept = w_abs[kept].mean()
if len(pruned):
mag_ratio = ave_mag_kept / ave_mag_pruned
self.hist_mag_ratio[name] = self.hist_mag_ratio[name] * 0.9 + mag_ratio * 0.1 if name in self.hist_mag_ratio else mag_ratio
else:
mag_ratio = math.inf
self.hist_mag_ratio[name] = math.inf
# print
mag_ratio_now_before = ave_mag_kept / self.original_kept_w_mag[name]
if self.total_iter % self.args.print_interval == 0:
print(" mag_ratio %.4f mag_ratio_momentum %.4f" % (mag_ratio, self.hist_mag_ratio[name]))
print(" for kept weights, original_kept_w_mag %.6f, now_kept_w_mag %.6f ratio_now_over_original %.4f" %
(self.original_kept_w_mag[name], ave_mag_kept, mag_ratio_now_before))
return mag_ratio_now_before
def _get_score(self, m):
shape = m.weight.data.shape
if self.args.wg == "channel":
w_abs = m.weight.abs().mean(dim=[0, 2, 3]) if len(shape) == 4 else m.weight.abs().mean(dim=0)
elif self.args.wg == "filter":
w_abs = m.weight.abs().mean(dim=[1, 2, 3]) if len(shape) == 4 else m.weight.abs().mean(dim=1)
elif self.args.wg == "weight":
w_abs = m.weight.abs().flatten()
return w_abs
def _greg_1(self, m, name):
if self.pr[name] == 0:
return True
if self.args.wg != 'weight': # weight is too slow
self._update_mag_ratio(m, name, self.w_abs[name])
pruned = self.pruned_wg[name]
if self.args.wg == "channel":
self.reg[name][:, pruned] += self.args.reg_granularity_prune
elif self.args.wg == "filter":
self.reg[name][pruned, :] += self.args.reg_granularity_prune
elif self.args.wg == 'weight':
self.reg[name][pruned] += self.args.reg_granularity_prune
else:
raise NotImplementedError
# when all layers are pushed hard enough, stop
if self.args.wg == 'weight': # for weight, do not use the magnitude ratio condition, because 'hist_mag_ratio' is not updated, too costly
finish_update_reg = False
else:
finish_update_reg = self.reg[name].max() > self.args.reg_upper_limit
return finish_update_reg
    def _greg_2(self, m, name):
        """One "greg2" regularization step for layer `name`. Two phases:

        1) Picking: grow a uniform penalty (reg_granularity_pick) on the whole
           layer until reg reaches reg_upper_limit_pick, then pick the pruned
           weight groups by current L1 score and record them.
        2) After picking: keep pushing the picked (pruned) groups with
           reg_granularity_prune while holding the kept groups at
           reg_granularity_recover.

        Returns:
            bool: True when this layer has finished growing regularization.
        """
        layer_index = self.layers[name].layer_index
        w_abs = self._get_score(m)
        n_wg = len(w_abs)
        pr = self.pr[name]
        if pr == 0:
            self.kept_wg[name] = range(n_wg)
            self.pruned_wg[name] = []
            self.iter_finish_pick[name] = self.total_iter
            return True
        if name in self.iter_finish_pick:
            recover_reg = self.args.reg_granularity_recover
            # for pruned weights, push them more
            if self.args.wg == 'channel':
                self.reg[name][:, self.pruned_wg[name]] += self.args.reg_granularity_prune
                self.reg[name][:, self.kept_wg[name]] = recover_reg
            elif self.args.wg == 'filter':
                self.reg[name][self.pruned_wg[name], :] += self.args.reg_granularity_prune
                self.reg[name][self.kept_wg[name], :] = recover_reg
            elif self.args.wg == 'weight':
                self.reg[name][self.pruned_wg[name]] += self.args.reg_granularity_prune
                self.reg[name][self.kept_wg[name]] = recover_reg
            # for kept weights, bring them back
            # 09/22 update: It seems negative reg is a bad idea to bring back magnitude.
            '''
            current_w_mag = w_abs[self.kept_wg[name]].mean()
            recover_reg = (current_w_mag / self.original_kept_w_mag[name] - 1).item() \
                * self.args.weight_decay * self.args.reg_multiplier * 10
            if recover_reg > 0:
                recover_reg = 0
            if self.args.wg == 'channel':
                self.reg[name][:, self.kept_wg[name]] = recover_reg
            elif self.args.wg == 'filter':
                self.reg[name][self.kept_wg[name], :] = recover_reg
            '''
            if self.total_iter % self.args.print_interval == 0:
                print(" prune stage, push the pruned (reg = %.5f) to zero; for kept weights, reg = %.5f"
                    % (self.reg[name].max().item(), recover_reg))
        else:
            # picking phase: the whole layer shares one growing penalty
            self.reg[name] += self.args.reg_granularity_pick
        # plot w_abs distribution
        if self.total_iter % self.args.plot_interval == 0:
            self._plot_mag_ratio(w_abs, name)
        if self.total_iter % self.args.plot_interval == 0:
            self._log_down_mag_reg(w_abs, name)
        # save order
        if self.args.save_order_log and self.total_iter % self.args.update_reg_interval == 0:
            if not hasattr(self, 'order_log'):
                self.order_log = open('%s/order_log.txt' % self.logger.log_path, 'w+')
            order = w_abs.argsort().argsort()
            n_pruned = min(math.ceil(pr * n_wg), n_wg - 1)
            wg_pruned = w_abs.argsort()[:n_pruned]
            order = [str(x.item()) for x in order] # for each wg, its ranking
            wg_pruned = [str(x.item()) for x in wg_pruned]
            logtmp = ' '.join(order)
            logtmp = 'Iter %d Layer#%d %s order_by_L1 %s' % (self.total_iter, layer_index, name, logtmp)
            print(logtmp, file=self.order_log)
            logtmp = ' '.join(wg_pruned)
            logtmp = 'Iter %d Layer#%d %s wg_preprune %s' % (self.total_iter, layer_index, name, logtmp)
            print(logtmp, file=self.order_log, flush=True)
        # print to check magnitude ratio
        if self.args.wg != 'weight':
            if name in self.iter_finish_pick:
                self._update_mag_ratio(m, name, w_abs)
            else:
                pruned_wg = self._pick_pruned_wg(w_abs, pr)
                self._update_mag_ratio(m, name, w_abs, pruned=pruned_wg) # just print to check
        # check if picking finishes
        finish_pick_cond = self.reg[name].max() >= self.args.reg_upper_limit_pick
        if name not in self.iter_finish_pick and finish_pick_cond:
            self.iter_finish_pick[name] = self.total_iter
            pruned_wg = self._pick_pruned_wg(w_abs, pr)
            kept_wg = list(set(range(n_wg)) - set(pruned_wg))
            self.kept_wg[name] = kept_wg
            self.pruned_wg[name] = pruned_wg
            # compare the greg2 pick against a plain-L1 pick for diagnostics
            picked_wg_in_common = [i for i in pruned_wg if i in self.pruned_wg_L1[name]]
            common_ratio = len(picked_wg_in_common) / len(pruned_wg) if len(pruned_wg) else -1
            n_finish_pick = len(self.iter_finish_pick)
            print(" [%d] just finished pick (n_finish_pick = %d). %.2f in common chosen by L1 & greg2. Iter = %d" %
                (layer_index, n_finish_pick, common_ratio, self.total_iter))
            # re-scale the weights to recover the response magnitude
            # factor = self.original_w_mag[name] / m.weight.abs().mean()
            # m.weight.data.mul_(factor)
            # print(' rescale weight by %.4f' % factor.item())
            # check if all layers finish picking
            self.all_layer_finish_pick = True
            for k in self.reg:
                if self.pr[k] > 0 and (k not in self.iter_finish_pick):
                    self.all_layer_finish_pick = False
                    break
        # save mag_reg_log
        if self.args.save_mag_reg_log and (self.total_iter % self.args.save_interval == 0 or \
            name in self.iter_finish_pick):
            out = pjoin(self.logger.log_path, "%d_mag_reg_log.npy" % layer_index)
            np.save(out, self.mag_reg_log[name])
        # NOTE(review): exiting the process here looks intended only for the
        # picking-only use case — confirm it does not cut normal greg2 short
        if self.all_layer_finish_pick:
            exit(0)
        if self.args.__dict__.get('AdaReg_only_picking') or self.args.__dict__.get('AdaReg_revive_kept'):
            finish_update_reg = False # deprecated, will be removed
        else:
            finish_update_reg = name in self.iter_finish_pick and self.reg[name].max() > self.args.reg_upper_limit
        return finish_update_reg
    def _update_reg(self):
        """Run one regularization-update step (greg1 or greg2) on every
        learnable layer, and flip prune_state to 'stabilize_reg' once every
        layer has finished growing its penalty."""
        for name, m in self.model.named_modules():
            if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear):
                cnt_m = self.layers[name].layer_index
                pr = self.pr[name]
                # layers that already finished are left untouched
                if name in self.iter_update_reg_finished.keys():
                    continue
                if self.total_iter % self.args.print_interval == 0:
                    print("[%d] Update reg for layer '%s'. Pr = %s. Iter = %d"
                        % (cnt_m, name, pr, self.total_iter))
                # get the importance score (L1-norm in this case)
                self.w_abs[name] = self._get_score(m)
                # update reg functions, two things:
                # (1) update reg of this layer (2) determine if it is time to stop update reg
                if self.args.pruner == "greg1":
                    finish_update_reg = self._greg_1(m, name)
                elif self.args.pruner == "greg2":
                    finish_update_reg = self._greg_2(m, name)
                else:
                    print("Wrong '--method' argument, please check.")
                    exit(1)
                # check prune state
                if finish_update_reg:
                    # after 'update_reg' stage, keep the reg to stabilize weight magnitude
                    self.iter_update_reg_finished[name] = self.total_iter
                    print("==> [%d] Just finished 'update_reg'. Iter = %d" % (cnt_m, self.total_iter))
                    # check if all layers finish 'update_reg'
                    self.prune_state = "stabilize_reg"
                    for n, mm in self.model.named_modules():
                        if isinstance(mm, nn.Conv2d) or isinstance(mm, nn.Linear):
                            if n not in self.iter_update_reg_finished:
                                self.prune_state = "update_reg"
                                break
                    if self.prune_state == "stabilize_reg":
                        self.iter_stabilize_reg = self.total_iter
                        print("==> All layers just finished 'update_reg', go to 'stabilize_reg'. Iter = %d" % self.total_iter)
                        self._save_model(mark='just_finished_update_reg')
                # after reg is updated, print to check
                if self.total_iter % self.args.print_interval == 0:
                    print(" reg_status: min = %.5f ave = %.5f max = %.5f" %
                        (self.reg[name].min(), self.reg[name].mean(), self.reg[name].max()))
    def _apply_reg(self):
        """Add the accumulated regularization penalty to each layer's weight
        gradient. Called after loss.backward() and before optimizer.step()."""
        for name, m in self.model.named_modules():
            if name in self.reg:
                reg = self.reg[name] # [N, C]
                if self.args.wg in ['filter', 'channel']:
                    # broadcast the [N, C] penalty over the spatial dims of conv weights
                    if reg.shape != m.weight.data.shape:
                        reg = reg.unsqueeze(2).unsqueeze(3) # [N, C, 1, 1]
                elif self.args.wg == 'weight':
                    reg = reg.view_as(m.weight.data) # [N, C, H, W]
                l2_grad = reg * m.weight             # gradient of 0.5 * reg * w^2
                l1_grad = reg * torch.sign(m.weight) # gradient of reg * |w|
                if self.args.block_loss_grad:
                    # discard the task-loss gradient; keep only the penalty gradient
                    m.weight.grad = self.args.lw_l2 * l2_grad + self.args.lw_l1 * l1_grad
                else:
                    m.weight.grad += self.args.lw_l2 * l2_grad + self.args.lw_l1 * l1_grad
    def _resume_prune_status(self, ckpt_path):
        """Restore model, optimizer and pruning bookkeeping from a checkpoint
        written by _save_model(), so prune() can continue where it stopped."""
        state = torch.load(ckpt_path)
        self.model = state['model'].cuda()
        self.model.load_state_dict(state['state_dict'])
        self.optimizer = optim.SGD(self.model.parameters(),
            lr=self.args.lr_pick if self.args.__dict__.get('AdaReg_only_picking') else self.args.lr_prune,
            momentum=self.args.momentum,
            weight_decay=self.args.weight_decay)
        self.optimizer.load_state_dict(state['optimizer'])
        self.prune_state = state['prune_state']
        self.total_iter = state['iter']
        # older checkpoints may predate 'iter_stabilize_reg'; default to inf
        self.iter_stabilize_reg = state.get('iter_stabilize_reg', math.inf)
        self.reg = state['reg']
        self.hist_mag_ratio = state['hist_mag_ratio']
    def _save_model(self, acc1=0, acc5=0, mark=''):
        """Assemble the checkpoint state for resuming pruning.

        NOTE(review): the actual save call below is commented out, so this
        currently builds the state dict and discards it — confirm intended.
        """
        state = {'iter': self.total_iter,
                'prune_state': self.prune_state, # we will resume prune_state
                'arch': self.args.arch,
                'model': self.model,
                'state_dict': self.model.state_dict(),
                'iter_stabilize_reg': self.iter_stabilize_reg,
                'acc1': acc1,
                'acc5': acc5,
                'optimizer': self.optimizer.state_dict(),
                'reg': self.reg,
                'hist_mag_ratio': self.hist_mag_ratio,
                'ExpID': self.logger.ExpID,
        }
        # self.save(state, is_best=False, mark=mark)
    def test(self, model):
        """Evaluate `model` on the held-out test set.

        Returns:
            (top1_acc, top5_acc, avg_loss), with accuracies as python floats.
        """
        val_loader = self.test_loader
        criterion = self.criterion
        args = self.args
        batch_time = AverageMeter('Time', ':6.3f')
        losses = AverageMeter('Loss', ':.4e')
        top1 = AverageMeter('Acc@1', ':6.2f')
        top5 = AverageMeter('Acc@5', ':6.2f')
        progress = ProgressMeter(
            len(val_loader),
            [batch_time, losses, top1, top5],
            prefix='Test: ')
        train_state = model.training
        # switch to evaluate mode
        model.eval()
        # @mst: add noise to model
        model_ensemble = []
        model_ensemble.append(model)
        time_compute = []
        with torch.no_grad():
            end = time.time()
            for i, (images, target) in enumerate(val_loader):
                if args.gpu is not None:
                    images = images.cuda(args.gpu, non_blocking=True)
                    target = target.cuda(args.gpu, non_blocking=True)
                # compute output
                t1 = time.time()
                output = 0
                # NOTE: rebinds `model`; harmless here since the ensemble only holds `model` itself
                for model in model_ensemble: # @mst: test model ensemble
                    output += model(images)
                output /= len(model_ensemble)
                time_compute.append((time.time() - t1) / images.size(0))
                loss = criterion(output, target)
                # measure accuracy and record loss
                acc1, acc5 = accuracy(output, target, topk=(1, 5))
                losses.update(loss.item(), images.size(0))
                top1.update(acc1[0], images.size(0))
                top5.update(acc5[0], images.size(0))
                # measure elapsed time
                batch_time.update(time.time() - end)
                end = time.time()
                if i % args.print_freq == 0:
                    progress.display(i)
            # TODO: this should also be done with the ProgressMeter
            # print(' * Acc@1 {top1.avg:.3f} Acc@5 {top5.avg:.3f}'
            # .format(top1=top1, top5=top5))
            # @mst: commented because we will use another print outside 'validate'
            # print("time compute: %.4f ms" % (np.mean(time_compute)*1000))
        # change back to original model state if necessary
        if train_state:
            model.train()
        return top1.avg.item(), top5.avg.item(), losses.avg # @mst: added returning top5 acc and loss
    def prune(self):
        """Main regularization-pruning loop.

        Trains while growing per-layer regularization ('update_reg' state),
        then holds it for stabilize_reg_interval iterations ('stabilize_reg'),
        then physically prunes and returns
        (model_before_removing_weights, pruned_model). The AdaReg_* branches
        handle the picking-only / revive-kept variants.
        """
        self.model = self.model.train()
        self.optimizer = optim.SGD(self.model.parameters(),
            lr=self.args.lr_pick if self.args.__dict__.get('AdaReg_only_picking') else self.args.lr_prune,
            momentum=self.args.momentum,
            weight_decay=self.args.weight_decay)
        # resume model, optimzer, prune_status
        self.total_iter = -1
        if self.args.resume_path:
            self._resume_prune_status(self.args.resume_path)
            self._get_kept_wg_L1() # get pruned and kept wg from the resumed model
            self.model = self.model.train()
            print("Resume model successfully: '{}'. Iter = {}. prune_state = {}".format(
                self.args.resume_path, self.total_iter, self.prune_state))
        acc1 = acc5 = 0
        # rough estimate of total iterations, only used for the ETA printout
        total_iter_reg = self.args.reg_upper_limit / self.args.reg_granularity_prune * self.args.update_reg_interval + self.args.stabilize_reg_interval
        timer = Timer(total_iter_reg / self.args.print_interval)
        while True:
            for _, (inputs, targets) in enumerate(self.train_loader):
                inputs, targets = inputs.cuda(), targets.cuda()
                self.total_iter += 1
                total_iter = self.total_iter
                # test
                if total_iter % self.args.test_interval == 0:
                    acc1, acc5, *_ = self.test(self.model)
                    self.accprint("Acc1 = %.4f Acc5 = %.4f Iter = %d (before update) [prune_state = %s, pruner = %s]" %
                        (acc1, acc5, total_iter, self.prune_state, self.args.pruner))
                # save model (save model before a batch starts)
                if total_iter % self.args.save_interval == 0:
                    self._save_model(acc1, acc5)
                    print('Periodically save model done. Iter = {}'.format(total_iter))
                if total_iter % self.args.print_interval == 0:
                    print("")
                    print("Iter = %d [prune_state = %s, pruner = %s] "
                        % (total_iter, self.prune_state, self.args.pruner) + "-"*40)
                # forward
                self.model.train()
                y_ = self.model(inputs)
                if self.prune_state == "update_reg" and total_iter % self.args.update_reg_interval == 0:
                    self._update_reg()
                # normal training forward
                loss = self.criterion(y_, targets)
                self.optimizer.zero_grad()
                loss.backward()
                # after backward but before update, apply reg to the grad
                self._apply_reg()
                self.optimizer.step()
                # log print
                if total_iter % self.args.print_interval == 0:
                    # check BN stats
                    if self.args.verbose:
                        for name, m in self.model.named_modules():
                            if isinstance(m, nn.BatchNorm2d):
                                # get the associating conv layer of this BN layer
                                ix = self.all_layers.index(name)
                                for k in range(ix-1, -1, -1):
                                    if self.all_layers[k] in self.layers:
                                        last_conv = self.all_layers[k]
                                        break
                                mask_ = [0] * m.weight.data.size(0)
                                for i in self.kept_wg[last_conv]:
                                    mask_[i] = 1
                                wstr = ' '.join(['%.3f (%s)' % (x, y) for x, y in zip(m.weight.data, mask_)])
                                bstr = ' '.join(['%.3f (%s)' % (x, y) for x, y in zip(m.bias.data, mask_)])
                                logstr = f'{last_conv} BN weight: {wstr}\nBN bias: {bstr}'
                                print(logstr)
                    # check train acc
                    _, predicted = y_.max(1)
                    correct = predicted.eq(targets).sum().item()
                    train_acc = correct / targets.size(0)
                    print("After optim update current_train_loss: %.4f current_train_acc: %.4f" % (loss.item(), train_acc))
                if self.args.__dict__.get('AdaReg_only_picking') and self.all_layer_finish_pick:
                    print("greg2 just finished picking for all layers. Resume original model and switch to greg1. Iter = %d" % total_iter)
                    # save picked wg
                    pkl_path = os.path.join(self.logger.log_path, 'picked_wg.pkl')
                    with open(pkl_path, "wb" ) as f:
                        pickle.dump(self.pruned_wg, f)
                    exit(0)
                    # NOTE(review): everything below exit(0) in this branch is dead code
                    # set to greg1 method
                    self.model = self.original_model # reload the original model
                    self.optimizer = optim.SGD(self.model.parameters(),
                        lr=self.args.lr_prune,
                        momentum=self.args.momentum,
                        weight_decay=self.args.weight_decay)
                    self.args.pruner = "greg1"
                    self.args.AdaReg_only_picking = False # do not get in again
                    # reinit
                    for k in self.reg:
                        self.reg[k] = torch.zeros_like(self.reg[k]).cuda()
                    self.hist_mag_ratio = {}
                if self.args.__dict__.get('AdaReg_revive_kept') and self.all_layer_finish_pick:
                    self._prune_and_build_new_model()
                    print("greg2 just finished picking for all layers. Pruned and go to 'finetune'. Iter = %d" % total_iter)
                    return copy.deepcopy(self.model)
                # change prune state
                if self.prune_state == "stabilize_reg" and total_iter - self.iter_stabilize_reg == self.args.stabilize_reg_interval:
                    # # --- check accuracy to make sure '_prune_and_build_new_model' works normally
                    # # checked. works normally!
                    # for name, m in self.model.named_modules():
                    #     if isinstance(m, self.learnable_layers):
                    #         pruned_filter = self.pruned_wg[name]
                    #         m.weight.data[pruned_filter] *= 0
                    #         next_bn = self._next_bn(self.model, m)
                    #     elif isinstance(m, nn.BatchNorm2d) and m == next_bn:
                    #         m.weight.data[pruned_filter] *= 0
                    #         m.bias.data[pruned_filter] *= 0
                    # acc1_before, *_ = self.test(self.model)
                    # self._prune_and_build_new_model()
                    # acc1_after, *_ = self.test(self.model)
                    # print(acc1_before, acc1_after)
                    # exit()
                    # # ---
                    model_before_removing_weights = copy.deepcopy(self.model)
                    self._prune_and_build_new_model()
                    print("'stabilize_reg' is done. Pruned, go to 'finetune'. Iter = %d" % total_iter)
                    return model_before_removing_weights, copy.deepcopy(self.model)
                if total_iter % self.args.print_interval == 0:
                    print(f"predicted_finish_time of reg: {timer()}")
    def _plot_mag_ratio(self, w_abs, name):
        """Save a plot (and matching .npy dump) of this layer's weight-group
        magnitudes, normalized by the current maximum, for visual inspection."""
        fig, ax = plt.subplots()
        max_ = w_abs.max().item()
        w_abs_normalized = (w_abs / max_).data.cpu().numpy()
        ax.plot(w_abs_normalized)
        ax.set_ylim([0, 1])
        ax.set_xlabel('filter index')
        ax.set_ylabel('relative L1-norm ratio')
        layer_index = self.layers[name].layer_index
        shape = self.layers[name].size
        ax.set_title("layer %d iter %d shape %s\n(max = %s)"
            % (layer_index, self.total_iter, shape, max_))
        out = pjoin(self.logger.logplt_path, "%d_iter%d_w_abs_dist.jpg" %
            (layer_index, self.total_iter))
        fig.savefig(out)
        plt.close(fig)
        # also dump the raw normalized values next to the figure
        np.save(out.replace('.jpg', '.npy'), w_abs_normalized)
def _log_down_mag_reg(self, w_abs, name):
step = self.total_iter
reg = self.reg[name].max().item()
mag = w_abs.data.cpu().numpy()
if name not in self.mag_reg_log:
values = [[step, reg, mag]]
log = {
'name': name,
'layer_index': self.layers[name].layer_index,
'shape': self.layers[name].size,
'values': values,
}
self.mag_reg_log[name] = log
else:
values = self.mag_reg_log[name]['values']
values.append([step, reg, mag]) | 28,706 | 46.845 | 151 | py |
# ==== Smile-Pruning-master/src/method_modules/pruner/l1_pruner_iterative.py ====
import torch
import torch.nn as nn
import copy
import time
import numpy as np
import torch.optim as optim
from .meta_pruner import MetaPruner
from utils import PresetLRScheduler, Timer
from pdb import set_trace as st
class Pruner(MetaPruner):
    """Iterative L1-norm pruner: reaches the target sparsity over
    args.num_cycles prune-finetune rounds instead of in one shot."""

    def __init__(self, model, loader, args, logger, passer):
        super(Pruner, self).__init__(model, loader, args, logger, passer)
        # keep the user-specified final pruning ratios; self.pr is rewritten
        # with per-cycle ratios inside prune()
        self.pr_backup = {}
        for k, v in self.pr.items():
            self.pr_backup[k] = v

    def _update_pr(self, cycle):
        '''update layer pruning ratio in iterative pruning
        '''
        for layer, pr in self.pr_backup.items():
            # equal per-cycle ratio so that num_cycles rounds compose to pr:
            # (1 - pr_each_time_to_current) ** num_cycles == 1 - pr
            pr_each_time_to_current = 1 - (1 - pr) ** (1. / self.args.num_cycles)
            pr_each_time = pr_each_time_to_current * ( (1-pr_each_time_to_current) ** (cycle-1) )
            # filter/channel pruning shrinks the model each cycle, so pr is per-cycle;
            # weight pruning keeps the shape, so the mask ratio accumulates
            self.pr[layer] = pr_each_time if self.args.wg in ['filter', 'channel'] else pr_each_time + self.pr[layer]

    def _apply_mask_forward(self):
        """Zero out masked weights (unstructured pruning) after each update."""
        assert hasattr(self, 'mask') and len(self.mask.keys()) > 0
        for name, m in self.model.named_modules():
            if name in self.mask:
                m.weight.data.mul_(self.mask[name])

    def _finetune(self, cycle):
        """Short finetuning between pruning cycles (args.epochs_mini epochs)."""
        lr_scheduler = PresetLRScheduler(self.args.lr_ft_mini)
        optimizer = optim.SGD(self.model.parameters(),
            lr=0, # placeholder, this will be updated later
            momentum=self.args.momentum,
            weight_decay=self.args.weight_decay)
        best_acc1, best_acc1_epoch = 0, 0
        timer = Timer(self.args.epochs_mini)
        for epoch in range(self.args.epochs_mini):
            lr = lr_scheduler(optimizer, epoch)
            print(f'[Subprune #{cycle} Finetune] Epoch {epoch} Set LR = {lr}')
            for ix, (inputs, targets) in enumerate(self.train_loader):
                inputs, targets = inputs.cuda(), targets.cuda()
                self.model.train()
                y_ = self.model(inputs)
                loss = self.criterion(y_, targets)
                optimizer.zero_grad()
                loss.backward()
                optimizer.step()
                # keep pruned weights at zero after the optimizer step
                if self.args.method and self.args.wg == 'weight':
                    self._apply_mask_forward()
                if ix % self.args.print_interval == 0:
                    print(f'[Subprune #{cycle} Finetune] Epoch {epoch} Step {ix} loss {loss:.4f}')
            # test
            acc1, *_ = self.test(self.model)
            if acc1 > best_acc1:
                best_acc1 = acc1
                best_acc1_epoch = epoch
            self.accprint(f'[Subprune #{cycle} Finetune] Epoch {epoch} Acc1 {acc1:.4f} (Best_Acc1 {best_acc1:.4f} @ Best_Acc1_Epoch {best_acc1_epoch}) LR {lr}')
            print(f'predicted finish time: {timer()}')

    def prune(self):
        """Run num_cycles rounds of (set per-cycle pr -> L1 select -> prune),
        finetuning between rounds, and return the final pruned model."""
        # clear existing pr
        for layer in self.pr:
            self.pr[layer] = 0
        for cycle in range(1, self.args.num_cycles + 1):
            print(f'==> Start subprune #{cycle}')
            self._update_pr(cycle)
            self._get_kept_wg_L1()
            self._prune_and_build_new_model()
            if cycle < self.args.num_cycles:
                self._finetune(cycle) # there is a big finetuning after the last pruning, so do not finetune here
        return self.model
# ==== Smile-Pruning-master/src/method_modules/pruner/feat_analyze.py ====
from collections import OrderedDict
import torch.nn as nn
import numpy as np
class AverageMeter(object):
    """Computes and stores the average and current value"""

    def __init__(self, name, fmt=':f'):
        self.name = name
        self.fmt = fmt
        self.reset()

    def reset(self):
        """Zero out all running statistics."""
        self.val = self.avg = self.sum = self.count = 0

    def update(self, val, n=1):
        """Fold in a new observation `val` seen `n` times."""
        self.val = val
        self.sum += n * val
        self.count += n
        self.avg = self.sum / self.count

    def __str__(self):
        spec = self.fmt
        template = '{name} {val' + spec + '} ({avg' + spec + '})'
        return template.format(**vars(self))
class FeatureAnalyzer():
    """Diagnostic probe: hooks every Conv2d/Linear in a model, runs one full
    pass (forward + backward) over `data_loader`, and prints each layer's mean
    absolute activation and mean absolute output-gradient.

    The constructor does all the work; afterwards the instance is only useful
    for reading `feat_mean` / `grad_mean`.
    """

    def __init__(self, model, data_loader, criterion, print=print):
        # running means of |activation| / |grad wrt output|, keyed by layer name
        self.feat_mean = OrderedDict()
        self.grad_mean = OrderedDict()
        self.data_loader = data_loader
        self.criterion = criterion
        self.print = print
        # map module object -> dotted name, consulted inside the hooks
        self.layer_names = {}
        for name, module in model.named_modules():
            self.layer_names[module] = name
        self.register_hooks(model)
        self.analyze_feat(model)
        self.rm_hooks(model)

    def register_hooks(self, model):
        """Attach forward/backward hooks to every Conv2d and Linear module."""
        def forward_hook(m, i, o):
            # o: the module's output tensor; accumulate mean |activation|
            name = self.layer_names[m]
            if name not in self.feat_mean:
                self.feat_mean[name] = AverageMeter(name)
            self.feat_mean[name].update(o.abs().mean().item(), o.size(0))

        def backward_hook(m, grad_i, grad_o):
            # grad_o: gradients w.r.t. the module's outputs
            name = self.layer_names[m]
            if name not in self.grad_mean:
                self.grad_mean[name] = AverageMeter(name)
            assert len(grad_o) == 1
            self.grad_mean[name].update(grad_o[0].abs().mean().item(), grad_o[0].size(0))

        for _, module in model.named_modules():
            if isinstance(module, (nn.Conv2d, nn.Linear)):
                module.register_forward_hook(forward_hook)
                module.register_backward_hook(backward_hook)

    def rm_hooks(self, model):
        """Detach all hooks by clearing each module's internal hook dicts."""
        for _, module in model.named_modules():
            if isinstance(module, (nn.Conv2d, nn.Linear)):
                module._forward_hooks = OrderedDict()
                module._backward_hooks = OrderedDict()

    def analyze_feat(self, model):
        """One pass over the loader; the registered hooks accumulate stats."""
        # forward to activate hooks
        for i, (images, target) in enumerate(self.data_loader):
            images, target = images.cuda(), target.cuda()
            output = model(images)
            loss = self.criterion(output, target)
            loss.backward()
        # right-align layer names for a readable report
        max_key_len = np.max([len(k) for k in self.feat_mean.keys()])
        for k, v in self.feat_mean.items():
            grad = self.grad_mean[k]
            self.print(f'{k.rjust(max_key_len)} -- feat_mean {v.avg:.4f} grad_mean {grad.avg:.10f}')
# ==== Smile-Pruning-master/src/method_modules/pruner/meta_pruner.py ====
import torch
import torch.nn as nn
import copy
import time
import numpy as np
from math import ceil, sqrt
from collections import OrderedDict
from utils import strdict_to_dict
from .layer import register_modulename, register_hook, rm_hook, Layer
from fnmatch import fnmatch, fnmatchcase
DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
class MetaPruner:
def __init__(self, model, loader, args, logger):
self.model = model
self.args = args
self.logger = logger
self.accprint = logger.accprint
self.netprint = logger.netprint
self.train_loader = loader.train_loader
self.test_loader = loader.test_loader
self.criterion = logger.passer['criterion']
self.input_size = logger.passer['input_size']
self.is_single_branch = logger.passer['is_single_branch']
# register learnable layers
self.learnable_layers = (nn.Conv2d, nn.Conv1d, nn.Linear) # Note: for now, we only focus on weights in Conv and FC modules, no BN.
self.layers = OrderedDict() # learnable layers
self.all_layers = [] # all layers
self._register_layers()
arch = self.args.arch
if arch.startswith('resnet'):
# TODO: add block
self.n_conv_within_block = 0
if args.dataset == "imagenet":
if arch in ['resnet18', 'resnet34']:
self.n_conv_within_block = 2
elif arch in ['resnet50', 'resnet101', 'resnet152']:
self.n_conv_within_block = 3
else:
self.n_conv_within_block = 2
self.kept_wg = {}
self.pruned_wg = {}
self.get_pr() # set up pr for each layer
def _pick_pruned(self, w_abs, pr, mode="min"):
if pr == 0:
return []
w_abs_list = w_abs # .flatten()
n_wg = len(w_abs_list)
n_pruned = min(ceil(pr * n_wg), n_wg - 1) # do not prune all
if mode == "rand":
out = np.random.permutation(n_wg)[:n_pruned]
elif mode == "min":
out = w_abs_list.sort()[1][:n_pruned]
out = out.data.cpu().numpy()
elif mode == "max":
out = w_abs_list.sort()[1][-n_pruned:]
out = out.data.cpu().numpy()
return out
    def _register_layers(self):
        ''' This will maintain a data structure that can return some useful
        information by the name of a layer.

        Runs a dummy forward pass so the registered hooks fire and populate
        self.layers, then removes the hooks and prints a layer summary.
        '''
        # register module name
        register_modulename(self.model)
        # register layers
        self._max_len_name = 0
        kwargs = {
            'layers': self.layers,
            # NOTE(review): ints are passed by value, so the hooks presumably
            # cannot update self._max_len_name through this — confirm against register_hook
            'max_len_name': self._max_len_name,
            'learnable_layers': self.learnable_layers,
            'res': True if 'resnet' in self.args.arch else False,
        }
        handles = register_hook(self.model, **kwargs) # this will update 'self.layers'
        dummy_input = torch.randn(self.input_size).to(DEVICE)
        self.model(dummy_input) # make hooks physically work
        rm_hook(handles)
        print("Register layer index and kernel shape:")
        # width (in characters) of the largest layer index, for aligned printing
        self._max_len_ix = len(f'{len(self.layers)}')
        format_str = "[%{}d] %{}s -- kernel_shape: %s".format(self._max_len_ix, self._max_len_name)
        for k, v in self.layers.items():
            print(format_str % (v.layer_index, v.name, v.size) + f' <== {v.last}')
    def _next_learnable_layer(self, model, name, mm):
        '''get the next conv or fc layer name

        Returns the name of the layer with layer_index + 1 and the same layer
        type as `mm`, or None for the last conv of a residual block.
        '''
        if hasattr(self.layers[name], 'block_index'):
            block_index = self.layers[name].block_index
            # last conv in a residual block: presumably no single successor
            # because its output feeds the residual addition — TODO confirm
            if block_index == self.n_conv_within_block - 1:
                return None
        ix = self.layers[name].layer_index # layer index of current layer
        type_ = mm.__class__.__name__ # layer type of current layer
        for name, layer in self.layers.items():
            if layer.layer_type == type_ and layer.layer_index == ix + 1: # for now, requires the same layer_type for wg == 'channel'. TODO: generalize this
                return name
        return None
    def _prev_learnable_layer(self, model, name, mm):
        '''get the previous conv or fc layer name

        Resolution order: (1) an explicit mapping in args.previous_layers
        (wildcard patterns; the value 'none' means "no previous layer"),
        (2) resnet-specific rules via block_index, (3) the layer whose
        layer_index is one less than the current layer's.
        '''
        # explicitly provide the previous layer name, then use it as the highest priority!
        # useful for complex residual networks
        for p in self.args.previous_layers:
            if fnmatch(name, p):
                prev_layer = self.args.previous_layers[p]
                if prev_layer.lower() == 'none':
                    return None
                else:
                    return prev_layer
        # standard resnets. hardcoding, deprecated, will be improved
        if hasattr(self.layers[name], 'block_index'):
            block_index = self.layers[name].block_index
            if block_index in [None, 0, -1]: # 1st conv, 1st conv in a block, 1x1 shortcut layer
                return None
        # get the previous layer by order
        ix = self.layers[name].layer_index # layer index of current layer
        for name, layer in self.layers.items():
            if layer.layer_index == ix - 1:
                return name
        return None
def _next_bn(self, model, mm):
''' TODO: replace with layers impl. instead of using "model.modules()" '''
just_passed_mm = False
for m in model.modules():
if m == mm:
just_passed_mm = True
if just_passed_mm and isinstance(m, nn.BatchNorm2d):
return m
return None
def _replace_module(self, model, name, new_m):
'''
Replace the module <name> in <model> with <new_m>
E.g., 'module.layer1.0.conv1'
==> model.__getattr__('module').__getattr__("layer1").__getitem__(0).__setattr__('conv1', new_m)
'''
obj = model
segs = name.split(".")
for ix in range(len(segs)):
s = segs[ix]
if ix == len(segs) - 1: # the last one
if s.isdigit():
obj.__setitem__(int(s), new_m)
else:
obj.__setattr__(s, new_m)
return
if s.isdigit():
obj = obj.__getitem__(int(s))
else:
obj = obj.__getattr__(s)
def _get_n_filter(self, model):
'''
Do not consider the downsample 1x1 shortcuts.
'''
n_filter = OrderedDict()
for name, m in model.named_modules():
if name in self.layers:
if not self.layers[name].is_shortcut:
ix = self.layers[name].layer_index
n_filter[ix] = m.weight.size(0)
return n_filter
    def _get_layer_pr_vgg(self, name):
        '''Per-layer pruning ratio for single-branch nets, indexed by layer number.

        Example: '[0-4:0.5, 5:0.6, 8-10:0.2]'
        6, 7 not mentioned, default value is 0
        '''
        layer_index = self.layers[name].layer_index
        pr = self.args.stage_pr[layer_index]
        # layers listed in args.skip_layers are never pruned
        if str(layer_index) in self.args.skip_layers:
            pr = 0
        return pr
    def _get_layer_pr_resnet(self, name):
        '''
        This function will determine the prune_ratio (pr) for each specific layer
        by a set of rules.

        Base pr comes from args.stage_pr[stage]; structured pruning then zeroes
        it for shortcut layers, explicitly skipped layers, and the block-boundary
        convs that cannot be pruned without breaking the residual connection.
        '''
        wg = self.args.wg
        layer_index = self.layers[name].layer_index
        stage = self.layers[name].stage
        seq_index = self.layers[name].seq_index
        block_index = self.layers[name].block_index
        is_shortcut = self.layers[name].is_shortcut
        pr = self.args.stage_pr[stage]
        # for unstructured pruning, no restrictions, every layer can be pruned
        if self.args.wg != 'weight':
            # do not prune the shortcut layers for now
            if is_shortcut:
                pr = 0
            # do not prune layers we set to be skipped
            layer_id = '%s.%s.%s' % (str(stage), str(seq_index), str(block_index))
            for s in self.args.skip_layers:
                if s and layer_id.startswith(s):
                    pr = 0
            # for channel/filter prune, do not prune the 1st/last conv in a block
            if (wg == "channel" and block_index == 0) or \
                (wg == "filter" and block_index == self.n_conv_within_block - 1):
                pr = 0
        return pr
def _get_pr_by_name_matching(self, name):
pr = 0 # default pr = 0
for p in self.args.stage_pr:
if fnmatch(name, p):
pr = self.args.stage_pr[p]
return pr
    def get_pr(self):
        r"""Get layer-wise pruning ratio for each layer.

        First records any sparsity already present in the base model, then
        composes this round's target ratio on top of it, either from
        args.stage_pr (indexed by layer numbers or name-matching patterns) or
        inherited from args.base_pr_model.
        """
        self.pr = {}
        # The provided base model may have sparsity already, so here we take a look at the model first.
        for name, m in self.model.named_modules():
            if isinstance(m, self.learnable_layers):
                self.pr[name] = 1. - m.weight.data.count_nonzero() / m.weight.data.numel()
                print(f'Layer {name} has sparsity: {self.pr[name]}')
        # Get new pr for current round of pruning
        if self.args.stage_pr:
            assert self.args.base_pr_model is None
            if self.args.index_layer == 'numbers': # old way to assign pruning ratios, deprecated, will be removed
                get_layer_pr = self._get_layer_pr_vgg if self.is_single_branch(self.args.arch) else self._get_layer_pr_resnet
                for name, m in self.model.named_modules():
                    if isinstance(m, self.learnable_layers):
                        pr_this_time = get_layer_pr(name)
                        # structured pruning restarts from the already-shrunk model;
                        # unstructured pruning composes with the existing sparsity
                        self.pr[name] = pr_this_time if self.args.wg in ['filter', 'channel'] else self.pr[name] + (1 - self.pr[name]) * pr_this_time
            elif self.args.index_layer == 'name_matching':
                for name, m in self.model.named_modules():
                    if isinstance(m, self.learnable_layers):
                        pr_this_time = self._get_pr_by_name_matching(name)
                        self.pr[name] = pr_this_time if self.args.wg in ['filter', 'channel'] else self.pr[name] + (1 - self.pr[name]) * pr_this_time
        else:
            # TODO-@mst: This path does not support iterative pruning so far. Will add this.
            assert self.args.base_pr_model
            state = torch.load(self.args.base_pr_model)
            self.pruned_wg_pr_model = state['pruned_wg']
            self.kept_wg_pr_model = state['kept_wg']
            for k in self.pruned_wg_pr_model:
                n_pruned = len(self.pruned_wg_pr_model[k])
                n_kept = len(self.kept_wg_pr_model[k])
                self.pr[k] = float(n_pruned) / (n_pruned + n_kept)
            print("==> Load base_pr_model successfully and inherit its pruning ratio: '{}'".format(self.args.base_pr_model))
    def _get_kept_wg_L1(self):
        '''Decide kept (or pruned) weight group by L1-norm sorting.

        Fills self.pruned_wg / self.kept_wg per layer, either inherited from
        args.base_pr_model or computed from the current weights.
        '''
        if self.args.base_pr_model and self.args.inherit_pruned == 'index':
            self.pruned_wg = self.pruned_wg_pr_model
            self.kept_wg = self.kept_wg_pr_model
            print("==> Inherit the pruned index from base_pr_model: '{}'".format(self.args.base_pr_model))
        else:
            wg = self.args.wg
            for name, m in self.model.named_modules():
                if isinstance(m, self.learnable_layers):
                    shape = m.weight.data.shape
                    # mean |w| per weight group; 4-D = conv weights, else fc
                    if wg == "filter":
                        score = m.weight.abs().mean(dim=[1, 2, 3]) if len(shape) == 4 else m.weight.abs().mean(dim=1)
                    elif wg == "channel":
                        score = m.weight.abs().mean(dim=[0, 2, 3]) if len(shape) == 4 else m.weight.abs().mean(dim=0)
                    elif wg == "weight":
                        score = m.weight.abs().flatten()
                    else:
                        raise NotImplementedError
                    self.pruned_wg[name] = self._pick_pruned(score, self.pr[name], self.args.pick_pruned)
                    self.kept_wg[name] = list(set(range(len(score))) - set(self.pruned_wg[name]))
                    format_str = f"[%{self._max_len_ix}d] %{self._max_len_name}s -- shape {shape} -- got pruned wg by L1 sorting ({self.args.pick_pruned}), pr {self.pr[name]}"
                    logtmp = format_str % (self.layers[name].layer_index, name)
                    # compare the pruned weights picked by L1-sorting vs. other criterion which provides the base_pr_model (e.g., OBD)
                    if self.args.base_pr_model:
                        intersection = [x for x in self.pruned_wg_pr_model[name] if x in self.pruned_wg[name]]
                        intersection_ratio = len(intersection) / len(self.pruned_wg[name]) if len(self.pruned_wg[name]) else 0
                        logtmp += ', intersection ratio of the weights picked by L1 vs. base_pr_model: %.4f (%d)' % (intersection_ratio, len(intersection))
                    print(logtmp)
    def _get_kept_filter_channel(self, m, name):
        '''For filter/channel pruning, prune one layer will affect the following/previous layer. This func is to figure out which filters
        and channels will be kept in a layer speficially.

        Returns:
            (kept_filter, kept_chl): index lists along the output (filter) and
            input (channel) dimensions of `m`'s weight.
        '''
        if self.args.wg == "channel":
            kept_chl = self.kept_wg[name]
            # the filters kept here are dictated by the next layer's kept channels
            next_learnable_layer = self._next_learnable_layer(self.model, name, m)
            if not next_learnable_layer:
                kept_filter = list(range(m.weight.size(0)))
            else:
                kept_filter = self.kept_wg[next_learnable_layer]
        elif self.args.wg == "filter":
            kept_filter = self.kept_wg[name]
            prev_learnable_layer = self._prev_learnable_layer(self.model, name, m)
            if isinstance(m, nn.Conv2d) and m.groups == m.weight.shape[0] and m.weight.shape[1] == 1: # depth-wise conv
                kept_chl = [0] # depth-wise conv, channel number is always 1
                # a depth-wise conv can only keep the filters its predecessor kept
                if prev_learnable_layer:
                    kept_filter = [x for x in kept_filter if x in self.kept_wg[prev_learnable_layer]]
                    self.kept_wg[name] = kept_filter
            else:
                if not prev_learnable_layer:
                    kept_chl = list(range(m.weight.size(1)))
                else:
                    if self.layers[name].layer_type == self.layers[prev_learnable_layer].layer_type:
                        kept_chl = self.kept_wg[prev_learnable_layer]
                    else: # current layer is the 1st fc, the previous layer is the last conv
                        # each kept conv filter maps to a contiguous run of fc input
                        # features, one per spatial position of the feature map
                        last_conv_n_filter = self.layers[prev_learnable_layer].size[0]
                        last_conv_fm_size = int(m.weight.size(1) / last_conv_n_filter) # feature map spatial size. 36 for alexnet
                        print('last_conv_feature_map_size: %dx%d (before fed into the first fc)' % (sqrt(last_conv_fm_size), sqrt(last_conv_fm_size)))
                        last_conv_kept_filter = self.kept_wg[prev_learnable_layer]
                        kept_chl = []
                        for i in last_conv_kept_filter:
                            tmp = list(range(i * last_conv_fm_size, i * last_conv_fm_size + last_conv_fm_size))
                            kept_chl += tmp
        return kept_filter, kept_chl
def _prune_and_build_new_model(self):
    '''Apply the pruning decisions. For 'weight' (unstructured) pruning, only
    binary masks are applied in place. For filter/channel (structured) pruning,
    a new, physically smaller model is built: each Conv/Linear (and the BN that
    follows it) is replaced by a layer holding only the kept filters/channels.
    '''
    if self.args.wg == 'weight':
        self._get_mask()
        # Apply mask
        for name, m in self.model.named_modules():
            if name in self.mask:
                m.weight.data.mul_(self.mask[name])
        return
    new_model = copy.deepcopy(self.model)
    for name, m in self.model.named_modules():
        if isinstance(m, self.learnable_layers):
            kept_filter, kept_chl = self._get_kept_filter_channel(m, name)
            # print(f'{name} kept_filter: {kept_filter} kept_chl: {kept_chl}')
            # copy weight and bias
            bias = False if isinstance(m.bias, type(None)) else True
            if isinstance(m, nn.Conv2d):
                # Index kept output filters first, then kept input channels.
                kept_weights = m.weight.data[kept_filter][:, kept_chl, :, :]
                if m.weight.shape[0] == m.groups and m.weight.shape[1] == 1: # depth-wise conv
                    groups = len(kept_filter)
                else:
                    groups = m.groups
                new_layer = nn.Conv2d(len(kept_chl) * groups, len(kept_filter), m.kernel_size,
                                m.stride, m.padding, m.dilation, groups, bias).cuda()
            elif isinstance(m, nn.Linear):
                kept_weights = m.weight.data[kept_filter][:, kept_chl]
                new_layer = nn.Linear(in_features=len(kept_chl), out_features=len(kept_filter), bias=bias).cuda()
            new_layer.weight.data.copy_(kept_weights) # load weights into the new module
            if bias:
                kept_bias = m.bias.data[kept_filter]
                new_layer.bias.data.copy_(kept_bias)
            # load the new conv
            self._replace_module(new_model, name, new_layer)
            # get the corresponding bn (if any) for later use
            # (relies on named_modules() yielding the BN *after* its conv)
            next_bn = self._next_bn(self.model, m)
        elif isinstance(m, nn.BatchNorm2d) and m == next_bn:
            # BN that directly follows a pruned conv: keep only the params and
            # running stats of the kept filters.
            new_bn = nn.BatchNorm2d(len(kept_filter), eps=m.eps, momentum=m.momentum,
                    affine=m.affine, track_running_stats=m.track_running_stats).cuda()
            # copy bn weight and bias
            if self.args.copy_bn_w:
                weight = m.weight.data[kept_filter]
                new_bn.weight.data.copy_(weight)
            if self.args.copy_bn_b:
                bias = m.bias.data[kept_filter]
                new_bn.bias.data.copy_(bias)
            # copy bn running stats
            new_bn.running_mean.data.copy_(m.running_mean[kept_filter])
            new_bn.running_var.data.copy_(m.running_var[kept_filter])
            new_bn.num_batches_tracked.data.copy_(m.num_batches_tracked)
            # load the new bn
            self._replace_module(new_model, name, new_bn)
    self.model = new_model
    # Log the per-layer filter counts of the pruned model.
    n_filter = self._get_n_filter(self.model)
    logtmp = '{'
    for ix, num in n_filter.items():
        logtmp += '%s:%d, ' % (ix, num)
    logtmp = logtmp[:-2] + '}'
    print('n_filter of pruned model: %s' % logtmp)
def _get_mask(self):
    r"""Build binary masks (1 = kept, 0 = pruned) for unstructured weight
    pruning, one per learnable layer, and attach the dict to the model.
    """
    self.mask = {}
    for layer_name, module in self.model.named_modules():
        if not isinstance(module, self.learnable_layers):
            continue
        # Start from all-ones, zero the flat indices listed as pruned.
        flat = torch.ones_like(module.weight.data).cuda().flatten()
        flat[self.pruned_wg[layer_name]] = 0
        self.mask[layer_name] = flat.view_as(module.weight.data)
    self.model.mask = self.mask
    print('Get masks done for weight pruning')
Smile-Pruning | Smile-Pruning-master/src/method_modules/reiniter/lth.py | import torch
import torch.nn as nn
class Reiniter():
    """LTH-style re-initialization: reload the model's saved initial weights
    from the checkpoint, then re-apply the pruning masks so pruned entries
    stay at zero.
    """
    def __init__(self, model, loader, args, logger):
        self.model = model
        # Path to the checkpoint holding the initial (untrained) weights.
        self.ckpt_init = logger.passer['ckpt_init']
    def reinit(self):
        """Restore initial weights and zero out pruned weights via the masks."""
        assert hasattr(self.model, 'mask'), "'model' should has attr 'mask'."
        init_weights = torch.load(self.ckpt_init)['state_dict']
        self.model.load_state_dict(init_weights)
        for module_name, module in self.model.named_modules():
            if module_name in self.model.mask:
                module.weight.data.mul_(self.model.mask[module_name])
        print('==> Reinit model: use LTH-like reinitialization - apply masks to initial weights')
Smile-Pruning | Smile-Pruning-master/src/method_modules/reiniter/pth_reset.py | from utils import _weights_init, _weights_init_orthogonal, orthogonalize_weights, delta_orthogonalize_weights
import torch
import torch.nn as nn
from torch.nn.init import _calculate_correct_fan, calculate_gain
import torch.nn.functional as F
import numpy as np, math
def isfloat(num):
    """Return True iff `num` can be parsed as a float."""
    try:
        float(num)
    except ValueError:
        return False
    return True
def rescale_model(model, rescale='std', a=0, mode='fan_in', nonlinearity='leaky_relu'):
    r"""Rescale every Conv2d/Linear weight of `model` in place.

    `rescale` is either a float-like string (used directly as the factor) or
    'std' / 'std<k>' (scale each layer so its weight std matches the kaiming
    target std gain/sqrt(fan), optionally multiplied by <k>).
    Refer to: https://pytorch.org/docs/stable/_modules/torch/nn/init.html#kaiming_uniform_
    """
    for layer_name, layer in model.named_modules():
        if not isinstance(layer, (nn.Conv2d, nn.Linear)):
            continue
        if rescale.startswith('std'):
            weight = layer.weight.data
            fan = _calculate_correct_fan(weight, mode)
            gain = calculate_gain(nonlinearity, a)
            target_std = gain / math.sqrt(fan)
            factor = target_std / torch.std(layer.weight.data)
            if rescale != 'std':
                # 'std<k>' form: multiply the target std by <k>.
                factor *= float(rescale[3:])
        elif isfloat(rescale):
            factor = float(rescale)
        else:
            raise NotImplementedError
        layer.weight.data.copy_(layer.weight.data * factor)
        print(f'Rescale layer "{layer_name}", factor: {factor:.4f}')
    return model
def approximate_isometry_optimize(model, mask, lr, n_iter, wg='weight', print=print):
    '''Refer to: 2020-ICLR-A Signal Propagation Perspective for Pruning Neural Networks at Initialization (ICLR 2020).
    Code: https://github.com/namhoonlee/spp-public
    Make each Conv/Linear layer's flattened weight matrix approximately
    orthogonal by Adam descent on || W W^T - I ||^2, keeping pruned entries
    (per `mask`) at zero throughout.
    '''
    def optimize(w, layer_name):
        '''Approximate Isometry for sparse weights by iterative optimization
        '''
        flattened = w.view(w.size(0), -1) # [n_filter, -1]
        identity = torch.eye(w.size(0)).cuda() # identity matrix
        w_ = torch.autograd.Variable(flattened, requires_grad=True)
        optim = torch.optim.Adam([w_], lr)
        for i in range(n_iter):
            loss = nn.MSELoss()(torch.matmul(w_, w_.t()), identity)
            optim.zero_grad()
            loss.backward()
            optim.step()
            if not isinstance(mask, type(None)):
                # Re-zero the pruned entries after each step, then rebuild the
                # variable and the optimizer so stale momentum cannot revive them.
                w_ = torch.mul(w_, mask[layer_name].view_as(w_)) # not update the pruned params
                w_ = torch.autograd.Variable(w_, requires_grad=True)
                optim = torch.optim.Adam([w_], lr)
            # if i % 100 == 0:
            #     print('[%d/%d] approximate_isometry_optimize for layer "%s", loss %.6f' % (i, n_iter, name, loss.item()))
        # NOTE: `m` is the enclosing loop variable (the module currently processed).
        return w_.view(m.weight.shape)
    for name, m in model.named_modules():
        if isinstance(m, (nn.Conv2d, nn.Linear)):
            w_ = optimize(m.weight, name)
            m.weight.data.copy_(w_)
            print('Finished approximate_isometry_optimize for layer "%s"' % name)
def exact_isometry_based_on_existing_weights(model, act, print=print):
    '''Our proposed method: orthogonalize each Conv/Linear weight in place,
    starting from the layer's existing weights.
    '''
    for layer_name, layer in model.named_modules():
        if not isinstance(layer, (nn.Conv2d, nn.Linear)):
            continue
        layer.weight.data.copy_(orthogonalize_weights(layer.weight, act=act))
        print('Finished exact_isometry for layer "%s"' % layer_name)
def exact_isometry_based_on_existing_weights_delta(model, act, print=print):
    '''Delta-orthogonal init for conv layers, plain orthogonalization for
    linear layers, both starting from the existing weights.
    Refer to 2018-ICML-Dynamical Isometry and a Mean Field Theory of CNNs: How to Train 10,000-Layer Vanilla Convolutional Neural Networks
    '''
    for layer_name, layer in model.named_modules():
        if isinstance(layer, nn.Conv2d):
            layer.weight.data.copy_(delta_orthogonalize_weights(layer.weight, act=act))
            print('Finished isometry for conv layer "%s"' % layer_name)
        elif isinstance(layer, nn.Linear):
            layer.weight.data.copy_(orthogonalize_weights(layer.weight, act=act))
            print('Finished isometry for linear layer "%s"' % layer_name)
def reinit_model(model, args, mask, print):
    '''Dispatch on args.reinit and re-initialize `model` in place accordingly.
    `mask` (may be None) marks pruned weights for mask-aware schemes.
    Returns the (modified) model.
    '''
    if args.reinit in ['default', 'kaiming_normal']: # Note this default is NOT pytorch default init scheme!
        model.apply(_weights_init) # completely reinit weights via 'kaiming_normal'
        print("==> Reinit model: default ('kaiming_normal' for Conv/FC; 0 mean, 1 std for BN)")
    elif args.reinit in ['orth', 'exact_isometry_from_scratch']:
        model.apply(lambda m: _weights_init_orthogonal(m, act=args.activation, scale=args.reinit_scale)) # reinit weights via 'orthogonal_' from scratch
        print("==> Reinit model: exact_isometry ('orthogonal_' for Conv/FC; 0 mean, 1 std for BN)")
    elif args.reinit == 'exact_isometry_based_on_existing':
        exact_isometry_based_on_existing_weights(model, act=args.activation, print=print) # orthogonalize weights based on existing weights
        print("==> Reinit model: exact_isometry (orthogonalize Conv/FC weights based on existing weights)")
    elif args.reinit == 'exact_isometry_based_on_existing_delta':
        exact_isometry_based_on_existing_weights_delta(model, act=args.activation, print=print)
    elif args.reinit == 'approximate_isometry': # A Signal Propagation Perspective for Pruning Neural Networks at Initialization (ICLR 2020)
        approximate_isometry_optimize(model, mask=mask, lr=args.lr_AI, n_iter=10000, print=print) # 10000 refers to the paper above; lr in the paper is 0.1, but not converged here
        print("==> Reinit model: approximate_isometry")
    elif args.reinit in ['pth_reset']:
        # Fall back to each module's own reset_parameters() (pytorch default init).
        learnable_modules = (nn.Conv2d, nn.Linear, nn.BatchNorm2d)
        for _, module in model.named_modules():
            if isinstance(module, learnable_modules):
                module.reset_parameters()
        print('==> Reinit model: use pytorch reset_parameters()')
    else:
        raise NotImplementedError
    return model
def orth_regularization(w, transpose=True):
    '''Soft orthogonality loss for one layer: MSE between W W^T (of the
    flattened [n_filter, -1] weight) and the identity. When `transpose` is set
    and the flattened matrix has fewer rows than columns, the transpose is
    used so the Gram matrix is taken over the smaller dimension.
    '''
    w_ = w.view(w.size(0), -1)
    # Bug fix: compare the *flattened* dims. The original tested
    # `w_.size(0) < w.size(1)`, i.e. against the unflattened channel dim,
    # which is wrong for conv weights [n, c, k, k] whose flattened column
    # count is c*k*k (cf. the sibling `orth_dist`, which compares the
    # reshaped matrix's own dims).
    if transpose and w_.size(0) < w_.size(1):
        w_ = w_.t()
    identity = torch.eye(w_.size(0)).cuda()
    loss = nn.MSELoss()(torch.matmul(w_, w_.t()), identity)
    return loss
def orth_regularization_v3(w, pruned_wg):
    '''Orthogonality loss where rows listed in `pruned_wg` target 0 (instead
    of 1) on the Gram diagonal, i.e. pruned filters are pushed to zero norm.
    '''
    w_ = w.view(w.size(0), -1)
    target = torch.eye(w_.size(0)).cuda()
    for row in pruned_wg:
        target[row, row] = 0
    return nn.MSELoss()(torch.matmul(w_, w_.t()), target)
def deconv_orth_dist(kernel, stride = 2, padding = 1):
    '''Orthogonality distance of a conv kernel under its actual (strided)
    convolution operator: convolve the kernel with itself and compare against
    a target that is the identity at the spatial center and zero elsewhere.
    Refer to 2020-CVPR-Orthogonal Convolutional Neural Networks.
    https://github.com/samaonline/Orthogonal-Convolutional-Neural-Networks
    CodeID: 14de526
    '''
    [o_c, i_c, w, h] = kernel.shape
    # Self-correlation of the kernel: [o_c, o_c, H', W'].
    output = torch.conv2d(kernel, kernel, stride=stride, padding=padding)
    target = torch.zeros((o_c, o_c, output.shape[-2], output.shape[-1])).cuda()
    ct = int(np.floor(output.shape[-1]/2)) # center location of the correlation map
    target[:,:,ct,ct] = torch.eye(o_c).cuda()
    return torch.norm( output - target )
def orth_dist(mat, stride=None):
    '''Frobenius distance of `mat` (flattened to 2-D and oriented tall) from
    column orthogonality: || mat^T mat - I ||. `stride` is unused (kept for
    interface parity with deconv_orth_dist).
    Refer to 2020-CVPR-Orthogonal Convolutional Neural Networks.
    https://github.com/samaonline/Orthogonal-Convolutional-Neural-Networks
    CodeID: 14de526
    '''
    flat = mat.reshape((mat.shape[0], -1))
    if flat.shape[0] < flat.shape[1]:
        flat = flat.permute(1, 0)
    gram = torch.t(flat) @ flat
    return torch.norm(gram - torch.eye(flat.shape[1]).cuda())
def orth_regularization_v4(w, original_column_gram, pruned_wg):
    '''Two-part loss: (1) rows orthogonal, with pruned rows pushed to zero
    norm; (2) the column Gram matrix kept close to its original value so the
    layer's "energy" is maintained. Returns (loss1, loss2).
    '''
    # row: orthogonal
    w_ = w.view(w.size(0), -1)
    target = torch.eye(w_.size(0)).cuda()
    for row in pruned_wg:
        target[row, row] = 0
    loss1 = nn.MSELoss()(torch.matmul(w_, w_.t()), target)
    # column: maintain energy
    loss2 = nn.MSELoss()(torch.matmul(w_.t(), w_), original_column_gram)
    return loss1, loss2
def orth_regularization_v5(w, pruned_wg):
    '''Decorrelate kept weights from pruned weights: the target Gram matrix is
    the current (detached) Gram with pruned rows/columns zeroed out.
    '''
    w_ = w.view(w.size(0), -1)
    target_gram = torch.matmul(w_, w_.t()).detach()  # note: detach!
    for row in pruned_wg:
        target_gram[row, :] = 0
        target_gram[:, row] = 0
    return F.mse_loss(torch.matmul(w_, w_.t()), target_gram, reduction='mean')
def orth_regularization_v5_2(w, pruned_wg):
    '''Implementation of "kernel orthogonality for pruning" (identity target
    with pruned diagonal entries zeroed). Ablation baseline for v5.
    '''
    w_ = w.view(w.size(0), -1)
    target = torch.eye(w_.size(0)).cuda()
    for row in pruned_wg:
        target[row, row] = 0
    return F.mse_loss(torch.matmul(w_, w_.t()), target, reduction='mean')
def orth_regularization_v6(w, pruned_wg, penalty_map):
    '''Based on v5_2, but each element of the Gram-vs-target error is weighted
    by the corresponding entry of `penalty_map` before averaging.
    '''
    w_ = w.view(w.size(0), -1)
    target = torch.eye(w_.size(0)).cuda()
    for row in pruned_wg:
        target[row, row] = 0
    weighted = F.mse_loss(torch.matmul(w_, w_.t()), target, reduction='none') * penalty_map
    return weighted.mean()
class Reiniter():
    """Re-initialize a model by calling each learnable module's own
    `reset_parameters()`, i.e. PyTorch's default init scheme. `loader`,
    `args` and `logger` are accepted for interface parity but unused.
    """
    def __init__(self, model, loader, args, logger):
        self.model = model
    def reinit(self):
        """Reset every Conv2d / Linear / BatchNorm2d of the model in place."""
        resettable = (nn.Conv2d, nn.Linear, nn.BatchNorm2d)
        for _, module in self.model.named_modules():
            if isinstance(module, resettable):
                module.reset_parameters()
        print('==> Reinit model: use pytorch reset_parameters()')
Smile-Pruning | Smile-Pruning-master/src/model/mobilenetv2.py | import torch
import torch.nn as nn
from torchvision.models import alexnet
try:
from torchvision.models import mobilenet_v2
except:
pass
from model import generator as g
# modify mobilenet to my interface
class MobilenetV2(nn.Module):
    """torchvision MobileNetV2 wrapped to (1) use a custom class count and
    (2) optionally return the pooled feature embedding from forward()."""
    def __init__(self, n_class=1000, width_mult=1.0):
        super(MobilenetV2, self).__init__()
        self.net = mobilenet_v2(width_mult=width_mult)
        # Replace the stock 1000-way head with an n_class-way head.
        self.net.classifier = nn.Sequential(
            nn.Dropout(p=0.2),
            nn.Linear(in_features=1280, out_features=n_class, bias=True)
        )
    def forward(self, x, out_feat=False):
        # Global-average-pool the conv features to get the embedding.
        embed = self.net.features(x).mean([2, 3])
        logits = self.net.classifier(embed)
        if out_feat:
            return logits, embed
        return logits
# modify alexnet to my interface
class AlexNet(nn.Module):
    """torchvision AlexNet wrapped so forward() can also return the flattened
    conv-feature embedding."""
    def __init__(self, pretrained=False):
        super(AlexNet, self).__init__()
        self.net = alexnet(pretrained) if pretrained else alexnet()
    def forward(self, x, out_feat=False):
        embed = self.net.features(x).view(x.size(0), -1)
        logits = self.net.classifier(embed)
        if out_feat:
            return logits, embed
        return logits
Smile-Pruning | Smile-Pruning-master/src/model/vgg.py | # This file is referring to: [EigenDamage, ICML'19] at https://github.com/alecwangcq/EigenDamage-Pytorch.
# We modified a little to make it more neat and standard.
import math
import torch
import torch.nn as nn
import torch.nn.init as init
def _weights_init(m):
if isinstance(m, nn.Linear) or isinstance(m, nn.Conv2d):
init.kaiming_normal(m.weight)
if m.bias is not None:
m.bias.data.fill_(0)
elif isinstance(m, nn.BatchNorm2d):
if m.weight is not None:
m.weight.data.fill_(1.0)
m.bias.data.zero_()
# Whether BatchNorm layers use learnable affine parameters (see make_layers).
_AFFINE = True

# VGG configurations keyed by depth: ints are conv3x3 output channel counts,
# 'M' is a 2x2 max-pool. Note: no final 'M', and the classifier is a single
# FC layer fed by global average pooling (see VGG.forward).
defaultcfg = {
    11: [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512],
    13: [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512],
    16: [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512],
    19: [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512],
}
class VGG(nn.Module):
    '''VGG for CIFAR-sized inputs (following EigenDamage, ICML'19): a conv
    stack built from `cfg` (default `defaultcfg[depth]`), a global average
    pool, and a single fully-connected classifier.
    '''
    def __init__(self, depth=19, num_classes=10, num_channels=3, use_bn=True, init_weights=True, cfg=None):
        super(VGG, self).__init__()
        if cfg is None:
            cfg = defaultcfg[depth]
        self.num_channels = num_channels
        self.features = self.make_layers(cfg, use_bn)
        self.classifier = nn.Linear(cfg[-1], num_classes)
        if init_weights:
            self.apply(_weights_init)
    def make_layers(self, cfg, batch_norm=False):
        '''Build the conv stack: ints are conv3x3 output widths (bias-free,
        optionally followed by BN), 'M' is a 2x2 max-pool.'''
        layers = []
        in_channels = self.num_channels
        for v in cfg:
            if v == 'M':
                layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
            else:
                conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1, bias=False)
                if batch_norm:
                    layers += [conv2d, nn.BatchNorm2d(v, affine=_AFFINE), nn.ReLU(inplace=True)]
                else:
                    layers += [conv2d, nn.ReLU(inplace=True)]
                in_channels = v
        return nn.Sequential(*layers)
    def forward(self, x):
        x = self.features(x)
        x = nn.AvgPool2d(x.size(3))(x) # global average pooling
        x = x.view(x.size(0), -1)
        y = self.classifier(x)
        return y
    def _initialize_weights(self):
        # NOTE(review): appears unused — __init__ initializes via
        # self.apply(_weights_init) instead.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.in_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
                if m.bias is not None:
                    m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm2d):
                if m.weight is not None:
                    m.weight.data.fill_(1.0)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                m.weight.data.normal_(0, 0.01)
                m.bias.data.zero_()
def _make_vgg(depth, num_classes, num_channels, use_bn):
    # Shared constructor for all VGG depths below.
    return VGG(depth, num_classes=num_classes, num_channels=num_channels, use_bn=use_bn)

def vgg11(num_classes=10, num_channels=3, use_bn=True, **kwargs):
    """11-layer VGG."""
    return _make_vgg(11, num_classes, num_channels, use_bn)

def vgg13(num_classes=10, num_channels=3, use_bn=True, **kwargs):
    """13-layer VGG."""
    return _make_vgg(13, num_classes, num_channels, use_bn)

def vgg16(num_classes=10, num_channels=3, use_bn=True, **kwargs):
    """16-layer VGG."""
    return _make_vgg(16, num_classes, num_channels, use_bn)

def vgg19(num_classes=10, num_channels=3, use_bn=True, **kwargs):
    """19-layer VGG."""
    return _make_vgg(19, num_classes, num_channels, use_bn)
| 3,423 | 37.47191 | 107 | py |
Smile-Pruning | Smile-Pruning-master/src/model/lenet5.py | import torch
import torch.nn as nn
class LeNet5(nn.Module):
def __init__(self, num_classes: int = 10, num_channels: int = 1):
super(LeNet5, self).__init__()
self.conv1 = nn.Conv2d(num_channels, 6, kernel_size=(5, 5))
self.relu1 = nn.ReLU()
self.maxpool1 = nn.MaxPool2d(kernel_size=(2, 2), stride=2)
self.conv2 = nn.Conv2d(6, 16, kernel_size=(5, 5))
self.relu2 = nn.ReLU()
self.maxpool2 = nn.MaxPool2d(kernel_size=(2, 2), stride=2)
self.conv3 = nn.Conv2d(16, 120, kernel_size=(5, 5))
self.relu3 = nn.ReLU()
self.fc1 = nn.Linear(120, 84)
self.relu4 = nn.ReLU()
self.fc2 = nn.Linear(84, num_classes)
def forward(self, img):
output = self.conv1(img)
output = self.relu1(output)
output = self.maxpool1(output)
output = self.conv2(output)
output = self.relu2(output)
output = self.maxpool2(output)
output = self.conv3(output)
output = self.relu3(output)
feature = output.view(output.size(0), -1)
output = self.fc1(feature)
output = self.relu4(output)
output = self.fc2(output)
return output
class LeNet5_Mini(nn.Module):
def __init__(self, num_classes: int = 10, num_channels: int = 1):
super(LeNet5_Mini, self).__init__()
self.conv1 = nn.Conv2d(num_channels, 10, kernel_size=(5, 5))
self.relu1 = nn.ReLU()
self.maxpool1 = nn.MaxPool2d(kernel_size=(2, 2), stride=2)
self.conv2 = nn.Conv2d(10, 10, kernel_size=(5, 5))
self.relu2 = nn.ReLU()
self.maxpool2 = nn.MaxPool2d(kernel_size=(2, 2), stride=2)
self.conv3 = nn.Conv2d(10, 10, kernel_size=(5, 5))
self.relu3 = nn.ReLU()
self.fc1 = nn.Linear(10, 10)
self.relu4 = nn.ReLU()
self.fc2 = nn.Linear(10, num_classes)
def forward(self, img):
output = self.conv1(img)
output = self.relu1(output)
output = self.maxpool1(output)
output = self.conv2(output)
output = self.relu2(output)
output = self.maxpool2(output)
output = self.conv3(output)
output = self.relu3(output)
feature = output.view(output.size(0), -1)
output = self.fc1(feature)
output = self.relu4(output)
output = self.fc2(output)
return output
class LeNet5_Linear(nn.Module):
def __init__(self, num_classes: int = 10, num_channels: int = 1):
super(LeNet5_Linear, self).__init__()
self.conv1 = nn.Conv2d(num_channels, 6, kernel_size=(5, 5))
self.act1 = nn.Identity()
self.maxpool1 = nn.MaxPool2d(kernel_size=(2, 2), stride=2)
self.conv2 = nn.Conv2d(6, 16, kernel_size=(5, 5))
self.act2 = nn.Identity()
self.maxpool2 = nn.MaxPool2d(kernel_size=(2, 2), stride=2)
self.conv3 = nn.Conv2d(16, 120, kernel_size=(5, 5))
self.act3 = nn.Identity()
self.fc1 = nn.Linear(120, 84)
self.act4 = nn.Identity()
self.fc2 = nn.Linear(84, num_classes)
def forward(self, img):
output = self.conv1(img)
output = self.act1(output)
output = self.maxpool1(output)
output = self.conv2(output)
output = self.act2(output)
output = self.maxpool2(output)
output = self.conv3(output)
output = self.act3(output)
feature = output.view(output.size(0), -1)
output = self.fc1(feature)
output = self.act4(output)
output = self.fc2(output)
return output
class LeNet5_Wider_Linear(nn.Module):
''' 10x num of filters in conv layers
'''
def __init__(self, num_classes: int = 10, num_channels: int = 1):
super(LeNet5_Wider_Linear, self).__init__()
self.conv1 = nn.Conv2d(num_channels, 60, kernel_size=(5, 5))
self.act1 = nn.Identity()
self.maxpool1 = nn.MaxPool2d(kernel_size=(2, 2), stride=2)
self.conv2 = nn.Conv2d(60, 160, kernel_size=(5, 5))
self.act2 = nn.Identity()
self.maxpool2 = nn.MaxPool2d(kernel_size=(2, 2), stride=2)
self.conv3 = nn.Conv2d(160, 1200, kernel_size=(5, 5))
self.act3 = nn.Identity()
self.fc1 = nn.Linear(1200, 84)
self.act4 = nn.Identity()
self.fc2 = nn.Linear(84, num_classes)
def forward(self, img):
output = self.conv1(img)
output = self.act1(output)
output = self.maxpool1(output)
output = self.conv2(output)
output = self.act2(output)
output = self.maxpool2(output)
output = self.conv3(output)
output = self.act3(output)
feature = output.view(output.size(0), -1)
output = self.fc1(feature)
output = self.act4(output)
output = self.fc2(output)
return output
class LeNet5_Wider_Linear_NoMaxPool(nn.Module):
'''Use more filters for the first two conv layers.
'''
def __init__(self, num_classes: int = 10, num_channels: int = 1):
super(LeNet5_Wider_Linear_NoMaxPool, self).__init__()
self.conv1 = nn.Conv2d(num_channels, 100, kernel_size=(3, 3), stride=2) # 14x14
self.act1 = nn.Identity()
self.conv2 = nn.Conv2d(100, 100, kernel_size=(3, 3), stride=2) # 7x7
self.act2 = nn.Identity()
self.conv3 = nn.Conv2d(100, 100, kernel_size=(3, 3), stride=2) # 3x3
self.act3 = nn.Identity()
self.fc1 = nn.Linear(900, 84)
self.act4 = nn.Identity()
self.fc2 = nn.Linear(84, num_classes)
def forward(self, img):
output = self.conv1(img)
output = self.act1(output)
output = self.conv2(output)
output = self.act2(output)
output = self.conv3(output)
output = self.act3(output)
feature = output.view(output.size(0), -1)
output = self.fc1(feature)
output = self.act4(output)
output = self.fc2(output)
return output
def lenet5(num_classes, num_channels, **kwargs):
    """Standard LeNet-5."""
    return LeNet5(num_classes=num_classes, num_channels=num_channels)

def lenet5_mini(num_classes, num_channels, **kwargs):
    """Narrow (10-unit) LeNet-5."""
    return LeNet5_Mini(num_classes=num_classes, num_channels=num_channels)

def lenet5_linear(num_classes, num_channels, **kwargs):
    """LeNet-5 with identity activations."""
    return LeNet5_Linear(num_classes=num_classes, num_channels=num_channels)

def lenet5_wider_linear(num_classes, num_channels, **kwargs):
    """10x-wider LeNet-5 with identity activations."""
    return LeNet5_Wider_Linear(num_classes=num_classes, num_channels=num_channels)

def lenet5_wider_linear_nomaxpool(num_classes, num_channels, **kwargs):
    """Wider linear LeNet variant without max-pooling."""
    return LeNet5_Wider_Linear_NoMaxPool(num_classes=num_classes, num_channels=num_channels)
Smile-Pruning | Smile-Pruning-master/src/model/mlp.py | import torch
import torch.nn as nn
import math
class FCNet(nn.Module):
    '''Configurable fully-connected network. The hidden width is either given
    directly or derived from a total parameter budget (`num_params`). Optional
    "branch" linear heads can map internal features at chosen layer indices
    (set `self.branch_layer_ix` externally; see forward).
    '''
    def __init__(self, dim_input, num_classes, num_fc, width=0, num_params=0, branch_layer_out_dim=None, act='relu', dropout=0):
        super(FCNet, self).__init__()
        # Bug fix: the original default `branch_layer_out_dim=[]` was a mutable
        # default argument; `None` is the safe equivalent (it is only iterated,
        # so behavior is unchanged for all existing callers).
        if branch_layer_out_dim is None:
            branch_layer_out_dim = []
        # activation func
        if act == 'relu':
            activation = nn.ReLU()
        elif act == 'lrelu':
            activation = nn.LeakyReLU()
        elif act == 'linear':
            activation = nn.Identity()
        else:
            raise NotImplementedError
        num_middle = num_fc - 2  # number of hidden (width x width) fc layers
        if width == 0:
            # Given total num of parameters budget, calculate the width:
            # num_middle * width^2 + width * (dim_input + num_classes) = num_params
            assert num_params > 0
            Delta = (dim_input + num_classes) * (dim_input + num_classes) + 4 * num_middle * num_params
            width = (math.sqrt(Delta) - dim_input - num_classes) / 2 / num_middle
            width = int(width)
            print("FC net width = %s" % width)
        # build the stem net
        net = [nn.Linear(dim_input, width), activation]
        for i in range(num_middle):
            net.append(nn.Linear(width, width))
            if dropout and num_middle - i <= 2: # the last two middle fc layers will be applied with dropout
                net.append(nn.Dropout(dropout))
            net.append(activation)
        net.append(nn.Linear(width, num_classes))
        self.net = nn.Sequential(*net)
        # build branch layers
        branch = []
        for x in branch_layer_out_dim:
            branch.append(nn.Linear(width, x))
        self.branch = nn.Sequential(*branch) # so that the whole model can be put on cuda
        # Indices (into self.net) after which branch outputs are taken.
        self.branch_layer_ix = []
    def forward(self, img, branch_out=False, mapping=False):
        '''
        <branch_out>: if output the internal features
        <mapping>: if the internal features go through a mapping layer
        '''
        if not branch_out:
            img = img.view(img.size(0), -1)
            return self.net(img)
        else:
            out = []
            start = 0
            y = img.view(img.size(0), -1)
            keys = [int(x) for x in self.branch_layer_ix]
            for i in range(len(keys)):
                end = keys[i] + 1
                y = self.net[start:end](y)
                y_branch = self.branch[i](y) if mapping else y
                out.append(y_branch)
                start = end
            y = self.net[start:](y)
            out.append(y)
            return out
# Refer to: A Signal Propagation Perspective for Pruning Neural Networks at Initialization (ICLR 2020).
# https://github.com/namhoonlee/spp-public/blob/32bde490f19b4c28843303f1dc2935efcd09ebc9/spp/network.py#L108
def mlp_7_linear(num_classes=10, num_channels=1, **kwargs):
    """7-layer MLP (width 100) with identity activations, for flattened 32x32 inputs."""
    return FCNet(dim_input=1024, num_classes=num_classes, num_fc=7, width=100, act='linear')

def mlp_7_relu(num_classes=10, num_channels=1, **kwargs):
    """7-layer MLP (width 100) with ReLU activations, for flattened 32x32 inputs."""
    return FCNet(dim_input=1024, num_classes=num_classes, num_fc=7, width=100, act='relu')
Smile-Pruning | Smile-Pruning-master/src/model/alexnet_celeba.py | import torch
import torch.nn as nn
from option import args
import model.generator as g
from torchvision.models import mobilenet_v2, alexnet
class AlexNet(nn.Module):
    '''AlexNet variant (BatchNorm after every conv) for 2-class CelebA
    attribute classification. The classifier assumes a 9216-dim flattened
    feature map (i.e. 224x224-style input through the conv stack).
    '''
    def __init__(self, drop=0.5):
        super(AlexNet, self).__init__()
        self.features = nn.Sequential(
            nn.Conv2d(3, 64, kernel_size=(11, 11), stride=(4, 4), padding=(2, 2)),
            nn.BatchNorm2d(64),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2, padding=0, dilation=1, ceil_mode=False),
            nn.Conv2d(64, 192, kernel_size=(5, 5), stride=(1, 1), padding=(2, 2)),
            nn.BatchNorm2d(192),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2, padding=0, dilation=1, ceil_mode=False),
            nn.Conv2d(192, 384, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),
            nn.BatchNorm2d(384),
            nn.ReLU(inplace=True),
            nn.Conv2d(384, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),
            nn.BatchNorm2d(256),
            nn.ReLU(inplace=True),
            nn.Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),
            nn.BatchNorm2d(256),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2, padding=0, dilation=1, ceil_mode=False),
        )
        self.classifier = nn.Sequential(
            nn.Dropout(p=drop),
            nn.Linear(in_features=9216, out_features=4096, bias=True),
            nn.ReLU(inplace=True),
            nn.Dropout(p=drop),
            nn.Linear(in_features=4096, out_features=4096, bias=True),
            nn.ReLU(inplace=True),
            nn.Linear(in_features=4096, out_features=2, bias=True),
        )
    def forward(self, x, out_feat=False):
        x = self.features(x)
        x = x.view(x.size(0), -1)
        # Split the classifier so the penultimate activation can be returned.
        feat = self.classifier[:6](x) # upto and include ReLU
        x = self.classifier[6:](feat)
        if out_feat:
            return x, feat
        else:
            return x
class AlexNet_half(nn.Module):
    '''Half-width version of the CelebA AlexNet above (student network for
    distillation); forward has no feature-output option.
    '''
    def __init__(self, drop=0.5):
        super(AlexNet_half, self).__init__()
        self.features = nn.Sequential(
            nn.Conv2d(3, 32, kernel_size=(11, 11), stride=(4, 4), padding=(2, 2)),
            nn.BatchNorm2d(32),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2, padding=0, dilation=1, ceil_mode=False),
            nn.Conv2d(32, 96, kernel_size=(5, 5), stride=(1, 1), padding=(2, 2)),
            nn.BatchNorm2d(96),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2, padding=0, dilation=1, ceil_mode=False),
            nn.Conv2d(96, 192, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),
            nn.BatchNorm2d(192),
            nn.ReLU(inplace=True),
            nn.Conv2d(192, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),
            nn.BatchNorm2d(128),
            nn.ReLU(inplace=True),
            nn.Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),
            nn.BatchNorm2d(128),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2, padding=0, dilation=1, ceil_mode=False),
        )
        self.classifier = nn.Sequential(
            nn.Dropout(p=drop),
            nn.Linear(in_features=4608, out_features=2048, bias=True),
            nn.ReLU(inplace=True),
            nn.Dropout(p=drop),
            nn.Linear(in_features=2048, out_features=2048, bias=True),
            nn.ReLU(inplace=True),
            nn.Linear(in_features=2048, out_features=2, bias=True),
        )
    def forward(self, x):
        x = self.features(x)
        x = x.view(x.size(0), -1)
        x = self.classifier(x)
        return x
def ccl(lr_G, lr_S, G_ix, equal_distill=False, embed=False):
    """Build teacher/student/generator and their optimizers for CCL
    distillation on CelebA. `G_ix` and `embed` are accepted for interface
    parity but unused here.
    """
    T = AlexNet()
    S = AlexNet() if equal_distill else AlexNet_half(drop=0.7)
    G = g.Generator6()
    optim_G = torch.optim.Adam(G.parameters(), lr=lr_G)
    optim_S = torch.optim.Adam(S.parameters(), lr=lr_S)
    return T, S, G, optim_G, optim_S
def train_teacher(lr_T, embed=False, student=False):
    """Create a teacher model (replaced by the half-size net when `student`)
    and its Adam optimizer. `embed` is unused."""
    T = AlexNet()
    if student:
        # Note: the full net above is still constructed first (kept as-is to
        # preserve the original RNG consumption).
        T = AlexNet_half()
    optim_T = torch.optim.Adam(T.parameters(), lr=lr_T)
    return T, optim_T
def kd(lr_S, equal=False, embed=False):
    """Create teacher + student (equal-size if `equal`) and the student's
    Adam optimizer. `embed` is unused."""
    T = AlexNet()
    S = AlexNet() if equal else AlexNet_half()
    optim_S = torch.optim.Adam(S.parameters(), lr=lr_S)
    return T, S, optim_S
Smile-Pruning | Smile-Pruning-master/src/model/weight_normalization_layer.py | import torch.nn as nn
import torch
from torch.nn import functional as F
class Conv2D_WN(nn.Conv2d):
'''Conv2D with weight normalization.
'''
def __init__(self,
in_channels,
out_channels,
kernel_size,
stride=1,
padding=0,
dilation=1,
groups=1,
bias=True,
padding_mode='zeros', # TODO: refine this type
device=None,
dtype=None
):
super(Conv2D_WN, self).__init__(in_channels, out_channels, kernel_size,
stride=stride, padding=padding, dilation=dilation, groups=groups,
bias=bias, padding_mode=padding_mode)
# set up the scale variable in weight normalization
self.wn_scale = nn.Parameter(torch.ones(out_channels), requires_grad=True)
for i in range(self.weight.size(0)):
self.wn_scale.data[i] = torch.norm(self.weight.data[i])
def forward(self, input):
w = F.normalize(self.weight, dim=(1,2,3))
# print(w.shape)
# print(torch.norm(w[0]))
# print(self.wn_scale)
w = w * self.wn_scale.view(-1,1,1,1)
return F.conv2d(input, w, self.bias, self.stride,
self.padding, self.dilation, self.groups) | 1,249 | 32.783784 | 82 | py |
Smile-Pruning | Smile-Pruning-master/src/model/alexnet_cifar10.py | import torch
import torch.nn as nn
from option import args
import model.generator as g
# ref to ZSKD: https://github.com/vcl-iisc/ZSKD/blob/master/model_training/include/model_alex_full.py
class AlexNet_cifar10(nn.Module):
    '''AlexNet-style teacher for CIFAR-10 (32x32 RGB), following the ZSKD
    reference implementation. `model` and `fixed` are unused (kept for
    interface compatibility with sibling model classes).
    '''
    def __init__(self, model=None, fixed=False):
        super(AlexNet_cifar10, self).__init__()
        self.features = nn.Sequential(
            nn.Conv2d(3, 48, kernel_size=(5, 5), stride=(1, 1), padding=(2, 2)),
            nn.ReLU(inplace=True),
            nn.LocalResponseNorm(size=2),
            nn.MaxPool2d(kernel_size=3, stride=2, padding=0, dilation=1, ceil_mode=False),
            nn.BatchNorm2d(48),
            nn.Conv2d(48, 128, kernel_size=(5, 5), stride=(1, 1), padding=(2, 2)),
            nn.ReLU(inplace=True),
            nn.LocalResponseNorm(size=2),
            nn.MaxPool2d(kernel_size=3, stride=2, padding=0, dilation=1, ceil_mode=False),
            nn.BatchNorm2d(128),
            nn.Conv2d(128, 192, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),
            nn.ReLU(inplace=True),
            nn.BatchNorm2d(192),
            nn.Conv2d(192, 192, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),
            nn.ReLU(inplace=True),
            nn.BatchNorm2d(192),
            nn.Conv2d(192, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2, padding=0, dilation=1, ceil_mode=False),
            nn.BatchNorm2d(128), # output: 128x3x3
        )
        self.classifier = nn.Sequential(
            nn.Linear(in_features=1152, out_features=512, bias=True),
            nn.ReLU(inplace=True),
            nn.Dropout(p=0.5),
            # nn.BatchNorm2d(512),
            nn.Linear(in_features=512, out_features=256, bias=True),
            nn.ReLU(inplace=True),
            nn.Dropout(p=0.5),
            # nn.BatchNorm2d(256),
            nn.Linear(in_features=256, out_features=10, bias=True),
        )
        # BatchNorms applied to fc outputs (reshaped to N x C x 1 x 1 in forward).
        # NOTE(review): .cuda() here hard-codes the device at construction time.
        self.fc_bn1 = nn.BatchNorm2d(512).cuda()
        self.fc_bn2 = nn.BatchNorm2d(256).cuda()
    def forward(self, x, out_feat=False):
        x = self.features(x)
        x = x.view(x.size(0), -1)
        # fc block -> reshape to (N, C, 1, 1) -> BN -> flatten back.
        x = self.classifier[ :3](x); x = x.view(x.size(0), -1, 1, 1); x = self.fc_bn1(x); x = x.view(x.size(0), -1)
        x = self.classifier[3:6](x); x = x.view(x.size(0), -1, 1, 1); f = self.fc_bn2(x); f = f.view(f.size(0), -1)
        x = self.classifier[ 6](f)
        if out_feat:
            return x, f
        else:
            return x
# ref to ZSKD: https://github.com/vcl-iisc/ZSKD/blob/master/model_training/include/model_alex_half.py
class AlexNet_cifar10_student(nn.Module):
    '''Half-width CIFAR-10 AlexNet (distillation student), mirroring
    AlexNet_cifar10 above. `model` and `fixed` are unused.
    '''
    def __init__(self, model=None, fixed=False, drop=0.5):
        super(AlexNet_cifar10_student, self).__init__()
        self.features = nn.Sequential(
            nn.Conv2d(3, 24, kernel_size=(5, 5), stride=(1, 1), padding=(2, 2)),
            nn.ReLU(inplace=True),
            nn.LocalResponseNorm(size=2),
            nn.MaxPool2d(kernel_size=3, stride=2, padding=0, dilation=1, ceil_mode=False),
            nn.BatchNorm2d(24),
            nn.Conv2d(24, 64, kernel_size=(5, 5), stride=(1, 1), padding=(2, 2)),
            nn.ReLU(inplace=True),
            nn.LocalResponseNorm(size=2),
            nn.MaxPool2d(kernel_size=3, stride=2, padding=0, dilation=1, ceil_mode=False),
            nn.BatchNorm2d(64),
            nn.Conv2d(64, 96, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),
            nn.ReLU(inplace=True),
            nn.BatchNorm2d(96),
            nn.Conv2d(96, 96, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),
            nn.ReLU(inplace=True),
            nn.BatchNorm2d(96),
            nn.Conv2d(96, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2, padding=0, dilation=1, ceil_mode=False),
            nn.BatchNorm2d(64), # output: 128x3x3
        )
        self.classifier = nn.Sequential(
            nn.Linear(in_features=576, out_features=256, bias=True),
            nn.ReLU(inplace=True),
            nn.Dropout(p=drop),
            # nn.BatchNorm2d(256),
            nn.Linear(in_features=256, out_features=128, bias=True),
            nn.ReLU(inplace=True),
            nn.Dropout(p=drop),
            # nn.BatchNorm2d(128),
            nn.Linear(in_features=128, out_features=10, bias=True),
        )
        # BatchNorms applied to fc outputs (reshaped to N x C x 1 x 1 in forward).
        # NOTE(review): .cuda() here hard-codes the device at construction time.
        self.fc_bn1 = nn.BatchNorm2d(256).cuda()
        self.fc_bn2 = nn.BatchNorm2d(128).cuda()
    def forward(self, x, out_feat=False):
        x = self.features(x)
        x = x.view(x.size(0), -1)
        # fc block -> reshape to (N, C, 1, 1) -> BN -> flatten back.
        x = self.classifier[ :3](x); x = x.view(x.size(0), -1, 1, 1); x = self.fc_bn1(x); x = x.view(x.size(0), -1)
        x = self.classifier[3:6](x); x = x.view(x.size(0), -1, 1, 1); f = self.fc_bn2(x); f = f.view(f.size(0), -1)
        x = self.classifier[ 6](f)
        if out_feat:
            return x, f
        else:
            return x
def ccl(lr_G, lr_S, G_ix, equal_distill=False, embed=False):
    """Build teacher, student, generator and their optimisers for CCL distillation.

    `G_ix` selects a generator class via `eval("g.Generator" + G_ix)`.
    `embed` is unused here -- presumably kept for interface parity; TODO confirm.
    """
    T = AlexNet_cifar10()
    if equal_distill:
        S = AlexNet_cifar10()
    else:
        S = AlexNet_cifar10_student()
    G = eval("g.Generator" + G_ix)()
    optim_G = torch.optim.Adam(G.parameters(), lr=lr_G)
    optim_S = torch.optim.SGD(S.parameters(), lr=lr_S, momentum=0.9, weight_decay=5e-4)
    return T, S, G, optim_G, optim_S
def train_teacher(lr_T, embed=False, student=False):
    """Build an AlexNet teacher (the half-width student arch when `student`) and its Adam optimiser.

    `embed` is unused -- presumably kept for interface parity; TODO confirm.
    """
    T = AlexNet_cifar10()
    # NOTE(review): the full teacher built above is discarded when student=True.
    if student:
        T = AlexNet_cifar10_student()
    optim_T = torch.optim.Adam(T.parameters(), lr=lr_T)
    return T, optim_T
def kd(lr_S, equal=False, embed=False):
    """Build AlexNet teacher and student (same arch when `equal`) plus the student's Adam optimiser.

    `embed` is unused -- presumably kept for interface parity; TODO confirm.
    """
    T = AlexNet_cifar10()
    S = AlexNet_cifar10() if equal else AlexNet_cifar10_student()
    optim_S = torch.optim.Adam(S.parameters(), lr=lr_S)
    return T, S, optim_S
Smile-Pruning | Smile-Pruning-master/src/model/resnet_cifar10.py | '''
Refer to: https://github.com/akamaster/pytorch_resnet_cifar10/blob/master/resnet.py
Properly implemented ResNet-s for CIFAR10 as described in paper [1].
The implementation and structure of this file is hugely influenced by [2]
which is implemented for ImageNet and doesn't have option A for identity.
Moreover, most of the implementations on the web is copy-paste from
torchvision's resnet and has wrong number of params.
Proper ResNet-s for CIFAR10 (for fair comparision and etc.) has following
number of layers and parameters:
name | layers | params
ResNet20 | 20 | 0.27M
ResNet32 | 32 | 0.46M
ResNet44 | 44 | 0.66M
ResNet56 | 56 | 0.85M
ResNet110 | 110 | 1.7M
ResNet1202| 1202 | 19.4m
which this implementation indeed has.
Reference:
[1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Deep Residual Learning for Image Recognition. arXiv:1512.03385
[2] https://github.com/pytorch/vision/blob/master/torchvision/models/resnet.py
If you use this implementation in you work, please don't forget to mention the
author, Yerlan Idelbayev.
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init
from .weight_normalization_layer import Conv2D_WN
from torch.autograd import Variable
__all__ = ['ResNet', 'resnet20', 'resnet32', 'resnet44', 'resnet56', 'resnet110', 'resnet1202']
def _weights_init(m):
classname = m.__class__.__name__
#print(classname)
if isinstance(m, nn.Linear) or isinstance(m, nn.Conv2d):
init.kaiming_normal_(m.weight)
class LambdaLayer(nn.Module):
    """Wrap an arbitrary callable so it can live inside an nn.Sequential."""

    def __init__(self, lambd):
        super(LambdaLayer, self).__init__()
        self.lambd = lambd

    def forward(self, x):
        # Simply delegate to the wrapped callable.
        return self.lambd(x)
class LambdaLayer2(nn.Module):
    """Option-A ResNet shortcut: subsample H and W by 2 and zero-pad the channel dim.

    Picklable replacement for the lambda-based shortcut.
    """

    def __init__(self, planes):
        super(LambdaLayer2, self).__init__()
        self.planes = planes

    def forward(self, x):
        pad = self.planes // 4
        subsampled = x[:, :, ::2, ::2]
        # Pad spec is (W_left, W_right, H_left, H_right, C_front, C_back):
        # only the channel dimension receives `pad` zero channels on each side.
        return F.pad(subsampled, (0, 0, 0, 0, pad, pad), "constant", 0)
class BasicBlock(nn.Module):
    """CIFAR ResNet basic block: two 3x3 convs with BN and a residual shortcut.

    `option` 'A' uses the zero-padding shortcut from the ResNet paper;
    'B' uses a 1x1 projection conv.  `conv_type='wn'` swaps in weight-normalized convs.
    """
    expansion = 1
    def __init__(self, in_planes, planes, stride=1, option='A', conv_type='default'):
        super(BasicBlock, self).__init__()
        Conv2d = Conv2D_WN if conv_type == 'wn' else nn.Conv2d # @mst: weight normalization
        self.conv1 = Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        # Identity shortcut by default; replaced below when shapes change.
        self.downsample = nn.Sequential()
        if stride != 1 or in_planes != planes:
            if option == 'A':
                """
                For CIFAR10 ResNet paper uses option A.
                """
                # self.downsample = LambdaLayer(lambda x:
                #                             F.pad(x[:, :, ::2, ::2], (0, 0, 0, 0, planes//4, planes//4), "constant", 0))
                # @mingsun-tse: when pickling, the above lambda func will cause an error, so I replace it.
                self.downsample = LambdaLayer2(planes)
            elif option == 'B':
                self.downsample = nn.Sequential(
                     Conv2d(in_planes, self.expansion * planes, kernel_size=1, stride=stride, bias=False),
                     nn.BatchNorm2d(self.expansion * planes)
                )
    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        # Residual connection (identity, zero-pad, or projection shortcut).
        out += self.downsample(x)
        out = F.relu(out)
        return out
class ResNet(nn.Module):
    """CIFAR-style ResNet: three stages of 16/32/64 channels, global avg-pool, linear head."""
    def __init__(self, block, num_blocks,
                 num_classes: int = 10,
                 num_channels: int = 3,
                 conv_type: str = 'default'):
        super(ResNet, self).__init__()
        self.in_planes = 16
        Conv2d = Conv2D_WN if conv_type == 'wn' else nn.Conv2d # @mst: weight normalization
        self.conv1 = Conv2d(num_channels, 16, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(16)
        # NOTE(review): conv_type is not forwarded to the blocks below, so inner
        # convs always use the default conv -- confirm whether that is intended.
        self.layer1 = self._make_layer(block, 16, num_blocks[0], stride=1)
        self.layer2 = self._make_layer(block, 32, num_blocks[1], stride=2)
        self.layer3 = self._make_layer(block, 64, num_blocks[2], stride=2)
        self.linear = nn.Linear(64, num_classes)
        self.apply(_weights_init)
    def _make_layer(self, block, planes, num_blocks, stride):
        # Only the first block of a stage downsamples; the rest use stride 1.
        strides = [stride] + [1]*(num_blocks-1)
        layers = []
        for stride in strides:
            layers.append(block(self.in_planes, planes, stride))
            self.in_planes = planes * block.expansion
        return nn.Sequential(*layers)
    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.layer1(out)
        out = self.layer2(out)
        out = self.layer3(out)
        # Global average pooling over the remaining spatial extent.
        out = F.avg_pool2d(out, out.size()[3])
        out = out.view(out.size(0), -1)
        out = self.linear(out)
        return out
def resnet20(num_classes=10, num_channels=3, **kwargs):
    """ResNet-20 for CIFAR (3 stages x 3 BasicBlocks).

    `conv_type` may be passed via kwargs; defaults to 'default' (plain nn.Conv2d)
    instead of raising KeyError when omitted.
    """
    return ResNet(BasicBlock, [3, 3, 3], num_classes=num_classes, num_channels=num_channels,
                  conv_type=kwargs.get('conv_type', 'default'))
def resnet32(num_classes=10, num_channels=3, **kwargs):
    """ResNet-32 for CIFAR (3 stages x 5 BasicBlocks); see resnet20 for kwargs."""
    return ResNet(BasicBlock, [5, 5, 5], num_classes=num_classes, num_channels=num_channels,
                  conv_type=kwargs.get('conv_type', 'default'))
def resnet44(num_classes=10, num_channels=3, **kwargs):
    """ResNet-44 for CIFAR (3 stages x 7 BasicBlocks); see resnet20 for kwargs."""
    return ResNet(BasicBlock, [7, 7, 7], num_classes=num_classes, num_channels=num_channels,
                  conv_type=kwargs.get('conv_type', 'default'))
def resnet56(num_classes=10, num_channels=3, **kwargs):
    """ResNet-56 for CIFAR (3 stages x 9 BasicBlocks); see resnet20 for kwargs."""
    return ResNet(BasicBlock, [9, 9, 9], num_classes=num_classes, num_channels=num_channels,
                  conv_type=kwargs.get('conv_type', 'default'))
def resnet110(num_classes=10, num_channels=3, **kwargs):
    """ResNet-110 for CIFAR (3 stages x 18 BasicBlocks); see resnet20 for kwargs."""
    return ResNet(BasicBlock, [18, 18, 18], num_classes=num_classes, num_channels=num_channels,
                  conv_type=kwargs.get('conv_type', 'default'))
def resnet1202(num_classes=10, num_channels=3, **kwargs):
    """ResNet-1202 for CIFAR (3 stages x 200 BasicBlocks); see resnet20 for kwargs."""
    return ResNet(BasicBlock, [200, 200, 200], num_classes=num_classes, num_channels=num_channels,
                  conv_type=kwargs.get('conv_type', 'default'))
| 6,133 | 36.631902 | 129 | py |
Smile-Pruning | Smile-Pruning-master/src/model/wrn.py | """
This code is from 2019-NIPS-ZSKT (Spotlight): https://github.com/polo5/ZeroShotKnowledgeTransfer/blob/master/models/wresnet.py
"""
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
import model.generator as g
import copy
class BasicBlock(nn.Module):
    """WRN pre-activation basic block: BN-ReLU-Conv twice, with identity or 1x1-conv shortcut."""
    def __init__(self, in_planes, out_planes, stride, dropRate=0.0):
        super(BasicBlock, self).__init__()
        self.bn1 = nn.BatchNorm2d(in_planes)
        self.relu1 = nn.ReLU(inplace=True)
        self.conv1 = nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(out_planes)
        self.relu2 = nn.ReLU(inplace=True)
        self.conv2 = nn.Conv2d(out_planes, out_planes, kernel_size=3, stride=1,
                               padding=1, bias=False)
        self.droprate = dropRate
        self.equalInOut = (in_planes == out_planes)
        # When channel counts differ, the shortcut is a strided 1x1 conv; the
        # and/or expression yields that conv module, otherwise None.
        self.convShortcut = (not self.equalInOut) and nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride,
                               padding=0, bias=False) or None
    def forward(self, x):
        # When shapes differ, the pre-activation (BN + ReLU) output is shared by
        # both the residual branch and the shortcut -- hence `x` is rebound.
        if not self.equalInOut:
            x = self.relu1(self.bn1(x))
        else:
            out = self.relu1(self.bn1(x))
        out = self.relu2(self.bn2(self.conv1(out if self.equalInOut else x)))
        if self.droprate > 0:
            out = F.dropout(out, p=self.droprate, training=self.training)
        out = self.conv2(out)
        return torch.add(x if self.equalInOut else self.convShortcut(x), out)
class NetworkBlock(nn.Module):
    """A WRN stage: `nb_layers` blocks; only the first may change width/stride."""
    def __init__(self, nb_layers, in_planes, out_planes, block, stride, dropRate=0.0):
        super(NetworkBlock, self).__init__()
        self.layer = self._make_layer(block, in_planes, out_planes, nb_layers, stride, dropRate)
    def _make_layer(self, block, in_planes, out_planes, nb_layers, stride, dropRate):
        layers = []
        for i in range(int(nb_layers)):
            # The first block adapts in_planes -> out_planes with the requested
            # stride; subsequent blocks keep out_planes at stride 1.  Uses a
            # conditional expression instead of the old `x and a or b` trick,
            # which silently selects the wrong operand when `a` is falsy (0).
            layers.append(block(in_planes if i == 0 else out_planes,
                                out_planes,
                                stride if i == 0 else 1,
                                dropRate))
        return nn.Sequential(*layers)
    def forward(self, x):
        return self.layer(x)
class WideResNet(nn.Module):
    """Wide ResNet (WRN-depth-widen_factor) for 32x32 inputs; `depth` must satisfy (depth-4) % 6 == 0."""
    def __init__(self, depth, num_classes, widen_factor=1, dropRate=0.0):
        super(WideResNet, self).__init__()
        nChannels = [16, 16*widen_factor, 32*widen_factor, 64*widen_factor]
        assert((depth - 4) % 6 == 0)
        # Number of blocks per stage (each BasicBlock has 2 convs, 3 stages).
        n = (depth - 4) / 6
        block = BasicBlock
        # 1st conv before any network block
        self.conv1 = nn.Conv2d(3, nChannels[0], kernel_size=3, stride=1,
                               padding=1, bias=False)
        # 1st block
        self.block1 = NetworkBlock(n, nChannels[0], nChannels[1], block, 1, dropRate)
        # 2nd block
        self.block2 = NetworkBlock(n, nChannels[1], nChannels[2], block, 2, dropRate)
        # 3rd block
        self.block3 = NetworkBlock(n, nChannels[2], nChannels[3], block, 2, dropRate)
        # global average pooling and classifier
        self.bn1 = nn.BatchNorm2d(nChannels[3])
        self.relu = nn.ReLU(inplace=True)
        self.fc = nn.Linear(nChannels[3], num_classes)
        self.nChannels = nChannels[3]
        # He initialisation for convs; BN to identity; FC bias to zero.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                m.bias.data.zero_()
    def forward(self, x, out_feat=False):
        """Return logits; additionally the pre-flatten pooled feature map when `out_feat`."""
        out = self.conv1(x)
        out = self.block1(out)
        out = self.block2(out)
        out = self.block3(out)
        out = self.relu(self.bn1(out))
        # 8x8 average pooling assumes a 32x32 input (two stride-2 stages).
        out = F.avg_pool2d(out, 8); embed = out
        out = out.view(-1, self.nChannels)
        if out_feat:
            return self.fc(out), embed
        else:
            return self.fc(out)
def ccl(lr_G, lr_S, G_ix, equal_distill=False, embed=False):
    """Build WRN teacher/student, generator, and their optimisers for CCL distillation.

    `G_ix` selects a generator class via `eval("g.Generator" + G_ix)`.
    `embed` is unused here -- presumably kept for interface parity; TODO confirm.
    """
    T = WideResNet(depth=40, num_classes=10, widen_factor=2, dropRate=0.0)
    if equal_distill:
        S = WideResNet(depth=40, num_classes=10, widen_factor=2, dropRate=0.0)
    else:
        S = WideResNet(depth=16, num_classes=10, widen_factor=2, dropRate=0.0)
    G = eval("g.Generator" + G_ix)()
    optim_G = torch.optim.Adam(G.parameters(), lr=lr_G)
    optim_S = torch.optim.SGD(S.parameters(), lr=lr_S, momentum=0.9, weight_decay=5e-4)
    return T, S, G, optim_G, optim_S
def train_teacher(lr_T, embed=False, student=False):
    """Build a WRN teacher and its SGD optimiser.

    Returns WRN-16-2 when `student` is True, otherwise WRN-40-2.
    `embed` is accepted for interface compatibility but unused.
    The original built the depth-40 network unconditionally and threw it
    away when `student` was set; only the requested network is built now.
    """
    if student:
        T = WideResNet(depth=16, num_classes=10, widen_factor=2, dropRate=0.0)
    else:
        T = WideResNet(depth=40, num_classes=10, widen_factor=2, dropRate=0.0)
    optim_T = torch.optim.SGD(T.parameters(), lr=lr_T, momentum=0.9, weight_decay=5e-4)
    return T, optim_T
def kd(lr_S, equal=False, embed=False):
    """Build WRN teacher and student (a deep copy of the teacher when `equal`) plus the student's optimiser.

    `embed` is unused -- presumably kept for interface parity; TODO confirm.
    """
    T = WideResNet(depth=40, num_classes=10, widen_factor=2, dropRate=0.0)
    if equal:
        S = copy.deepcopy(T)
    else:
        S = WideResNet(depth=16, num_classes=10, widen_factor=2, dropRate=0.0)
    optim_S = torch.optim.SGD(S.parameters(), lr=lr_S, momentum=0.9, weight_decay=5e-4)
    return T, S, optim_S
if __name__ == '__main__':
    # Smoke test: forward a random batch through a WRN and print timing/summary.
    import time
    from torchsummary import summary

    x = torch.FloatTensor(64, 3, 32, 32).uniform_(0, 1)

    ### WideResNets
    # Notation: W-depth-widening_factor
    #model = WideResNet(depth=16, num_classes=10, widen_factor=1, dropRate=0.0)
    #model = WideResNet(depth=16, num_classes=10, widen_factor=2, dropRate=0.0)
    #model = WideResNet(depth=16, num_classes=10, widen_factor=8, dropRate=0.0)
    #model = WideResNet(depth=16, num_classes=10, widen_factor=10, dropRate=0.0)
    #model = WideResNet(depth=22, num_classes=10, widen_factor=8, dropRate=0.0)
    #model = WideResNet(depth=34, num_classes=10, widen_factor=2, dropRate=0.0)
    #model = WideResNet(depth=40, num_classes=10, widen_factor=10, dropRate=0.0)
    #model = WideResNet(depth=40, num_classes=10, widen_factor=1, dropRate=0.0)
    model = WideResNet(depth=40, num_classes=10, widen_factor=2, dropRate=0.0)
    ###model = WideResNet(depth=50, num_classes=10, widen_factor=2, dropRate=0.0)

    t0 = time.time()
    # out_feat defaults to False, so model(x) returns a single (64, 10) logits
    # tensor.  The old `output, *act = model(x)` unpacked that tensor along the
    # batch dimension, leaving `output` as the first sample's logits only.
    output = model(x)
    print("Time taken for forward pass: {} s".format(time.time() - t0))
    print("\nOUTPUT SHAPE: ", output.shape)
    summary(model, input_size=(3, 32, 32))
couta | couta-main/main.py | """
@author: Hongzuo Xu
@comments: testbed for time series anomaly detection
"""
import argparse
import os
import time
import pickle
import numpy as np
from main_utils import prepare, run, get_data_lst
# -------------------------------- argument parser --------------------------------#
parser = argparse.ArgumentParser()
parser.add_argument('--data_dir', type=str, default=f'data_processed/')
# help text fixed: it previously said 'dataset name' (copy-paste error)
parser.add_argument('--results_dir', type=str, default='@results/', help='directory to store result files')
parser.add_argument('--data', type=str, default='ASD', help='dataset name')
parser.add_argument("--entities", type=str, default='omi-1',
                    help='FULL represents all the entities, or a list of entity names split by comma')
parser.add_argument('--algo', type=str, default='COUTA',
                    choices=['COUTA', 'COUTA_wto_nac', 'COUTA_wto_umc', 'Canonical'])
parser.add_argument('--device', help='torch device', type=str, default='cuda', choices=['cuda', 'cpu'])
parser.add_argument('--runs', help='number of independent runs per entity', type=int, default=1)
parser.add_argument('--log_path', type=str, default='log/')
parser.add_argument('--save_pred', action='store_true', default=False)
parser.add_argument('--flag', type=str, default='')
parser.add_argument('--record_avg', type=int, default=1)
args = parser.parse_args()

# -------------------------------- running preparation --------------------------------#
results_raw_dir, model_class, model_configs, logger = prepare(args)
logger_fh, logger_fh_raw, logger_fh_avg = logger

# print the header of results files
cur_time = time.strftime("%Y-%m-%d %H.%M.%S", time.localtime())
header = f'\n' \
         f'--------------------------------------------------------------------\n' \
         f'Time: {cur_time}, flag: {args.flag} \n' \
         f'Data: {args.data}, Algo: {args.algo}, Runs: {args.runs} \n' \
         f'Configs: {model_configs} \n' \
         f'--------------------------------------------------------------------\n'
logger_fh.info(header)
logger_fh_raw.info(header)
logger_fh_avg.info(header)
header2 = f'data, adj_auroc, adj_aupr, adj_f1, adj_p, adj_r, ' \
          f'adj_auroc_std, adj_aupr_std, adj_f1_std, adj_p_std, adj_r_std, time'
logger_fh_avg.info(header2)

# -------------------------------- Reading Data --------------------------------#
train_df_lst, test_df_lst, label_lst, name_lst = get_data_lst(args.data, args.data_dir, entities=args.entities)
name_lst = [args.data + '-' + n for n in name_lst]

# -------------------------------- Running --------------------------------#
start_time = time.time()
f1_lst = []
aupr_lst = []
for train, test, label, name in zip(train_df_lst, test_df_lst, label_lst, name_lst):
    entries = []
    t_lst = []
    for i in range(args.runs):
        logger_fh.info(f'\n\n Running {args.algo} on {name} [{i+1}/{args.runs}], '
                       f'cur_time: {time.strftime("%Y-%m-%d %H.%M.%S", time.localtime())}')
        t1 = time.time()
        # running: vary the seed per run; the ablation flags follow the algo name
        model_configs['seed'] = 42 + i
        model_configs['umc'] = 0 if 'wto_umc' in args.algo else 1
        model_configs['nac'] = 0 if 'wto_nac' in args.algo else 1
        model = model_class(**model_configs)
        predictions, eval_metrics, adj_eval_metrics = run(train, test, label, model, data_name=name)
        entries.append(adj_eval_metrics)
        t = time.time() - t1
        t_lst.append(t)
        # save prediction raw results (context manager replaces the manual open/close)
        if args.save_pred:
            prediction_path = os.path.join(results_raw_dir, f'{name}+{args.algo}@{i}.pkl')
            with open(prediction_path, 'wb') as f:
                pickle.dump(predictions, f)
        # save raw results of evaluation metrics
        txt = f'{name},'
        txt += ', '.join(['%.4f' % a for a in eval_metrics]) + \
               ', pa, ' + \
               ', '.join(['%.4f' % a for a in adj_eval_metrics])
        txt += f', model, {args.algo}, time, {t:.1f} s, runs, {i+1}/{args.runs}'
        logger_fh.info(txt)
        logger_fh_raw.info(txt)
    # aggregate mean/std over the runs of this entity
    avg_entry = np.average(np.array(entries), axis=0)
    std_entry = np.std(np.array(entries), axis=0)
    avg_t = np.average(np.array(t_lst))
    f1_lst.append(avg_entry[2])
    aupr_lst.append(avg_entry[1])
    txt = f'{name}, ' + ", ".join(['%.4f' % a for a in np.hstack([avg_entry, std_entry])]) + f', {avg_t:.1f} s'
    logger_fh.info(txt)
    logger_fh_avg.info(txt)

if args.record_avg:
    logger_fh.info(f'\nf1, {np.average(f1_lst):.4f}, aupr, {np.average(aupr_lst):.4f}, '
                   f'time, {(time.time()-start_time):.1f}')
    logger_fh_avg.info(f'\nf1, {np.average(f1_lst):.4f}, aupr, {np.average(aupr_lst):.4f}, '
                       f'time, {(time.time()-start_time):.1f}')

# mark the raw-results folder as finished
results_raw_dir_new = results_raw_dir.replace('raw-record', f'[done] raw-record')
os.rename(results_raw_dir, results_raw_dir_new)
couta | couta-main/src/utils_general.py | import sys
sys.path.append('../src')
import numpy as np
import torch
import random
from matplotlib import pyplot as plt
import seaborn as sns
from src import utils_eval
def set_seed(seed):
    """Seed every RNG in use (Python, NumPy, Torch CPU/CUDA) and make cuDNN deterministic."""
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    torch.backends.cudnn.deterministic = True
def get_sequences_data(data, data_root):
    """Load pre-saved .npy train/test arrays for dataset `data` under `data_root`.

    Returns single-element lists (train arrays, test arrays, test labels, names)
    to match the multi-entity interface used elsewhere.
    """
    name_lst = []
    train_df_lst = []
    test_df_lst = []
    label_lst = []
    train_x = np.load(f'{data_root}{data}/{data}_trainx.npy')
    test_x = np.load(f'{data_root}{data}/{data}_testx.npy')
    # NOTE(review): train_y is loaded but never returned -- confirm if intentional.
    train_y = np.load(f'{data_root}{data}/{data}_trainy.npy')
    test_y = np.load(f'{data_root}{data}/{data}_testy.npy')
    print(train_x.shape, test_x.shape)
    train_df_lst.append(train_x)
    test_df_lst.append(test_x)
    label_lst.append(test_y)
    name_lst.append(data)
    return train_df_lst, test_df_lst, label_lst, name_lst
def minmax_norm(arr):
    """Min-max scale `arr` to [0, 1]; a constant array maps to all zeros.

    Vectorized replacement for the original per-element list comprehension;
    output values are identical.
    """
    arr = np.asarray(arr)
    _min_, _max_ = arr.min(), arr.max()
    # Guard against division by zero when the input is constant.
    r = _max_ - _min_ if _max_ != _min_ else 1
    return (arr - _min_) / r
def data_standardize(X_train, X_test, remove=False, verbose=False, max_clip=5, min_clip=-4):
    """Column-wise min-max scale both frames using TRAIN statistics; mutates in place.

    Test columns are additionally clipped to [min_clip, max_clip] since test
    values may fall outside the train range.  Constant train columns are either
    dropped (`remove=True`) or scaled to 1 by dividing by their min.
    """
    mini, maxi = X_train.min(), X_train.max()
    for col in X_train.columns:
        if maxi[col] != mini[col]:
            X_train[col] = (X_train[col] - mini[col]) / (maxi[col] - mini[col])
            X_test[col] = (X_test[col] - mini[col]) / (maxi[col] - mini[col])
            # Only the test split is clipped; train is in [0, 1] by construction.
            X_test[col] = np.clip(X_test[col], a_min=min_clip, a_max=max_clip)
        else:
            assert X_train[col].nunique() == 1
            if remove:
                if verbose:
                    print("Column {} has the same min and max value in train. Will remove this column".format(col))
                X_train = X_train.drop(col, axis=1)
                X_test = X_test.drop(col, axis=1)
            else:
                if verbose:
                    print("Column {} has the same min and max value in train. Will scale to 1".format(col))
                if mini[col] != 0:
                    X_train[col] = X_train[col] / mini[col]  # Redundant operation, just for consistency
                    X_test[col] = X_test[col] / mini[col]
                if verbose:
                    print("After transformation, train unique vals: {}, test unique vals: {}".format(
                        X_train[col].unique(),
                        X_test[col].unique()))
    return X_train, X_test
def meta_process_scores(predictions_dic, name):
    """Fill in missing anomaly scores and apply per-dataset conventions.

    When 'score_t' is absent, derive it (and 'score_tc') from the per-channel
    errors 'error_tc'.  Following [Garg 2021 TNNLS], each entity in MSL and
    SMAP has only one real sensor channel (the rest are one-hot commands), so
    for those datasets only channel 0 of the per-channel scores is used.
    """
    if predictions_dic["score_t"] is None:
        error_tc = predictions_dic['error_tc']
        assert error_tc is not None
        predictions_dic['score_tc'] = error_tc
        predictions_dic['score_t'] = np.sum(error_tc, axis=1)
    is_single_sensor = 'MSL' in name or 'SMAP' in name
    if is_single_sensor and predictions_dic['score_tc'] is not None:
        predictions_dic['score_t'] = predictions_dic['score_tc'][:, 0]
    return predictions_dic
# ----------------------------------------- visualization --------------------------------------- #
def plt_full_chart(df, y=None, save_path=None):
    """Plot every column of `df` as a stacked line chart, shading anomaly segments from `y`."""
    n_dim = df.shape[1]
    length = df.shape[0]
    fig = plt.figure(figsize=(20, 1.8*n_dim))
    # Collect (start, end) index pairs of contiguous anomaly runs in y.
    anom_pairs = []
    if y is not None:
        anom_index = np.where(y==1)[0]
        tmp_seg = []
        for i in anom_index:
            tmp_seg.append(i)
            if i + 1 not in anom_index:
                anom_pairs.append((tmp_seg[0], tmp_seg[-1]))
                tmp_seg = []
    palette = sns.color_palette("cividis", n_dim)
    sns.set_theme(palette=palette, style='ticks')
    for ii in range(n_dim):
        value = df.iloc[:, ii].values
        col = df.columns[ii]
        fig.add_subplot(n_dim, 1, ii + 1)
        sns.lineplot(x=np.arange(length), y=value, legend=True, label=col, color=palette[0])
        value_min = np.around(value.min(), decimals=1) - 0.1
        value_max = np.around(value.max(), decimals=1) + 0.1
        # NOTE(review): `y` is rebound here, shadowing the label argument --
        # harmless because anom_pairs was already computed, but confusing.
        y = np.linspace(value_min, value_max, 10)
        for pair in anom_pairs:
            x1 = np.ones(len(y)) * pair[0]
            x2 = np.ones(len(y)) * pair[1]
            plt.fill_betweenx(y, x1, x2, alpha=0.3, color='goldenrod')
        plt.legend(loc='upper right')
        plt.xlim(0, length)
        plt.ylim(value_min, value_max)
        if ii != n_dim-1:
            plt.xticks([])
            plt.xlabel('')
    plt.subplots_adjust(wspace=0, hspace=0.09)
    if save_path is not None:
        plt.savefig(save_path)
    return
def plt_res_with_dat(score_lst, y, data, name_lst=None, ytick_range=None):
    """Plot the raw series `data` on top and one score curve per algorithm below,
    shading ground-truth anomaly segments from `y` in every panel."""
    n_algo = len(score_lst)
    fig = plt.figure(figsize=(15, 4*n_algo))
    index = np.arange(len(score_lst[0]))
    # Collect (start, end) index pairs of contiguous anomaly runs in y.
    anom_pairs = []
    if y is not None:
        anom_index = np.where(y == 1)[0]
        tmp_seg = []
        for i in anom_index:
            tmp_seg.append(i)
            if i + 1 not in anom_index:
                anom_pairs.append((tmp_seg[0], tmp_seg[-1]))
                tmp_seg = []
    fig.add_subplot(n_algo+1, 1, 1)
    sns.lineplot(x=index, y=data, color=sns.color_palette('Greys_r')[0])
    plt.xlim(index[0], index[-1])
    value_min = np.around(data.min(), decimals=1) - 0.1
    value_max = np.around(data.max(), decimals=1) + 0.1
    values_ = np.linspace(value_min, value_max, 10)
    for pair in anom_pairs:
        x1 = np.ones(len(values_)) * pair[0]
        x2 = np.ones(len(values_)) * pair[1]
        plt.fill_betweenx(values_, x1, x2, alpha=0.3, color='goldenrod')
    for ii, score in enumerate(score_lst):
        fig.add_subplot(n_algo+1, 1, 2+ii)
        palette = sns.color_palette("cividis")
        sns.set_theme(palette=palette, style='ticks')
        # # scale scores
        # if np.max(score)!= np.min(score):
        #     score = (score - np.min(score)) / (np.max(score) - np.min(score))
        # else:
        #     score = np.zeros_like(score)
        # NOTE(review): adj_score is computed but never used in this function.
        adj_score = utils_eval.adjust_scores(y, score)
        sns.lineplot(x=index, y=score, legend=True, color=palette[0])
        value_min = np.around(score.min(), decimals=1) - 0.1
        value_max = np.around(score.max(), decimals=1) + 0.1
        values_ = np.linspace(value_min, value_max, 10)
        for pair in anom_pairs:
            x1 = np.ones(len(values_)) * pair[0]
            x2 = np.ones(len(values_)) * pair[1]
            plt.fill_betweenx(values_, x1, x2, alpha=0.3, color='goldenrod')
        plt.xlim(index[0], index[-1])
        plt.ylim(0, 1)
        if ytick_range is not None:
            plt.ylim(ytick_range[0], ytick_range[1])
        if name_lst is not None:
            plt.title(name_lst[ii])
    plt.show()
    return fig
def plt_res(score, y, ytick_range=None):
    """Plot one anomaly-score curve with the best point-adjusted F1 threshold and shaded anomaly segments."""
    fig = plt.figure(figsize=(20, 2))
    adj_score = utils_eval.adjust_scores(y, score)
    best_f1, best_p, best_r, best_th = utils_eval.get_best_f1(y, adj_score)
    # Horizontal line at the threshold achieving the best adjusted F1.
    plt.axhline(best_th, color='r', linewidth=0.4, linestyle='-.')
    # Collect (start, end) index pairs of contiguous anomaly runs in y.
    anom_pairs = []
    if y is not None:
        anom_index = np.where(y == 1)[0]
        tmp_seg = []
        for i in anom_index:
            tmp_seg.append(i)
            if i + 1 not in anom_index:
                anom_pairs.append((tmp_seg[0], tmp_seg[-1]))
                tmp_seg = []
    palette = sns.color_palette("cividis")
    sns.set_theme(palette=palette, style='ticks')
    index = np.arange(len(score))
    # sns.scatterplot(x=index, y=score, legend=True, color=palette[0], s=5, marker='x')
    sns.lineplot(x=index, y=score, legend=True, color=palette[0])
    value_min = np.around(score.min(), decimals=1) - 0.1
    value_max = np.around(score.max(), decimals=1) + 0.1
    values_ = np.linspace(value_min, value_max, 10)
    for pair in anom_pairs:
        x1 = np.ones(len(values_)) * pair[0]
        x2 = np.ones(len(values_)) * pair[1]
        plt.fill_betweenx(values_, x1, x2, alpha=0.3, color='goldenrod')
    plt.xlim(index[0], index[-1])
    plt.ylim(value_min, value_max)
    if ytick_range is not None:
        plt.ylim(ytick_range[0], ytick_range[1])
    plt.show()
    return fig
def plt_res_multi(score_lst, y, title_lst=None, ytick_range=None):
    """Stack one panel per score curve, each with its best-F1 threshold line and shaded anomaly segments."""
    n_algo = len(score_lst)
    fig = plt.figure(figsize=(20, 1.6 * n_algo))
    for aa in range(n_algo):
        ax = fig.add_subplot(n_algo, 1, aa + 1)
        score = score_lst[aa]
        adj_score = utils_eval.adjust_scores(y, score)
        best_f1, best_p, best_r, best_th = utils_eval.get_best_f1(y, adj_score)
        plt.axhline(best_th, color='r', linewidth=0.4, linestyle='-.')
        # Collect (start, end) index pairs of contiguous anomaly runs in y.
        anom_pairs = []
        if y is not None:
            anom_index = np.where(y == 1)[0]
            tmp_seg = []
            for i in anom_index:
                tmp_seg.append(i)
                if i + 1 not in anom_index:
                    anom_pairs.append((tmp_seg[0], tmp_seg[-1]))
                    tmp_seg = []
        palette = sns.color_palette("cividis")
        sns.set_theme(palette=palette, style='ticks')
        index = np.arange(len(score))
        # sns.scatterplot(x=index, y=score, legend=True, color=palette[0], s=5, marker='x')
        sns.lineplot(x=index, y=score, legend=True, label=title_lst[aa], color=palette[0])
        value_min = np.around(score.min(), decimals=1) - 0.1
        value_max = np.around(score.max(), decimals=1) + 0.1
        values_ = np.linspace(value_min, value_max, 10)
        for pair in anom_pairs:
            x1 = np.ones(len(values_)) * pair[0]
            x2 = np.ones(len(values_)) * pair[1]
            plt.fill_betweenx(values_, x1, x2, alpha=0.3, color='goldenrod')
        plt.xlim(index[0], index[-1])
        plt.ylim(value_min, value_max)
        if ytick_range is not None:
            plt.ylim(ytick_range[0], ytick_range[1])
        if aa != n_algo - 1:
            plt.xticks([])
    plt.subplots_adjust(wspace=0, hspace=0.1)
    return fig
couta | couta-main/src/algorithms/algorithm_utils.py | import os
import random
import numpy as np
import torch
import string
# Random tag so concurrent runs do not overwrite each other's checkpoints.
mask = ''.join(random.sample(string.ascii_letters, 8))


class EarlyStopping:
    """Early stops the training if validation loss doesn't improve after a given patience."""
    def __init__(self, intermediate_dir,
                 patience=7, verbose=False, delta=5e-5, model_name="",
                 trace_func=print, structrue='torch'):
        """
        Args:
            intermediate_dir (str): Directory under which checkpoints are stored.
            patience (int): How long to wait after last time validation loss improved.
                            Default: 7
            verbose (bool): If True, prints a message for each validation loss improvement.
                            Default: False
            delta (float): Minimum change in the monitored quantity to qualify as an improvement.
                            Default: 5e-5
            model_name (str): Used to build the checkpoint path.
            trace_func (function): trace print function.
                            Default: print
            structrue (str): 'torch' or 'keras'.  NOTE: the parameter name is a
                            typo but is part of the public interface (callers
                            pass it by keyword), so it is kept.
        """
        self.structure = structrue
        self.patience = patience
        self.verbose = verbose
        self.counter = 0
        self.best_score = None
        self.early_stop = False
        # np.Inf was removed in NumPy 2.0; np.inf is the portable spelling.
        self.val_loss_min = np.inf
        self.delta = delta
        # Both branches currently build the same path; kept separate in case
        # the keras checkpoint naming ever diverges.
        if structrue == 'torch':
            self.path = os.path.join(intermediate_dir, model_name, model_name + "." + mask + '_checkpoint.pt')
        elif structrue == 'keras':
            self.path = os.path.join(intermediate_dir, model_name, model_name + "." + mask + '_checkpoint.pt')
        self.trace_func = trace_func
        os.makedirs(os.path.split(self.path)[0], exist_ok=True)

    def __call__(self, val_loss, model):
        """Register a new validation loss; saves a checkpoint on improvement,
        sets `early_stop` after `patience` non-improving calls."""
        score = -val_loss
        if self.best_score is None:
            self.best_score = score
            self.save_checkpoint(val_loss, model)
        elif score <= self.best_score + self.delta:
            self.counter += 1
            # self.trace_func(f'EarlyStopping counter: {self.counter} out of {self.patience}')
            if self.counter >= self.patience:
                self.early_stop = True
        else:
            self.best_score = score
            self.save_checkpoint(val_loss, model)
            self.counter = 0

    def save_checkpoint(self, val_loss, model):
        """Saves model when validation loss decrease."""
        if self.verbose:
            self.trace_func(
                f'Validation loss decreased ({self.val_loss_min:.6f} --> {val_loss:.6f}).  Saving model ...')
        if self.structure == 'torch':
            torch.save(model.state_dict(), self.path)
        elif self.structure == 'keras':
            model.save(self.path)
        self.val_loss_min = val_loss
def get_sub_seqs(x_arr, seq_len, stride=1, start_discont=np.array([])):
    """
    :param start_discont: the start points of each sub-part in case the x_arr is just multiple parts joined together
    :param x_arr: dim 0 is time, dim 1 is channels
    :param seq_len: size of window used to create subsequences from the data
    :param stride: number of time points the window will move between two subsequences
    :return: array of shape (n_windows, seq_len, n_channels)
    """
    # Window starts that would straddle a discontinuity boundary are dropped.
    excluded_starts = []
    for boundary in start_discont:
        if boundary > seq_len:
            excluded_starts.extend(range(boundary - seq_len + 1, boundary))
    # NOTE(review): np.delete removes by INDEX; index equals start value only
    # when stride == 1 -- verify if stride > 1 is ever combined with start_discont.
    seq_starts = np.delete(np.arange(0, x_arr.shape[0] - seq_len + 1, stride), excluded_starts)
    return np.array([x_arr[s:s + seq_len] for s in seq_starts])
| 3,512 | 39.848837 | 116 | py |
couta | couta-main/src/algorithms/couta_algo.py | """
Calibrated One-class classifier for Unsupervised Time series Anomaly detection (COUTA)
@author: Hongzuo Xu (hongzuo.xu@gmail.com)
"""
import os
import random
import pandas as pd
import numpy as np
import torch
from torch.utils.data import Dataset
from numpy.random import RandomState
from torch.utils.data import DataLoader
from src.algorithms.algorithm_utils import EarlyStopping, get_sub_seqs
from src.algorithms.net import NetModule
class COUTA:
    def __init__(self, sequence_length=100, stride=1,
                 num_epochs=40, batch_size=64, lr=1e-4, ss_type='FULL',
                 hidden_dims=16, emb_dim=16, rep_hidden=16, pretext_hidden=16,
                 kernel_size=2, dropout=0.0, bias=True,
                 alpha=0.1, neg_batch_ratio=0.2, es=False, train_val_pc=0.25,
                 seed=0, device='cuda',
                 logger=None, model_dir='couta_model/',
                 save_model_path=None, load_model_path=None,
                 nac=True, umc=True
                 ):
        """
        COUTA class for Calibrated One-class classifier for Unsupervised Time series Anomaly detection

        Parameters
        ----------
        sequence_length: integer, default=100
            sliding window length
        stride: integer, default=1
            sliding window stride
        num_epochs: integer, default=40
            the number of training epochs
        batch_size: integer, default=64
            the size of mini-batches
        lr: float, default=1e-4
            learning rate
        ss_type: string, default='FULL'
            types of perturbation operation type, which can be 'FULL' (using all
            three anomaly types), 'point', 'contextual', or 'collective'.
        hidden_dims: integer or list of integer, default=16,
            the number of neural units in the hidden layer
        emb_dim: integer, default=16
            the dimensionality of the feature space
        rep_hidden: integer, default=16
            the number of neural units of the hidden layer
        pretext_hidden: integer, default=16
            the number of neural units of the pretext head's hidden layer
        kernel_size: integer, default=2
            the size of the convolutional kernel in TCN
        dropout: float, default=0
            the dropout rate
        bias: bool, default=True
            the bias term of the linear layer
        alpha: float, default=0.1
            the weight of the classification head of NAC
        neg_batch_ratio: float, default=0.2
            the ratio of generated native anomaly examples
        es: bool, default=False
            early stopping
        train_val_pc: float, default=0.25
            fraction of training sequences held out for validation
        seed: integer, default=0
            random state seed (None disables explicit seeding)
        device: string, default='cuda'
        logger: logger or print, default=None
        model_dir: string, default='couta_model/'
            directory to store intermediate model files
        save_model_path / load_model_path: string, default=None
            optional checkpoint locations
        nac: bool, default=True
            used for ablation study
        umc: bool, default=True
            used for ablation study
        """
        self.seq_len = sequence_length
        self.stride = stride
        self.batch_size = batch_size
        self.num_epochs = num_epochs
        self.lr = lr
        self.device = device
        self.ss_type = ss_type
        self.kernel_size = kernel_size
        self.dropout = dropout
        self.hidden_dims = hidden_dims
        self.rep_hidden = rep_hidden
        self.pretext_hidden = pretext_hidden
        self.emb_dim = emb_dim
        self.bias = bias
        self.alpha = alpha
        self.neg_batch_size = int(neg_batch_ratio * self.batch_size)
        self.max_cut_ratio = 0.5
        self.es = es
        self.train_val_pc = train_val_pc
        self.log_func = logger.info if logger is not None else print
        self.model_dir = model_dir
        os.makedirs(model_dir, exist_ok=True)
        # Log all constructor arguments (minus non-serialisable ones) for the record.
        param_lst = locals()
        del param_lst['self']
        del param_lst['device']
        del param_lst['logger']
        self.log_func(param_lst)
        # Seed every RNG for reproducibility when a seed is given.
        if seed is not None:
            self.seed = seed
            np.random.seed(seed)
            random.seed(seed)
            torch.manual_seed(seed)
            torch.cuda.manual_seed(seed)
            torch.backends.cudnn.deterministic = True
        self.save_model_path = save_model_path
        self.load_model_path = load_model_path
        # Populated later by fit()/predict().
        self.net = None
        self.c = None
        self.test_df = None
        self.test_labels = None
        # for ablation study
        self.nac = nac
        self.umc = umc
        return
    def fit(self, X: pd.DataFrame):
        """
        Fit detector.
        Parameters
        ----------
        X: dataframe of pandas
            input training set
        """
        dim = X.shape[1]
        data = X.values
        # slide a window over the series to build (n, seq_len, dim) sub-sequences
        sequences = get_sub_seqs(data, seq_len=self.seq_len, stride=self.stride)
        # fixed-seed shuffle so the train/val split below is reproducible
        sequences = sequences[RandomState(42).permutation(len(sequences))]
        if self.train_val_pc > 0:
            # hold out the trailing train_val_pc fraction (after shuffling) for validation
            train_seqs = sequences[: -int(self.train_val_pc * len(sequences))]
            val_seqs = sequences[-int(self.train_val_pc * len(sequences)):]
        else:
            train_seqs = sequences
            val_seqs = None
        self.net = self.network_init(dim)
        # initialize the hypersphere center from the untrained net (Deep SVDD style)
        self.set_c(train_seqs)
        self.net = self.train(self.net, train_seqs, val_seqs)
        if self.save_model_path is not None:
            # persist both the network weights and the learned center
            os.makedirs(os.path.split(self.save_model_path)[0], exist_ok=True)
            state = {'model_state': self.net.state_dict(), 'c': self.c}
            torch.save(state, self.save_model_path)
        return
    def train(self, net, train_seqs, val_seqs=None):
        """Training loop: one-class (DSVDD) objective, optionally with the
        uncertainty-modeled duplicate head (UMC) and the native-anomaly
        classification pretext head (NAC).

        Parameters
        ----------
        net: network produced by `network_init`
        train_seqs: training sub-sequences, shape (n, seq_len, dim)
        val_seqs: optional validation sub-sequences

        Returns
        -------
        the trained network (restored to checkpointed weights when
        early stopping is enabled)
        """
        val_loader = DataLoader(dataset=SubseqData(val_seqs),
                                batch_size=self.batch_size,
                                drop_last=False, shuffle=False) if val_seqs is not None else None
        optimizer = torch.optim.Adam(net.parameters(), lr=self.lr)
        criterion_oc = DSVDDLoss(c=self.c)
        criterion_oc_umc = DSVDDUncLoss(c=self.c, reduction='mean')
        criterion_mse = torch.nn.MSELoss(reduction='mean')
        early_stp = EarlyStopping(intermediate_dir=self.model_dir,
                                  patience=7, delta=1e-6, model_name='couta', verbose=False)
        # label -1 marks normal samples for the pretext regression head
        y0 = -1 * torch.ones(self.batch_size).float().to(self.device)
        net.train()
        for i in range(self.num_epochs):
            train_loader = DataLoader(dataset=SubseqData(train_seqs),
                                      batch_size=self.batch_size,
                                      drop_last=True, pin_memory=True, shuffle=True)
            # per-epoch, per-batch seeds so negative-sample generation is reproducible
            rng = RandomState(seed=self.seed+i)
            epoch_seed = rng.randint(0, 1e+6, len(train_loader))
            loss_lst, loss_oc_lst, loss_ssl_lst, = [], [], []
            for ii, x0 in enumerate(train_loader):
                x0 = x0.float().to(self.device)
                x0_output = net(x0)
                if self.umc:
                    # uncertainty-modeled loss over the two representation heads
                    rep_x0 = x0_output[0]
                    rep_x0_dup = x0_output[1]
                    loss_oc = criterion_oc_umc(rep_x0, rep_x0_dup)
                else:
                    rep = x0_output[0]
                    loss_oc = criterion_oc(rep)
                if self.nac:
                    # draw a subset of this batch and turn them into native anomalies
                    neg_cand_idx = RandomState(epoch_seed[ii]).randint(0, self.batch_size, self.neg_batch_size)
                    x1, y1 = create_batch_neg(batch_seqs=x0[neg_cand_idx],
                                              max_cut_ratio=self.max_cut_ratio,
                                              seed=epoch_seed[ii],
                                              return_mul_label=False,
                                              ss_type=self.ss_type)
                    x1, y1 = x1.to(self.device), y1.to(self.device)
                    y = torch.hstack([y0, y1])
                    # last network output is the pretext head's score
                    x1_output = net(x1)
                    pred_x1 = x1_output[-1]
                    pred_x0 = x0_output[-1]
                    out = torch.cat([pred_x0, pred_x1]).view(-1)
                    loss_ssl = criterion_mse(out, y)
                else:
                    loss_ssl = 0.
                loss = loss_oc + self.alpha * loss_ssl
                net.zero_grad()
                loss.backward()
                optimizer.step()
                loss_lst.append(loss)
                loss_oc_lst.append(loss_oc)
                # loss_ssl_lst.append(loss_ssl)
            epoch_loss = torch.mean(torch.stack(loss_lst)).data.cpu().item()
            epoch_loss_oc = torch.mean(torch.stack(loss_oc_lst)).data.cpu().item()
            # epoch_loss_ssl = torch.mean(torch.stack(loss_ssl_lst)).data.cpu().item()
            # validation phase
            val_loss = np.NAN
            if val_seqs is not None:
                val_loss = []
                with torch.no_grad():
                    for x in val_loader:
                        x = x.float().to(self.device)
                        x_out = net(x)
                        if self.umc:
                            loss = criterion_oc_umc(x_out[0], x_out[1])
                        else:
                            loss = criterion_oc(x_out[0])
                        loss = torch.mean(loss)
                        val_loss.append(loss)
                val_loss = torch.mean(torch.stack(val_loss)).data.cpu().item()
            if (i+1) % 10 == 0:
                self.log_func(
                    f'|>>> epoch: {i+1:02} | loss: {epoch_loss:.6f}, '
                    f'loss_oc: {epoch_loss_oc:.6f}, '
                    # f'loss_ssl: {epoch_loss_ssl:.6f}, <<<|'
                    f'val_loss: {val_loss:.6f}'
                )
            if self.es:
                # early stopping tracks the one-class loss only
                # early_metric = val_loss+epoch_loss if val_loader is not None else epoch_loss
                early_metric = epoch_loss_oc
                early_stp(early_metric, model=net)
                if early_stp.early_stop:
                    net.load_state_dict(torch.load(early_stp.path))
                    self.log_func("early stop")
                    break
                if i == self.num_epochs - 1:
                    net.load_state_dict(torch.load(early_stp.path))
        return net
    def predict(self, X):
        """
        Predict raw anomaly score of X using the fitted detector.
        For consistency, outliers are assigned with larger anomaly scores.
        Parameters
        ----------
        X: pd.DataFrame
            testing dataframe
        Returns
        -------
        predictions_dic: dictionary of predicted results
            The anomaly score of the input samples.
        """
        data = X.values
        # stride=1 at test time: one score per time step once warm-up is past
        test_sub_seqs = get_sub_seqs(data, seq_len=self.seq_len, stride=1)
        test_dataset = SubseqData(test_sub_seqs)
        dataloader = DataLoader(dataset=test_dataset, batch_size=self.batch_size, drop_last=False, shuffle=False)
        if self.load_model_path is not None:
            # restore a previously saved network and hypersphere center
            state = torch.load(self.load_model_path)
            self.net = self.network_init(data.shape[1])
            self.net.load_state_dict(state['model_state'])
            self.c = state['c']
        representation_lst = []
        representation_lst2 = []
        self.net.eval()
        with torch.no_grad():
            for x in dataloader:
                x = x.float().to(self.device)
                x_output = self.net(x)
                representation_lst.append(x_output[0])
                if self.umc:
                    representation_lst2.append(x_output[1])
        reps = torch.cat(representation_lst)
        # anomaly score = squared distance to the hypersphere center
        dis = torch.sum((reps - self.c) ** 2, dim=1).data.cpu().numpy()
        if self.umc:
            # add the duplicate head's distance when uncertainty modeling is on
            reps_dup = torch.cat(representation_lst2)
            dis2 = torch.sum((reps_dup - self.c) ** 2, dim=1).data.cpu().numpy()
            dis = dis + dis2
        # left-pad with zeros for the first seq_len-1 steps that have no full window
        dis_pad = np.hstack([0 * np.ones(data.shape[0] - dis.shape[0]), dis])
        predictions_dic = {'score_t': dis_pad,
                           'score_tc': None,
                           'error_t': None,
                           'error_tc': None,
                           'recons_tc': None,
                           }
        return predictions_dic
def network_init(self, dim):
net = NetModule(
input_dim=dim,
hidden_dims=self.hidden_dims,
emb_dim=self.emb_dim,
pretext_hidden=self.pretext_hidden,
rep_hidden=self.rep_hidden,
out_dim=1,
kernel_size=self.kernel_size,
dropout=self.dropout,
linear_bias=self.bias,
tcn_bias=self.bias,
pretext=True if self.nac else False,
dup=True if self.umc else False
)
net.to(self.device)
return net
    def set_c(self, seqs, eps=0.1):
        """Initializing the center for the hypersphere.

        The center is the mean representation of the training windows;
        coordinates too close to zero are pushed to +/-eps to avoid a
        trivial (collapsed) solution.
        """
        dataloader = DataLoader(dataset=SubseqData(seqs), batch_size=self.batch_size,
                                drop_last=True, pin_memory=True, shuffle=True)
        z_ = []
        self.net.eval()
        with torch.no_grad():
            for x in dataloader:
                x = x.float().to(self.device)
                x_output = self.net(x)
                rep = x_output[0]
                z_.append(rep.detach())
        z_ = torch.cat(z_)
        c = torch.mean(z_, dim=0)
        # replace near-zero coordinates with +/-eps (sign preserved)
        c[(abs(c) < eps) & (c < 0)] = -eps
        c[(abs(c) < eps) & (c > 0)] = eps
        self.c = c
def create_batch_neg(batch_seqs, max_cut_ratio=0.5, seed=0, return_mul_label=False, ss_type='FULL'):
    """
    Create a batch of negative (anomalous) samples from the input sequences by
    injecting collective, contextual, or point anomalies. The output batch size
    equals the input batch size.

    :param batch_seqs: input sequences, shape (batch, seq_len, dim);
        torch.Tensor or np.ndarray (the contextual branches use torch.mean,
        so tensors are the primary use case)
    :param max_cut_ratio: maximum fraction of the sequence tail to perturb
    :param seed: random seed controlling cut positions, dims, and anomaly types
    :param return_mul_label: if True, return per-family labels
        (1=collective, 2=contextual, 3=point); otherwise all negatives get label 1
    :param ss_type: which anomaly family to inject:
        'FULL' (all families), 'collective', 'contextual', or 'point'
    :return: (batch_neg, neg_labels)
    """
    rng = np.random.RandomState(seed=seed)
    batch_size, l, dim = batch_seqs.shape

    # tail-segment start position and the subset of feature dims to perturb, per sample
    cut_start = l - rng.randint(1, int(max_cut_ratio * l), size=batch_size)
    n_cut_dim = rng.randint(1, dim+1, size=batch_size)
    cut_dim = [rng.randint(dim, size=n_cut_dim[i]) for i in range(batch_size)]

    if isinstance(batch_seqs, np.ndarray):
        batch_neg = batch_seqs.copy()
        neg_labels = np.zeros(batch_size, dtype=int)
    else:
        batch_neg = batch_seqs.clone()
        neg_labels = torch.LongTensor(batch_size)

    n_types = 6
    if ss_type != 'FULL':
        # Restrict the anomaly-type pool to the requested family.
        # bugfix: the original built a list of *booleans* here
        # (e.g. [a % 6 == 0 or a % 6 == 1 for a in pool]), so `flag % 6` was
        # always 0 or 1 and 'contextual'/'point' silently produced collective
        # anomalies. Filter the pool to integers of the right residues instead.
        pool = rng.randint(1e+6, size=int(1e+4))
        if ss_type == 'collective':
            pool = [a for a in pool if a % n_types in (0, 1)]
        elif ss_type == 'contextual':
            pool = [a for a in pool if a % n_types in (2, 3)]
        elif ss_type == 'point':
            pool = [a for a in pool if a % n_types in (4, 5)]
        flags = rng.choice(pool, size=batch_size, replace=False)
    else:
        flags = rng.randint(1e+5, size=batch_size)

    for ii in range(batch_size):
        flag = flags[ii]
        # collective anomalies: clamp the tail segment to a constant
        if flag % n_types == 0:
            batch_neg[ii, cut_start[ii]:, cut_dim[ii]] = 0
            neg_labels[ii] = 1
        elif flag % n_types == 1:
            batch_neg[ii, cut_start[ii]:, cut_dim[ii]] = 1
            neg_labels[ii] = 1
        # contextual anomalies: shift the last step away from the recent mean
        elif flag % n_types == 2:
            mean = torch.mean(batch_neg[ii, -10:, cut_dim[ii]], dim=0)
            batch_neg[ii, -1, cut_dim[ii]] = mean + 0.5
            neg_labels[ii] = 2
        elif flag % n_types == 3:
            mean = torch.mean(batch_neg[ii, -10:, cut_dim[ii]], dim=0)
            batch_neg[ii, -1, cut_dim[ii]] = mean - 0.5
            neg_labels[ii] = 2
        # point anomalies: extreme value at the last step
        elif flag % n_types == 4:
            batch_neg[ii, -1, cut_dim[ii]] = 2
            neg_labels[ii] = 3
        elif flag % n_types == 5:
            batch_neg[ii, -1, cut_dim[ii]] = -2
            neg_labels[ii] = 3

    if return_mul_label:
        return batch_neg, neg_labels
    else:
        # single-class labels: every generated sample is "anomalous" (=1)
        neg_labels = torch.ones(batch_size).long()
        return batch_neg, neg_labels
class SubseqData(Dataset):
    """Torch Dataset wrapping sub-sequences with optional labels and sample weights."""

    def __init__(self, x, y=None, w1=None, w2=None):
        self.sub_seqs = x          # array-like of sub-sequences
        self.label = y             # optional per-sequence labels
        self.sample_weight1 = w1   # optional first sample-weight array
        self.sample_weight2 = w2   # optional second sample-weight array

    def __len__(self):
        return len(self.sub_seqs)

    def __getitem__(self, idx):
        # seq + label + both weights
        if self.label is not None and self.sample_weight1 is not None and self.sample_weight2 is not None:
            return self.sub_seqs[idx], self.label[idx], self.sample_weight1[idx], self.sample_weight2[idx]
        # seq + label
        if self.label is not None:
            return self.sub_seqs[idx], self.label[idx]
        # seq + first weight only (bugfix: previously referenced nonexistent
        # `self.sample_weight`, raising AttributeError on this path)
        elif self.sample_weight1 is not None and self.sample_weight2 is None:
            return self.sub_seqs[idx], self.sample_weight1[idx]
        # seq + both weights, no label
        elif self.sample_weight1 is not None and self.sample_weight2 is not None:
            return self.sub_seqs[idx], self.sample_weight1[idx], self.sample_weight2[idx]
        return self.sub_seqs[idx]
class DSVDDUncLoss(torch.nn.Module):
    """Uncertainty-weighted Deep SVDD loss over two representation heads."""

    def __init__(self, c, reduction='mean'):
        super(DSVDDUncLoss, self).__init__()
        self.c = c                  # hypersphere center
        self.reduction = reduction  # 'mean' | 'sum' | anything else = per-sample

    def forward(self, rep, rep2):
        # squared distance of each head's representation to the center
        d1 = ((rep - self.c) ** 2).sum(dim=1)
        d2 = ((rep2 - self.c) ** 2).sum(dim=1)
        # disagreement between the two heads acts as an uncertainty estimate
        var = (d1 - d2) ** 2
        loss = 0.5 * torch.exp(-var) * (d1 + d2) + 0.5 * var
        if self.reduction == 'mean':
            return torch.mean(loss)
        if self.reduction == 'sum':
            return torch.sum(loss)
        return loss
class DSVDDLoss(torch.nn.Module):
    """Plain Deep SVDD loss: squared distance of representations to a fixed center."""

    def __init__(self, c, reduction='mean'):
        super(DSVDDLoss, self).__init__()
        self.c = c                  # hypersphere center
        self.reduction = reduction  # 'mean' | 'sum' | anything else = per-sample

    def forward(self, rep, sample_weight=None):
        # per-sample squared distance to the center
        loss = ((rep - self.c) ** 2).sum(dim=1)
        if sample_weight is not None:
            loss = loss * sample_weight
        if self.reduction == 'mean':
            return torch.mean(loss)
        if self.reduction == 'sum':
            return torch.sum(loss)
        return loss
| 18,097 | 34.278752 | 113 | py |
couta | couta-main/src/algorithms/net.py | import torch
import torch.nn as nn
from torch.nn.utils import weight_norm
class Chomp1d(nn.Module):
    """Trim the trailing `chomp_size` time steps introduced by causal padding."""

    def __init__(self, chomp_size):
        super(Chomp1d, self).__init__()
        self.chomp_size = chomp_size

    def forward(self, x):
        # drop the last chomp_size positions along the time axis (dim 2)
        trimmed = x[:, :, :-self.chomp_size]
        return trimmed.contiguous()
class TemporalBlock(nn.Module):
    def __init__(self, n_inputs, n_outputs, kernel_size, stride, dilation, padding,
                 bias=True, dropout=0.2, residual=True):
        """
        Residual block of a Temporal Convolutional Network (TCN):
        two weight-normalized dilated causal convolutions with ReLU/dropout,
        plus an optional residual connection.
        :param n_inputs: int, input channels
        :param n_outputs: int, output channels
        :param kernel_size: int, convolutional kernel size
        :param stride: int, convolution stride
        :param dilation: int, dilation factor of both convolutions
        :param padding: int, causal padding; the extra trailing steps are
            removed again by Chomp1d
        :param bias: bool, bias term of the convolutions
        :param dropout: float, dropout rate
        :param residual: bool, add the (optionally 1x1-projected) input to the output
        """
        super(TemporalBlock, self).__init__()
        self.conv1 = weight_norm(nn.Conv1d(n_inputs, n_outputs, kernel_size,
                                           stride=stride, padding=padding, bias=bias,
                                           dilation=dilation))
        self.conv2 = weight_norm(nn.Conv1d(n_outputs, n_outputs, kernel_size,
                                           stride=stride, padding=padding, bias=bias,
                                           dilation=dilation))
        # NOTE(review): the two attributes below are never used by forward();
        # self.net builds fresh Chomp1d/Dropout instances of its own
        self.Chomp1d = Chomp1d(padding)
        self.dropout = torch.nn.Dropout(dropout)
        self.residual = residual
        self.net = nn.Sequential(self.conv1, Chomp1d(padding), nn.ReLU(), nn.Dropout(dropout),
                                 self.conv2, Chomp1d(padding), nn.ReLU(), nn.Dropout(dropout))
        # 1x1 conv aligns channel counts for the residual connection when needed
        self.downsample = nn.Conv1d(n_inputs, n_outputs, 1) if n_inputs != n_outputs else None
        self.relu = nn.ReLU()
        self.init_weights()
    def init_weights(self):
        # small gaussian initialization for all conv weights
        self.conv1.weight.data.normal_(0, 0.01)
        self.conv2.weight.data.normal_(0, 0.01)
        if self.downsample is not None:
            self.downsample.weight.data.normal_(0, 0.01)
    def forward(self, x):
        # x: (batch, channels, time)
        out = self.net(x)
        if self.residual:
            res = x if self.downsample is None else self.downsample(x)
            return out+res
        else:
            return out
class NetModule(torch.nn.Module):
    """TCN encoder with a representation head, an optional duplicate
    representation head (`dup`), and an optional pretext scoring head
    (`pretext`). The duplicate head shares `l2` but has its own first
    linear layer."""
    def __init__(self, input_dim, hidden_dims=32, rep_hidden=32, pretext_hidden=16,
                 emb_dim=10, kernel_size=2, dropout=0.2, out_dim=2,
                 tcn_bias=True, linear_bias=True,
                 dup=False, pretext=False):
        super(NetModule, self).__init__()
        self.layers = []
        if type(hidden_dims) == int: hidden_dims = [hidden_dims]
        num_layers = len(hidden_dims)
        for i in range(num_layers):
            # exponentially growing dilation -> exponentially growing receptive field
            dilation_size = 2 ** i
            padding_size = (kernel_size-1) * dilation_size
            in_channels = input_dim if i == 0 else hidden_dims[i-1]
            out_channels = hidden_dims[i]
            self.layers += [TemporalBlock(in_channels, out_channels, kernel_size,
                                          stride=1, dilation=dilation_size,
                                          padding=padding_size, dropout=dropout,
                                          bias=tcn_bias, residual=True)]
        self.network = nn.Sequential(*self.layers)
        # representation head: hidden_dims[-1] -> rep_hidden -> emb_dim
        self.l1 = nn.Linear(hidden_dims[-1], rep_hidden, bias=linear_bias)
        self.l2 = nn.Linear(rep_hidden, emb_dim, bias=linear_bias)
        self.act = torch.nn.LeakyReLU()
        self.dup = dup
        self.pretext = pretext
        if dup:
            # second head: own first layer, shared l2
            self.l1_dup = nn.Linear(hidden_dims[-1], rep_hidden, bias=linear_bias)
        if pretext:
            self.pretext_l1 = nn.Linear(hidden_dims[-1], pretext_hidden, bias=linear_bias)
            self.pretext_l2 = nn.Linear(pretext_hidden, out_dim, bias=linear_bias)
    def forward(self, x):
        # x: (batch, time, feat) -> conv over time -> back to (batch, time, chan)
        out = self.network(x.transpose(2, 1)).transpose(2, 1)
        # keep only the final time step's features as the sequence summary
        out = out[:, -1]
        rep = self.l2(self.act(self.l1(out)))
        # pretext head
        if self.pretext:
            score = self.pretext_l2(self.act(self.pretext_l1(out)))
            if self.dup:
                rep_dup = self.l2(self.act(self.l1_dup(out)))
                return rep, rep_dup, score
            else:
                return rep, score
        else:
            if self.dup:
                rep_dup = self.l2(self.act(self.l1_dup(out)))
                return rep, rep_dup
            else:
                return rep
| 4,435 | 35.360656 | 94 | py |
couta | couta-main/src/algorithms/canonical_oc_algo.py | import pandas as pd
import numpy as np
import torch
import random
import os
from numpy.random import RandomState
from torch.utils.data import DataLoader, Dataset
from .algorithm_utils import get_sub_seqs, EarlyStopping
from src.algorithms.net import NetModule
class Canonical:
    """Canonical one-class (Deep SVDD style) baseline over sub-sequences.

    Ablation baseline of COUTA: a TCN encoder trained with the plain DSVDD
    objective only (no pretext head, no duplicate head). The extra
    constructor parameters are accepted purely for signature compatibility
    with the full model.
    """
    def __init__(self, sequence_length=100, stride=1,
                 num_epochs=40, batch_size=64, lr=1e-4,
                 hidden_dims=16, emb_dim=16, rep_hidden=16,
                 kernel_size=2, dropout=0.0, bias=True,
                 seed=0, es=False, device='cuda',
                 data_name=None, logger=None, model_dir='couta_model/',
                 # useless parameters, consistent with the proposed model
                 pretext_hidden=10, alpha=0.1, neg_batch_ratio=0.2, **others,
                 ):
        self.seq_len = sequence_length
        self.stride = stride
        self.batch_size = batch_size
        self.num_epochs = num_epochs
        self.lr = lr
        self.device = device
        self.kernel_size = kernel_size
        self.hidden_dims = hidden_dims
        self.emb_dim = emb_dim
        self.rep_hidden = rep_hidden
        self.pretext_hidden = pretext_hidden
        self.dropout = dropout
        self.bias = bias
        self.seed = seed
        self.es = es
        self.train_val_pc = 0.25
        self.data_name = data_name
        self.log_func = logger.info if logger is not None else print
        self.model_dir = model_dir
        os.makedirs(model_dir, exist_ok=True)
        self.net = None
        self.c = None
        # log the effective hyper-parameters (drop non-serializable/unused entries)
        param_lst = locals()
        del param_lst['self']
        del param_lst['device']
        del param_lst['logger']
        del param_lst['data_name']
        del param_lst['alpha']
        del param_lst['neg_batch_ratio']
        del param_lst['pretext_hidden']
        self.log_func(param_lst)
        if seed is not None:
            # pin all RNGs for reproducibility
            self.seed = seed
            np.random.seed(seed)
            random.seed(seed)
            torch.manual_seed(seed)
            torch.cuda.manual_seed(seed)
            torch.backends.cudnn.deterministic = True
        return
    def fit(self, X: pd.DataFrame):
        """Fit the detector on a pandas DataFrame of training data."""
        dim = X.shape[1]
        sequences = get_sub_seqs(X.values, seq_len=self.seq_len, stride=self.stride)
        # fixed-seed shuffle for a reproducible train/val split
        sequences = sequences[RandomState(42).permutation(len(sequences))]
        if self.train_val_pc > 0:
            val_seqs = sequences[-int(self.train_val_pc * len(sequences)):]
            train_seqs = sequences[: -int(self.train_val_pc * len(sequences))]
        else:
            train_seqs = sequences
            val_seqs = None
        # plain TCN encoder: both ablation heads disabled
        self.net = NetModule(
            input_dim=dim,
            hidden_dims=self.hidden_dims,
            emb_dim=self.emb_dim,
            pretext_hidden=self.pretext_hidden,
            rep_hidden=self.rep_hidden,
            out_dim=1,
            kernel_size=self.kernel_size,
            dropout=self.dropout,
            linear_bias=self.bias,
            tcn_bias=self.bias,
            pretext=False,
            dup=False
        )
        self.net.to(self.device)
        self.set_c(train_seqs)
        self.net = self.train(self.net, train_seqs, val_seqs)
        return
    def train(self, net, train_seqs, val_seqs=None):
        """Train with the DSVDD objective; returns the (possibly early-stopped) net."""
        val_loader = DataLoader(dataset=SubseqData(val_seqs), batch_size=self.batch_size,
                                drop_last=False, shuffle=False)
        optimizer = torch.optim.Adam(net.parameters(), lr=self.lr)
        criterion_dsvdd = DSVDDLoss(c=self.c)
        early_stp = EarlyStopping(intermediate_dir=self.model_dir,
                                  patience=7, model_name=self.__class__.__name__, verbose=False)
        net.train()
        for i in range(self.num_epochs):
            train_loader = DataLoader(dataset=SubseqData(train_seqs), batch_size=self.batch_size,
                                      drop_last=False, pin_memory=True, shuffle=True)
            train_loss = []
            for x in train_loader:
                x = x.float().to(self.device)
                rep = net(x)
                loss = criterion_dsvdd(rep)
                net.zero_grad()
                loss.backward()
                optimizer.step()
                loss = loss.cpu().data.item()
                train_loss.append(loss)
            train_loss = np.mean(train_loss)
            # Get Validation loss
            val_loss = np.NAN
            if val_loader is not None:
                net.eval()
                val_loss = []
                with torch.no_grad():
                    for x in val_loader:
                        x = x.float().to(self.device)
                        rep = net(x)
                        loss = criterion_dsvdd(rep)
                        loss = loss.cpu().data.item()
                        val_loss.append(loss)
                val_loss = np.mean(val_loss)
            self.log_func(f'epoch: {i+1:02}, train_loss: {train_loss:.6f}, val_loss: {val_loss:.6f}')
            # pbar.set_postfix(epoch_loss=f'{np.mean(train_loss):.6f}')
            if self.es:
                early_metric = val_loss if val_loader is not None else train_loss
                early_stp(early_metric, model=net)
                if early_stp.early_stop:
                    net.load_state_dict(torch.load(early_stp.path))
                    self.log_func("early stop")
                    break
        return net
    def predict(self, X):
        """Score each time step of X; larger scores indicate anomalies."""
        data = X.values
        test_sub_seqs = get_sub_seqs(data, seq_len=self.seq_len, stride=1)
        test_dataset = SubseqData(test_sub_seqs)
        dataloader = DataLoader(dataset=test_dataset, batch_size=self.batch_size, drop_last=False, shuffle=False)
        rep_lst = []
        self.net.eval()
        with torch.no_grad():
            for x in dataloader:
                # NOTE(review): hard-coded 'cuda' here, unlike self.device elsewhere
                x = x.float().to('cuda')
                rep = self.net(x)
                rep_lst.append(rep)
        rep_emb = torch.cat(rep_lst)
        # anomaly score = squared distance to the hypersphere center
        rep_score_ = torch.sum((rep_emb - self.c) ** 2, dim=1).data.cpu().numpy()
        # left-pad with zeros for steps that have no full window yet
        rep_score_pad = np.hstack([0 * np.ones(data.shape[0] - rep_score_.shape[0]), rep_score_])
        predictions_dic = {'score_t': rep_score_pad,
                           'score_tc': None,
                           'error_t': None,
                           'error_tc': None,
                           'recons_tc': None,
                           }
        return predictions_dic
    def set_c(self, seqs, eps=0.1):
        """Initializing the center for the hypersphere"""
        dataloader = DataLoader(dataset=SubseqData(seqs), batch_size=self.batch_size,
                                drop_last=True, pin_memory=True, shuffle=True)
        z_ = []
        self.net.eval()
        with torch.no_grad():
            for x in dataloader:
                x = x.float().to(self.device)
                rep = self.net(x)
                z_.append(rep.detach())
        z_ = torch.cat(z_)
        c = torch.mean(z_, dim=0)
        # push near-zero coordinates to +/-eps to avoid a collapsed center
        c[(abs(c) < eps) & (c < 0)] = -eps
        c[(abs(c) < eps) & (c > 0)] = eps
        self.c = c
class SubseqData(Dataset):
    """Torch Dataset wrapping sub-sequences with optional labels and sample weights."""

    def __init__(self, x, y=None, w1=None, w2=None):
        self.sub_seqs = x          # array-like of sub-sequences
        self.label = y             # optional per-sequence labels
        self.sample_weight1 = w1   # optional first sample-weight array
        self.sample_weight2 = w2   # optional second sample-weight array

    def __len__(self):
        return len(self.sub_seqs)

    def __getitem__(self, idx):
        # seq + label + both weights
        if self.label is not None and self.sample_weight1 is not None and self.sample_weight2 is not None:
            return self.sub_seqs[idx], self.label[idx], self.sample_weight1[idx], self.sample_weight2[idx]
        # seq + label
        if self.label is not None:
            return self.sub_seqs[idx], self.label[idx]
        # seq + first weight only (bugfix: previously referenced nonexistent
        # `self.sample_weight`, raising AttributeError on this path)
        elif self.sample_weight1 is not None and self.sample_weight2 is None:
            return self.sub_seqs[idx], self.sample_weight1[idx]
        # seq + both weights, no label
        elif self.sample_weight1 is not None and self.sample_weight2 is not None:
            return self.sub_seqs[idx], self.sample_weight1[idx], self.sample_weight2[idx]
        return self.sub_seqs[idx]
class DSVDDLoss(torch.nn.Module):
    """Plain Deep SVDD loss: squared distance of representations to a fixed center."""

    def __init__(self, c, reduction='mean'):
        super(DSVDDLoss, self).__init__()
        self.c = c                  # hypersphere center
        self.reduction = reduction  # 'mean' | 'sum' | anything else = per-sample

    def forward(self, rep, sample_weight=None):
        # per-sample squared distance to the center
        loss = ((rep - self.c) ** 2).sum(dim=1)
        if sample_weight is not None:
            loss = loss * sample_weight
        if self.reduction == 'mean':
            return torch.mean(loss)
        if self.reduction == 'sum':
            return torch.sum(loss)
        return loss
Elektrum | Elektrum-main/src/runAmber_kinn.py | #!/usr/bin/env python
# coding: utf-8
# # Probablistic model building genetic algorithm
from silence_tensorflow import silence_tensorflow
silence_tensorflow()
from src.kinetic_model import KineticModel, modelSpace_to_modelParams
from src.neural_network_builder import KineticNeuralNetworkBuilder, KineticEigenModelBuilder
from src.neural_search import search_env
from src.data import load_finkelstein_data as get_data
from src.model_spaces import get_cas9_uniform_ms, get_cas9_finkelstein_ms, get_cas9_finkelstein_ms_with_hidden
from src.reload import reload_from_dir
import warnings
warnings.filterwarnings('ignore')
import matplotlib.pyplot as plt
import seaborn as sns
import scipy.stats as ss
import pandas as pd
import numpy as np
from tqdm import tqdm
import tensorflow as tf
import os
import sys
import argparse
import pickle
import amber
print(amber.__version__)
from amber.architect import pmbga
def parse():
    """Parse command-line arguments, create the working directory, and
    pickle the parsed arguments to <wd>/args.pkl for later reloading.

    Returns
    -------
    argparse.Namespace with fields: target, use_sink_state, ms, wd,
    n_states, win_size, switch.
    """
    parser = argparse.ArgumentParser()
    # target column to fit; the triple-quoted block is split() into choices
    parser.add_argument('--target', type=str, choices="""wtCas9_cleave_rate_log
            Cas9_enh_cleave_rate_log
            Cas9_hypa_cleave_rate_log
            Cas9_HF1_cleave_rate_log
            wtCas9_cleave_rate_log_specificity
            Cas9_enh_cleave_rate_log_specificity
            Cas9_hypa_cleave_rate_log_specificity
            Cas9_HF1_cleave_rate_log_specificity
            wtCas9_ndABA
            Cas9_enh_ndABA
            Cas9_hypa_ndABA
            Cas9_HF1_ndABA""".split(), required=True)
    parser.add_argument('--use-sink-state', action="store_true", default=False)
    parser.add_argument('--ms', type=str, choices=['finkelstein', 'uniform'], required=True)
    parser.add_argument('--wd', type=str, required=True)
    parser.add_argument('--n-states', type=int, default=4, required=False)
    parser.add_argument('--win-size', type=int, default=None, required=False)
    parser.add_argument("--switch", type=int, default=0, help="switch to train on gRNA2, test on gRNA1; default 0-false")
    args = parser.parse_args()
    os.makedirs(args.wd, exist_ok=True)
    # persist args so downstream analysis can reload the run configuration
    pickle.dump(args, open(os.path.join(args.wd, "args.pkl"), "wb"))
    return args
def main():
    """Entry point: build the KINN model space, run the PMBGA architecture
    search, then reload and plot the best found model's test predictions."""
    args = parse()
    if args.ms == "finkelstein":
        kinn_model_space = get_cas9_finkelstein_ms_with_hidden(use_sink_state=args.use_sink_state)
    else:
        kinn_model_space = get_cas9_uniform_ms(n_states=args.n_states, st_win_size=args.win_size, use_sink_state=args.use_sink_state)
    print("use sink state:", args.use_sink_state)
    print(kinn_model_space)
    controller = pmbga.ProbaModelBuildGeneticAlgo(
                model_space=kinn_model_space,
                buffer_type='population',
                buffer_size=50,  # buffer size controls the max history going back
                batch_size=1,    # batch size does not matter in this case; all arcs will be retrieved
                ewa_beta=0.0     # ewa_beta approximates the moving average over 1/(1-ewa_beta) prev points
            )
    make_switch = args.switch != 0
    logbase = 10
    res = get_data(target=args.target, make_switch=make_switch, logbase=logbase, include_ref=False)
    print("switch gRNA_1 to testing and gRNA_2 to training:", make_switch)
    # unpack data tuple
    (x_train, y_train), (x_test, y_test) = res
    # output op: clip rates to [1e-5, 1e-1] and take log-base-`logbase`;
    # with a sink state, the rate is read off the (negated) second eigen output
    if args.use_sink_state:
        output_op = lambda: tf.keras.layers.Lambda(lambda x: tf.math.log(tf.clip_by_value(tf.reshape(- x[:,1], (-1,1)), 10**-5, 10**-1))/np.log(logbase), name="output_slice")
        #output_op = lambda: tf.keras.layers.Lambda(lambda x: tf.clip_by_value(tf.reshape(- x[:,1], (-1,1)), 10**-5, 10**-1), name="output_slice")
    else:
        output_op = lambda: tf.keras.layers.Lambda(lambda x: tf.math.log(tf.clip_by_value(x, 10**-5, 10**-1))/np.log(logbase), name="output_log")
        #output_op = lambda: tf.keras.layers.Dense(units=1, activation="linear", name="output_nonneg", kernel_constraint=tf.keras.constraints.NonNeg())
    # trainEnv parameters
    evo_params = dict(
        model_fn = KineticEigenModelBuilder if args.use_sink_state else KineticNeuralNetworkBuilder,
        samps_per_gen = 10,  # how many arcs to sample in each generation; important
        max_gen = 600,
        patience = 200,
        n_warmup_gen = 0,
        train_data = (x_train, y_train),
        test_data = (x_test, y_test)
    )
    # this learning rate is trickier than usual, for eigendecomp to work
    initial_learning_rate = 0.01
    batch_size = 512
    lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay(
        initial_learning_rate,
        decay_steps=10*int(7000/batch_size),  # decrease every 10 epochs
        decay_rate=0.9,
        staircase=True)
    manager_kwargs = {
        'output_op': output_op,
        'n_feats': 25,
        'n_channels': 9,
        'batch_size': batch_size,
        'epochs': 300,
        'earlystop': 15,
        'optimizer': lambda: tf.keras.optimizers.Adam(learning_rate=lr_schedule, clipnorm=1.0),
        'verbose': 0
    }
    controller, hist, stat_df = search_env(
        controller=controller,
        wd=args.wd,
        evo_params=evo_params,
        manager_kwargs=manager_kwargs
    )
    # plot the best model
    mb = reload_from_dir(wd=args.wd, manager_kwargs=manager_kwargs, model_fn=evo_params['model_fn'])
    tf.keras.utils.plot_model(mb.model, to_file=os.path.join(args.wd, "model.png"))
    y_hat = mb.predict(x_test).flatten()
    h = sns.jointplot(y_test, y_hat)
    h.set_axis_labels("obs", "pred", fontsize=16)
    p = ss.pearsonr(y_hat, y_test)
    h.fig.suptitle("Testing prediction, pcc=%.3f"%p[0], fontsize=16)
    plt.savefig(os.path.join(args.wd, "test_pred.png"))
    return controller
# Script entry point; skip auto-run when imported inside IPython/Jupyter.
if __name__ == "__main__":
    if not amber.utils.run_from_ipython():
        main()
| 5,631 | 39.517986 | 174 | py |
Elektrum | Elektrum-main/src/neural_search.py | from src.kinetic_model import KineticModel, modelSpace_to_modelParams
from src.neural_network_builder import KineticNeuralNetworkBuilder, KineticEigenModelBuilder
import tensorflow.compat.v1 as tf1
import tensorflow as tf
from tensorflow.keras.optimizers import SGD, Adam
from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping
import os, sys, shutil, pickle, gc, time
from datetime import datetime
import matplotlib as mpl
import matplotlib.pyplot as plt
import seaborn as sns
import scipy.stats as ss
import numpy as np
import pandas as pd
import warnings
warnings.filterwarnings('ignore')
# ## A For-Loop that does the work for `amber.architect.trainEnv`
def search_env(controller, wd, evo_params, manager_kwargs=None, disable_posterior_update=False):
    """Run the generational neural-architecture search loop
    (a stand-in for `amber.architect.trainEnv`).

    Parameters
    ----------
    controller : PMBGA controller
        samples architectures (`get_action`), stores rewards (`store`),
        and updates its posterior (`train`)
    wd : str
        working directory for checkpoints, logs and plots
    evo_params : dict
        evolution settings; recognized keys: samps_per_gen, max_gen,
        patience, n_warmup_gen, model_fn, train_data, test_data
    manager_kwargs : dict, optional
        keyword args forwarded to `get_reward_pipeline`
    disable_posterior_update : bool
        if True, close out each generation without updating the
        controller's posterior

    Returns
    -------
    (controller, hist, stat_df) : the trained controller, a list of
    per-sample records, and a per-generation summary DataFrame
    """
    manager_kwargs = manager_kwargs or {}
    # unpack evo params
    samps_per_gen = evo_params.get("samps_per_gen", 10)
    max_gen = evo_params.get("max_gen", 200)
    patience = evo_params.get("patience", 20)
    n_warmup_gen = evo_params.get("n_warmup_gen", 0)
    model_fn = evo_params.get("model_fn", KineticNeuralNetworkBuilder)
    output_op = evo_params.get("output_op", None)
    x_train, y_train = evo_params.get("train_data", (None, None))
    x_test, y_test = evo_params.get("test_data", (None, None))
    # variables to be filled
    hist = []
    pc_cnt = 0      # patience counter: generations without improvement
    best_indv = 0   # best test reward observed so far
    stat_df = pd.DataFrame(columns=['Generation', 'GenAvg', 'Best', 'PostVar'])
    for generation in range(max_gen):
        try:
            start = time.time()
            has_impr = False
            for _ in range(samps_per_gen):
                # sample an architecture from the controller
                arc, _ = controller.get_action()
                # evaluate it; reward = test-set Pearson correlation
                test_reward = get_reward_pipeline(arc,
                                                  x_train=x_train,
                                                  y_train=y_train,
                                                  x_test=x_test,
                                                  y_test=y_test,
                                                  wd=wd,
                                                  model_fn=model_fn,
                                                  **manager_kwargs
                                                  )
                rate_df = None
                # update best, or increase patience counter; keep the best
                # individual's weights/config aside under stable names
                if test_reward > best_indv:
                    best_indv = test_reward
                    has_impr = True
                    shutil.move(os.path.join(wd, "bestmodel.h5"), os.path.join(wd, "AmberSearchBestModel.h5"))
                    shutil.move(os.path.join(wd, "model_params.pkl"), os.path.join(wd, "AmberSearchBestModel_config.pkl"))
                # store
                _ = controller.store(action=arc, reward=test_reward)
                hist.append({'gen': generation, 'arc': arc, 'test_reward': test_reward, 'rate_df': rate_df})
            end = time.time()
            if generation < n_warmup_gen:
                print(f"Gen {generation} < {n_warmup_gen} warmup.. skipped - Time %.2f" % (end-start), flush=True)
                continue
            if disable_posterior_update is True:
                controller.buffer.finish_path(controller.model_space, generation, working_dir=wd)
            else:
                _ = controller.train(episode=generation, working_dir=wd)
            post_vars = [np.var(x.sample(size=100)) for _, x in controller.model_space_probs.items()]
            # bugfix: DataFrame.append was deprecated in pandas 1.4 and removed
            # in pandas 2.0 -- use pd.concat with a one-row frame instead
            stat_df = pd.concat([stat_df, pd.DataFrame([{
                'Generation': generation,
                'GenAvg': controller.buffer.r_bias,
                'Best': best_indv,
                'PostVar': np.mean(post_vars)
            }])], ignore_index=True)
            print("[%s] Gen %i - Mean fitness %.3f - Best %.4f - PostVar %.3f - Time %.2f" % (
                datetime.now().strftime("%H:%M:%S"),
                generation,
                controller.buffer.r_bias,
                best_indv,
                np.mean(post_vars),
                end-start), flush=True)
            pc_cnt = 0 if has_impr else pc_cnt+1
            if pc_cnt >= patience:
                print("early-stop due to max patience w/o improvement")
                break
        except KeyboardInterrupt:
            print("user interrupted")
            break
    # write out search history (arc encoded as "start-end-kKERNEL" tokens)
    a = pd.DataFrame(hist)
    a['arc'] = ['|'.join([f"{x.Layer_attributes['RANGE_ST']}-{x.Layer_attributes['RANGE_ST']+x.Layer_attributes['RANGE_D']}-k{x.Layer_attributes['kernel_size']}" for x in entry]) for entry in a['arc']]
    a.drop(columns=['rate_df'], inplace=True)
    a.to_csv(os.path.join(wd, "train_history.tsv"), sep="\t", index=False)
    ax = stat_df.plot.line(x='Generation', y=['GenAvg', 'Best'])
    ax.set_ylabel("Reward (Pearson correlation)")
    ax.set_xlabel("Generation")
    plt.savefig(os.path.join(wd, "reward_vs_time.png"))
    # save controller state for later reloading
    pickle.dump(controller, open(os.path.join(wd, "controller_states.pkl"), "wb"))
    # plot posterior vs prior distributions per rate
    make_plots(controller, canvas_nrow=np.ceil(np.sqrt(len(controller.model_space))), wd=wd)
    return controller, hist, stat_df
# poorman's manager get reward
def get_reward_pipeline(model_arcs, x_train, y_train, x_test, y_test, wd, model_fn=None, **kwargs):
    """Poor-man's manager: build, train and score one sampled architecture
    inside an isolated TF1 graph/session; returns the test reward
    (Pearson correlation by default, 0 on NaN/failure)."""
    # unpack keyword args
    n_channels = kwargs.get("n_channels", 9)
    n_feats = kwargs.get("n_feats", 25)
    replace_conv_by_fc = kwargs.get("replace_conv_by_fc", False)
    opt = kwargs.get("optimizer", None)
    output_op = kwargs.get("output_op", None)
    from warnings import simplefilter
    simplefilter(action='ignore', category=DeprecationWarning)
    tf1.logging.set_verbosity(tf1.logging.ERROR)
    # fresh graph + session so each candidate model is fully isolated
    train_graph = tf1.Graph()
    train_sess = tf1.Session(graph=train_graph)
    model_params = modelSpace_to_modelParams(model_arcs)
    # model_params.pkl is later promoted to AmberSearchBestModel_config.pkl
    # by the caller when this architecture is the new best
    pickle.dump(model_params, open(os.path.join(wd, "model_params.pkl"), "wb"))
    tf1.reset_default_graph()
    with train_graph.as_default(), train_sess.as_default():
        kinn_test = KineticModel(model_params)
        mb = model_fn(
            kinn=kinn_test,
            session=train_sess,
            output_op=output_op,
            n_channels=n_channels,
            n_feats=n_feats,
            replace_conv_by_fc=replace_conv_by_fc
        )
        # train and test
        opt = opt() if opt else "adam"
        mb.build(optimizer=opt, plot=False, output_act=False)
        model = mb.model
        x_train_b = mb.blockify_seq_ohe(x_train)
        x_test_b = mb.blockify_seq_ohe(x_test)
        checkpointer = ModelCheckpoint(filepath=os.path.join(wd,"bestmodel.h5"), mode='min', verbose=0, save_best_only=True,
                                       save_weights_only=True)
        earlystopper = EarlyStopping(
            monitor="val_loss",
            mode='min',
            patience=kwargs.get("earlystop", 5),
            verbose=0)
        try:
            hist = model.fit(x_train_b, y_train,
                             batch_size=kwargs.get("batch_size", 128),
                             validation_data=(x_test_b, y_test),
                             callbacks=[checkpointer, earlystopper],
                             epochs=kwargs.get("epochs", 20),
                             verbose=kwargs.get("verbose", 0))
            model.load_weights(os.path.join(wd,"bestmodel.h5"))
            y_hat = model.predict(x_test_b).flatten()
            reward_fn = kwargs.get("reward_fn", lambda y_hat, y_test: ss.pearsonr(y_hat, y_test)[0])
            test_reward = reward_fn(y_hat, y_test)
        except tf.errors.InvalidArgumentError as e: # eigen could fail
            test_reward = np.nan
            #raise e
        #except ValueError:
        #    test_reward = np.nan
        #test_reward = ss.spearmanr(y_hat, y_test).correlation
    # NaN rewards (failed eigendecomposition, degenerate fits) count as 0
    if np.isnan(test_reward):
        test_reward = 0
    del train_graph, train_sess
    del model
    tf.keras.backend.clear_session() # THIS IS IMPORTANT!!! frees graph memory between candidates
    gc.collect()
    return test_reward
def make_plots(controller, canvas_nrow, wd):
    """For each distribution family in the controller's model-space
    probabilities, plot prior vs posterior per rate on a canvas_nrow x
    canvas_nrow grid and save one PNG per family under `wd`.

    NOTE(review): `sns.distplot` is deprecated (removed in seaborn 0.14;
    use histplot/kdeplot) and `ss.mode(d).mode[0]` relies on pre-1.11
    scipy returning an array -- verify against the pinned versions.
    """
    canvas_nrow = int(canvas_nrow)
    # k is a tuple whose last element names the distribution family
    tot_distr = set([k[-1] for k in controller.model_space_probs])
    for distr_key in tot_distr:
        fig, axs_ = plt.subplots(canvas_nrow, canvas_nrow, figsize=(4.5*canvas_nrow,4.5*canvas_nrow))
        # flatten the 2-D axes grid; k[0] (rate ID) indexes into it
        axs = [axs_[i][j] for i in range(len(axs_)) for j in range(len(axs_[i]))]
        for k in controller.model_space_probs:
            if k[-1] == distr_key:
                try:
                    d = controller.model_space_probs[k].sample(size=1000)
                except:
                    # distributions that cannot be sampled are skipped
                    continue
                ax = axs[k[0]]
                sns.distplot(d, label="Post", ax=ax)
                sns.distplot(controller.model_space_probs[k].prior_dist, label="Prior", ax=ax)
                ax.set_title(
                    ' '.join(['Rate ID', str(k[0]), '\nPosterior mode', str(ss.mode(d).mode[0])]))
        fig.suptitle(distr_key)
        fig.tight_layout()
        fig.savefig(os.path.join(wd, f"{distr_key}.png"))
| 9,180 | 43.139423 | 201 | py |
Elektrum | Elektrum-main/src/transfer_learn.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""AMBER NAS for incorporating KINN of different state numbers, and sequence context-specific effects
"""
# FZZ, 2022/10/16
import tensorflow as tf
import numpy as np
import os
import pickle
import h5py
from src.neural_network_builder import KineticNeuralNetworkBuilder
from src.kinetic_model import KineticModel
from amber.modeler.dag import get_layer
from amber.modeler import ModelBuilder
from amber import Amber
from amber.utils import run_from_ipython
from amber.architect import ModelSpace, Operation
import copy
import numpy as np
import scipy.stats as ss
from tensorflow.keras.optimizers import Adam
import argparse
# since tf 1.15 does not have multi-head attention officially implemented..
# we use this workaround
from keras_multi_head import MultiHeadAttention
class KinnLayer(tf.keras.layers.Layer):
    """Keras layer that wraps a pre-trained KINN (kinetic neural network).

    The layer reloads a previously searched KINN from `kinn_dir` (its config
    pickle and weights h5), uses its "header" (sequence -> kinetic rates) as a
    sub-model, adds a learnable linear delta on the rates computed from an
    upstream hidden representation, and re-implements the KINN "body"
    (rates -> King-Altman state occupancies -> activity -> output) in the
    forward pass.

    Call inputs are a pair ``[hidden, seq_ohe]``: the upstream hidden features
    and the one-hot encoded sequence.
    """

    def __init__(
        self,
        kinn_dir,
        manager_kws,
        kinn_trainable=False,
        channels=None,
        *args,
        **kwargs,
    ):
        # kinn_dir: directory holding AmberSearchBestModel_config.pkl / .h5
        # manager_kws: dict of builder options (n_channels, n_feats,
        #   replace_conv_by_fc, output_op) used to reconstruct the KINN
        # kinn_trainable: whether reloaded KINN header weights stay trainable
        # channels: optional channel indices to gather from the one-hot input
        super().__init__(*args, **kwargs)
        self.kinn_trainable = kinn_trainable
        self.kinn_dir = kinn_dir
        self.manager_kws = manager_kws
        self.channels = channels
        self.kinn_layers = {}
        self.mb = None
        # get default session to feed model builder
        self.session = tf.keras.backend.get_session()

    def get_config(self):
        """Serialize constructor arguments so the layer can be re-created."""
        config = super(KinnLayer, self).get_config()
        config.update(
            {
                "kinn_dir": self.kinn_dir,
                "kinn_trainable": self.kinn_trainable,
                "manager_kws": self.manager_kws,
                "channels": self.channels,
            }
        )
        return config

    def build(self, input_shape):
        """Reload the KINN from disk and set up the trainable delta transform.

        Expects ``input_shape`` as a pair (hidden shape, sequence shape).
        """
        assert (
            isinstance(input_shape, (tuple, list)) and len(input_shape) == 2
        ), TypeError(
            "Expect a list of (hidden, input); got %s for kinn input shape"
            % input_shape
        )
        super().build(input_shape=input_shape)
        # will create two tensors with the same names..
        # mb = reload_from_dir(wd=self.kinn_dir, manager_kwargs=self.manager_kws, model_fn=KineticNeuralNetworkBuilder, sess=self.session)
        # self.mb = mb
        # self.kinn_layers = {l.name: l for l in self.mb.model.layers}
        # self.kinn_header = tf.keras.models.Model(inputs=mb.model.inputs, outputs=self.kinn_layers['gather_rates'].output)
        n_channels = self.manager_kws.get("n_channels", 9)
        n_feats = self.manager_kws.get("n_feats", 25)
        replace_conv_by_fc = self.manager_kws.get("replace_conv_by_fc", False)
        output_op = self.manager_kws.get("output_op", None)
        # reload the searched kinetic-model config, then rebuild the KINN
        # graph in the current session
        with open(
            os.path.join(self.kinn_dir, "AmberSearchBestModel_config.pkl"), "rb"
        ) as f:
            model_params = pickle.load(f)
        self.bp = KineticModel(model_params)
        self.mb = KineticNeuralNetworkBuilder(
            kinn=self.bp,
            session=self.session,
            output_op=output_op,
            n_feats=n_feats,
            n_channels=n_channels,
            replace_conv_by_fc=replace_conv_by_fc,
        )
        # returns hidden layers in a dict
        ret = self.mb.build(return_intermediate=True)
        self.kinn_layers = ret
        # header sub-model: one-hot sequence blocks -> concatenated rates
        self.kinn_header = tf.keras.models.Model(
            inputs=[
                self.kinn_layers["inputs_op"][j] for j in self.kinn_layers["inputs_op"]
            ],
            outputs=self.kinn_layers["gather_rates"],
        )
        self.kinn_header.load_weights(
            os.path.join(self.kinn_dir, "AmberSearchBestModel.h5")
        )
        self.output_op = output_op
        # king-altman constants
        self.king_altman_const = tf.constant(
            self.mb.kinn.get_ka_pattern_mat().transpose(), dtype=tf.float32
        )
        self.rate_contrib_map = []
        # rate_contrib_mat = (n_king_altman, n_rates)
        rate_contrib_mat = self.mb.kinn.get_rate_contrib_matrix()
        for k in range(rate_contrib_mat.shape[1]):
            # get each column as mask
            mask = rate_contrib_mat[:, k]
            assert np.sum(mask) <= 1, "k=%i, mask error for %s" % (k, mask)
            if np.sum(mask) == 0:
                continue
            rate_index = np.where(mask == 1)[0][0]
            # map King-Altman occupancy index -> contributing rate index
            self.rate_contrib_map.append((k, rate_index))
        # assert len(self.rate_contrib_map) == 1
        self.units = rate_contrib_mat.shape[0]
        # zero-initialized linear map so the delta starts as a no-op;
        # regularized to keep the correction small
        self.lin_transform = tf.keras.layers.Dense(
            units=self.units,
            activation="linear",
            kernel_initializer="zeros",
            kernel_regularizer=tf.keras.regularizers.L1L2(l1=1e-6, l2=1e-3),
            activity_regularizer=tf.keras.regularizers.L1L2(l1=0, l2=1e-6),
            input_shape=(input_shape[0][-1],),
        )
        # check for kinn trainability
        for layer in self.kinn_header.layers:
            layer.trainable = self.kinn_trainable

    def kinn_body(self, rates):
        """Map kinetic rates to the final output (KINN "body").

        Computes King-Altman occupancies via softmax over the pattern matrix,
        multiplies each contributing (exponentiated) rate by its occupancy,
        takes the product as the activity, and applies `self.output_op`.
        """
        king_altman = tf.nn.softmax(tf.matmul(rates, self.king_altman_const))
        k = [x[0] for x in self.rate_contrib_map]
        rate_index = [x[1] for x in self.rate_contrib_map]
        # rates are parameterized in log-space; exponentiate before use
        rate_layer = tf.math.exp(tf.gather(rates, rate_index, axis=-1))
        ka_slice = tf.gather(king_altman, k, axis=-1)
        activity = tf.reduce_prod(rate_layer * ka_slice, axis=-1, keepdims=True)
        output = get_layer(x=activity, state=self.output_op, with_bn=False)
        return output

    def call(self, inputs):
        """Forward pass; `inputs` is a pair [hidden features, one-hot seq]."""
        hidden, seq_ohe = inputs[0], inputs[1]
        # convert input to delta by linear transformation
        delta = self.lin_transform(hidden)
        # gather target channels, if necessary
        if self.channels is not None:
            seq_ohe = tf.gather(seq_ohe, self.channels, axis=-1)
        inp_list = self.mb.blockify_seq_ohe(seq_ohe)
        rates = self.kinn_header(inp_list)
        # context-specific correction of pre-trained rates
        rates = rates + delta
        # kinn_body is implemented in forward pass
        output = self.kinn_body(rates)
        return output
class InceptionLayer(tf.keras.layers.Layer):
    """Inception-style block: several parallel ReLU Conv1D branches whose
    outputs are concatenated along the channel axis.

    Each branch i uses filters[i], kernel_sizes[i], and dilation_rates[i];
    when no dilation rates are supplied, every branch is undilated.
    """

    def __init__(self, filters, kernel_sizes, dilation_rates=None, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.filters = filters
        self.kernel_sizes = kernel_sizes
        assert len(self.filters) == len(self.kernel_sizes), ValueError(
            "different lengths for filters and kernel-sizes"
        )
        # Default to dilation rate 1 on every branch when unspecified.
        self.dilation_rates = (
            [1 for _ in self.filters] if dilation_rates is None else dilation_rates
        )
        assert len(self.filters) == len(self.dilation_rates), ValueError(
            "different lengths for filters and dilation"
        )

    def get_config(self):
        """Serialize constructor arguments for layer reconstruction."""
        config = super(InceptionLayer, self).get_config()
        config.update(
            {
                "filters": self.filters,
                "kernel_sizes": self.kernel_sizes,
                "dilation_rates": self.dilation_rates,
            }
        )
        return config

    def build(self, input_shape):
        super().build(input_shape=input_shape)
        # One same-padded ReLU conv per (filters, kernel, dilation) triple.
        self.conv_branches = [
            tf.keras.layers.Conv1D(
                filters=n_filters,
                kernel_size=width,
                padding="same",
                activation="relu",
                dilation_rate=rate,
                input_shape=input_shape,
            )
            for n_filters, width, rate in zip(
                self.filters, self.kernel_sizes, self.dilation_rates
            )
        ]

    def call(self, inputs):
        branch_outputs = [branch(inputs) for branch in self.conv_branches]
        return tf.concat(branch_outputs, axis=-1)
class AttentionPooling(tf.keras.layers.Layer):
    """Applies multihead self-attention to the patches extracted form the
    trunk.

    Args:
        num_heads: The number attention heads.
        dropout: Dropout after attention
        flatten_op: Reshape operation.
    Inputs:
        Structured patches from the trunk.
    Outputs:
        Flatten patches after self-attention
    """

    def __init__(self, num_heads=2, dropout=0.2, flatten_op="flatten", **kwargs):
        super().__init__(**kwargs)
        self.num_heads = num_heads
        self.dropout = dropout
        assert flatten_op in ("flatten", "gap"), ValueError(
            "flatten_op must be in ('flatten', 'gap')"
        )
        self.flatten_op = flatten_op

    def get_config(self):
        """Serialize constructor arguments for layer reconstruction."""
        base = super().get_config()
        base.update(
            {
                "dropout": self.dropout,
                "num_heads": self.num_heads,
                "flatten_op": self.flatten_op,
            }
        )
        return base

    def build(self, input_shape):
        # Prefer the native TF MultiHeadAttention; TF 1.x lacks it, in which
        # case fall back to the keras-multi-head package wrapped with dropout.
        try:
            self.attention = tf.keras.layers.MultiHeadAttention(
                num_heads=self.num_heads, dropout=self.dropout,
            )
            self.attn_type = "tf"
        except AttributeError:
            self.attention = tf.keras.models.Sequential(
                [
                    MultiHeadAttention(
                        head_num=self.num_heads, input_shape=input_shape
                    ),
                    tf.keras.layers.Dropout(self.dropout),
                ]
            )
            self.attn_type = "keras-multi-head"
        # Select the pooling op; flatten_op was validated in __init__.
        flatten_factory = {
            "flatten": tf.keras.layers.Flatten,
            "gap": tf.keras.layers.GlobalAveragePooling1D,
        }
        if self.flatten_op not in flatten_factory:
            raise Exception()
        self.flatten = flatten_factory[self.flatten_op]()
        super().build(input_shape=input_shape)

    def call(self, x):
        # The two backends take different call signatures.
        if self.attn_type == "tf":
            attended = self.attention([x, x])
        elif self.attn_type == "keras-multi-head":
            attended = self.attention(x)
        else:
            raise Exception()
        return self.flatten(attended)
def BiDirectional(**kwargs):
    """Factory for a bidirectional LSTM layer that returns full sequences.

    Keyword Args:
        units: LSTM hidden size per direction (default 8); also used to
            name the layer "BiDir<units>".
    """
    n_units = kwargs.get("units", 8)
    lstm = tf.keras.layers.LSTM(n_units, return_sequences=True)
    return tf.keras.layers.Bidirectional(lstm, name="BiDir%i" % n_units)
def get_kinn_output_node():
    """Return a Lambda layer mapping KINN activity to a clipped log10 scale.

    Activity is clipped to [1e-7, 1e-1] before taking log10 (natural log
    divided by ln 10) so the output stays numerically bounded.
    """
    def _clipped_log10(x):
        clipped = tf.clip_by_value(x, 10 ** -7, 10 ** -1)
        return tf.math.log(clipped) / np.log(10)

    return tf.keras.layers.Lambda(_clipped_log10, name="output")
def get_model_space_kinn():
    """Construct the AMBER ModelSpace for the transfer-learning search.

    The space is: [conv block 1, dropout, conv block 2, bidirectional,
    pooling, optional dense, KINN layer]. Conv blocks share controller
    embeddings via `layer_embedding_sharing` (block 2 reuses block 1's
    embedding).

    Returns
    -------
    tuple
        (ModelSpace, dict) where the dict maps layer index -> the layer
        index whose controller embedding it shares.
    """
    # Setup and params.
    state_space = ModelSpace()
    default_params = {}
    base_filter = 64
    # Candidate conv configurations per block; filters double in block 2.
    param_list = [
        # Block 1:
        [
            {
                "filters": base_filter,
                "kernel_size": 1,
                "activation": "relu",
                "padding": "same",
                "name": "conv11",
            },
            {
                "filters": base_filter,
                "kernel_size": 3,
                "activation": "relu",
                "padding": "same",
                "name": "conv13",
            },
            {
                "filters": base_filter,
                "kernel_size": 3,
                "activation": "relu",
                "dilation_rate": 4,
                "padding": "same",
                "name": "conv13d4",
            },
            {
                "filters": base_filter,
                "kernel_size": 5,
                "activation": "relu",
                "padding": "same",
                "name": "conv15",
            },
        ],
        # Block 2:
        [
            {
                "filters": base_filter * 2,
                "kernel_size": 1,
                "activation": "relu",
                "padding": "same",
                "name": "conv21",
            },
            {
                "filters": base_filter * 2,
                "kernel_size": 3,
                "activation": "relu",
                "padding": "same",
                "name": "conv23",
            },
            {
                "filters": base_filter * 2,
                "kernel_size": 3,
                "activation": "relu",
                "dilation_rate": 4,
                "padding": "same",
                "name": "conv23d4",
            },
            {
                "filters": base_filter * 2,
                "kernel_size": 5,
                "activation": "relu",
                "padding": "same",
                "name": "conv25",
            },
        ],
    ]
    # Build state space.
    layer_embedding_sharing = {}
    conv_seen = 0
    for i in range(len(param_list)):
        # Build conv states for this layer.
        conv_states = []
        for j in range(len(param_list[i])):
            d = copy.deepcopy(default_params)
            for k, v in param_list[i][j].items():
                d[k] = v
            conv_states.append(Operation("conv1d", **d))
        # Add Inception Layer; reduce the filter number for each branch to match complexity
        conv_states.append(
            Operation(
                "InceptionLayer",
                filters=[base_filter * (2 ** (conv_seen)) // 4 for _ in range(4)],
                kernel_sizes=[1, 3, 3, 5],
                dilation_rates=[1, 1, 4, 1],
                name=f"inception_{conv_seen}",
            )
        )
        # Add a zero Layer
        if conv_seen > 0:
            conv_states.append(Operation("identity"))
        else:
            # first block: a linear 1x1 conv instead of identity so the
            # channel dimension still matches downstream candidates
            conv_states.append(
                Operation(
                    "conv1d",
                    activation="linear",
                    filters=base_filter,
                    kernel_size=1,
                    name="conv1_lin",
                )
            )
        state_space.add_layer(conv_seen * 2, conv_states)
        if i > 0:
            # conv blocks after the first share block 1's embedding (index 0)
            layer_embedding_sharing[conv_seen * 2] = 0
        conv_seen += 1
        # Add pooling states, if is the last conv.
        if i == len(param_list) - 1:
            bidirectional = [
                Operation("BiDirectional", units=32),
                Operation("BiDirectional", units=16),
                Operation("Identity"),
            ]
            state_space.add_layer(conv_seen * 2 - 1, bidirectional)
            pool_states = [
                Operation("Flatten"),
                Operation(
                    "AttentionPooling",
                    flatten_op="flatten",
                    dropout=0.2,
                    name="AtnFlat",
                ),
                # Operation('GlobalAvgPool1D'),
                # Operation('AttentionPooling', flatten_op='gap', dropout=0.1, name='AtnGap')
            ]
            state_space.add_layer(conv_seen * 2, pool_states)
        else:
            # Add dropout
            state_space.add_layer(
                conv_seen * 2 - 1,
                [
                    Operation("Identity"),
                    Operation("Dropout", rate=0.1),
                    Operation("Dropout", rate=0.3),
                    Operation("Dropout", rate=0.5),
                ],
            )
            if i > 0:
                # dropout layers after the first share embedding index 1
                layer_embedding_sharing[conv_seen * 2 - 1] = 1
    # Add an optional Dense layer
    state_space.add_layer(
        conv_seen * 2 + 1,
        [
            Operation("Dense", units=64, activation="relu"),
            Operation("Identity"),
        ],
    )
    # Add final KINN layer.
    # Candidates are pre-trained KINNs with different state counts (4-6) and
    # training setups; channels 4-13 select the sequence part of the input.
    state_space.add_layer(
        conv_seen * 2 + 2,
        [
            # state 4
            Operation(
                "KinnLayer",
                kinn_dir="outputs/2022-05-21/KINN-wtCas9_cleave_rate_log-finkelstein-0-rep4-gRNA1/",
                manager_kws={"output_op": get_kinn_output_node},
                channels=np.arange(4, 13),
                name="kinn_f41",
            ),
            Operation(
                "KinnLayer",
                kinn_dir="outputs/2022-05-21/KINN-wtCas9_cleave_rate_log-finkelstein-0-rep5-gRNA2/",
                manager_kws={"output_op": get_kinn_output_node},
                channels=np.arange(4, 13),
                name="kinn_f42",
            ),
            # state 5
            Operation(
                "KinnLayer",
                kinn_dir="outputs/2022-05-30/KINN-wtCas9_cleave_rate_log-uniform-5-rep2-gRNA1/",
                manager_kws={"output_op": get_kinn_output_node},
                channels=np.arange(4, 13),
                name="kinn_u51",
            ),
            Operation(
                "KinnLayer",
                kinn_dir="outputs/2022-05-30/KINN-wtCas9_cleave_rate_log-uniform-5-rep3-gRNA2/",
                manager_kws={"output_op": get_kinn_output_node},
                channels=np.arange(4, 13),
                name="kinn_u52",
            ),
            # state 6
            Operation(
                "KinnLayer",
                kinn_dir="outputs/2022-05-30/KINN-wtCas9_cleave_rate_log-uniform-6-rep2-gRNA1/",
                manager_kws={"output_op": get_kinn_output_node},
                channels=np.arange(4, 13),
                name="kinn_u61",
            ),
            Operation(
                "KinnLayer",
                kinn_dir="outputs/2022-05-30/KINN-wtCas9_cleave_rate_log-uniform-6-rep2-gRNA2/",
                manager_kws={"output_op": get_kinn_output_node},
                channels=np.arange(4, 13),
                name="kinn_u62",
            ),
        ],
    )
    return state_space, layer_embedding_sharing
class TransferKinnModelBuilder(ModelBuilder):
    """AMBER ModelBuilder that assembles a keras model from architecture states.

    Each state either indexes into `model_space` (an int) or is itself an
    `Operation`/callable. A `KinnLayer` state additionally receives the raw
    input tensor so it can read the one-hot sequence alongside the hidden
    features.
    """

    def __init__(self, inputs_op, output_op, model_compile_dict, model_space, **kwargs):
        # model_compile_dict: compile spec; "optimizer" and each entry in
        # "metrics" may be zero-arg factories so a fresh instance is built
        # per model (required when searching many child models).
        self.model_compile_dict = model_compile_dict
        self.input_node = inputs_op[0]
        self.output_node = output_op[0]
        self.model_space = model_space
        # custom layers that AMBER's get_layer must know how to construct;
        # keys are lower-cased Layer_type strings
        self.custom_objects = {
            "InceptionLayer".lower(): InceptionLayer,
            "KinnLayer".lower(): KinnLayer,
            "AttentionPooling".lower(): AttentionPooling,
            "BiDirectional".lower(): BiDirectional,
        }

    def __call__(self, model_states):
        """Build and compile a keras model for the given architecture states.

        Parameters
        ----------
        model_states : list
            One entry per model-space layer: either an int index into the
            corresponding layer of `model_space`, or an Operation/callable.

        Returns
        -------
        tf.keras.Model
            The compiled child model.
        """
        assert self.model_space is not None
        inp = get_layer(None, self.input_node, custom_objects=self.custom_objects)
        x = inp
        for i, state in enumerate(model_states):
            # np.issubclass_ was removed in NumPy 1.24; isinstance covers
            # both Python ints and NumPy integer scalars.
            if isinstance(state, (int, np.integer)):
                op = self.model_space[i][state]
            elif isinstance(state, Operation) or callable(state):
                op = state
            else:
                raise Exception(
                    "cannot understand %s of type %s" % (state, type(state))
                )
            # if is KinnLayer, additionally connect the input op to here
            if op.Layer_type == "kinnlayer":  # no easy way right now
                x = get_layer([x, inp], op, custom_objects=self.custom_objects)
            else:
                x = get_layer(x, op, custom_objects=self.custom_objects)
        out = get_layer(x, self.output_node, custom_objects=self.custom_objects)
        model = tf.keras.models.Model(inputs=inp, outputs=out)
        # deep-copy so popping keys does not mutate the shared compile dict
        model_compile_dict = copy.deepcopy(self.model_compile_dict)
        opt = model_compile_dict.pop("optimizer")()
        # metrics entries may be factories (e.g. lambda: AUC()); call them
        metrics = [
            x() if callable(x) else x for x in model_compile_dict.pop("metrics", [])
        ]
        model.compile(optimizer=opt, metrics=metrics, **model_compile_dict)
        return model
def amber_app(wd, model_space=None, run=False):
    """Configure (and optionally run) the AMBER transfer-learning search.

    Parameters
    ----------
    wd : str
        Working directory; created if missing. Model space, checkpoints and
        logs are written here.
    model_space : str or None
        Optional path to a pickled (model_space, layer_embedding_sharing)
        tuple; if None, the built-in space from get_model_space_kinn is used.
    run : bool
        If True, start the search immediately.

    Returns
    -------
    Amber
        The configured Amber instance.
    """
    # First, define the components we need to use
    # NOTE(review): data path is hard-coded relative to the CWD — run from
    # the repo root.
    with h5py.File("data/inVivoData.newValidSplit.h5", "r") as store:
        train_data = store.get("train")["x"][()], store.get("train")["y"][()]
        valid_data = store.get("valid")["x"][()], store.get("valid")["y"][()]
    # unpack data tuple
    (x_train, y_train), (x_valid, y_valid) = train_data, valid_data
    # component classes/types wired into the Amber wrapper
    type_dict = {
        "controller_type": "GeneralController",
        "knowledge_fn_type": "zero",
        "reward_fn_type": "LossAucReward",
        # FOR RL-NAS
        "modeler_type": TransferKinnModelBuilder,
        "manager_type": "GeneralManager",
        "env_type": "ControllerTrainEnv",
    }
    # Next, define the specifics
    os.makedirs(wd, exist_ok=True)
    # input: 25 positions x 13 channels (hidden feats + one-hot sequence)
    input_node = [Operation("input", shape=(25, 13), name="input")]
    output_node = [
        Operation(
            "dense",
            units=1,
            activation="sigmoid",
            name="output_final",
            kernel_constraint=tf.keras.constraints.NonNeg(),
            bias_constraint=tf.keras.constraints.NonNeg(),
            # XXX: this is super important!! if init values are clipped below zero, grads will diminish from sigmoid. FZZ 20221027
            kernel_initializer=tf.keras.initializers.Constant(1.75),
            bias_initializer=tf.keras.initializers.Constant(3.75),
        )
    ]
    # load a pre-configured space, or build the default one; either way,
    # persist the space used into wd for reproducibility
    if model_space is not None:
        with open(model_space, "rb") as f:
            model_space, layer_embedding_sharing = pickle.load(f)
    else:
        model_space, layer_embedding_sharing = get_model_space_kinn()
    with open(os.path.join(wd, "model_space.pkl"), "wb") as f:
        pickle.dump((model_space, layer_embedding_sharing), f)
    batch_size = 25000
    use_ppo = False
    lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay(
        initial_learning_rate=0.005,
        decay_steps=int(1000000 / batch_size) * 10,  # decay lr every 30 epochs
        decay_rate=0.9,
        staircase=False,
    )
    # optimizer/metrics are zero-arg factories so each child model gets
    # fresh instances (see TransferKinnModelBuilder.__call__)
    model_compile_dict = {
        "loss": "binary_crossentropy",
        "optimizer": lambda: Adam(learning_rate=lr_schedule),
        "metrics": ["acc", lambda: tf.keras.metrics.AUC(curve="PR")],
    }
    specs = {
        "model_space": model_space,
        "controller": {
            "share_embedding": layer_embedding_sharing,
            "with_skip_connection": False,
            "skip_weight": None,
            "lstm_size": 32,
            "lstm_num_layers": 1,
            "kl_threshold": 0.005,
            "train_pi_iter": 50 if use_ppo else 10,
            "optim_algo": "adam",
            "rescale_advantage_by_reward": False,
            "temperature": 2.0,
            "tanh_constant": 1.5,
            "buffer_size": 5,  # FOR RL-NAS
            "batch_size": 3,
            "use_ppo_loss": use_ppo,
        },
        "model_builder": {
            "batch_size": batch_size,
            "inputs_op": input_node,
            "outputs_op": output_node,
            "model_compile_dict": model_compile_dict,
        },
        "knowledge_fn": {"data": None, "params": {}},
        "reward_fn": {"method": "aupr", "batch_size": batch_size,},
        "manager": {
            "data": {
                "train_data": (x_train, y_train),
                "validation_data": (x_valid, y_valid),
            },
            "params": {
                "epochs": 20,
                "fit_kwargs": {
                    "earlystop_patience": 5,
                    # "class_weight": {0:1., 1:10.}
                },
                "predict_kwargs": {"batch_size": batch_size},
                "child_batchsize": batch_size,
                "store_fn": "model_plot",
                "working_dir": wd,
                "verbose": 0,
            },
        },
        "train_env": {
            "max_episode": 100,
            "max_step_per_ep": 5,
            "working_dir": wd,
            "time_budget": "72:00:00",
            "with_skip_connection": False,
            "save_controller_every": 1,
        },
    }
    # finally, run program
    amb = Amber(types=type_dict, specs=specs)
    if run:
        amb.run()
    return amb
# Script entry point: parse CLI args and launch the search (skipped when the
# module is %run inside IPython, where amber_app is called interactively).
if __name__ == "__main__":
    if not run_from_ipython():
        parser = argparse.ArgumentParser(
            description="Script for AMBER-search of Single-task runner"
        )
        parser.add_argument("--wd", type=str, help="working directory")
        parser.add_argument("--model-space", default=None, required=False, type=str, help="filepath to a pre-configured model space pickle file; if None, will use built-in model space")
        args = parser.parse_args()
        amber_app(wd=args.wd, model_space=args.model_space, run=True)
    # Or, run this in ipython terminal:
    """
    %run src/transfer_learn
    amb = amber_app(wd="outputs/test_tl_amber")
    amb.manager.verbose = 1
    #arc = [3,0,3,1,1,2,0,2]
    #amb.manager.get_rewards(0, arc)
    amb.run()
    """
| 24,213 | 33.690544 | 185 | py |
Elektrum | Elektrum-main/src/neural_network_builder.py | """class for converting a kinetic model hypothesis to a keras neural network
"""
from amber.utils import corrected_tf as tf
import tensorflow as tf2
from tensorflow.keras.layers import Input, Conv1D, Dense, Concatenate, Lambda, Flatten
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import SGD, Adam
from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping
from tensorflow.keras import regularizers
from tensorflow.keras.utils import plot_model
from sklearn.preprocessing import OneHotEncoder
from sklearn.model_selection import train_test_split
import pandas as pd
import numpy as np
import scipy.stats as ss
import matplotlib.pyplot as plt
from amber.modeler.kerasModeler import ModelBuilder
from amber.modeler.dag import get_layer
class KineticRate(tf.keras.layers.Conv1D):
    """Conv1D subclass intended to model a single kinetic rate.

    The original stub called ``super().__init__()`` without Conv1D's required
    ``filters``/``kernel_size`` arguments, so instantiating it always raised a
    TypeError; the constructor now forwards its arguments (plus any extra
    Conv1D keyword arguments) to the parent.
    """

    def __init__(self, filters, kernel_size, kernel_initializer='zeros', **kwargs):
        super().__init__(
            filters=filters,
            kernel_size=kernel_size,
            kernel_initializer=kernel_initializer,
            **kwargs,
        )
class KineticNeuralNetworkBuilder(ModelBuilder):
    """Builds a keras neural network (KINN) from a kinetic state-graph model.

    Each kinetic rate becomes a convolution over its sequence window
    ("header"); rates are combined through a King-Altman softmax into state
    occupancies and an activity ("body"), then mapped to the phenotype output.
    """

    def __init__(self, kinn, session=None, output_op=None, n_feats=25, n_channels=4, replace_conv_by_fc=False):
        """convert a kinetic state graph (with state-specific input sequence ranges) to a neural network,
        whose complexity is specified in rate_pwm_len

        Parameters
        ----------
        kinn : KineticModel
        session : tf.Session
        n_channels : int
        replace_conv_by_fc : bool

        Attributes
        ----------
        kinn : KineticModel
            reference to the biophysics model class
        rate_pwm_len : list
            list of pwm lengths. Together with kinn, the two variable that controls the neural network
        session : tf.Session
            underlying session for tensorflow and keras
        output_op : amber.architect.Operation, or None
            specific operation for converting activity to phenotype; if None, will learn from data by a Dense layer
        model : tf.keras.Model, or None
            keras model passing through the internal objects; will be None before initialized
        layer_dict : dict
            dictionary that maps layer name string to layer objects
        input_ranges : list of tuples
            a tuple of a pair of integers, ordered by the model.inputs
        """
        # check kinn; specifies the states and ranges
        #assert isinstance(kinn, KineticModel)
        self.kinn = kinn
        # check rate_mode
        rate_pwm_len = [d.kernel_size for d in self.kinn.rates]
        assert len(rate_pwm_len) == len(self.kinn.rates)
        for pwm_len, rate in zip(*[rate_pwm_len, self.kinn.rates]):
            assert isinstance(pwm_len, int)
            #assert pwm_len <= rate.input_range[1] - rate.input_range[0], ValueError(
            #    f"pwm_len {pwm_len} must be smaller than rate input range {rate.input_range[0]} - {rate.input_range[1]}")
        self.rate_pwm_len = rate_pwm_len
        # start new session
        self.session = session
        if self.session is None:
            self._reset_session()
        else:
            tf.keras.backend.set_session(self.session)
        self.weight_initializer = 'zeros'
        # placeholders
        self.n_feats = n_feats
        self.n_channels = n_channels
        self.replace_conv_by_fc = replace_conv_by_fc
        self.output_op = output_op
        self.model = None
        self.layer_dict = None
        self.input_ranges = None
        self.traindata = None
        self.testdata = None

    def _reset_session(self):
        """Close any existing session and register a fresh one with keras."""
        if self.session is not None:
            self.session.close()
        self.session = tf.Session()
        tf.keras.backend.set_session(self.session)

    def _build_inputs(self):
        """Create one Input per unique rate sequence window.

        Also populates `self.input_ranges` (ordered like the inputs); the
        upper bound of each window is clipped to n_feats - 1.
        """
        inputs_op = {}
        self.input_ranges = []
        for a, b in set([tuple(r.input_range) for r in self.kinn.rates]):
            assert a >= 0
            b = min(b, self.n_feats-1)
            input_id = "input_%i_%i" % (a, b)
            if input_id not in inputs_op:
                assert b-a > 0, ValueError(f"input range less than 0 for {input_id}")
                inputs_op["input_%i_%i" % (a, b)] = Input(
                    shape=(b - a, self.n_channels), name="input_%i_%i" % (a, b))
                self.input_ranges.append((a, b))
        return inputs_op

    def _build_rates(self, inputs_op):
        """Build one conv (optionally + dense) tower per kinetic rate.

        Per-rate options are read from the rate object's __dict__ (padding,
        filters, hidden_size, reshape_fn). Returns the list of rate tensors,
        each of shape (batch, 1). Note the loop variable `rate` is rebound
        to the output tensor near the end of each iteration.
        """
        # build convs --> rates
        rates = []
        for i, rate in enumerate(self.kinn.rates):
            seq_range = f"input_{rate.input_range[0]}_{min(self.n_feats-1, rate.input_range[1])}"
            name = "k%i" % i
            seq_range_d = min(self.n_feats-1,rate.input_range[1]) - rate.input_range[0]
            if self.replace_conv_by_fc: # if replace is True, overwrite the rate dict
                padding = "valid"
            else:
                padding = rate.__dict__.get("padding", "valid" if self.replace_conv_by_fc else "same")
            filters = rate.__dict__.get("filters", 1)
            # when replace_conv_by_fc, the kernel spans the whole window, so
            # the conv acts as a fully-connected layer over the sequence
            conv_rate = Conv1D(filters=filters,
                               kernel_size=(seq_range_d,) if self.replace_conv_by_fc else self.rate_pwm_len[i],
                               activation="linear",
                               #use_bias=False,
                               kernel_initializer=self.weight_initializer() if callable(self.weight_initializer) else self.weight_initializer,
                               padding=padding,
                               name="conv_%s" % name)(
                inputs_op[seq_range])
            hidden_size = rate.__dict__.get("hidden_size", 0)
            if hidden_size == 0:
                # linear rate: sum of conv activations over positions
                conv_rate = Flatten()(conv_rate)
                rate = Lambda(lambda x: tf.reduce_sum(x, axis=1, keepdims=True), name="sum_%s" % name)(conv_rate)
            else:
                # nonlinear rate: reshape, hidden relu layer, then sum
                reshape_fn = rate.__dict__.get("reshape_fn", 0)
                reshape_map = {0: tf.keras.layers.Flatten, 1:tf.keras.layers.GlobalAveragePooling1D, 2:tf.keras.layers.GlobalMaxPooling1D}
                conv_rate = reshape_map[reshape_fn]()(conv_rate)
                h = Dense(units=hidden_size, activation='relu',
                          kernel_initializer=self.weight_initializer() if callable(self.weight_initializer) else self.weight_initializer,
                          name="hidden_%s"%name,
                          )(conv_rate)
                rate = Lambda(lambda x: tf.reduce_sum(x, axis=-1, keepdims=True), name="sum_%s" % name)(h)
            rates.append(rate)
        return rates

    def _build_king_altman(self, rates):
        """Concatenate rates and compute King-Altman state occupancies.

        Occupancies are softmax(rates @ KA-pattern-matrix^T). Returns
        (concatenated rates, occupancy tensor).
        """
        concat = Concatenate(name="gather_rates")(rates)
        king_altman = Lambda(lambda x: tf.nn.softmax(
            tf.matmul(x, tf.constant(self.kinn.get_ka_pattern_mat().transpose(), dtype=tf.float32))),
            name="KingAltman")(concat)
        return concat, king_altman

    def _build_activity(self, rates, king_altman):
        """Multiply each contributing exp(rate) by its KA occupancy slice.

        The rate-contribution matrix has at most one contributing rate per
        column; columns with no contribution are skipped.
        """
        # build activity
        activity = []
        rate_contrib_mat = self.kinn.get_rate_contrib_matrix()
        for k in range(rate_contrib_mat.shape[1]):
            # get each column as mask
            mask = rate_contrib_mat[:, k]
            assert np.sum(mask) <= 1, "k=%i, mask error for %s" % (k, mask)
            if np.sum(mask) == 0:
                continue
            # print(np.where(mask == 1))
            rate_index = np.where(mask == 1)[0][0]
            rate_layer = rates[rate_index]
            # print(rate_layer)
            # rates are parameterized in log-space; exponentiate before use
            rate_layer = Lambda(
                lambda x: tf.math.exp(x),
                name=f'exp_k{rate_index}_{k}')(rate_layer)
            ka_slice = Lambda(
                lambda x: tf.gather(
                    x, [k], axis=-1), name=f"KA_slice_{k}")(king_altman)
            intermediate = Concatenate(name=f"gather_act_{k}")([
                ka_slice, rate_layer])
            activity.append(
                Lambda(
                    lambda x: tf.reduce_prod(x, axis=-1, keepdims=True),
                    name=f"prod_act_{k}")(intermediate)
            )
        return activity

    def _build_outputs(self, activity):
        """Map activity tensor(s) to the final output.

        Uses `self.output_op` when provided; otherwise a zero-initialized
        linear Dense layer learns the mapping from data.
        """
        # build outputs
        # TODO: change the hard-coded output
        if len(activity) > 1:
            if self.output_op is None:
                output = Dense(units=1, activation="linear",
                               kernel_initializer='zeros',
                               name="output")(
                    Concatenate()(activity))
            else:
                x = Concatenate()(activity)
                output = get_layer(x=x, state=self.output_op, with_bn=False)
        else:
            if self.output_op is None:
                output = Dense(
                    units=1,
                    activation="linear",
                    kernel_initializer='zeros',
                    name="output")(
                    activity[0])
            else:
                output = get_layer(x=activity[0], state=self.output_op, with_bn=False)
        return output

    def build(self, optimizer=None, output_act=False, plot=False, return_intermediate=False):
        """build the machine learning model

        Parameters
        ----------
        optimizer : str, dict, or keras.optimizer
            optimizer to use, can take various forms. If left None, then will use SGD with a
            learning rate of 0.1 and momentum 0.95
        output_act : bool
            if true, output activity instead of phenotype
        """
        inputs_op = self._build_inputs()
        rates = self._build_rates(inputs_op)
        gather_rates, king_altman = self._build_king_altman(rates)
        activity = self._build_activity(rates, king_altman)
        # early exit with intermediate tensors (used by KinnLayer to reuse
        # the header without compiling a model)
        if return_intermediate:
            return {
                'inputs_op': inputs_op,
                'gather_rates': gather_rates,
                'KingAltman': king_altman,
                'activity': activity}
        optimizer = optimizer or SGD(lr=0.1, momentum=0.95, decay=1e-5, clipnorm=1.0)
        if output_act is True:
            # one output per activity tensor (diagnostic mode)
            self.model = Model([inputs_op[j] for j in inputs_op], [
                activity[k] for k in range(len(activity))])
            self.model.compile(
                loss='mse',
                optimizer=optimizer
            )
        else:
            output = self._build_outputs(activity)
            self.model = Model([inputs_op[j] for j in inputs_op], output)
            self.model.compile(
                # TODO
                #loss='binary_crossentropy',
                loss='mse',
                optimizer=optimizer
            )
        self.layer_dict = {l.name: l for l in self.model.layers}
        if plot is True:
            plot_model(self.model, to_file='model.png')

    def load_data(self, fp, output_act=False):
        """populate the instance's data attributes for training and testing

        Parameters
        ----------
        fp : str
            filepath for data csv
        output_act : bool
            if true, use activity (currently hard-coded as column 50) as output; otherwise use phenotype
            (currently hard-coded as column 51) as output
        """
        # one-hot encoder
        x_set = [['A'], ['C'], ['G'], ['T']]
        enc = OneHotEncoder(sparse=False, categories='auto')
        _ = enc.fit(x_set)
        # read in data
        with open(fp, 'r') as f:
            data = pd.read_csv(f, index_col=0)
        data.dropna(inplace=True)
        # first 50 characters of the index column are the sequence
        data['seq_ohe'] = [enc.transform(
            [[t] for t in row[1][0:50]]) for row in data.iterrows()]
        if output_act is True:
            # normalize activity to [0, 1]
            data['obs'] = data['50']
            data['obs'] /= data['obs'].max()
        else:
            data['obs'] = data['51']
        #print(data['obs'].describe())
        # split train-test
        gen_df = data
        X_train, X_test, y_train, y_test = train_test_split(
            gen_df['seq_ohe'].values, gen_df['obs'].values, test_size=0.2, random_state=777)
        input_seqs_ohe = []
        for i in range(len(X_train)):
            input_seqs_ohe += [X_train[i]]
        test_seqs_ohe = []
        for i in range(len(X_test)):
            test_seqs_ohe += [X_test[i]]
        # blockify
        input_seqs_ohe = np.array(input_seqs_ohe)
        test_seqs_ohe = np.array(test_seqs_ohe)
        x_train_b = self.blockify_seq_ohe(input_seqs_ohe)
        x_test_b = self.blockify_seq_ohe(test_seqs_ohe)
        self.traindata = x_train_b, y_train, input_seqs_ohe
        self.testdata = x_test_b, y_test, test_seqs_ohe

    def blockify_seq_ohe(self, seq_ohe):
        """separate seq matrices into blocks according to input_ranges

        Parameters
        ----------
        seq_ohe : np.array
            a np.ndarray that stores one-hoe encoded sequences

        Returns
        -------
        list
            each element is a np.array used as input that matches input_ranges
        """
        data = []
        for a, b in self.input_ranges:
            data.append(seq_ohe[:, a:b])
        return data

    def predict(self, data):
        """Predict on a one-hot encoded sequence array (blockified first)."""
        assert isinstance(data, np.ndarray)
        blocks = self.blockify_seq_ohe(data)
        return self.model.predict(blocks)
class KineticMatrixLayer(tf.keras.layers.Layer):
    """Custom Layer that converts kinetic rates into a kinetic matrix.

    For each sample, the per-rate values are scattered (with signs) into an
    (num_states x num_states) kinetic matrix whose real eigenvalue spectrum
    is computed; the layer returns the top-k eigenvalues per sample.
    Optionally adds a regularization loss on the ratio of the 2nd to 3rd
    largest eigenvalue.
    """

    def __init__(self, num_rates: int, num_states: int, scatter_ind: list, regularize_eigenval_ratio : float, top_k_eigvals=3):
        # scatter_ind[i]: list of ((row, col), sign) placements for rate i
        super().__init__()
        self.num_rates = num_rates
        self.num_states = num_states
        self.scatter_ind = scatter_ind
        self.top_k_eigvals = top_k_eigvals
        self.regularize_eigenval_ratio = regularize_eigenval_ratio

    def build(self, *args):
        # no trainable weights; defer to the base implementation
        super().build(*args)

    @tf.autograph.experimental.do_not_convert
    def _scatter_single_sample(self, input_tensor):
        """Scatter one sample's rates into a kinetic matrix; return its
        real eigenvalues."""
        updates = []
        inds = []
        assert input_tensor.shape[0] == self.num_rates
        for i in range(self.num_rates):
            val = input_tensor[i]
            # a rate can appear at several matrix positions with +/- sign
            for ind, sign in self.scatter_ind[i]:
                updates.append(val*sign)
                inds.append(ind)
        kinetic_matrix = tf2.scatter_nd(inds, updates, (self.num_states, self.num_states))
        eigvals = tf2.math.real(tf2.linalg.eigvals(kinetic_matrix))
        return eigvals

    def call(self, inputs: tf.Tensor):
        """Return the top-k eigenvalues for each sample in the batch."""
        batch_eigvals = tf2.map_fn(self._scatter_single_sample, inputs)
        batch_top_ev = tf2.math.top_k(batch_eigvals, k=self.top_k_eigvals, sorted=True)
        if self.regularize_eigenval_ratio > 0 :
            # all eigvals <= 0; larger is ~0, smaller is more negative
            # minimize loss -> lam_1 / lam_2 ~ 0
            self.add_loss(self.regularize_eigenval_ratio *
                          tf.reduce_mean(tf.math.divide_no_nan(batch_top_ev.values[:,1], batch_top_ev.values[:,2])))
        return batch_top_ev.values
class KineticEigenModelBuilder(KineticNeuralNetworkBuilder):
    """KINN builder variant that predicts via kinetic-matrix eigenvalues.

    Instead of King-Altman occupancies, the exponentiated rates are scattered
    into a kinetic matrix and its top eigenvalues are used as the activity
    (see KineticMatrixLayer). Uses MAE loss and random-normal weight init.
    """

    def __init__(self, kinn, session=None, output_op=None, n_feats=25, n_channels=4, replace_conv_by_fc=False,
            regularize_eigenval_ratio=0.001):
        # regularize_eigenval_ratio: weight of the eigenvalue-ratio penalty
        # passed through to KineticMatrixLayer (0 disables it)
        super().__init__(kinn=kinn, session=session, output_op=output_op, n_feats=n_feats, n_channels=n_channels,
                replace_conv_by_fc=replace_conv_by_fc)
        #self.weight_initializer = 'random_normal'
        # callable initializer: eigendecomposition fails for an all-zero
        # matrix, so start from small random weights instead of zeros
        self.weight_initializer = lambda: tf.keras.initializers.RandomNormal(stddev=0.01)
        self.regularize_eigenval_ratio = regularize_eigenval_ratio

    # overwrite activity without going through KA
    def _build_activity(self,rates):
        """Exponentiate log-rates and feed them to the eigenvalue layer."""
        concat = tf2.keras.layers.Concatenate(name='gather_rates')(rates)
        concat = tf2.keras.layers.Lambda(lambda x: tf2.exp(x), name="exp_rates")(concat)
        # per-rate scatter placements come from the kinetic model definition
        scatter_ind = [r.scatter_nd for r in self.kinn.rates]
        kinetic_eig = KineticMatrixLayer(num_rates=len(rates), num_states=len(self.kinn.states),
                scatter_ind=scatter_ind, regularize_eigenval_ratio=self.regularize_eigenval_ratio)(concat)
        return [kinetic_eig]

    # overwrite build to ignore KA
    def build(self, optimizer=None, output_act=False, plot=False):
        """Assemble and compile the eigenvalue-based model.

        Parameters
        ----------
        optimizer : keras optimizer or None
            defaults to SGD(lr=0.1, momentum=0.95, decay=1e-5)
        output_act : bool
            if True, output the eigenvalue activity directly instead of
            mapping it to the phenotype
        plot : bool
            if True, save a model diagram to model.png
        """
        inputs_op = self._build_inputs()
        rates = self._build_rates(inputs_op)
        activity = self._build_activity(rates,)
        optimizer = optimizer or tf2.keras.optimizers.SGD(lr=0.1, momentum=0.95, decay=1e-5)
        if output_act is True:
            self.model = tf2.keras.models.Model([inputs_op[j] for j in inputs_op], [
                activity[k] for k in range(len(activity))])
            self.model.compile(
                loss='mae',
                optimizer=optimizer
            )
        else:
            output = self._build_outputs(activity)
            self.model = tf2.keras.models.Model([inputs_op[j] for j in inputs_op], output)
            self.model.compile(
                # TODO
                #loss='binary_crossentropy',
                loss='mae',
                optimizer=optimizer
            )
        self.layer_dict = {l.name: l for l in self.model.layers}
        if plot is True:
            plot_model(self.model, to_file='model.png')
| 17,038 | 40.96798 | 138 | py |
Elektrum | Elektrum-main/src/runAmber_cnn.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Example wrapper `Amber` use for searching cas9 off-target kinetic prediction
FZZ, Jan 28, 2022
"""
from amber import Amber
from amber.utils import run_from_ipython, get_available_gpus
from amber.architect import ModelSpace, Operation
import sys
import os
import pickle
import copy
import numpy as np
import scipy.stats as ss
from sklearn.model_selection import train_test_split
from tensorflow.keras.optimizers import SGD, Adam
#from keras.optimizers import SGD, Adam
import argparse
import pickle
from src.data import load_finkelstein_data as get_data
def get_model_space_long():
    """Build the AMBER conv1d model space for the off-target CNN search.

    Three conv "blocks" (16/64/256 filters, kernel sizes 1/3/7, relu/tanh)
    each get an identity-or-conv escape state; dropout layers sit between
    conv blocks and a pooling layer after the last one, followed by a
    dense classifier layer.

    Returns
    -------
    (ModelSpace, dict)
        The state space and a ``layer_embedding_sharing`` map telling the
        controller which layers share embeddings (all conv layers after the
        first share index 0; all dropout layers after the first share 1).
    """
    # Setup and params.
    state_space = ModelSpace()
    default_params = {}
    param_list = [
        # Block 1:
        [
            {"filters": 16, "kernel_size": 1, "activation": "relu", "padding": "valid"},
            {"filters": 16, "kernel_size": 3, "activation": "relu", "padding": "valid"},
            {"filters": 16, "kernel_size": 7, "activation": "relu", "padding": "valid"},
            {"filters": 16, "kernel_size": 1, "activation": "tanh", "padding": "valid"},
            {"filters": 16, "kernel_size": 3, "activation": "tanh", "padding": "valid"},
            {"filters": 16, "kernel_size": 7, "activation": "tanh", "padding": "valid"},
        ],
        # Block 2:
        [
            {"filters": 64, "kernel_size": 1, "activation": "relu", "padding": "valid"},
            {"filters": 64, "kernel_size": 3, "activation": "relu", "padding": "valid"},
            {"filters": 64, "kernel_size": 7, "activation": "relu", "padding": "valid"},
            {"filters": 64, "kernel_size": 1, "activation": "tanh", "padding": "valid"},
            {"filters": 64, "kernel_size": 3, "activation": "tanh", "padding": "valid"},
            {"filters": 64, "kernel_size": 7, "activation": "tanh", "padding": "valid"},
        ],
        # Block 3:
        [
            {"filters": 256, "kernel_size": 1, "activation": "relu", "padding": "valid"},
            {"filters": 256, "kernel_size": 3, "activation": "relu", "padding": "valid"},
            {"filters": 256, "kernel_size": 7, "activation": "relu", "padding": "valid"},
            {"filters": 256, "kernel_size": 1, "activation": "tanh", "padding": "valid"},
            {"filters": 256, "kernel_size": 3, "activation": "tanh", "padding": "valid"},
            {"filters": 256, "kernel_size": 7, "activation": "tanh", "padding": "valid"},
        ],
        # Block 4:
        #[
        #    {"filters": 256, "kernel_size": 1, "activation": "relu", "padding": "valid"},
        #    {"filters": 256, "kernel_size": 3, "activation": "relu", "padding": "valid"},
        #    {"filters": 256, "kernel_size": 7, "activation": "relu", "padding": "valid"},
        #    {"filters": 256, "kernel_size": 1, "activation": "tanh", "padding": "valid"},
        #    {"filters": 256, "kernel_size": 3, "activation": "tanh", "padding": "valid"},
        #    {"filters": 256, "kernel_size": 7, "activation": "tanh", "padding": "valid"},
        #],
    ]
    # Build state space. Layer indices: conv layers at even positions
    # (conv_seen*2), dropout/pool at the odd positions in between.
    layer_embedding_sharing = {}
    conv_seen = 0
    for i in range(len(param_list)):
        # Build conv states for this layer.
        conv_states = []
        for j in range(len(param_list[i])):
            d = copy.deepcopy(default_params)
            for k, v in param_list[i][j].items():
                d[k] = v
            conv_states.append(Operation('conv1d', name="conv{}".format(conv_seen), **d))
        # escape state: identity for later layers, a linear 1x1 conv for the first
        # (the first layer cannot be identity because it must set the channel dim)
        if conv_seen > 0:
            conv_states.append(Operation('identity', name="id{}".format(conv_seen)))
        else:
            conv_states.append(Operation('conv1d', name="conv{}".format(conv_seen), activation="linear", filters=16, kernel_size=1))
        state_space.add_layer(conv_seen*2, conv_states)
        if i > 0:
            # all conv layers after the first share embedding with layer 0
            layer_embedding_sharing[conv_seen*2] = 0
        conv_seen += 1
        # Add pooling states, if is the last conv.
        if i == len(param_list) - 1:
            pool_states = [
                Operation('Flatten'),
                Operation('GlobalMaxPool1D'),
                Operation('GlobalAvgPool1D')
            ]
            state_space.add_layer(conv_seen*2-1, pool_states)
        else:
            # Add dropout between conv blocks
            state_space.add_layer(conv_seen*2-1, [
                Operation('Identity'),
                Operation('Dropout', rate=0.1),
                Operation('Dropout', rate=0.3),
                Operation('Dropout', rate=0.5)
            ])
            if i > 0:
                # dropout layers after the first share embedding with layer 1
                layer_embedding_sharing[conv_seen*2-1] = 1
    # Add final classifier layer.
    state_space.add_layer(conv_seen*2, [
        Operation('Dense', units=64, activation='relu'),
        Operation('Dense', units=32, activation='relu'),
        Operation('Identity')
    ])
    return state_space, layer_embedding_sharing
def robust_pearsonr(y_true, y_score):
    """Pearson correlation between ``y_true`` and flattened ``y_score``.

    Returns 0 when the correlation is undefined (NaN, e.g. for constant
    inputs) so it can be used safely as a search reward.
    """
    r, _ = ss.pearsonr(y_true, y_score.flatten())
    return 0 if np.isnan(r) else r
def amber_app(wd, target="wtCas9_cleave_rate_log", make_switch=False, run=False):
    """Configure (and optionally run) an AMBER RL-NAS search for Cas9
    off-target kinetic-rate prediction.

    Parameters
    ----------
    wd : str
        Working directory for search outputs (created if missing).
    target : str
        Column of the Finkelstein dataset to regress on.
    make_switch : bool
        If True, train on gRNA_2 and test on gRNA_1 (swapped split).
    run : bool
        If True, launch the search before returning.

    Returns
    -------
    Amber
        The configured Amber instance.
    """
    # First, define the components we need to use
    print("switch gRNA_1 to testing and gRNA_2 to training:", make_switch)
    res = get_data(target=target, make_switch=make_switch, logbase=10, include_ref=False)
    # unpack data tuple
    (x_train, y_train), (x_test, y_test) = res
    type_dict = {
        'controller_type': 'GeneralController',
        'knowledge_fn_type': 'zero',
        'reward_fn_type': 'LossAucReward',
        # FOR RL-NAS
        'modeler_type': 'KerasModelBuilder',
        'manager_type': 'GeneralManager',
        'env_type': 'ControllerTrainEnv'
    }
    # Next, define the specifics
    os.makedirs(wd, exist_ok=True)
    # input: 25 positions x 9 alignment channels; output: scalar regression
    input_node = [
        Operation('input', shape=(25,9), name="input")
    ]
    output_node = [
        Operation('dense', units=1, activation='linear', name="output")
    ]
    model_compile_dict = {
        'loss': 'mae',
        'optimizer': 'adam',
    }
    model_space, layer_embedding_sharing = get_model_space_long()
    batch_size = 768
    use_ppo = True
    specs = {
        'model_space': model_space,
        'controller': {
            'share_embedding': layer_embedding_sharing,
            'with_skip_connection': False,
            'skip_weight': None,
            'lstm_size': 64,
            'lstm_num_layers': 1,
            'kl_threshold': 0.01,
            # PPO takes more controller updates per iteration than vanilla PG
            'train_pi_iter': 50 if use_ppo else 10,
            'optim_algo': 'adam',
            'rescale_advantage_by_reward': False,
            'temperature': 2.0,
            'tanh_constant': 1.5,
            'buffer_size': 10,  # FOR RL-NAS
            'batch_size': 5,
            'use_ppo_loss': use_ppo
        },
        'model_builder': {
            'batch_size': batch_size,
            'inputs_op': input_node,
            'outputs_op': output_node,
            'model_compile_dict': model_compile_dict,
        },
        'knowledge_fn': {'data': None, 'params': {}},
        # reward = Pearson correlation on the validation set (NaN-safe)
        'reward_fn': {'method': robust_pearsonr},
        'manager': {
            'data': {
                'train_data': (x_train, y_train),
                'validation_data': (x_test, y_test),
            },
            'params': {
                'epochs': 400,
                'fit_kwargs': {
                    'earlystop_patience': 30,
                    #'max_queue_size': 50,
                    #'workers': 3
                },
                'child_batchsize': batch_size,
                'store_fn': 'model_plot',
                'working_dir': wd,
                'verbose': 0
            }
        },
        'train_env': {
            'max_episode': 350,
            'max_step_per_ep': 10,
            'working_dir': wd,
            'time_budget': "48:00:00",
            'with_skip_connection': False,
            'save_controller_every': 1
        }
    }
    # finally, run program
    amb = Amber(types=type_dict, specs=specs)
    if run:
        amb.run()
    return amb
if __name__ == '__main__':
    # CLI entry point; skipped when the module is imported into IPython.
    if not run_from_ipython():
        parser = argparse.ArgumentParser(description="Script for AMBER-search of Single-task runner")
        parser.add_argument("--wd", type=str, help="working directory")
        parser.add_argument("--switch", type=int, default=0, help="switch to train on gRNA2, test on gRNA1; default 0-false")
        parser.add_argument("--target", choices="""wtCas9_cleave_rate_log
            Cas9_enh_cleave_rate_log
            Cas9_hypa_cleave_rate_log
            Cas9_HF1_cleave_rate_log
            wtCas9_cleave_rate_log_specificity
            Cas9_enh_cleave_rate_log_specificity
            Cas9_hypa_cleave_rate_log_specificity
            Cas9_HF1_cleave_rate_log_specificity
            wtCas9_ndABA
            Cas9_enh_ndABA
            Cas9_hypa_ndABA
            Cas9_HF1_ndABA""".split(), default='wtCas9_cleave_rate_log', type=str, help="target to train")
        args = parser.parse_args()
        # Ensure the working directory exists before writing into it
        # (the original assumed it was pre-created and crashed otherwise).
        os.makedirs(args.wd, exist_ok=True)
        # Persist CLI args for reproducibility; use a context manager so the
        # file handle is closed deterministically (the original leaked it).
        with open(os.path.join(args.wd, "args.pkl"), "wb") as f:
            pickle.dump(args, f)
        make_switch = args.switch != 0
        amber_app(
            wd=args.wd,
            target=args.target,
            make_switch=make_switch,
            run=True
        )
| 9,410 | 35.196154 | 132 | py |
Elektrum | Elektrum-main/src/reload.py | from src.kinetic_model import KineticModel
from src.neural_network_builder import KineticNeuralNetworkBuilder, KineticEigenModelBuilder
import sys
import time
import pickle
import os
from amber.utils import corrected_tf as tf
import numpy as np
from amber.utils import run_from_ipython
import copy
def reload_from_dir(wd, manager_kwargs, sess=None, model_fn=None, load_weights=True, verbose=False):
    """Rebuild a searched KINN model from an AMBER working directory.

    Reads ``AmberSearchBestModel_config.pkl`` from *wd*, reconstructs the
    kinetic model + neural-network builder, compiles it, and optionally
    loads the searched weights from ``AmberSearchBestModel.h5``.

    Parameters
    ----------
    wd : str
        Working directory of a finished AMBER search.
    manager_kwargs : dict
        Optional keys: ``n_channels`` (default 9), ``n_feats`` (default 25),
        ``replace_conv_by_fc`` (default False), ``optimizer`` (zero-arg
        callable returning a keras optimizer), ``output_op``.
    sess : tf session, optional
    model_fn : callable, optional
        Builder class; defaults to ``KineticNeuralNetworkBuilder``.
    load_weights : bool
        Load trained weights when True.
    verbose : bool
        Print a message when weights are loaded.

    Returns
    -------
    The model-builder instance with a compiled ``.model``.
    """
    # unpack keyword args
    n_channels = manager_kwargs.get("n_channels", 9)
    n_feats = manager_kwargs.get("n_feats", 25)
    replace_conv_by_fc = manager_kwargs.get("replace_conv_by_fc", False)
    opt = manager_kwargs.get("optimizer", None)
    output_op = manager_kwargs.get("output_op", None)
    model_params = pickle.load(open(os.path.join(wd, "AmberSearchBestModel_config.pkl"), "rb"))
    kinn = KineticModel(model_params)
    model_fn = model_fn or KineticNeuralNetworkBuilder
    mb = model_fn(kinn=kinn, session=sess,
                  output_op=output_op,
                  n_feats=n_feats,
                  n_channels=n_channels,
                  replace_conv_by_fc=replace_conv_by_fc
                  )
    # instantiate a fresh optimizer per call if a factory was given
    opt = opt() if opt else 'adam'
    mb.build(optimizer=opt, plot=False, output_act=False)
    if load_weights is True:
        if verbose: print("loaded searched model")
        mb.model.load_weights(os.path.join(wd, "AmberSearchBestModel.h5"))
    #mb.model.summary()
    return mb
def get_rate_model_from_kinn(kinn, n_feats=25, n_channels=9):
    """Extract the sub-model that outputs the per-reaction rates and probe
    it with an all-zero (fully matched) input.

    Parameters
    ----------
    kinn : model builder
        A built KINN model builder exposing ``.model`` and
        ``.blockify_seq_ohe``; the model must contain a layer named
        ``'gather_rates'``.
    n_feats : int
        Sequence length of the probe input (default 25, matching the
        Cas9 alignment featurization).
    n_channels : int
        Channel count of the probe input (default 9). The defaults keep
        the previous hard-coded (1, 25, 9) behavior.

    Returns
    -------
    (rate_mod, rates)
        The keras sub-model and its predicted rates for the zero probe.
    """
    layer_dict = {l.name: l for l in kinn.model.layers}
    rate_mod = tf.keras.models.Model(inputs=kinn.model.inputs, outputs=layer_dict['gather_rates'].output)
    # an all-zero one-hot block encodes a perfectly matched (reference) sequence
    matched = np.zeros((1, n_feats, n_channels))
    rates = rate_mod.predict(kinn.blockify_seq_ohe(matched))
    return rate_mod, rates
def retrain_last_layer(wd, manager_kwargs, model_fn, new_output_op, new_name_suffix, datas=None, sess=None):
    """Rebuild a searched KINN with a replacement output head and retrain it.

    The model is reloaded *without* weights, given the new output op, and
    fit from scratch with early stopping; the best checkpoint is written to
    ``<wd>/bestmodel_<new_name_suffix>.h5`` and loaded back before return.

    Parameters
    ----------
    wd : str
        AMBER search working directory to reload from.
    manager_kwargs : dict
        Base kwargs for :func:`reload_from_dir`; ``output_op`` is overridden.
    model_fn : callable
        Builder class (e.g. KineticNeuralNetworkBuilder).
    new_output_op : callable
        Zero-arg factory returning the new output keras layer.
    new_name_suffix : str
        Suffix for the checkpoint filename.
    datas : tuple, optional
        ``((x_train, y_train), (x_test, y_test))``; training only happens
        when provided.
    sess : tf session, optional

    Example
    -------
    .. code: python
        from silence_tensorflow import silence_tensorflow; silence_tensorflow()
        import os
        import tensorflow as tf
        from src.neural_network_builder import KineticNeuralNetworkBuilder
        from src.reload import retrain_last_layer
        from src.data import load_finkelstein_data as get_data
        wds = ["./outputs/final/%s"%x for x in os.listdir("outputs/final/") if x.endswith("gRNA1")]
        datas = get_data(target="wtCas9_cleave_rate_log", make_switch=False, logbase=10)
        manager_kwargs = {'n_feats': 25, 'n_channels': 9,}
        new_output_op = lambda: tf.keras.layers.Dense(units=1, activation="linear", name="output_nonneg", kernel_constraint=tf.keras.constraints.NonNeg())
        for wd in wds:
            print(wd)
            tf.compat.v1.reset_default_graph()
            with tf.compat.v1.Session() as sess:
                retrain_last_layer(wd=wd, manager_kwargs=manager_kwargs, new_output_op=new_output_op, new_name_suffix="linear_offset", model_fn=KineticNeuralNetworkBuilder, datas=datas)
    """
    # shallow copy so the caller's dict is not mutated by the output_op override
    manager_kw2 = copy.copy(manager_kwargs)
    manager_kw2['output_op'] = new_output_op
    # load_weights=False: the new head is trained from scratch
    kinn = reload_from_dir(wd=wd, manager_kwargs=manager_kw2, model_fn=model_fn,
                           load_weights=False, sess=sess)
    if datas is not None:
        (x_train, y_train), (x_test, y_test) = datas
        t0 = time.time()
        checkpointer = tf.keras.callbacks.ModelCheckpoint( filepath=os.path.join(wd, f"bestmodel_{new_name_suffix}.h5"), mode='min', verbose=0, save_best_only=True, save_weights_only=True)
        earlystopper = tf.keras.callbacks.EarlyStopping( monitor="val_loss", mode='min', patience=50, verbose=0)
        # convert one-hot inputs into the per-rate blocks the KINN expects
        x_train_b = kinn.blockify_seq_ohe(x_train)
        x_test_b = kinn.blockify_seq_ohe(x_test)
        hist = kinn.model.fit(
            x_train_b, y_train,
            epochs=3000,
            batch_size=128,
            validation_data=[x_test_b, y_test],
            callbacks=[checkpointer, earlystopper],
            verbose=0
        )
        print("training took %.3f secs.." % (time.time()-t0))
        # restore the best (lowest val_loss) weights before returning
        kinn.model.load_weights(os.path.join(wd, f"bestmodel_{new_name_suffix}.h5"))
    return kinn
if __name__ == "__main__" and not run_from_ipython():
    # CLI entry: rebuild the searched model found in the given working dir.
    wd = sys.argv[1]
    # BUG FIX: `manager_kwargs` is a required positional parameter of
    # reload_from_dir, so the original call `reload_from_dir(wd=wd)` raised
    # TypeError. Pass an empty dict so the documented defaults
    # (n_feats=25, n_channels=9, ...) are used.
    reload_from_dir(wd=wd, manager_kwargs={})
| 4,152 | 41.814433 | 188 | py |
Elektrum | Elektrum-main/src/runAmber_simkinn.py | #!/usr/bin/env python
# coding: utf-8
# # Probablistic model building genetic algorithm
from silence_tensorflow import silence_tensorflow
silence_tensorflow()
from src.kinetic_model import KineticModel, modelSpace_to_modelParams
from src.neural_network_builder import KineticNeuralNetworkBuilder, KineticEigenModelBuilder
from src.neural_search import search_env
from src.model_spaces import get_sim_model_space as get_model_space
from src.reload import reload_from_dir
import warnings
warnings.filterwarnings('ignore')
import yaml
from sklearn.preprocessing import OneHotEncoder
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import seaborn as sns
import scipy.stats as ss
import pandas as pd
import numpy as np
from tqdm import tqdm
import tensorflow as tf
import os
import sys
import argparse
import pickle
import amber
print(amber.__version__)
from amber.architect import pmbga
def get_data(fp, logbase=10, noise_sigma=0):
    """Load a synthetic kinetics dataset and post-process the targets.

    Dispatches on the file extension (.csv or .tsv), optionally converts
    targets to log space with the given base, and optionally adds i.i.d.
    Gaussian noise of standard deviation ``noise_sigma``.

    Returns
    -------
    ((x_train, y_train), (x_test, y_test))
    """
    if fp.endswith("csv"):
        x_train, y_train, x_test, y_test = load_csv(fp)
    elif fp.endswith("tsv"):
        x_train, y_train, x_test, y_test = load_tsv(fp)
    else:
        raise Exception("filepath not understood: %s" % fp)
    if logbase is not None:
        # change of base: log_b(y) = ln(y) / ln(b)
        scale = np.log(logbase)
        y_train = np.log(y_train) / scale
        y_test = np.log(y_test) / scale
    if noise_sigma > 0:
        y_train = y_train + np.random.normal(0, noise_sigma, size=len(y_train))
        y_test = y_test + np.random.normal(0, noise_sigma, size=len(y_test))
    return (x_train, y_train), (x_test, y_test)
# data loaders for different formatted synthetic data
def load_csv(fp, output_act=True):
    """Load a synthetic dataset from a CSV file and one-hot encode sequences.

    Assumes the file's first column is an index, the next 50 columns hold
    single nucleotide characters (A/C/G/T), and columns named '50'/'51'
    hold two alternative target values -- TODO confirm against the data
    generator's output format.

    Parameters
    ----------
    fp : str
        Path to the CSV file.
    output_act : bool
        If True use column '50' (normalized to its max) as target;
        otherwise use column '51' as-is.

    Returns
    -------
    (train_ohe, y_train, test_ohe, y_test) with one-hot arrays of shape
    (n, 50, 4) and an 80/20 split fixed by random_state=777.
    """
    # one-hot encoder over the fixed A/C/G/T alphabet
    x_set = [['A'], ['C'], ['G'], ['T']]
    enc = OneHotEncoder(sparse=False, categories='auto')
    _ = enc.fit(x_set)
    # read in data
    with open(fp, 'r') as f:
        data = pd.read_csv(f, index_col=0)
    data.dropna(inplace=True)
    # row[1] is the row Series; [0:50] positionally slices the 50 base columns
    data['seq_ohe'] = [enc.transform(
        [[t] for t in row[1][0:50]]) for row in data.iterrows()]
    if output_act is True:
        data['obs'] = data['50']
        # normalize activity target to [0, 1] by its maximum
        data['obs'] /= data['obs'].max()
    else:
        data['obs'] = data['51']
    #print(data['obs'].describe())
    # split train-test
    gen_df = data
    X_train, X_test, y_train, y_test = train_test_split(
        gen_df['seq_ohe'].values, gen_df['obs'].values, test_size=0.2, random_state=777)
    # stack the per-sequence (50, 4) matrices into (n, 50, 4) arrays
    input_seqs_ohe = []
    for i in range(len(X_train)):
        input_seqs_ohe += [X_train[i]]
    test_seqs_ohe = []
    for i in range(len(X_test)):
        test_seqs_ohe += [X_test[i]]
    input_seqs_ohe = np.array(input_seqs_ohe)
    test_seqs_ohe = np.array(test_seqs_ohe)
    return input_seqs_ohe, y_train, test_seqs_ohe, y_test
def load_tsv(fp):
    """Load an eigenvalue-labelled synthetic dataset from a TSV file.

    Expects columns ``seq`` (DNA string over A/C/G/T) and ``first_eigval``;
    the regression target is the negated first eigenvalue.

    Returns
    -------
    (train_ohe, y_train, test_ohe, y_test) with one-hot arrays of shape
    (n, L, 4) and an 80/20 split fixed by random_state=777.
    """
    # fit a fixed A/C/G/T one-hot encoder
    enc = OneHotEncoder(sparse=False, categories='auto')
    enc.fit([['A'], ['C'], ['G'], ['T']])
    # read in data
    data = pd.read_table(fp)
    data.dropna(inplace=True)
    data['seq_ohe'] = [enc.transform([[base] for base in row['seq']])
                       for _, row in data.iterrows()]
    # smaller (more negative) eigenvalue = faster decay; flip sign for the target
    data['obs'] = - data['first_eigval']
    #print(data['obs'].describe())
    # split train-test
    X_train, X_test, y_train, y_test = train_test_split(
        data['seq_ohe'].values, data['obs'].values, test_size=0.2, random_state=777)
    # stack the per-sequence one-hot matrices into dense 3D arrays
    input_seqs_ohe = np.array(list(X_train))
    test_seqs_ohe = np.array(list(X_test))
    return input_seqs_ohe, y_train, test_seqs_ohe, y_test
def parse():
    """Parse command-line options for the KINN simulation search.

    Creates the working directory and persists the parsed namespace to
    ``<wd>/args.pkl`` for reproducibility.

    Returns
    -------
    argparse.Namespace
        The parsed run configuration.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--disable-posterior', action="store_true", default=False)
    parser.add_argument('--patience', type=int, default=200, required=False)
    parser.add_argument('--max-gen', type=int, default=600, required=False)
    parser.add_argument('--samps-per-gen', type=int, default=5, required=False)
    parser.add_argument('--wd', type=str, required=True)
    parser.add_argument('--data-file', type=str, required=True)
    parser.add_argument('--param-file', type=str, required=True)
    parser.add_argument('--use-sink-state', action="store_true", default=False)
    parser.add_argument('--logbase', type=float, default=10)
    parser.add_argument('--noise-sigma', type=float, default=0)
    args = parser.parse_args()
    os.makedirs(args.wd, exist_ok=True)
    # close the pickle file deterministically (the original leaked the handle
    # by passing an anonymous open() straight into pickle.dump)
    with open(os.path.join(args.wd, "args.pkl"), "wb") as f:
        pickle.dump(args, f)
    return args
def main():
    """Run the PMBGA search for a kinetic interpretable neural network (KINN)
    on synthetic data, then plot the best model's test predictions and the
    posterior distributions over the model space.

    Returns
    -------
    The trained PMBGA controller.
    """
    args = parse()
    kinn_model_space = get_model_space(use_sink_state=args.use_sink_state)
    print("use sink state:", args.use_sink_state)
    print(kinn_model_space)
    controller = pmbga.ProbaModelBuildGeneticAlgo(
        model_space=kinn_model_space,
        buffer_type='population',
        buffer_size=50,  # buffer size controlls the max history going back
        batch_size=1,    # batch size does not matter in this case; all arcs will be retrieved
        ewa_beta=0.0     # ewa_beta approximates the moving average over 1/(1-ewa_beta) prev points
    )
    res = get_data(fp=args.data_file, logbase=args.logbase, noise_sigma=args.noise_sigma)
    # unpack data tuple
    (x_train, y_train), (x_test, y_test) = res
    logbase = args.logbase
    # With a sink state, activity is read from the (negated) second state's
    # occupancy; otherwise log-transform the raw model output. Both clip to
    # avoid log(0).
    if args.use_sink_state:
        output_op = lambda: tf.keras.layers.Lambda(lambda x: tf.math.log(tf.clip_by_value(tf.reshape(- x[:,1], (-1,1)), 10**-16, 10**3))/np.log(logbase), name="output_slice")
    else:
        output_op = lambda: tf.keras.layers.Lambda(lambda x: tf.math.log(tf.clip_by_value(x, 10**-16, 10**3))/np.log(logbase), name="output_log")
    # trainEnv parameters
    evo_params = dict(
        model_fn = KineticEigenModelBuilder if args.use_sink_state else KineticNeuralNetworkBuilder,
        samps_per_gen = args.samps_per_gen, # how many arcs to sample in each generation; important
        max_gen = args.max_gen,
        patience = args.patience,
        n_warmup_gen = 0,
        train_data = (x_train, y_train),
        test_data = (x_test, y_test)
    )
    # this learning rate is trickier than usual, for eigendecomp to work
    initial_learning_rate = 0.05
    batch_size = 2048
    lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay(
        initial_learning_rate,
        decay_steps=10*int(20000/batch_size), # decrease every 10 epochs
        decay_rate=0.9,
        staircase=True)
    manager_kwargs = {
        'output_op': output_op,
        'n_feats': 50,
        'n_channels': 4,
        'batch_size': batch_size,
        'epochs': 20 if args.use_sink_state else 100,
        'earlystop': 5,
        # clipnorm guards against exploding gradients through the eigendecomposition
        'optimizer': lambda: tf.keras.optimizers.Adam(learning_rate=lr_schedule, clipnorm=1.0),
        'verbose': 0
    }
    controller, hist, stat_df = search_env(
        controller=controller,
        wd=args.wd,
        evo_params=evo_params,
        manager_kwargs=manager_kwargs,
        disable_posterior_update=args.disable_posterior
    )
    # plot the best model
    mb = reload_from_dir(wd=args.wd, manager_kwargs=manager_kwargs, model_fn=evo_params['model_fn'])
    tf.keras.utils.plot_model(mb.model, to_file=os.path.join(args.wd, "model.png"))
    y_hat = mb.predict(x_test).flatten()
    h = sns.jointplot(y_test, y_hat)
    h.set_axis_labels("obs", "pred", fontsize=16)
    p = ss.pearsonr(y_hat, y_test)
    h.fig.suptitle("Testing prediction, pcc=%.3f"%p[0], fontsize=16)
    plt.savefig(os.path.join(args.wd, "test_pred.png"))
    # for sim data: analyze post model distr
    print("plot model posterior")
    plot_model_post_distr(controller=controller, args=args)
    return controller
def plot_model_post_distr(controller, args):
    """Plot posterior vs. prior distributions learned by the PMBGA controller
    for each rate's search variables, against ground-truth synthetic params.

    Writes three 3x3 figure grids into ``args.wd``:
    ``syn_range_st.png`` (rate input start positions), ``syn_range_d.png``
    (rate input range widths) and ``syn_edge.png`` (edge presence).
    Ground-truth lines are drawn only for rate IDs < 7 (the rates that exist
    in the generating model read from ``args.param_file``).

    NOTE(review): uses the deprecated ``sns.distplot`` API.
    """
    # ground-truth params for synthetic data
    with open(args.param_file, 'r') as f:
        config = yaml.load(f, Loader=yaml.Loader)
    kinn_gr = KineticModel(config)
    # START SITE
    fig, axs_ = plt.subplots(3,3, figsize=(15,15))
    # flatten the 3x3 axes grid so axes can be indexed by rate ID
    axs = [axs_[i][j] for i in range(len(axs_)) for j in range(len(axs_[i]))]
    for k in controller.model_space_probs:
        if k[-1] == 'RANGE_ST':
            try:
                d = controller.model_space_probs[k].sample(size=1000)
            except:
                continue
            ax = axs[k[0]]
            _ = sns.distplot(d, label="Post", ax=ax)
            _ = sns.distplot(controller.model_space_probs[k].prior_dist, label="Prior", ax=ax)
            if k[0] < 7:
                _ = ax.axvline(x=kinn_gr.model_params['Rates'][k[0]]['input_range'][0],linestyle='--', color='grey')
                _ = ax.set_title(
                    ' '.join(['Rate ID', str(k[0]), '\nPosterior mean', str(np.mean(d)),
                              '\nGround truth', str(kinn_gr.model_params['Rates'][k[0]]['input_range'][0])])
                )
            else:
                _ = ax.set_title(
                    ' '.join(['Rate ID', str(k[0]), '\nPosterior mean', str(np.mean(d))]))
            #_ = ax.set_xlim(0,50)
    fig.tight_layout()
    fig.savefig(os.path.join(args.wd, "syn_range_st.png"))
    # CONV RANGE
    fig, axs_ = plt.subplots(3,3, figsize=(15,15))
    axs = [axs_[i][j] for i in range(len(axs_)) for j in range(len(axs_[i]))]
    for k in controller.model_space_probs:
        if k[-1] == 'RANGE_D':
            d = controller.model_space_probs[k].sample(size=1000)
            ax = axs[k[0]]
            _ = sns.distplot(d, ax=ax)
            _ = sns.distplot(controller.model_space_probs[k].prior_dist, label="Prior", ax=ax)
            if k[0] < 7:
                # ground-truth width = end - start of the rate's input range
                D = kinn_gr.model_params['Rates'][k[0]]['input_range'][1] - kinn_gr.model_params['Rates'][k[0]]['input_range'][0]
                _ = ax.axvline(x=D,linestyle='--', color='grey')
                _ = ax.set_title(
                    ' '.join(['Rate ID', str(k[0]), '\nPosterior mean', str(np.mean(d)), '\nGround truth', str(D)])
                )
            else:
                _ = ax.set_title(
                    ' '.join(['Rate ID', str(k[0]), '\nPosterior mean', str(np.mean(d))]))
            #_ = ax.set_xlim(0,20)
    fig.tight_layout()
    fig.savefig(os.path.join(args.wd, "syn_range_d.png"))
    # EDGE PRESENCE
    fig, axs_ = plt.subplots(3,3, figsize=(15,15))
    axs = [axs_[i][j] for i in range(len(axs_)) for j in range(len(axs_[i]))]
    for k in controller.model_space_probs:
        if k[-1] == 'EDGE':
            d = controller.model_space_probs[k].sample(size=1000)
            ax = axs[k[0]]
            sns.distplot(d, ax=ax)
            sns.distplot(controller.model_space_probs[k].prior_dist, ax=ax)
            ax.set_title(
                ' '.join(['Rate ID', str(k[0]), '\nPosterior mean', str(np.mean(d))]))
            #_ = ax.set_xlim(0,20)
    fig.tight_layout()
    fig.savefig(os.path.join(args.wd, "syn_edge.png"))
fig.savefig(os.path.join(args.wd, "syn_edge.png"))
# Script entry point; skipped when imported into an IPython/Jupyter session.
if __name__ == "__main__":
    if not amber.utils.run_from_ipython():
        main()
| 11,053 | 38.906137 | 174 | py |
Elektrum | Elektrum-main/src/crispr_kinn_predict.py | """a script that wraps up the calling and parsing of CRISPR-OffTarget predictions
FZZ, 2022.03.15
"""
import numpy as np
import pandas as pd
import warnings
from src.reload import reload_from_dir
from src.neural_network_builder import KineticNeuralNetworkBuilder, KineticEigenModelBuilder
#import tensorflow as tf
from amber.utils import corrected_tf as tf
from tqdm import tqdm
import os
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.metrics import average_precision_score, roc_auc_score
import scipy.stats as ss
from src.encode_seq import get_letter_index, make_alignment, featurize_alignment
# Paths to the two trained KINN and two trained deep-CNN runs (one per
# held-out gRNA); predictions are later averaged across the pair.
config = {
    'kinn_1': "outputs/bak_20220515/KINN-wtCas9_cleave_rate_log-finkelstein-0-rep2-gRNA1",
    'kinn_2': "outputs/bak_20220515/KINN-wtCas9_cleave_rate_log-finkelstein-0-rep1-gRNA2",
    'dcnn_1': "outputs/CNN-wtCas9_cleave_rate_log-0/",
    'dcnn_2': "outputs/CNN-wtCas9_cleave_rate_log-1/",
}
# trainEnv parameters
evo_params = dict(
    model_fn = KineticNeuralNetworkBuilder,
    #model_fn = KineticEigenModelBuilder,
    samps_per_gen = 10, # how many arcs to sample in each generation; important
    max_gen = 200,
    patience = 50,
    n_warmup_gen = 0,
    #train_data = (x_train, y_train),
    #test_data = (x_test, y_test)
)
# manager configs used when reloading the KINN models (must match training)
manager_kwargs={
    'output_op': lambda: tf.keras.layers.Lambda(lambda x: tf.math.log(x)/np.log(10), name="output_log"), # change the clip as well
    #'output_op': lambda: tf.keras.layers.Lambda(lambda x: tf.math.log(tf.math.maximum(tf.reshape(- x[:,1], (-1,1)), 10**-5))/np.log(10), name="output_slice"),
    'n_feats': 25, # remember to change this!!
    'n_channels': 9,
    'batch_size': 128,
    'epochs': 30,
    'earlystop': 10,
    'verbose': 0
}
def plot_dataframe(data):
    """Visualize KINN predictions for an off-target validation dataframe.

    Left panel: percentile curve of predicted rates on the negative set
    (``Read == 0``). Right panel: scatter of predicted rate vs. log10 read
    count on the positive set (``Read > 0``), colored by sgRNA type, with
    the Pearson correlation in the title.

    Parameters
    ----------
    data : pandas.DataFrame
        Must contain columns ``Read``, ``kinn`` and ``sgRNA_type`` (e.g. as
        produced by :func:`predict_on_dataframe`).

    Returns
    -------
    matplotlib.figure.Figure
    """
    fig, axs = plt.subplots(1,2, figsize=(13,6))
    # positive set: sites with at least one observed edit read
    plot_df = data.query('Read>0')
    ax = axs[1]
    sns.scatterplot(x='kinn', y=np.log10(plot_df.Read+1), hue='sgRNA_type', alpha=0.5, data=plot_df, ax=ax)
    ax.set_ylabel('log10(Read+1)')
    ax.set_xlabel('KINN log10(cleavage rate)')
    ax.legend(bbox_to_anchor=(1.01, 1), loc='upper left', prop={'size': 10})
    pcc = ss.pearsonr(plot_df['kinn'], np.log10(plot_df['Read']+1))
    ax.set_title("Positive Set Predictions (OffTargets)\nPearson=%.3f, p=%.3f, n=%i" % (pcc[0], pcc[1], plot_df.shape[0]))
    #ax = axs[1,1]
    #sns.scatterplot(x='dcnn', y=np.log10(plot_df.Read+1), hue='sgRNA_type', alpha=0.5, data=plot_df, ax=ax)
    #ax.set_ylabel('log10(Read+1)')
    #ax.set_xlabel('DCNN log10(cleavage rate)')
    #ax.legend(bbox_to_anchor=(1.01, 1), loc='upper left', prop={'size': 10})
    #pcc = ss.pearsonr(plot_df['dcnn'], np.log10(plot_df['Read']+1))
    #ax.set_title("Positive Set Predictions (OffTargets)\nPearson=%.3f, p=%.3f, n=%i" % (pcc[0], pcc[1], plot_df.shape[0]))
    # negative set: sites with zero reads; show prediction percentiles
    plot_df = data.query('Read==0')
    q = np.arange(0,100.01,0.01)
    ax = axs[0]
    sns.lineplot(x=q, y=np.percentile(plot_df.kinn, q), marker="o", markersize=6, ax=ax)
    ax.set_xlabel("Percentile")
    ax.set_ylabel("KINN log10(cleavage rate)")
    ax.set_title("Negative Set Predictions (non-edited)\nn=%i"%(plot_df.shape[0]))
    #ax = axs[1,0]
    #sns.lineplot(x=q, y=np.percentile(plot_df.dcnn, q), marker="o", markersize=6, ax=ax)
    #ax.set_xlabel("Percentile")
    #ax.set_ylabel("DCNN log10(cleavage rate)")
    #ax.set_title("Negative Set Predictions (non-edited)\nn=%i"%(plot_df.shape[0]))
    fig.tight_layout()
    return fig
def predict_on_dataframe(data, is_aligned=True):
    """Score sgRNA/off-target pairs with the trained KINN and deep-CNN models.

    Assumes the first and second columns are sgRNA,OffTarget respectively.
    The two replicate models of each type are averaged into the ``kinn`` and
    ``dcnn`` columns, and classification metrics are computed against the
    dataframe's ``label`` column.

    Parameters
    ----------
    data : pandas.DataFrame
        First two columns: sgRNA and off-target sequences (pre-aligned);
        must also carry ``label`` and ``sgRNA_type`` columns.
    is_aligned : bool
        Only True is supported; on-the-fly alignment is not implemented.

    Returns
    -------
    (data, fea, metrics)
        The dataframe with prediction columns added, the featurized
        alignments, and a dict of AUROC/AUPR and count statistics.
    """
    ltidx = get_letter_index(build_indel=True)
    if is_aligned:
        # sequences are reversed (str[::-1]) before featurization -- the
        # models read alignments PAM-first; TODO confirm orientation
        alignments = [x[1].str[::-1].tolist() for x in tqdm(data.iloc[:,[0,1]].iterrows(), total=data.shape[0])]
    else:
        # sanitize sequences and perform alignments
        raise NotImplementedError
    fea = featurize_alignment(alignments, ltidx)
    # load kinn (both replicates share one TF session)
    sess = tf.Session()
    kinn_1 = reload_from_dir(wd=config['kinn_1'],
            sess=sess,
            manager_kwargs=manager_kwargs,
            model_fn=evo_params['model_fn']
            )
    kinn_2 = reload_from_dir(wd=config['kinn_2'],
            sess=sess,
            manager_kwargs=manager_kwargs,
            model_fn=evo_params['model_fn']
            )
    # load cnn: pick the trial with the best (max) reward from train history
    wd = config['dcnn_1']
    train_hist = pd.read_table(os.path.join(wd, "train_history.csv"), sep=",", header=None)
    best_trial_id = train_hist.sort_values(2, ascending=False).head(1)[0]
    dcnn_1 = tf.keras.models.load_model(os.path.join(wd, "weights", "trial_%i"%best_trial_id, "bestmodel.h5"))
    wd = config['dcnn_2']
    train_hist = pd.read_table(os.path.join(wd, "train_history.csv"), sep=",", header=None)
    best_trial_id = train_hist.sort_values(2, ascending=False).head(1)[0]
    dcnn_2 = tf.keras.models.load_model(os.path.join(wd, "weights", "trial_%i"%best_trial_id, "bestmodel.h5"))
    data['kinn_1'] = kinn_1.predict(fea)
    data['kinn_2'] = kinn_2.predict(fea)
    data['dcnn_1'] = dcnn_1.predict(fea)
    data['dcnn_2'] = dcnn_2.predict(fea)
    # ensemble by averaging the two replicate predictions per model type
    data['kinn'] = data[['kinn_1', 'kinn_2']].mean(axis=1)
    data['dcnn'] = data[['dcnn_1', 'dcnn_2']].mean(axis=1)
    metrics = {
        'auroc.kinn': roc_auc_score(y_true=data.label, y_score=data.kinn),
        'aupr.kinn': average_precision_score(y_true=data.label, y_score=data.kinn),
        'auroc.cnn': roc_auc_score(y_true=data.label, y_score=data.dcnn),
        'aupr.cnn': average_precision_score(y_true=data.label, y_score=data.dcnn),
        'num.total': data.shape[0],
        'num.unique_gRNAs': len(set(data['sgRNA_type'])),
        'num.offtarget': data.label.sum()
    }
    return data, fea, metrics
| 5,888 | 41.366906 | 159 | py |
Elektrum | Elektrum-main/src/tests/test_transfer_learn.py |
from distutils.sysconfig import customize_compiler
import tensorflow as tf
import numpy as np
import os
from src.data import load_finkelstein_data as get_data
from src.transfer_learn import KinnLayer
from src.reload import reload_from_dir
# test: smoke-test KinnLayer as a frozen head inside a larger keras model.
# Checks (1) forward pass matches an independently reloaded KINN, (2) the
# frozen KINN weights do not move during training, (3) save/load round-trip.
include_ref = True
seq = tf.keras.layers.Input((25,13), name='seq')
# small trainable trunk feeding the KinnLayer alongside the raw sequence
x = tf.keras.layers.GlobalAveragePooling1D()(seq)
x = tf.keras.layers.Dense(units=10)(x)
out = KinnLayer(
    kinn_dir="outputs/2022-05-21/KINN-wtCas9_cleave_rate_log-finkelstein-0-rep4-gRNA1/",
    manager_kws={'output_op': lambda: tf.keras.layers.Lambda(lambda x: tf.math.log(x)/np.log(10), name="output")},
    channels=np.arange(4,13),
    kinn_trainable=False
)([x, seq])
model = tf.keras.models.Model(inputs=seq, outputs=out)
# reference: the same KINN reloaded standalone, for output comparison
kinn_indep = reload_from_dir(
    wd="outputs/2022-05-21/KINN-wtCas9_cleave_rate_log-finkelstein-0-rep4-gRNA1/",
    manager_kwargs={'output_op': lambda: tf.keras.layers.Lambda(lambda x: tf.math.log(x)/np.log(10), name="output")},
    sess=tf.keras.backend.get_session()
)
(x_train, y_train), (x_test, y_test) = get_data(logbase=10, include_ref=True)
# test forward
print("FORWARD")
print(model.predict(x_test[0:10]))
print('-'*10)
# snapshot the embedded KINN's predictions before any training
kinn_before_train = model.layers[-1].kinn_header.predict(model.layers[-1].mb.blockify_seq_ohe(x_test[0:3, :, 4:]))
kinn_indep_pred = kinn_indep.model.predict(model.layers[-1].mb.blockify_seq_ohe(x_test[0:10, :, 4:]))
print(kinn_indep_pred)
print('-'*10)
# test backward
print("BACKWARD")
model.compile(loss='mse', optimizer='adam')
losses = []
for _ in range(15):
    losses.append(model.train_on_batch(x=x_test[0:3], y=np.ones((3,1))))
print(losses)
print(model.predict(x_test[0:3]))
# with kinn_trainable=False, the embedded KINN must be unchanged by training
kinn_after_train = model.layers[-1].kinn_header.predict(model.layers[-1].mb.blockify_seq_ohe(x_test[0:3, :, 4:]))
assert np.array_equal(kinn_before_train, kinn_after_train), "KINN changed"
# test save & load
print("SAVE & LOAD")
model.save_weights('test.h5')
model.load_weights('test.h5')
model.save('test.h5')
# not passing
#model = tf.keras.models.load_model('test.h5', custom_objects={'KinnLayer': KinnLayer}) | 2,094 | 34.508475 | 117 | py |
MSVoxelDNN | MSVoxelDNN-master/training/ms_voxel_cnn_training.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import Dataset
import numpy as np
import matplotlib.pyplot as plt
import random as rn
import os
import time
from torchvision import datasets, transforms
import argparse
from torchsummary import summary
from pyntcloud import PyntCloud
from glob import glob
from utils.training_tools import save_ckp,load_ckp,compute_metric, Rotation, Random_sampling
import datetime
from torchvision.transforms import Compose
from torch.utils.tensorboard import SummaryWriter
'''RESNET BLOCKS'''
def conv_bn(in_channels, out_channels, *args, **kwargs):
    """3D convolution (extra args forwarded to ``nn.Conv3d``) followed by
    batch normalization over ``out_channels``."""
    conv = nn.Conv3d(in_channels, out_channels, *args, **kwargs)
    norm = nn.BatchNorm3d(out_channels)
    return nn.Sequential(conv, norm)
class ResnetBlock(nn.Module):
    """Bottleneck residual block: 1x1 -> 3x3 -> 1x1 convolutions (each with
    batch norm via ``conv_bn``), an identity skip connection, and a final
    ReLU. Input and output channel counts are both ``in_filters``."""

    def __init__(self, in_filters, out_filters, *args, **kwargs):
        super().__init__()
        self.in_filters = in_filters
        self.out_filters = out_filters
        # NOTE: the submodule is named `block` to keep state_dict keys stable
        self.block = nn.Sequential(
            conv_bn(in_filters, out_filters, kernel_size=1, stride=1, padding=0, bias=False),
            nn.ReLU(),
            conv_bn(out_filters, out_filters, kernel_size=3, stride=1, padding=1, bias=False),
            nn.ReLU(),
            conv_bn(out_filters, in_filters, kernel_size=1, stride=1, padding=0, bias=False),
        )

    def forward(self, x):
        # residual sum followed by ReLU
        return F.relu(self.block(x) + x)
class ResNet_kkk(nn.Module):
    """Feature trunk with no downsampling: a 7^3 stem convolution followed by
    ``nblocks`` residual blocks; spatial dimensions are preserved (k,k,k)."""

    def __init__(self, in_filters, out_filters, nblocks, block):
        super().__init__()
        self.in_filters = in_filters
        self.out_filters = out_filters
        self.nblocks = nblocks
        self.block = block
        layers = [nn.Conv3d(in_filters, out_filters, kernel_size=7, stride=1, padding=3, bias=False)]
        layers.extend(block(out_filters, out_filters) for _ in range(nblocks))
        self.blocks = nn.Sequential(*layers)

    def forward(self, x):
        return self.blocks(x)
class ResNet_kk2k(nn.Module):
    """Feature trunk halving only the last (W) dimension: 7^3 stem conv, a
    3^3 conv with stride (1,1,2), then ``nblocks`` residual blocks."""

    def __init__(self, in_filters, out_filters, nblocks, block):
        super().__init__()
        self.in_filters = in_filters
        self.out_filters = out_filters
        self.nblocks = nblocks
        self.block = block
        layers = [
            nn.Conv3d(in_filters, out_filters, kernel_size=7, stride=1, padding=(3, 3, 3), bias=False),
            nn.Conv3d(out_filters, out_filters, kernel_size=3, stride=(1, 1, 2), padding=(1, 1, 1), bias=False),
        ]
        layers.extend(block(out_filters, out_filters) for _ in range(nblocks))
        self.blocks = nn.Sequential(*layers)

    def forward(self, x):
        return self.blocks(x)
class ResNet_k2k2k(nn.Module):
    """Feature trunk halving the H and W dimensions: 7^3 stem conv, a 3^3
    conv with stride (1,2,2), then ``nblocks`` residual blocks."""

    def __init__(self, in_filters, out_filters, nblocks, block):
        super().__init__()
        self.in_filters = in_filters
        self.out_filters = out_filters
        self.nblocks = nblocks
        self.block = block
        layers = [
            nn.Conv3d(in_filters, out_filters, kernel_size=7, stride=1, padding=(3, 3, 3), bias=False),
            nn.Conv3d(out_filters, out_filters, kernel_size=3, stride=(1, 2, 2), padding=(1, 1, 1), bias=False),
        ]
        layers.extend(block(out_filters, out_filters) for _ in range(nblocks))
        self.blocks = nn.Sequential(*layers)

    def forward(self, x):
        return self.blocks(x)
class ResNet_2k2k2k(nn.Module):
    """Feature trunk halving all three spatial dimensions: 7^3 stem conv, a
    3^3 conv with stride (2,2,2), then ``nblocks`` residual blocks."""

    def __init__(self, in_filters, out_filters, nblocks, block):
        super().__init__()
        self.in_filters = in_filters
        self.out_filters = out_filters
        self.nblocks = nblocks
        self.block = block
        layers = [
            nn.Conv3d(in_filters, out_filters, kernel_size=7, stride=1, padding=(3, 3, 3), bias=False),
            nn.Conv3d(out_filters, out_filters, kernel_size=3, stride=(2, 2, 2), padding=(1, 1, 1), bias=False),
        ]
        layers.extend(block(out_filters, out_filters) for _ in range(nblocks))
        self.blocks = nn.Sequential(*layers)

    def forward(self, x):
        return self.blocks(x)
'''Voxel CNN BLOCKS'''
class maskedConv3D(nn.Conv3d):
    """3D convolution with an autoregressive raster-scan mask (3D PixelCNN).

    Mask type 'A' also hides the centre voxel (use for the first layer);
    type 'B' keeps it visible (use for subsequent layers). Everything after
    the centre in (depth, height, width) raster order is always masked out.
    """

    def __init__(self, masktype, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.register_buffer('mask', self.weight.data.clone())
        _, _, depth, height, width = self.weight.size()
        cd, ch, cw = depth // 2, height // 2, width // 2
        self.mask.fill_(1)
        # centre row: zero from the centre voxel onward ('B' keeps the centre)
        self.mask[:, :, cd, ch, cw + (masktype == 'B'):] = 0
        # rows below the centre within the centre depth slice
        self.mask[:, :, cd, ch + 1:, :] = 0
        # all depth slices after the centre slice
        self.mask[:, :, cd + 1:, :, :] = 0

    def forward(self, x):
        # zero the masked weights in place before every convolution
        self.weight.data *= self.mask
        return super(maskedConv3D, self).forward(x)
class maskedResnet(nn.Module):
    """PixelCNN-style residual block in 3D.

    A 1x1 conv halves the channel count (2*no_filters -> no_filters), a
    masked 'B' 3^3 conv mixes causal context, and a final 1x1 conv restores
    2*no_filters channels before the additive skip connection.
    """

    def __init__(self, no_filters):
        super().__init__()
        self.no_filters = no_filters
        # submodule names conv2a/b/c preserved for checkpoint compatibility
        self.conv2a = nn.Conv3d(in_channels=2 * no_filters, out_channels=no_filters,
                                kernel_size=1, stride=1, padding=0)
        self.conv2b = maskedConv3D(masktype='B', in_channels=no_filters,
                                   out_channels=no_filters, kernel_size=3, stride=1, padding=1)
        self.conv2c = nn.Conv3d(in_channels=no_filters, out_channels=2 * no_filters,
                                kernel_size=1, stride=1, padding=0)

    def forward(self, x):
        out = F.relu(self.conv2a(x))
        out = F.relu(self.conv2b(out))
        # no activation after the final 1x1 conv; just the residual sum
        return self.conv2c(out) + x
class VoxelCNN(nn.Module):
    """Masked CNN head applied patch-by-patch by MSVoxelCNN.

    Consumes a feature volume with ``input_channel`` channels and produces
    2 logits per voxel (empty / occupied), reshaped for cross-entropy.
    """
    def __init__(self, input_channel):
        super().__init__()
        self.pixelcnn = nn.Sequential(
            maskedConv3D(masktype='A', in_channels=input_channel, out_channels=64, kernel_size=7, stride=1, padding=3),
            maskedResnet(32),  # expects 64 = 2*32 input channels from the layer above
            maskedConv3D(masktype='B', in_channels=64, out_channels=32, kernel_size=1, stride=1, padding=0),
            nn.ReLU(),
            maskedConv3D(masktype='B', in_channels=32, out_channels=2, kernel_size=1, stride=1, padding=0),
            nn.ReLU(),
        )
    def forward(self, x):
        # print(x.size())
        batch, cin, d, h, w = x.size()
        # print(batch, cin, h,w)
        # guard against NaNs leaking in from the upstream feature extractor
        assert torch.sum(torch.isnan(x)) == 0
        out = self.pixelcnn(x)
        # print(out.size())
        # (batch, 2 classes, D, H, W), ready for F.cross_entropy
        out = out.view(batch, 2, d, h, w)
        # out = out.permute(0, 1, 3, 4, 2)
        # print(out.shape)
        return out
''' BUILDING PIPELINE, MERGE AND SPLIT'''
class MSVoxelCNN(nn.Module):
    """Multi-scale VoxelCNN: a ResNet feature extractor (variant selected by
    ``group``) followed by a shared VoxelCNN head applied to each of the
    Mpatch^3 spatial patches independently, then stitched back together.
    """
    def __init__(self, Mpatch, input_channel, input_size, no_resnet, group):
        super().__init__()
        self.Mpatch = Mpatch                # patches per spatial axis
        self.input_channel = input_channel
        self.input_size = input_size
        self.VoxelCNN = VoxelCNN(32)        # head consumes the 32-channel ResNet features
        self.patch_size = self.input_size // self.Mpatch
        self.group = group
        # The ResNet variant matches each group's input geometry: kkk keeps a
        # cube, kk2k / k2k2k / 2k2k2k downsample progressively more axes.
        if (self.group <= 1):
            self.Resnet = ResNet_kkk(1, 32, no_resnet,
                                     ResnetBlock)  # 1 is number of input channel ,32 is number of output, 12 is number of resnet block
        elif (self.group == 2):
            self.Resnet = ResNet_kk2k(1, 32, no_resnet, ResnetBlock)
        elif (self.group == 3 or self.group == 4):
            self.Resnet = ResNet_k2k2k(1, 32, no_resnet, ResnetBlock)
        else:
            self.Resnet = ResNet_2k2k2k(1, 32, no_resnet, ResnetBlock)
    def forward(self, x):
        #x=self.maxpooling(x)
        ResnetFeature = self.Resnet(x)
        # carve the feature volume into patch_size^3 cubes along D/H/W
        patches = ResnetFeature.unfold(2, self.patch_size, self.patch_size).unfold(3, self.patch_size, self.patch_size).unfold(4, self.patch_size, self.patch_size)
        unfold_shape = patches.size()
        # NOTE(review): allocated on CPU regardless of x.device -- confirm this
        # is intended when running on GPU.
        patches_orig = torch.zeros(unfold_shape[0], 2, unfold_shape[2], unfold_shape[3], unfold_shape[4], unfold_shape[5], unfold_shape[6], unfold_shape[7])
        # run the shared head on every patch independently
        for i in range(unfold_shape[2]):
            for j in range(unfold_shape[3]):
                for k in range(unfold_shape[4]):
                    patches_orig[:, :, i, j, k, :, :, :] = self.VoxelCNN(patches[:, :, i, j, k, :, :, :])
        output_d = unfold_shape[2] * unfold_shape[5]
        output_h = unfold_shape[3] * unfold_shape[6]
        output_w = unfold_shape[4] * unfold_shape[7]
        # interleave (patch index, within-patch offset) per axis to rebuild the volume
        patches_orig = patches_orig.permute(0, 1, 2, 5, 3, 6, 4, 7).contiguous()
        patches_orig = patches_orig.view(unfold_shape[0], 2, output_d, output_h, output_w)
        return patches_orig
class PCdataset(Dataset):
    """Dataset of .ply files voxelised on the fly into dense 1x64x64x64
    occupancy blocks.  Unreadable samples yield ``None`` (the companion
    ``collate_fn`` drops them).
    """
    def __init__(self, files, transforms=None):
        self.files = np.asarray(files)
        self.transforms = transforms
    def __len__(self):
        return len(self.files)
    def __getitem__(self, idx):
        pc = PyntCloud.from_file(self.files[idx])
        points = pc.points.to_numpy()[:, :3]  # keep xyz, drop colour/attributes
        if (self.transforms):
            points = self.transforms(points)
        try:
            points = np.unique(points, axis=0)
        except:
            # NOTE(review): bare except silently drops the sample and hides the
            # real error -- consider catching the specific exception type.
            return None
        points = torch.from_numpy(points).type(torch.LongTensor)
        #print(points.shape)
        v = torch.ones(points.shape[0])
        # scatter the point list into a dense occupancy grid via a sparse tensor
        dense_block = torch.sparse.FloatTensor(torch.transpose(points, 0, 1), v, torch.Size([64, 64, 64])).to_dense().view(1, 64, 64, 64)
        #print(dense_block.shape, torch.max(dense_block), torch.min(dense_block), torch.count_nonzero(dense_block))
        return dense_block
def collate_fn(batch):
    """Collate that first drops samples the dataset failed to load (None)."""
    kept = [sample for sample in batch if sample is not None]
    return torch.utils.data.dataloader.default_collate(kept)
def data_collector(training_dirs, params):
    """Gather .ply blocks recursively from ``training_dirs``, split them into
    train/test by the immediate parent-folder name, and return the two
    DataLoaders (``params`` is forwarded to DataLoader).
    """
    total_files = []
    for training_dir in training_dirs:
        training_dir = training_dir + '**/*.ply'
        files = glob(training_dir, recursive=True)
        print('Total files: ', len(files))
        total_files_len = len(files)
        total_files = np.concatenate((total_files, files), axis=0)
        print('Selected ', len(files), ' from ', total_files_len, ' in ', training_dir)
    assert len(total_files) > 0
    rn.shuffle(total_files)  # shuffle file
    print('Total blocks for training: ', len(total_files))
    # parent directory name ('train'/'test') decides the split
    files_cat = np.array([os.path.split(os.path.split(x)[0])[1] for x in total_files])
    files_train = total_files[files_cat == 'train']
    files_valid = total_files[files_cat == 'test']
    rotation = Rotation(64)
    sampling = Random_sampling()
    #rotation, sampling,
    # NOTE(review): transforms_ is built but never passed to PCdataset below,
    # so no augmentation is actually applied -- confirm whether intentional.
    transforms_ = Compose([rotation, sampling])
    #,transforms.ToTensor()
    training_set = PCdataset(files_train)
    training_generator = torch.utils.data.DataLoader(training_set, collate_fn=collate_fn, **params)
    # Validation data
    valid_set = PCdataset(files_valid)
    valid_generator = torch.utils.data.DataLoader(valid_set, collate_fn=collate_fn, **params)
    return training_generator, valid_generator
def index_hr(group, d, h, w):
    """Return per-axis index arrays selecting the already-decoded voxels that
    form the network input for ``group`` (0-based, i.e. ``group-1`` of the
    1..7 encoding groups).

    A stride of 2 keeps only even coordinates along that axis; a stride of 1
    keeps the full axis.
    """
    # Per-group strides along (depth, height, width).
    strides = [(2, 2, 2), (2, 2, 1), (2, 1, 1), (2, 1, 1),
               (1, 1, 1), (1, 1, 1), (1, 1, 1)]
    sd, sh, sw = strides[group]
    return [np.arange(0, d, sd), np.arange(0, h, sh), np.arange(0, w, sw)]
def index_lr(group, d, h, w):
    """Return the per-axis indices of input voxels that must be zeroed (not
    yet decoded) for ``group`` (0-based), or ``None`` when nothing needs
    masking.
    """
    # (start, step) per axis; None means no masking for that group.
    specs = [None,
             None,
             ((0, 1), (1, 2), (1, 2)),
             None,
             ((1, 2), (0, 2), (1, 2)),
             ((1, 2), (1, 2), (0, 1)),
             ((1, 2), (1, 2), (1, 2))]
    spec = specs[group]
    if spec is None:
        return None
    return [np.arange(start, size, step)
            for (start, step), size in zip(spec, (d, h, w))]
def train(use_cuda, batch_size,low_res, max_epochs,group, output_model, dataset_path, valid_loss_min, model,
          optimizer, start_epoch, ds_level):
    """Train one MSVoxelCNN group model at one downsampling level.

    The network input is built from already-decoded voxels of the current
    scale (selected by index_hr, masked by index_lr); the target is the
    sub-lattice of voxels belonging to ``group`` (one of the 8 parity
    offsets of a 2x2x2 cell).  Saves a checkpoint every epoch, keeps the
    best model by validation loss, and early-stops after 10 stale epochs.
    """
    # tensorboard writers (timestamped train/test log dirs)
    current_time = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
    train_log_dir = output_model + 'log' + current_time + '/train'
    test_log_dir = output_model + 'log' + current_time + '/test'
    train_summary_writer = SummaryWriter(train_log_dir)
    test_summary_writer = SummaryWriter(test_log_dir)
    # checkpoint setup
    checkpoint_path = output_model + "current_checkpoint.pt"
    best_model_path = output_model + "best_model.pt"
    eps = 1e-8  # added to logits to avoid exact zeros downstream
    params = {'batch_size': batch_size,
              'shuffle': True,
              'num_workers': 2}
    device = torch.device("cuda" if use_cuda else "cpu")
    model = model.to(device)
    training_generator, valid_generator=data_collector(dataset_path,params)
    # maxpool_n1 downsamples to the CURRENT scale; maxpool_n one level further
    # (the lower resolution used as input context for group 0).
    maxpool_n1 = nn.Sequential(
        *[nn.MaxPool3d(kernel_size=2) for _ in range(ds_level - 1)]
    )
    maxpool_n = nn.Sequential(
        *[nn.MaxPool3d(kernel_size=2) for _ in range(ds_level)]
    )
    train_loss = 0
    train_losses = []
    best_val_epoch = None
    output_period=len(training_generator)//20
    block_size=int(low_res*2)
    # idx = parity offset (dz, dy, dx) of this group within a 2x2x2 cell
    idx = np.unravel_index(group, (2, 2, 2))
    print('Start the training for group: ', group, ' with lower resoltuion is ', low_res)
    for epoch in range(start_epoch, max_epochs):
        for batch_idx, x in enumerate(training_generator):
            x=x.to(device)
            if (group == 0):
                # group 0 is conditioned only on the next-lower resolution
                input = maxpool_n(x)
            else:
                # later groups also see already-decoded voxels of this scale
                input = maxpool_n1(x)
                index = index_hr(group - 1, block_size, block_size, block_size)
                input = input[:, :, index[0][:, None, None], index[1][:, None], index[2]]
                _, _, ld, lh, lw = input.shape
                index_0 = index_lr(group - 1, ld, lh, lw)
                if (index_0 is not None):
                    # zero out voxels that are not decoded yet at this group
                    input[:, :, index_0[0][:, None, None], index_0[1][:, None], index_0[2]] = 0
                #if(batch_idx==1):
                #    print(input.shape)
                if (group == 5):
                    input[:, :, 1:ld:2, 1:lh:2, :]=0
            if(epoch==0 and batch_idx==0):
                print('Input shape: ', input.shape)
            # target: the current-scale sub-lattice owned by this group
            # NOTE(review): target stays on CPU (LongTensor) while predict may
            # be on CUDA -- confirm behaviour when use_cuda is True.
            target = maxpool_n1(x.clone().detach())[:, :, idx[0]:block_size:2, idx[1]:block_size:2, idx[2]:block_size:2].view(x.shape[0], low_res, low_res,
                                                                                                                             low_res).type(torch.LongTensor)
            predict = model(input) + eps
            #print(predict.shape, torch.max(target), torch.min(target), target.shape)
            loss = F.cross_entropy(predict,target) # predict: shape of input: https://pytorch.org/docs/stable/nn.functional.html#cross-entropy
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            optimizer.zero_grad()
            # running mean of the loss over the epoch
            train_loss = train_loss + ((1 / (batch_idx + 1)) * (loss.item() - train_loss))
            tp, fp, tn, fn, precision, recall, accuracy, specificity, f1 = compute_metric(predict, target,
                                                                                          train_summary_writer, len(
                    training_generator) * epoch + batch_idx)
            train_summary_writer.add_scalar("bc/loss", train_loss, len(training_generator) * epoch + batch_idx)
            if(batch_idx%output_period==0):
                print("Batch {} over {}: \tloss : {:.6f}\t accuracy : {:.3f} tp : {:.2f} fp : {:.2f} tn : {:.2f} fn : {:.2f} f1 : {:.4f}".format(batch_idx,len(training_generator), train_loss, accuracy, tp, fp, tn, fn, f1), end='\r')
            del loss, target,predict
        train_losses.append(train_loss)
        #print(train_losses)
        # validation
        # NOTE(review): model.eval() is never undone by model.train() before
        # the next epoch -- harmless here only if the model has no
        # dropout/batch-norm; confirm.
        valid_loss = 0
        model.eval()
        for batch_idx, x in enumerate(valid_generator):
            x=x.to(device)
            if (group == 0):
                input = maxpool_n(x)
            else:
                input = maxpool_n1(x)
                index = index_hr(group - 1, block_size, block_size, block_size)
                input = input[:, :, index[0][:, None, None], index[1][:, None], index[2]]
                _, _, ld, lh, lw = input.shape
                index_0 = index_lr(group - 1, ld, lh, lw)
                if (index_0 is not None):
                    input[:, :, index_0[0][:, None, None], index_0[1][:, None], index_0[2]] = 0
                if (group == 5):
                    input[:, :, 1:ld:2, 1:lh:2, :]=0
            target = maxpool_n1(x.clone().detach())[:, :, idx[0]:block_size:2, idx[1]:block_size:2, idx[2]:block_size:2].view(
                x.shape[0], low_res, low_res,
                low_res).type(torch.LongTensor)
            output = model(input) + eps
            loss = F.cross_entropy(output, target)
            valid_loss = valid_loss + ((1 / (batch_idx + 1)) * (loss.item() - valid_loss))
            valid_loss=valid_loss
            del loss, target, output
        test_summary_writer.add_scalar("bc/loss", valid_loss, epoch)
        print('Training for group: ', group, ' downsample level: ', ds_level)
        print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
            epoch,
            train_loss,
            valid_loss))
        # saving model
        # create checkpoint variable and add important data
        checkpoint = {
            'epoch': epoch + 1,
            'valid_loss_min': valid_loss,
            'state_dict': model.state_dict(),
            'optimizer': optimizer.state_dict(),
        }
        # save checkpoint
        save_ckp(checkpoint, False, checkpoint_path, best_model_path)
        if valid_loss <= valid_loss_min or best_val_epoch ==None:
            print('Validation loss decreased ({:.6f} --> {:.6f}). Saving model ...'.format(valid_loss_min, valid_loss))
            # save checkpoint as best model
            save_ckp(checkpoint, True, checkpoint_path, best_model_path)
            valid_loss_min = valid_loss
            best_val_epoch=epoch
        if(epoch-best_val_epoch>=10):
            print('Early stopping detected')
            break
# /datnguyen_dataset/database/Modelnet40/ModelNet40_200_pc512_oct3/
if __name__ == "__main__":
    # Command line main application function.
    parser = argparse.ArgumentParser(description='Encoding octree')
    parser.add_argument("-scratch", '--scratch', type=lambda x: (str(x).lower() in ['true','1', 'yes']),
                        default=False,
                        help='Training from scratch or checkpoint')
    # NOTE(review): argparse type=bool treats any non-empty string as True,
    # so "-usecuda False" still enables CUDA -- confirm intended.
    parser.add_argument("-usecuda", '--usecuda', type=bool,
                        default=True,
                        help='using cuda or not')
    parser.add_argument("-batch", '--batch', type=int,
                        default=8,
                        help='batch size')
    parser.add_argument("-blocksize", '--blocksize', type=int,
                        default=64,
                        help='training block size')
    parser.add_argument("-epoch", '--epoch', type=int,
                        default=10,
                        help='number of epochs')
    parser.add_argument("-group", '--group', type=int,
                        default=2,
                        help='building which model?')
    parser.add_argument("-downsample", '--dslevel', type=int,
                        default=2,
                        help='number of downsampling step until group 1 level')
    parser.add_argument("-nopatches", '--patches', type=int,
                        default=4,
                        help='Number of patches in spliting step')
    parser.add_argument("-noresnet", '--noresnet', type=int,
                        default=4,
                        help='Number of patches in spliting step')
    parser.add_argument("-inputmodel", '--savedmodel', type=str, help='path to saved model file')
    # parser.add_argument("-loss", '--loss_img_name', type=str, help='name of loss image')
    parser.add_argument("-outputmodel", '--saving_model_path', type=str, help='path to output model file')
    parser.add_argument("-dataset", '--dataset', action='append', type=str, help='path to dataset ')
    parser.add_argument("-validation", '--validation', type=str, help='path to validation dataset ')
    parser.add_argument("-portion_data", '--portion_data', type=float,
                        default=1,
                        help='portion of dataset to put in training, densier pc are selected first')
    args = parser.parse_args()
    # resolution of the network input at the selected downsampling level
    low_res=int(64/(2**args.dslevel))
    model = MSVoxelCNN(args.patches, 1, low_res, args.noresnet,args.group)
    maxpool = nn.Sequential(
        *[nn.MaxPool3d(kernel_size=2) for _ in range(args.dslevel - 1)]
    )
    valid_loss_min = np.Inf
    device = torch.device("cuda" if args.usecuda else "cpu")
    model = model.to(device)
    optimizer = torch.optim.Adam(model.parameters(), lr=5e-5)
    start_epoch = 0
    # one model directory per (group, low-res) pair
    output_path=args.saving_model_path+'G'+str(args.group)+'_lres'+str(low_res)+'/'
    os.makedirs(output_path,exist_ok=True)
    if (args.scratch):
        print("Training from scratch \n")
        train(args.usecuda, args.batch,low_res, args.epoch, args.group, output_path, args.dataset,
              valid_loss_min, model, optimizer, start_epoch, args.dslevel)
    else:
        # resume from the last checkpoint in the same output directory
        ckp_path = output_path + "current_checkpoint.pt"
        model, optimizer, start_epoch, valid_loss_min = load_ckp(ckp_path, model, optimizer)
        print('Successfully loaded model \n')
train(args.usecuda, args.batch,low_res, args.epoch,args.group, output_path, args.dataset,
valid_loss_min, model, optimizer, start_epoch, args.dslevel) | 20,975 | 42.428571 | 233 | py |
MSVoxelDNN | MSVoxelDNN-master/training/voxel_dnn_training_torch.py | # VoxelCNN
import random as rn
import numpy as np
from utils.inout import input_fn_voxel_dnn, get_shape_data, get_files, load_points
import os
import sys
import argparse
import datetime
from utils.training_tools import save_ckp,load_ckp,compute_metric, Rotation, Random_sampling
import datetime
from torchvision.transforms import Compose
from torch.utils.tensorboard import SummaryWriter
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import Dataset
from pyntcloud import PyntCloud
from glob import glob
import math
from torchsummary import summary
random_seed = 42
np.random.seed(random_seed)
rn.seed(random_seed)
class maskedConv3D(nn.Conv3d):
    """Causal 3D convolution: kernel weights at/after the centre voxel in
    raster-scan order are forced to zero.  Type 'A' (first layer) also masks
    the centre weight; type 'B' (later layers) keeps it.
    """
    def __init__(self, masktype, *args, **kwargs):
        super().__init__( *args, **kwargs)
        # non-trainable buffer shaped like the weight tensor
        self.register_buffer('mask', self.weight.data.clone())
        _, _, kD, kH, kW = self.weight.size()
        self.mask.fill_(1)
        # centre row, from the centre voxel on ('B' keeps the centre itself)
        self.mask[:, :, kD // 2, kH // 2, kW // 2 + (masktype == 'B'):] = 0
        # rows below the centre in the centre depth slice
        self.mask[:, :, kD // 2, kH // 2 + 1:, :] = 0
        # all later depth slices
        self.mask[:, :, kD // 2 + 1:, :, :] = 0
    def forward(self, x):
        # re-applied on every call so training never un-masks the weights
        self.weight.data *= self.mask
        return super(maskedConv3D, self).forward(x)
class residualBlock(nn.Module):
    """1x1 -> masked 5x5x5 -> 1x1 bottleneck with a skip connection.

    Input and output carry 2*h channels; the masked middle conv keeps the
    block causal for autoregressive prediction.
    """

    def __init__(self, h):
        super().__init__()
        self.no_filters = h
        self.conva = nn.Conv3d(in_channels=2 * h, out_channels=h,
                               kernel_size=1, stride=1, padding=0)
        self.convb = maskedConv3D(masktype='B', in_channels=h, out_channels=h,
                                  kernel_size=5, stride=1, padding=2)
        self.convc = nn.Conv3d(in_channels=h, out_channels=2 * h,
                               kernel_size=1, stride=1, padding=0)

    def forward(self, x):
        out = F.relu(self.conva(x))
        out = F.relu(self.convb(out))
        return self.convc(out) + x
class VoxelDNN(nn.Module):
    """Masked 3D CNN that predicts per-voxel occupancy logits.

    Architecture: type-'A' masked 7x7x7 conv -> ``residual_blocks`` masked
    residual blocks -> two 1x1x1 convs producing ``output_channel`` logits
    per voxel (empty/occupied).  Output shape matches the input spatially.
    """
    def __init__(self, depth = 64, height = 64, width = 64, n_channel = 1, output_channel = 2, residual_blocks = 2, n_filters = 64):
        # Fix: call Module.__init__ BEFORE assigning attributes.  The original
        # assigned plain attributes first and stored the (None) return of
        # super().__init__() in a bogus `self.init__` attribute; that relies on
        # nn.Module.__setattr__ internals and breaks as soon as a tensor or
        # sub-module attribute is set before initialisation.
        super().__init__()
        self.depth = depth
        self.height = height
        self.width = width
        self.n_channel = n_channel
        self.output_channel = output_channel
        self.residual_blocks = residual_blocks
        self.n_filters = n_filters
        self.voxelcnn = nn.Sequential(
            maskedConv3D(masktype='A', in_channels=self.n_channel, out_channels=self.n_filters, kernel_size=7, stride=1, padding=3),
            # each residual block expects 2*(n_filters//2) = n_filters channels
            *[residualBlock(self.n_filters//2) for _ in range(self.residual_blocks)],
            nn.ReLU(),
            nn.Conv3d(in_channels=self.n_filters, out_channels=self.n_filters, kernel_size=1, stride=1, padding=0),
            nn.ReLU(),
            nn.Conv3d(in_channels=self.n_filters, out_channels=self.output_channel, kernel_size=1, stride=1, padding=0),
        )
    def forward(self, x):
        """Return occupancy logits of shape (B, output_channel, D, H, W)."""
        return self.voxelcnn(x)
class PCdataset(Dataset):
    """Voxelise .ply point clouds into dense 1 x bz x bz x bz occupancy
    blocks, optionally applying an augmentation transform first.

    Unreadable samples yield ``None``; the companion ``collate_fn`` drops
    them from the batch.
    """

    def __init__(self, files, block_size, transforms=None):
        self.files = np.asarray(files)
        self.transforms = transforms
        self.bz = block_size

    def __len__(self):
        return len(self.files)

    def __getitem__(self, idx):
        cloud = PyntCloud.from_file(self.files[idx])
        coords = cloud.points.to_numpy()[:, :3]
        if self.transforms:
            coords = self.transforms(coords)
        try:
            coords = np.unique(coords, axis=0)
        except:  # keep original best-effort behaviour: skip bad samples
            return None
        coords = torch.from_numpy(coords).type(torch.LongTensor)
        #print('number of points: ', coords.shape)
        occupancy = torch.ones(coords.shape[0])
        grid_size = torch.Size([self.bz, self.bz, self.bz])
        dense = torch.sparse.FloatTensor(
            torch.transpose(coords, 0, 1), occupancy, grid_size
        ).to_dense()
        #print('Number of ocv voxels: ', torch.sum(dense))
        return dense.view(1, self.bz, self.bz, self.bz)
def collate_fn(batch):
    """Collate after removing failed samples (``None`` from the dataset)."""
    valid = [item for item in batch if item is not None]
    return torch.utils.data.dataloader.default_collate(valid)
def data_collector(training_dirs, transform_idx, params):
    """Collect .ply blocks recursively from ``training_dirs``, split them by
    the immediate parent-folder name ('train'/'test'), and return the two
    DataLoaders.

    transform_idx selects augmentation: 0 = none, 1 = rotation,
    2 = random sampling, 3 = both.  ``params`` is forwarded to DataLoader.
    """
    total_files = []
    for training_dir in training_dirs:
        training_dir = training_dir + '**/*.ply'
        files = glob(training_dir, recursive=True)
        print('Total files: ', len(files))
        total_files_len = len(files)
        total_files = np.concatenate((total_files, files), axis=0)
        print('Selected ', len(files), ' from ', total_files_len, ' in ', training_dir)
    #total_files=total_files[:5000]
    assert len(total_files) > 0
    rn.shuffle(total_files)  # shuffle file
    print('Total blocks for training: ', len(total_files))
    files_cat = np.array([os.path.split(os.path.split(x)[0])[1] for x in total_files])
    files_train = total_files[files_cat == 'train']
    files_valid = total_files[files_cat == 'test']
    rotation = Rotation(64)
    sampling = Random_sampling()
    #rotation, sampling,
    both = Compose([rotation, sampling])
    #,transforms.ToTensor()
    transformations=[None, rotation, sampling, both]
    training_set = PCdataset(files_train, 64, transformations[transform_idx])
    training_generator = torch.utils.data.DataLoader(training_set, collate_fn=collate_fn, **params)
    # Validation data
    valid_set = PCdataset(files_valid, 64, transformations[transform_idx])
    valid_generator = torch.utils.data.DataLoader(valid_set, collate_fn=collate_fn, **params)
    return training_generator, valid_generator
def train(use_cuda, batch_size, max_epochs, output_model, dataset_path, valid_loss_min, model,
          optimizer, start_epoch, block_size, transformations):
    """Train VoxelDNN on 64^3 blocks max-pooled down to ``block_size``.

    The model predicts each voxel's occupancy from the (masked) block
    itself; target equals the downsampled input.  Checkpoints every epoch,
    keeps the best model by validation loss, early-stops after 10 stale
    epochs.
    """
    #tensorboard writer:
    #current_time = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
    train_log_dir = output_model + 'log' + '/train'
    test_log_dir = output_model + 'log' + '/test'
    train_summary_writer = SummaryWriter(train_log_dir)
    test_summary_writer = SummaryWriter(test_log_dir)
    #checkpoint setup
    checkpoint_path = output_model + "current_checkpoint.pt"
    best_model_path = output_model + "best_model.pt"
    eps = 1e-8  # added to logits to avoid exact zeros downstream
    params = {'batch_size': batch_size,
              'shuffle': True,
              'num_workers': 2}
    device = torch.device("cuda" if use_cuda else "cpu")
    print('Optimizer state: ', optimizer)
    training_generator, valid_generator=data_collector(dataset_path,transformations,params)
    # number of 2x max-pool stages needed to go from 64 to block_size
    ds_level=int(math.log2((64//block_size)))
    maxpool_n = nn.Sequential(
        *[nn.MaxPool3d(kernel_size=2) for _ in range(ds_level)]
    ).to(device)
    train_loss = 0
    train_losses = []
    best_val_epoch = None
    output_period=len(training_generator)//20
    for epoch in range(start_epoch, max_epochs):
        for batch_idx, x in enumerate(training_generator):
            x=x.to(device)
            x_donwscale=maxpool_n(x)
            # x_donwscale = torch.ones_like(x_donwscale).to(device)
            if(epoch==0 and batch_idx==0):
                print('Input shape: ', x_donwscale.shape)
            # target is the block itself (autoregressive self-prediction)
            target = x_donwscale
            db, _, dd, dh, dw = x_donwscale.shape
            target=target.view(db,dd,dh, dw).type(torch.LongTensor).to(x_donwscale.device)
            predict = model(x_donwscale) + eps
            loss = F.cross_entropy(predict, target)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            # running mean of the loss over the epoch
            train_loss = train_loss + ((1 / (batch_idx + 1)) * (loss.item() - train_loss))
            tp, fp, tn, fn, precision, recall, accuracy, specificity, f1 = compute_metric(predict, target,
                                                                                          train_summary_writer, len(
                    training_generator) * epoch + batch_idx)
            train_summary_writer.add_scalar("bc/loss", train_loss, len(training_generator) * epoch + batch_idx)
            if((batch_idx%output_period)==0):
                #ocv=1-torch.sum(x_donwscale)/(db*dd*dh*dw)
                print("Batch {} over {}: \tloss : {:.6f}\t accuracy : {:.3f} tp : {:.2f} fp : {:.2f} tn : {:.2f} fn : {:.2f} F1 : {:.4f}".format(batch_idx,len(training_generator), train_loss, accuracy, tp, fp, tn, fn, f1), end='\r')
            del loss, target,predict
            #break
        train_losses.append(train_loss)
        #print(train_losses)
        # validation
        # NOTE(review): model.eval() is never reverted with model.train()
        # before the next epoch -- harmless only if the model has no
        # dropout/batch-norm; confirm.
        with torch.no_grad():
            valid_loss = 0
            model.eval()
            for batch_idx, x in enumerate(valid_generator):
                x = x.to(device)
                x_donwscale = maxpool_n(x)
                target = x_donwscale
                db, _, dd, dh, dw = x_donwscale.shape
                target = target.view(db, dd, dh, dw).type(torch.LongTensor).to(x_donwscale.device)
                predict = model(x_donwscale) + eps
                loss = F.cross_entropy(predict, target)
                #loss = F.cross_entropy(output, target)
                valid_loss = valid_loss + ((1 / (batch_idx + 1)) * (loss.item() - valid_loss))
                del loss, target, predict
        test_summary_writer.add_scalar("bc/loss", valid_loss, epoch)
        print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
            epoch,
            train_loss,
            valid_loss))
        # saving model
        # create checkpoint variable and add important data
        checkpoint = {
            'epoch': epoch + 1,
            'valid_loss_min': valid_loss,
            'state_dict': model.state_dict(),
            'optimizer': optimizer.state_dict(),
        }
        # save checkpoint
        save_ckp(checkpoint, False, checkpoint_path, best_model_path)
        if valid_loss <= valid_loss_min or best_val_epoch ==None:
            print('Validation loss decreased ({:.6f} --> {:.6f}). Saving model ...'.format(valid_loss_min, valid_loss))
            # save checkpoint as best model
            save_ckp(checkpoint, True, checkpoint_path, best_model_path)
            valid_loss_min = valid_loss
            best_val_epoch=epoch
        if(epoch-best_val_epoch>=10):
            print('Early stopping detected')
            break
if __name__ == "__main__":
    # Command line main application function.
    parser = argparse.ArgumentParser(description='Encoding octree')
    parser.add_argument("-blocksize", '--block_size', type=int,
                        default=64,
                        help='input size of block')
    parser.add_argument("-nfilters", '--n_filters', type=int,
                        default=64,
                        help='Number of filters')
    parser.add_argument("-batch", '--batch_size', type=int,
                        default=2,
                        help='batch size')
    parser.add_argument("-epochs", '--epochs', type=int,
                        default=2,
                        help='number of training epochs')
    parser.add_argument("-inputmodel", '--savedmodel', type=str, help='path to saved model file')
    #parser.add_argument("-loss", '--loss_img_name', type=str, help='name of loss image')
    parser.add_argument("-outputmodel", '--saving_model_path', type=str, help='path to output model file')
    parser.add_argument("-dataset", '--dataset', action='append', type=str, help='path to dataset ')
    parser.add_argument("-portion_data", '--portion_data', type=float,
                        default=1,
                        help='portion of dataset to put in training, densier pc are selected first')
    parser.add_argument("-scratch", '--scratch', type=lambda x: (str(x).lower() in ['true', '1', 'yes']),
                        default=False,
                        help='Training from scratch or checkpoint')
    # NOTE(review): argparse type=bool treats any non-empty string as True,
    # so "-usecuda False" still enables CUDA -- confirm intended.
    parser.add_argument("-usecuda", '--usecuda', type=bool,
                        default=True,
                        help='using cuda or not')
    parser.add_argument("-lr", '--lr', type=int,
                        default=3,
                        help='index of learning rate: 1e-5,5e-5, 1e-4, 1e-3')
    parser.add_argument("-tf", '--transformations', type=int,
                        default=0,
                        help='0: none, 1: rotation, 2: random sampling, 3: both')
    args = parser.parse_args()
    block_size=args.block_size
    model = VoxelDNN(depth=block_size,height=block_size,width=block_size,n_channel=1,output_channel=2,residual_blocks=2,n_filters=args.n_filters)
    valid_loss_min = np.Inf
    device = torch.device("cuda" if args.usecuda else "cpu")
    model = model.to(device)
    summary(model, (1, block_size, block_size, block_size))
    print(model)
    # learning rate is selected by index into this table (see --lr help)
    lrs=[1e-6,1e-5,5e-5, 1e-4, 1e-3, 1e-2]
    lr=lrs[args.lr]
    print('Selected learning rate: ', lr)
    #optimizer = torch.optim.Adam(model.parameters(), lr=lr)
    optimizer = torch.optim.AdamW(model.parameters(), lr=lr, betas=(0.9, 0.999), eps=1e-08, weight_decay=1e-6, amsgrad=False)
    #optimizer = torch.optim.RMSprop(model.parameters(), lr=lr, alpha=0.99, eps=1e-08, weight_decay=0, momentum=0, centered=False)
    start_epoch = 0
    # one model directory per (block size, transform) configuration
    output_path=args.saving_model_path+'BL'+str(args.block_size)+'_tf'+str(args.transformations)+'/'
    os.makedirs(output_path,exist_ok=True)
    if (args.scratch):
        print("Training from scratch \n")
        train(args.usecuda, args.batch_size, args.epochs, output_path, args.dataset,
              valid_loss_min, model, optimizer, start_epoch, args.block_size,args.transformations)
    else:
        # try resuming from the best checkpoint; fall back to fresh training
        try:
            ckp_path = output_path + "best_model.pt"
            model, optimizer, start_epoch, valid_loss_min = load_ckp(ckp_path, model, optimizer) #load optimize and start epooch from check point
            print('Successfully loaded model \n')
            train(args.usecuda, args.batch_size, args.epochs, output_path, args.dataset,
                  valid_loss_min, model, optimizer, start_epoch, args.block_size, args.transformations)
        except:
            train(args.usecuda, args.batch_size, args.epochs, output_path, args.dataset,
                  valid_loss_min, model, optimizer, start_epoch, args.block_size, args.transformations)
#python3 -m training.voxel_dnn_training -blocksize 64 -nfilters 64 -inputmodel Model/voxelDNN_CAT1 -outputmodel Model/voxelDNN_CAT1 -dataset /datnguyen_dataset/database/Modelnet40/ModelNet40_200_pc512_oct3/ -dataset /datnguyen_dataset/database/CAT1/cat1_selected_vox10_oct4/ -batch 8 -epochs 30
#python3 -m training.voxel_mixing_context -epoch 50 -blocksize 64 -outputmodel Model/voxelDnnSuperRes/ -inputmodel Model/voxelDnnSuperRes -dataset /datnguyen_dataset/database/Microsoft/10bitdepth_selected_oct4/ -dataset /datnguyen_dataset/database/MPEG/selected_8i_oct4/ -dataset /datnguyen_dataset/database/Modelnet40/ModelNet40_200_pc512_oct3/ -batch 8 -nfilters 64
# python3 -m training.voxel_dnn_training_torch -usecuda 1 -dataset /datnguyen_dataset/database/Modelnet40/ModelNet40_200_pc512_oct3/ -dataset /datnguyen_dataset/database/Microsoft/10bitdepth_selected_oct4/ -dataset /datnguyen_dataset/database/MPEG/selected_8i_oct4/ -dataset /datnguyen_dataset/database/CAT1/cat1_selected_vox10_oct4/ -outputmodel Model/MSVoxelCNNP/ -epoch 100 --scratch=0 -batch 32 -nopatches 2 -group 1 -downsample 1 -noresnet 4
# # python3 -m training.voxel_dnn_training_torch -usecuda 1 -dataset /datnguyen_dataset/database/MPEG/selected_8i_oct4/ -outputmodel Model/VoxelDNNTorch/ -epoch 1 --scratch=1 -batch 32
# new platform :
# python3 -m training.voxel_dnn_training_torch -usecuda 1 -dataset ../../Datasets/ModelNet40_200_pc512_oct3/ -outputmodel Model/VoxelDNNTorch/ -epoch 3 --scratch=1 -batch 8 -tf 0 -lr 4
# python3 -m training.voxel_dnn_training_torch -usecuda 1 -dataset /Users/thanhdatnguyen/Documents/Works/INTERN/src/OctreeCoding/block_64/ -outputmodel Model/VoxelDNNTorch/ -epoch 3 --scratch=1 -batch 8 -tf 0 -lr 4 -nfilters 16 -blocksize 16 | 15,785 | 44.756522 | 448 | py |
MSVoxelDNN | MSVoxelDNN-master/ms_voxel_dnn_coder/ms_voxel_dnn_encoder.py | import numpy as np
import os
import argparse
import time
from utils.inout import occupancy_map_explore, pmf_to_cdf
from utils.metadata_endec import save_compressed_file
import gzip
import pickle
from training.voxel_dnn_training_torch import VoxelDNN
import torchac
import torch
import torch.nn as nn
from training.ms_voxel_cnn_training import index_hr, index_lr, MSVoxelCNN
def encoder(args):
    """Encode one point cloud with MSVoxelDNN.

    Loads 8 group models per downsampling level plus the VoxelDNN8 base
    model, arithmetic-codes every 64^3 block, and writes: blocks.bin
    (multiscale groups), baseblocks.bin (base level), metadata.bin
    (gzipped octree partition) and heatmap.pkl (per-block bit stats).
    Prints a rate summary at the end.
    """
    global Models, device, VoxelDNN8, lowest_bits
    pc_level, ply_path, output_path, signaling, downsample_level, saved_model_path, voxeldnn8_path = args
    # octree depth at which the 64^3 coding blocks appear (2^6 = 64)
    departition_level = pc_level - 6
    # getting encoding input data
    lowest_bits = 0
    blocks, binstr, no_oc_voxels = occupancy_map_explore(ply_path, pc_level, departition_level)
    if torch.cuda.is_available():
        device = torch.device('cuda')
    else:
        device = torch.device('cpu')
    # For output signal
    sequence_name = os.path.split(ply_path)[1]
    sequence = os.path.splitext(sequence_name)[0]
    output_path = output_path + str(sequence) + '/' + signaling + '/'
    os.makedirs(output_path, exist_ok=True)
    outputMSVoxelDNN = output_path + 'blocks.bin'
    outputVoxelDNN = output_path + 'baseblocks.bin'
    metadata_file = output_path + 'metadata.bin'
    heatmap_file = output_path + 'heatmap.pkl'
    start = time.time()
    # Restore MSVoxelDNN: one model per (level, group); flat list indexed
    # as Models[lv * 8 + gr] by predictor().
    Models = []
    for lv in range(downsample_level):
        for gr in range(8):
            low_res = int(64 // (2 ** (lv + 1)))
            model = MSVoxelCNN(2, 1, low_res, 4, gr)
            ckp_path = saved_model_path + 'G' + str(gr) + '_lres' + str(low_res) + '/' + "best_model.pt"
            print(ckp_path)
            checkpoint = torch.load(ckp_path)
            model.load_state_dict(checkpoint['state_dict'])
            model.eval().to(device)
            Models.append(model)
    print('Sucessfully loaded ', len(Models), ' models')
    heatmap = []
    # recounted during encoding from the actual blocks
    no_oc_voxels = 0
    # restore torch voxelCNN
    VoxelDNN8 = VoxelDNN(depth=8, height=8, width=8, residual_blocks=2, n_filters=64)
    checkpoint = torch.load(voxeldnn8_path + 'best_model.pt')
    VoxelDNN8.load_state_dict(checkpoint['state_dict'])
    VoxelDNN8.eval().to(device)
    # Encoding block
    i = 0
    print('Encoding ', blocks.shape[0], ' blocks ')
    with open(outputMSVoxelDNN, 'wb') as bitoutms, open(outputVoxelDNN, 'wb') as bitoutvx:
        for block in blocks:
            print('Encoding block ', i, ' over ', blocks.shape[0], ' blocks ', end='\r')
            encode_log, ocv = blockCoder(block, bitoutms, bitoutvx, downsample_level)
            heatmap.append(encode_log)
            no_oc_voxels += ocv
            i += 1
    with open(heatmap_file, 'wb') as f:
        pickle.dump(heatmap, f)
    with gzip.open(metadata_file, "wb") as f:
        ret = save_compressed_file(binstr, pc_level, departition_level)
        f.write(ret)
    total_no_ocv = no_oc_voxels
    # all sizes in bits
    basebits = int(os.stat(outputVoxelDNN).st_size) * 8
    file_size = int(os.stat(outputMSVoxelDNN).st_size) * 8
    metadata_size = int(os.stat(metadata_file).st_size) * 8
    total_size = file_size + metadata_size + basebits
    avg_bpov = (total_size) / total_no_ocv
    print('Encoded file: ', ply_path)
    end = time.time()
    print('Encoding time: ', end - start)
    print('Occupied Voxels: %04d' % total_no_ocv)
    print('Blocks bitstream: ', outputMSVoxelDNN)
    print('Baseblock bitstream: ', outputVoxelDNN)
    print('Metadata bitstream', metadata_file)
    print('Encoding information: ', heatmap_file)
    print('Average bits per occupied voxels: %.04f' % avg_bpov)
    print('Percent of base level: %.04f' % (basebits / total_size))
    print('Total file size: ', total_size)
def blockCoder(block, bitout_msvoxeldnn, bitout_voxeldnn, downsample_level):
    """Encode one 64^3 occupancy block across all downsampling levels.

    For each level the 8 group sub-lattices are predicted and arithmetic
    coded into ``bitout_msvoxeldnn``; the lowest resolution is coded with
    VoxelDNN8 into ``bitout_voxeldnn``.  Returns (heatmap, occupied-voxel
    count) where heatmap lists [bits, occupied voxels] per level.
    """
    global device, lowest_bits
    d, h, w, _ = block.shape
    block = torch.from_numpy(block).view(1, 1, d, h, w)
    ocv = torch.sum(block).item()
    heatmap = []
    for lv in range(downsample_level):
        # current scale = lv poolings; conditioning scale = one pooling more
        ds_sampler = nn.Sequential(*[nn.MaxPool3d(kernel_size=2) for _ in range(lv + 1)]).to(device)
        curr_sampler = nn.Sequential(*[nn.MaxPool3d(kernel_size=2) for _ in range(lv)]).to(device)
        curr_block = curr_sampler(block.detach().clone().to(device))
        ds_block = ds_sampler(block.detach().clone().to(device))
        total_bits = 0
        for gr in range(8):
            predicted_probs = predictor(curr_block, ds_block, lv, gr)
            bytestream = torchacCoder(curr_block, predicted_probs, ds_block, gr)
            # Fix: was len(bytestream * 8), which builds an 8x copy of the
            # byte string just to take its length; same value, no copy.
            total_bits += len(bytestream) * 8
            bitout_msvoxeldnn.write(bytestream)
        heatmap.append([total_bits, torch.sum(curr_block).item()])
        if lv == downsample_level - 1:
            # lowest resolution: fall back to the VoxelDNN8 base coder
            bits = baseLevelCoder(ds_block, bitout_voxeldnn)
            lowest_bits += bits
            heatmap.append([bits, torch.sum(ds_block).item()])
    return heatmap, ocv
def predictor(curr_block, ds_block, curr_lv, group):
    """Run the group model for (curr_lv, group) and return per-voxel softmax
    probabilities with the class axis first (shape (2, d', h', w')).

    The input is assembled exactly as during training: group 0 sees only the
    lower resolution; later groups see the already-decoded voxels selected
    by index_hr with not-yet-decoded positions zeroed via index_lr.
    """
    global Models, device
    _, _, d, h, w = curr_block.shape
    if group == 0:
        input = ds_block
    else:
        index = index_hr(group - 1, d, h, w)
        input = curr_block[:, :, index[0][:, None, None], index[1][:, None], index[2]]
        _, _, ld, lh, lw = input.shape
        index_0 = index_lr(group - 1, ld, lh, lw)
        if (index_0 is not None):
            input[:, :, index_0[0][:, None, None], index_0[1][:, None], index_0[2]] = 0
        if (group == 5):
            input[:, :, 1:ld:2, 1:lh:2, :] = 0
    input = input.to(device)
    # Models is a flat list indexed as lv * 8 + group (see encoder())
    group_prediction = Models[curr_lv * 8 + group](input)
    return torch.nn.Softmax(dim=0)(group_prediction[0])
def torchacCoder(curr_block, predicted_probs, ds_block, group):
    """Arithmetic-code the voxels of ``group``'s sub-lattice whose parent
    voxel in ``ds_block`` is occupied, using the predicted probabilities.
    Returns the torchac byte stream.
    """
    _, _, d, h, w = curr_block.shape
    # parity offset of this group within a 2x2x2 cell
    idx = np.unravel_index(group, (2, 2, 2))
    curr_block = curr_block[0, 0, idx[0]:d:2, idx[1]:h:2, idx[2]:w:2]
    pd, ph, pw = curr_block.shape
    ds_block = ds_block.type(torch.bool).view(pd, ph, pw)
    # only voxels whose parent is occupied are coded (others are known empty)
    syms = curr_block[ds_block].type(torch.int16)
    probs = predicted_probs[:, ds_block].transpose(0, 1)
    predicted_cdf = pmf_to_cdf(probs)
    predicted_cdf = predicted_cdf.detach().cpu()
    filtered_curr_block = syms.detach().cpu()
    byte_stream = torchac.encode_float_cdf(predicted_cdf, filtered_curr_block, check_input_bounds=True)
    return byte_stream
def baseLevelCoder(box, bitout):
    """Encode the lowest-resolution block with the base VoxelDNN model.

    Writes the byte stream to ``bitout`` unless ``bitout == 1`` — presumably a
    dry-run sentinel meaning "count bits only" (confirm with callers).
    Returns the number of bits produced.
    """
    global device, VoxelDNN8
    # Torch voxeldnn model
    probs = torch.nn.Softmax(dim=0)(VoxelDNN8(box)[0])
    probs = probs.permute(1, 2, 3, 0)
    block = box[0, 0, :, :, :]
    block = block.type(torch.int16).cpu()
    probs = pmf_to_cdf(probs.cpu())
    byte_stream = torchac.encode_float_cdf(probs, block, check_input_bounds=True)
    curr_bit = len(byte_stream) * 8
    if (bitout != 1):
        bitout.write(byte_stream)
    return curr_bit
# Main launcher
if __name__ == "__main__":
    # CLI entry point: parse coder settings and run the MSVoxelDNN encoder.
    parser = argparse.ArgumentParser(description='Encoding octree')
    parser.add_argument("-level", '--octreedepth', type=int,
                        default=10,
                        help='depth of input octree to pass for encoder')
    parser.add_argument("-depth", '--downsamplingdepth', type=int,
                        default=3,
                        help='max depth to downsample, depth = 3: base block is 8')
    parser.add_argument("-ply", '--plypath', type=str, help='path to input ply file')
    parser.add_argument("-output", '--outputpath', type=str, help='path to output files', default='t')
    parser.add_argument("-model", '--modelpath', type=str, help='path to input model file', default='t')
    parser.add_argument("-signaling", '--signaling', type=str, help='special character for the output', default='t')
    parser.add_argument("-model8", '--modelpath8', type=str, help='path to input model 8 .h5 file')
    args = parser.parse_args()
    # `encoder` (defined earlier in this file) takes a positional settings list.
    encoder([args.octreedepth, args.plypath, args.outputpath, args.signaling, args.downsamplingdepth, args.modelpath,
             args.modelpath8])
# python3 -m ms_voxel_dnn_coder.ms_voxel_dnn_encoder -level 10 -depth 3 -ply ../PCC2/TestPC/MPEG_thaidancer_viewdep_vox10.ply -output Output/ -signaling msvxdnn -model Model/MSVoxelDNN/ -model8 ../PCC2/Model/VoxelDNNTorch/BL8_tf0/
# ../PCC2/Model/VoxelDNNTorch/BL8_tf0/ TestPC/Microsoft_ricardo10_vox10_0011.ply
# python3 -m ms_voxel_dnn_coder.ms_voxel_dnn_encoder -level 10 -depth 3 -ply ../PCC2/TestPC/Microsoft_ricardo10_vox10_0011.ply -output Output/ -signaling msvxdnn -model Model/MSVoxelDNN/ -model8 ../PCC2/Model/VoxelDNNTorch/BL8_tf0/
| 8,518 | 41.383085 | 232 | py |
MSVoxelDNN | MSVoxelDNN-master/utils/inout.py | import numpy as np
from utils.octree_partition import partition_octree
import time
from glob import glob
import tensorflow as tf
import multiprocessing
from tqdm import tqdm
from pyntcloud import PyntCloud
import pandas as pd
import torch
#import open3d as o3d
#VOXEL-OCTREE
def timing(f):
    """Decorator that reports the wall-clock runtime of *f* in milliseconds."""
    def wrap(*args, **kwargs):
        start = time.time()
        result = f(*args, **kwargs)
        elapsed_ms = (time.time() - start) * 1000.0
        print('{:s} function took {:.3f} ms'.format(f.__name__, elapsed_ms))
        return result
    return wrap
def get_bin_stream_blocks(path_to_ply, pc_level, departition_level):
    """Read a voxelized ply and partition it into octree blocks.

    Returns (number of occupied voxels, list of point blocks, octree byte
    string) produced by ``partition_octree`` over a 2**pc_level bounding box.
    """
    # co 10 level --> binstr of 10 level, blocks size =1
    level = int(departition_level)
    pc = PyntCloud.from_file(path_to_ply)
    points = pc.points.values
    no_oc_voxels = len(points)
    box = int(2 ** pc_level)
    # `timing` wraps the call to print how long the partitioning takes.
    blocks2, binstr2 = timing(partition_octree)(points, [0, 0, 0], [box, box, box], level)
    return no_oc_voxels, blocks2, binstr2
def voxel_block_2_octree(box, oct_seq):
    """Append the octree bit sequence of a cubic occupancy block to *oct_seq*.

    Recursively splits *box* into 8 octants: a non-empty octant emits a 1
    followed by its own sequence, an empty one emits a single 0.  At size 2
    the eight voxel values themselves are appended.  Returns *oct_seq*.
    """
    size = box.shape[0]
    half = int(size / 2)
    if size > 2:
        for d in range(2):
            for h in range(2):
                for w in range(2):
                    octant = box[d * half:(d + 1) * half,
                                 h * half:(h + 1) * half,
                                 w * half:(w + 1) * half]
                    if np.sum(octant) == 0:
                        oct_seq.append(0)
                    else:
                        oct_seq.append(1)
                        voxel_block_2_octree(octant, oct_seq)
    else:
        # Leaf: emit the raw 2x2x2 occupancy bits.
        oct_seq += [int(v) for v in box.flatten()]
    return oct_seq
#FOR VOXEL
def input_fn_super_res(points, batch_size, dense_tensor_shape32, data_format, repeat=True, shuffle=True, prefetch_size=1):
    """tf.data pipeline yielding (downsampled, original) occupancy pairs.

    Each point set is downsampled by 2 (coordinates halved, deduplicated),
    then both resolutions are rasterized to dense tensors via pc_to_tf /
    process_x.  The high-resolution shape is fixed at 64^3 here.
    """
    # Create input data pipeline.
    def gen():
        iterator=iter(points)
        done=False
        while not done:
            try:
                p = next(iterator)
            except StopIteration:
                done=True
            else:
                # Halve coordinates (with a small offset before rounding) and
                # deduplicate to obtain the lower-resolution point set.
                ds = np.abs(np.round((p - 0.01) / 2))
                ds = np.unique(ds,axis=0)
                yield (ds, p)
    p_max = np.array([64, 64, 64])
    dense_tensor_shape64 = np.concatenate([p_max, [1]]).astype('int64')
    dense_tensor_shape=[dense_tensor_shape32,dense_tensor_shape64]
    dataset = tf.data.Dataset.from_generator(generator=gen, output_types=(tf.int64,tf.int64),output_shapes= (tf.TensorShape([None, 3]),tf.TensorShape([None, 3])))
    if shuffle:
        dataset = dataset.shuffle(len(points))
    if repeat:
        dataset = dataset.repeat()
    dataset = dataset.map(lambda x,y: pc_to_tf(x,y
                                               , dense_tensor_shape, data_format))
    dataset = dataset.map(lambda x,y: process_x(x,y, dense_tensor_shape))
    dataset = dataset.batch(batch_size)
    dataset = dataset.prefetch(prefetch_size)
    return dataset
# Main launcher
def input_fn_voxel_dnn(points, batch_size, dense_tensor_shape, data_format, repeat=True, shuffle=True, prefetch_size=1):
    """tf.data pipeline turning Nx3 point sets into dense occupancy tensors.

    Each element of *points* is rasterized with pc_to_tf_voxel_dnn /
    process_x_voxel_dnn, then batched and prefetched.
    """
    print('point shape: ', points.shape)
    # Create input data pipeline.
    dataset = tf.data.Dataset.from_generator(lambda: iter(points), tf.int64, tf.TensorShape([None, 3]))
    if shuffle:
        dataset = dataset.shuffle(len(points))
    if repeat:
        dataset = dataset.repeat()
    dataset = dataset.map(lambda x: pc_to_tf_voxel_dnn(x, dense_tensor_shape, data_format))
    dataset = dataset.map(lambda x: process_x_voxel_dnn(x, dense_tensor_shape))
    dataset = dataset.batch(batch_size)
    dataset = dataset.prefetch(prefetch_size)
    return dataset
def df_to_pc(df):
    """Extract the xyz coordinates of a point-cloud DataFrame as an ndarray."""
    return df[['x', 'y', 'z']].values
def pa_to_df(points):
    """Wrap an (N, 3..6) point array into a DataFrame.

    Columns are x/y/z as float32, optionally followed by red/green/blue as
    uint8, depending on how many columns *points* has.
    """
    columns = ['x', 'y', 'z', 'red', 'green', 'blue']
    dtypes = (['float32'] * 3) + (['uint8'] * 3)
    n_cols = points.shape[1]
    assert 3 <= n_cols <= 6
    data = {columns[i]: points[:, i].astype(dtypes[i]) for i in range(n_cols)}
    return pd.DataFrame(data=data)
def pc_to_df(pc):
    """Convert an object exposing a ``.points`` array into a DataFrame."""
    return pa_to_df(pc.points)
def pc_to_tf(x,y, dense_tensor_shape, data_format):
    """Turn a (low-res, high-res) coordinate pair into SparseTensors.

    Each Nx3 index list gets a zero channel index appended (channels_last)
    or prepended (channels_first) and becomes a SparseTensor of ones with
    shape dense_tensor_shape[0] / dense_tensor_shape[1] respectively.
    """
    assert data_format in ['channels_last', 'channels_first']
    # Add one channel (channels_last convention)
    if data_format == 'channels_last':
        x = tf.pad(x, [[0, 0], [0, 1]])
    else:
        x = tf.pad(x, [[0, 0], [1, 0]])
    st0 = tf.sparse.SparseTensor(x, tf.ones_like(x[:, 0]), dense_tensor_shape[0])
    # Add one channel (channels_last convention)
    if data_format == 'channels_last':
        y = tf.pad(y, [[0, 0], [0, 1]])
    else:
        y = tf.pad(y, [[0, 0], [1, 0]])
    st1 = tf.sparse.SparseTensor(y, tf.ones_like(y[:, 0]), dense_tensor_shape[1])
    return (st0,st1)
def process_x(x,y, dense_tensor_shape):
    """Densify the sparse (low-res, high-res) pair to float32 tensors."""
    x = tf.sparse.to_dense(x, default_value=0, validate_indices=False)
    x.set_shape(dense_tensor_shape[0])
    x = tf.cast(x, tf.float32)
    y = tf.sparse.to_dense(y, default_value=0, validate_indices=False)
    y.set_shape(dense_tensor_shape[1])
    y = tf.cast(y, tf.float32)
    return (x,y)
def pc_to_tf_voxel_dnn(points, dense_tensor_shape, data_format):
    """Turn an Nx3 coordinate list into a SparseTensor of ones.

    A zero channel index is appended (channels_last) or prepended
    (channels_first) to each coordinate before building the tensor.
    """
    x = points
    assert data_format in ['channels_last', 'channels_first']
    # Add one channel (channels_last convention)
    if data_format == 'channels_last':
        x = tf.pad(x, [[0, 0], [0, 1]])
    else:
        x = tf.pad(x, [[0, 0], [1, 0]])
    st = tf.sparse.SparseTensor(x, tf.ones_like(x[:, 0]), dense_tensor_shape)
    # print('st in pc to tf: ',st)
    return st
def process_x_voxel_dnn(x, dense_tensor_shape):
    """Densify a sparse occupancy tensor to a float32 dense tensor."""
    x = tf.sparse.to_dense(x, default_value=0, validate_indices=False)
    x.set_shape(dense_tensor_shape)
    x = tf.cast(x, tf.float32)
    # print('x in process x: ',x)
    return x
def get_shape_data(resolution, data_format):
    """Return (p_min, p_max, dense_tensor_shape) for a cubic voxel grid.

    The dense shape is the cube plus a singleton channel axis, appended for
    'channels_last' and prepended for 'channels_first'.
    """
    assert data_format in ['channels_last', 'channels_first']
    p_min = np.array([0, 0, 0])
    p_max = np.array([resolution, resolution, resolution])
    if data_format == 'channels_last':
        dense_tensor_shape = np.concatenate([p_max, [1]]).astype('int64')
    else:
        dense_tensor_shape = np.concatenate([[1], p_max]).astype('int64')
    return p_min, p_max, dense_tensor_shape
def get_files(input_glob):
    """Expand a (possibly recursive) glob pattern into an array of paths."""
    matches = glob(input_glob, recursive=True)
    return np.array(matches)
def load_pc(path):
    """Load a point cloud from *path* and return its xyz array.

    Returns None when the file cannot be read or parsed (best-effort
    contract used by the parallel loader `load_points`).
    """
    try:
        pc = PyntCloud.from_file(path)
        points = pc.points
        return df_to_pc(points)
    except Exception:
        # Was a bare `except:`, which also swallowed KeyboardInterrupt /
        # SystemExit; keep the best-effort behavior but only catch real
        # errors, and make the None return explicit.
        return None
def load_points(files, batch_size=32):
    """Load many point-cloud files in parallel via a process pool.

    Returns an object array of per-file results (None entries for files
    `load_pc` failed on).  `batch_size` is the imap chunk size.
    """
    files_len = len(files)
    with multiprocessing.Pool() as p:
        # logger.info('Loading PCs into memory (parallel reading)')
        points = np.array(list(tqdm(p.imap(load_pc, files, batch_size), total=files_len)))
    return points
# blocks to occupancy maps, only for running the first time to explore pc characteristic
def pc_2_block_oc3_test(blocks, bbox_max):
    """Rasterize blocks to occupancy maps, also collecting statistics.

    Returns (occupancy grids of shape (N, bbox_max^3, 1), per-block min/max
    coordinates, per-block occupied-voxel count at the next-lower level).
    Exploration variant of `pc_2_block_oc3`.
    """
    no_blocks = len(blocks)
    blocks_oc = np.zeros((no_blocks, bbox_max, bbox_max, bbox_max, 1), dtype=np.float32)
    coor_min_max=np.zeros((no_blocks,6),dtype=np.uint32)
    lower_level_ocv=[]
    for i, block in enumerate(blocks):
        block = block[:, 0:3]
        # getting infor of block
        coor_min_max[i,:3] = np.min(block,axis=0)
        coor_min_max[i, 3:] = np.max(block,axis=0)
        # Downsample by 2 (offset before rounding) and deduplicate to count
        # how many voxels the block occupies one level lower.
        bl_points = (block - 0.01) / 2
        bl_points = np.abs(bl_points)
        bl_points = np.round(bl_points)
        bl_points = np.unique(bl_points,axis=0)
        lower_level_ocv.append(len(bl_points))
        block = block.astype(np.uint32)
        blocks_oc[i, block[:, 0], block[:, 1], block[:, 2], 0] = 1.0
    return blocks_oc,coor_min_max,lower_level_ocv
def occupancy_map_explore_test(ply_path, pc_level, departition_level):
    """Partition a ply into blocks and rasterize them with statistics.

    Exploration variant of `occupancy_map_explore`; block size is derived
    from (pc_level - departition_level) instead of being fixed at 64.
    """
    no_oc_voxels, blocks, binstr = get_bin_stream_blocks(ply_path, pc_level, departition_level)
    bbox_max=int(2**(pc_level-departition_level))
    boxes,coor_min_max,lower_level_ocv = pc_2_block_oc3_test(blocks, bbox_max)
    return boxes, binstr, no_oc_voxels,coor_min_max,lower_level_ocv
#official function of pc2block oc3 and occupalncy map explore
def pc_2_block_oc3(blocks, bbox_max):
    """Rasterize per-block point lists into dense binary occupancy grids.

    Returns a float32 array of shape (len(blocks), bbox_max, bbox_max,
    bbox_max, 1) with 1.0 at every occupied voxel.
    """
    occupancy = np.zeros((len(blocks), bbox_max, bbox_max, bbox_max, 1), dtype=np.float32)
    for i, block in enumerate(blocks):
        coords = block[:, 0:3].astype(np.uint32)
        occupancy[i, coords[:, 0], coords[:, 1], coords[:, 2], 0] = 1.0
    return occupancy
def occupancy_map_explore(ply_path, pc_level, departition_level):
    """Partition *ply_path* into blocks and rasterize them to 64^3 grids.

    Returns (occupancy grids, octree byte string, occupied-voxel count).
    """
    n_voxels, point_blocks, octree_bytes = get_bin_stream_blocks(ply_path, pc_level, departition_level)
    occupancy_blocks = pc_2_block_oc3(point_blocks, bbox_max=64)
    return occupancy_blocks, octree_bytes, n_voxels
#only for super resolution approach
def pc_2_block(ply_path, pc_level, departition_level):
    """Rasterize a whole ply and split it into non-empty child blocks.

    Only used by the super-resolution approach.  Returns the full occupancy
    grid and a list of [child occupancy grid, origin] for each non-empty
    child cube of side 2**(pc_level - departition_level).
    """
    departition_level = int(departition_level)
    pc = PyntCloud.from_file(ply_path)
    points = pc.points[['x','y','z']]
    points=points.to_numpy()
    bbox_max = int(2 ** pc_level)
    block_oc = np.zeros((1, bbox_max, bbox_max, bbox_max, 1), dtype=np.float32)
    points = points.astype(np.uint32)
    block_oc[:,points[:, 0], points[:, 1], points[:, 2], 0] = 1.0
    no_box=int(2**departition_level)
    child_box_size=int(2**(pc_level-departition_level))
    child_blocks=[]
    # Slice the full grid into a no_box^3 lattice of child cubes.
    for d in range(no_box):
        for h in range(no_box):
            for w in range(no_box):
                child_box = block_oc[:,d * child_box_size:(d + 1) * child_box_size,
                            h * child_box_size:(h + 1) * child_box_size,
                            w * child_box_size:(w + 1) * child_box_size, :]
                if(np.sum(child_box)!=0):
                    location=[d * child_box_size,h * child_box_size,w * child_box_size]
                    child_blocks.append([child_box,location])
    return block_oc,child_blocks
def pc_2_xyzblock(ply_path, pc_level, departition_level):
    """Split a ply's points into a lattice of cubes, keeping only counts.

    Returns the full uint32 point array and, for each non-empty child cube
    of side 2**(pc_level - departition_level), [point count, origin].
    """
    departition_level = int(departition_level)
    pc = PyntCloud.from_file(ply_path)
    points = pc.points[['x','y','z']]
    points=points.to_numpy()
    points = points.astype(np.uint32)
    no_box=int(2**departition_level)
    child_box_size=int(2**(pc_level-departition_level))
    child_blocks=[]
    for d in range(no_box):
        for h in range(no_box):
            for w in range(no_box):
                # Select the points falling inside this child cube.
                child_box = points[(points[:,0]>=d*child_box_size) & (points[:,0]<(d+1)*child_box_size)&(points[:,1]>=h*child_box_size) & (points[:,1]<(h+1)*child_box_size)&(points[:,2]>=w*child_box_size) & (points[:,2]<(w+1)*child_box_size)]
                if(child_box.shape[0]!=0):
                    #child_box = child_box - np.min(child_box, axis=0)
                    location=[d * child_box_size,h * child_box_size,w * child_box_size]
                    child_blocks.append([child_box.shape[0],location])
    return points,child_blocks
def pc_2_block_octree(ply_path, pc_level, departition_level):
    """Build multi-resolution block representations of one point cloud.

    Partitions the pc_level-bit cloud into 64^3 blocks, derives the halved
    (pc_level-1) version per block (32^3) and globally (its own 64^3 blocks
    plus octree), and returns
    (binstr at level-1, level-1 64^3 grids, level 64^3 grids,
     level 32^3 grids, occupied-voxel count).
    """
    level = int(departition_level)
    pc = PyntCloud.from_file(ply_path)
    points = pc.points[['x','y','z']]
    no_oc_voxels = len(points)
    box = int(2 ** pc_level)
    blocks10_64, _ = timing(partition_octree)(points, [0, 0, 0], [box, box, box], level)#partition 10 bits pc to block 64 + octree 4
    blocks10_32=[]
    for bl_points in blocks10_64:
        # Downsample each 64^3 block by 2 (offset before rounding) and
        # deduplicate to get its 32^3 counterpart.
        bl_points = (bl_points - 0.01) / 2
        bl_points = np.abs(bl_points)
        bl_points = np.round(bl_points)
        bl_points = np.unique(bl_points,axis=0)
        blocks10_32.append(bl_points)
    blocks10_64 = pc_2_block_oc3(blocks10_64, 64)
    blocks9_32 = pc_2_block_oc3(blocks10_32,32)
    # Downsample the whole cloud to (pc_level - 1) bits.
    points = (points - 0.01) / 2
    points = np.abs(points)
    points = np.round(points)
    points = np.unique(points,axis=0)
    box = int(2 ** (pc_level-1))
    blocks9_64, binstr9 = timing(partition_octree)(points, [0, 0, 0], [box, box, box], level-1)#partition 9 bits pc to block 64 + octree 3
    blocks9_64=pc_2_block_oc3(blocks9_64,64)
    return binstr9, blocks9_64, blocks10_64,blocks9_32, no_oc_voxels
#Octree generation from occupancy map
def depth_partition_box(binstr,box,max_level, current_level,ocv):
    """Depth-first serialize an occupancy grid into octree occupancy bytes.

    Appends one byte per visited node to *binstr* (bit i set when child i is
    non-empty); at max_level the byte encodes the 8 leaf voxels directly and
    *ocv* counts the occupied ones.  Returns (binstr, ocv).
    """
    current_value=0
    if current_level==max_level:
        # Leaf: pack the 2x2x2 voxel values into one byte (LSB = child 0).
        assert box.shape[0]==2
        flatted_box=box.flat[:]
        for i,bin in enumerate(flatted_box):
            current_value+=bin*2**i
            if bin==1:
                ocv+=1
        binstr.append(int(current_value))
    else:
        curr_bbox_max=2**(max_level-current_level+1)
        child_bbox_max=int(curr_bbox_max/2)
        current_value=0
        child_value=[]
        for d in range(2):
            for h in range(2):
                for w in range(2):
                    curr_box=box[d*child_bbox_max:(d+1)*child_bbox_max,h*child_bbox_max:(h+1)*child_bbox_max,w*child_bbox_max:(w+1)*child_bbox_max,:]
                    #print(curr_box.shape)
                    if (np.sum(curr_box)!=0.):
                        child_value.append(1)
                        # Recurse into non-empty children before emitting
                        # this node's byte (depth-first order).
                        _,ocv=depth_partition_box(binstr,curr_box,max_level,current_level+1,ocv)
                    else:
                        child_value.append(0)
        for i,bin in enumerate(child_value):
            current_value+=bin*2**i
        binstr.append(current_value)
    return binstr,ocv
#for block based octree coding
def depth_first_search(last_level_cnt, current_pointer, current_level, binstr, fr_table, pos_seq, level):
    """Recursively walk a serialized octree byte string.

    Counts set bits at the deepest level (last_level_cnt), tallies child-byte
    frequencies per child position in *fr_table*, and records per-position
    child-byte sequences in *pos_seq*.  Returns the updated
    (last_level_cnt, current_pointer, fr_table, pos_seq).
    """
    # Byte for the current node, as an 8-char bit string (MSB first).
    current_bin = format(binstr[current_pointer], '08b')
    if (current_level == level):
        for i in range(8):
            if (current_bin[i] == '1'):
                last_level_cnt += 1
    else:
        current_level += 1
        for i in range(8):
            if (current_bin[i] == '1'):
                # Each set bit means a child byte follows in depth-first order.
                current_pointer += 1
                fr_table[i, binstr[current_pointer]] += 1
                pos_seq[i].append(binstr[current_pointer])
                [last_level_cnt, current_pointer, fr_table, pos_seq] = depth_first_search(last_level_cnt,
                                                                                         current_pointer,
                                                                                         current_level, binstr,
                                                                                         fr_table, pos_seq, level)
    return last_level_cnt, current_pointer, fr_table, pos_seq
def discover2(binstr, level):
    """Scan a serialized octree byte string down to *level*.

    Returns (count of set bits at the last level, an integer 8x257
    child-byte frequency table, and per-position child-byte sequences),
    all gathered by `depth_first_search` starting from the root byte.
    """
    current_pointer = 0
    current_level = 1
    # BUG FIX: `np.float` (a deprecated alias of the builtin float) was
    # removed in NumPy 1.24; use np.float64 explicitly — same dtype.
    fr_table = np.zeros([8, 257], dtype=np.float64)
    pos_seq = [[], [], [], [], [], [], [], []]
    last_level_cnt = 0
    [last_level_cnt, _, fr_table, pos_seq] = depth_first_search(last_level_cnt, current_pointer,
                                                                current_level, binstr, fr_table, pos_seq,
                                                                level)
    fr_table = fr_table.astype(int)
    return last_level_cnt, fr_table, pos_seq
def normalize_from_mesh(input_path,output_path,vg_size):
    """Sample a mesh and voxelize the samples to a vg_size grid.

    Draws 10M random surface points, rescales them to [0, vg_size - 1],
    rounds to integers, merges duplicates (averaging any extra columns such
    as colors) and writes the result to *output_path*.
    """
    pc_mesh = PyntCloud.from_file(input_path)
    print(pc_mesh)
    mesh=pc_mesh.mesh
    pc_mesh.points = pc_mesh.points.astype('float64', copy=False)
    pc_mesh.mesh = mesh
    pc = pc_mesh.get_sample("mesh_random", n=10000000, as_PyntCloud=True)
    coord=['x', 'y', 'z']
    points = pc.points.values
    print(np.min(points, axis=0), np.max(points, axis=0),len(points))
    # Shift to the origin, scale into [0, vg_size-1] and quantize.
    points = points - np.min(points)
    points = points / np.max(points)
    points = points * (vg_size - 1)
    points = np.round(points)
    pc.points[coord]=points
    if len(set(pc.points.columns) - set(coord)) > 0:
        # Extra attributes present: average them over coinciding voxels.
        pc.points = pc.points.groupby(by=coord, sort=False).mean()
    else:
        pc.points = pc.points.drop_duplicates()
    pc.to_file(output_path)
    print(np.min(pc.points, axis=0), np.max(pc.points, axis=0),len(pc.points))
    print('Normalized pc from ', input_path,' to ', output_path)
def normalize_pc(input_path,output_path,vg_size):
    """Voxelize an existing point cloud to a vg_size grid.

    Rescales xyz into [0, vg_size - 1], rounds, removes duplicate voxels
    (discarding any non-xyz columns) and writes the result to *output_path*.
    """
    pc = PyntCloud.from_file(input_path)
    coord=['x', 'y', 'z']
    points = pc.points.values
    points=points[:,:3]
    print(points.shape)
    print('original pc',np.min(points, axis=0), np.max(points, axis=0),len(points))
    # Shift to the origin, scale into [0, vg_size-1] and quantize.
    points = points - np.min(points)
    points = points / np.max(points)
    points = points * (vg_size - 1)
    points = np.round(points)
    points = np.unique(points, axis=0)
    points=pd.DataFrame(points,columns=coord)
    #points=points.drop_duplicates()
    new_pc=PyntCloud(points)
    new_pc.to_file(output_path)
    print('new pc',np.min(new_pc.points,axis=0), np.max(new_pc.points, axis=0),len(new_pc.points))
    print('Normalized pc from ', input_path,' to ', output_path)
def pmf_to_cdf(pmf):
    """Turn probability masses along the last dim into a CDF with leading 0.

    Output has one more entry than *pmf* on the last axis and is clamped to
    at most 1.0.
    """
    cumulative = torch.cumsum(pmf, dim=-1)
    pad_shape = pmf.shape[:-1] + (1,)
    leading_zero = torch.zeros(pad_shape, dtype=pmf.dtype, device=pmf.device)
    cdf = torch.cat([leading_zero, cumulative], dim=-1)
    # On GPU, softmax followed by cumsum can leave the last value slightly
    # above 1, so clamp the result.
    return cdf.clamp(max=1.)
'''
def removing_noises(path, departition_level, pc_level, rate):
pc=o3d.io.read_point_cloud(path)
ori_points=np.asarray(pc.points)
no_points=len(pc.points)
print('Starting filter out noise points')
print('File contains: ',no_points, ' points')
distances=[]
pcd_tree = o3d.geometry.KDTreeFlann(pc)
for i in range(len(pc.points)):
[_, idx, _] = pcd_tree.search_knn_vector_3d(pc.points[i], 16)
distances.append(D_distance(ori_points[idx[:],:3]))
removal=int(rate*no_points)
#print(np.asarray(distances).argsort()[:-10])
points=ori_points[np.asarray(distances).argsort()[:-removal][::-1],:]
# block partitioning
level = int(departition_level)
box = int(2 ** pc_level)
blocks, binstr2block = timing(partition_octree)(points, [0, 0, 0], [box, box, box], level)
oc_blocks = pc_2_block_oc3(blocks, bbox_max=64)
# noise octree representation
noisy_points=ori_points[np.asarray(distances).argsort()[-removal:][::-1],:]
no_noisy_points=len(noisy_points)
_, noisy_binstr = timing(partition_octree)(noisy_points, [0, 0, 0], [box, box, box], pc_level)
return oc_blocks, binstr2block, noisy_binstr,no_noisy_points
def D_distance(points):
anchor=points[0,:]
sub=np.square(points-anchor)
total=np.sum(sub,axis=1)
root=np.sqrt(total)
return np.average(root[1:])
''' | 18,459 | 38.027484 | 242 | py |
MSVoxelDNN | MSVoxelDNN-master/utils/training_tools.py |
import torch
import shutil
import numpy as np
import math as m
def compute_metric(predict, target,writer, step):
    """Compute binary-classification metrics from scores and log them.

    ``predict`` holds class scores whose argmax over dim 1 yields the 0/1
    prediction; ``target`` is the 0/1 ground truth.  Metrics are written to
    ``writer`` (TensorBoard-style SummaryWriter) under "bc/*" at ``step``
    and also returned as Python scalars.
    NOTE(review): when a class is absent from the batch the divisions become
    0/0 and produce NaN — confirm callers tolerate that.
    """
    pred_label = torch.argmax(predict, dim=1)
    # Confusion-matrix counts via products of {0,1} / {-1,0} label maps.
    tp = torch.count_nonzero(pred_label * target)
    fp = torch.count_nonzero(pred_label * (target - 1))
    tn = torch.count_nonzero((pred_label - 1) * (target - 1))
    fn = torch.count_nonzero((pred_label - 1) * (target))
    precision = tp / (tp + fp)
    recall = tp / (tp + fn)
    accuracy = (tp + tn) / (tp + tn + fp + fn)
    specificity = tn / (tn + fp)
    f1 = (2 * precision * recall) / (precision + recall)
    writer.add_scalar("bc/precision", precision,step)
    writer.add_scalar("bc/recall", recall,step)
    writer.add_scalar("bc/accuracy", accuracy,step)
    writer.add_scalar("bc/specificity", specificity,step)
    writer.add_scalar("bc/f1_score", f1,step)
    return tp.item(), fp.item(), tn.item(), fn.item(), precision.item(), recall.item(), accuracy.item(), specificity.item(), f1.item()
def save_ckp(state, is_best, checkpoint_path, best_model_path):
    """Persist a training checkpoint dict to disk.

    state: checkpoint we want to save
    is_best: is this the best checkpoint (min validation loss) so far
    checkpoint_path: path to save the checkpoint
    best_model_path: path the best checkpoint is mirrored to
    """
    # Always write the regular checkpoint.
    torch.save(state, checkpoint_path)
    # Mirror it to the best-model path when it improves validation loss.
    if is_best:
        shutil.copyfile(checkpoint_path, best_model_path)
def load_ckp(checkpoint_fpath, model, optimizer):
    """Restore model/optimizer state from a checkpoint file.

    checkpoint_path: path to save checkpoint
    model: model that we want to load checkpoint parameters into
    optimizer: optimizer we defined in previous training

    Returns (model, optimizer, epoch, valid_loss_min).  The checkpoint is
    expected to contain 'state_dict', 'optimizer', 'epoch' and
    'valid_loss_min' keys (see `save_ckp` callers).
    """
    # load check point
    checkpoint = torch.load(checkpoint_fpath)
    # initialize state_dict from checkpoint to model
    model.load_state_dict(checkpoint['state_dict'])
    # initialize optimizer from checkpoint to optimizer
    optimizer.load_state_dict(checkpoint['optimizer'])
    # initialize valid_loss_min from checkpoint to valid_loss_min
    valid_loss_min = checkpoint['valid_loss_min']
    # return model, optimizer, epoch value, min validation loss
    return model, optimizer, checkpoint['epoch'], valid_loss_min
def Rx(theta):
    """3x3 rotation matrix for an angle *theta* (radians) about the x axis."""
    c = m.cos(theta)
    s = m.sin(theta)
    return np.matrix([[1, 0, 0],
                      [0, c, -s],
                      [0, s, c]])
def Ry(theta):
    """3x3 rotation matrix for an angle *theta* (radians) about the y axis."""
    c = m.cos(theta)
    s = m.sin(theta)
    return np.matrix([[c, 0, s],
                      [0, 1, 0],
                      [-s, 0, c]])
def Rz(theta):
    """3x3 rotation matrix for an angle *theta* (radians) about the z axis."""
    c = m.cos(theta)
    s = m.sin(theta)
    return np.matrix([[c, -s, 0],
                      [s, c, 0],
                      [0, 0, 1]])
class Rotation(object):  # randomly rotate a point cloud
    """Data-augmentation transform: rotate a voxel point cloud by a random
    angle (1..45 degrees) about a random axis, re-anchor it at the origin,
    round to integer coordinates and drop points leaving [0, block_size).
    """
    def __init__(self, block_size):
        self.block_size = block_size
    def __call__(self, points):
        # print('before',points.shape[0])
        degree = np.random.randint(0, 45)+1
        theta=m.pi/(180/degree)
        rotmtx = [Rx, Ry, Rz]
        # BUG FIX: np.random.randint's upper bound is exclusive, so the old
        # randint(0, 2) could only ever pick Rx or Ry even though Rz is in
        # the list; use 3 so all three axes are possible.
        R = rotmtx[np.random.randint(0, 3)](theta)
        points = points * R
        points = points - np.min(points, axis=0)
        points = np.round(points)
        # print('larger than block: ',np.count_nonzero(points>=self.block_size))
        # Drop points that were rotated outside the block bounds.
        points = np.delete(points, np.where(np.max(points, axis=1) >= self.block_size)[0], 0)
        points = np.delete(points, np.where(np.min(points, axis=1) < 0)[0], 0)
        # print('after',points.shape[0])
        del theta,rotmtx,R
        return points
class Random_sampling(object):  # randomly rotate a point cloud
    """Data-augmentation transform: keep a random subset of the points.

    A drop rate is sampled from {0, 0.125, 0.175, 0.25, 0.3} (weighted
    towards 0), then that fraction of points is discarded by sampling row
    indices with replacement.
    """
    #def __init__(self):
    #    np.random.random()/2#percent of point to be remove
    def __call__(self, points):
        # print('before',points.shape[0])
        drop_rates = [0, 0.125, 0.175, 0.25, 0.3]
        drop_rate = np.random.choice(drop_rates, p=[0.5, 0.2, 0.1, 0.1,
                                                    0.1])
        n_keep = int(points.shape[0] * (1 - drop_rate))
        idx = np.random.randint(points.shape[0], size=n_keep)
        points = points[idx, :]
        # print('after',points.shape[0])
        del idx, drop_rates, drop_rate
        return points
| 4,259 | 35.724138 | 134 | py |
ByteTransformer | ByteTransformer-main/unit_test/python_scripts/bert_transformer_test.py | # Copyright 2023 Bytedance Ltd. and/or its affiliates.
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import argparse
import timeit
import random
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
random.seed(4)
np.random.seed(3)
torch_gen = torch.manual_seed(2)
torch.cuda.manual_seed(1)
def transpose_for_scores(x, n_heads, head_size):
    """Reshape (B, S, D) into per-head layout (B, H, S, W), D == H * W."""
    # (B, S, D) -split-> (B, S, H, W) -trans-> (B, H, S, W)
    split_shape = x.size()[:-1] + (n_heads, head_size)
    return x.view(split_shape).permute(0, 2, 1, 3)
def seqlen_to_mask(lengths, max_len):
    """Build a padding mask where mask[b, t] is True iff t < lengths[b]."""
    positions = torch.arange(0, max_len, device=lengths.device).type_as(lengths)
    grid = positions.repeat(lengths.numel(), 1)
    return grid.lt(lengths.unsqueeze(1))
def set_dtype(ts: torch.Tensor, dtype: str):
    """Cast *ts* to float32 ("fp32") or float16 ("fp16"); raise otherwise."""
    if dtype == "fp16":
        return ts.half()
    if dtype == "fp32":
        return ts.float()
    raise RuntimeError(f"Unsupported dtype {dtype}")
def parse_args():
    """Parse CLI arguments for the BERT transformer benchmark/test script.

    Positional: batch_size, seqlen, head_num, head_size.  Options control
    layer count, average sequence length, dtype, test-data export and the
    torch-op library path.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("batch_size", type=int)
    parser.add_argument("seqlen", type=int)
    parser.add_argument("head_num", type=int)
    parser.add_argument("head_size", type=int)
    parser.add_argument("--n_layers", default=1, type=int, help="number of transformer layers")
    parser.add_argument("--avg_seqlen", default=0, type=int, help="average seqlen, 0 for equal to seqlen")
    parser.add_argument("--dtype", choices=["fp16", "fp32"], default="fp16")
    parser.add_argument("--export_data", action="store_true", help="whether to export test data. "
                        "if true, only run pytorch to generate data, don't run ByteTransformer")
    parser.add_argument("--export_data_path", default="./", type=str, help="path to export test data")
    parser.add_argument("--lib_path", default="./lib/libths_bytetransformer.so",
                        type=str, help="lib path of torch op ext")
    parser.add_argument("--iters", default=100, type=int, help="perf iterations")
    return parser.parse_args()
if __name__ == "__main__":
    # Reference-vs-ByteTransformer test: build random BERT-layer weights,
    # run the PyTorch reference encoder, then either dump test data or run
    # the fused CUDA op and compare outputs / time it.
    args = parse_args()
    batch_size = args.batch_size
    seqlen = args.seqlen
    head_num = args.head_num
    head_size = args.head_size
    avg_seqlen = args.avg_seqlen
    if avg_seqlen <= 0:
        avg_seqlen = seqlen
    n_layers = args.n_layers
    hidden_dim = head_num * head_size
    dtype = args.dtype
    export_data = args.export_data
    export_data_path = args.export_data_path
    lib_path = args.lib_path
    iters = args.iters
    # Sample per-sequence lengths uniformly so their mean is ~avg_seqlen
    # while never exceeding seqlen.
    low, high = (2 * avg_seqlen - seqlen, seqlen + 1) if 2 * avg_seqlen > seqlen else (0, 2 * avg_seqlen + 1)
    input_lens = torch.randint(low=low, high=high, size=(batch_size,))
    print("input_lengths:", input_lens)
    seqlen_mask = seqlen_to_mask(input_lens, seqlen)
    # Random weights for every layer of one BERT encoder block.
    # autopep8: off
    qkv_kernel = [set_dtype(torch.empty(hidden_dim, hidden_dim * 3).uniform_(-0.4, 0.4).cuda(), dtype) for _ in range(n_layers)]
    qkv_bias = [set_dtype(torch.empty(hidden_dim * 3).uniform_(-0.4, 0.4).cuda(), dtype) for _ in range(n_layers)]
    attr_output_kernel = [set_dtype(torch.empty(hidden_dim, hidden_dim).uniform_(-0.4, 0.4).cuda(), dtype) for _ in range(n_layers)]
    attr_output_bias = [set_dtype(torch.empty(hidden_dim).uniform_(-0.4, 0.4).cuda(), dtype) for _ in range(n_layers)]
    attr_output_layernorm_gamma = [set_dtype(torch.empty(hidden_dim).uniform_(-0.4, 0.4).cuda(), dtype) for _ in range(n_layers)]
    attr_output_layernorm_beta = [set_dtype(torch.empty(hidden_dim).uniform_(-0.4, 0.4).cuda(), dtype) for _ in range(n_layers)]
    inter_kernel = [set_dtype(torch.empty(hidden_dim, hidden_dim * 4).uniform_(-0.4, 0.4).cuda(), dtype) for _ in range(n_layers)]
    inter_bias = [set_dtype(torch.empty(hidden_dim * 4).uniform_(-0.4, 0.4).cuda(), dtype) for _ in range(n_layers)]
    output_kernel = [set_dtype(torch.empty(hidden_dim * 4, hidden_dim).uniform_(-0.4, 0.4).cuda(), dtype) for _ in range(n_layers)]
    output_bias = [set_dtype(torch.empty(hidden_dim).uniform_(-0.4, 0.4).cuda(), dtype) for _ in range(n_layers)]
    output_layernorm_gamma = [set_dtype(torch.empty(hidden_dim).uniform_(-0.4, 0.4).cuda(), dtype) for _ in range(n_layers)]
    output_layernorm_beta = [set_dtype(torch.empty(hidden_dim).uniform_(-0.4, 0.4).cuda(), dtype) for _ in range(n_layers)]
    from_tensor = set_dtype(torch.empty(batch_size, seqlen, hidden_dim).uniform_(-0.4, 0.4).cuda(), dtype)
    attr_mask = set_dtype(torch.tile(seqlen_mask, dims=(seqlen,)).reshape(batch_size, seqlen, seqlen).cuda(), dtype)
    # autopep8: on
    is_remove_padding = True
    use_fused_attention = True
    transformer_output = [None for _ in range(n_layers)]
    # PyTorch reference forward pass (self-attention + FFN per layer).
    with torch.no_grad():
        hidden_states = from_tensor
        for layer in range(n_layers):
            input_tensor = hidden_states
            qkv = torch.matmul(hidden_states, qkv_kernel[layer]) + qkv_bias[layer]
            q, k, v = qkv.chunk(3, dim=-1)
            q = transpose_for_scores(q, head_num, head_size)
            k = transpose_for_scores(k, head_num, head_size)
            v = transpose_for_scores(v, head_num, head_size)
            # (B, H, S, W) @ (B, H, W, S) -> (B, H, S, S) -softmax-> (B, H, S, S)
            scores = torch.matmul(q, k.transpose(-2, -1)) / (head_size ** .5)
            scores -= 10000.0 * (1.0 - attr_mask.unsqueeze(1))
            probs = F.softmax(scores, dim=-1)
            # (B, H, S, S) @ (B, H, S, W) -> (B, H, S, W) -trans-> (B, S, H, W)
            h = torch.matmul(probs, v).permute(0, 2, 1, 3).contiguous()
            # -merge-> (B, S, D)
            new_context_layer_shape = h.size()[:-2] + (hidden_dim, )
            hidden_states = h.view(new_context_layer_shape)
            hidden_states = torch.matmul(hidden_states, attr_output_kernel[layer]) + attr_output_bias[layer]
            hidden_states = hidden_states + input_tensor
            hidden_states = F.layer_norm(hidden_states, (hidden_dim, ),
                                         weight=attr_output_layernorm_gamma[layer], bias=attr_output_layernorm_beta[layer])
            residual = hidden_states
            hidden_states = torch.matmul(hidden_states, inter_kernel[layer]) + inter_bias[layer]
            hidden_states = F.gelu(hidden_states)
            hidden_states = torch.matmul(hidden_states, output_kernel[layer]) + output_bias[layer]
            hidden_states = hidden_states + residual
            hidden_states = F.layer_norm(hidden_states, (hidden_dim, ),
                                         weight=output_layernorm_gamma[layer], bias=output_layernorm_beta[layer])
            transformer_output[layer] = hidden_states
    if export_data:
        # Export mode: dump layer-0 weights, inputs and the masked reference
        # output as text files consumed by the C++ unit tests.
        masked_output = transformer_output[0]
        masked_output = masked_output * set_dtype(seqlen_mask.unsqueeze(-1).cuda(), dtype)
        all_vars = [qkv_kernel[0], qkv_bias[0],
                    attr_output_kernel[0], attr_output_bias[0], attr_output_layernorm_gamma[0], attr_output_layernorm_beta[0],
                    inter_kernel[0], inter_bias[0],
                    output_kernel[0], output_bias[0], output_layernorm_gamma[0], output_layernorm_beta[0],
                    from_tensor, attr_mask, masked_output
                    ]
        file_list = ["qkv_kernel.in", "qkv_bias.in",
                     "attr_output_kernel.in", "attr_output_bias.in", "attr_output_layernorm_gamma.in", "attr_output_layernorm_beta.in",
                     "inter_kernel.in", "inter_bias.in",
                     "output_kernel.in", "output_bias.in", "output_layernorm_gamma.in", "output_layernorm_beta.in",
                     "from_tensor.in", "atten_mask.in", "transformer_out.out"
                     ]
        idx = 0
        for var in all_vars:
            print(str(idx) + " " + file_list[idx] + " " +
                  str(var.shape) + " " + str(var.dtype))
            np.savetxt(file_list[idx], set_dtype(var.flatten(), "fp32").cpu().numpy(), delimiter='\n', fmt='%f')
            idx = idx + 1
    else:
        # Benchmark mode: run the fused ByteTransformer op, compare against
        # the (padding-masked) reference and report the average latency.
        torch.ops.load_library(lib_path)
        warmup_iters = 5
        for i in range(warmup_iters + iters):
            if i == warmup_iters:
                t0 = timeit.default_timer()
            hidden_states = from_tensor
            for layer in range(n_layers):
                hidden_states = torch.ops.ByteTransformer.BertTransformer(
                    head_num, head_size,
                    qkv_kernel[layer], qkv_bias[layer],
                    attr_output_kernel[layer], attr_output_bias[layer],
                    attr_output_layernorm_gamma[layer], attr_output_layernorm_beta[layer],
                    inter_kernel[layer], inter_bias[layer], output_kernel[layer], output_bias[layer],
                    output_layernorm_gamma[layer], output_layernorm_beta[layer],
                    hidden_states, attr_mask,
                    is_remove_padding, use_fused_attention)
            output = hidden_states
        t1 = timeit.default_timer()
        masked_output = transformer_output[-1]
        masked_output = masked_output * set_dtype(seqlen_mask.unsqueeze(-1).cuda(), dtype)
        print("max diff:", torch.max(torch.abs(masked_output - output)).cpu())
        print("time costs: {:.2f} ms".format((t1 - t0) * 1000 / iters))
| 10,261 | 48.100478 | 145 | py |
dytox | dytox-main/main.py | # Copyright (c) 2015-present, Facebook, Inc.
# All rights reserved.
# Modified for DyTox by Arthur Douillard
import argparse
import copy
import datetime
import json
import os
import statistics
import time
import warnings
from pathlib import Path
import yaml
from continual.pod import _local_pod
import numpy as np
import torch
import torch.backends.cudnn as cudnn
from continuum.metrics import Logger
from continuum.tasks import split_train_val
from timm.loss import LabelSmoothingCrossEntropy, SoftTargetCrossEntropy
from timm.scheduler import create_scheduler
from timm.optim import create_optimizer
from continual.mixup import Mixup
import continual.utils as utils
from continual import factory, scaler
from continual.classifier import Classifier
from continual.rehearsal import Memory, get_finetuning_dataset
from continual.sam import SAM
from continual.datasets import build_dataset
from continual.engine import eval_and_log, train_one_epoch
from continual.losses import bce_with_logits, soft_bce_with_logits
warnings.filterwarnings("ignore")
def get_args_parser():
parser = argparse.ArgumentParser('DyTox training and evaluation script', add_help=False)
parser.add_argument('--batch-size', default=128, type=int)
parser.add_argument('--incremental-batch-size', default=None, type=int)
parser.add_argument('--epochs', default=500, type=int)
parser.add_argument('--base-epochs', default=500, type=int,
help='Number of epochs for base task')
parser.add_argument('--no-amp', default=False, action='store_true',
help='Disable mixed precision')
# Model parameters
parser.add_argument('--model', default='')
parser.add_argument('--input-size', default=32, type=int, help='images input size')
parser.add_argument('--patch-size', default=16, type=int)
parser.add_argument('--embed-dim', default=768, type=int)
parser.add_argument('--depth', default=12, type=int)
parser.add_argument('--num-heads', default=12, type=int)
parser.add_argument('--drop', type=float, default=0.0, metavar='PCT',
help='Dropout rate (default: 0.)')
parser.add_argument('--drop-path', type=float, default=0.1, metavar='PCT',
help='Drop path rate (default: 0.1)')
parser.add_argument('--norm', default='layer', choices=['layer', 'scale'],
help='Normalization layer type')
# Optimizer parameters
parser.add_argument('--opt', default='adamw', type=str, metavar='OPTIMIZER',
help='Optimizer (default: "adamw"')
parser.add_argument('--opt-eps', default=1e-8, type=float, metavar='EPSILON',
help='Optimizer Epsilon (default: 1e-8)')
parser.add_argument('--opt-betas', default=None, type=float, nargs='+', metavar='BETA',
help='Optimizer Betas (default: None, use opt default)')
parser.add_argument('--clip-grad', type=float, default=None, metavar='NORM',
help='Clip gradient norm (default: None, no clipping)')
parser.add_argument('--momentum', type=float, default=0.9, metavar='M',
help='SGD momentum (default: 0.9)')
parser.add_argument('--weight-decay', type=float, default=0.05,
help='weight decay (default: 0.05)')
# Learning rate schedule parameters
parser.add_argument('--sched', default='cosine', type=str, metavar='SCHEDULER',
help='LR scheduler (default: "cosine"')
parser.add_argument('--lr', type=float, default=5e-4, metavar='LR',
help='learning rate (default: 5e-4)')
parser.add_argument("--incremental-lr", default=None, type=float,
help="LR to use for incremental task (t > 0)")
parser.add_argument('--lr-noise', type=float, nargs='+', default=None, metavar='pct, pct',
help='learning rate noise on/off epoch percentages')
parser.add_argument('--lr-noise-pct', type=float, default=0.67, metavar='PERCENT',
help='learning rate noise limit percent (default: 0.67)')
parser.add_argument('--lr-noise-std', type=float, default=1.0, metavar='STDDEV',
help='learning rate noise std-dev (default: 1.0)')
parser.add_argument('--warmup-lr', type=float, default=1e-6, metavar='LR',
help='warmup learning rate (default: 1e-6)')
parser.add_argument('--incremental-warmup-lr', type=float, default=None, metavar='LR',
help='warmup learning rate (default: 1e-6) for task T > 0')
parser.add_argument('--min-lr', type=float, default=1e-5, metavar='LR',
help='lower lr bound for cyclic schedulers that hit 0 (1e-5)')
parser.add_argument('--decay-epochs', type=float, default=30, metavar='N',
help='epoch interval to decay LR')
parser.add_argument('--warmup-epochs', type=int, default=5, metavar='N',
help='epochs to warmup LR, if scheduler supports')
parser.add_argument('--cooldown-epochs', type=int, default=10, metavar='N',
help='epochs to cooldown LR at min_lr, after cyclic schedule ends')
parser.add_argument('--patience-epochs', type=int, default=10, metavar='N',
help='patience epochs for Plateau LR scheduler (default: 10')
parser.add_argument('--decay-rate', '--dr', type=float, default=0.1, metavar='RATE',
help='LR decay rate (default: 0.1)')
# Augmentation parameters
parser.add_argument('--color-jitter', type=float, default=0.4, metavar='PCT',
help='Color jitter factor (default: 0.4)')
parser.add_argument('--aa', type=str, default='rand-m9-mstd0.5-inc1', metavar='NAME',
help='Use AutoAugment policy. "v0" or "original". " + \
"(default: rand-m9-mstd0.5-inc1)'),
parser.add_argument('--smoothing', type=float, default=0.1, help='Label smoothing (default: 0.1)')
parser.add_argument('--train-interpolation', type=str, default='bicubic',
help='Training interpolation (random, bilinear, bicubic default: "bicubic")')
parser.add_argument('--repeated-aug', action='store_true')
parser.add_argument('--no-repeated-aug', action='store_false', dest='repeated_aug')
parser.set_defaults(repeated_aug=True)
# * Random Erase params
parser.add_argument('--reprob', type=float, default=0.25, metavar='PCT',
help='Random erase prob (default: 0.25)')
parser.add_argument('--remode', type=str, default='pixel',
help='Random erase mode (default: "pixel")')
parser.add_argument('--recount', type=int, default=1,
help='Random erase count (default: 1)')
parser.add_argument('--resplit', action='store_true', default=False,
help='Do not random erase first (clean) augmentation split')
# * Mixup params
parser.add_argument('--mixup', type=float, default=0.8,
help='mixup alpha, mixup enabled if > 0. (default: 0.8)')
parser.add_argument('--cutmix', type=float, default=1.0,
help='cutmix alpha, cutmix enabled if > 0. (default: 1.0)')
parser.add_argument('--cutmix-minmax', type=float, nargs='+', default=None,
help='cutmix min/max ratio, overrides alpha and enables cutmix if set (default: None)')
parser.add_argument('--mixup-prob', type=float, default=1.0,
help='Probability of performing mixup or cutmix when either/both is enabled')
parser.add_argument('--mixup-switch-prob', type=float, default=0.5,
help='Probability of switching to cutmix when both mixup and cutmix enabled')
parser.add_argument('--mixup-mode', type=str, default='batch',
help='How to apply mixup/cutmix params. Per "batch", "pair", or "elem", "old"')
# Distillation parameters
parser.add_argument('--auto-kd', default=False, action='store_true',
help='Balance kd factor as WA https://arxiv.org/abs/1911.07053')
parser.add_argument('--kd', default=0., type=float)
parser.add_argument('--distillation-tau', default=1.0, type=float,
help='Temperature for the KD')
parser.add_argument('--resnet', default=False, action='store_true')
parser.add_argument('--pod', default=None, type=float)
parser.add_argument('--pod-scales', default=[1], type=int, nargs='+')
parser.add_argument('--pod-scaling', default=False, action='store_true')
# Dataset parameters
parser.add_argument('--data-path', default='', type=str,
help='dataset path')
parser.add_argument('--data-set', default='IMNET', choices=['CIFAR', 'IMNET', 'INAT', 'INAT19'],
type=str, help='Image Net dataset path')
parser.add_argument('--inat-category', default='name',
choices=['kingdom', 'phylum', 'class', 'order', 'supercategory', 'family', 'genus', 'name'],
type=str, help='semantic granularity')
parser.add_argument('--output-dir', default='',
help='Dont use that')
parser.add_argument('--output-basedir', default='./checkponts/',
help='path where to save, empty for no saving')
parser.add_argument('--device', default='cuda',
help='device to use for training / testing')
parser.add_argument('--seed', default=0, type=int)
parser.add_argument('--start_epoch', default=0, type=int, metavar='N',
help='start epoch')
parser.add_argument('--eval', action='store_true', help='Perform evaluation only')
parser.add_argument('--dist-eval', action='store_true', default=False, help='Enabling distributed evaluation')
parser.add_argument('--num_workers', default=10, type=int)
parser.add_argument('--pin-mem', action='store_true',
help='Pin CPU memory in DataLoader for more efficient (sometimes) transfer to GPU.')
parser.add_argument('--no-pin-mem', action='store_false', dest='pin_mem',
help='')
parser.set_defaults(pin_mem=True)
# Continual Learning parameters
parser.add_argument("--initial-increment", default=50, type=int,
help="Base number of classes")
parser.add_argument("--increment", default=10, type=int,
help="Number of new classes per incremental task")
parser.add_argument('--class-order', default=None, type=int, nargs='+',
help='Class ordering, a list of class ids.')
parser.add_argument("--eval-every", default=50, type=int,
help="Eval model every X epochs, if None only eval at the task end")
parser.add_argument('--debug', default=False, action='store_true',
help='Only do one batch per epoch')
parser.add_argument('--retrain-scratch', default=False, action='store_true',
help='Retrain from scratch on all data after each step (JOINT).')
parser.add_argument('--max-task', default=None, type=int,
help='Max task id to train on')
parser.add_argument('--name', default='', help='Name to display for screen')
parser.add_argument('--options', default=[], nargs='*')
# DyTox related
parser.add_argument('--dytox', action='store_true', default=False,
help='Enable super DyTox god mode.')
parser.add_argument('--ind-clf', default='', choices=['1-1', '1-n', 'n-n', 'n-1'],
help='Independent classifier per task but predicting all seen classes')
parser.add_argument('--joint-tokens', default=False, action='store_true',
help='Forward w/ all task tokens alltogether [Faster but not working as well, not sure why')
# Diversity
parser.add_argument('--head-div', default=0., type=float,
help='Use a divergent head to predict among new classes + 1 using last token')
parser.add_argument('--head-div-mode', default=['tr', 'ft'], nargs='+', type=str,
help='Only do divergence during training (tr) and/or finetuning (ft).')
# SAM-related parameters
# SAM fails with Mixed Precision, so use --no-amp
parser.add_argument('--sam-rho', default=0., type=float,
help='Rho parameters for Sharpness-Aware Minimization. Disabled if == 0.')
parser.add_argument('--sam-adaptive', default=False, action='store_true',
help='Adaptive version of SAM (more robust to rho)')
parser.add_argument('--sam-first', default='main', choices=['main', 'memory'],
help='Apply SAM first step on main or memory loader (need --sep-memory for the latter)')
parser.add_argument('--sam-second', default='main', choices=['main', 'memory'],
help='Apply SAM second step on main or memory loader (need --sep-memory for the latter)')
parser.add_argument('--sam-skip-first', default=False, action='store_true',
help='Dont use SAM for first task')
parser.add_argument('--sam-final', default=None, type=float,
help='Final value of rho is it is changed linearly per task.')
parser.add_argument('--sam-div', default='', type=str,
choices=['old_no_upd'],
help='SAM for diversity')
parser.add_argument('--sam-mode', default=['tr', 'ft'], nargs='+', type=str,
help='Only do SAM during training (tr) and/or finetuning (ft).')
parser.add_argument('--look-sam-k', default=0, type=int,
help='Apply look sam every K updates (see under review ICLR22)')
parser.add_argument('--look-sam-alpha', default=0.7, type=float,
help='Alpha factor of look sam to weight gradient reuse, 0 < alpha <= 1')
# Rehearsal memory
parser.add_argument('--memory-size', default=2000, type=int,
help='Total memory size in number of stored (image, label).')
parser.add_argument('--distributed-memory', default=False, action='store_true',
help='Use different rehearsal memory per process.')
parser.add_argument('--global-memory', default=False, action='store_false', dest='distributed_memory',
help='Use same rehearsal memory for all process.')
parser.set_defaults(distributed_memory=True)
parser.add_argument('--oversample-memory', default=1, type=int,
help='Amount of time we repeat the same rehearsal.')
parser.add_argument('--oversample-memory-ft', default=1, type=int,
help='Amount of time we repeat the same rehearsal for finetuning, only for old classes not new classes.')
parser.add_argument('--rehearsal-test-trsf', default=False, action='store_true',
help='Extract features without data augmentation.')
parser.add_argument('--rehearsal-modes', default=1, type=int,
help='Select N on a single gpu, but with mem_size/N.')
parser.add_argument('--fixed-memory', default=False, action='store_true',
help='Dont fully use memory when no all classes are seen as in Hou et al. 2019')
parser.add_argument('--rehearsal', default="random",
choices=[
'random',
'closest_token', 'closest_all',
'icarl_token', 'icarl_all',
'furthest_token', 'furthest_all'
],
help='Method to herd sample for rehearsal.')
parser.add_argument('--sep-memory', default=False, action='store_true',
help='Dont merge memory w/ task dataset but keep it alongside')
parser.add_argument('--replay-memory', default=0, type=int,
help='Replay memory according to Guido rule [NEED DOC]')
# Finetuning
parser.add_argument('--finetuning', default='', choices=['balanced'],
help='Whether to do a finetuning after each incremental task. Backbone are frozen.')
parser.add_argument('--finetuning-epochs', default=30, type=int,
help='Number of epochs to spend in finetuning.')
parser.add_argument('--finetuning-lr', default=5e-5, type=float,
help='LR during finetuning, will be kept constant.')
parser.add_argument('--finetuning-teacher', default=False, action='store_true',
help='Use teacher/old model during finetuning for all kd related.')
parser.add_argument('--finetuning-resetclf', default=False, action='store_true',
help='Reset classifier before finetuning phase (similar to GDumb/DER).')
parser.add_argument('--only-ft', default=False, action='store_true',
help='Only train on FT data')
parser.add_argument('--ft-no-sampling', default=False, action='store_true',
help='Dont use particular sampling for the finetuning phase.')
# What to freeze
parser.add_argument('--freeze-task', default=[], nargs="*", type=str,
help='What to freeze before every incremental task (t > 0).')
parser.add_argument('--freeze-ft', default=[], nargs="*", type=str,
help='What to freeze before every finetuning (t > 0).')
parser.add_argument('--freeze-eval', default=False, action='store_true',
help='Frozen layers are put in eval. Important for stoch depth')
# Convit - CaiT
parser.add_argument('--local-up-to-layer', default=10, type=int,
help='number of GPSA layers')
parser.add_argument('--locality-strength', default=1., type=float,
help='Determines how focused each head is around its attention center')
parser.add_argument('--class-attention', default=False, action='store_true',
help='Freeeze and Process the class token as done in CaiT')
# Logs
parser.add_argument('--log-path', default="logs")
parser.add_argument('--log-category', default="misc")
# Classification
parser.add_argument('--bce-loss', default=False, action='store_true')
# distributed training parameters
parser.add_argument('--local_rank', default=None, type=int)
parser.add_argument('--world_size', default=1, type=int,
help='number of distributed processes')
parser.add_argument('--dist_url', default='env://', help='url used to set up distributed training')
# Resuming
parser.add_argument('--resume', default='', help='resume from checkpoint')
parser.add_argument('--start-task', default=0, type=int, help='resume from checkpoint')
parser.add_argument('--start-epoch', default=0, type=int, help='resume from checkpoint')
parser.add_argument('--save-every-epoch', default=None, type=int)
parser.add_argument('--validation', default=0.0, type=float,
help='Use % of the training set as val, replacing the test.')
return parser
def main(args):
    """Run the full DyTox continual-learning experiment described by ``args``.

    Builds the incremental train/val scenarios, then for each task:
    optionally keeps a frozen copy of the previous model as a distillation
    teacher, extends the model (DyTox task tokens or classifier heads),
    trains, updates the rehearsal memory (per-process or global), and
    optionally runs a balanced finetuning phase. Metrics are accumulated in
    a continuum ``Logger`` and dumped as JSON lines to disk when a run name
    is set.
    """
    print(args)
    logger = Logger(list_subsets=['train', 'test'])
    use_distillation = args.auto_kd
    device = torch.device(args.device)
    # fix the seed for reproducibility
    seed = args.seed + utils.get_rank()
    torch.manual_seed(seed)
    np.random.seed(seed)
    cudnn.benchmark = True
    scenario_train, args.nb_classes = build_dataset(is_train=True, args=args)
    scenario_val, _ = build_dataset(is_train=False, args=args)
    mixup_fn = None
    mixup_active = args.mixup > 0 or args.cutmix > 0. or args.cutmix_minmax is not None
    model = factory.get_backbone(args)
    model.head = Classifier(
        model.embed_dim, args.nb_classes, args.initial_increment,
        args.increment, len(scenario_train)
    )
    model.to(device)
    # model will be on multiple GPUs, while model_without_ddp on a single GPU, but
    # it's actually the same model.
    model_without_ddp = model
    n_parameters = sum(p.numel() for p in model_without_ddp.parameters() if p.requires_grad)
    # Start the logging process on disk ----------------------------------------
    if args.name:
        log_path = os.path.join(args.log_dir, f"logs_{args.trial_id}.json")
        long_log_path = os.path.join(args.log_dir, f"long_logs_{args.trial_id}.json")
        if utils.is_main_process():
            # Sets the terminal/screen window title to the experiment name.
            os.system("echo '\ek{}\e\\'".format(args.name))
            os.makedirs(args.log_dir, exist_ok=True)
            with open(os.path.join(args.log_dir, f"config_{args.trial_id}.json"), 'w+') as f:
                config = vars(args)
                config["nb_parameters"] = n_parameters
                json.dump(config, f, indent=2)
            with open(log_path, 'w+') as f:
                pass  # touch
            with open(long_log_path, 'w+') as f:
                pass  # touch
        log_store = {'results': {}}
        args.output_dir = os.path.join(
            args.output_basedir,
            f"{datetime.datetime.now().strftime('%y-%m-%d')}_{args.data_set}-{args.initial_increment}-{args.increment}_{args.name}_{args.trial_id}"
        )
    else:
        log_store = None
        log_path = long_log_path = None
    if args.output_dir and utils.is_main_process():
        Path(args.output_dir).mkdir(parents=True, exist_ok=True)
    if args.distributed:
        torch.distributed.barrier()
    print('number of params:', n_parameters)
    loss_scaler = scaler.ContinualScaler(args.no_amp)
    # Criterion priority: mixup/cutmix (soft targets, possibly BCE) >
    # BCE > label smoothing > plain cross-entropy.
    criterion = LabelSmoothingCrossEntropy()
    if args.mixup > 0. or args.cutmix > 0.:
        criterion = SoftTargetCrossEntropy()
        if args.bce_loss:
            criterion = soft_bce_with_logits
    elif args.bce_loss:
        criterion = bce_with_logits
    elif args.smoothing:
        criterion = LabelSmoothingCrossEntropy(smoothing=args.smoothing)
    else:
        criterion = torch.nn.CrossEntropyLoss()
    teacher_model = None
    output_dir = Path(args.output_dir)
    # Rehearsal memory, shared across all tasks of this run.
    memory = None
    if args.memory_size > 0:
        memory = Memory(
            args.memory_size, scenario_train.nb_classes, args.rehearsal, args.fixed_memory, args.rehearsal_modes
        )
    nb_classes = args.initial_increment
    base_lr = args.lr
    accuracy_list = []
    start_time = time.time()
    if args.debug:
        args.base_epochs = 1
        args.epochs = 1
    args.increment_per_task = [args.initial_increment] + [args.increment for _ in range(len(scenario_train) - 1)]
    # --------------------------------------------------------------------------
    #
    # Begin of the task loop
    #
    # --------------------------------------------------------------------------
    dataset_true_val = None
    for task_id, dataset_train in enumerate(scenario_train):
        if args.max_task == task_id:
            print(f"Stop training because of max task")
            break
        print(f"Starting task id {task_id}/{len(scenario_train) - 1}")
        # ----------------------------------------------------------------------
        # Data
        dataset_val = scenario_val[:task_id + 1]
        if args.validation > 0.:  # use validation split instead of test
            if task_id == 0:
                dataset_train, dataset_val = split_train_val(dataset_train, args.validation)
                dataset_true_val = dataset_val
            else:
                dataset_train, dataset_val = split_train_val(dataset_train, args.validation)
                dataset_true_val.concat(dataset_val)
            dataset_val = dataset_true_val
        for i in range(3):  # Quick check to ensure same preprocessing between train/test
            assert abs(dataset_train.trsf.transforms[-1].mean[i] - dataset_val.trsf.transforms[-1].mean[i]) < 0.0001
            assert abs(dataset_train.trsf.transforms[-1].std[i] - dataset_val.trsf.transforms[-1].std[i]) < 0.0001
        loader_memory = None
        if task_id > 0 and memory is not None:
            dataset_memory = memory.get_dataset(dataset_train)
            loader_memory = factory.InfiniteLoader(factory.get_train_loaders(
                dataset_memory, args,
                args.replay_memory if args.replay_memory > 0 else args.batch_size
            ))
            if not args.sep_memory:
                previous_size = len(dataset_train)
                for _ in range(args.oversample_memory):
                    dataset_train.add_samples(*memory.get())
                print(f"{len(dataset_train) - previous_size} samples added from memory.")
            if args.only_ft:
                dataset_train = get_finetuning_dataset(dataset_train, memory, 'balanced')
        # ----------------------------------------------------------------------
        # ----------------------------------------------------------------------
        # Initializing teacher model from previous task
        if use_distillation and task_id > 0:
            teacher_model = copy.deepcopy(model_without_ddp)
            teacher_model.freeze(['all'])
            teacher_model.eval()
        # ----------------------------------------------------------------------
        # ----------------------------------------------------------------------
        # Ensembling
        if args.dytox:
            model_without_ddp = factory.update_dytox(model_without_ddp, task_id, args)
        # ----------------------------------------------------------------------
        # ----------------------------------------------------------------------
        # Adding new parameters to handle the new classes
        print("Adding new parameters")
        if task_id > 0 and not args.dytox:
            model_without_ddp.head.add_classes()
        if task_id > 0:
            model_without_ddp.freeze(args.freeze_task)
        # ----------------------------------------------------------------------
        # ----------------------------------------------------------------------
        # Debug: Joint training from scratch on all previous data
        if args.retrain_scratch:
            model_without_ddp.init_params()
            dataset_train = scenario_train[:task_id+1]
        # ----------------------------------------------------------------------
        # ----------------------------------------------------------------------
        # Data
        loader_train, loader_val = factory.get_loaders(dataset_train, dataset_val, args)
        # ----------------------------------------------------------------------
        # ----------------------------------------------------------------------
        # Learning rate and optimizer
        if task_id > 0 and args.incremental_batch_size:
            args.batch_size = args.incremental_batch_size
        # LR is linearly scaled by the global batch size (base: 512).
        if args.incremental_lr is not None and task_id > 0:
            linear_scaled_lr = args.incremental_lr * args.batch_size * utils.get_world_size() / 512.0
        else:
            linear_scaled_lr = base_lr * args.batch_size * utils.get_world_size() / 512.0
        args.lr = linear_scaled_lr
        optimizer = create_optimizer(args, model_without_ddp)
        lr_scheduler, _ = create_scheduler(args, optimizer)
        # ----------------------------------------------------------------------
        if mixup_active:
            mixup_fn = Mixup(
                mixup_alpha=args.mixup, cutmix_alpha=args.cutmix, cutmix_minmax=args.cutmix_minmax,
                prob=args.mixup_prob, switch_prob=args.mixup_switch_prob, mode=args.mixup_mode,
                label_smoothing=args.smoothing,
                num_classes=nb_classes,
                loader_memory=loader_memory
            )
        skipped_task = False
        initial_epoch = epoch = 0
        if args.resume and args.start_task > task_id:
            # Resuming a run: earlier tasks are reloaded, not retrained.
            utils.load_first_task_model(model_without_ddp, loss_scaler, task_id, args)
            print("Skipping first task")
            epochs = 0
            train_stats = {"task_skipped": str(task_id)}
            skipped_task = True
        elif args.base_epochs is not None and task_id == 0:
            epochs = args.base_epochs
        else:
            epochs = args.epochs
        if args.distributed:
            del model
            model = torch.nn.parallel.DistributedDataParallel(
                model_without_ddp, device_ids=[args.gpu], find_unused_parameters=True)
            torch.distributed.barrier()
        else:
            model = model_without_ddp
        model_without_ddp.nb_epochs = epochs
        model_without_ddp.nb_batch_per_epoch = len(loader_train)
        # Init SAM, for DyTox++ (see appendix) ---------------------------------
        sam = None
        if args.sam_rho > 0. and 'tr' in args.sam_mode and ((task_id > 0 and args.sam_skip_first) or not args.sam_skip_first):
            if args.sam_final is not None:
                # rho is interpolated linearly from sam_rho to sam_final over tasks.
                sam_step = (args.sam_final - args.sam_rho) / scenario_train.nb_tasks
                sam_rho = args.sam_rho + task_id * sam_step
            else:
                sam_rho = args.sam_rho
            print(f'Initialize SAM with rho={sam_rho}')
            sam = SAM(
                optimizer, model_without_ddp,
                rho=sam_rho, adaptive=args.sam_adaptive,
                div=args.sam_div,
                use_look_sam=args.look_sam_k > 0, look_sam_alpha=args.look_sam_alpha
            )
        # ----------------------------------------------------------------------
        print(f"Start training for {epochs-initial_epoch} epochs")
        max_accuracy = 0.0
        for epoch in range(initial_epoch, epochs):
            if args.distributed:
                loader_train.sampler.set_epoch(epoch)
            train_stats = train_one_epoch(
                model, criterion, loader_train,
                optimizer, device, epoch, task_id, loss_scaler,
                args.clip_grad, mixup_fn,
                debug=args.debug,
                args=args,
                teacher_model=teacher_model,
                model_without_ddp=model_without_ddp,
                sam=sam,
                loader_memory=loader_memory,
                pod=args.pod if task_id > 0 else None, pod_scales=args.pod_scales
            )
            lr_scheduler.step(epoch)
            if args.save_every_epoch is not None and epoch % args.save_every_epoch == 0:
                if os.path.isdir(args.resume):
                    with open(os.path.join(args.resume, 'save_log.txt'), 'w+') as f:
                        f.write(f'task={task_id}, epoch={epoch}\n')
                checkpoint_paths = [os.path.join(args.resume, f'checkpoint_{task_id}.pth')]
                for checkpoint_path in checkpoint_paths:
                    # Don't overwrite checkpoints of tasks already done before resuming.
                    if (task_id < args.start_task and args.start_task > 0) and os.path.isdir(args.resume) and os.path.exists(checkpoint_path):
                        continue
                    utils.save_on_master({
                        'model': model_without_ddp.state_dict(),
                        'optimizer': optimizer.state_dict(),
                        'lr_scheduler': lr_scheduler.state_dict(),
                        'epoch': epoch,
                        'task_id': task_id,
                        'scaler': loss_scaler.state_dict(),
                        'args': args,
                    }, checkpoint_path)
            if args.eval_every and (epoch % args.eval_every == 0 or (args.finetuning and epoch == epochs - 1)):
                eval_and_log(
                    args, output_dir, model, model_without_ddp, optimizer, lr_scheduler,
                    epoch, task_id, loss_scaler, max_accuracy,
                    [], n_parameters, device, loader_val, train_stats, None, long_log_path,
                    logger, model_without_ddp.epoch_log()
                )
            logger.end_epoch()
        # Per-process rehearsal memory: each rank herds and stores its own samples.
        if memory is not None and args.distributed_memory:
            task_memory_path = os.path.join(args.resume, f'dist_memory_{task_id}-{utils.get_rank()}.npz')
            if os.path.isdir(args.resume) and os.path.exists(task_memory_path):
                # Resuming this task step, thus reloading saved memory samples
                # without needing to re-compute them
                memory.load(task_memory_path)
            else:
                task_set_to_rehearse = scenario_train[task_id]
                if args.rehearsal_test_trsf:
                    task_set_to_rehearse.trsf = scenario_val[task_id].trsf
                memory.add(task_set_to_rehearse, model, args.initial_increment if task_id == 0 else args.increment)
                #memory.add(scenario_train[task_id], model, args.initial_increment if task_id == 0 else args.increment)
                if args.resume != '':
                    memory.save(task_memory_path)
                else:
                    memory.save(os.path.join(args.output_dir, f'dist_memory_{task_id}-{utils.get_rank()}.npz'))
        # Global rehearsal memory: rank 0 builds it, other ranks reload it from disk.
        if memory is not None and not args.distributed_memory:
            task_memory_path = os.path.join(args.resume, f'memory_{task_id}.npz')
            if utils.is_main_process():
                if os.path.isdir(args.resume) and os.path.exists(task_memory_path):
                    # Resuming this task step, thus reloading saved memory samples
                    # without needing to re-compute them
                    memory.load(task_memory_path)
                else:
                    task_set_to_rehearse = scenario_train[task_id]
                    if args.rehearsal_test_trsf:
                        task_set_to_rehearse.trsf = scenario_val[task_id].trsf
                    memory.add(task_set_to_rehearse, model, args.initial_increment if task_id == 0 else args.increment)
                    if args.resume != '':
                        memory.save(task_memory_path)
                    else:
                        memory.save(os.path.join(args.output_dir, f'memory_{task_id}-{utils.get_rank()}.npz'))
            assert len(memory) <= args.memory_size, (len(memory), args.memory_size)
            torch.distributed.barrier()
            if not utils.is_main_process():
                if args.resume != '':
                    memory.load(task_memory_path)
                else:
                    memory.load(os.path.join(args.output_dir, f'memory_{task_id}-0.npz'))
                memory.save(os.path.join(args.output_dir, f'memory_{task_id}-{utils.get_rank()}.npz'))
            torch.distributed.barrier()
        # ----------------------------------------------------------------------
        # FINETUNING
        # ----------------------------------------------------------------------
        # Init SAM, for DyTox++ (see appendix) ---------------------------------
        sam = None
        if args.sam_rho > 0. and 'ft' in args.sam_mode and ((task_id > 0 and args.sam_skip_first) or not args.sam_skip_first):
            if args.sam_final is not None:
                sam_step = (args.sam_final - args.sam_rho) / scenario_train.nb_tasks
                sam_rho = args.sam_rho + task_id * sam_step
            else:
                sam_rho = args.sam_rho
            print(f'Initialize SAM with rho={sam_rho}')
            sam = SAM(
                optimizer, model_without_ddp,
                rho=sam_rho, adaptive=args.sam_adaptive,
                div=args.sam_div,
                use_look_sam=args.look_sam_k > 0, look_sam_alpha=args.look_sam_alpha
            )
        # ----------------------------------------------------------------------
        if args.finetuning and memory and (task_id > 0 or scenario_train.nb_classes == args.initial_increment) and not skipped_task:
            dataset_finetune = get_finetuning_dataset(dataset_train, memory, args.finetuning, args.oversample_memory_ft, task_id)
            print(f'Finetuning phase of type {args.finetuning} with {len(dataset_finetune)} samples.')
            loader_finetune, loader_val = factory.get_loaders(dataset_finetune, dataset_val, args, finetuning=True)
            print(f'Train-ft and val loaders of lengths: {len(loader_finetune)} and {len(loader_val)}.')
            if args.finetuning_resetclf:
                model_without_ddp.reset_classifier()
            model_without_ddp.freeze(args.freeze_ft)
            if args.distributed:
                del model
                model = torch.nn.parallel.DistributedDataParallel(model_without_ddp, device_ids=[args.gpu], find_unused_parameters=True)
                torch.distributed.barrier()
            else:
                model = model_without_ddp
            model_without_ddp.begin_finetuning()
            args.lr = args.finetuning_lr * args.batch_size * utils.get_world_size() / 512.0
            optimizer = create_optimizer(args, model_without_ddp)
            for epoch in range(args.finetuning_epochs):
                if args.distributed and hasattr(loader_finetune.sampler, 'set_epoch'):
                    loader_finetune.sampler.set_epoch(epoch)
                train_stats = train_one_epoch(
                    model, criterion, loader_finetune,
                    optimizer, device, epoch, task_id, loss_scaler,
                    args.clip_grad, mixup_fn,
                    debug=args.debug,
                    args=args,
                    teacher_model=teacher_model if args.finetuning_teacher else None,
                    model_without_ddp=model_without_ddp,
                    pod=args.pod if task_id > 0 else None, pod_scales=args.pod_scales
                )
                if epoch % 10 == 0 or epoch == args.finetuning_epochs - 1:
                    eval_and_log(
                        args, output_dir, model, model_without_ddp, optimizer, lr_scheduler,
                        epoch, task_id, loss_scaler, max_accuracy,
                        [], n_parameters, device, loader_val, train_stats, None, long_log_path,
                        logger, model_without_ddp.epoch_log()
                    )
                logger.end_epoch()
            model_without_ddp.end_finetuning()
        # Final, logged evaluation for this task (feeds accuracy_list / log_store).
        eval_and_log(
            args, output_dir, model, model_without_ddp, optimizer, lr_scheduler,
            epoch, task_id, loss_scaler, max_accuracy,
            accuracy_list, n_parameters, device, loader_val, train_stats, log_store, log_path,
            logger, model_without_ddp.epoch_log(), skipped_task
        )
        logger.end_task()
        nb_classes += args.increment
    total_time = time.time() - start_time
    total_time_str = str(datetime.timedelta(seconds=int(total_time)))
    print('Training time {}'.format(total_time_str))
    print(f'Setting {args.data_set} with {args.initial_increment}-{args.increment}')
    print(f"All accuracies: {accuracy_list}")
    print(f"Average Incremental Accuracy: {statistics.mean(accuracy_list)}")
    if args.name:
        print(f"Experiment name: {args.name}")
        log_store['summary'] = {"avg": statistics.mean(accuracy_list)}
        if log_path is not None and utils.is_main_process():
            with open(log_path, 'a+') as f:
                f.write(json.dumps(log_store['summary']) + '\n')
def load_options(args, options):
    """Overwrite attributes of ``args`` with values loaded from YAML files.

    Every key of every option file must already exist on ``args``; an
    unknown key raises ``ValueError``. Returns the file basenames (without
    the ``.yaml`` extension) joined by underscores, used by the caller as a
    default experiment name.
    """
    known = vars(args)
    basenames = []
    for path in options:
        with open(path) as handle:
            overrides = yaml.safe_load(handle)
        for key, value in overrides.items():
            if key not in known:
                raise ValueError(f'Option {key}={value} doesnt exist!')
        known.update(overrides)  # mutates args in place via vars()
        basenames.append(path.split("/")[-1].replace('.yaml', ''))
    return '_'.join(basenames)
if __name__ == '__main__':
    # Wrap the option definitions in a fresh parser so -h/--help works here.
    parser = argparse.ArgumentParser('DyTox training and evaluation script', parents=[get_args_parser()])
    args = parser.parse_args()
    utils.init_distributed_mode(args)
    # YAML option files override the CLI defaults; their basenames become the
    # default experiment name when --name was not provided.
    if args.options:
        name = load_options(args, args.options)
        if not args.name:
            args.name = name
    # Log directory is bucketed by dataset / category / month / week-of-month.
    args.log_dir = os.path.join(
        args.log_path, args.data_set.lower(), args.log_category,
        datetime.datetime.now().strftime('%y-%m'),
        f"week-{int(datetime.datetime.now().strftime('%d')) // 7 + 1}",
        f"{int(datetime.datetime.now().strftime('%d'))}_{args.name}"
    )
    # A nested list of class orders means several full runs, one per ordering
    # (each gets its own trial_id for separate log files).
    if isinstance(args.class_order, list) and isinstance(args.class_order[0], list):
        print(f'Running {len(args.class_order)} different class orders.')
        class_orders = copy.deepcopy(args.class_order)
        for i, order in enumerate(class_orders, start=1):
            print(f'Running class ordering {i}/{len(class_orders)}.')
            args.trial_id = i
            args.class_order = order
            main(args)
    else:
        args.trial_id = 1
        main(args)
| 41,049 | 49.244798 | 147 | py |
dytox | dytox-main/continual/scaler.py | import torch
from timm.utils import dispatch_clip_grad
class ContinualScaler:
    """AMP gradient-scaler wrapper that fires optional model hooks
    (``hook_before_update`` / ``hook_after_update``) around the optimizer
    step, e.g. to freeze or restore old-task weights."""

    state_dict_key = "amp_scaler"

    def __init__(self, disable_amp):
        self._scaler = torch.cuda.amp.GradScaler(enabled=not disable_amp)

    def __call__(
        self, loss, optimizer, model_without_ddp, clip_grad=None, clip_mode='norm',
        parameters=None, create_graph=False,
        hook=True
    ):
        """Full backward + step in one call."""
        self.pre_step(loss, optimizer, parameters, create_graph, clip_grad, clip_mode)
        self.post_step(optimizer, model_without_ddp, hook)

    def pre_step(self, loss, optimizer, parameters=None, create_graph=False, clip_grad=None, clip_mode='norm'):
        """Backprop on the scaled loss, then unscale so clipping sees true magnitudes."""
        self._scaler.scale(loss).backward(create_graph=create_graph)
        self._scaler.unscale_(optimizer)  # unscale the gradients of optimizer's assigned params in-place
        if clip_grad is not None:
            assert parameters is not None
            dispatch_clip_grad(parameters, clip_grad, mode=clip_mode)

    def post_step(self, optimizer, model_without_ddp, hook=True):
        """Optimizer step bracketed by the model's optional update hooks."""
        def _fire(hook_name):
            callback = getattr(model_without_ddp, hook_name, None)
            if hook and callback is not None:
                callback()

        _fire('hook_before_update')
        self._scaler.step(optimizer)
        _fire('hook_after_update')
        self.update()

    def update(self):
        self._scaler.update()

    def state_dict(self):
        return self._scaler.state_dict()

    def load_state_dict(self, state_dict):
        self._scaler.load_state_dict(state_dict)
| 1,563 | 33 | 111 | py |
dytox | dytox-main/continual/losses.py | # Copyright (c) 2015-present, Facebook, Inc.
# All rights reserved.
"""
Implements the knowledge distillation loss
"""
import torch
from torch import nn
from torch.nn import functional as F
class DistillationLoss(torch.nn.Module):
    """Wrap a base criterion and add a knowledge-distillation term computed
    against a frozen teacher model's predictions."""

    def __init__(self, base_criterion: torch.nn.Module, teacher_model: torch.nn.Module,
                 distillation_type: str, alpha: float, tau: float):
        super().__init__()
        assert distillation_type in ['none', 'soft', 'hard']
        self.base_criterion = base_criterion
        self.teacher_model = teacher_model
        self.distillation_type = distillation_type
        self.alpha = alpha  # interpolation weight between base and KD losses
        self.tau = tau      # softmax temperature for soft distillation

    def forward(self, inputs, outputs, labels):
        """
        Args:
            inputs: The original inputs that are feed to the teacher model
            outputs: the outputs of the model to be trained. It is expected to be
                either a Tensor, or a Tuple[Tensor, Tensor], with the original output
                in the first position and the distillation predictions as the second output
            labels: the labels for the base criterion
        """
        outputs_kd = None
        if not isinstance(outputs, torch.Tensor):
            # assume that the model outputs a tuple of [outputs, outputs_kd]
            outputs, outputs_kd = outputs
        base_loss = self.base_criterion(outputs, labels)

        if self.distillation_type == 'none':
            return base_loss
        if outputs_kd is None:
            raise ValueError("When knowledge distillation is enabled, the model is "
                             "expected to return a Tuple[Tensor, Tensor] with the output of the "
                             "class_token and the dist_token")

        # don't backprop throught the teacher
        with torch.no_grad():
            teacher_outputs = self.teacher_model(inputs)

        if self.distillation_type == 'soft':
            temperature = self.tau
            # taken from https://github.com/peterliht/knowledge-distillation-pytorch/blob/master/model/net.py#L100
            # with slight modifications
            distillation_loss = F.kl_div(
                F.log_softmax(outputs_kd / temperature, dim=1),
                F.log_softmax(teacher_outputs / temperature, dim=1),
                reduction='sum',
                log_target=True
            ) * (temperature * temperature) / outputs_kd.numel()
        else:  # 'hard': cross-entropy against the teacher's argmax labels
            distillation_loss = F.cross_entropy(outputs_kd, teacher_outputs.argmax(dim=1))

        return base_loss * (1 - self.alpha) + distillation_loss * self.alpha
def bce_with_logits(x, y):
    """BCE over logits ``x`` against one-hot targets built from class ids ``y``."""
    one_hot = torch.eye(x.shape[1])[y].to(y.device)
    return F.binary_cross_entropy_with_logits(x, one_hot)
def soft_bce_with_logits(x, y):
    """BCE over logits with already-soft (continuous) targets ``y``."""
    return F.binary_cross_entropy_with_logits(input=x, target=y)
def bce_smooth_pos_with_logits(smooth):
    """Factory: BCE-with-logits where positive targets are lowered by
    ``smooth`` (so positives become ``1 - smooth``; negatives stay 0)."""
    def criterion(x, y):
        targets = torch.eye(x.shape[1])[y].to(y.device) - smooth
        return F.binary_cross_entropy_with_logits(x, targets.clamp(min=0.0))
    return criterion
def bce_smooth_posneg_with_logits(smooth):
    """Factory: BCE-with-logits with two-sided label smoothing — negatives
    become ``smooth`` and positives become ``1 - smooth``."""
    def criterion(x, y):
        targets = torch.eye(x.shape[1])[y].to(y.device) + smooth
        return F.binary_cross_entropy_with_logits(x, targets.clamp(max=1 - smooth))
    return criterion
class LabelSmoothingCrossEntropyBoosting(nn.Module):
    """Label-smoothed cross-entropy with an optional focal re-weighting
    driven by an auxiliary 'boosting' head."""

    def __init__(self, smoothing=0.1, alpha=1, gamma=1):
        """
        Constructor for the LabelSmoothing module.

        :param smoothing: label smoothing factor
        """
        super().__init__()
        assert smoothing < 1.0
        self.smoothing = smoothing
        self.confidence = 1. - smoothing
        self.alpha = alpha
        self.gamma = gamma

    def _smoothed_loss(self, logprobs, target):
        # Standard label smoothing: mix the target-class NLL with the mean
        # NLL over all classes.
        nll = -logprobs.gather(dim=-1, index=target.unsqueeze(1)).squeeze(1)
        uniform = -logprobs.mean(dim=-1)
        return (self.confidence * nll + self.smoothing * uniform).mean()

    def forward(self, x, target, boosting_output=None, boosting_focal=None):
        logprobs = F.log_softmax(x, dim=-1)

        if boosting_output is not None:
            if boosting_focal == 'old':
                # Focal down-weighting of old classes, modulated by the
                # boosting head's per-class confidence.
                pt = boosting_output.softmax(-1)[..., :-1]
                factor = torch.ones_like(logprobs)
                factor[:, :boosting_output.shape[1] - 1] = self.alpha * (1 - pt) ** self.gamma
            elif boosting_focal == 'new':
                # Focal down-weighting of new classes, modulated by the
                # boosting head's "new task" confidence.
                pt = boosting_output.softmax(-1)[..., -1]
                nb_old_classes = boosting_output.shape[1] - 1
                factor = torch.ones_like(logprobs)
                factor[:, nb_old_classes:] = self.alpha * (1 - pt[:, None]) ** self.gamma
            else:
                assert False, (boosting_focal)
            logprobs = factor * logprobs

        return self._smoothed_loss(logprobs, target)
class SoftTargetCrossEntropyBoosting(nn.Module):
    """Soft-target cross-entropy whose per-class terms can be re-weighted by
    the confidence of an auxiliary 'boosting' head."""

    def __init__(self, alpha=1, gamma=1):
        super().__init__()
        self.alpha = alpha
        self.gamma = gamma

    def forward(self, x, target, boosting_output=None, boosting_focal=None):
        logprobs = F.log_softmax(x, dim=-1)

        if boosting_output is None:
            return (-target * logprobs).sum(dim=-1).mean()

        if boosting_focal == 'old':
            # Focal factor over old classes, from the boosting head.
            pt = boosting_output.softmax(-1)[..., :-1]
            factor = torch.ones_like(x)
            factor[:, :boosting_output.shape[1] - 1] = self.alpha * (1 - pt) ** self.gamma
        elif boosting_focal == 'new':
            # Focal factor over new classes, from the boosting head.
            pt = boosting_output.softmax(-1)[..., -1]
            nb_old_classes = boosting_output.shape[1] - 1
            factor = torch.ones_like(x)
            factor[:, nb_old_classes:] = self.alpha * (1 - pt[:, None]) ** self.gamma
        else:
            assert False, (boosting_focal)

        return (-target * factor * logprobs).sum(dim=-1).mean()
| 6,637 | 34.308511 | 114 | py |
dytox | dytox-main/continual/engine.py | # Copyright (c) 2015-present, Facebook, Inc.
# All rights reserved.
"""
Train and eval functions used in main.py
"""
import json
import os
import math
from typing import Iterable, Optional
import torch
from timm.data import Mixup
from timm.utils import accuracy
from timm.loss import SoftTargetCrossEntropy
from torch.nn import functional as F
import continual.utils as utils
from continual.losses import DistillationLoss
from continual.pod import pod_loss
CE = SoftTargetCrossEntropy()
def train_one_epoch(model: torch.nn.Module, criterion: DistillationLoss,
                    data_loader: Iterable, optimizer: torch.optim.Optimizer,
                    device: torch.device, epoch: int, task_id: int, loss_scaler, max_norm: float = 0,
                    mixup_fn: Optional[Mixup] = None,
                    set_training_mode=True, debug=False, args=None,
                    teacher_model: torch.nn.Module = None,
                    model_without_ddp: torch.nn.Module = None,
                    sam: torch.optim.Optimizer = None,
                    loader_memory=None,
                    pod=None, pod_scales=[1]):
    """Train ``model`` for one epoch of task ``task_id``.

    The control flow is heavy because it handles several optional mechanisms:
    - mixed precision through ``loss_scaler`` (disabled with ``args.no_amp``);
    - knowledge distillation from ``teacher_model`` (inside ``forward``);
    - POD feature distillation when ``pod`` is not None;
    - SAM / Look-SAM optimization (``sam``), which needs a second
      forward/backward pass from perturbed weights.

    NOTE(review): ``pod_scales=[1]`` is a mutable default argument; it is only
    read here so it is harmless, but do not mutate it.

    Returns the epoch-averaged metrics as a dict.
    """
    model.train(set_training_mode)

    metric_logger = utils.MetricLogger(delimiter="  ")
    metric_logger.add_meter('lr', utils.SmoothedValue(window_size=1, fmt='{value:.6f}'))
    header = 'Task: [{}] Epoch: [{}]'.format(task_id, epoch)
    print_freq = 10

    for batch_index, (samples, targets, _) in enumerate(metric_logger.log_every(data_loader, print_freq, header)):
        if batch_index == 0:
            print(f'Image size is {samples.shape}.')
        samples = samples.to(device, non_blocking=True)
        targets = targets.to(device, non_blocking=True)

        optimizer.zero_grad()

        lam = None
        if mixup_fn is not None:
            samples, targets, lam = mixup_fn(samples, targets)

        if sam is not None and (args.sam_first == 'memory' and task_id > 0):
            # If you want to do the first step of SAM only on memory samples.
            x, y, _ = loader_memory.get()
            x, y = x.to(device, non_blocking=True), y.to(device, non_blocking=True)
            with torch.cuda.amp.autocast(enabled=not args.no_amp):
                loss_tuple = forward(x, y, model, teacher_model, criterion, lam, args)
        else:
            with torch.cuda.amp.autocast(enabled=not args.no_amp):
                loss_tuple = forward(samples, targets, model, teacher_model, criterion, lam, args)

        # loss_tuple is (classification, kd, divergence); None entries are
        # skipped, then model-internal regularization losses are added on top.
        loss = sum(filter(lambda x: x is not None, loss_tuple))

        internal_losses = model_without_ddp.get_internal_losses(loss)
        for internal_loss_value in internal_losses.values():
            loss += internal_loss_value

        if pod is not None and teacher_model is not None:
            # POD feature distillation between current and teacher features.
            if args.pod_scaling:
                # Scale by sqrt(new / total classes), PODNet-style.
                nb_classes = sum(model.module.nb_classes_per_task)
                nb_new_classes = model.module.nb_classes_per_task[-1]
                pod_scaling = math.sqrt(nb_new_classes / nb_classes)
            else:
                pod_scaling = 1.0

            loss += pod_scaling * pod * compute_pod(
                model.module.feats, teacher_model.feats, pod_scales)

        check_loss(loss)

        # this attribute is added by timm on one optimizer (adahessian)
        is_second_order = hasattr(optimizer, 'is_second_order') and optimizer.is_second_order

        if sam is not None and args.look_sam_k > 0:
            # Look-sam only apply the costly sam estimation every k step.
            look_sam_update = False
            if batch_index % args.look_sam_k == 0:
                loss_scaler.pre_step(loss, optimizer, clip_grad=max_norm,
                                     parameters=model.parameters(), create_graph=is_second_order)
                loss_scaler.update()
                sam.first_step()  # modify weights to worse neighbor
                optimizer.zero_grad()
                look_sam_update = True

                # Second forward/backward from the perturbed weights.
                with torch.cuda.amp.autocast(enabled=not args.no_amp):
                    loss_tuple = forward(samples, targets, model, teacher_model, criterion, lam, args)

                loss = sum(filter(lambda x: x is not None, loss_tuple))
                internal_losses = model_without_ddp.get_internal_losses(loss)
                for internal_loss_value in internal_losses.values():
                    loss += internal_loss_value

                check_loss(loss)

            loss_scaler.pre_step(loss, optimizer, clip_grad=max_norm,
                                 parameters=model.parameters(), create_graph=is_second_order)
            sam.second_step(look_sam_update=look_sam_update)
            loss_scaler.post_step(optimizer, model_without_ddp)
        elif sam is not None:
            # Plain SAM: ascent to the worst-case neighbor, then a descent
            # step computed from there (optionally on memory samples only).
            loss_scaler.pre_step(loss, optimizer, clip_grad=max_norm,
                                 parameters=model.parameters(), create_graph=is_second_order)
            loss_scaler.update()
            sam.first_step()  # modify weights to worse neighbor
            optimizer.zero_grad()

            if args.sam_second == 'memory' and task_id > 0:
                x, y, _ = loader_memory.get()
                x, y = x.to(device, non_blocking=True), y.to(device, non_blocking=True)
                with torch.cuda.amp.autocast(enabled=not args.no_amp):
                    loss_tuple = forward(x, y, model, teacher_model, criterion, lam, args)
            else:
                with torch.cuda.amp.autocast(enabled=not args.no_amp):
                    loss_tuple = forward(samples, targets, model, teacher_model, criterion, lam, args)

            loss = sum(filter(lambda x: x is not None, loss_tuple))
            internal_losses = model_without_ddp.get_internal_losses(loss)
            for internal_loss_value in internal_losses.values():
                loss += internal_loss_value

            check_loss(loss)

            loss_scaler.pre_step(loss, optimizer, clip_grad=max_norm,
                                 parameters=model.parameters(), create_graph=is_second_order)
            sam.second_step()
            loss_scaler.post_step(optimizer, model_without_ddp)
        else:
            # Standard AMP backward + step.
            loss_scaler(loss, optimizer, model_without_ddp, clip_grad=max_norm,
                        parameters=model.parameters(), create_graph=is_second_order)

        torch.cuda.synchronize()

        metric_logger.update_dict(internal_losses)
        metric_logger.update(loss=loss_tuple[0])
        metric_logger.update(kd=loss_tuple[1])
        metric_logger.update(div=loss_tuple[2])
        metric_logger.update(lr=optimizer.param_groups[0]["lr"])

        if debug:
            print('Debug, only doing one epoch!')
            break

    if hasattr(model_without_ddp, 'hook_after_epoch'):
        model_without_ddp.hook_after_epoch()

    # gather the stats from all processes
    metric_logger.synchronize_between_processes()
    print("Averaged stats:", metric_logger)
    return {k: meter.global_avg for k, meter in metric_logger.meters.items()}
def check_loss(loss):
    """Abort training if the loss has diverged.

    Raises:
        ValueError: if ``loss`` is NaN or infinite. (ValueError subclasses
        Exception, so existing broad handlers keep working.)
    """
    value = loss.item()  # evaluate the tensor exactly once
    if not math.isfinite(value):
        raise ValueError('Loss is {}, stopping training'.format(value))
def forward(samples, targets, model, teacher_model, criterion, lam, args):
    """Compute the training losses for one batch.

    Returns ``(loss, kd_loss, div_loss)``:
    - ``loss``: base criterion on the main logits (rescaled by ``1 - lbd``
      when ``args.auto_kd`` is on);
    - ``kd_loss``: knowledge-distillation loss against ``teacher_model``
      (``None`` when there is no teacher);
    - ``div_loss``: divergence-head loss (``None`` when the model outputs
      no ``'div'`` head).
    """
    main_output, div_output = None, None

    outputs = model(samples)
    if isinstance(outputs, dict):
        main_output = outputs['logits']
        div_output = outputs['div']
    else:
        main_output = outputs

    loss = criterion(main_output, targets)

    if teacher_model is not None:
        with torch.no_grad():
            main_output_old = None
            teacher_outputs = teacher_model(samples)

        # NOTE(review): mirrors the *student* output structure; assumes the
        # teacher returns the same type as the student (true when the teacher
        # is a frozen copy of the student) — verify for other teachers.
        if isinstance(outputs, dict):
            main_output_old = teacher_outputs['logits']
        else:
            main_output_old = teacher_outputs

    kd_loss = None
    if teacher_model is not None:
        # Only distill on the classes the teacher knows about.
        logits_for_distil = main_output[:, :main_output_old.shape[1]]

        kd_loss = 0.
        # Bug fix: `tau` was previously assigned only inside the `auto_kd`
        # branch, raising a NameError when `--kd` was used without auto_kd.
        tau = args.distillation_tau
        if args.auto_kd:
            # Knowledge distillation on the probabilities.
            # 'auto_kd' because the right factor is automatically computed,
            # by interpolation between the main loss and the KD loss.
            # This is strongly inspired by WA (CVPR 2020) --> https://arxiv.org/abs/1911.07053
            lbd = main_output_old.shape[1] / main_output.shape[1]
            loss = (1 - lbd) * loss
            kd_factor = lbd

            _kd_loss = F.kl_div(
                F.log_softmax(logits_for_distil / tau, dim=1),
                F.log_softmax(main_output_old / tau, dim=1),
                reduction='mean',
                log_target=True
            ) * (tau ** 2)
            kd_loss += kd_factor * _kd_loss
        elif args.kd > 0.:
            _kd_loss = F.kl_div(
                F.log_softmax(logits_for_distil / tau, dim=1),
                F.log_softmax(main_output_old / tau, dim=1),
                reduction='mean',
                log_target=True
            ) * (tau ** 2)
            kd_loss += args.kd * _kd_loss

    div_loss = None
    if div_output is not None:
        # For the divergence head, build new targets:
        # old-task classes collapse to 0, new-task classes map to
        # 1..nb_class_in_new_task (not 0!). With mixup, targets are soft so
        # the collapse is done by summing probability mass.
        nb_classes = main_output.shape[1]
        nb_new_classes = div_output.shape[1] - 1
        nb_old_classes = nb_classes - nb_new_classes

        if lam is not None:  # 'lam' is the interpolation Lambda of mixup
            # If using mixup / cutmix
            div_targets = torch.zeros_like(div_output)
            div_targets[:, 0] = targets[:, :nb_old_classes].sum(-1)
            div_targets[:, 1:] = targets[:, nb_old_classes:]
        else:
            div_targets = torch.clone(targets)
            mask_old_cls = div_targets < nb_old_classes
            mask_new_cls = ~mask_old_cls

            div_targets[mask_old_cls] = 0
            div_targets[mask_new_cls] -= nb_old_classes - 1

        div_loss = args.head_div * criterion(div_output, div_targets)

    return loss, kd_loss, div_loss
def compute_pod(feats, old_feats, scales):
    """POD feature distillation between current and teacher features.

    Transformer token sequences (B, N, D) are first reshaped to square
    (B, sqrt(N), sqrt(N), D) maps so POD's spatial pooling applies.
    """
    if len(feats[0].shape) == 3:
        # Token sequence (transformer) rather than a CNN feature map.
        batch, nb_tokens, dim = feats[0].shape
        side = int(math.sqrt(nb_tokens))
        feats = [feat.view(batch, side, side, dim) for feat in feats]
        old_feats = [feat.view(batch, side, side, dim) for feat in old_feats]

    return pod_loss(feats, old_feats, scales)
@torch.no_grad()
def evaluate(data_loader, model, device, logger):
    """Evaluate ``model`` on ``data_loader``.

    Accumulates loss and top-1/top-5 accuracy in a MetricLogger, feeds
    per-sample predictions to ``logger`` (incremental-learning metric
    logger), and returns the averaged metrics as a dict.
    """
    criterion = torch.nn.CrossEntropyLoss()

    metric_logger = utils.MetricLogger(delimiter="  ")
    header = 'Test:'

    # switch to evaluation mode
    model.eval()

    for images, target, task_ids in metric_logger.log_every(data_loader, 10, header):
        images = images.to(device, non_blocking=True)
        target = target.to(device, non_blocking=True)

        # compute output
        with torch.cuda.amp.autocast():
            output = model(images)
            if isinstance(output, dict):
                # Some models return a dict; classification logits are under 'logits'.
                output = output['logits']
            loss = criterion(output, target)

        # top-5 falls back to the class count when fewer than 5 classes exist.
        acc1, acc5 = accuracy(output, target, topk=(1, min(5, output.shape[1])))

        batch_size = images.shape[0]
        metric_logger.update(loss=loss.item())
        metric_logger.meters['acc1'].update(acc1.item(), n=batch_size)
        metric_logger.meters['acc5'].update(acc5.item(), n=batch_size)

        logger.add([output.cpu().argmax(dim=1), target.cpu(), task_ids], subset='test')

    # gather the stats from all processes
    metric_logger.synchronize_between_processes()
    print('* Acc@1 {top1.global_avg:.3f} loss {losses.global_avg:.3f}'
          .format(top1=metric_logger.acc1, losses=metric_logger.loss))

    return {k: meter.global_avg for k, meter in metric_logger.meters.items()}
def eval_and_log(args, output_dir, model, model_without_ddp, optimizer, lr_scheduler,
                 epoch, task_id, loss_scaler, max_accuracy, accuracy_list,
                 n_parameters, device, data_loader_val, train_stats, log_store, log_path, logger,
                 model_log, skipped_task=False):
    """Checkpoint the model, evaluate on the validation loader, and log.

    Writes a per-task checkpoint (unless ``skipped_task``), appends the test
    accuracy to ``accuracy_list`` (mutated in place), records metrics in
    ``log_store`` and in the JSON-lines file at ``log_path``, and returns the
    (possibly updated) best accuracy so far.
    """
    if args.output_dir:
        if os.path.isdir(args.resume):
            # Resuming: keep writing checkpoints next to the resumed ones.
            checkpoint_paths = [os.path.join(args.resume, f'checkpoint_{task_id}.pth')]
        else:
            checkpoint_paths = [output_dir / f'checkpoint_{task_id}.pth']
        for checkpoint_path in checkpoint_paths:
            if skipped_task:
                # Don't overwrite the checkpoint of a task that was not retrained.
                continue

            utils.save_on_master({
                'model': model_without_ddp.state_dict(),
                'optimizer': optimizer.state_dict(),
                'lr_scheduler': lr_scheduler.state_dict(),
                'epoch': epoch,
                'task_id': task_id,
                'scaler': loss_scaler.state_dict(),
                'args': args,
            }, checkpoint_path)

    test_stats = evaluate(data_loader_val, model, device, logger)
    print(f"Accuracy of the network on the {len(data_loader_val.dataset)} test images: {test_stats['acc1']:.1f}%")
    max_accuracy = max(max_accuracy, test_stats["acc1"])
    print(f'Max accuracy: {max_accuracy:.2f}%')
    accuracy_list.append(test_stats['acc1'])

    log_stats = {**{f'train_{k}': v for k, v in train_stats.items()},
                 **{f'test_{k}': v for k, v in test_stats.items()},
                 'epoch': epoch,
                 'n_parameters': n_parameters}

    # Running mean of top-5 accuracy over all tasks seen so far.
    mean_acc5 = -1.0
    if log_store is not None:
        log_store['results'][task_id] = log_stats
        all_acc5 = [task_log['test_acc5'] for task_log in log_store['results'].values()]
        mean_acc5 = sum(all_acc5) / len(all_acc5)

    if log_path is not None and utils.is_main_process():
        # One JSON object per line, append-only.
        with open(log_path, 'a+') as f:
            f.write(json.dumps({
                'task': task_id,
                'epoch': epoch,
                'acc': round(100 * logger.accuracy, 2),
                'avg_acc': round(100 * logger.average_incremental_accuracy, 2),
                'forgetting': round(100 * logger.forgetting, 6),
                'acc_per_task': [round(100 * acc_t, 2) for acc_t in logger.accuracy_per_task],
                'train_lr': log_stats.get('train_lr', 0.),
                'bwt': round(100 * logger.backward_transfer, 2),
                'fwt': round(100 * logger.forward_transfer, 2),
                'test_acc1': round(log_stats['test_acc1'], 2),
                'test_acc5': round(log_stats['test_acc5'], 2),
                'mean_acc5': round(mean_acc5, 2),
                'train_loss': round(log_stats.get('train_loss', 0.), 5),
                'test_loss': round(log_stats['test_loss'], 5),
                **model_log
            }) + '\n')
    if args.output_dir and utils.is_main_process():
        with (output_dir / "log.txt").open("a") as f:
            f.write(json.dumps(log_stats) + "\n")

    return max_accuracy
def indexes_task_outputs(logits, targets, increment_per_task):
    """Select, for each sample, only the logits belonging to its own task.

    Returns ``(task_logits, task_targets)`` where ``task_logits`` has shape
    (batch, increment) and ``task_targets`` are within-task labels.
    """
    if increment_per_task[0] != increment_per_task[1]:
        raise NotImplementedError(f'Not supported yet for non equal task size')

    inc = increment_per_task[0]
    batch_size, nb_classes = logits.shape

    # Flattened index of each sample's task block:
    # row offset + task start column + in-task column.
    row_offsets = torch.arange(batch_size, device=targets.device).unsqueeze(1) * nb_classes
    task_starts = ((targets // inc) * inc).unsqueeze(1)
    columns = torch.arange(inc, device=targets.device).unsqueeze(0)
    indexes = (row_offsets + task_starts + columns).long()

    indexed_logits = logits.view(-1)[indexes.view(-1)].view(batch_size, inc)
    indexed_targets = targets % inc

    return indexed_logits, indexed_targets
| 15,851 | 40.389034 | 114 | py |
dytox | dytox-main/continual/convit.py | # Copyright (c) 2015-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the CC-by-NC license found in the
# LICENSE file in the root directory of this source tree.
#
'''These modules are adapted from those of timm, see
https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py
'''
import copy
import math
from functools import lru_cache
import torch
import torch.nn as nn
import torch.nn.functional as F
from timm.models.layers import DropPath, to_2tuple, trunc_normal_
import continual.utils as cutils
class BatchEnsemble(nn.Module):
    """Linear layer whose shared weight is modulated elementwise by a rank-1
    factor ``outer(r, s)`` (BatchEnsemble-style parameterization — verify the
    intended reference; only one (r, s) pair is kept here, not one per
    ensemble member)."""

    def __init__(self, in_features, out_features, bias=True):
        super().__init__()

        self.linear = nn.Linear(in_features, out_features, bias=bias)

        self.out_features, self.in_features = out_features, in_features
        self.bias = bias

        # Rank-1 modulation factors: effective weight = outer(r, s) * linear.weight.
        self.r = nn.Parameter(torch.randn(self.out_features))
        self.s = nn.Parameter(torch.randn(self.in_features))

    def __deepcopy__(self, memo):
        # Custom deepcopy: everything is copied EXCEPT the underlying linear
        # weight, which stays shared (aliased) with the source module.
        # NOTE(review): the extra args to __new__ are ignored by nn.Module —
        # presumably harmless, verify intended.
        cls = self.__class__
        result = cls.__new__(cls, self.in_features, self.out_features, self.bias)
        memo[id(self)] = result
        for k, v in self.__dict__.items():
            setattr(result, k, copy.deepcopy(v, memo))

        result.linear.weight = self.linear.weight  # share, don't copy
        return result

    def reset_parameters(self):
        # Re-draw the rank-1 factors and re-init the bias; the shared linear
        # weight itself is deliberately left untouched.
        device = self.linear.weight.device
        self.r = nn.Parameter(torch.randn(self.out_features).to(device))
        self.s = nn.Parameter(torch.randn(self.in_features).to(device))

        if self.bias:
            fan_in, _ = nn.init._calculate_fan_in_and_fan_out(self.linear.weight)
            bound = 1 / math.sqrt(fan_in) if fan_in > 0 else 0
            nn.init.uniform_(self.linear.bias, -bound, bound)

    def forward(self, x):
        # Effective weight: elementwise product of the shared weight with r s^T.
        w = torch.outer(self.r, self.s)
        w = w * self.linear.weight
        return F.linear(x, w, self.linear.bias)
class Mlp(nn.Module):
    """Two-layer feed-forward block: fc -> activation -> dropout -> fc -> dropout.

    The linear-layer factory ``fc`` is pluggable (e.g. nn.Linear or
    BatchEnsemble).
    """

    def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0., fc=nn.Linear):
        super().__init__()
        out_features = out_features or in_features
        hidden_features = hidden_features or in_features
        self.fc1 = fc(in_features, hidden_features)
        self.act = act_layer()
        self.fc2 = fc(hidden_features, out_features)
        self.drop = nn.Dropout(drop)

        self.apply(self._init_weights)

    def _init_weights(self, m):
        # Truncated-normal weights, zero biases; LayerNorm reset to identity.
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=.02)
            if m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, BatchEnsemble):
            trunc_normal_(m.linear.weight, std=.02)
            if m.linear.bias is not None:
                nn.init.constant_(m.linear.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)

    def forward(self, x):
        hidden = self.drop(self.act(self.fc1(x)))
        return self.drop(self.fc2(hidden))
class GPSA(nn.Module):
    """Gated Positional Self-Attention (ConViT).

    Each head blends a content-based (query/key) attention map with a purely
    positional one, through a learned sigmoid gate. The positional branch is
    initialized to a convolution-like locality pattern.
    """

    def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0.,
                 locality_strength=1., use_local_init=True, fc=None):
        super().__init__()
        self.num_heads = num_heads
        self.dim = dim
        head_dim = dim // num_heads
        self.scale = qk_scale or head_dim ** -0.5

        self.qk = nn.Linear(dim, dim * 2, bias=qkv_bias)
        self.v = nn.Linear(dim, dim, bias=qkv_bias)

        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(dim, dim)
        # Maps the 3 relative-position features (dx, dy, d^2) to one score per head.
        self.pos_proj = nn.Linear(3, num_heads)
        self.proj_drop = nn.Dropout(proj_drop)
        self.locality_strength = locality_strength
        # Per-head gate balancing positional vs content attention.
        self.gating_param = nn.Parameter(torch.ones(self.num_heads))
        self.apply(self._init_weights)
        if use_local_init:
            self.local_init(locality_strength=locality_strength)

    def reset_parameters(self):
        self.apply(self._init_weights)

    def _init_weights(self, m):
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=.02)
            if isinstance(m, nn.Linear) and m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)

    def forward(self, x):
        B, N, C = x.shape
        # (Re)build the cached relative-position tensor when N changes.
        if not hasattr(self, 'rel_indices') or self.rel_indices.size(1)!=N:
            self.get_rel_indices(N)

        attn = self.get_attention(x)
        v = self.v(x).reshape(B, N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3)
        x = (attn @ v).transpose(1, 2).reshape(B, N, C)
        x = self.proj(x)
        x = self.proj_drop(x)
        return x, attn, v

    def get_attention(self, x):
        B, N, C = x.shape
        qk = self.qk(x).reshape(B, N, 2, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
        q, k = qk[0], qk[1]
        pos_score = self.rel_indices.expand(B, -1, -1,-1)
        pos_score = self.pos_proj(pos_score).permute(0,3,1,2)
        patch_score = (q @ k.transpose(-2, -1)) * self.scale
        patch_score = patch_score.softmax(dim=-1)
        pos_score = pos_score.softmax(dim=-1)

        # Sigmoid-gated combination of the two maps, renormalized to sum to 1.
        gating = self.gating_param.view(1,-1,1,1)
        attn = (1.-torch.sigmoid(gating)) * patch_score + torch.sigmoid(gating) * pos_score
        attn /= attn.sum(dim=-1).unsqueeze(-1)
        attn = self.attn_drop(attn)
        return attn

    def get_attention_map(self, x, return_map = False):
        # Mean attention distance per head (in patch units), optionally with the map.
        attn_map = self.get_attention(x).mean(0)  # average over batch
        distances = self.rel_indices.squeeze()[:,:,-1]**.5
        dist = torch.einsum('nm,hnm->h', (distances, attn_map))
        dist /= distances.size(0)
        if return_map:
            return dist, attn_map
        else:
            return dist

    def local_init(self, locality_strength=1.):
        # Initialize v to identity and the positional projection so each head
        # initially attends to a distinct neighboring patch (conv-like init).
        self.v.weight.data.copy_(torch.eye(self.dim))
        locality_distance = 1 #max(1,1/locality_strength**.5)

        kernel_size = int(self.num_heads**.5)
        center = (kernel_size-1)/2 if kernel_size%2==0 else kernel_size//2
        for h1 in range(kernel_size):
            for h2 in range(kernel_size):
                position = h1+kernel_size*h2
                self.pos_proj.weight.data[position,2] = -1
                self.pos_proj.weight.data[position,1] = 2*(h1-center)*locality_distance
                self.pos_proj.weight.data[position,0] = 2*(h2-center)*locality_distance
        self.pos_proj.weight.data *= locality_strength

    def get_rel_indices(self, num_patches):
        # Precompute (dx, dy, d^2) between all patch pairs; cached on self.
        # Assumes num_patches forms a square grid — TODO confirm for callers.
        img_size = int(num_patches**.5)
        rel_indices = torch.zeros(1, num_patches, num_patches, 3)
        ind = torch.arange(img_size).view(1,-1) - torch.arange(img_size).view(-1, 1)
        indx = ind.repeat(img_size,img_size)
        indy = ind.repeat_interleave(img_size,dim=0).repeat_interleave(img_size,dim=1)
        indd = indx**2 + indy**2
        rel_indices[:,:,:,2] = indd.unsqueeze(0)
        rel_indices[:,:,:,1] = indy.unsqueeze(0)
        rel_indices[:,:,:,0] = indx.unsqueeze(0)
        device = self.qk.weight.device
        self.rel_indices = rel_indices.to(device)
class MHSA(nn.Module):
    """Standard multi-head self-attention (timm-style), returning both the
    output tokens and the attention map."""

    def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0., fc=None):
        super().__init__()
        self.num_heads = num_heads
        head_dim = dim // num_heads
        self.scale = qk_scale or head_dim ** -0.5

        self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(dim, dim)
        self.proj_drop = nn.Dropout(proj_drop)

        self.apply(self._init_weights)

    def reset_parameters(self):
        self.apply(self._init_weights)

    def _init_weights(self, m):
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=.02)
            if isinstance(m, nn.Linear) and m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)

    def get_attention_map(self, x, return_map = False):
        """Mean attention distance per head (in patch units), assuming the N
        tokens form a square grid; optionally also returns the attention map."""
        B, N, C = x.shape
        qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
        q, k, v = qkv[0], qkv[1], qkv[2]
        attn_map = (q @ k.transpose(-2, -1)) * self.scale
        attn_map = attn_map.softmax(dim=-1).mean(0)

        img_size = int(N**.5)
        ind = torch.arange(img_size).view(1,-1) - torch.arange(img_size).view(-1, 1)
        indx = ind.repeat(img_size,img_size)
        indy = ind.repeat_interleave(img_size,dim=0).repeat_interleave(img_size,dim=1)
        indd = indx**2 + indy**2
        distances = indd**.5
        # Bug fix: was hard-coded `.to('cuda')`, which crashed on CPU-only
        # machines; follow the attention map's device instead.
        distances = distances.to(attn_map.device)

        dist = torch.einsum('nm,hnm->h', (distances, attn_map))
        dist /= N

        if return_map:
            return dist, attn_map
        else:
            return dist

    def forward(self, x):
        B, N, C = x.shape
        qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
        q, k, v = qkv[0], qkv[1], qkv[2]

        attn = (q @ k.transpose(-2, -1)) * self.scale
        attn = attn.softmax(dim=-1)
        attn = self.attn_drop(attn)

        x = (attn @ v).transpose(1, 2).reshape(B, N, C)
        x = self.proj(x)
        x = self.proj_drop(x)
        return x, attn
class ScaleNorm(nn.Module):
    """ScaleNorm: divide by the last-dim L2 norm (clamped by eps) and multiply
    by a single learned scalar gain. See
    https://github.com/lucidrains/reformer-pytorch/blob/a751fe2eb939dcdd81b736b2f67e745dc8472a09/reformer_pytorch/reformer_pytorch.py#L143
    """
    def __init__(self, dim, eps=1e-5):
        super().__init__()
        self.g = nn.Parameter(torch.ones(1))
        self.eps = eps

    def forward(self, x):
        norms = x.norm(dim=-1, keepdim=True).clamp(min=self.eps)
        return self.g * (x / norms)
class Block(nn.Module):
    """Transformer block: (norm -> attention -> residual) then (norm -> MLP -> residual).

    The attention module is pluggable (GPSA, MHSA, ClassAttention, JointCA).
    For class-attention variants, only the first ``task_index`` tokens (the
    task/class tokens) go through the residual + MLP path, as in CaiT.
    """

    def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0.,
                 drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm, attention_type=GPSA,
                 fc=nn.Linear, **kwargs):
        super().__init__()
        self.norm1 = norm_layer(dim)
        self.attn = attention_type(dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop, fc=fc, **kwargs)
        self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
        self.norm2 = norm_layer(dim)
        mlp_hidden_dim = int(dim * mlp_ratio)
        self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop, fc=fc)

    def reset_parameters(self):
        self.norm1.reset_parameters()
        self.norm2.reset_parameters()
        self.attn.reset_parameters()
        self.mlp.apply(self.mlp._init_weights)

    def forward(self, x, mask_heads=None, task_index=1, attn_mask=None):
        if isinstance(self.attn, ClassAttention) or isinstance(self.attn, JointCA):  # Like in CaiT
            # Only the task/class tokens are updated; patch tokens stay
            # unchanged (the attention still reads the full normalized sequence).
            cls_token = x[:, :task_index]

            # NOTE(review): `nb=task_index` is swallowed by **kwargs in both
            # attention variants — JointCA's `nb_task_tokens` keeps its
            # default of 1 here. Verify this is intended.
            xx = self.norm1(x)
            xx, attn, v = self.attn(
                xx,
                mask_heads=mask_heads,
                nb=task_index,
                attn_mask=attn_mask
            )

            cls_token = self.drop_path(xx[:, :task_index]) + cls_token
            cls_token = self.drop_path(self.mlp(self.norm2(cls_token))) + cls_token
            return cls_token, attn, v

        # Standard pre-norm transformer block over the whole sequence.
        xx = self.norm1(x)
        xx, attn, v = self.attn(xx)

        x = self.drop_path(xx) + x
        x = self.drop_path(self.mlp(self.norm2(x))) + x

        return x, attn, v
class ClassAttention(nn.Module):
    """Class-Attention layer (CaiT-style): only the first token acts as the
    query, attending over the whole sequence.

    Adapted from
    https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py
    """

    def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0., fc=nn.Linear):
        super().__init__()
        self.num_heads = num_heads
        head_dim = dim // num_heads
        self.scale = qk_scale or head_dim ** -0.5

        self.q = fc(dim, dim, bias=qkv_bias)
        self.k = fc(dim, dim, bias=qkv_bias)
        self.v = fc(dim, dim, bias=qkv_bias)
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = fc(dim, dim)
        self.proj_drop = nn.Dropout(proj_drop)

        self.apply(self._init_weights)

    def reset_parameters(self):
        self.apply(self._init_weights)

    def _init_weights(self, m):
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=.02)
            if m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)

    def forward(self, x, mask_heads=None, **kwargs):
        batch, seq_len, dim = x.shape
        head_dim = dim // self.num_heads

        # Query comes from the class token only; keys/values span everything.
        q = self.q(x[:, 0]).unsqueeze(1).reshape(batch, 1, self.num_heads, head_dim).permute(0, 2, 1, 3)
        k = self.k(x).reshape(batch, seq_len, self.num_heads, head_dim).permute(0, 2, 1, 3)
        v = self.v(x).reshape(batch, seq_len, self.num_heads, head_dim).permute(0, 2, 1, 3)

        attn = ((q * self.scale) @ k.transpose(-2, -1)).softmax(dim=-1)
        attn = self.attn_drop(attn)

        if mask_heads is not None:
            # Optional per-head mask over the attention weights.
            attn = attn * mask_heads.expand(batch, self.num_heads, -1, seq_len)

        cls_embed = (attn @ v).transpose(1, 2).reshape(batch, 1, dim)
        cls_embed = self.proj_drop(self.proj(cls_embed))
        return cls_embed, attn, v
class JointCA(nn.Module):
    """Forward all task tokens together.

    It uses a masked attention so that task tokens don't interact between them.
    It should have the same results as independent forward per task token but being
    much faster.

    HOWEVER, it works a bit worse (like ~2pts less in 'all top-1' CIFAR100 50 steps).
    So if anyone knows why, please tell me!
    """

    def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0., fc=nn.Linear):
        super().__init__()
        self.num_heads = num_heads
        head_dim = dim // num_heads
        self.scale = qk_scale or head_dim ** -0.5

        self.q = fc(dim, dim, bias=qkv_bias)
        self.k = fc(dim, dim, bias=qkv_bias)
        self.v = fc(dim, dim, bias=qkv_bias)
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = fc(dim, dim)
        self.proj_drop = nn.Dropout(proj_drop)

        self.apply(self._init_weights)

    def reset_parameters(self):
        self.apply(self._init_weights)

    def _init_weights(self, m):
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=.02)
            if isinstance(m, nn.Linear) and m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)

    # NOTE(review): lru_cache on an instance method keeps `self` alive in the
    # cache (flake8-bugbear B019); maxsize=1 bounds the leak, but the cache
    # also keys on the batch-dependent `attn_shape`, so it misses whenever the
    # batch size changes.
    @lru_cache(maxsize=1)
    def get_attention_mask(self, attn_shape, nb_task_tokens):
        """Mask so that task tokens don't interact together.

        Given two task tokens (t1, t2) and three patch tokens (p1, p2, p3), the
        attention matrix is:

        t1-t1 t1-t2 t1-p1 t1-p2 t1-p3
        t2-t1 t2-t2 t2-p1 t2-p2 t2-p3

        So that the mask (True values are deleted) should be:

        False True False False False
        True False False False False

        NOTE(review): `attn_shape` is 4D (B, heads, queries, keys), so
        `mask[:, i, :i]` indexes the *heads* dimension at position i, not the
        query dimension as this docstring describes — verify this is intended.
        """
        mask = torch.zeros(attn_shape, dtype=torch.bool)
        for i in range(nb_task_tokens):
            mask[:, i, :i] = True
            mask[:, i, i+1:nb_task_tokens] = True
        return mask

    def forward(self, x, attn_mask=False, nb_task_tokens=1, **kwargs):
        B, N, C = x.shape
        # Queries come from the first `nb_task_tokens` tokens only.
        q = self.q(x[:,:nb_task_tokens]).reshape(B, nb_task_tokens, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3)
        k = self.k(x).reshape(B, N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3)

        q = q * self.scale
        v = self.v(x).reshape(B, N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3)

        attn = (q @ k.transpose(-2, -1))
        if attn_mask:
            mask = self.get_attention_mask(attn.shape, nb_task_tokens)
            attn[mask] = -float('inf')  # masked entries vanish after softmax
        attn = attn.softmax(dim=-1)
        attn = self.attn_drop(attn)

        x_cls = (attn @ v).transpose(1, 2).reshape(B, nb_task_tokens, C)
        x_cls = self.proj(x_cls)
        x_cls = self.proj_drop(x_cls)

        return x_cls, attn, v
class PatchEmbed(nn.Module):
    """Image to Patch Embedding, from timm.

    A single strided convolution (kernel == stride == patch size) performs the
    patch extraction and the linear projection in one step.
    """
    def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768):
        super().__init__()
        img_size = to_2tuple(img_size)
        patch_size = to_2tuple(patch_size)
        self.img_size = img_size
        self.patch_size = patch_size
        # Patches per row times patches per column.
        self.num_patches = (img_size[1] // patch_size[1]) * (img_size[0] // patch_size[0])

        self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)
        self.apply(self._init_weights)

    def reset_parameters(self):
        self.apply(self._init_weights)

    def _init_weights(self, m):
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=.02)
            if m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)

    def forward(self, x):
        B, C, H, W = x.shape
        # The exact-size assertion was deliberately disabled upstream so the
        # module accepts variable input resolutions.
        # (B, C, H, W) -> (B, embed_dim, H/ps, W/ps) -> (B, num_patches, embed_dim)
        return self.proj(x).flatten(2).transpose(1, 2)
class HybridEmbed(nn.Module):
    """ CNN Feature Map Embedding, from timm

    Runs a CNN `backbone`, flattens the spatial dimensions of its last feature
    map, and linearly projects each spatial position to `embed_dim`.
    """
    def __init__(self, backbone, img_size=224, feature_size=None, in_chans=3, embed_dim=768):
        super().__init__()
        assert isinstance(backbone, nn.Module)
        img_size = to_2tuple(img_size)
        self.img_size = img_size
        self.backbone = backbone
        if feature_size is None:
            # Infer the feature map size and channel count with a dry run.
            with torch.no_grad():
                training = backbone.training
                if training:
                    backbone.eval()
                o = self.backbone(torch.zeros(1, in_chans, img_size[0], img_size[1]))[-1]
                feature_size = o.shape[-2:]
                feature_dim = o.shape[1]
                backbone.train(training)
        else:
            feature_size = to_2tuple(feature_size)
            feature_dim = self.backbone.feature_info.channels()[-1]
        self.num_patches = feature_size[0] * feature_size[1]
        self.proj = nn.Linear(feature_dim, embed_dim)
        self.apply(self._init_weights)

    def reset_parameters(self):
        self.apply(self._init_weights)

    def _init_weights(self, m):
        # BUGFIX: this hook was referenced by `self.apply(self._init_weights)`
        # in __init__/reset_parameters but never defined, so constructing the
        # module raised AttributeError. Same init as the sibling modules in
        # this file. NOTE(review): `apply` recurses into the backbone as well —
        # confirm re-initializing its Linear/LayerNorm layers is intended.
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=.02)
            if m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)

    def forward(self, x):
        x = self.backbone(x)[-1]  # last feature map: (B, C, H', W')
        x = x.flatten(2).transpose(1, 2)  # (B, H'*W', C)
        x = self.proj(x)
        return x
class ConVit(nn.Module):
    """ Vision Transformer with support for patch or hybrid CNN input stage

    Layers [0, local_up_to_layer) are ConViT blocks with gated positional
    self-attention (GPSA); the remaining layers use plain self-attention
    (MHSA), or CaiT-style class-attention blocks when `class_attention=True`.
    """
    def __init__(self, img_size=224, patch_size=16, in_chans=3, num_classes=1000, embed_dim=768, depth=12,
                 num_heads=12, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop_rate=0., attn_drop_rate=0.,
                 drop_path_rate=0., hybrid_backbone=None, norm_layer='layer',
                 local_up_to_layer=3, locality_strength=1., use_pos_embed=True,
                 class_attention=False, ca_type='base',
                 ):
        super().__init__()
        self.num_classes = num_classes
        self.num_heads = num_heads
        self.embed_dim = embed_dim
        self.local_up_to_layer = local_up_to_layer
        self.num_features = self.final_dim = self.embed_dim = embed_dim  # num_features for consistency with other models
        self.locality_strength = locality_strength
        self.use_pos_embed = use_pos_embed
        # Resolve the normalization layer from its string name.
        if norm_layer == 'layer':
            norm_layer = nn.LayerNorm
        elif norm_layer == 'scale':
            norm_layer = ScaleNorm
        else:
            raise NotImplementedError(f'Unknown normalization {norm_layer}')
        # Patch embedding: CNN-backbone hybrid or plain conv patchifier.
        if hybrid_backbone is not None:
            self.patch_embed = HybridEmbed(
                hybrid_backbone, img_size=img_size, in_chans=in_chans, embed_dim=embed_dim)
        else:
            self.patch_embed = PatchEmbed(
                img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim)
        num_patches = self.patch_embed.num_patches
        self.num_patches = num_patches
        self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
        self.pos_drop = nn.Dropout(p=drop_rate)
        if self.use_pos_embed:
            # Positional embedding covers patch tokens only (not the cls token,
            # which is inserted after the local GPSA layers).
            self.pos_embed = nn.Parameter(torch.zeros(1, num_patches, embed_dim))
            trunc_normal_(self.pos_embed, std=.02)
        dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)]  # stochastic depth decay rule
        blocks = []
        if ca_type == 'base':
            ca_block = ClassAttention
        elif ca_type == 'jointca':
            ca_block = JointCA
        else:
            raise ValueError(f'Unknown CA type {ca_type}')
        for layer_index in range(depth):
            if layer_index < local_up_to_layer:
                # Convit
                block = Block(
                    dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale,
                    drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[layer_index], norm_layer=norm_layer,
                    attention_type=GPSA, locality_strength=locality_strength
                )
            elif not class_attention:
                # Convit
                block = Block(
                    dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale,
                    drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[layer_index], norm_layer=norm_layer,
                    attention_type=MHSA
                )
            else:
                # CaiT
                block = Block(
                    dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale,
                    drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[layer_index], norm_layer=norm_layer,
                    attention_type=ca_block
                )
            blocks.append(block)
        self.blocks = nn.ModuleList(blocks)
        self.norm = norm_layer(embed_dim)
        self.use_class_attention = class_attention
        # Classifier head
        self.feature_info = [dict(num_chs=embed_dim, reduction=0, module='head')]
        self.head = nn.Linear(embed_dim, num_classes) if num_classes > 0 else nn.Identity()
        trunc_normal_(self.cls_token, std=.02)
        self.head.apply(self._init_weights)
    def freeze(self, names):
        """Freeze parameter groups by name: 'all', 'old_heads', or 'backbone'."""
        for name in names:
            if name == 'all':
                return cutils.freeze_parameters(self)
            elif name == 'old_heads':
                self.head.freeze(name)
            elif name == 'backbone':
                # Blocks + patch embedding + pos embedding + final norm.
                cutils.freeze_parameters(self.blocks)
                cutils.freeze_parameters(self.patch_embed)
                cutils.freeze_parameters(self.pos_embed)
                cutils.freeze_parameters(self.norm)
            else:
                raise NotImplementedError(f'Unknown name={name}.')
    def reset_classifier(self):
        self.head.apply(self._init_weights)
    def reset_parameters(self):
        for b in self.blocks:
            b.reset_parameters()
        self.norm.reset_parameters()
        self.head.apply(self._init_weights)
    def _init_weights(self, m):
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=.02)
            if isinstance(m, nn.Linear) and m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)
    def get_internal_losses(self, clf_loss):
        # This backbone has no auxiliary losses.
        return {}
    def end_finetuning(self):
        pass
    def begin_finetuning(self):
        pass
    def epoch_log(self):
        return {}
    @torch.jit.ignore
    def no_weight_decay(self):
        # Parameter names the optimizer factory excludes from weight decay.
        return {'pos_embed', 'cls_token'}
    def get_classifier(self):
        return self.head
    def forward_sa(self, x):
        """Run only the local (self-attention) blocks, without the class token."""
        B = x.shape[0]
        x = self.patch_embed(x)
        if self.use_pos_embed:
            x = x + self.pos_embed
        x = self.pos_drop(x)
        for blk in self.blocks[:self.local_up_to_layer]:
            # NOTE(review): sibling loops unpack THREE values from blk(x);
            # confirm Block returns only two here.
            x, _ = blk(x)
        return x
    def forward_features(self, x, final_norm=True):
        B = x.shape[0]
        x = self.patch_embed(x)
        cls_tokens = self.cls_token.expand(B, -1, -1)
        if self.use_pos_embed:
            x = x + self.pos_embed
        x = self.pos_drop(x)
        # Local blocks operate on patch tokens only.
        for blk in self.blocks[:self.local_up_to_layer]:
            x, _, _ = blk(x)
        if self.use_class_attention:
            # CaiT-style: the last blocks update only the class token.
            for blk in self.blocks[self.local_up_to_layer:]:
                cls_tokens, _, _ = blk(torch.cat((cls_tokens, x), dim=1))
        else:
            x = torch.cat((cls_tokens, x), dim=1)
            for blk in self.blocks[self.local_up_to_layer:]:
                x, _ , _ = blk(x)
        if final_norm:
            if self.use_class_attention:
                cls_tokens = self.norm(cls_tokens)
            else:
                x = self.norm(x)
        # Return the class-token features; the two Nones keep the interface
        # aligned with models that also return tokens/attention.
        if self.use_class_attention:
            return cls_tokens[:, 0], None, None
        else:
            return x[:, 0], None, None
    def forward(self, x):
        x = self.forward_features(x)[0]
        x = self.head(x)
        return x
| 26,161 | 35.692847 | 152 | py |
dytox | dytox-main/continual/utils.py | # Copyright (c) 2015-present, Facebook, Inc.
# All rights reserved.
"""
Misc functions, including distributed helpers.
Mostly copy-paste from torchvision references.
"""
import io
import os
import time
from collections import defaultdict, deque
import datetime
import warnings
import torch
from torch import nn
import torch.distributed as dist
class SmoothedValue(object):
    """Track a series of values and provide access to smoothed values over a
    window or the global series average.
    """

    def __init__(self, window_size=20, fmt=None):
        # Default display: windowed median followed by the global average.
        self.fmt = "{median:.4f} ({global_avg:.4f})" if fmt is None else fmt
        self.deque = deque(maxlen=window_size)
        self.total = 0.0
        self.count = 0

    def update(self, value, n=1):
        """Record `value`, counted `n` times toward the global statistics."""
        self.deque.append(value)
        self.count += n
        self.total += value * n

    def synchronize_between_processes(self):
        """
        Warning: does not synchronize the deque!
        """
        if not is_dist_avail_and_initialized():
            return
        stats = torch.tensor([self.count, self.total], dtype=torch.float64, device='cuda')
        dist.barrier()
        dist.all_reduce(stats)
        stats = stats.tolist()
        self.count = int(stats[0])
        self.total = stats[1]

    @property
    def median(self):
        # Median over the recent window only.
        return torch.tensor(list(self.deque)).median().item()

    @property
    def avg(self):
        # Mean over the recent window only.
        return torch.tensor(list(self.deque), dtype=torch.float32).mean().item()

    @property
    def global_avg(self):
        # Mean over the full series since construction.
        return self.total / self.count

    @property
    def max(self):
        return max(self.deque)

    @property
    def value(self):
        return self.deque[-1]

    def __str__(self):
        return self.fmt.format(
            median=self.median, avg=self.avg, global_avg=self.global_avg,
            max=self.max, value=self.value)
class MetricLogger(object):
    """Collect named SmoothedValue meters and pretty-print training progress."""
    def __init__(self, delimiter="\t"):
        # Unknown meter names get a fresh SmoothedValue automatically.
        self.meters = defaultdict(SmoothedValue)
        self.delimiter = delimiter
    def update(self, **kwargs):
        """Update meters by keyword; None values are skipped, tensors unwrapped."""
        for k, v in kwargs.items():
            if v is None:
                continue
            if isinstance(v, torch.Tensor):
                v = v.item()
            assert isinstance(v, (float, int))
            self.meters[k].update(v)
    def update_dict(self, d):
        """Like `update` but from a dict (no None-skipping)."""
        for k, v in d.items():
            if isinstance(v, torch.Tensor):
                v = v.item()
            assert isinstance(v, (float, int))
            self.meters[k].update(v)
    def __getattr__(self, attr):
        # Allows `logger.loss` as a shortcut for `logger.meters['loss']`.
        if attr in self.meters:
            return self.meters[attr]
        if attr in self.__dict__:
            return self.__dict__[attr]
        raise AttributeError("'{}' object has no attribute '{}'".format(
            type(self).__name__, attr))
    def __str__(self):
        loss_str = []
        for name, meter in self.meters.items():
            loss_str.append(
                "{}: {}".format(name, str(meter))
            )
        return self.delimiter.join(loss_str)
    def synchronize_between_processes(self):
        for meter in self.meters.values():
            meter.synchronize_between_processes()
    def add_meter(self, name, meter):
        self.meters[name] = meter
    def log_every(self, iterable, print_freq, header=None):
        """Yield items from `iterable`, printing meters/ETA every `print_freq` steps.

        Also tracks per-iteration time and data-loading time, and reports peak
        GPU memory when CUDA is available.
        """
        i = 0
        if not header:
            header = ''
        start_time = time.time()
        end = time.time()
        iter_time = SmoothedValue(fmt='{avg:.4f}')
        data_time = SmoothedValue(fmt='{avg:.4f}')
        # Pad the iteration counter to the width of the total count.
        space_fmt = ':' + str(len(str(len(iterable)))) + 'd'
        log_msg = [
            header,
            '[{0' + space_fmt + '}/{1}]',
            'eta: {eta}',
            '{meters}',
            'time: {time}',
            'data: {data}'
        ]
        if torch.cuda.is_available():
            log_msg.append('max mem: {memory:.0f}')
        log_msg = self.delimiter.join(log_msg)
        MB = 1024.0 * 1024.0
        for obj in iterable:
            # Time spent waiting for the data loader.
            data_time.update(time.time() - end)
            yield obj
            # Full iteration time (data loading + caller's work).
            iter_time.update(time.time() - end)
            if i % print_freq == 0 or i == len(iterable) - 1:
                eta_seconds = iter_time.global_avg * (len(iterable) - i)
                eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))
                if torch.cuda.is_available():
                    print(log_msg.format(
                        i, len(iterable), eta=eta_string,
                        meters=str(self),
                        time=str(iter_time), data=str(data_time),
                        memory=torch.cuda.max_memory_allocated() / MB))
                else:
                    print(log_msg.format(
                        i, len(iterable), eta=eta_string,
                        meters=str(self),
                        time=str(iter_time), data=str(data_time)))
            i += 1
            end = time.time()
        total_time = time.time() - start_time
        total_time_str = str(datetime.timedelta(seconds=int(total_time)))
        print('{} Total time: {} ({:.4f} s / it)'.format(
            header, total_time_str, total_time / len(iterable)))
def _load_checkpoint_for_ema(model_ema, checkpoint):
    """
    Workaround for ModelEma._load_checkpoint to accept an already-loaded object
    """
    # Serialize the checkpoint to an in-memory buffer so the EMA helper can
    # re-read it as if it were a file on disk.
    buffer = io.BytesIO()
    torch.save(checkpoint, buffer)
    buffer.seek(0)
    model_ema._load_checkpoint(buffer)
def setup_for_distributed(is_master):
    """Silence `print` on non-master ranks.

    Replaces the builtin ``print`` with a wrapper that only emits output on
    the master process, unless a call passes ``force=True``.
    """
    import builtins
    original_print = builtins.print

    def _gated_print(*args, **kwargs):
        force = kwargs.pop('force', False)
        if force or is_master:
            original_print(*args, **kwargs)

    builtins.print = _gated_print
def is_dist_avail_and_initialized():
    """True only when torch.distributed is both compiled in and initialized."""
    return dist.is_available() and dist.is_initialized()
def get_world_size():
    """Number of processes in the default group, or 1 when not distributed."""
    return dist.get_world_size() if is_dist_avail_and_initialized() else 1
def get_rank():
    """Rank of the current process, or 0 when not distributed."""
    return dist.get_rank() if is_dist_avail_and_initialized() else 0
def is_main_process():
    # Rank 0 is the designated master for logging/checkpointing.
    return get_rank() == 0
def save_on_master(*args, **kwargs):
    # Only rank 0 writes checkpoints, avoiding concurrent writes to one file.
    if is_main_process():
        torch.save(*args, **kwargs)
def init_distributed_mode(args):
    """Initialize torch.distributed from torchrun/launch environment variables.

    Mutates `args` (rank, world_size, gpu, distributed, dist_backend), binds
    the local CUDA device, initializes the NCCL process group, and silences
    printing on all ranks except rank 0.
    """
    if 'RANK' in os.environ and 'WORLD_SIZE' in os.environ:
        args.rank = int(os.environ["RANK"])
        args.world_size = int(os.environ['WORLD_SIZE'])
        args.gpu = int(os.environ['LOCAL_RANK'])
    #elif 'SLURM_PROCID' in os.environ:
    #    args.rank = int(os.environ['SLURM_PROCID'])
    #    args.gpu = args.rank % torch.cuda.device_count()
    else:
        # Single-process fallback: mark non-distributed and return early.
        print('Not using distributed mode')
        args.distributed = False
        return
    args.distributed = True
    torch.cuda.set_device(args.gpu)
    args.dist_backend = 'nccl'
    print('| distributed init (rank {}): {}'.format(
        args.rank, args.dist_url), flush=True)
    # Generous 2h timeout so long single-rank phases don't kill the group.
    torch.distributed.init_process_group(backend=args.dist_backend, init_method=args.dist_url,
                                         world_size=args.world_size, rank=args.rank,
                                         timeout=datetime.timedelta(hours=2))
    torch.distributed.barrier()
    setup_for_distributed(args.rank == 0)
def load_first_task_model(model_without_ddp, loss_scaler, task_id, args):
    """Load the first task's checkpoint into `model_without_ddp`.

    `args.resume` may be a URL ('https...'), a checkpoint directory (then
    `checkpoint_{task_id}.pth` inside it is used), or a direct file path.
    Classifier heads saved for later tasks are dropped before the
    (non-strict) load; the loss scaler state is restored best-effort.
    """
    strict = False

    if args.resume.startswith('https'):
        checkpoint = torch.hub.load_state_dict_from_url(
            args.resume, map_location='cpu', check_hash=True)
    elif os.path.isdir(args.resume):
        path = os.path.join(args.resume, f"checkpoint_{task_id}.pth")
        checkpoint = torch.load(path, map_location='cpu')
    else:
        checkpoint = torch.load(args.resume, map_location='cpu')

    model_ckpt = checkpoint['model']
    if not strict:
        # Drop heads of tasks > 0: only the first task's head is expected now.
        for i in range(1, 6):
            k = f"head.fcs.{i}.weight"
            if k in model_ckpt: del model_ckpt[k]
            k = f"head.fcs.{i}.bias"
            if k in model_ckpt: del model_ckpt[k]

    model_without_ddp.load_state_dict(model_ckpt, strict=strict)

    if not args.eval and 'optimizer' in checkpoint and 'lr_scheduler' in checkpoint and 'epoch' in checkpoint:
        #optimizer.load_state_dict(checkpoint['optimizer'])
        #lr_scheduler.load_state_dict(checkpoint['lr_scheduler'])
        #args.start_epoch = checkpoint['epoch'] + 1
        #if args.model_ema:
        #    utils._load_checkpoint_for_ema(model_ema, checkpoint['model_ema'])
        if 'scaler' in checkpoint:
            try:
                loss_scaler.load_state_dict(checkpoint['scaler'])
            # BUGFIX: was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit.
            except Exception:
                warnings.warn("Could not reload loss scaler, probably because of amp/noamp mismatch")
def change_pos_embed_size(pos_embed, new_size=32, patch_size=16, old_size=224):
    """Resize a ViT positional embedding to a new image resolution.

    The class-token embedding (index 0) is copied unchanged; each new patch
    slot receives the mean of a contiguous chunk of old patch embeddings.

    :param pos_embed: original embedding, shape (1, 1 + old_nb_patches, dim).
    :param new_size: target (square) image size.
    :param patch_size: patch size, assumed unchanged.
    :param old_size: original (square) image size.
    :return: nn.Parameter of shape (1, 1 + new_nb_patches, dim).
    """
    nb_patches = (new_size // patch_size) ** 2
    new_pos_embed = torch.randn(1, nb_patches + 1, pos_embed.shape[2])
    # Slot 0 is the class token: keep it as-is.
    new_pos_embed[0, 0] = pos_embed[0, 0]
    # NOTE(review): the chunk width `old_size // nb_patches` counts pixels,
    # not old patch tokens — for a 224/16 ViT (196 patches) the last chunks
    # slice past the embedding and average empty slices; confirm intent.
    lo_idx = 1
    for i in range(nb_patches):
        hi_idx = lo_idx + old_size // nb_patches
        # BUGFIX: write to slot i + 1 — the original wrote to slot i, which
        # clobbered the class token and left the last patch slot random.
        new_pos_embed[0, i + 1] = pos_embed[0, lo_idx:hi_idx].mean(dim=0)
        lo_idx = hi_idx
    return torch.nn.Parameter(new_pos_embed)
def freeze_parameters(m, requires_grad=False):
    """(Un)freeze `m`: accepts a bare nn.Parameter, a module, or None (no-op)."""
    if m is None:
        return
    if isinstance(m, nn.Parameter):
        m.requires_grad = requires_grad
        return
    for param in m.parameters():
        param.requires_grad = requires_grad
| 9,641 | 30.103226 | 110 | py |
dytox | dytox-main/continual/classifier.py | import torch
from torch import nn
from torch.nn import functional as F
class Classifier(nn.Module):
    """Classification head that can grow its output layer for new tasks.

    Three scoring modes:
      - plain linear (default),
      - cosine similarity between L2-normalized features and weights,
      - 'pcc' (Pearson): cosine of mean-centered features/weights.

    Note: `bias`, `complete`, `nb_total_classes` and `nb_tasks` are accepted
    for API compatibility but unused by the current implementation.
    """
    def __init__(self, embed_dim, nb_total_classes, nb_base_classes, increment, nb_tasks, bias=True, complete=True, cosine=False, norm=True):
        super().__init__()

        self.embed_dim = embed_dim
        self.nb_classes = nb_base_classes
        self.cosine = cosine
        if self.cosine not in (False, None, ''):
            # Learnable temperature for the cosine logits.
            self.scale = nn.Parameter(torch.tensor(1.))
        else:
            self.scale = 1

        # Cosine modes drop the bias (meaningless after L2 normalization).
        self.head = nn.Linear(embed_dim, nb_base_classes, bias=not cosine)
        # BUGFIX: `nn.Identitty` (typo) raised AttributeError when norm=False.
        self.norm = nn.LayerNorm(embed_dim) if norm else nn.Identity()
        self.increment = increment

    def reset_parameters(self):
        self.head.reset_parameters()
        # nn.Identity has no reset_parameters; guard for the norm=False case.
        if hasattr(self.norm, 'reset_parameters'):
            self.norm.reset_parameters()

    def forward(self, x):
        x = self.norm(x)

        if self.cosine not in (False, None, ''):
            w = self.head.weight  # (c, d)
            if self.cosine == 'pcc':
                # Pearson correlation == cosine of mean-centered vectors.
                x = x - x.mean(dim=1, keepdim=True)
                w = w - w.mean(dim=1, keepdim=True)
            x = F.normalize(x, p=2, dim=1)  # (bs, d)
            w = F.normalize(w, p=2, dim=1)  # (c, d)
            return self.scale * torch.mm(x, w.T)

        return self.head(x)

    def init_prev_head(self, head):
        """Copy weights/bias from a previous (smaller or equal) head."""
        w, b = head.weight.data, head.bias.data
        self.head.weight.data[:w.shape[0], :w.shape[1]] = w
        self.head.bias.data[:b.shape[0]] = b

    def init_prev_norm(self, norm):
        """Copy affine parameters from a previous LayerNorm."""
        w, b = norm.weight.data, norm.bias.data
        self.norm.weight.data[:w.shape[0]] = w
        self.norm.bias.data[:b.shape[0]] = b

    @torch.no_grad()
    def weight_align(self, nb_new_classes):
        """Rescale new-class weights to the mean norm of old-class weights."""
        w = self.head.weight.data
        norms = torch.norm(w, dim=1)
        norm_old = norms[:-nb_new_classes]
        norm_new = norms[-nb_new_classes:]
        gamma = torch.mean(norm_old) / torch.mean(norm_new)
        w[-nb_new_classes:] = gamma * w[-nb_new_classes:]

    def add_classes(self):
        """Grow the head by `self.increment` outputs."""
        self.add_new_outputs(self.increment)

    def add_new_outputs(self, n):
        """Replace the head with a larger one, keeping the old weights."""
        head = nn.Linear(self.embed_dim, self.nb_classes + n, bias=not self.cosine)
        head.weight.data[:-n] = self.head.weight.data
        if not self.cosine:
            head.bias.data[:-n] = self.head.bias.data

        head.to(self.head.weight.device)
        self.head = head
        self.nb_classes += n
| 2,422 | 31.743243 | 141 | py |
dytox | dytox-main/continual/mixup.py | """ Mixup and Cutmix
Papers:
mixup: Beyond Empirical Risk Minimization (https://arxiv.org/abs/1710.09412)
CutMix: Regularization Strategy to Train Strong Classifiers with Localizable Features (https://arxiv.org/abs/1905.04899)
Code Reference:
CutMix: https://github.com/clovaai/CutMix-PyTorch
Hacked together by / Copyright 2020 Ross Wightman
"""
import numpy as np
import torch
def one_hot(x, num_classes, on_value=1., off_value=0., device='cuda'):
    """Dense one-hot encoding of `x` with configurable on/off values."""
    idx = x.long().view(-1, 1)
    out = torch.full((idx.size(0), num_classes), off_value, device=device)
    return out.scatter_(1, idx, on_value)
def mixup_target(target, num_classes, lam=1., smoothing=0.0, device='cuda', old_target=None):
    """Soft mixup target: lam * y1 + (1 - lam) * y2, with label smoothing.

    y2 comes from `old_target` when given (rehearsal mixing), otherwise from
    the batch flipped along dim 0 (the standard mixup pairing).
    """
    off_value = smoothing / num_classes
    on_value = 1. - smoothing + off_value
    second = target.flip(0) if old_target is None else old_target
    y1 = one_hot(target, num_classes, on_value=on_value, off_value=off_value, device=device)
    y2 = one_hot(second, num_classes, on_value=on_value, off_value=off_value, device=device)
    return y1 * lam + y2 * (1. - lam)
def rand_bbox(img_shape, lam, margin=0., count=None):
    """ Standard CutMix bounding-box

    Generates a random square bbox based on lambda value. This impl includes
    support for enforcing a border margin as percent of bbox dimensions.

    Args:
        img_shape (tuple): Image shape as tuple
        lam (float): Cutmix lambda value
        margin (float): Percentage of bbox dimension to enforce as margin (reduce amount of box outside image)
        count (int): Number of bbox to generate
    """
    cut_ratio = np.sqrt(1 - lam)
    img_h, img_w = img_shape[-2:]
    cut_h = int(img_h * cut_ratio)
    cut_w = int(img_w * cut_ratio)
    margin_y = int(margin * cut_h)
    margin_x = int(margin * cut_w)
    # Sample the box center, keeping an optional margin from the borders.
    cy = np.random.randint(0 + margin_y, img_h - margin_y, size=count)
    cx = np.random.randint(0 + margin_x, img_w - margin_x, size=count)
    # Clip the box corners to the image bounds.
    yl = np.clip(cy - cut_h // 2, 0, img_h)
    yh = np.clip(cy + cut_h // 2, 0, img_h)
    xl = np.clip(cx - cut_w // 2, 0, img_w)
    xh = np.clip(cx + cut_w // 2, 0, img_w)
    return yl, yh, xl, xh
def rand_bbox_minmax(img_shape, minmax, count=None):
    """ Min-Max CutMix bounding-box

    Inspired by Darknet cutmix impl, generates a random rectangular bbox
    based on min/max percent values applied to each dimension of the input image.

    Typical defaults for minmax are usually in the .2-.3 for min and .8-.9 range for max.

    Args:
        img_shape (tuple): Image shape as tuple
        minmax (tuple or list): Min and max bbox ratios (as percent of image size)
        count (int): Number of bbox to generate
    """
    assert len(minmax) == 2
    img_h, img_w = img_shape[-2:]
    lo, hi = minmax
    # Sample box height/width, then the top-left corner within bounds.
    cut_h = np.random.randint(int(img_h * lo), int(img_h * hi), size=count)
    cut_w = np.random.randint(int(img_w * lo), int(img_w * hi), size=count)
    yl = np.random.randint(0, img_h - cut_h, size=count)
    xl = np.random.randint(0, img_w - cut_w, size=count)
    return yl, yl + cut_h, xl, xl + cut_w
def cutmix_bbox_and_lam(img_shape, lam, ratio_minmax=None, correct_lam=True, count=None):
    """ Generate bbox and apply lambda correction.
    """
    if ratio_minmax is None:
        yl, yu, xl, xu = rand_bbox(img_shape, lam, count=count)
    else:
        yl, yu, xl, xu = rand_bbox_minmax(img_shape, ratio_minmax, count=count)
    # Recompute lambda from the actual box area so the target mixing weight
    # matches the pixels that were really mixed (box may be clipped).
    if correct_lam or ratio_minmax is not None:
        bbox_area = (yu - yl) * (xu - xl)
        lam = 1. - bbox_area / float(img_shape[-2] * img_shape[-1])
    return (yl, yu, xl, xu), lam
class Mixup:
    """ Mixup/Cutmix that applies different params to each element or whole batch

    Args:
        mixup_alpha (float): mixup alpha value, mixup is active if > 0.
        cutmix_alpha (float): cutmix alpha value, cutmix is active if > 0.
        cutmix_minmax (List[float]): cutmix min/max image ratio, cutmix is active and uses this vs alpha if not None.
        prob (float): probability of applying mixup or cutmix per batch or element
        switch_prob (float): probability of switching to cutmix instead of mixup when both are active
        mode (str): how to apply mixup/cutmix params (per 'batch', 'pair' (pair of elements), 'elem' (element))
        correct_lam (bool): apply lambda correction when cutmix bbox clipped by image borders
        label_smoothing (float): apply label smoothing to the mixed target tensor
        num_classes (int): number of classes for target
        loader_memory: optional rehearsal-memory loader, used by mode='old'.
    """
    def __init__(self, mixup_alpha=1., cutmix_alpha=0., cutmix_minmax=None, prob=1.0, switch_prob=0.5,
                 mode='batch', correct_lam=True, label_smoothing=0.1, num_classes=1000,
                 loader_memory=None):
        self.mixup_alpha = mixup_alpha
        self.cutmix_alpha = cutmix_alpha
        self.cutmix_minmax = cutmix_minmax
        if self.cutmix_minmax is not None:
            assert len(self.cutmix_minmax) == 2
            # force cutmix alpha == 1.0 when minmax active to keep logic simple & safe
            self.cutmix_alpha = 1.0
        self.mix_prob = prob
        self.switch_prob = switch_prob
        self.label_smoothing = label_smoothing
        self.num_classes = num_classes
        self.mode = mode
        self.correct_lam = correct_lam  # correct lambda based on clipped area for cutmix
        self.mixup_enabled = True  # set to false to disable mixing (intended to be set by train loop)
        self.loader_memory = loader_memory

    def _params_per_elem(self, batch_size):
        """Sample (lam, use_cutmix) independently for each batch element."""
        lam = np.ones(batch_size, dtype=np.float32)
        # BUGFIX: `np.bool` was removed in NumPy 1.24; use the builtin `bool`.
        use_cutmix = np.zeros(batch_size, dtype=bool)
        if self.mixup_enabled:
            if self.mixup_alpha > 0. and self.cutmix_alpha > 0.:
                # Both active: per-element coin flip decides cutmix vs mixup.
                use_cutmix = np.random.rand(batch_size) < self.switch_prob
                lam_mix = np.where(
                    use_cutmix,
                    np.random.beta(self.cutmix_alpha, self.cutmix_alpha, size=batch_size),
                    np.random.beta(self.mixup_alpha, self.mixup_alpha, size=batch_size))
            elif self.mixup_alpha > 0.:
                lam_mix = np.random.beta(self.mixup_alpha, self.mixup_alpha, size=batch_size)
            elif self.cutmix_alpha > 0.:
                use_cutmix = np.ones(batch_size, dtype=bool)
                lam_mix = np.random.beta(self.cutmix_alpha, self.cutmix_alpha, size=batch_size)
            else:
                assert False, "One of mixup_alpha > 0., cutmix_alpha > 0., cutmix_minmax not None should be true."
            # Each element is mixed with probability mix_prob (lam stays 1 otherwise).
            lam = np.where(np.random.rand(batch_size) < self.mix_prob, lam_mix.astype(np.float32), lam)
        return lam, use_cutmix

    def _params_per_batch(self):
        """Sample a single (lam, use_cutmix) pair shared by the whole batch."""
        lam = 1.
        use_cutmix = False
        if self.mixup_enabled and np.random.rand() < self.mix_prob:
            if self.mixup_alpha > 0. and self.cutmix_alpha > 0.:
                use_cutmix = np.random.rand() < self.switch_prob
                lam_mix = np.random.beta(self.cutmix_alpha, self.cutmix_alpha) if use_cutmix else \
                    np.random.beta(self.mixup_alpha, self.mixup_alpha)
            elif self.mixup_alpha > 0.:
                lam_mix = np.random.beta(self.mixup_alpha, self.mixup_alpha)
            elif self.cutmix_alpha > 0.:
                use_cutmix = True
                lam_mix = np.random.beta(self.cutmix_alpha, self.cutmix_alpha)
            else:
                assert False, "One of mixup_alpha > 0., cutmix_alpha > 0., cutmix_minmax not None should be true."
            lam = float(lam_mix)
        return lam, use_cutmix

    def _mix_elem(self, x):
        """Mix each element (in place) with its mirror element, own params each."""
        batch_size = len(x)
        lam_batch, use_cutmix = self._params_per_elem(batch_size)
        x_orig = x.clone()  # need to keep an unmodified original for mixing source
        for i in range(batch_size):
            j = batch_size - i - 1
            lam = lam_batch[i]
            if lam != 1.:
                if use_cutmix[i]:
                    (yl, yh, xl, xh), lam = cutmix_bbox_and_lam(
                        x[i].shape, lam, ratio_minmax=self.cutmix_minmax, correct_lam=self.correct_lam)
                    x[i][:, yl:yh, xl:xh] = x_orig[j][:, yl:yh, xl:xh]
                    lam_batch[i] = lam
                else:
                    x[i] = x[i] * lam + x_orig[j] * (1 - lam)
        return torch.tensor(lam_batch, device=x.device, dtype=x.dtype).unsqueeze(1)

    def _mix_pair(self, x):
        """Mix mirrored pairs symmetrically (both elements of a pair change)."""
        batch_size = len(x)
        lam_batch, use_cutmix = self._params_per_elem(batch_size // 2)
        x_orig = x.clone()  # need to keep an unmodified original for mixing source
        for i in range(batch_size // 2):
            j = batch_size - i - 1
            lam = lam_batch[i]
            if lam != 1.:
                if use_cutmix[i]:
                    (yl, yh, xl, xh), lam = cutmix_bbox_and_lam(
                        x[i].shape, lam, ratio_minmax=self.cutmix_minmax, correct_lam=self.correct_lam)
                    x[i][:, yl:yh, xl:xh] = x_orig[j][:, yl:yh, xl:xh]
                    x[j][:, yl:yh, xl:xh] = x_orig[i][:, yl:yh, xl:xh]
                    lam_batch[i] = lam
                else:
                    x[i] = x[i] * lam + x_orig[j] * (1 - lam)
                    x[j] = x[j] * lam + x_orig[i] * (1 - lam)
        lam_batch = np.concatenate((lam_batch, lam_batch[::-1]))
        return torch.tensor(lam_batch, device=x.device, dtype=x.dtype).unsqueeze(1)

    def _mix_batch(self, x):
        """Mix the whole batch (in place) with its flipped version, shared lam."""
        lam, use_cutmix = self._params_per_batch()
        if lam == 1.:
            return 1.
        if use_cutmix:
            (yl, yh, xl, xh), lam = cutmix_bbox_and_lam(
                x.shape, lam, ratio_minmax=self.cutmix_minmax, correct_lam=self.correct_lam)
            x[:, :, yl:yh, xl:xh] = x.flip(0)[:, :, yl:yh, xl:xh]
        else:
            x_flipped = x.flip(0).mul_(1. - lam)
            x.mul_(lam).add_(x_flipped)
        return lam

    def _mix_old(self, x, old_x):
        """Rehearsal variant: mix the batch (cutmix unsupported here)."""
        lam, use_cutmix = self._params_per_batch()
        if lam == 1.:
            return 1.
        if use_cutmix:
            assert False
            (yl, yh, xl, xh), lam = cutmix_bbox_and_lam(
                x.shape, lam, ratio_minmax=self.cutmix_minmax, correct_lam=self.correct_lam)
            x[:, :, yl:yh, xl:xh] = x.flip(0)[:, :, yl:yh, xl:xh]
        else:
            x_flipped = x.flip(0).mul_(1. - lam)
            x.mul_(lam).add_(x_flipped)
            #x.mul_(lam).add_(old_x.mul_(1. - lam))
        return lam

    def __call__(self, x, target):
        """Mix `x` in place according to `self.mode`; return (x, soft target, lam)."""
        assert len(x) % 2 == 0, 'Batch size should be even when using this'
        old_y = None
        if self.mode == 'elem':
            lam = self._mix_elem(x)
        elif self.mode == 'pair':
            lam = self._mix_pair(x)
        elif self.mode == 'batch' or (self.mode == 'old' and self.loader_memory is None):
            lam = self._mix_batch(x)
        else:  # old
            old_x, old_y, _ = self.loader_memory.get()
            old_x, old_y = old_x.to(x.device), old_y.to(x.device)
            lam = self._mix_old(x, old_x)
        target = mixup_target(target, self.num_classes, lam, self.label_smoothing, old_target=old_y)
        return x, target, lam
class FastCollateMixup(Mixup):
    """ Fast Collate w/ Mixup/Cutmix that applies different params to each element or whole batch

    A Mixup impl that's performed while collating the batches.
    Operates on raw uint8 numpy images (before normalization); mixed images
    are accumulated into the pre-allocated uint8 `output` tensor.
    """
    def _mix_elem_collate(self, output, batch, half=False):
        batch_size = len(batch)
        num_elem = batch_size // 2 if half else batch_size
        assert len(output) == num_elem
        lam_batch, use_cutmix = self._params_per_elem(num_elem)
        for i in range(num_elem):
            # Pair element i with its mirror from the other end of the batch.
            j = batch_size - i - 1
            lam = lam_batch[i]
            mixed = batch[i][0]
            if lam != 1.:
                if use_cutmix[i]:
                    if not half:
                        mixed = mixed.copy()
                    (yl, yh, xl, xh), lam = cutmix_bbox_and_lam(
                        output.shape, lam, ratio_minmax=self.cutmix_minmax, correct_lam=self.correct_lam)
                    mixed[:, yl:yh, xl:xh] = batch[j][0][:, yl:yh, xl:xh]
                    lam_batch[i] = lam
                else:
                    mixed = mixed.astype(np.float32) * lam + batch[j][0].astype(np.float32) * (1 - lam)
                    # Round in place before casting back to uint8.
                    np.rint(mixed, out=mixed)
            output[i] += torch.from_numpy(mixed.astype(np.uint8))
        if half:
            # The second half of the batch stays unmixed (lam == 1).
            lam_batch = np.concatenate((lam_batch, np.ones(num_elem)))
        return torch.tensor(lam_batch).unsqueeze(1)
    def _mix_pair_collate(self, output, batch):
        batch_size = len(batch)
        lam_batch, use_cutmix = self._params_per_elem(batch_size // 2)
        for i in range(batch_size // 2):
            j = batch_size - i - 1
            lam = lam_batch[i]
            mixed_i = batch[i][0]
            mixed_j = batch[j][0]
            assert 0 <= lam <= 1.0
            if lam < 1.:
                if use_cutmix[i]:
                    # Swap the cut regions between the two paired images.
                    (yl, yh, xl, xh), lam = cutmix_bbox_and_lam(
                        output.shape, lam, ratio_minmax=self.cutmix_minmax, correct_lam=self.correct_lam)
                    patch_i = mixed_i[:, yl:yh, xl:xh].copy()
                    mixed_i[:, yl:yh, xl:xh] = mixed_j[:, yl:yh, xl:xh]
                    mixed_j[:, yl:yh, xl:xh] = patch_i
                    lam_batch[i] = lam
                else:
                    # Symmetric blend via a temporary so neither side is
                    # overwritten before it is read.
                    mixed_temp = mixed_i.astype(np.float32) * lam + mixed_j.astype(np.float32) * (1 - lam)
                    mixed_j = mixed_j.astype(np.float32) * lam + mixed_i.astype(np.float32) * (1 - lam)
                    mixed_i = mixed_temp
                    np.rint(mixed_j, out=mixed_j)
                    np.rint(mixed_i, out=mixed_i)
            output[i] += torch.from_numpy(mixed_i.astype(np.uint8))
            output[j] += torch.from_numpy(mixed_j.astype(np.uint8))
        lam_batch = np.concatenate((lam_batch, lam_batch[::-1]))
        return torch.tensor(lam_batch).unsqueeze(1)
    def _mix_batch_collate(self, output, batch):
        batch_size = len(batch)
        lam, use_cutmix = self._params_per_batch()
        if use_cutmix:
            # One shared bbox for the whole batch.
            (yl, yh, xl, xh), lam = cutmix_bbox_and_lam(
                output.shape, lam, ratio_minmax=self.cutmix_minmax, correct_lam=self.correct_lam)
        for i in range(batch_size):
            j = batch_size - i - 1
            mixed = batch[i][0]
            if lam != 1.:
                if use_cutmix:
                    mixed = mixed.copy()  # don't want to modify the original while iterating
                    mixed[:, yl:yh, xl:xh] = batch[j][0][:, yl:yh, xl:xh]
                else:
                    mixed = mixed.astype(np.float32) * lam + batch[j][0].astype(np.float32) * (1 - lam)
                    np.rint(mixed, out=mixed)
            output[i] += torch.from_numpy(mixed.astype(np.uint8))
        return lam
    def __call__(self, batch, _=None):
        batch_size = len(batch)
        assert batch_size % 2 == 0, 'Batch size should be even when using this'
        half = 'half' in self.mode
        if half:
            batch_size //= 2
        output = torch.zeros((batch_size, *batch[0][0].shape), dtype=torch.uint8)
        if self.mode == 'elem' or self.mode == 'half':
            lam = self._mix_elem_collate(output, batch, half=half)
        elif self.mode == 'pair':
            lam = self._mix_pair_collate(output, batch)
        else:
            lam = self._mix_batch_collate(output, batch)
        # Targets are built on CPU: this runs inside the DataLoader workers.
        target = torch.tensor([b[1] for b in batch], dtype=torch.int64)
        target = mixup_target(target, self.num_classes, lam, self.label_smoothing, device='cpu')
        target = target[:batch_size]
        return output, target
| 15,826 | 45.277778 | 120 | py |
dytox | dytox-main/continual/vit.py | """ Vision Transformer (ViT) in PyTorch
A PyTorch implement of Vision Transformers as described in
'An Image Is Worth 16 x 16 Words: Transformers for Image Recognition at Scale' - https://arxiv.org/abs/2010.11929
The official jax code is released and available at https://github.com/google-research/vision_transformer
DeiT model defs and weights from https://github.com/facebookresearch/deit,
paper `DeiT: Data-efficient Image Transformers` - https://arxiv.org/abs/2012.12877
Acknowledgments:
* The paper authors for releasing code and weights, thanks!
* I fixed my class token impl based on Phil Wang's https://github.com/lucidrains/vit-pytorch ... check it out
for some einops/einsum fun
* Simple transformer style inspired by Andrej Karpathy's https://github.com/karpathy/minGPT
* Bert reference code checks against Huggingface Transformers and Tensorflow Bert
Hacked together by / Copyright 2020 Ross Wightman
"""
import math
import logging
from functools import partial
from collections import OrderedDict
from copy import deepcopy
import torch
import torch.nn as nn
import torch.nn.functional as F
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.models.helpers import build_model_with_cfg, overlay_external_default_cfg
from timm.models.layers import DropPath, to_2tuple, trunc_normal_, lecun_normal_
from timm.models.registry import register_model
_logger = logging.getLogger(__name__)
def _cfg(url='', **kwargs):
    """Build the default pretrained-weight config for a ViT variant.

    Any keyword argument overrides the corresponding default entry.
    """
    cfg = {
        'url': url,
        'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None,
        'crop_pct': .9, 'interpolation': 'bicubic', 'fixed_input_size': True,
        'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
        'first_conv': 'patch_embed.proj', 'classifier': 'head',
    }
    cfg.update(kwargs)
    return cfg
# Maps each registered variant name to its default config (pretrained URL,
# normalization stats, input size, classifier head name(s)) consumed by
# `_create_vision_transformer` / `build_model_with_cfg`.
default_cfgs = {
    # patch models (my experiments)
    'vit_small_patch16_224': _cfg(
        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/vit_small_p16_224-15ec54c9.pth',
    ),
    # patch models (weights ported from official Google JAX impl)
    'vit_base_patch16_224': _cfg(
        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_base_p16_224-80ecf9dd.pth',
        mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5),
    ),
    'vit_base_patch32_224': _cfg(
        url='',  # no official model weights for this combo, only for in21k
        mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)),
    'vit_base_patch16_384': _cfg(
        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_base_p16_384-83fb41ba.pth',
        input_size=(3, 384, 384), mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), crop_pct=1.0),
    'vit_base_patch32_384': _cfg(
        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_base_p32_384-830016f5.pth',
        input_size=(3, 384, 384), mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), crop_pct=1.0),
    'vit_large_patch16_224': _cfg(
        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_large_p16_224-4ee7a4dc.pth',
        mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)),
    'vit_large_patch32_224': _cfg(
        url='',  # no official model weights for this combo, only for in21k
        mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)),
    'vit_large_patch16_384': _cfg(
        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_large_p16_384-b3be5167.pth',
        input_size=(3, 384, 384), mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), crop_pct=1.0),
    'vit_large_patch32_384': _cfg(
        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_large_p32_384-9b920ba8.pth',
        input_size=(3, 384, 384), mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), crop_pct=1.0),
    # patch models, imagenet21k (weights ported from official Google JAX impl)
    'vit_base_patch16_224_in21k': _cfg(
        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_base_patch16_224_in21k-e5005f0a.pth',
        num_classes=21843, mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)),
    'vit_base_patch32_224_in21k': _cfg(
        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_base_patch32_224_in21k-8db57226.pth',
        num_classes=21843, mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)),
    'vit_large_patch16_224_in21k': _cfg(
        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_large_patch16_224_in21k-606da67d.pth',
        num_classes=21843, mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)),
    'vit_large_patch32_224_in21k': _cfg(
        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_large_patch32_224_in21k-9046d2e7.pth',
        num_classes=21843, mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)),
    'vit_huge_patch14_224_in21k': _cfg(
        hf_hub='timm/vit_huge_patch14_224_in21k',
        num_classes=21843, mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)),
    # deit models (FB weights)
    'vit_deit_tiny_patch16_224': _cfg(
        url='https://dl.fbaipublicfiles.com/deit/deit_tiny_patch16_224-a1311bcf.pth'),
    'vit_deit_small_patch16_224': _cfg(
        url='https://dl.fbaipublicfiles.com/deit/deit_small_patch16_224-cd65a155.pth'),
    'vit_deit_base_patch16_224': _cfg(
        url='https://dl.fbaipublicfiles.com/deit/deit_base_patch16_224-b5f2ef4d.pth',),
    'vit_deit_base_patch16_384': _cfg(
        url='https://dl.fbaipublicfiles.com/deit/deit_base_patch16_384-8de9b5d1.pth',
        input_size=(3, 384, 384), crop_pct=1.0),
    # distilled deit models carry a second classifier head ('head_dist')
    'vit_deit_tiny_distilled_patch16_224': _cfg(
        url='https://dl.fbaipublicfiles.com/deit/deit_tiny_distilled_patch16_224-b40b3cf7.pth',
        classifier=('head', 'head_dist')),
    'vit_deit_small_distilled_patch16_224': _cfg(
        url='https://dl.fbaipublicfiles.com/deit/deit_small_distilled_patch16_224-649709d9.pth',
        classifier=('head', 'head_dist')),
    'vit_deit_base_distilled_patch16_224': _cfg(
        url='https://dl.fbaipublicfiles.com/deit/deit_base_distilled_patch16_224-df68dfff.pth',
        classifier=('head', 'head_dist')),
    'vit_deit_base_distilled_patch16_384': _cfg(
        url='https://dl.fbaipublicfiles.com/deit/deit_base_distilled_patch16_384-d0272ac0.pth',
        input_size=(3, 384, 384), crop_pct=1.0, classifier=('head', 'head_dist')),
}
class Mlp(nn.Module):
    """Two-layer feed-forward network used inside each transformer block.

    Hidden and output widths default to the input width when not given.
    """

    def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
        super().__init__()
        out_features = out_features or in_features
        hidden_features = hidden_features or in_features
        self.fc1 = nn.Linear(in_features, hidden_features)
        self.act = act_layer()
        self.fc2 = nn.Linear(hidden_features, out_features)
        self.drop = nn.Dropout(drop)

    def forward(self, x):
        """fc1 -> activation -> dropout -> fc2 -> dropout."""
        hidden = self.drop(self.act(self.fc1(x)))
        return self.drop(self.fc2(hidden))
class Attention(nn.Module):
    """Multi-head self-attention with a single fused qkv projection."""

    def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0.):
        super().__init__()
        self.num_heads = num_heads
        head_dim = dim // num_heads
        # Default scaling is 1/sqrt(head_dim) unless explicitly overridden.
        self.scale = qk_scale or head_dim ** -0.5
        self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(dim, dim)
        self.proj_drop = nn.Dropout(proj_drop)

    def forward(self, x):
        B, N, C = x.shape
        # (B, N, 3C) -> (3, B, heads, N, head_dim)
        qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
        q, k, v = qkv.unbind(0)
        weights = (q @ k.transpose(-2, -1)) * self.scale
        weights = self.attn_drop(weights.softmax(dim=-1))
        out = (weights @ v).transpose(1, 2).reshape(B, N, C)
        return self.proj_drop(self.proj(out))
class Block(nn.Module):
    """Transformer encoder block: pre-norm attention then pre-norm MLP, both residual."""

    def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0.,
                 drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm):
        super().__init__()
        self.norm1 = norm_layer(dim)
        self.attn = Attention(
            dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop)
        # Stochastic depth (identity when drop_path == 0).
        self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
        self.norm2 = norm_layer(dim)
        self.mlp = Mlp(in_features=dim, hidden_features=int(dim * mlp_ratio), act_layer=act_layer, drop=drop)

    def forward(self, x):
        out = x + self.drop_path(self.attn(self.norm1(x)))
        out = out + self.drop_path(self.mlp(self.norm2(out)))
        return out
class PatchEmbed(nn.Module):
    """ Image to Patch Embedding
    Splits an image into non-overlapping patches via a strided convolution and
    flattens them into a (B, num_patches, embed_dim) token sequence.
    """

    def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768, norm_layer=None):
        super().__init__()
        img_size = to_2tuple(img_size)
        patch_size = to_2tuple(patch_size)
        self.img_size = img_size
        self.patch_size = patch_size
        self.patch_grid = (img_size[0] // patch_size[0], img_size[1] // patch_size[1])
        self.num_patches = self.patch_grid[0] * self.patch_grid[1]
        self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)
        self.norm = norm_layer(embed_dim) if norm_layer else nn.Identity()

    def forward(self, x):
        B, C, H, W = x.shape
        # NOTE: the original fixed-size assertion was intentionally disabled to
        # allow variable input resolutions.
        patches = self.proj(x)                         # (B, embed_dim, H/ps, W/ps)
        patches = patches.flatten(2).transpose(1, 2)   # (B, num_patches, embed_dim)
        return self.norm(patches)
class VisionTransformer(nn.Module):
    """ Vision Transformer
    A PyTorch impl of : `An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale`
        - https://arxiv.org/abs/2010.11929
    Includes distillation token & head support for `DeiT: Data-efficient Image Transformers`
        - https://arxiv.org/abs/2012.12877
    """

    def __init__(self, img_size=224, patch_size=16, in_chans=3, num_classes=1000, embed_dim=768, depth=12,
                 num_heads=12, mlp_ratio=4., qkv_bias=True, qk_scale=None, representation_size=None, distilled=False,
                 drop_rate=0., attn_drop_rate=0., drop_path_rate=0., embed_layer=PatchEmbed, norm_layer=None,
                 act_layer=None, weight_init=''):
        """
        Args:
            img_size (int, tuple): input image size
            patch_size (int, tuple): patch size
            in_chans (int): number of input channels
            num_classes (int): number of classes for classification head
            embed_dim (int): embedding dimension
            depth (int): depth of transformer
            num_heads (int): number of attention heads
            mlp_ratio (int): ratio of mlp hidden dim to embedding dim
            qkv_bias (bool): enable bias for qkv if True
            qk_scale (float): override default qk scale of head_dim ** -0.5 if set
            representation_size (Optional[int]): enable and set representation layer (pre-logits) to this value if set
            distilled (bool): model includes a distillation token and head as in DeiT models
            drop_rate (float): dropout rate
            attn_drop_rate (float): attention dropout rate
            drop_path_rate (float): stochastic depth rate
            embed_layer (nn.Module): patch embedding layer
            norm_layer: (nn.Module): normalization layer
            weight_init: (str): weight init scheme
        """
        super().__init__()
        self.num_classes = num_classes
        self.num_features = self.embed_dim = embed_dim  # num_features for consistency with other models
        # One learnable [CLS] token, plus a distillation token for DeiT models.
        self.num_tokens = 2 if distilled else 1
        norm_layer = norm_layer or partial(nn.LayerNorm, eps=1e-6)
        act_layer = act_layer or nn.GELU

        self.patch_embed = embed_layer(
            img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim)
        num_patches = self.patch_embed.num_patches

        self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
        self.dist_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) if distilled else None
        # Position embedding covers the extra class/distillation tokens too.
        self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + self.num_tokens, embed_dim))
        self.pos_drop = nn.Dropout(p=drop_rate)

        dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)]  # stochastic depth decay rule
        self.blocks = nn.Sequential(*[
            Block(
                dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale,
                drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer, act_layer=act_layer)
            for i in range(depth)])
        self.norm = norm_layer(embed_dim)

        # Representation layer (ViT "pre-logits" head, only without distillation)
        if representation_size and not distilled:
            self.num_features = representation_size
            self.pre_logits = nn.Sequential(OrderedDict([
                ('fc', nn.Linear(embed_dim, representation_size)),
                ('act', nn.Tanh())
            ]))
        else:
            self.pre_logits = nn.Identity()

        # Classifier head(s)
        self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity()
        self.head_dist = None
        if distilled:
            self.head_dist = nn.Linear(self.embed_dim, self.num_classes) if num_classes > 0 else nn.Identity()

        # Weight init
        assert weight_init in ('jax', 'jax_nlhb', 'nlhb', '')
        # 'nlhb' = negative-log head bias, so the initial logits match a uniform prior.
        head_bias = -math.log(self.num_classes) if 'nlhb' in weight_init else 0.
        trunc_normal_(self.pos_embed, std=.02)
        if self.dist_token is not None:
            trunc_normal_(self.dist_token, std=.02)
        if weight_init.startswith('jax'):
            # leave cls token as zeros to match jax impl
            for n, m in self.named_modules():
                _init_vit_weights(m, n, head_bias=head_bias, jax_impl=True)
        else:
            trunc_normal_(self.cls_token, std=.02)
            self.apply(_init_vit_weights)

    def _init_weights(self, m):
        # this fn left here for compat with downstream users
        _init_vit_weights(m)

    @torch.jit.ignore
    def no_weight_decay(self):
        # Parameters excluded from weight decay by the optimizer factory.
        return {'pos_embed', 'cls_token', 'dist_token'}

    def get_classifier(self):
        if self.dist_token is None:
            return self.head
        else:
            return self.head, self.head_dist

    def reset_classifier(self, num_classes, global_pool=''):
        """Replace the classifier head(s) for a new number of classes."""
        self.num_classes = num_classes
        self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity()
        if self.num_tokens == 2:
            self.head_dist = nn.Linear(self.embed_dim, self.num_classes) if num_classes > 0 else nn.Identity()

    def forward_features(self, x):
        """Embed, add tokens + positions, run the transformer; return token feature(s)."""
        x = self.patch_embed(x)
        cls_token = self.cls_token.expand(x.shape[0], -1, -1)  # stole cls_tokens impl from Phil Wang, thanks
        if self.dist_token is None:
            x = torch.cat((cls_token, x), dim=1)
        else:
            x = torch.cat((cls_token, self.dist_token.expand(x.shape[0], -1, -1), x), dim=1)
        x = self.pos_drop(x + self.pos_embed)
        x = self.blocks(x)
        x = self.norm(x)
        if self.dist_token is None:
            return self.pre_logits(x[:, 0])
        else:
            # Return both the class token and the distillation token features.
            return x[:, 0], x[:, 1]

    def forward(self, x):
        x = self.forward_features(x)
        if self.head_dist is not None:
            x, x_dist = self.head(x[0]), self.head_dist(x[1])  # x must be a tuple
            if self.training and not torch.jit.is_scripting():
                # during training, return both heads so the distillation loss can use them
                return x, x_dist
            else:
                # during inference, return the average of both classifier predictions
                return (x + x_dist) / 2
        else:
            x = self.head(x)
        return x
def _init_vit_weights(m, n: str = '', head_bias: float = 0., jax_impl: bool = False):
    """ ViT weight initialization
    * When called without n, head_bias, jax_impl args it will behave exactly the same
      as my original init for compatibility with prev hparam / downstream use cases (ie DeiT).
    * When called w/ valid n (module name) and jax_impl=True, will (hopefully) match JAX impl
    """
    if isinstance(m, nn.Linear):
        if n.startswith('head'):
            # Classifier head: zero weights, constant (possibly nlhb) bias.
            nn.init.zeros_(m.weight)
            nn.init.constant_(m.bias, head_bias)
        elif n.startswith('pre_logits'):
            lecun_normal_(m.weight)
            nn.init.zeros_(m.bias)
        elif jax_impl:
            nn.init.xavier_uniform_(m.weight)
            if m.bias is not None:
                if 'mlp' in n:
                    # JAX impl uses a tiny normal bias inside MLP blocks.
                    nn.init.normal_(m.bias, std=1e-6)
                else:
                    nn.init.zeros_(m.bias)
        else:
            trunc_normal_(m.weight, std=.02)
            if m.bias is not None:
                nn.init.zeros_(m.bias)
    elif jax_impl and isinstance(m, nn.Conv2d):
        # NOTE conv was left to pytorch default in my original init
        lecun_normal_(m.weight)
        if m.bias is not None:
            nn.init.zeros_(m.bias)
    elif isinstance(m, nn.LayerNorm):
        nn.init.zeros_(m.bias)
        nn.init.ones_(m.weight)
def resize_pos_embed(posemb, posemb_new, num_tokens=1):
    """Interpolate a checkpoint's position embedding to a new grid size.

    The first `num_tokens` embeddings (cls/dist tokens) are kept as-is; the
    remaining square grid is bilinearly resized. Adapted from
    https://github.com/google-research/vision_transformer/blob/00883dd691c63a6830751563748663526e811cee/vit_jax/checkpoint.py#L224
    """
    _logger.info('Resized position embedding: %s to %s', posemb.shape, posemb_new.shape)
    ntok_new = posemb_new.shape[1]
    if num_tokens:
        posemb_tok, posemb_grid = posemb[:, :num_tokens], posemb[0, num_tokens:]
        ntok_new -= num_tokens
    else:
        posemb_tok, posemb_grid = posemb[:, :0], posemb[0]
    # Both old and new grids are assumed square.
    gs_old = int(math.sqrt(len(posemb_grid)))
    gs_new = int(math.sqrt(ntok_new))
    _logger.info('Position embedding grid-size from %s to %s', gs_old, gs_new)
    grid = posemb_grid.reshape(1, gs_old, gs_old, -1).permute(0, 3, 1, 2)
    grid = F.interpolate(grid, size=(gs_new, gs_new), mode='bilinear')
    grid = grid.permute(0, 2, 3, 1).reshape(1, gs_new * gs_new, -1)
    return torch.cat([posemb_tok, grid], dim=1)
def checkpoint_filter_fn(state_dict, model):
    """ convert patch embedding weight from manual patchify + linear proj to conv"""
    if 'model' in state_dict:
        # DeiT checkpoints nest the weights under a 'model' key.
        state_dict = state_dict['model']
    out_dict = {}
    for key, weight in state_dict.items():
        if 'patch_embed.proj.weight' in key and len(weight.shape) < 4:
            # Old linear patchify weights -> conv kernel layout.
            O, I, H, W = model.patch_embed.proj.weight.shape
            weight = weight.reshape(O, -1, H, W)
        elif key == 'pos_embed' and weight.shape != model.pos_embed.shape:
            # Interpolate position embeddings when the model grid size differs.
            weight = resize_pos_embed(weight, model.pos_embed, getattr(model, 'num_tokens', 1))
        out_dict[key] = weight
    return out_dict
def _create_vision_transformer(variant, pretrained=False, default_cfg=None, **kwargs):
    """Instantiate a registered VisionTransformer variant (with optional pretrained weights)."""
    if default_cfg is None:
        default_cfg = deepcopy(default_cfgs[variant])
    overlay_external_default_cfg(default_cfg, kwargs)

    base_num_classes = default_cfg['num_classes']
    base_img_size = default_cfg['input_size'][-2:]
    n_classes = kwargs.pop('num_classes', base_num_classes)
    image_size = kwargs.pop('img_size', base_img_size)
    rep_size = kwargs.pop('representation_size', None)
    if rep_size is not None and n_classes != base_num_classes:
        # Remove representation layer if fine-tuning. This may not always be the desired action,
        # but I feel better than doing nothing by default for fine-tuning. Perhaps a better interface?
        _logger.warning("Removing representation layer for fine-tuning.")
        rep_size = None

    if kwargs.get('features_only', None):
        raise RuntimeError('features_only not implemented for Vision Transformer models.')

    return build_model_with_cfg(
        VisionTransformer, variant, pretrained,
        default_cfg=default_cfg,
        img_size=image_size,
        num_classes=n_classes,
        representation_size=rep_size,
        pretrained_filter_fn=checkpoint_filter_fn,
        **kwargs)
@register_model
def vit_small_patch16_224(pretrained=False, **kwargs):
    """Custom 'small' ViT: embed_dim=768, depth=8, heads=8, mlp_ratio=3, no QKV bias.

    NOTE: differs from the DeiT 'small' (embed_dim=384, depth=12, heads=6).
    """
    model_args = dict(
        patch_size=16, embed_dim=768, depth=8, num_heads=8, mlp_ratio=3.,
        qkv_bias=False, norm_layer=nn.LayerNorm, **kwargs)
    if pretrained:
        # NOTE my scale was wrong for original weights, leaving this here until I have better ones for this model
        model_args.setdefault('qk_scale', 768 ** -0.5)
    return _create_vision_transformer('vit_small_patch16_224', pretrained=pretrained, **model_args)
@register_model
def vit_base_patch16_224(pretrained=False, **kwargs):
    """ViT-B/16 @ 224x224 (https://arxiv.org/abs/2010.11929); ImageNet-1k weights fine-tuned from in21k."""
    model_args = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12, **kwargs)
    return _create_vision_transformer('vit_base_patch16_224', pretrained=pretrained, **model_args)
@register_model
def vit_base_patch32_224(pretrained=False, **kwargs):
    """ViT-B/32 @ 224x224 (https://arxiv.org/abs/2010.11929). No pretrained weights."""
    model_args = dict(patch_size=32, embed_dim=768, depth=12, num_heads=12, **kwargs)
    return _create_vision_transformer('vit_base_patch32_224', pretrained=pretrained, **model_args)
@register_model
def vit_base_patch16_384(pretrained=False, **kwargs):
    """ViT-B/16 @ 384x384 (https://arxiv.org/abs/2010.11929); ImageNet-1k weights fine-tuned from in21k."""
    model_args = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12, **kwargs)
    return _create_vision_transformer('vit_base_patch16_384', pretrained=pretrained, **model_args)
@register_model
def vit_base_patch32_384(pretrained=False, **kwargs):
    """ViT-B/32 @ 384x384 (https://arxiv.org/abs/2010.11929); ImageNet-1k weights fine-tuned from in21k."""
    model_args = dict(patch_size=32, embed_dim=768, depth=12, num_heads=12, **kwargs)
    return _create_vision_transformer('vit_base_patch32_384', pretrained=pretrained, **model_args)
@register_model
def vit_large_patch16_224(pretrained=False, **kwargs):
    """ViT-L/16 @ 224x224 (https://arxiv.org/abs/2010.11929); ImageNet-1k weights fine-tuned from in21k."""
    model_args = dict(patch_size=16, embed_dim=1024, depth=24, num_heads=16, **kwargs)
    return _create_vision_transformer('vit_large_patch16_224', pretrained=pretrained, **model_args)
@register_model
def vit_large_patch32_224(pretrained=False, **kwargs):
    """ViT-L/32 @ 224x224 (https://arxiv.org/abs/2010.11929). No pretrained weights."""
    model_args = dict(patch_size=32, embed_dim=1024, depth=24, num_heads=16, **kwargs)
    return _create_vision_transformer('vit_large_patch32_224', pretrained=pretrained, **model_args)
@register_model
def vit_large_patch16_384(pretrained=False, **kwargs):
    """ViT-L/16 @ 384x384 (https://arxiv.org/abs/2010.11929); ImageNet-1k weights fine-tuned from in21k."""
    model_args = dict(patch_size=16, embed_dim=1024, depth=24, num_heads=16, **kwargs)
    return _create_vision_transformer('vit_large_patch16_384', pretrained=pretrained, **model_args)
@register_model
def vit_large_patch32_384(pretrained=False, **kwargs):
    """ViT-L/32 @ 384x384 (https://arxiv.org/abs/2010.11929); ImageNet-1k weights fine-tuned from in21k."""
    model_args = dict(patch_size=32, embed_dim=1024, depth=24, num_heads=16, **kwargs)
    return _create_vision_transformer('vit_large_patch32_384', pretrained=pretrained, **model_args)
@register_model
def vit_base_patch16_224_in21k(pretrained=False, **kwargs):
    """ViT-B/16 @ 224x224 with ImageNet-21k weights and a 768-d pre-logits layer."""
    model_args = dict(
        patch_size=16, embed_dim=768, depth=12, num_heads=12, representation_size=768, **kwargs)
    return _create_vision_transformer('vit_base_patch16_224_in21k', pretrained=pretrained, **model_args)
@register_model
def vit_base_patch32_224_in21k(pretrained=False, **kwargs):
    """ViT-B/32 @ 224x224 with ImageNet-21k weights and a 768-d pre-logits layer."""
    model_args = dict(
        patch_size=32, embed_dim=768, depth=12, num_heads=12, representation_size=768, **kwargs)
    return _create_vision_transformer('vit_base_patch32_224_in21k', pretrained=pretrained, **model_args)
@register_model
def vit_large_patch16_224_in21k(pretrained=False, **kwargs):
    """ViT-L/16 @ 224x224 with ImageNet-21k weights and a 1024-d pre-logits layer."""
    model_args = dict(
        patch_size=16, embed_dim=1024, depth=24, num_heads=16, representation_size=1024, **kwargs)
    return _create_vision_transformer('vit_large_patch16_224_in21k', pretrained=pretrained, **model_args)
@register_model
def vit_large_patch32_224_in21k(pretrained=False, **kwargs):
    """ViT-L/32 @ 224x224 with ImageNet-21k weights and a 1024-d pre-logits layer."""
    model_args = dict(
        patch_size=32, embed_dim=1024, depth=24, num_heads=16, representation_size=1024, **kwargs)
    return _create_vision_transformer('vit_large_patch32_224_in21k', pretrained=pretrained, **model_args)
@register_model
def vit_huge_patch14_224_in21k(pretrained=False, **kwargs):
    """ViT-H/14 @ 224x224 with ImageNet-21k weights and a 1280-d pre-logits layer.

    NOTE: converted weights not currently available, too large for github release hosting.
    """
    model_args = dict(
        patch_size=14, embed_dim=1280, depth=32, num_heads=16, representation_size=1280, **kwargs)
    return _create_vision_transformer('vit_huge_patch14_224_in21k', pretrained=pretrained, **model_args)
@register_model
def vit_deit_tiny_patch16_224(pretrained=False, **kwargs):
    """DeiT-tiny @ 224x224 (https://arxiv.org/abs/2012.12877), FB ImageNet-1k weights."""
    model_args = dict(patch_size=16, embed_dim=192, depth=12, num_heads=3, **kwargs)
    return _create_vision_transformer('vit_deit_tiny_patch16_224', pretrained=pretrained, **model_args)
@register_model
def vit_deit_small_patch16_224(pretrained=False, **kwargs):
    """DeiT-small @ 224x224 (https://arxiv.org/abs/2012.12877), FB ImageNet-1k weights."""
    model_args = dict(patch_size=16, embed_dim=384, depth=12, num_heads=6, **kwargs)
    return _create_vision_transformer('vit_deit_small_patch16_224', pretrained=pretrained, **model_args)
@register_model
def vit_deit_base_patch16_224(pretrained=False, **kwargs):
    """DeiT-base @ 224x224 (https://arxiv.org/abs/2012.12877), FB ImageNet-1k weights."""
    model_args = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12, **kwargs)
    return _create_vision_transformer('vit_deit_base_patch16_224', pretrained=pretrained, **model_args)
@register_model
def vit_deit_base_patch16_384(pretrained=False, **kwargs):
    """DeiT-base @ 384x384 (https://arxiv.org/abs/2012.12877), FB ImageNet-1k weights."""
    model_args = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12, **kwargs)
    return _create_vision_transformer('vit_deit_base_patch16_384', pretrained=pretrained, **model_args)
@register_model
def vit_deit_tiny_distilled_patch16_224(pretrained=False, **kwargs):
    """DeiT-tiny distilled @ 224x224 (adds a distillation token/head), FB ImageNet-1k weights."""
    model_args = dict(patch_size=16, embed_dim=192, depth=12, num_heads=3, **kwargs)
    return _create_vision_transformer(
        'vit_deit_tiny_distilled_patch16_224', pretrained=pretrained, distilled=True, **model_args)
@register_model
def vit_deit_small_distilled_patch16_224(pretrained=False, **kwargs):
    """DeiT-small distilled @ 224x224 (adds a distillation token/head), FB ImageNet-1k weights."""
    model_args = dict(patch_size=16, embed_dim=384, depth=12, num_heads=6, **kwargs)
    return _create_vision_transformer(
        'vit_deit_small_distilled_patch16_224', pretrained=pretrained, distilled=True, **model_args)
@register_model
def vit_deit_base_distilled_patch16_224(pretrained=False, **kwargs):
    """DeiT-base distilled @ 224x224 (adds a distillation token/head), FB ImageNet-1k weights."""
    model_args = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12, **kwargs)
    return _create_vision_transformer(
        'vit_deit_base_distilled_patch16_224', pretrained=pretrained, distilled=True, **model_args)
@register_model
def vit_deit_base_distilled_patch16_384(pretrained=False, **kwargs):
    """DeiT-base distilled @ 384x384 (adds a distillation token/head), FB ImageNet-1k weights."""
    model_args = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12, **kwargs)
    return _create_vision_transformer(
        'vit_deit_base_distilled_patch16_384', pretrained=pretrained, distilled=True, **model_args)
| 32,176 | 45.565847 | 137 | py |
dytox | dytox-main/continual/rehearsal.py | import copy
import numpy as np
import torch
class Memory:
    """Bounded rehearsal buffer of past exemplars for continual learning.

    Stores raw inputs ``x``, labels ``y`` and task ids ``t`` as numpy arrays,
    limited to ``memory_size`` samples in total. When ``fixed`` is True the
    per-class quota is computed against the final number of classes; otherwise
    it shrinks as new classes arrive (old classes are trimmed via ``reduce``).
    """

    def __init__(self, memory_size, nb_total_classes, rehearsal, fixed=True, modes=1):
        self.memory_size = memory_size              # total exemplar budget
        self.nb_total_classes = nb_total_classes    # classes across the whole task sequence
        self.rehearsal = rehearsal                  # exemplar-selection strategy (see herd_samples)
        self.fixed = fixed                          # fixed vs shrinking per-class quota
        self.modes = modes                          # number of data "modes"; >1 splits the quota per mode

        # Buffers start empty and are filled by `add`.
        self.x = self.y = self.t = None
        self.nb_classes = 0

    @property
    def memory_per_class(self):
        # Fixed quota divides the budget by the *total* class count up-front;
        # otherwise by classes seen so far (full budget before any class).
        if self.fixed:
            return self.memory_size // self.nb_total_classes
        return self.memory_size // self.nb_classes if self.nb_classes > 0 else self.memory_size

    def get_dataset(self, base_dataset):
        """Return a deep copy of `base_dataset` whose samples are the memory content."""
        dataset = copy.deepcopy(base_dataset)

        dataset._x = self.x
        dataset._y = self.y
        dataset._t = self.t

        return dataset

    def get(self):
        """Return the raw (x, y, t) arrays (all None before the first `add`)."""
        return self.x, self.y, self.t

    def __len__(self):
        return len(self.x) if self.x is not None else 0

    def save(self, path):
        """Serialize the buffer to an .npz archive at `path`."""
        np.savez(
            path,
            x=self.x, y=self.y, t=self.t
        )

    def load(self, path):
        """Restore the buffer from an .npz archive written by `save`."""
        data = np.load(path)
        self.x = data["x"]
        self.y = data["y"]
        self.t = data["t"]

        assert len(self) <= self.memory_size, len(self)
        self.nb_classes = len(np.unique(self.y))

    def reduce(self):
        """Trim each stored class down to the current per-class quota (keeps the first samples)."""
        x, y, t = [], [], []
        for class_id in np.unique(self.y):
            indexes = np.where(self.y == class_id)[0]
            if self.modes > 1:
                # With two modes, keep half the quota from each half of the
                # class samples (presumably both modes were stored contiguously
                # -- TODO confirm against `add`).
                selected_indexes = np.concatenate([
                    indexes[:len(indexes)//2][:self.memory_per_class//2],
                    indexes[len(indexes)//2:][:self.memory_per_class//2],
                ])
            else:
                selected_indexes = indexes[:self.memory_per_class]

            x.append(self.x[selected_indexes])
            y.append(self.y[selected_indexes])
            t.append(self.t[selected_indexes])

        self.x = np.concatenate(x)
        self.y = np.concatenate(y)
        self.t = np.concatenate(t)

    def add(self, dataset, model, nb_new_classes):
        """Herd exemplars for the new task's classes and append them to the buffer.

        When the quota is not fixed, existing classes are reduced first so the
        total stays within `memory_size`.
        """
        self.nb_classes += nb_new_classes

        if self.modes > 1:  # todo modes more than 2
            assert self.modes == 2
            # NOTE(review): both halves herd from the same `dataset` object --
            # presumably the caller switches the dataset's mode between the two
            # calls; verify.
            x1, y1, t1 = herd_samples(dataset, model, self.memory_per_class//2, self.rehearsal)
            x2, y2, t2 = herd_samples(dataset, model, self.memory_per_class//2, self.rehearsal)
            x = np.concatenate((x1, x2))
            y = np.concatenate((y1, y2))
            t = np.concatenate((t1, t2))
        else:
            x, y, t = herd_samples(dataset, model, self.memory_per_class, self.rehearsal)

        #assert len(y) == self.memory_per_class * nb_new_classes, (len(y), self.memory_per_class, nb_new_classes)

        if self.x is None:
            self.x, self.y, self.t = x, y, t
        else:
            if not self.fixed:
                self.reduce()
            self.x = np.concatenate((self.x, x))
            self.y = np.concatenate((self.y, y))
            self.t = np.concatenate((self.t, t))
def herd_samples(dataset, model, memory_per_class, rehearsal):
    """Select `memory_per_class` exemplars per class from `dataset`.

    :param dataset: continuum-style dataset exposing `_x`, `_y`, `_t` arrays.
    :param model: model used for feature extraction (ignored for 'random').
    :param memory_per_class: number of exemplars to keep per class.
    :param rehearsal: strategy name: 'random', 'closest*' / 'furthest*'
        (distance to the class mean in feature space), or 'icarl*' (herding);
        the '*_token' suffix restricts features to the last task token.
    :return: tuple (x, y, t) of the selected samples.
    :raises ValueError: if the strategy name matches none of the above.
    """
    x, y, t = dataset._x, dataset._y, dataset._t

    if rehearsal == "random":
        # NOTE: np.random.choice samples WITH replacement by default, so the
        # same exemplar may be picked twice -- kept as-is for compatibility.
        indexes = []
        for class_id in np.unique(y):
            class_indexes = np.where(y == class_id)[0]
            indexes.append(
                np.random.choice(class_indexes, size=memory_per_class)
            )
        indexes = np.concatenate(indexes)
        return x[indexes], y[indexes], t[indexes]
    elif "closest" in rehearsal:
        handling = 'last' if rehearsal == 'closest_token' else 'all'
        features, targets = extract_features(dataset, model, handling)

        indexes = []
        for class_id in np.unique(y):
            class_indexes = np.where(y == class_id)[0]
            class_features = features[class_indexes]
            class_mean = np.mean(class_features, axis=0, keepdims=True)
            distances = np.power(class_features - class_mean, 2).sum(-1)
            # Keep the samples closest to their class mean.
            class_closest_indexes = np.argsort(distances)
            indexes.append(
                class_indexes[class_closest_indexes[:memory_per_class]]
            )
        indexes = np.concatenate(indexes)
        return x[indexes], y[indexes], t[indexes]
    elif "furthest" in rehearsal:
        handling = 'last' if rehearsal == 'furthest_token' else 'all'
        features, targets = extract_features(dataset, model, handling)

        indexes = []
        for class_id in np.unique(y):
            class_indexes = np.where(y == class_id)[0]
            class_features = features[class_indexes]
            class_mean = np.mean(class_features, axis=0, keepdims=True)
            distances = np.power(class_features - class_mean, 2).sum(-1)
            # Keep the samples furthest from their class mean.
            class_furthest_indexes = np.argsort(distances)[::-1]
            indexes.append(
                class_indexes[class_furthest_indexes[:memory_per_class]]
            )
        indexes = np.concatenate(indexes)
        return x[indexes], y[indexes], t[indexes]
    elif "icarl" in rehearsal:
        # BUG FIX: this was `elif "icarl":`, which is always truthy -- the
        # final ValueError was unreachable and every unknown strategy silently
        # fell into the iCaRL branch.
        handling = 'last' if rehearsal == 'icarl_token' else 'all'
        features, targets = extract_features(dataset, model, handling)

        indexes = []
        for class_id in np.unique(y):
            class_indexes = np.where(y == class_id)[0]
            class_features = features[class_indexes]
            indexes.append(
                class_indexes[icarl_selection(class_features, memory_per_class)]
            )
        indexes = np.concatenate(indexes)
        return x[indexes], y[indexes], t[indexes]
    else:
        raise ValueError(f"Unknown rehearsal method {rehearsal}!")
def extract_features(dataset, model, ensemble_handling='last'):
    """Extract per-sample features and targets from `dataset` using `model`.

    :param dataset: A continuum-style dataset yielding (x, y, t) triplets.
    :param model: Model exposing `forward_features` (possibly wrapped in DDP,
        hence the `module` indirection). Runs on GPU (`x.cuda()`).
    :param ensemble_handling: How to reduce multiple per-task features:
        'last' keeps only the latest task's embedding, 'all' concatenates them.
    :return: (features, targets) as numpy arrays, in dataset order.
    """
    #transform = copy.deepcopy(dataset.trsf.transforms)
    #dataset.trsf = transforms.Compose(transform[-2:])
    # shuffle=False / drop_last=False so features align 1:1 with dataset order.
    loader = torch.utils.data.DataLoader(
        dataset,
        batch_size=128,
        num_workers=2,
        pin_memory=True,
        drop_last=False,
        shuffle=False
    )

    features, targets = [], []

    with torch.no_grad():
        for x, y, _ in loader:
            # Unwrap DistributedDataParallel if present.
            if hasattr(model, 'module'):
                feats, _, _ = model.module.forward_features(x.cuda())
            else:
                feats, _, _ = model.forward_features(x.cuda())

            if isinstance(feats, list):
                # One embedding per task token.
                if ensemble_handling == 'last':
                    feats = feats[-1]
                elif ensemble_handling == 'all':
                    feats = torch.cat(feats, dim=1)
                else:
                    raise NotImplementedError(f'Unknown handdling of multiple features {ensemble_handling}')
            elif len(feats.shape) == 3:  # joint tokens
                # assumes shape is (nb_tokens, batch, dim), as produced by
                # DyTox.forward_features_jointtokens — TODO confirm for other models.
                if ensemble_handling == 'last':
                    feats = feats[-1]
                elif ensemble_handling == 'all':
                    feats = feats.permute(1, 0, 2).view(len(x), -1)
                else:
                    raise NotImplementedError(f'Unknown handdling of multiple features {ensemble_handling}')

            feats = feats.cpu().numpy()
            y = y.numpy()

            features.append(feats)
            targets.append(y)

    features = np.concatenate(features)
    targets = np.concatenate(targets)

    #dataset.trsf = transforms.Compose(transform)
    return features, targets
def icarl_selection(features, nb_examplars):
    """iCaRL herding: greedily pick samples whose running mean tracks the class mean.

    :param features: (n_samples, dim) feature matrix for one class.
    :param nb_examplars: Number of exemplar indices to return.
    :return: Array of the `nb_examplars` selected sample indices, best first.
    """
    # Work with L2-normalized features, one column per sample.
    normed = features.T
    normed = normed / (np.linalg.norm(normed, axis=0) + 1e-8)
    class_mean = np.mean(normed, axis=1)

    n_samples = features.shape[0]
    ranks = np.zeros((n_samples,))
    target = min(nb_examplars, n_samples)

    accumulator = class_mean
    n_selected = 0
    n_steps = 0
    # Greedy herding; capped at 1000 iterations as a safety net.
    while np.sum(ranks != 0) != target and n_steps < 1000:
        scores = np.dot(accumulator, normed)
        best = np.argmax(scores)
        n_steps += 1
        if ranks[best] == 0:
            # Record selection order (1-based) for the final argsort.
            ranks[best] = 1 + n_selected
            n_selected += 1
        accumulator = accumulator + class_mean - normed[:, best]

    # Unselected samples get a huge rank so they sort last.
    ranks[ranks == 0] = 10000
    return ranks.argsort()[:nb_examplars]
def get_finetuning_dataset(dataset, memory, finetuning='balanced', oversample_old=1, task_id=0):
    """Build the dataset used during the finetuning phase.

    :param dataset: Current full training dataset.
    :param memory: Rehearsal memory exposing `.get() -> (x, y, t)`.
    :param finetuning: 'balanced' uses only the memory (optionally oversampling
        old tasks), 'all'/'none' return `dataset` unchanged.
    :param oversample_old: Repeat factor applied to old-task samples (>1 enables it).
    :param task_id: Index of the current task, used to split old vs new samples.
    """
    if finetuning in ('all', 'none'):
        return dataset
    if finetuning != 'balanced':
        raise NotImplementedError(f'Unknown finetuning method {finetuning}')

    x, y, t = memory.get()
    if oversample_old > 1:
        old_idx = np.where(t < task_id)[0]
        assert len(old_idx) > 0
        new_idx = np.where(t >= task_id)[0]
        # Duplicate old-task samples `oversample_old` times, keep new ones once.
        order = np.concatenate([
            np.repeat(old_idx, oversample_old),
            new_idx
        ])
        x, y, t = x[order], y[order], t[order]

    # Clone the dataset object so transforms etc. are preserved, then swap data.
    balanced = copy.deepcopy(dataset)
    balanced._x = x
    balanced._y = y
    balanced._t = t
    return balanced
| 9,073 | 31.291815 | 113 | py |
dytox | dytox-main/continual/datasets.py | # Copyright (c) 2015-present, Facebook, Inc.
# All rights reserved.
import json
import os
import warnings
from continuum import ClassIncremental
from continuum.datasets import CIFAR100, ImageNet100, ImageFolderDataset
from timm.data import create_transform
from timm.data.constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from torchvision import transforms
from torchvision.datasets.folder import ImageFolder, default_loader
from torchvision.transforms import functional as Fv
# Compat shim: torchvision >= 0.9 exposes InterpolationMode enums; older
# versions expect the raw PIL integer code (3 == bicubic).
# Fix: the previous bare `except:` also swallowed KeyboardInterrupt/SystemExit;
# only an AttributeError can occur here.
try:
    interpolation = Fv.InterpolationMode.BICUBIC
except AttributeError:  # torchvision < 0.9
    interpolation = 3
class ImageNet1000(ImageFolderDataset):
    """ImageNet-1000 as a Continuum folder dataset.

    Redirects the data path to the `train/` or `val/` subfolder depending on
    the requested split before delegating to `ImageFolderDataset`.

    :param data_path: Root folder containing `train/` and `val/` subfolders.
    :param train: Whether to load the training split.
    :param download: Dummy parameter kept for API compatibility.
    """

    def __init__(self, data_path: str, train: bool = True, download: bool = False):
        super().__init__(data_path=data_path, train=train, download=download)

    def get_data(self):
        # Point the loader at the split-specific subfolder, then let the
        # parent class walk the tree.
        split = "train" if self.train else "val"
        self.data_path = os.path.join(self.data_path, split)
        return super().get_data()
class INatDataset(ImageFolder):
    """iNaturalist dataset (2018/2019 JSON layout) exposed as an ImageFolder.

    Builds `self.samples` as (image_path, contiguous_target) pairs, where
    targets are re-indexed according to the chosen taxonomic `category`.
    """
    def __init__(self, root, train=True, year=2018, transform=None, target_transform=None,
                 category='name', loader=default_loader):
        self.transform = transform
        self.loader = loader
        self.target_transform = target_transform
        self.year = year
        # assert category in ['kingdom','phylum','class','order','supercategory','family','genus','name']
        path_json = os.path.join(root, f'{"train" if train else "val"}{year}.json')
        with open(path_json) as json_file:
            data = json.load(json_file)

        with open(os.path.join(root, 'categories.json')) as json_file:
            data_catg = json.load(json_file)

        # The targeter is always built from the *train* annotations so that
        # train and val share the same label indexing.
        path_json_for_targeter = os.path.join(root, f"train{year}.json")

        with open(path_json_for_targeter) as json_file:
            data_for_targeter = json.load(json_file)

        # Map each category value (at the chosen taxonomic level) to a
        # contiguous integer id, in first-seen order.
        targeter = {}
        indexer = 0
        for elem in data_for_targeter['annotations']:
            king = []
            king.append(data_catg[int(elem['category_id'])][category])
            if king[0] not in targeter.keys():
                targeter[king[0]] = indexer
                indexer += 1
        self.nb_classes = len(targeter)

        self.samples = []
        for elem in data['images']:
            cut = elem['file_name'].split('/')
            # assumes file_name looks like "<split>/<category>/<id>/<file>" —
            # TODO confirm against the actual iNat archive layout.
            target_current = int(cut[2])
            path_current = os.path.join(root, cut[0], cut[2], cut[3])

            categors = data_catg[target_current]
            target_current_true = targeter[categors[category]]
            self.samples.append((path_current, target_current_true))

    # __getitem__ and __len__ inherited from ImageFolder
def build_dataset(is_train, args):
    """Build the continual scenario (ClassIncremental) for the requested split.

    :param is_train: Whether to build the training or validation scenario.
    :param args: Parsed CLI arguments (data_set, data_path, increments, ...).
    :return: (scenario, nb_classes) tuple.
    """
    transform = build_transform(is_train, args)

    name = args.data_set.lower()
    if name == 'cifar':
        dataset = CIFAR100(args.data_path, train=is_train, download=True)
    elif name == 'imagenet100':
        split_file = "train_100.txt" if is_train else "val_100.txt"
        dataset = ImageNet100(
            args.data_path, train=is_train,
            data_subset=os.path.join('./imagenet100_splits', split_file)
        )
    elif name == 'imagenet1000':
        dataset = ImageNet1000(args.data_path, train=is_train)
    else:
        raise ValueError(f'Unknown dataset {args.data_set}.')

    # Wrap the raw dataset into a class-incremental stream of tasks.
    scenario = ClassIncremental(
        dataset,
        initial_increment=args.initial_increment,
        increment=args.increment,
        transformations=transform.transforms,
        class_order=args.class_order
    )
    return scenario, scenario.nb_classes
def build_transform(is_train, args):
    """Build torchvision transforms for train or eval, following DeiT's recipe.

    For CIFAR (32x32) the resize/crop is replaced by RandomCrop with padding
    and the normalization constants are swapped for CIFAR100's statistics.
    """
    if args.aa == 'none':
        args.aa = None

    # NOTE(review): catch_warnings() without a simplefilter() call does not
    # actually silence anything — it only snapshots/restores the filter state.
    with warnings.catch_warnings():
        resize_im = args.input_size > 32
        if is_train:
            # this should always dispatch to transforms_imagenet_train
            transform = create_transform(
                input_size=args.input_size,
                is_training=True,
                color_jitter=args.color_jitter,
                auto_augment=args.aa,
                interpolation='bicubic',
                re_prob=args.reprob,
                re_mode=args.remode,
                re_count=args.recount,
            )
            if not resize_im:
                # replace RandomResizedCropAndInterpolation with
                # RandomCrop
                transform.transforms[0] = transforms.RandomCrop(
                    args.input_size, padding=4)

            if args.input_size == 32 and args.data_set == 'CIFAR':
                # Override the ImageNet normalization with CIFAR100 statistics.
                transform.transforms[-1] = transforms.Normalize((0.5071, 0.4867, 0.4408), (0.2675, 0.2565, 0.2761))
            return transform

    # Evaluation pipeline: resize (keeping the 256/224 ratio), center-crop,
    # tensorize, normalize.
    t = []
    if resize_im:
        size = int((256 / 224) * args.input_size)
        t.append(
            transforms.Resize(size, interpolation=interpolation),  # to maintain same ratio w.r.t. 224 images
        )
        t.append(transforms.CenterCrop(args.input_size))

    t.append(transforms.ToTensor())
    if args.input_size == 32 and args.data_set == 'CIFAR':
        # Normalization values for CIFAR100
        t.append(transforms.Normalize((0.5071, 0.4867, 0.4408), (0.2675, 0.2565, 0.2761)))
    else:
        t.append(transforms.Normalize(IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD))
    return transforms.Compose(t)
| 5,681 | 34.962025 | 115 | py |
dytox | dytox-main/continual/factory.py | import torch
from continual import convit, dytox, samplers, vit
from continual.cnn import (InceptionV3, resnet18, resnet34, resnet50,
resnext50_32x4d, seresnet18, vgg16, vgg16_bn,
wide_resnet50_2, resnet18_scs, resnet18_scs_max, resnet18_scs_avg, resnet_rebuffi)
def get_backbone(args):
    """Instantiate the backbone network named by `args.model`.

    :param args: Parsed CLI arguments; `args.model` selects the architecture,
        the remaining fields configure transformer variants.
    :return: The instantiated backbone module.
    :raises NotImplementedError: If `args.model` is unknown.

    Fix: 'resnet18_scs_avg' previously built `resnet18_scs_max()` and
    'resnet18_scs_max' built `resnet18_scs_avg()` — the two mappings were
    swapped.
    """
    print(f"Creating model: {args.model}")

    if args.model == 'vit':
        model = vit.VisionTransformer(
            num_classes=args.nb_classes,
            drop_rate=args.drop,
            drop_path_rate=args.drop_path,
            img_size=args.input_size,
            patch_size=args.patch_size,
            embed_dim=args.embed_dim,
            depth=args.depth,
            num_heads=args.num_heads
        )
    elif args.model == 'convit':
        model = convit.ConVit(
            num_classes=args.nb_classes,
            drop_rate=args.drop,
            drop_path_rate=args.drop_path,
            img_size=args.input_size,
            patch_size=args.patch_size,
            embed_dim=args.embed_dim,
            depth=args.depth,
            num_heads=args.num_heads,
            local_up_to_layer=args.local_up_to_layer,
            locality_strength=args.locality_strength,
            class_attention=args.class_attention,
            ca_type='jointca' if args.joint_tokens else 'base',
            norm_layer=args.norm,
        )
    elif args.model == 'resnet18_scs': model = resnet18_scs()
    elif args.model == 'resnet18_scs_avg': model = resnet18_scs_avg()  # was resnet18_scs_max(): swapped
    elif args.model == 'resnet18_scs_max': model = resnet18_scs_max()  # was resnet18_scs_avg(): swapped
    elif args.model == 'resnet18': model = resnet18()
    elif args.model == 'resnet34': model = resnet34()
    elif args.model == 'resnet50': model = resnet50()
    elif args.model == 'wide_resnet50': model = wide_resnet50_2()
    elif args.model == 'resnext50': model = resnext50_32x4d()
    elif args.model == 'seresnet18': model = seresnet18()
    elif args.model == 'inception3': model = InceptionV3()
    elif args.model == 'vgg16bn': model = vgg16_bn()
    elif args.model == 'vgg16': model = vgg16()
    elif args.model == 'rebuffi': model = resnet_rebuffi()
    else:
        raise NotImplementedError(f'Unknown backbone {args.model}')

    return model
def get_loaders(dataset_train, dataset_val, args, finetuning=False):
    """Build the train and validation data loaders.

    During finetuning, `args.ft_no_sampling` disables the (possibly
    distributed / repeated-augmentation) train sampler.
    """
    sampler_train, sampler_val = samplers.get_sampler(dataset_train, dataset_val, args)

    skip_sampler = finetuning and args.ft_no_sampling
    loader_train = torch.utils.data.DataLoader(
        dataset_train,
        sampler=None if skip_sampler else sampler_train,
        batch_size=args.batch_size,
        num_workers=args.num_workers,
        pin_memory=args.pin_mem,
        # Avoid dropping the only (partial) batch of tiny datasets.
        drop_last=len(sampler_train) > args.batch_size,
    )

    loader_val = torch.utils.data.DataLoader(
        dataset_val,
        sampler=sampler_val,
        batch_size=int(1.5 * args.batch_size),
        num_workers=args.num_workers,
        pin_memory=args.pin_mem,
        drop_last=False,
    )

    return loader_train, loader_val
def get_train_loaders(dataset_train, args, batch_size=None, drop_last=True):
    """Build only the training data loader.

    :param batch_size: Optional override; any falsy value falls back to
        `args.batch_size`.
    """
    effective_bs = batch_size or args.batch_size
    sampler_train = samplers.get_train_sampler(dataset_train, args)

    return torch.utils.data.DataLoader(
        dataset_train,
        sampler=sampler_train,
        batch_size=effective_bs,
        num_workers=args.num_workers,
        pin_memory=args.pin_mem,
        drop_last=drop_last,
    )
class InfiniteLoader:
    """Wraps an iterable loader so it can be consumed endlessly.

    Each call to `get()` yields the next batch, transparently restarting a
    fresh pass once the underlying loader is exhausted.
    """

    def __init__(self, loader):
        self.loader = loader
        self.reset()

    def reset(self):
        """Begin a new pass over the underlying loader."""
        self.it = iter(self.loader)

    def get(self):
        """Return the next batch, restarting the loader on exhaustion."""
        try:
            batch = next(self.it)
        except StopIteration:
            self.reset()
            batch = self.get()
        return batch
def update_dytox(model_without_ddp, task_id, args):
    """Create the DyTox wrapper on the first task, grow it on later ones.

    :param model_without_ddp: Backbone (task 0) or existing DyTox model.
    :param task_id: Index of the incoming task.
    :param args: Parsed CLI arguments configuring the DyTox head.
    :return: The (new or updated) DyTox model.
    """
    if task_id == 0:
        print(f'Creating DyTox!')
        return dytox.DyTox(
            model_without_ddp,
            nb_classes=args.initial_increment,
            individual_classifier=args.ind_clf,
            head_div=args.head_div > 0.,
            head_div_mode=args.head_div_mode,
            joint_tokens=args.joint_tokens,
            resnet=args.resnet
        )

    print(f'Updating ensemble, new embed dim {model_without_ddp.embed_dim}.')
    model_without_ddp.add_model(args.increment)
    return model_without_ddp
| 4,398 | 32.838462 | 109 | py |
dytox | dytox-main/continual/sam.py | import torch
class SAM:
    """SAM, ASAM, and Look-SAM

    Modified version of: https://github.com/davda54/sam
    Only Look-SAM has been added.
    It speeds up SAM quite a lot but the alpha needs to be tuned to reach same performance.

    Fix: `step()` used to call `first_step(zero_grad=True)` although
    `first_step` accepted no such argument, raising a TypeError at runtime;
    `first_step` now takes an optional `zero_grad` flag (delegating to the
    base optimizer), matching the reference implementation. The unused
    `defaults` dict has also been removed.
    """

    def __init__(self, base_optimizer, model_without_ddp, rho=0.05, adaptive=False, div='', use_look_sam=False, look_sam_alpha=0., **kwargs):
        assert rho >= 0.0, f"Invalid rho, should be non-negative: {rho}"

        self.base_optimizer = base_optimizer
        self.param_groups = self.base_optimizer.param_groups
        self.model_without_ddp = model_without_ddp

        self.rho = rho
        self.adaptive = adaptive
        self.div = div
        self.look_sam_alpha = look_sam_alpha
        self.use_look_sam = use_look_sam
        self.g_v = dict()

    @torch.no_grad()
    def first_step(self, zero_grad=False):
        """Perturb the weights: climb to the local maximum "w + e(w)".

        Stores the perturbation (`e_w`) and a copy of the gradients (`g`)
        per-parameter, for use by `second_step`.

        :param zero_grad: If True, zero the base optimizer's gradients after
            perturbing (needed before re-evaluating the closure).
        """
        self.e_w = dict()
        self.g = dict()

        grad_norm = self._grad_norm()
        for group in self.param_groups:
            scale = self.rho / (grad_norm + 1e-12)

            for p in group["params"]:
                if p.grad is None: continue
                # ASAM scales the perturbation by |w|^2 element-wise.
                e_w = (torch.pow(p, 2) if self.adaptive else 1.0) * p.grad * scale.to(p)
                p.add_(e_w)  # climb to the local maximum "w + e(w)"

                self.e_w[p] = e_w
                self.g[p] = p.grad.clone()

        if zero_grad:
            self.base_optimizer.zero_grad()

    @torch.no_grad()
    def second_step(self, look_sam_update=False):
        """Undo the perturbation and (for Look-SAM) adjust the gradients."""
        if self.use_look_sam and look_sam_update:
            self.g_v = dict()

        for group in self.param_groups:
            for p in group["params"]:
                if p.grad is None: continue

                if not self.use_look_sam or look_sam_update:
                    p.sub_(self.e_w[p])  # go back to "w" from "w + e(w)"

                if self.use_look_sam and look_sam_update:
                    # Decompose g_s into its component orthogonal to g.
                    cos = self._cos(self.g[p], p.grad)
                    norm_gs = p.grad.norm(p=2)
                    norm_g = self.g[p].norm(p=2)
                    self.g_v[p] = p.grad - norm_gs * cos * self.g[p] / norm_g
                elif self.use_look_sam:
                    # Re-use the cached orthogonal component on non-update steps.
                    norm_g = p.grad.norm(p=2)
                    norm_gv = self.g_v[p].norm(p=2)
                    p.grad.add_(self.look_sam_alpha * (norm_g / norm_gv) * self.g_v[p])

        self.e_w = None
        self.g = None

    def _cos(self, a, b):
        """Cosine similarity between two (flattened) tensors."""
        return torch.dot(a.view(-1), b.view(-1)) / (a.norm() * b.norm())

    @torch.no_grad()
    def step(self, closure=None):
        """Full SAM step: perturb, re-evaluate the loss via `closure`, restore."""
        assert closure is not None, "Sharpness Aware Minimization requires closure, but it was not provided"
        closure = torch.enable_grad()(closure)  # the closure should do a full forward-backward pass

        self.first_step(zero_grad=True)
        closure()
        self.second_step()

    def _grad_norm(self):
        """L2 norm of all gradients, gathered on a single device."""
        shared_device = self.param_groups[0]["params"][0].device  # put everything on the same device, in case of model parallelism
        norm = torch.norm(
            torch.stack([
                ((torch.abs(p) if self.adaptive else 1.0) * p.grad).norm(p=2).to(shared_device)
                for group in self.param_groups for p in group["params"]
                if p.grad is not None
            ]),
            p=2
        )
        return norm
| 3,350 | 35.032258 | 141 | py |
dytox | dytox-main/continual/pod.py | import torch
from torch.nn import functional as F
def pod_loss(feats, old_feats, scales=(1,), normalize=True):
    """POD (Pooled Output Distillation) loss between current and old features.

    :param feats: List of feature maps (B, C, H, W) from the current model.
    :param old_feats: Matching list of feature maps from the frozen old model.
    :param scales: Spatial pyramid scales forwarded to `_local_pod`.
        Fix: was the mutable default `[1]`; an immutable tuple avoids the
        shared-mutable-default pitfall with identical behavior.
    :param normalize: L2-normalize the pooled embeddings before comparing.
    :return: Scalar loss, averaged over batch and feature levels.
    """
    loss = 0.
    assert len(feats) == len(old_feats)

    for feat, old_feat in zip(feats, old_feats):
        emb = _local_pod(feat, scales)
        old_emb = _local_pod(old_feat, scales)

        if normalize:
            emb = F.normalize(emb, p=2, dim=-1)
            old_emb = F.normalize(old_emb, p=2, dim=-1)

        # torch.frobenius_norm is deprecated; the per-sample 2-norm over the
        # last dim via torch.linalg.norm is the equivalent modern call.
        loss += torch.linalg.norm(emb - old_emb, dim=-1)

    return loss.mean() / len(feats)
def _local_pod(x, spp_scales=[1, 2, 4]):
b = x.shape[0]
w = x.shape[-1]
emb = []
for scale in spp_scales:
k = w // scale
for i in range(scale):
for j in range(scale):
tensor = x[..., i * k:(i + 1) * k, j * k:(j + 1) * k]
horizontal_pool = tensor.mean(dim=3).view(b, -1)
vertical_pool = tensor.mean(dim=2).view(b, -1)
emb.append(horizontal_pool)
emb.append(vertical_pool)
return torch.cat(emb, dim=1)
| 1,061 | 25.55 | 69 | py |
dytox | dytox-main/continual/dytox.py | import copy
import torch
from timm.models.layers import trunc_normal_
from torch import nn
from continual.cnn import resnet18
import continual.utils as cutils
from continual.convit import ClassAttention, Block
class ContinualClassifier(nn.Module):
    """Your good old classifier to do continual.

    A LayerNorm followed by a Linear head whose output dimension can grow
    as new classes arrive.
    """

    def __init__(self, embed_dim, nb_classes):
        super().__init__()

        self.embed_dim = embed_dim
        self.nb_classes = nb_classes
        self.head = nn.Linear(embed_dim, nb_classes, bias=True)
        self.norm = nn.LayerNorm(embed_dim)

    def reset_parameters(self):
        """Re-initialize both the linear head and the normalization layer."""
        self.head.reset_parameters()
        self.norm.reset_parameters()

    def forward(self, x):
        x = self.norm(x)
        return self.head(x)

    def add_new_outputs(self, n):
        """Grow the head by `n` outputs, preserving the old classes' weights.

        Fix: the old classes' biases were previously NOT copied into the new
        head (only the weights were), so learned biases were silently reset
        at every task boundary.
        """
        head = nn.Linear(self.embed_dim, self.nb_classes + n, bias=True)
        head.weight.data[:-n] = self.head.weight.data
        head.bias.data[:-n] = self.head.bias.data  # previously dropped

        head.to(self.head.weight.device)
        self.head = head
        self.nb_classes += n
class DyTox(nn.Module):
    """"DyTox for the win!

    :param transformer: The base transformer.
    :param nb_classes: Thhe initial number of classes.
    :param individual_classifier: Classifier config, DyTox is in `1-1`.
    :param head_div: Whether to use the divergence head for improved diversity.
    :param head_div_mode: Use the divergence head in TRaining, FineTuning, or both.
    :param joint_tokens: Use a single TAB forward with masked attention (faster but a bit worse).
    :param resnet: Use a surgically-modified ResNet18 encoder instead of the
        transformer's self-attention blocks.
    """
    def __init__(
        self,
        transformer,
        nb_classes,
        individual_classifier='',
        head_div=False,
        head_div_mode=['tr', 'ft'],
        joint_tokens=False,
        resnet=False
    ):
        super().__init__()

        self.nb_classes = nb_classes
        self.embed_dim = transformer.embed_dim
        self.individual_classifier = individual_classifier
        self.use_head_div = head_div
        self.head_div_mode = head_div_mode
        self.head_div = None
        self.joint_tokens = joint_tokens
        self.in_finetuning = False
        self.use_resnet = resnet

        # Number of classes introduced by each task, in order.
        self.nb_classes_per_task = [nb_classes]

        if self.use_resnet:
            # ResNet18 encoder: layer4 and avgpool are removed and replaced by
            # a 1x1-conv head projecting to 504 channels, so the output is a
            # spatial token map compatible with the TAB class-attention block.
            print('ResNet18 backbone for ens')
            self.backbone = resnet18()
            self.backbone.head = nn.Sequential(
                nn.Conv2d(256, 384, kernel_size=1),
                nn.BatchNorm2d(384),
                nn.ReLU(inplace=True),
                nn.Conv2d(384, 504, kernel_size=1),
                nn.BatchNorm2d(504),
                nn.ReLU(inplace=True)
            )
            self.backbone.avgpool = nn.Identity()
            self.backbone.layer4 = nn.Identity()
            #self.backbone.layer4 = self.backbone._make_layer_nodown(
            #    256, 512, 2, stride=1, dilation=2
            #)
            self.backbone = self.backbone.cuda()
            self.backbone.embed_dim = 504

            self.embed_dim = self.backbone.embed_dim
            # A single fresh class-attention TAB on top of the CNN features.
            self.tabs = nn.ModuleList([
                Block(
                    dim=self.embed_dim, num_heads=12, mlp_ratio=4, qkv_bias=False, qk_scale=None,
                    drop=0., attn_drop=0., drop_path=0., norm_layer=nn.LayerNorm,
                    attention_type=ClassAttention
                ).cuda()
            ])
            self.tabs[0].reset_parameters()

            token = nn.Parameter(torch.zeros(1, 1, self.embed_dim).cuda())
            trunc_normal_(token, std=.02)
            self.task_tokens = nn.ParameterList([token])
        else:
            # Split the transformer into shared SABs (encoder) and TABs
            # (task-attention decoder), re-using its cls token as task token 1.
            self.patch_embed = transformer.patch_embed
            self.pos_embed = transformer.pos_embed
            self.pos_drop = transformer.pos_drop

            self.sabs = transformer.blocks[:transformer.local_up_to_layer]

            self.tabs = transformer.blocks[transformer.local_up_to_layer:]

            self.task_tokens = nn.ParameterList([transformer.cls_token])

        if self.individual_classifier != '':
            in_dim, out_dim = self._get_ind_clf_dim()
            self.head = nn.ModuleList([
                ContinualClassifier(in_dim, out_dim).cuda()
            ])
        else:
            self.head = ContinualClassifier(
                self.embed_dim * len(self.task_tokens), sum(self.nb_classes_per_task)
            ).cuda()

    def end_finetuning(self):
        """Start FT mode, usually with backbone freezed and balanced classes."""
        self.in_finetuning = False

    def begin_finetuning(self):
        """End FT mode, usually with backbone freezed and balanced classes."""
        self.in_finetuning = True

    def add_model(self, nb_new_classes):
        """Expand model as per the DyTox framework given `nb_new_classes`.

        :param nb_new_classes: Number of new classes brought by the new task.
        """
        self.nb_classes_per_task.append(nb_new_classes)

        # Class tokens ---------------------------------------------------------
        # The new task token is initialized from the previous one, then re-noised.
        new_task_token = copy.deepcopy(self.task_tokens[-1])
        trunc_normal_(new_task_token, std=.02)
        self.task_tokens.append(new_task_token)
        # ----------------------------------------------------------------------

        # Diversity head -------------------------------------------------------
        # +1 output: "new classes" vs one catch-all "old" class.
        if self.use_head_div:
            self.head_div = ContinualClassifier(
                self.embed_dim, self.nb_classes_per_task[-1] + 1
            ).cuda()
        # ----------------------------------------------------------------------

        # Classifier -----------------------------------------------------------
        if self.individual_classifier != '':
            in_dim, out_dim = self._get_ind_clf_dim()
            self.head.append(
                ContinualClassifier(in_dim, out_dim).cuda()
            )
        else:
            self.head = ContinualClassifier(
                self.embed_dim * len(self.task_tokens), sum(self.nb_classes_per_task)
            ).cuda()
        # ----------------------------------------------------------------------

    def _get_ind_clf_dim(self):
        """What are the input and output dim of classifier depending on its config.

        By default, DyTox is in 1-1.
        """
        if self.individual_classifier == '1-1':
            in_dim = self.embed_dim
            out_dim = self.nb_classes_per_task[-1]
        elif self.individual_classifier == '1-n':
            in_dim = self.embed_dim
            out_dim = sum(self.nb_classes_per_task)
        elif self.individual_classifier == 'n-n':
            in_dim = len(self.task_tokens) * self.embed_dim
            out_dim = sum(self.nb_classes_per_task)
        elif self.individual_classifier == 'n-1':
            in_dim = len(self.task_tokens) * self.embed_dim
            out_dim = self.nb_classes_per_task[-1]
        else:
            raise NotImplementedError(f'Unknown ind classifier {self.individual_classifier}')
        return in_dim, out_dim

    def freeze(self, names):
        """Choose what to freeze depending on the name of the module."""
        requires_grad = False
        # First unfreeze everything, then freeze only the requested parts.
        cutils.freeze_parameters(self, requires_grad=not requires_grad)
        self.train()

        for name in names:
            if name == 'all':
                self.eval()
                return cutils.freeze_parameters(self)
            elif name == 'old_task_tokens':
                cutils.freeze_parameters(self.task_tokens[:-1], requires_grad=requires_grad)
            elif name == 'task_tokens':
                cutils.freeze_parameters(self.task_tokens, requires_grad=requires_grad)
            elif name == 'sab':
                if self.use_resnet:
                    self.backbone.eval()
                    cutils.freeze_parameters(self.backbone, requires_grad=requires_grad)
                else:
                    self.sabs.eval()
                    cutils.freeze_parameters(self.patch_embed, requires_grad=requires_grad)
                    cutils.freeze_parameters(self.pos_embed, requires_grad=requires_grad)
                    cutils.freeze_parameters(self.sabs, requires_grad=requires_grad)
            elif name == 'tab':
                self.tabs.eval()
                cutils.freeze_parameters(self.tabs, requires_grad=requires_grad)
            elif name == 'old_heads':
                self.head[:-1].eval()
                cutils.freeze_parameters(self.head[:-1], requires_grad=requires_grad)
            elif name == 'heads':
                self.head.eval()
                cutils.freeze_parameters(self.head, requires_grad=requires_grad)
            elif name == 'head_div':
                self.head_div.eval()
                cutils.freeze_parameters(self.head_div, requires_grad=requires_grad)
            else:
                raise NotImplementedError(f'Unknown name={name}.')

    def param_groups(self):
        # Named parameter groups for per-group optimizer settings.
        # NOTE(review): the 'sa'/'patch'/'pos' entries reference attributes
        # that only exist when use_resnet is False — confirm callers never
        # access them in resnet mode.
        # NOTE(review): 'old_heads' slices the ModuleList by the number of
        # *classes* of the last task, not the number of heads — looks
        # suspicious for increments > 1; confirm intent.
        return {
            'all': self.parameters(),
            'old_task_tokens': self.task_tokens[:-1],
            'task_tokens': self.task_tokens.parameters(),
            'new_task_tokens': [self.task_tokens[-1]],
            'sa': self.sabs.parameters(),
            'patch': self.patch_embed.parameters(),
            'pos': [self.pos_embed],
            'ca': self.tabs.parameters(),
            'old_heads': self.head[:-self.nb_classes_per_task[-1]].parameters() \
                              if self.individual_classifier else \
                              self.head.parameters(),
            'new_head': self.head[-1].parameters() if self.individual_classifier else self.head.parameters(),
            'head': self.head.parameters(),
            'head_div': self.head_div.parameters() if self.head_div is not None else None
        }

    def reset_classifier(self):
        """Re-initialize every classification head."""
        if isinstance(self.head, nn.ModuleList):
            for head in self.head:
                head.reset_parameters()
        else:
            self.head.reset_parameters()

    def hook_before_update(self):
        # No-op hook, kept for trainer compatibility.
        pass

    def hook_after_update(self):
        # No-op hook, kept for trainer compatibility.
        pass

    def hook_after_epoch(self):
        # No-op hook, kept for trainer compatibility.
        pass

    def epoch_log(self):
        """Write here whatever you want to log on the internal state of the model."""
        log = {}

        # Compute mean distance between class tokens
        mean_dist, min_dist, max_dist = [], float('inf'), 0.
        with torch.no_grad():
            for i in range(len(self.task_tokens)):
                for j in range(i + 1, len(self.task_tokens)):
                    dist = torch.norm(self.task_tokens[i] - self.task_tokens[j], p=2).item()
                    mean_dist.append(dist)

                    min_dist = min(dist, min_dist)
                    max_dist = max(dist, max_dist)

        if len(mean_dist) > 0:
            mean_dist = sum(mean_dist) / len(mean_dist)
        else:
            # Single task: no pairwise distances to report.
            mean_dist = 0.
            min_dist = 0.

        assert min_dist <= mean_dist <= max_dist, (min_dist, mean_dist, max_dist)
        log['token_mean_dist'] = round(mean_dist, 5)
        log['token_min_dist'] = round(min_dist, 5)
        log['token_max_dist'] = round(max_dist, 5)
        return log

    def get_internal_losses(self, clf_loss):
        """If you want to compute some internal loss, like a EWC loss for example.

        :param clf_loss: The main classification loss (if you wanted to use its gradient for example).
        :return: a dictionnary of losses, all values will be summed in the final loss.
        """
        int_losses = {}
        return int_losses

    def forward_features(self, x):
        # Shared part, this is the ENCODER
        B = x.shape[0]

        if self.use_resnet:
            x, self.feats = self.backbone.forward_tokens(x)
        else:
            x = self.patch_embed(x)
            x = x + self.pos_embed
            x = self.pos_drop(x)

            self.feats = []
            for blk in self.sabs:
                x, attn, v = blk(x)
                self.feats.append(x)
            # Drop the last SAB output from the stored features — presumably
            # because it feeds the TABs directly; confirm against POD usage.
            self.feats.pop(-1)

        # Specific part, this is what we called the "task specific DECODER"
        if self.joint_tokens:
            return self.forward_features_jointtokens(x)

        tokens = []
        attentions = []
        mask_heads = None

        # One TAB forward per task token: each token attends over the shared
        # patch tokens and yields one task embedding.
        for task_token in self.task_tokens:
            task_token = task_token.expand(B, -1, -1)

            for blk in self.tabs:
                task_token, attn, v = blk(torch.cat((task_token, x), dim=1), mask_heads=mask_heads)

            attentions.append(attn)
            tokens.append(task_token[:, 0])

        self._class_tokens = tokens
        return tokens, tokens[-1], attentions

    def forward_features_jointtokens(self, x):
        """Method to do a single TAB forward with all task tokens.

        A masking is used to avoid interaction between tasks. In theory it should
        give the same results as multiple TAB forward, but in practice it's a little
        bit worse, not sure why. So if you have an idea, please tell me!
        """
        B = len(x)

        task_tokens = torch.cat(
            [task_token.expand(B, 1, -1) for task_token in self.task_tokens],
            dim=1
        )

        for blk in self.tabs:
            task_tokens, _, _ = blk(
                torch.cat((task_tokens, x), dim=1),
                task_index=len(self.task_tokens),
                attn_mask=True
            )

        if self.individual_classifier in ('1-1', '1-n'):
            return task_tokens.permute(1, 0, 2), task_tokens[:, -1], None
        return task_tokens.view(B, -1), task_tokens[:, -1], None

    def forward_classifier(self, tokens, last_token):
        """Once all task embeddings e_1, ..., e_t are extracted, classify.

        Classifier has different mode based on a pattern x-y:
        - x means the number of task embeddings in input
        - y means the number of task to predict

        So:
        - n-n: predicts all task given all embeddings
        But:
        - 1-1: predict 1 task given 1 embedding, which is the 'independent classifier' used in the paper.

        :param tokens: A list of all task tokens embeddings.
        :param last_token: The ultimate task token embedding from the latest task.
        """
        logits_div = None

        if self.individual_classifier != '':
            logits = []

            for i, head in enumerate(self.head):
                if self.individual_classifier in ('1-n', '1-1'):
                    logits.append(head(tokens[i]))
                else:  # n-1, n-n
                    logits.append(head(torch.cat(tokens[:i+1], dim=1)))

            if self.individual_classifier in ('1-1', 'n-1'):
                logits = torch.cat(logits, dim=1)
            else:  # 1-n, n-n
                # Heads overlap on old classes: sum them then average each
                # class slice by the number of heads that predicted it.
                final_logits = torch.zeros_like(logits[-1])
                for i in range(len(logits)):
                    final_logits[:, :logits[i].shape[1]] += logits[i]

                for i, c in enumerate(self.nb_classes_per_task):
                    final_logits[:, :c] /= len(self.nb_classes_per_task) - i

                logits = final_logits
        elif isinstance(tokens, torch.Tensor):
            logits = self.head(tokens)
        else:
            logits = self.head(torch.cat(tokens, dim=1))

        if self.head_div is not None and eval_training_finetuning(self.head_div_mode, self.in_finetuning):
            logits_div = self.head_div(last_token)  # only last token

        return {
            'logits': logits,
            'div': logits_div,
            'tokens': tokens
        }

    def forward(self, x):
        """Full forward: encode, extract task embeddings, classify."""
        tokens, last_token, _ = self.forward_features(x)
        return self.forward_classifier(tokens, last_token)
def eval_training_finetuning(mode, in_ft):
    """Should the divergence head run, given its mode and the current phase?

    :param mode: Collection containing 'tr' (training) and/or 'ft' (finetuning).
    :param in_ft: Whether the model is currently in the finetuning phase.
    :return: True if the current phase is enabled by `mode`.
    """
    # Equivalent to the original three-way check: when both flags are set the
    # answer is True in either phase.
    if in_ft:
        return 'ft' in mode
    return 'tr' in mode
| 15,824 | 36.768496 | 109 | py |
dytox | dytox-main/continual/samplers.py | # Copyright (c) 2015-present, Facebook, Inc.
# All rights reserved.
import torch
import torch.distributed as dist
import math
import continual.utils as utils
class RASampler(torch.utils.data.Sampler):
    """Sampler that restricts data loading to a subset of the dataset for distributed,
    with repeated augmentation.
    It ensures that different each augmented version of a sample will be visible to a
    different process (GPU)
    Heavily based on torch.utils.data.DistributedSampler
    """

    def __init__(self, dataset, num_replicas=None, rank=None, shuffle=True):
        if num_replicas is None:
            if not dist.is_available():
                raise RuntimeError("Requires distributed package to be available")
            num_replicas = dist.get_world_size()
        if rank is None:
            if not dist.is_available():
                raise RuntimeError("Requires distributed package to be available")
            rank = dist.get_rank()
        self.dataset = dataset
        self.num_replicas = num_replicas
        self.rank = rank
        self.epoch = 0
        # Each sample is repeated 3 times (repeated augmentation), then split
        # across replicas.
        self.num_samples = int(math.ceil(len(self.dataset) * 3.0 / self.num_replicas))
        self.total_size = self.num_samples * self.num_replicas
        # self.num_selected_samples = int(math.ceil(len(self.dataset) / self.num_replicas))
        # Only a 256-aligned prefix of the repeated indices is actually yielded.
        self.num_selected_samples = int(math.floor(len(self.dataset) // 256 * 256 / self.num_replicas))
        self.shuffle = shuffle

    def __iter__(self):
        # deterministically shuffle based on epoch
        g = torch.Generator()
        g.manual_seed(self.epoch)
        if self.shuffle:
            indices = torch.randperm(len(self.dataset), generator=g).tolist()
        else:
            indices = list(range(len(self.dataset)))

        # add extra samples to make it evenly divisible
        # Triplicate each index (the 3 augmented views), then pad by wrapping.
        indices = [ele for ele in indices for i in range(3)]
        indices += indices[:(self.total_size - len(indices))]
        assert len(indices) == self.total_size

        # subsample
        # Strided split: consecutive copies of a sample land on different ranks.
        indices = indices[self.rank:self.total_size:self.num_replicas]
        assert len(indices) == self.num_samples

        return iter(indices[:self.num_selected_samples])

    def __len__(self):
        return self.num_selected_samples

    def set_epoch(self, epoch):
        # Called by the training loop so every epoch reshuffles differently.
        self.epoch = epoch
class RASamplerNoDist(RASampler):
    """Repeated-augmentation sampler for the non-distributed case.

    Emulates `num_replicas` virtual workers on a single process by advancing
    an internal `rank` round-robin at every `__iter__` call.
    """
    def __init__(self, dataset, num_replicas=None, shuffle=True):
        if num_replicas is None:
            num_replicas = 2
        self.dataset = dataset
        self.num_replicas = num_replicas
        self.epoch = 0
        self.num_samples = int(math.ceil(len(self.dataset) * 3.0 / self.num_replicas))
        self.total_size = self.num_samples * self.num_replicas
        # self.num_selected_samples = int(math.ceil(len(self.dataset) / self.num_replicas))
        self.num_selected_samples = int(math.floor(len(self.dataset) // 256 * 256 / self.num_replicas))
        self.shuffle = shuffle
        self.rank = 0

    def __iter__(self):
        # deterministically shuffle based on epoch
        g = torch.Generator()
        g.manual_seed(self.epoch)
        if self.shuffle:
            indices = torch.randperm(len(self.dataset), generator=g).tolist()
        else:
            indices = list(range(len(self.dataset)))

        # add extra samples to make it evenly divisible
        indices = [ele for ele in indices for i in range(3)]
        indices += indices[:(self.total_size - len(indices))]
        assert len(indices) == self.total_size

        # subsample
        indices = indices[self.rank:self.total_size:self.num_replicas]
        assert len(indices) == self.num_samples

        # Rotate the virtual rank so the next pass yields the other stride.
        self.rank = (self.rank + 1) % self.num_replicas

        return iter(indices[:self.num_selected_samples])

    def __len__(self):
        # NOTE(review): unlike the parent, this reports the count across all
        # virtual replicas while __iter__ yields only one replica's share —
        # confirm this mismatch is intended by the loader logic.
        return self.num_selected_samples * self.num_replicas
def get_sampler(dataset_train, dataset_val, args):
    """Build the train and validation samplers, distributed-aware.

    Repeated augmentation (`args.repeated_aug`) selects the RA samplers;
    `args.dist_eval` enables a distributed validation sampler.
    """
    if not args.distributed:
        if args.repeated_aug:
            sampler_train = RASamplerNoDist(dataset_train, num_replicas=2, shuffle=True)
        else:
            sampler_train = torch.utils.data.RandomSampler(dataset_train)
        return sampler_train, torch.utils.data.SequentialSampler(dataset_val)

    num_tasks = utils.get_world_size()
    global_rank = utils.get_rank()
    train_cls = RASampler if args.repeated_aug else torch.utils.data.DistributedSampler
    sampler_train = train_cls(
        dataset_train, num_replicas=num_tasks, rank=global_rank, shuffle=True
    )

    if args.dist_eval:
        if len(dataset_val) % num_tasks != 0:
            print('Warning: Enabling distributed evaluation with an eval dataset not divisible by process number. '
                  'This will slightly alter validation results as extra duplicate entries are added to achieve '
                  'equal num of samples per-process.')
        sampler_val = torch.utils.data.DistributedSampler(
            dataset_val, num_replicas=num_tasks, rank=global_rank, shuffle=False)
    else:
        sampler_val = torch.utils.data.SequentialSampler(dataset_val)

    return sampler_train, sampler_val
def get_train_sampler(dataset_train, args):
    """Build only the training sampler, distributed-aware."""
    if not args.distributed:
        return torch.utils.data.RandomSampler(dataset_train)

    num_tasks = utils.get_world_size()
    global_rank = utils.get_rank()
    train_cls = RASampler if args.repeated_aug else torch.utils.data.DistributedSampler
    return train_cls(
        dataset_train, num_replicas=num_tasks, rank=global_rank, shuffle=True
    )
| 5,913 | 37.653595 | 119 | py |
dytox | dytox-main/continual/cnn/inception.py | """ inceptionv3 in pytorch
[1] Christian Szegedy, Vincent Vanhoucke, Sergey Ioffe, Jonathon Shlens, Zbigniew Wojna
Rethinking the Inception Architecture for Computer Vision
https://arxiv.org/abs/1512.00567v3
"""
import torch
import torch.nn as nn
from continual.cnn import AbstractCNN
class BasicConv2d(nn.Module):
    """Conv2d -> BatchNorm -> ReLU building block.

    The convolution is bias-free since BatchNorm supplies the shift;
    extra Conv2d keyword arguments pass straight through.
    """

    def __init__(self, input_channels, output_channels, **kwargs):
        super().__init__()
        self.conv = nn.Conv2d(input_channels, output_channels, bias=False, **kwargs)
        self.bn = nn.BatchNorm2d(output_channels)
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        # Single fused chain: conv -> bn -> relu.
        return self.relu(self.bn(self.conv(x)))
#same naive inception module
class InceptionA(nn.Module):
    """Naive Inception module: four parallel same-resolution branches
    (1x1, 1x1->5x5, 1x1->3x3->3x3, pool->conv) concatenated channel-wise.

    Output channels: 64 + 64 + 96 + ``pool_features``.
    """
    def __init__(self, input_channels, pool_features):
        super().__init__()
        self.branch1x1 = BasicConv2d(input_channels, 64, kernel_size=1)
        self.branch5x5 = nn.Sequential(
            BasicConv2d(input_channels, 48, kernel_size=1),
            BasicConv2d(48, 64, kernel_size=5, padding=2)
        )
        self.branch3x3 = nn.Sequential(
            BasicConv2d(input_channels, 64, kernel_size=1),
            BasicConv2d(64, 96, kernel_size=3, padding=1),
            BasicConv2d(96, 96, kernel_size=3, padding=1)
        )
        self.branchpool = nn.Sequential(
            nn.AvgPool2d(kernel_size=3, stride=1, padding=1),
            BasicConv2d(input_channels, pool_features, kernel_size=3, padding=1)
        )
    def forward(self, x):
        """Run all four branches on ``x`` and concatenate on the channel dim;
        spatial resolution is unchanged (all branches use 'same' padding)."""
        #x -> 1x1(same)
        branch1x1 = self.branch1x1(x)
        #x -> 1x1 -> 5x5(same)
        branch5x5 = self.branch5x5(x)
        #branch5x5 = self.branch5x5_2(branch5x5)
        #x -> 1x1 -> 3x3 -> 3x3(same)
        branch3x3 = self.branch3x3(x)
        #x -> pool -> 1x1(same)
        branchpool = self.branchpool(x)
        outputs = [branch1x1, branch5x5, branch3x3, branchpool]
        return torch.cat(outputs, 1)
#downsample
#Factorization into smaller convolutions
class InceptionB(nn.Module):
    """Grid-size-reduction Inception module (figure 10 of the paper):
    two stride-2 conv branches run in parallel with a stride-2 max-pool,
    and the three results are concatenated channel-wise.

    Output channels: 384 + 96 + ``input_channels``.
    """
    def __init__(self, input_channels):
        super().__init__()
        self.branch3x3 = BasicConv2d(input_channels, 384, kernel_size=3, stride=2)
        self.branch3x3stack = nn.Sequential(
            BasicConv2d(input_channels, 64, kernel_size=1),
            BasicConv2d(64, 96, kernel_size=3, padding=1),
            BasicConv2d(96, 96, kernel_size=3, stride=2)
        )
        self.branchpool = nn.MaxPool2d(kernel_size=3, stride=2)
    def forward(self, x):
        """Downsample ``x`` spatially (stride 2) while expanding channels."""
        #x - > 3x3(downsample)
        branch3x3 = self.branch3x3(x)
        #x -> 3x3 -> 3x3(downsample)
        branch3x3stack = self.branch3x3stack(x)
        #x -> avgpool(downsample)
        branchpool = self.branchpool(x)
        #"""We can use two parallel stride 2 blocks: P and C. P is a pooling
        #layer (either average or maximum pooling) the activation, both of
        #them are stride 2 the filter banks of which are concatenated as in
        #figure 10."""
        outputs = [branch3x3, branch3x3stack, branchpool]
        return torch.cat(outputs, 1)
#Factorizing Convolutions with Large Filter Size
class InceptionC(nn.Module):
    """Inception module with factorized 7x7 convolutions: every n x n conv is
    replaced by a 1 x n followed by an n x 1 conv (figure 6 of the paper).

    Args:
        input_channels: channels of the incoming feature map.
        channels_7x7: width of the intermediate 7x7-factorized branches.

    Output channels: 192 * 4 = 768; spatial resolution unchanged.
    """
    def __init__(self, input_channels, channels_7x7):
        super().__init__()
        self.branch1x1 = BasicConv2d(input_channels, 192, kernel_size=1)
        c7 = channels_7x7
        #In theory, we could go even further and argue that one can replace any n × n
        #convolution by a 1 × n convolution followed by a n × 1 convolution and the
        #computational cost saving increases dramatically as n grows (see figure 6).
        self.branch7x7 = nn.Sequential(
            BasicConv2d(input_channels, c7, kernel_size=1),
            BasicConv2d(c7, c7, kernel_size=(7, 1), padding=(3, 0)),
            BasicConv2d(c7, 192, kernel_size=(1, 7), padding=(0, 3))
        )
        self.branch7x7stack = nn.Sequential(
            BasicConv2d(input_channels, c7, kernel_size=1),
            BasicConv2d(c7, c7, kernel_size=(7, 1), padding=(3, 0)),
            BasicConv2d(c7, c7, kernel_size=(1, 7), padding=(0, 3)),
            BasicConv2d(c7, c7, kernel_size=(7, 1), padding=(3, 0)),
            BasicConv2d(c7, 192, kernel_size=(1, 7), padding=(0, 3))
        )
        self.branch_pool = nn.Sequential(
            nn.AvgPool2d(kernel_size=3, stride=1, padding=1),
            BasicConv2d(input_channels, 192, kernel_size=1),
        )
    def forward(self, x):
        """Run the four branches and concatenate channel-wise (4 x 192 maps)."""
        #x -> 1x1(same)
        branch1x1 = self.branch1x1(x)
        #x -> 1layer 1*7 and 7*1 (same)
        branch7x7 = self.branch7x7(x)
        #x-> 2layer 1*7 and 7*1(same)
        branch7x7stack = self.branch7x7stack(x)
        #x-> avgpool (same)
        branchpool = self.branch_pool(x)
        outputs = [branch1x1, branch7x7, branch7x7stack, branchpool]
        return torch.cat(outputs, 1)
class InceptionD(nn.Module):
    """Second grid-size-reduction module: two stride-2 conv branches plus a
    stride-2 average pool, concatenated channel-wise.

    Output channels: 320 + 192 + ``input_channels``.
    """
    def __init__(self, input_channels):
        super().__init__()
        self.branch3x3 = nn.Sequential(
            BasicConv2d(input_channels, 192, kernel_size=1),
            BasicConv2d(192, 320, kernel_size=3, stride=2)
        )
        self.branch7x7 = nn.Sequential(
            BasicConv2d(input_channels, 192, kernel_size=1),
            BasicConv2d(192, 192, kernel_size=(1, 7), padding=(0, 3)),
            BasicConv2d(192, 192, kernel_size=(7, 1), padding=(3, 0)),
            BasicConv2d(192, 192, kernel_size=3, stride=2)
        )
        self.branchpool = nn.AvgPool2d(kernel_size=3, stride=2)
    def forward(self, x):
        """Downsample ``x`` spatially (stride 2) via three parallel branches."""
        #x -> 1x1 -> 3x3(downsample)
        branch3x3 = self.branch3x3(x)
        #x -> 1x1 -> 1x7 -> 7x1 -> 3x3 (downsample)
        branch7x7 = self.branch7x7(x)
        #x -> avgpool (downsample)
        branchpool = self.branchpool(x)
        outputs = [branch3x3, branch7x7, branchpool]
        return torch.cat(outputs, 1)
#same
class InceptionE(nn.Module):
    """Inception module with expanded filter-bank outputs (figure 7 of the
    paper): the 3x3 branches fork into parallel 1x3 and 3x1 convs whose
    outputs are concatenated, promoting high-dimensional representations on
    the coarsest grid.

    Output channels: 320 + 768 + 768 + 192 = 2048; resolution unchanged.
    """
    def __init__(self, input_channels):
        super().__init__()
        self.branch1x1 = BasicConv2d(input_channels, 320, kernel_size=1)
        self.branch3x3_1 = BasicConv2d(input_channels, 384, kernel_size=1)
        self.branch3x3_2a = BasicConv2d(384, 384, kernel_size=(1, 3), padding=(0, 1))
        self.branch3x3_2b = BasicConv2d(384, 384, kernel_size=(3, 1), padding=(1, 0))
        self.branch3x3stack_1 = BasicConv2d(input_channels, 448, kernel_size=1)
        self.branch3x3stack_2 = BasicConv2d(448, 384, kernel_size=3, padding=1)
        self.branch3x3stack_3a = BasicConv2d(384, 384, kernel_size=(1, 3), padding=(0, 1))
        self.branch3x3stack_3b = BasicConv2d(384, 384, kernel_size=(3, 1), padding=(1, 0))
        self.branch_pool = nn.Sequential(
            nn.AvgPool2d(kernel_size=3, stride=1, padding=1),
            BasicConv2d(input_channels, 192, kernel_size=1)
        )
    def forward(self, x):
        """Run the four (two of them forked) branches and concatenate."""
        #x -> 1x1 (same)
        branch1x1 = self.branch1x1(x)
        # x -> 1x1 -> 3x1
        # x -> 1x1 -> 1x3
        # concatenate(3x1, 1x3)
        #"""7. Inception modules with expanded the filter bank outputs.
        #This architecture is used on the coarsest (8 × 8) grids to promote
        #high dimensional representations, as suggested by principle
        #2 of Section 2."""
        branch3x3 = self.branch3x3_1(x)
        branch3x3 = [
            self.branch3x3_2a(branch3x3),
            self.branch3x3_2b(branch3x3)
        ]
        branch3x3 = torch.cat(branch3x3, 1)
        # x -> 1x1 -> 3x3 -> 1x3
        # x -> 1x1 -> 3x3 -> 3x1
        #concatenate(1x3, 3x1)
        branch3x3stack = self.branch3x3stack_1(x)
        branch3x3stack = self.branch3x3stack_2(branch3x3stack)
        branch3x3stack = [
            self.branch3x3stack_3a(branch3x3stack),
            self.branch3x3stack_3b(branch3x3stack)
        ]
        branch3x3stack = torch.cat(branch3x3stack, 1)
        branchpool = self.branch_pool(x)
        outputs = [branch1x1, branch3x3, branch3x3stack, branchpool]
        return torch.cat(outputs, 1)
class InceptionV3(AbstractCNN):
    """Inception-v3 backbone adapted for small (32x32) inputs: the stem uses
    padded 3x3 convs and no max-pools, so spatial size is 30->30->14->6->1.

    The classifier head is left as ``None`` (``self.head``) — the continual
    framework attaches one later; calling ``forward`` before that would fail.
    ``embed_dim`` (2048) is the feature width fed to that head.
    """
    def __init__(self, num_classes=100):
        # NOTE(review): num_classes is unused here — the head is external.
        super().__init__()
        self.Conv2d_1a_3x3 = BasicConv2d(3, 32, kernel_size=3, padding=1)
        self.Conv2d_2a_3x3 = BasicConv2d(32, 32, kernel_size=3, padding=1)
        self.Conv2d_2b_3x3 = BasicConv2d(32, 64, kernel_size=3, padding=1)
        self.Conv2d_3b_1x1 = BasicConv2d(64, 80, kernel_size=1)
        self.Conv2d_4a_3x3 = BasicConv2d(80, 192, kernel_size=3)
        #naive inception module
        self.Mixed_5b = InceptionA(192, pool_features=32)
        self.Mixed_5c = InceptionA(256, pool_features=64)
        self.Mixed_5d = InceptionA(288, pool_features=64)
        #downsample
        self.Mixed_6a = InceptionB(288)
        self.Mixed_6b = InceptionC(768, channels_7x7=128)
        self.Mixed_6c = InceptionC(768, channels_7x7=160)
        self.Mixed_6d = InceptionC(768, channels_7x7=160)
        self.Mixed_6e = InceptionC(768, channels_7x7=192)
        #downsample
        self.Mixed_7a = InceptionD(768)
        self.Mixed_7b = InceptionE(1280)
        self.Mixed_7c = InceptionE(2048)
        #6*6 feature size
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.dropout = nn.Dropout2d()
        # Head is attached externally by the continual-learning framework.
        self.head = None
        self.embed_dim = 2048
    def forward(self, x):
        """Full forward pass: stem -> Inception stages -> pool -> head."""
        #32 -> 30
        x = self.Conv2d_1a_3x3(x)
        x = self.Conv2d_2a_3x3(x)
        x = self.Conv2d_2b_3x3(x)
        x = self.Conv2d_3b_1x1(x)
        x = self.Conv2d_4a_3x3(x)
        #30 -> 30
        x = self.Mixed_5b(x)
        x = self.Mixed_5c(x)
        x = self.Mixed_5d(x)
        #30 -> 14
        #Efficient Grid Size Reduction to avoid representation
        #bottleneck
        x = self.Mixed_6a(x)
        #14 -> 14
        #"""In practice, we have found that employing this factorization does not
        #work well on early layers, but it gives very good results on medium
        #grid-sizes (On m × m feature maps, where m ranges between 12 and 20).
        #On that level, very good results can be achieved by using 1 × 7 convolutions
        #followed by 7 × 1 convolutions."""
        x = self.Mixed_6b(x)
        x = self.Mixed_6c(x)
        x = self.Mixed_6d(x)
        x = self.Mixed_6e(x)
        #14 -> 6
        #Efficient Grid Size Reduction
        x = self.Mixed_7a(x)
        #6 -> 6
        #We are using this solution only on the coarsest grid,
        #since that is the place where producing high dimensional
        #sparse representation is the most critical as the ratio of
        #local processing (by 1 × 1 convolutions) is increased compared
        #to the spatial aggregation."""
        x = self.Mixed_7b(x)
        x = self.Mixed_7c(x)
        #6 -> 1
        x = self.avgpool(x)
        x = self.dropout(x)
        x = x.view(x.size(0), -1)
        x = self.head(x)
        return x
def inceptionv3():
    """Factory returning a fresh :class:`InceptionV3` instance (no head)."""
    return InceptionV3()
| 10,916 | 31.108824 | 90 | py |
dytox | dytox-main/continual/cnn/resnet.py | #from .utils import load_state_dict_from_url
from typing import Any, Callable, List, Optional, Type, Union
import torch
import torch.nn as nn
from torch.nn import functional as F
from continual.cnn import AbstractCNN
from torch import Tensor
# Public API of this module.
__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101',
           'resnet152', 'resnext50_32x4d', 'resnext101_32x8d',
           'wide_resnet50_2', 'wide_resnet101_2']

# torchvision download URLs for ImageNet-pretrained checkpoints, keyed by
# architecture name (used by `_resnet` when pretrained=True).
model_urls = {
    'resnet18': 'https://download.pytorch.org/models/resnet18-f37072fd.pth',
    'resnet34': 'https://download.pytorch.org/models/resnet34-b627a593.pth',
    'resnet50': 'https://download.pytorch.org/models/resnet50-0676ba61.pth',
    'resnet101': 'https://download.pytorch.org/models/resnet101-63fe2227.pth',
    'resnet152': 'https://download.pytorch.org/models/resnet152-394f9c45.pth',
    'resnext50_32x4d': 'https://download.pytorch.org/models/resnext50_32x4d-7cdf4587.pth',
    'resnext101_32x8d': 'https://download.pytorch.org/models/resnext101_32x8d-8ba56ff5.pth',
    'wide_resnet50_2': 'https://download.pytorch.org/models/wide_resnet50_2-95faca4d.pth',
    'wide_resnet101_2': 'https://download.pytorch.org/models/wide_resnet101_2-32ee1156.pth',
}
def conv3x3(in_planes: int, out_planes: int, stride: int = 1, groups: int = 1, dilation: int = 1) -> nn.Conv2d:
    """Bias-free 3x3 convolution; padding equals dilation so the spatial
    size is preserved when stride == 1."""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=dilation,
        groups=groups,
        dilation=dilation,
        bias=False,
    )
def conv1x1(in_planes: int, out_planes: int, stride: int = 1) -> nn.Conv2d:
    """Bias-free pointwise (1x1) convolution, typically used for channel
    projection in downsample shortcuts."""
    return nn.Conv2d(
        in_planes, out_planes,
        kernel_size=1, stride=stride, bias=False,
    )
class BasicBlock(nn.Module):
    """Two-3x3-conv residual block (ResNet-18/34 style).

    NOTE: unlike torchvision, the final ReLU after the residual add is
    disabled here (see the commented line in ``forward``); ``ResNet``'s
    forward methods apply ``F.relu`` after each stage instead, so the
    pre-activation features stay accessible.
    """
    # Channel multiplier of the block's output relative to ``planes``.
    expansion: int = 1
    def __init__(
        self,
        inplanes: int,
        planes: int,
        stride: int = 1,
        downsample: Optional[nn.Module] = None,
        groups: int = 1,
        base_width: int = 64,
        dilation: int = 1,
        norm_layer: Optional[Callable[..., nn.Module]] = None
    ) -> None:
        super(BasicBlock, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        if groups != 1 or base_width != 64:
            raise ValueError('BasicBlock only supports groups=1 and base_width=64')
        #if dilation > 1:
        #    raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
        # Both self.conv1 and self.downsample layers downsample the input when stride != 1
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = norm_layer(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = norm_layer(planes)
        self.downsample = downsample
        self.stride = stride
    def forward(self, x: Tensor) -> Tensor:
        """conv-bn-relu, conv-bn, add shortcut; returns pre-ReLU output."""
        identity = x
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)
        out = self.conv2(out)
        out = self.bn2(out)
        if self.downsample is not None:
            identity = self.downsample(x)
        out += identity
        # Final ReLU deliberately disabled — applied by the caller instead.
        #out = self.relu(out)
        return out
class Bottleneck(nn.Module):
    """1x1 -> 3x3 -> 1x1 bottleneck residual block (ResNet-50+ style).

    Unlike :class:`BasicBlock` above, this block DOES apply the final ReLU
    after the residual addition, matching torchvision's implementation.
    """
    # Bottleneck in torchvision places the stride for downsampling at 3x3 convolution(self.conv2)
    # while original implementation places the stride at the first 1x1 convolution(self.conv1)
    # according to "Deep residual learning for image recognition"https://arxiv.org/abs/1512.03385.
    # This variant is also known as ResNet V1.5 and improves accuracy according to
    # https://ngc.nvidia.com/catalog/model-scripts/nvidia:resnet_50_v1_5_for_pytorch.
    expansion: int = 4
    def __init__(
        self,
        inplanes: int,
        planes: int,
        stride: int = 1,
        downsample: Optional[nn.Module] = None,
        groups: int = 1,
        base_width: int = 64,
        dilation: int = 1,
        norm_layer: Optional[Callable[..., nn.Module]] = None
    ) -> None:
        super(Bottleneck, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        # Width scales with base_width and groups (ResNeXt / wide variants).
        width = int(planes * (base_width / 64.)) * groups
        # Both self.conv2 and self.downsample layers downsample the input when stride != 1
        self.conv1 = conv1x1(inplanes, width)
        self.bn1 = norm_layer(width)
        self.conv2 = conv3x3(width, width, stride, groups, dilation)
        self.bn2 = norm_layer(width)
        self.conv3 = conv1x1(width, planes * self.expansion)
        self.bn3 = norm_layer(planes * self.expansion)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride
    def forward(self, x: Tensor) -> Tensor:
        """Bottleneck path plus (optionally projected) shortcut, then ReLU."""
        identity = x
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)
        out = self.conv2(out)
        out = self.bn2(out)
        out = self.relu(out)
        out = self.conv3(out)
        out = self.bn3(out)
        if self.downsample is not None:
            identity = self.downsample(x)
        out += identity
        out = self.relu(out)
        return out
class ResNet(AbstractCNN):
    """ResNet backbone adapted for small inputs (3x3 stride-1 stem instead
    of torchvision's 7x7 stride-2) and for the continual framework: there is
    no built-in ``fc`` layer — ``self.head`` is attached externally and
    ``embed_dim`` reports the feature width it must accept.

    Because the residual blocks return pre-ReLU outputs (see BasicBlock),
    the forward methods apply ``F.relu`` after each stage.
    """
    def __init__(
        self,
        block: Type[Union[BasicBlock, Bottleneck]],
        layers: List[int],
        num_classes: int = 1000,
        zero_init_residual: bool = False,
        groups: int = 1,
        width_per_group: int = 64,
        replace_stride_with_dilation: Optional[List[bool]] = None,
        norm_layer: Optional[Callable[..., nn.Module]] = None
    ) -> None:
        # NOTE(review): num_classes is unused — the classifier head is external.
        super(ResNet, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        self._norm_layer = norm_layer
        self.inplanes = 64
        self.dilation = 1
        if replace_stride_with_dilation is None:
            # each element in the tuple indicates if we should replace
            # the 2x2 stride with a dilated convolution instead
            replace_stride_with_dilation = [False, False, False]
        if len(replace_stride_with_dilation) != 3:
            raise ValueError("replace_stride_with_dilation should be None "
                             "or a 3-element tuple, got {}".format(replace_stride_with_dilation))
        self.groups = groups
        self.base_width = width_per_group
        # Small-input stem: 3x3 stride-1 conv (torchvision uses 7x7 stride-2).
        self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=3, padding=1,
                               bias=False)
        self.bn1 = norm_layer(self.inplanes)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2,
                                       dilate=replace_stride_with_dilation[0])
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2,
                                       dilate=replace_stride_with_dilation[1])
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2,
                                       dilate=replace_stride_with_dilation[2])
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        #self.fc = nn.Linear(512 * block.expansion, num_classes)
        # Feature width fed to the externally-attached head.
        self.embed_dim = 512 * block.expansion
        self.head = None
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
        # Zero-initialize the last BN in each residual branch,
        # so that the residual branch starts with zeros, and each residual block behaves like an identity.
        # This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
        if zero_init_residual:
            for m in self.modules():
                if isinstance(m, Bottleneck):
                    nn.init.constant_(m.bn3.weight, 0)  # type: ignore[arg-type]
                elif isinstance(m, BasicBlock):
                    nn.init.constant_(m.bn2.weight, 0)  # type: ignore[arg-type]
    def _make_layer(self, block: Type[Union[BasicBlock, Bottleneck]], planes: int, blocks: int,
                    stride: int = 1, dilate: bool = False) -> nn.Sequential:
        """Stack ``blocks`` residual blocks; the first may downsample/project."""
        norm_layer = self._norm_layer
        downsample = None
        previous_dilation = self.dilation
        if dilate:
            self.dilation *= stride
            stride = 1
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                conv1x1(self.inplanes, planes * block.expansion, stride),
                norm_layer(planes * block.expansion),
            )
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample, self.groups,
                            self.base_width, previous_dilation, norm_layer))
        self.inplanes = planes * block.expansion
        for _ in range(1, blocks):
            layers.append(block(self.inplanes, planes, groups=self.groups,
                                base_width=self.base_width, dilation=self.dilation,
                                norm_layer=norm_layer))
        return nn.Sequential(*layers)
    def _make_layer_nodown(self, inplanes: int, planes: int, blocks: int,
                           stride: int = 1, dilation: int = 1) -> nn.Sequential:
        """Variant of _make_layer with a bare 1x1-conv shortcut and no BN.

        NOTE(review): the shortcut conv is hard-coded to 256 -> 512 channels,
        ignoring ``inplanes``/``planes`` — only valid for that one stage;
        confirm against callers before reusing.
        """
        norm_layer = self._norm_layer
        downsample = nn.Conv2d(256, 512, kernel_size=1)
        previous_dilation = self.dilation = dilation
        layers = []
        layers.append(BasicBlock(inplanes, planes, stride, downsample, self.groups,
                                 self.base_width, previous_dilation, norm_layer))
        self.inplanes = planes * BasicBlock.expansion
        for _ in range(1, blocks):
            layers.append(BasicBlock(self.inplanes, planes, groups=self.groups,
                                     base_width=self.base_width, dilation=self.dilation,
                                     norm_layer=norm_layer))
        return nn.Sequential(*layers)
    def _forward_impl(self, x: Tensor) -> Tensor:
        """Stem -> 4 stages (each followed by ReLU) -> pool -> external head."""
        # See note [TorchScript super()]
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        # Blocks return pre-ReLU features, so activate after each stage.
        x = F.relu(self.layer1(x), inplace=True)
        x = F.relu(self.layer2(x), inplace=True)
        x = F.relu(self.layer3(x), inplace=True)
        x = F.relu(self.layer4(x), inplace=True)
        x = self.avgpool(x)
        x = torch.flatten(x, 1)
        x = self.head(x)
        return x
    def forward(self, x: Tensor) -> Tensor:
        return self._forward_impl(x)
    def forward_tokens(self, x):
        """Return the head's output reshaped to (B, H*W, embed_dim) token
        form, plus the list of raw (pre-ReLU on input side) stage features."""
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        f1 = self.layer1(x)
        f2 = self.layer2(F.relu(f1))
        f3 = self.layer3(F.relu(f2))
        f4 = self.layer4(F.relu(f3))
        x = self.head(f4)
        return x.view(x.shape[0], self.embed_dim, -1).permute(0, 2, 1), [f1, f2, f3, f4]
    def forward_features(self, x):
        """Return (pooled features, per-stage activated features, None) —
        the trailing None keeps the signature aligned with other backbones."""
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        f1 = F.relu(self.layer1(x), inplace=True)
        f2 = F.relu(self.layer2(f1), inplace=True)
        f3 = F.relu(self.layer3(f2), inplace=True)
        f4 = F.relu(self.layer4(f3), inplace=True)
        x = self.avgpool(f4)
        x = torch.flatten(x, 1)
        return x, [f1, f2, f3, f4], None
def _resnet(
    arch: str,
    block: Type[Union[BasicBlock, Bottleneck]],
    layers: List[int],
    pretrained: bool,
    progress: bool,
    **kwargs: Any
) -> ResNet:
    """Instantiate a ResNet and optionally load ImageNet weights.

    Args:
        arch: key into ``model_urls`` for the pretrained checkpoint.
        block: residual block class (BasicBlock or Bottleneck).
        layers: number of blocks per stage.
        pretrained: download and load the ImageNet state dict when True.
        progress: show a download progress bar when True.
        **kwargs: forwarded to :class:`ResNet`.
    """
    model = ResNet(block, layers, **kwargs)
    if pretrained:
        # BUG FIX: the project-local `load_state_dict_from_url` import is
        # commented out at the top of this file, so pretrained=True raised
        # NameError. torch.hub ships the same helper; import it locally.
        from torch.hub import load_state_dict_from_url
        state_dict = load_state_dict_from_url(model_urls[arch],
                                              progress=progress)
        model.load_state_dict(state_dict)
    return model
def resnet18(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
    """Construct a ResNet-18.

    See "Deep Residual Learning for Image Recognition"
    (https://arxiv.org/pdf/1512.03385.pdf).

    Args:
        pretrained: when True, load ImageNet-pretrained weights.
        progress: when True, show a download progress bar on stderr.
    """
    stage_blocks = [2, 2, 2, 2]
    return _resnet('resnet18', BasicBlock, stage_blocks, pretrained, progress, **kwargs)
def resnet34(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
    """Construct a ResNet-34.

    See "Deep Residual Learning for Image Recognition"
    (https://arxiv.org/pdf/1512.03385.pdf).

    Args:
        pretrained: when True, load ImageNet-pretrained weights.
        progress: when True, show a download progress bar on stderr.
    """
    stage_blocks = [3, 4, 6, 3]
    return _resnet('resnet34', BasicBlock, stage_blocks, pretrained, progress, **kwargs)
def resnet50(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
    """Construct a ResNet-50 (bottleneck blocks).

    See "Deep Residual Learning for Image Recognition"
    (https://arxiv.org/pdf/1512.03385.pdf).

    Args:
        pretrained: when True, load ImageNet-pretrained weights.
        progress: when True, show a download progress bar on stderr.
    """
    stage_blocks = [3, 4, 6, 3]
    return _resnet('resnet50', Bottleneck, stage_blocks, pretrained, progress, **kwargs)
def resnet101(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
    """Construct a ResNet-101 (bottleneck blocks).

    See "Deep Residual Learning for Image Recognition"
    (https://arxiv.org/pdf/1512.03385.pdf).

    Args:
        pretrained: when True, load ImageNet-pretrained weights.
        progress: when True, show a download progress bar on stderr.
    """
    stage_blocks = [3, 4, 23, 3]
    return _resnet('resnet101', Bottleneck, stage_blocks, pretrained, progress, **kwargs)
def resnet152(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
    """Construct a ResNet-152 (bottleneck blocks).

    See "Deep Residual Learning for Image Recognition"
    (https://arxiv.org/pdf/1512.03385.pdf).

    Args:
        pretrained: when True, load ImageNet-pretrained weights.
        progress: when True, show a download progress bar on stderr.
    """
    stage_blocks = [3, 8, 36, 3]
    return _resnet('resnet152', Bottleneck, stage_blocks, pretrained, progress, **kwargs)
def resnext50_32x4d(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
    """Construct a ResNeXt-50 32x4d.

    See "Aggregated Residual Transformation for Deep Neural Networks"
    (https://arxiv.org/pdf/1611.05431.pdf).

    Args:
        pretrained: when True, load ImageNet-pretrained weights.
        progress: when True, show a download progress bar on stderr.
    """
    # 32 groups of width 4 in every bottleneck (overrides caller values).
    kwargs.update(groups=32, width_per_group=4)
    return _resnet('resnext50_32x4d', Bottleneck, [3, 4, 6, 3],
                   pretrained, progress, **kwargs)
def resnext101_32x8d(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
    """Construct a ResNeXt-101 32x8d.

    See "Aggregated Residual Transformation for Deep Neural Networks"
    (https://arxiv.org/pdf/1611.05431.pdf).

    Args:
        pretrained: when True, load ImageNet-pretrained weights.
        progress: when True, show a download progress bar on stderr.
    """
    # 32 groups of width 8 in every bottleneck (overrides caller values).
    kwargs.update(groups=32, width_per_group=8)
    return _resnet('resnext101_32x8d', Bottleneck, [3, 4, 23, 3],
                   pretrained, progress, **kwargs)
def wide_resnet50_2(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
    """Construct a Wide ResNet-50-2.

    See "Wide Residual Networks" (https://arxiv.org/pdf/1605.07146.pdf).
    Identical to ResNet-50 except every bottleneck's inner 3x3 conv is twice
    as wide; the outer 1x1 widths are unchanged (e.g. the last stage is
    2048-1024-2048 instead of 2048-512-2048).

    Args:
        pretrained: when True, load ImageNet-pretrained weights.
        progress: when True, show a download progress bar on stderr.
    """
    kwargs.update(width_per_group=128)  # 64 * 2
    return _resnet('wide_resnet50_2', Bottleneck, [3, 4, 6, 3],
                   pretrained, progress, **kwargs)
def wide_resnet101_2(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
    """Construct a Wide ResNet-101-2.

    See "Wide Residual Networks" (https://arxiv.org/pdf/1605.07146.pdf).
    Identical to ResNet-101 except every bottleneck's inner 3x3 conv is
    twice as wide; the outer 1x1 widths are unchanged.

    Args:
        pretrained: when True, load ImageNet-pretrained weights.
        progress: when True, show a download progress bar on stderr.
    """
    kwargs.update(width_per_group=128)  # 64 * 2
    return _resnet('wide_resnet101_2', Bottleneck, [3, 4, 23, 3],
                   pretrained, progress, **kwargs)
| 17,344 | 38.331066 | 111 | py |
dytox | dytox-main/continual/cnn/vgg.py | import torch
import torch.nn as nn
#from .utils import load_state_dict_from_url
from typing import Union, List, Dict, Any, cast
from continual.cnn import AbstractCNN
# Public API of this module.
__all__ = [
    'VGG', 'vgg11', 'vgg11_bn', 'vgg13', 'vgg13_bn', 'vgg16', 'vgg16_bn',
    'vgg19_bn', 'vgg19',
]

# torchvision download URLs for ImageNet-pretrained checkpoints, keyed by
# architecture name (used by `_vgg` when pretrained=True).
model_urls = {
    'vgg11': 'https://download.pytorch.org/models/vgg11-8a719046.pth',
    'vgg13': 'https://download.pytorch.org/models/vgg13-19584684.pth',
    'vgg16': 'https://download.pytorch.org/models/vgg16-397923af.pth',
    'vgg19': 'https://download.pytorch.org/models/vgg19-dcbb9e9d.pth',
    'vgg11_bn': 'https://download.pytorch.org/models/vgg11_bn-6002323d.pth',
    'vgg13_bn': 'https://download.pytorch.org/models/vgg13_bn-abd245e5.pth',
    'vgg16_bn': 'https://download.pytorch.org/models/vgg16_bn-6c64b313.pth',
    'vgg19_bn': 'https://download.pytorch.org/models/vgg19_bn-c79401a0.pth',
}
class VGG(AbstractCNN):
    """VGG backbone for the continual framework: ``classifier`` stops after
    the second Dropout (no final Linear) — the classification head is
    attached externally as ``self.head`` and ``embed_dim`` (4096) is the
    feature width it must accept.
    """
    def __init__(
        self,
        features: nn.Module,
        num_classes: int = 1000,
        init_weights: bool = True
    ) -> None:
        # NOTE(review): num_classes is unused — the head is external.
        super(VGG, self).__init__()
        self.features = features
        self.avgpool = nn.AdaptiveAvgPool2d((7, 7))
        self.classifier = nn.Sequential(
            nn.Linear(512 * 7 * 7, 4096),
            nn.ReLU(True),
            nn.Dropout(),
            nn.Linear(4096, 4096),
            nn.ReLU(True),
            nn.Dropout(),
        )
        # Head is attached externally by the continual-learning framework.
        self.head = None
        self.embed_dim = 4096
        if init_weights:
            self._initialize_weights()
    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """features -> 7x7 adaptive pool -> flatten -> classifier -> head."""
        x = self.features(x)
        x = self.avgpool(x)
        x = torch.flatten(x, 1)
        x = self.classifier(x)
        return self.head(x)
    def _initialize_weights(self) -> None:
        """Kaiming-init convs, identity-init BN, small-normal-init Linears."""
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear):
                nn.init.normal_(m.weight, 0, 0.01)
                nn.init.constant_(m.bias, 0)
def make_layers(cfg: List[Union[str, int]], batch_norm: bool = False) -> nn.Sequential:
    """Translate a VGG config list into a Sequential feature extractor.

    Each ``'M'`` entry becomes a 2x2 max-pool; each integer becomes a 3x3
    conv (padding 1) with that many output channels, optionally followed by
    BatchNorm, then an in-place ReLU. Input is assumed to have 3 channels.
    """
    layers: List[nn.Module] = []
    channels = 3
    for entry in cfg:
        if entry == 'M':
            layers.append(nn.MaxPool2d(kernel_size=2, stride=2))
            continue
        width = cast(int, entry)
        layers.append(nn.Conv2d(channels, width, kernel_size=3, padding=1))
        if batch_norm:
            layers.append(nn.BatchNorm2d(width))
        layers.append(nn.ReLU(inplace=True))
        channels = width
    return nn.Sequential(*layers)
# VGG layer configurations consumed by `make_layers`: integers are 3x3-conv
# output widths, 'M' marks a 2x2 max-pool. 'A'=VGG11, 'B'=VGG13, 'D'=VGG16,
# 'E'=VGG19.
cfgs: Dict[str, List[Union[str, int]]] = {
    'A': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
    'B': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
    'D': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],
    'E': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'],
}
def _vgg(arch: str, cfg: str, batch_norm: bool, pretrained: bool, progress: bool, **kwargs: Any) -> VGG:
    """Instantiate a VGG variant and optionally load ImageNet weights.

    Args:
        arch: key into ``model_urls`` for the pretrained checkpoint.
        cfg: key into ``cfgs`` selecting the layer layout.
        batch_norm: insert BatchNorm after every conv when True.
        pretrained: download and load the ImageNet state dict when True
            (random init is skipped since weights are overwritten anyway).
        progress: show a download progress bar when True.
        **kwargs: forwarded to :class:`VGG`.
    """
    if pretrained:
        kwargs['init_weights'] = False
    model = VGG(make_layers(cfgs[cfg], batch_norm=batch_norm), **kwargs)
    if pretrained:
        # BUG FIX: the project-local `load_state_dict_from_url` import is
        # commented out at the top of this file, so pretrained=True raised
        # NameError. torch.hub ships the same helper; import it locally.
        from torch.hub import load_state_dict_from_url
        state_dict = load_state_dict_from_url(model_urls[arch],
                                              progress=progress)
        model.load_state_dict(state_dict)
    return model
def vgg11(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> VGG:
    """Construct VGG-11 (configuration "A"), no batch norm.

    See "Very Deep Convolutional Networks for Large-Scale Image Recognition"
    (https://arxiv.org/pdf/1409.1556.pdf).

    Args:
        pretrained: when True, load ImageNet-pretrained weights.
        progress: when True, show a download progress bar on stderr.
    """
    return _vgg('vgg11', 'A', False, pretrained, progress, **kwargs)
def vgg11_bn(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> VGG:
    """Construct VGG-11 (configuration "A") with batch normalization.

    See "Very Deep Convolutional Networks for Large-Scale Image Recognition"
    (https://arxiv.org/pdf/1409.1556.pdf).

    Args:
        pretrained: when True, load ImageNet-pretrained weights.
        progress: when True, show a download progress bar on stderr.
    """
    return _vgg('vgg11_bn', 'A', True, pretrained, progress, **kwargs)
def vgg13(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> VGG:
    """Construct VGG-13 (configuration "B"), no batch norm.

    See "Very Deep Convolutional Networks for Large-Scale Image Recognition"
    (https://arxiv.org/pdf/1409.1556.pdf).

    Args:
        pretrained: when True, load ImageNet-pretrained weights.
        progress: when True, show a download progress bar on stderr.
    """
    return _vgg('vgg13', 'B', False, pretrained, progress, **kwargs)
def vgg13_bn(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> VGG:
    """Construct VGG-13 (configuration "B") with batch normalization.

    See "Very Deep Convolutional Networks for Large-Scale Image Recognition"
    (https://arxiv.org/pdf/1409.1556.pdf).

    Args:
        pretrained: when True, load ImageNet-pretrained weights.
        progress: when True, show a download progress bar on stderr.
    """
    return _vgg('vgg13_bn', 'B', True, pretrained, progress, **kwargs)
def vgg16(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> VGG:
    """Construct VGG-16 (configuration "D"), no batch norm.

    See "Very Deep Convolutional Networks for Large-Scale Image Recognition"
    (https://arxiv.org/pdf/1409.1556.pdf).

    Args:
        pretrained: when True, load ImageNet-pretrained weights.
        progress: when True, show a download progress bar on stderr.
    """
    return _vgg('vgg16', 'D', False, pretrained, progress, **kwargs)
def vgg16_bn(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> VGG:
    """Construct VGG-16 (configuration "D") with batch normalization.

    See "Very Deep Convolutional Networks for Large-Scale Image Recognition"
    (https://arxiv.org/pdf/1409.1556.pdf).

    Args:
        pretrained: when True, load ImageNet-pretrained weights.
        progress: when True, show a download progress bar on stderr.
    """
    return _vgg('vgg16_bn', 'D', True, pretrained, progress, **kwargs)
def vgg19(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> VGG:
    """Construct VGG-19 (configuration "E"), no batch norm.

    See "Very Deep Convolutional Networks for Large-Scale Image Recognition"
    (https://arxiv.org/pdf/1409.1556.pdf).

    Args:
        pretrained: when True, load ImageNet-pretrained weights.
        progress: when True, show a download progress bar on stderr.
    """
    return _vgg('vgg19', 'E', False, pretrained, progress, **kwargs)
def vgg19_bn(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> VGG:
    """Construct VGG-19 (configuration "E") with batch normalization.

    See "Very Deep Convolutional Networks for Large-Scale Image Recognition"
    (https://arxiv.org/pdf/1409.1556.pdf).

    Args:
        pretrained: when True, load ImageNet-pretrained weights.
        progress: when True, show a download progress bar on stderr.
    """
    return _vgg('vgg19_bn', 'E', True, pretrained, progress, **kwargs)
| 7,858 | 39.302564 | 114 | py |
dytox | dytox-main/continual/cnn/senet.py |
from continual.cnn import AbstractCNN
"""
SEResNet implementation from Cadene's pretrained models
https://github.com/Cadene/pretrained-models.pytorch/blob/master/pretrainedmodels/models/senet.py
Additional credit to https://github.com/creafz
Original model: https://github.com/hujie-frank/SENet
ResNet code gently borrowed from
https://github.com/pytorch/vision/blob/master/torchvision/models/resnet.py
FIXME I'm deprecating this model and moving them to ResNet as I don't want to maintain duplicate
support for extras like dilation, switchable BN/activations, feature extraction, etc that don't exist here.
"""
import math
from collections import OrderedDict
import torch.nn as nn
import torch.nn.functional as F
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.models.helpers import build_model_with_cfg
from timm.models.layers import create_classifier
from timm.models.registry import register_model
__all__ = ['SENet']
def _cfg(url='', **kwargs):
    """Build a default pretrained-weights config dict; any entry can be
    overridden (and new keys added) through **kwargs."""
    cfg = {
        'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7),
        'crop_pct': 0.875, 'interpolation': 'bilinear',
        'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
        'first_conv': 'layer0.conv1', 'classifier': 'last_linear',
    }
    cfg.update(kwargs)
    return cfg
# Pretrained-weight configurations keyed by model name; each value is a full
# config dict produced by `_cfg` (URL, input size, normalization stats, ...).
default_cfgs = {
    'legacy_senet154':
        _cfg(url='http://data.lip6.fr/cadene/pretrainedmodels/senet154-c7b49a05.pth'),
    'legacy_seresnet18': _cfg(
        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/seresnet18-4bb0ce65.pth',
        interpolation='bicubic'),
    'legacy_seresnet34': _cfg(
        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/seresnet34-a4004e63.pth'),
    'legacy_seresnet50': _cfg(
        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-cadene/se_resnet50-ce0d4300.pth'),
    'legacy_seresnet101': _cfg(
        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-cadene/se_resnet101-7e38fcc6.pth'),
    'legacy_seresnet152': _cfg(
        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-cadene/se_resnet152-d17c99b7.pth'),
    'legacy_seresnext26_32x4d': _cfg(
        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/seresnext26_32x4d-65ebdb501.pth',
        interpolation='bicubic'),
    'legacy_seresnext50_32x4d':
        _cfg(url='http://data.lip6.fr/cadene/pretrainedmodels/se_resnext50_32x4d-a260b3a4.pth'),
    'legacy_seresnext101_32x4d':
        _cfg(url='http://data.lip6.fr/cadene/pretrainedmodels/se_resnext101_32x4d-3b2fe3d8.pth'),
}
def _weight_init(m):
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1.)
nn.init.constant_(m.bias, 0.)
class SEModule(nn.Module):
    """Squeeze-and-Excitation: channel-wise gating via a 1x1-conv bottleneck."""

    def __init__(self, channels, reduction):
        super(SEModule, self).__init__()
        reduced = channels // reduction
        self.fc1 = nn.Conv2d(channels, reduced, kernel_size=1)
        self.relu = nn.ReLU(inplace=True)
        self.fc2 = nn.Conv2d(reduced, channels, kernel_size=1)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        # Squeeze: global average pool to a (N, C, 1, 1) descriptor.
        gate = x.mean((2, 3), keepdim=True)
        # Excite: bottleneck MLP producing per-channel scales in (0, 1).
        gate = self.relu(self.fc1(gate))
        gate = self.sigmoid(self.fc2(gate))
        return x * gate
class Bottleneck(nn.Module):
    """
    Base class for bottlenecks that implements `forward()` method.
    Subclasses are expected to define conv1/bn1, conv2/bn2, conv3/bn3,
    relu, se_module and (optionally) downsample.
    """

    def forward(self, x):
        shortcut = self.downsample(x) if self.downsample is not None else x

        out = self.relu(self.bn1(self.conv1(x)))
        out = self.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))

        # SE gating is applied before the residual addition.
        out = self.se_module(out) + shortcut
        return self.relu(out)
class SEBottleneck(Bottleneck):
    """SENet154 bottleneck: widened 1x1 -> grouped 3x3 -> 1x1, plus an SE gate."""
    expansion = 4

    def __init__(self, inplanes, planes, groups, reduction, stride=1,
                 downsample=None):
        super(SEBottleneck, self).__init__()
        mid = planes * 2
        out_ch = planes * 4
        self.conv1 = nn.Conv2d(inplanes, mid, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(mid)
        # Spatial reduction (when stride > 1) happens on the grouped 3x3.
        self.conv2 = nn.Conv2d(
            mid, out_ch, kernel_size=3, stride=stride, padding=1,
            groups=groups, bias=False)
        self.bn2 = nn.BatchNorm2d(out_ch)
        self.conv3 = nn.Conv2d(out_ch, out_ch, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(out_ch)
        self.relu = nn.ReLU(inplace=True)
        self.se_module = SEModule(out_ch, reduction=reduction)
        self.downsample = downsample
        self.stride = stride
class SEResNetBottleneck(Bottleneck):
    """
    ResNet bottleneck with a Squeeze-and-Excitation module. It follows the
    Caffe implementation and puts `stride=stride` on `conv1` rather than on
    `conv2` (which is what torchvision's ResNet does).
    """
    expansion = 4

    def __init__(self, inplanes, planes, groups, reduction, stride=1,
                 downsample=None):
        super(SEResNetBottleneck, self).__init__()
        out_ch = planes * 4
        # Caffe-style: the stride lives on the 1x1 reduction conv.
        self.conv1 = nn.Conv2d(
            inplanes, planes, kernel_size=1, bias=False, stride=stride)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(
            planes, planes, kernel_size=3, padding=1, groups=groups, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, out_ch, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(out_ch)
        self.relu = nn.ReLU(inplace=True)
        self.se_module = SEModule(out_ch, reduction=reduction)
        self.downsample = downsample
        self.stride = stride
class SEResNeXtBottleneck(Bottleneck):
    """
    ResNeXt bottleneck type C with a Squeeze-and-Excitation module.
    """
    expansion = 4

    def __init__(self, inplanes, planes, groups, reduction, stride=1,
                 downsample=None, base_width=4):
        super(SEResNeXtBottleneck, self).__init__()
        # ResNeXt width rule: scale by base_width/64, then multiply by groups.
        width = math.floor(planes * (base_width / 64)) * groups
        out_ch = planes * 4
        self.conv1 = nn.Conv2d(
            inplanes, width, kernel_size=1, bias=False, stride=1)
        self.bn1 = nn.BatchNorm2d(width)
        self.conv2 = nn.Conv2d(
            width, width, kernel_size=3, stride=stride, padding=1,
            groups=groups, bias=False)
        self.bn2 = nn.BatchNorm2d(width)
        self.conv3 = nn.Conv2d(width, out_ch, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(out_ch)
        self.relu = nn.ReLU(inplace=True)
        self.se_module = SEModule(out_ch, reduction=reduction)
        self.downsample = downsample
        self.stride = stride
class SEResNetBlock(nn.Module):
    """Basic (non-bottleneck) residual block with a Squeeze-and-Excitation gate."""
    expansion = 1

    def __init__(self, inplanes, planes, groups, reduction, stride=1, downsample=None):
        super(SEResNetBlock, self).__init__()
        self.conv1 = nn.Conv2d(
            inplanes, planes, kernel_size=3, padding=1, stride=stride, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(
            planes, planes, kernel_size=3, padding=1, groups=groups, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.relu = nn.ReLU(inplace=True)
        self.se_module = SEModule(planes, reduction=reduction)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        shortcut = self.downsample(x) if self.downsample is not None else x
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.relu(self.bn2(self.conv2(out)))
        # SE gating before the residual addition, then a final ReLU.
        out = self.se_module(out) + shortcut
        return self.relu(out)
class SENet(AbstractCNN):
    """Squeeze-and-Excitation network (legacy timm/Cadene port).

    One backbone class covering SENet154, SE-ResNet and SE-ResNeXt; the block
    type and stage layout are selected via the constructor arguments.
    Inherits the continual-learning interface from :class:`AbstractCNN`.
    """

    def __init__(self, block, layers, groups, reduction, drop_rate=0.2,
                 in_chans=3, inplanes=64, input_3x3=False, downsample_kernel_size=1,
                 downsample_padding=0, num_classes=1000, global_pool='avg'):
        """
        Parameters
        ----------
        block (nn.Module): Bottleneck class.
            - For SENet154: SEBottleneck
            - For SE-ResNet models: SEResNetBottleneck
            - For SE-ResNeXt models:  SEResNeXtBottleneck
        layers (list of ints): Number of residual blocks for 4 layers of the
            network (layer1...layer4).
        groups (int): Number of groups for the 3x3 convolution in each
            bottleneck block.
            - For SENet154: 64
            - For SE-ResNet models: 1
            - For SE-ResNeXt models: 32
        reduction (int): Reduction ratio for Squeeze-and-Excitation modules.
            - For all models: 16
        drop_rate (float): Drop probability for the Dropout applied before the
            classifier in `logits`. If 0. no dropout is applied.
            - For SENet154: 0.2
            - For SE-ResNet / SE-ResNeXt models: 0.
        inplanes (int):  Number of input channels for layer1.
            - For SENet154: 128
            - For SE-ResNet models: 64
            - For SE-ResNeXt models: 64
        input_3x3 (bool): If `True`, use three 3x3 convolutions instead of
            a single 7x7 convolution in layer0.
            - For SENet154: True
            - For SE-ResNet models: False
            - For SE-ResNeXt models: False
        downsample_kernel_size (int): Kernel size for downsampling convolutions
            in layer2, layer3 and layer4.
            - For SENet154: 3
            - For SE-ResNet models: 1
            - For SE-ResNeXt models: 1
        downsample_padding (int): Padding for downsampling convolutions in
            layer2, layer3 and layer4.
            - For SENet154: 1
            - For SE-ResNet models: 0
            - For SE-ResNeXt models: 0
        num_classes (int): Number of outputs in `last_linear` layer.
            - For all models: 1000
        """
        super(SENet, self).__init__()
        self.inplanes = inplanes
        self.num_classes = num_classes
        self.drop_rate = drop_rate
        if input_3x3:
            # SENet154-style stem: three stacked 3x3 convs.
            layer0_modules = [
                ('conv1', nn.Conv2d(in_chans, 64, 3, stride=2, padding=1, bias=False)),
                ('bn1', nn.BatchNorm2d(64)),
                ('relu1', nn.ReLU(inplace=True)),
                ('conv2', nn.Conv2d(64, 64, 3, stride=1, padding=1, bias=False)),
                ('bn2', nn.BatchNorm2d(64)),
                ('relu2', nn.ReLU(inplace=True)),
                ('conv3', nn.Conv2d(64, inplanes, 3, stride=1, padding=1, bias=False)),
                ('bn3', nn.BatchNorm2d(inplanes)),
                ('relu3', nn.ReLU(inplace=True)),
            ]
        else:
            # Classic ResNet stem: single 7x7 conv.
            layer0_modules = [
                ('conv1', nn.Conv2d(
                    in_chans, inplanes, kernel_size=7, stride=2, padding=3, bias=False)),
                ('bn1', nn.BatchNorm2d(inplanes)),
                ('relu1', nn.ReLU(inplace=True)),
            ]
        self.layer0 = nn.Sequential(OrderedDict(layer0_modules))
        # To preserve compatibility with Caffe weights `ceil_mode=True` is used instead of `padding=1`.
        self.pool0 = nn.MaxPool2d(3, stride=2, ceil_mode=True)
        # feature_info mirrors timm's feature-extraction metadata (channels,
        # total stride, module name) for each stage.
        self.feature_info = [dict(num_chs=inplanes, reduction=2, module='layer0')]
        self.layer1 = self._make_layer(
            block,
            planes=64,
            blocks=layers[0],
            groups=groups,
            reduction=reduction,
            downsample_kernel_size=1,
            downsample_padding=0
        )
        self.feature_info += [dict(num_chs=64 * block.expansion, reduction=4, module='layer1')]
        self.layer2 = self._make_layer(
            block,
            planes=128,
            blocks=layers[1],
            stride=2,
            groups=groups,
            reduction=reduction,
            downsample_kernel_size=downsample_kernel_size,
            downsample_padding=downsample_padding
        )
        self.feature_info += [dict(num_chs=128 * block.expansion, reduction=8, module='layer2')]
        self.layer3 = self._make_layer(
            block,
            planes=256,
            blocks=layers[2],
            stride=2,
            groups=groups,
            reduction=reduction,
            downsample_kernel_size=downsample_kernel_size,
            downsample_padding=downsample_padding
        )
        self.feature_info += [dict(num_chs=256 * block.expansion, reduction=16, module='layer3')]
        self.layer4 = self._make_layer(
            block,
            planes=512,
            blocks=layers[3],
            stride=2,
            groups=groups,
            reduction=reduction,
            downsample_kernel_size=downsample_kernel_size,
            downsample_padding=downsample_padding
        )
        self.feature_info += [dict(num_chs=512 * block.expansion, reduction=32, module='layer4')]
        self.num_features = 512 * block.expansion
        # embed_dim exposed for the continual-learning framework (same value
        # as num_features).
        self.embed_dim = 512 * block.expansion
        self.global_pool, self.last_linear = create_classifier(
            self.num_features, self.num_classes, pool_type=global_pool)
        for m in self.modules():
            _weight_init(m)

    def _make_layer(self, block, planes, blocks, groups, reduction, stride=1,
                    downsample_kernel_size=1, downsample_padding=0):
        # Build one residual stage; a projection shortcut is added when the
        # spatial or channel dimensions change.
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(
                    self.inplanes, planes * block.expansion, kernel_size=downsample_kernel_size,
                    stride=stride, padding=downsample_padding, bias=False),
                nn.BatchNorm2d(planes * block.expansion),
            )
        layers = [block(self.inplanes, planes, groups, reduction, stride, downsample)]
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes, groups, reduction))
        return nn.Sequential(*layers)

    def get_classifier(self):
        return self.last_linear

    def reset_classifier(self, num_classes, global_pool='avg'):
        # Replace pooling + classifier for a new number of classes.
        self.num_classes = num_classes
        self.global_pool, self.last_linear = create_classifier(
            self.num_features, self.num_classes, pool_type=global_pool)

    def forward_features(self, x):
        # Stem + 4 residual stages; returns the unpooled feature map.
        x = self.layer0(x)
        x = self.pool0(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        return x

    def logits(self, x):
        x = self.global_pool(x)
        if self.drop_rate > 0.:
            x = F.dropout(x, p=self.drop_rate, training=self.training)
        # NOTE(review): `self.head` is never defined in this class — the
        # classifier built above is `self.last_linear`. Presumably the
        # surrounding continual-learning framework assigns `head` before the
        # forward pass; confirm, otherwise this raises AttributeError.
        x = self.head(x)
        return x

    def forward(self, x):
        x = self.forward_features(x)
        x = self.logits(x)
        return x
def _create_senet(variant, pretrained=False, **kwargs):
    """Instantiate a SENet variant through timm's config-aware build helper."""
    cfg = default_cfgs[variant]
    return build_model_with_cfg(
        SENet, variant, pretrained, default_cfg=cfg, **kwargs)
@register_model
def legacy_seresnet18(pretrained=False, **kwargs):
    """SE-ResNet-18 with the original (legacy) pretrained weights."""
    model_args = dict(
        block=SEResNetBlock, layers=[2, 2, 2, 2], groups=1, reduction=16, **kwargs)
    return _create_senet('legacy_seresnet18', pretrained, **model_args)
@register_model
def legacy_seresnet34(pretrained=False, **kwargs):
    """SE-ResNet-34 with the original (legacy) pretrained weights."""
    model_args = dict(
        block=SEResNetBlock, layers=[3, 4, 6, 3], groups=1, reduction=16, **kwargs)
    return _create_senet('legacy_seresnet34', pretrained, **model_args)
@register_model
def legacy_seresnet50(pretrained=False, **kwargs):
    """SE-ResNet-50 with the original (legacy) pretrained weights."""
    model_args = dict(
        block=SEResNetBottleneck, layers=[3, 4, 6, 3], groups=1, reduction=16, **kwargs)
    return _create_senet('legacy_seresnet50', pretrained, **model_args)
@register_model
def legacy_seresnet101(pretrained=False, **kwargs):
    """SE-ResNet-101 with the original (legacy) pretrained weights."""
    model_args = dict(
        block=SEResNetBottleneck, layers=[3, 4, 23, 3], groups=1, reduction=16, **kwargs)
    return _create_senet('legacy_seresnet101', pretrained, **model_args)
@register_model
def legacy_seresnet152(pretrained=False, **kwargs):
    """SE-ResNet-152 with the original (legacy) pretrained weights."""
    model_args = dict(
        block=SEResNetBottleneck, layers=[3, 8, 36, 3], groups=1, reduction=16, **kwargs)
    return _create_senet('legacy_seresnet152', pretrained, **model_args)
@register_model
def legacy_senet154(pretrained=False, **kwargs):
    """SENet-154: 64-group bottlenecks, 3x3-stacked stem, 128-wide input."""
    model_args = dict(
        block=SEBottleneck, layers=[3, 8, 36, 3], groups=64, reduction=16,
        downsample_kernel_size=3, downsample_padding=1,  inplanes=128, input_3x3=True, **kwargs)
    return _create_senet('legacy_senet154', pretrained, **model_args)
@register_model
def legacy_seresnext26_32x4d(pretrained=False, **kwargs):
    """SE-ResNeXt-26 (32x4d) with the original (legacy) pretrained weights."""
    model_args = dict(
        block=SEResNeXtBottleneck, layers=[2, 2, 2, 2], groups=32, reduction=16, **kwargs)
    return _create_senet('legacy_seresnext26_32x4d', pretrained, **model_args)
@register_model
def legacy_seresnext50_32x4d(pretrained=False, **kwargs):
    """SE-ResNeXt-50 (32x4d) with the original (legacy) pretrained weights."""
    model_args = dict(
        block=SEResNeXtBottleneck, layers=[3, 4, 6, 3], groups=32, reduction=16, **kwargs)
    return _create_senet('legacy_seresnext50_32x4d', pretrained, **model_args)
@register_model
def legacy_seresnext101_32x4d(pretrained=False, **kwargs):
    """SE-ResNeXt-101 (32x4d) with the original (legacy) pretrained weights."""
    model_args = dict(
        block=SEResNeXtBottleneck, layers=[3, 4, 23, 3], groups=32, reduction=16, **kwargs)
    return _create_senet('legacy_seresnext101_32x4d', pretrained, **model_args)
| 17,758 | 36.545455 | 127 | py |
dytox | dytox-main/continual/cnn/resnet_scs.py | #from .utils import load_state_dict_from_url
from typing import Any, Callable, List, Optional, Type, Union
import torch
import torch.nn as nn
from torch.nn import functional as F
from continual.cnn import AbstractCNN
from torch import Tensor
__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101',
'resnet152', 'resnext50_32x4d', 'resnext101_32x8d',
'wide_resnet50_2', 'wide_resnet101_2']
# torchvision pretrained checkpoints, keyed by architecture name.
# NOTE(review): these are weights for *standard* convolutional ResNets; the
# models in this file replace convolutions with SharpenedCosineSimilarity
# layers, so loading these state dicts is unlikely to match — confirm before
# relying on `pretrained=True`.
model_urls = {
    'resnet18': 'https://download.pytorch.org/models/resnet18-f37072fd.pth',
    'resnet34': 'https://download.pytorch.org/models/resnet34-b627a593.pth',
    'resnet50': 'https://download.pytorch.org/models/resnet50-0676ba61.pth',
    'resnet101': 'https://download.pytorch.org/models/resnet101-63fe2227.pth',
    'resnet152': 'https://download.pytorch.org/models/resnet152-394f9c45.pth',
    'resnext50_32x4d': 'https://download.pytorch.org/models/resnext50_32x4d-7cdf4587.pth',
    'resnext101_32x8d': 'https://download.pytorch.org/models/resnext101_32x8d-8ba56ff5.pth',
    'wide_resnet50_2': 'https://download.pytorch.org/models/wide_resnet50_2-95faca4d.pth',
    'wide_resnet101_2': 'https://download.pytorch.org/models/wide_resnet101_2-32ee1156.pth',
}
class SharpenedCosineSimilarity(nn.Module):
    """Sharpened cosine similarity layer, a drop-in alternative to Conv2d.

    https://github.com/brohrer/sharpened_cosine_similarity_torch

    Computes the cosine similarity between each learned kernel and each image
    patch, then raises the magnitude of that similarity to a learned per-channel
    exponent ("sharpening") while preserving the sign.
    """
    def __init__(
        self,
        in_channels=1,
        out_channels=1,
        kernel_size=1,
        stride=1,
        padding=0,
        eps=1e-12,
    ):
        super(SharpenedCosineSimilarity, self).__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.kernel_size = kernel_size
        self.stride = stride
        # eps is a numerical floor so norms and |cos| never hit exactly zero.
        self.eps = eps
        self.padding = int(padding)
        # Kernels are stored flattened: (out_channels, in_channels, k*k).
        w = torch.empty(out_channels, in_channels, kernel_size, kernel_size)
        nn.init.xavier_uniform_(w)
        self.w = nn.Parameter(
            w.view(out_channels, in_channels, -1), requires_grad=True)
        # p: per-output-channel sharpening exponent, stored scaled by p_scale
        # for optimization stability. Effective exponent is (p / p_scale)^2,
        # initialized to (sqrt(2))^2 = 2.
        self.p_scale = 10
        p_init = 2**.5 * self.p_scale
        self.register_parameter("p", nn.Parameter(torch.empty(out_channels)))
        nn.init.constant_(self.p, p_init)
        # q: learned additive floor on the norms; effective offset is
        # (q / q_scale)^2, initialized to (10/100)^2 = 0.01.
        self.q_scale = 100
        self.register_parameter("q", nn.Parameter(torch.empty(1)))
        nn.init.constant_(self.q, 10)

    def forward(self, x):
        # Extract sliding patches as a zero-copy strided view, then flatten
        # each patch's spatial window.
        x = unfold2d(
            x,
            kernel_size=self.kernel_size,
            stride=self.stride,
            padding=self.padding)
        n, c, h, w, _, _ = x.shape
        x = x.reshape(n,c,h,w,-1)
        # After unfolded and reshaped, dimensions of the images x are
        # dim 0, n: batch size
        # dim 1, c: number of input channels
        # dim 2, h: number of rows in the image
        # dim 3, w: number of columns in the image
        # dim 4, l: kernel size, squared
        #
        # The dimensions of the weights w are
        # dim 0, v: number of output channels
        # dim 1, c: number of input channels
        # dim 2, l: kernel size, squared
        # Patch norms over (channel, window) dims, softened by the learned q.
        square_sum = torch.sum(torch.square(x), [1, 4], keepdim=True)
        x_norm = torch.add(
            torch.sqrt(square_sum + self.eps),
            torch.square(self.q / self.q_scale))
        # Kernel norms, softened the same way.
        square_sum = torch.sum(torch.square(self.w), [1, 2], keepdim=True)
        w_norm = torch.add(
            torch.sqrt(square_sum + self.eps),
            torch.square(self.q / self.q_scale))
        # Cosine similarity between normalized patches and normalized kernels.
        x = torch.einsum('nchwl,vcl->nvhw', x / x_norm, self.w / w_norm)
        # Sharpen: sign(cos) * (|cos| + eps)^((p/p_scale)^2), per output channel.
        sign = torch.sign(x)
        x = torch.abs(x) + self.eps
        x = x.pow(torch.square(self.p / self.p_scale).view(1, -1, 1, 1))
        return sign * x
def unfold2d(x, kernel_size:int, stride:int, padding:int):
    """Return a zero-copy (n, c, out_h, out_w, k, k) view of sliding patches.

    Pads `x` symmetrically by `padding`, then exposes every kernel_size x
    kernel_size window (stepped by `stride`) via `as_strided` — no data copy.
    """
    padded = F.pad(x, [padding] * 4)
    n, channels, height, width = padded.size()
    out_h = (height - kernel_size) // stride + 1
    out_w = (width - kernel_size) // stride + 1
    return padded.as_strided(
        (n, channels, out_h, out_w, kernel_size, kernel_size),
        (channels * height * width, height * width, stride * width, stride, width, 1))
class AbsPool(nn.Module):
    """Pool on magnitudes while keeping the sign of the dominant response.

    https://github.com/brohrer/sharpened_cosine_similarity_torch

    Wraps any pooling module and returns, per output location, whichever of
    pool(x) / pool(-x) is larger in magnitude, with its original sign.
    """
    def __init__(self, pooling_module=None, *args, **kwargs):
        super(AbsPool, self).__init__()
        self.pooling_layer = pooling_module(*args, **kwargs)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        pooled_pos = self.pooling_layer(x)
        pooled_neg = self.pooling_layer(-x)
        return torch.where(pooled_neg > pooled_pos, -pooled_neg, pooled_pos)
from functools import partial

# Convenience constructors: pooling variants that operate on |x| but keep the
# sign of the stronger-magnitude response (see AbsPool above).
MaxAbsPool2d = partial(AbsPool, nn.MaxPool2d)
AdaptiveAvgAbsPool = partial(AbsPool, nn.AdaptiveAvgPool2d)
AdaptiveMaxAbsPool = partial(AbsPool, nn.AdaptiveMaxPool2d)
def conv3x3(in_planes: int, out_planes: int, stride: int = 1, groups: int = 1, dilation: int = 1) -> nn.Conv2d:
    """3x3 convolution with padding"""
    # NOTE(review): despite mirroring torchvision's signature, `groups` is
    # silently ignored and `dilation` is used only as *padding* here —
    # SharpenedCosineSimilarity implements neither grouping nor dilation.
    # The return annotation says nn.Conv2d but an SCS module is returned;
    # confirm callers relying on groups != 1 (e.g. Bottleneck) are intended
    # to get a dense layer.
    return SharpenedCosineSimilarity(in_planes, out_planes, kernel_size=3, stride=stride,
                     padding=dilation)
def conv1x1(in_planes: int, out_planes: int, stride: int = 1) -> nn.Conv2d:
    """1x1 convolution"""
    # NOTE(review): returns a SharpenedCosineSimilarity module, not an
    # nn.Conv2d as the annotation claims.
    return SharpenedCosineSimilarity(in_planes, out_planes, kernel_size=1, stride=stride)
class BasicBlock(nn.Module):
    """Two 3x3 SCS layers with BN/ReLU and an additive shortcut."""

    expansion: int = 1

    def __init__(
        self,
        inplanes: int,
        planes: int,
        stride: int = 1,
        downsample: Optional[nn.Module] = None,
        groups: int = 1,
        base_width: int = 64,
        dilation: int = 1,
        norm_layer: Optional[Callable[..., nn.Module]] = None
    ) -> None:
        super(BasicBlock, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        if groups != 1 or base_width != 64:
            raise ValueError('BasicBlock only supports groups=1 and base_width=64')
        # Both conv1 and the (optional) downsample path reduce resolution
        # when stride != 1.
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = norm_layer(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = norm_layer(planes)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x: Tensor) -> Tensor:
        shortcut = x if self.downsample is None else self.downsample(x)
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        out += shortcut
        return self.relu(out)
class Bottleneck(nn.Module):
    """1x1 -> 3x3 -> 1x1 bottleneck built from SCS layers.

    Follows the ResNet V1.5 convention: downsampling (stride) is placed on the
    3x3 layer (conv2) rather than the first 1x1 (conv1), per
    https://ngc.nvidia.com/catalog/model-scripts/nvidia:resnet_50_v1_5_for_pytorch.
    """

    expansion: int = 4

    def __init__(
        self,
        inplanes: int,
        planes: int,
        stride: int = 1,
        downsample: Optional[nn.Module] = None,
        groups: int = 1,
        base_width: int = 64,
        dilation: int = 1,
        norm_layer: Optional[Callable[..., nn.Module]] = None
    ) -> None:
        super(Bottleneck, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        width = int(planes * (base_width / 64.)) * groups
        # Both conv2 and the (optional) downsample path reduce resolution
        # when stride != 1.
        self.conv1 = conv1x1(inplanes, width)
        self.bn1 = norm_layer(width)
        self.conv2 = conv3x3(width, width, stride, groups, dilation)
        self.bn2 = norm_layer(width)
        self.conv3 = conv1x1(width, planes * self.expansion)
        self.bn3 = norm_layer(planes * self.expansion)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x: Tensor) -> Tensor:
        shortcut = x if self.downsample is None else self.downsample(x)
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        out += shortcut
        return self.relu(out)
class ResNet(AbstractCNN):
    """ResNet backbone built from Sharpened-Cosine-Similarity layers.

    Differences from torchvision's ResNet: a 3x3 stride-1 stem instead of a
    7x7 stride-2 one, abs-aware pooling layers, a configurable final pooling
    mode, and no built-in fc — `self.head` starts as None and is expected to
    be set by the continual-learning framework.
    """

    def __init__(
        self,
        block: Type[Union[BasicBlock, Bottleneck]],
        layers: List[int],
        num_classes: int = 1000,
        zero_init_residual: bool = False,
        groups: int = 1,
        width_per_group: int = 64,
        replace_stride_with_dilation: Optional[List[bool]] = None,
        norm_layer: Optional[Callable[..., nn.Module]] = None,
        pool='avg'
    ) -> None:
        super(ResNet, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        self._norm_layer = norm_layer
        self.inplanes = 64
        self.dilation = 1
        if replace_stride_with_dilation is None:
            # each element in the tuple indicates if we should replace
            # the 2x2 stride with a dilated convolution instead
            replace_stride_with_dilation = [False, False, False]
        if len(replace_stride_with_dilation) != 3:
            raise ValueError("replace_stride_with_dilation should be None "
                             "or a 3-element tuple, got {}".format(replace_stride_with_dilation))
        self.groups = groups
        self.base_width = width_per_group
        # Stem: plain Conv2d (not SCS), 3x3 stride 1 — keeps more resolution
        # than the standard 7x7/stride-2 ImageNet stem.
        self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=3, padding=1,
                               bias=False)
        self.bn1 = norm_layer(self.inplanes)
        self.relu = nn.ReLU(inplace=True)
        # Magnitude-aware max pool (keeps the sign of the dominant response).
        self.maxpool = MaxAbsPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2,
                                       dilate=replace_stride_with_dilation[0])
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2,
                                       dilate=replace_stride_with_dilation[1])
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2,
                                       dilate=replace_stride_with_dilation[2])
        # Final global pooling: plain average, abs-average, or abs-max.
        if pool == 'avg':
            self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        elif pool == 'avgabs':
            self.avgpool = AdaptiveAvgAbsPool((1, 1))
        elif pool == 'maxabs':
            self.avgpool = AdaptiveMaxAbsPool((1, 1))
        else:
            raise NotImplementedError(f'Unknown final pooling for ResNet-SCS')
        #self.fc = nn.Linear(512 * block.expansion, num_classes)
        self.embed_dim = 512 * block.expansion
        # NOTE(review): head is None until the framework assigns a classifier;
        # forward() will fail if called before that — confirm intended usage.
        self.head = None
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
        # Zero-initialize the last BN in each residual branch,
        # so that the residual branch starts with zeros, and each residual block behaves like an identity.
        # This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
        if zero_init_residual:
            for m in self.modules():
                if isinstance(m, Bottleneck):
                    nn.init.constant_(m.bn3.weight, 0)  # type: ignore[arg-type]
                elif isinstance(m, BasicBlock):
                    nn.init.constant_(m.bn2.weight, 0)  # type: ignore[arg-type]

    def _make_layer(self, block: Type[Union[BasicBlock, Bottleneck]], planes: int, blocks: int,
                    stride: int = 1, dilate: bool = False) -> nn.Sequential:
        # Standard torchvision-style stage builder: first block may downsample,
        # the rest keep the (expanded) channel count.
        norm_layer = self._norm_layer
        downsample = None
        previous_dilation = self.dilation
        if dilate:
            self.dilation *= stride
            stride = 1
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                conv1x1(self.inplanes, planes * block.expansion, stride),
                norm_layer(planes * block.expansion),
            )
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample, self.groups,
                            self.base_width, previous_dilation, norm_layer))
        self.inplanes = planes * block.expansion
        for _ in range(1, blocks):
            layers.append(block(self.inplanes, planes, groups=self.groups,
                                base_width=self.base_width, dilation=self.dilation,
                                norm_layer=norm_layer))
        return nn.Sequential(*layers)

    def _make_layer_nodown(self, inplanes: int, planes: int, blocks: int,
                    stride: int = 1, dilation: int = 1) -> nn.Sequential:
        # NOTE(review): the shortcut projection is hard-coded to 256->512
        # channels, so this helper is only valid for one specific stage;
        # confirm callers before reusing.
        norm_layer = self._norm_layer
        downsample = nn.Conv2d(256, 512, kernel_size=1)
        previous_dilation = self.dilation = dilation
        layers = []
        layers.append(BasicBlock(inplanes, planes, stride, downsample, self.groups,
                            self.base_width, previous_dilation, norm_layer))
        self.inplanes = planes * BasicBlock.expansion
        for _ in range(1, blocks):
            layers.append(BasicBlock(self.inplanes, planes, groups=self.groups,
                                base_width=self.base_width, dilation=self.dilation,
                                norm_layer=norm_layer))
        return nn.Sequential(*layers)

    def _forward_impl(self, x: Tensor) -> Tensor:
        # See note [TorchScript super()]
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.avgpool(x)
        x = torch.flatten(x, 1)
        # Requires self.head to have been assigned (it is None at init).
        x = self.head(x)
        return x

    def forward(self, x: Tensor) -> Tensor:
        return self._forward_impl(x)

    def forward_tokens(self, x):
        # Returns a (batch, tokens, embed_dim) sequence of spatial features.
        # NOTE(review): here `head` is applied to the 4D feature map *before*
        # flattening, unlike _forward_impl — presumably head is spatial
        # (e.g. a 1x1 conv or identity) in this code path; confirm.
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.head(x)
        return x.view(x.shape[0], self.embed_dim, -1).permute(0, 2, 1)

    def forward_features(self, x):
        # Pooled feature vector plus two placeholders, matching the
        # (features, ..., ...) tuple interface used elsewhere in the project.
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.avgpool(x)
        x = torch.flatten(x, 1)
        return x, None, None
def _resnet(
    arch: str,
    block: Type[Union[BasicBlock, Bottleneck]],
    layers: List[int],
    pretrained: bool,
    progress: bool,
    **kwargs: Any
) -> ResNet:
    """Construct a ResNet-SCS model, optionally loading torchvision weights.

    Args:
        arch: key into `model_urls` selecting the checkpoint.
        block: residual block class (BasicBlock or Bottleneck).
        layers: number of blocks in each of the four stages.
        pretrained: if True, download and load the checkpoint for `arch`.
        progress: if True, show a download progress bar.
        **kwargs: forwarded to the ResNet constructor.
    """
    model = ResNet(block, layers, **kwargs)
    if pretrained:
        # Fix: the relative `load_state_dict_from_url` import is commented out
        # at the top of this file, so pretrained=True used to raise NameError.
        # Use the canonical torch.hub implementation instead.
        # NOTE(review): these checkpoints were trained on standard conv
        # ResNets; loading into an SCS model may fail on key/shape mismatch.
        from torch.hub import load_state_dict_from_url
        state_dict = load_state_dict_from_url(model_urls[arch],
                                              progress=progress)
        model.load_state_dict(state_dict)
    return model
def resnet18_scs(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
    r"""ResNet-18 model from
    `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_,
    with Sharpened-Cosine-Similarity layers and the default 'avg' final pooling.
    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    return _resnet('resnet18', BasicBlock, [2, 2, 2, 2], pretrained, progress,
                   **kwargs)
def resnet18_scs_avg(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
    r"""ResNet-18 model from
    `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_,
    with Sharpened-Cosine-Similarity layers and absolute-average final pooling
    (``pool='avgabs'``).
    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    return _resnet('resnet18', BasicBlock, [2, 2, 2, 2], pretrained, progress, pool='avgabs',
                   **kwargs)
def resnet18_scs_max(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
    r"""ResNet-18 model from
    `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_,
    with Sharpened-Cosine-Similarity layers and absolute-max final pooling
    (``pool='maxabs'``).
    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    return _resnet('resnet18', BasicBlock, [2, 2, 2, 2], pretrained, progress, pool='maxabs',
                   **kwargs)
def resnet34_scs(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
    r"""ResNet-34 model from
    `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_,
    with Sharpened-Cosine-Similarity layers.
    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    return _resnet('resnet34', BasicBlock, [3, 4, 6, 3], pretrained, progress,
                   **kwargs)
def resnet50_scs(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
    r"""ResNet-50 model from
    `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_,
    with Sharpened-Cosine-Similarity layers.
    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    return _resnet('resnet50', Bottleneck, [3, 4, 6, 3], pretrained, progress,
                   **kwargs)
def resnet101_scs(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
    r"""ResNet-101 model from
    `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_,
    with Sharpened-Cosine-Similarity layers.
    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    return _resnet('resnet101', Bottleneck, [3, 4, 23, 3], pretrained, progress,
                   **kwargs)
def resnet152_scs(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
    r"""ResNet-152 model from
    `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_,
    with Sharpened-Cosine-Similarity layers.
    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    return _resnet('resnet152', Bottleneck, [3, 8, 36, 3], pretrained, progress,
                   **kwargs)
def resnext50_32x4d(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
    r"""ResNeXt-50 32x4d model from
    `"Aggregated Residual Transformation for Deep Neural Networks" <https://arxiv.org/pdf/1611.05431.pdf>`_.
    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    # NOTE(review): the SCS conv3x3 helper ignores `groups`, so the grouped
    # convolutions this architecture is named for are effectively dense here.
    kwargs['groups'] = 32
    kwargs['width_per_group'] = 4
    return _resnet('resnext50_32x4d', Bottleneck, [3, 4, 6, 3],
                   pretrained, progress, **kwargs)
def resnext101_32x8d(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
    r"""ResNeXt-101 32x8d model from
    `"Aggregated Residual Transformation for Deep Neural Networks" <https://arxiv.org/pdf/1611.05431.pdf>`_.
    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    # NOTE(review): the SCS conv3x3 helper ignores `groups` (see above).
    kwargs['groups'] = 32
    kwargs['width_per_group'] = 8
    return _resnet('resnext101_32x8d', Bottleneck, [3, 4, 23, 3],
                   pretrained, progress, **kwargs)
def wide_resnet50_2(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
    r"""Wide ResNet-50-2 model from
    `"Wide Residual Networks" <https://arxiv.org/pdf/1605.07146.pdf>`_.
    The model is the same as ResNet except for the bottleneck number of channels
    which is twice larger in every block. The number of channels in outer 1x1
    convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048
    channels, and in Wide ResNet-50-2 has 2048-1024-2048.
    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    kwargs['width_per_group'] = 64 * 2
    return _resnet('wide_resnet50_2', Bottleneck, [3, 4, 6, 3],
                   pretrained, progress, **kwargs)
def wide_resnet101_2(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
    r"""Wide ResNet-101-2 model from
    `"Wide Residual Networks" <https://arxiv.org/pdf/1605.07146.pdf>`_.
    The model is the same as ResNet except for the bottleneck number of channels
    which is twice larger in every block. The number of channels in outer 1x1
    convolutions is the same, e.g. last block in ResNet-101 has 2048-512-2048
    channels, and in Wide ResNet-101-2 has 2048-1024-2048.
    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    kwargs['width_per_group'] = 64 * 2
    return _resnet('wide_resnet101_2', Bottleneck, [3, 4, 23, 3],
                   pretrained, progress, **kwargs)
| 21,904 | 37.029514 | 111 | py |
dytox | dytox-main/continual/cnn/abstract.py | from torch import nn
import continual.utils as cutils
class AbstractCNN(nn.Module):
    """Common continual-learning interface for the CNN backbones.

    Provides head management, finetuning/logging hooks (no-ops by default)
    and selective parameter freezing, mirroring the API expected by the
    surrounding training framework.
    """

    def reset_classifier(self):
        # Re-initialize the classification head in place.
        self.head.reset_parameters()

    def get_internal_losses(self, clf_loss):
        # Plain CNNs contribute no auxiliary losses.
        return {}

    def end_finetuning(self):
        # Hook called when finetuning ends; no-op for plain CNNs.
        pass

    def begin_finetuning(self):
        # Hook called when finetuning starts; no-op for plain CNNs.
        pass

    def epoch_log(self):
        # Extra per-epoch metrics to log; none by default.
        return {}

    def get_classifier(self):
        return self.head

    def freeze(self, names):
        # First reset everything to trainable mode, then selectively freeze.
        # (presumably freeze_parameters(..., requires_grad=True) *unfreezes* —
        # depends on cutils.freeze_parameters semantics; confirm.)
        cutils.freeze_parameters(self, requires_grad=True)
        self.train()
        for name in names:
            if name == 'head':
                cutils.freeze_parameters(self.head)
                self.head.eval()
            elif name == 'backbone':
                # Freeze every parameter except those of the head.
                for k, p in self.named_parameters():
                    if not k.startswith('head'):
                        cutils.freeze_parameters(p)
            elif name == 'all':
                cutils.freeze_parameters(self)
                self.eval()
            else:
                raise NotImplementedError(f'Unknown module name to freeze {name}')
| 1,063 | 24.333333 | 82 | py |
dytox | dytox-main/continual/cnn/resnet_rebuffi.py | """Pytorch port of the resnet used for CIFAR100 by iCaRL.
https://github.com/srebuffi/iCaRL/blob/master/iCaRL-TheanoLasagne/utils_cifar100.py
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import init
from continual.cnn import AbstractCNN
class DownsampleStride(nn.Module):
    """Parameter-free spatial downsampling by strided slicing.

    Keeps every ``n``-th row and column ("Option A"-style shortcut of the
    iCaRL/Rebuffi CIFAR ResNet).
    """

    def __init__(self, n=2):
        super(DownsampleStride, self).__init__()
        self._n = n

    def forward(self, x):
        # Fix: honor the configured factor instead of a hard-coded 2.
        # Behavior is unchanged for the default n=2.
        return x[..., ::self._n, ::self._n]
class DownsampleConv(nn.Module):
    """Learned downsampling shortcut: 1x1 stride-2 conv followed by BN."""

    def __init__(self, inplanes, planes):
        super().__init__()
        # Keep the Sequential layout ('0' conv, '1' bn) so checkpoints match.
        modules = [
            nn.Conv2d(inplanes, planes, stride=2, kernel_size=1, bias=False),
            nn.BatchNorm2d(planes),
        ]
        self.conv = nn.Sequential(*modules)

    def forward(self, x):
        return self.conv(x)
class ResidualBlock(nn.Module):
    """Post-activation basic residual block (conv-BN-ReLU, conv-BN, + shortcut).

    When ``increase_dim`` is True the block doubles the channel count and
    halves the spatial resolution; the shortcut is then downsampled either by
    striding (with zero-padding of the extra channels) or by a 1x1 conv,
    depending on ``downsampling``.
    """
    expansion = 1

    def __init__(self, inplanes, increase_dim=False, last_relu=False, downsampling="stride"):
        super(ResidualBlock, self).__init__()

        self.increase_dim = increase_dim

        if increase_dim:
            first_stride = 2
            planes = inplanes * 2
        else:
            first_stride = 1
            planes = inplanes

        self.conv_a = nn.Conv2d(
            inplanes, planes, kernel_size=3, stride=first_stride, padding=1, bias=False
        )
        self.bn_a = nn.BatchNorm2d(planes)

        self.conv_b = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn_b = nn.BatchNorm2d(planes)

        if increase_dim:
            if downsampling == "stride":
                # Strided shortcut keeps the channel count, so the residual
                # must be padded with zeros (see ``pad``) to match widths.
                self.downsampler = DownsampleStride()
                self._need_pad = True
            else:
                self.downsampler = DownsampleConv(inplanes, planes)
                self._need_pad = False

        self.last_relu = last_relu

    @staticmethod
    def pad(x):
        # Double the channel count by appending zeros (x.mul(0) keeps dtype/device).
        return torch.cat((x, x.mul(0)), 1)

    def forward(self, x):
        y = self.conv_a(x)
        y = self.bn_a(y)
        y = F.relu(y, inplace=True)

        y = self.conv_b(y)
        y = self.bn_b(y)

        if self.increase_dim:
            x = self.downsampler(x)
            if self._need_pad:
                x = self.pad(x)

        y = x + y

        if self.last_relu:
            y = F.relu(y, inplace=True)

        return y
class PreActResidualBlock(nn.Module):
    """Pre-activation residual block (BN -> ReLU -> conv, twice).

    Bug fix: the first convolution now consumes the pre-activated tensor ``y``
    instead of the raw input ``x`` — the original computed ``bn_a``/ReLU and
    then discarded the result.

    Also accepts the ``downsampling`` keyword that ``CifarResNet._make_layer``
    passes to its block class (only stride downsampling is implemented here,
    matching the original behaviour).
    """
    expansion = 1

    def __init__(self, inplanes, increase_dim=False, last_relu=False, downsampling="stride"):
        super().__init__()

        self.increase_dim = increase_dim

        if increase_dim:
            first_stride = 2
            planes = inplanes * 2
        else:
            first_stride = 1
            planes = inplanes

        self.bn_a = nn.BatchNorm2d(inplanes)
        self.conv_a = nn.Conv2d(
            inplanes, planes, kernel_size=3, stride=first_stride, padding=1, bias=False
        )

        self.bn_b = nn.BatchNorm2d(planes)
        self.conv_b = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)

        if increase_dim:
            # Strided shortcut; extra channels are zero-padded to match widths.
            self.downsample = DownsampleStride()
            self.pad = lambda x: torch.cat((x, x.mul(0)), 1)

        self.last_relu = last_relu

    def forward(self, x):
        y = self.bn_a(x)
        y = F.relu(y, inplace=True)
        # Bug fix: was ``self.conv_a(x)``, which threw away the pre-activation.
        y = self.conv_a(y)

        y = self.bn_b(y)
        y = F.relu(y, inplace=True)
        y = self.conv_b(y)

        if self.increase_dim:
            x = self.downsample(x)
            x = self.pad(x)

        y = x + y

        if self.last_relu:
            y = F.relu(y, inplace=True)

        return y
class Stage(nn.Module):
    """Sequence of residual blocks that also returns every intermediate output."""

    def __init__(self, blocks, block_relu=False):
        super().__init__()
        self.blocks = nn.ModuleList(blocks)
        self.block_relu = block_relu

    def forward(self, x):
        feats = []
        for block in self.blocks:
            x = block(x)
            # Record the (pre-ReLU) block output before any extra activation.
            feats.append(x)
            if self.block_relu:
                x = F.relu(x)
        return feats, x
class CifarResNet(AbstractCNN):
    """
    ResNet optimized for the Cifar Dataset, as specified in
    https://arxiv.org/abs/1512.03385.pdf
    """

    def __init__(
        self,
        n=5,
        nf=16,
        channels=3,
        preact=False,
        zero_residual=True,
        pooling_config={"type": "avg"},
        downsampling="stride",
        all_attentions=False,
        last_relu=True,
        **kwargs
    ):
        """ Constructor
        Args:
            depth: number of layers.
            num_classes: number of classes
            base_width: base width
        """
        # NOTE(review): ``pooling_config`` is a mutable default argument; it is
        # only read here so sharing is harmless, but callers should not mutate it.
        if kwargs:
            raise ValueError("Unused kwargs: {}.".format(kwargs))

        self.all_attentions = all_attentions
        self._downsampling_type = downsampling
        self.last_relu = last_relu
        self.zero_residual = zero_residual

        # Pre-activation blocks are selected with preact=True.
        Block = ResidualBlock if not preact else PreActResidualBlock

        super(CifarResNet, self).__init__()

        self.conv_1_3x3 = nn.Conv2d(channels, nf, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn_1 = nn.BatchNorm2d(nf)

        # Three stages of n, n-1 and n-2 blocks; widths grow nf -> 2nf -> 4nf.
        self.stage_1 = self._make_layer(Block, nf, increase_dim=False, n=n)
        self.stage_2 = self._make_layer(Block, nf, increase_dim=True, n=n - 1)
        self.stage_3 = self._make_layer(Block, 2 * nf, increase_dim=True, n=n - 2)
        self.stage_4 = Block(
            4 * nf, increase_dim=False, last_relu=False, downsampling=self._downsampling_type
        )

        if pooling_config["type"] == "avg":
            self.pool = nn.AdaptiveAvgPool2d((1, 1))
        else:
            raise ValueError("Unknown pooling type {}.".format(pooling_config["type"]))

        self.embed_dim = 4 * nf
        # The classification head is attached externally; it stays None here.
        self.head = None

        self.init_params()

    def init_params(self):
        # Kaiming init for convs/linears, unit-gain batch-norm; optionally zero
        # the last BN of each residual block so blocks start as identity maps.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear):
                nn.init.kaiming_normal_(m.weight, mode="fan_out", nonlinearity="relu")

        if self.zero_residual:
            for m in self.modules():
                if isinstance(m, ResidualBlock):
                    nn.init.constant_(m.bn_b.weight, 0)

    def _make_layer(self, Block, planes, increase_dim=False, n=None):
        layers = []

        if increase_dim:
            # First block doubles the width and halves the spatial resolution.
            layers.append(
                Block(
                    planes,
                    increase_dim=True,
                    last_relu=True,
                    downsampling=self._downsampling_type
                )
            )
            planes = 2 * planes

        for i in range(n):
            layers.append(Block(planes, last_relu=True, downsampling=self._downsampling_type))

        return Stage(layers, block_relu=self.last_relu)

    @property
    def last_conv(self):
        # Convenience handle to the final convolution (e.g. for CAM/hooks).
        return self.stage_4.conv_b

    def forward(self, x):
        x = self.conv_1_3x3(x)
        x = F.relu(self.bn_1(x), inplace=True)

        feats_s1, x = self.stage_1(x)
        feats_s2, x = self.stage_2(x)
        feats_s3, x = self.stage_3(x)
        x = self.stage_4(x)

        # Global pooling on the ReLU'd features, then the external head.
        features = self.end_features(F.relu(x, inplace=False))

        return self.head(features)

    def end_features(self, x):
        # Global average pool and flatten to (batch, embed_dim).
        x = self.pool(x)
        x = x.view(x.size(0), -1)
        return x
def resnet_rebuffi(n=5, **kwargs):
    """Build the iCaRL/Rebuffi CIFAR ResNet with ``n`` blocks per stage."""
    model = CifarResNet(n=n, **kwargs)
    return model
| 7,575 | 25.960854 | 97 | py |
2020-CBMS-DoubleU-Net | 2020-CBMS-DoubleU-Net-master/doubleunet_pytorch.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from torchvision.models import vgg19
class Conv2D(nn.Module):
    """Convolution + batch-norm, optionally followed by a ReLU activation."""

    def __init__(self, in_c, out_c, kernel_size=3, padding=1, dilation=1, bias=False, act=True):
        super().__init__()
        self.act = act
        self.conv = nn.Sequential(
            nn.Conv2d(
                in_c,
                out_c,
                kernel_size=kernel_size,
                padding=padding,
                dilation=dilation,
                bias=bias,
            ),
            nn.BatchNorm2d(out_c),
        )
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        out = self.conv(x)
        # ReLU is applied only when the block was built with act=True.
        return self.relu(out) if self.act == True else out
class squeeze_excitation_block(nn.Module):
    """Squeeze-and-excitation channel gating (global pool -> bottleneck MLP -> sigmoid)."""

    def __init__(self, in_channels, ratio=8):
        super().__init__()
        self.avgpool = nn.AdaptiveAvgPool2d(1)
        self.fc = nn.Sequential(
            nn.Linear(in_channels, in_channels // ratio),
            nn.ReLU(inplace=True),
            nn.Linear(in_channels // ratio, in_channels),
            nn.Sigmoid(),
        )

    def forward(self, x):
        b, c, _, _ = x.size()
        # Squeeze to (B, C), excite, then broadcast back over H x W.
        weights = self.avgpool(x).view(b, c)
        weights = self.fc(weights).view(b, c, 1, 1)
        return x * weights.expand_as(x)
class ASPP(nn.Module):
    """Atrous Spatial Pyramid Pooling: a pooled image-level branch plus four
    parallel (dilated) convolutions, concatenated and fused back to ``out_c``."""

    def __init__(self, in_c, out_c):
        super().__init__()

        # Image-level context: 2x2 adaptive pool followed by a 1x1 projection.
        self.avgpool = nn.Sequential(
            nn.AdaptiveAvgPool2d((2, 2)),
            Conv2D(in_c, out_c, kernel_size=1, padding=0)
        )

        # Parallel branches with increasing dilation (padding keeps resolution).
        self.c1 = Conv2D(in_c, out_c, kernel_size=1, padding=0, dilation=1)
        self.c2 = Conv2D(in_c, out_c, kernel_size=3, padding=6, dilation=6)
        self.c3 = Conv2D(in_c, out_c, kernel_size=3, padding=12, dilation=12)
        self.c4 = Conv2D(in_c, out_c, kernel_size=3, padding=18, dilation=18)
        # Fuses the five concatenated branches back to out_c channels.
        self.c5 = Conv2D(out_c*5, out_c, kernel_size=1, padding=0, dilation=1)

    def forward(self, x):
        x0 = self.avgpool(x)
        # Upsample the pooled branch back to the input resolution.
        x0 = F.interpolate(x0, size=x.size()[2:], mode="bilinear", align_corners=True)

        x1 = self.c1(x)
        x2 = self.c2(x)
        x3 = self.c3(x)
        x4 = self.c4(x)
        xc = torch.cat([x0, x1, x2, x3, x4], axis=1)
        y = self.c5(xc)

        return y
class conv_block(nn.Module):
    """Two Conv2D (conv-BN-ReLU) stages followed by squeeze-and-excitation."""

    def __init__(self, in_c, out_c):
        super().__init__()
        self.c1 = Conv2D(in_c, out_c)
        self.c2 = Conv2D(out_c, out_c)
        self.a1 = squeeze_excitation_block(out_c)

    def forward(self, x):
        out = self.c1(x)
        out = self.c2(out)
        return self.a1(out)
class encoder1(nn.Module):
    """VGG19-based encoder (ImageNet pre-trained). Returns the deepest feature
    map plus the four skip tensors ordered deepest-first for the decoder."""

    def __init__(self):
        super().__init__()

        # Slice the pretrained VGG19 feature extractor into its five conv stages.
        network = vgg19(pretrained=True)
        # print(network)

        self.x1 = network.features[:4]
        self.x2 = network.features[4:9]
        self.x3 = network.features[9:18]
        self.x4 = network.features[18:27]
        self.x5 = network.features[27:36]

    def forward(self, x):
        x0 = x
        x1 = self.x1(x0)
        x2 = self.x2(x1)
        x3 = self.x3(x2)
        x4 = self.x4(x3)
        x5 = self.x5(x4)

        # Skips are deepest-first to line up with decoder1's channel sizes.
        return x5, [x4, x3, x2, x1]
class decoder1(nn.Module):
    """First decoder: upsample, concatenate the VGG skip, conv_block — four times."""

    def __init__(self):
        super().__init__()

        self.up = nn.Upsample(scale_factor=2, mode="bilinear", align_corners=True)
        self.c1 = conv_block(64+512, 256)
        self.c2 = conv_block(512, 128)
        self.c3 = conv_block(256, 64)
        self.c4 = conv_block(128, 32)

    def forward(self, x, skip):
        # ``skip`` arrives deepest-first, matching c1..c4 channel counts.
        for feat, block in zip(skip, (self.c1, self.c2, self.c3, self.c4)):
            x = self.up(x)
            x = torch.cat([x, feat], axis=1)
            x = block(x)
        return x
class encoder2(nn.Module):
    """Plain convolutional encoder for the second U-Net (4 conv_block + pool stages)."""

    def __init__(self):
        super().__init__()

        self.pool = nn.MaxPool2d((2, 2))
        self.c1 = conv_block(3, 32)
        self.c2 = conv_block(32, 64)
        self.c3 = conv_block(64, 128)
        self.c4 = conv_block(128, 256)

    def forward(self, x):
        skips = []
        for block in (self.c1, self.c2, self.c3, self.c4):
            x = block(x)
            skips.append(x)
            x = self.pool(x)
        # Return skips deepest-first, as the decoders expect.
        return x, skips[::-1]
class decoder2(nn.Module):
    """Second decoder: upsample and fuse skip features from BOTH encoders."""

    def __init__(self):
        super().__init__()

        self.up = nn.Upsample(scale_factor=2, mode="bilinear", align_corners=True)
        self.c1 = conv_block(832, 256)
        self.c2 = conv_block(640, 128)
        self.c3 = conv_block(320, 64)
        self.c4 = conv_block(160, 32)

    def forward(self, x, skip1, skip2):
        # Both skip lists are deepest-first; fuse one level per block.
        for s1, s2, block in zip(skip1, skip2, (self.c1, self.c2, self.c3, self.c4)):
            x = self.up(x)
            x = torch.cat([x, s1, s2], axis=1)
            x = block(x)
        return x
class build_doubleunet(nn.Module):
    """DoubleU-Net: a VGG19 U-Net whose predicted mask gates the input of a
    second lightweight U-Net. Returns both (un-sigmoided) mask outputs."""

    def __init__(self):
        super().__init__()

        # Network 1: VGG19 encoder + ASPP + decoder.
        self.e1 = encoder1()
        self.a1 = ASPP(512, 64)
        self.d1 = decoder1()
        self.y1 = nn.Conv2d(32, 1, kernel_size=1, padding=0)
        self.sigmoid = nn.Sigmoid()

        # Network 2: plain conv encoder + ASPP + decoder fusing both skip sets.
        self.e2 = encoder2()
        self.a2 = ASPP(256, 64)
        self.d2 = decoder2()
        self.y2 = nn.Conv2d(32, 1, kernel_size=1, padding=0)

    def forward(self, x):
        x0 = x
        x, skip1 = self.e1(x)
        x = self.a1(x)
        x = self.d1(x, skip1)
        y1 = self.y1(x)

        # Gate the original input with the first mask (after sigmoid).
        input_x = x0 * self.sigmoid(y1)
        x, skip2 = self.e2(input_x)
        x = self.a2(x)
        x = self.d2(x, skip1, skip2)
        y2 = self.y2(x)

        return y1, y2
if __name__ == "__main__":
    # Smoke test: push a random batch through the network and print the shapes.
    x = torch.randn((8, 3, 256, 256))
    model = build_doubleunet()
    y1, y2 = model(x)
    print(y1.shape, y2.shape)
| 6,190 | 24.903766 | 96 | py |
2020-CBMS-DoubleU-Net | 2020-CBMS-DoubleU-Net-master/utils.py |
import os
import numpy as np
import cv2
import json
from glob import glob
from metrics import *
from sklearn.utils import shuffle
from tensorflow.keras.utils import CustomObjectScope
from tensorflow.keras.models import load_model
from model import build_model, Upsample, ASPP
def create_dir(path):
    """Create a directory (including parents); a pre-existing directory is fine.

    Any other OS failure is reported but not raised, matching the original
    best-effort behaviour.
    """
    try:
        # exist_ok avoids the check-then-create race of the original code.
        os.makedirs(path, exist_ok=True)
    except OSError:
        print(f"Error: creating directory with name {path}")
def read_data(x, y):
    """Load an image and its mask (both as BGR arrays) from the given paths."""
    image = cv2.imread(x, cv2.IMREAD_COLOR)
    mask = cv2.imread(y, cv2.IMREAD_COLOR)
    return image, mask
def read_params():
    """Load hyper-parameters from ``params.json`` in the working directory."""
    with open("params.json", "r") as f:
        params = json.loads(f.read())
    return params
def load_data(path):
    """Return (image paths, mask paths) found under ``path``/image and ``path``/mask."""
    images = glob(os.path.join(path, "image/*"))
    masks = glob(os.path.join(path, "mask/*"))
    return images, masks
def shuffling(x, y):
    """Shuffle the two sequences in unison with a fixed seed (deterministic)."""
    return shuffle(x, y, random_state=42)
def load_model_weight(path):
    """Load a saved Keras model with the project's custom losses/metrics registered."""
    custom_objects = {
        'dice_loss': dice_loss,
        'dice_coef': dice_coef,
        'bce_dice_loss': bce_dice_loss,
        'focal_loss': focal_loss,
        'iou': iou,
    }
    with CustomObjectScope(custom_objects):
        model = load_model(path)
    return model
| 1,566 | 24.688525 | 60 | py |
2020-CBMS-DoubleU-Net | 2020-CBMS-DoubleU-Net-master/model.py |
import tensorflow as tf
from tensorflow.keras.layers import *
from tensorflow.keras.models import Model
from tensorflow.keras.applications import *
def squeeze_excite_block(inputs, ratio=8):
    """Channel-wise squeeze-and-excitation gating on a Keras tensor."""
    channels = inputs.shape[-1]

    se = GlobalAveragePooling2D()(inputs)
    se = Reshape((1, 1, channels))(se)
    se = Dense(channels // ratio, activation='relu', kernel_initializer='he_normal', use_bias=False)(se)
    se = Dense(channels, activation='sigmoid', kernel_initializer='he_normal', use_bias=False)(se)

    return Multiply()([inputs, se])
def conv_block(inputs, filters):
    """Two 3x3 conv-BN-ReLU stages followed by squeeze-and-excitation."""
    x = inputs
    for _ in range(2):
        x = Conv2D(filters, (3, 3), padding="same")(x)
        x = BatchNormalization()(x)
        x = Activation('relu')(x)
    return squeeze_excite_block(x)
def encoder1(inputs):
    """VGG19 encoder: returns the bottleneck tensor and the four skip tensors."""
    model = VGG19(include_top=False, weights='imagenet', input_tensor=inputs)
    skip_names = ["block1_conv2", "block2_conv2", "block3_conv4", "block4_conv4"]
    skip_connections = [model.get_layer(n).output for n in skip_names]
    output = model.get_layer("block5_conv4").output
    return output, skip_connections
def decoder1(inputs, skip_connections):
    """Decoder for the first U-Net: upsample, concat VGG skip, conv block."""
    # Reverse in place (deepest skip first) — callers rely on this side effect.
    skip_connections.reverse()

    x = inputs
    for f, skip in zip([256, 128, 64, 32], skip_connections):
        x = UpSampling2D((2, 2), interpolation='bilinear')(x)
        x = Concatenate()([x, skip])
        x = conv_block(x, f)
    return x
# def encoder2(inputs):
# skip_connections = []
#
# output = DenseNet121(include_top=False, weights='imagenet')(inputs)
# model = tf.keras.models.Model(inputs, output)
#
# names = ["input_2", "conv1/relu", "pool2_conv", "pool3_conv"]
# for name in names:
# skip_connections.append(model.get_layer(name).output)
# output = model.get_layer("pool4_conv").output
#
# return output, skip_connections
def encoder2(inputs):
    """Plain convolutional encoder for the second U-Net (conv block + max-pool)."""
    x = inputs
    skip_connections = []
    for f in [32, 64, 128, 256]:
        x = conv_block(x, f)
        skip_connections.append(x)
        x = MaxPool2D((2, 2))(x)
    return x, skip_connections
def decoder2(inputs, skip_1, skip_2):
    """Decoder for the second U-Net, fusing skips from both encoders."""
    # skip_1 arrives already deepest-first (reversed by decoder1);
    # skip_2 is reversed in place here, mirroring the original behaviour.
    skip_2.reverse()

    x = inputs
    for f, s1, s2 in zip([256, 128, 64, 32], skip_1, skip_2):
        x = UpSampling2D((2, 2), interpolation='bilinear')(x)
        x = Concatenate()([x, s1, s2])
        x = conv_block(x, f)
    return x
def output_block(inputs):
    """1x1 convolution + sigmoid producing a single-channel mask."""
    logits = Conv2D(1, (1, 1), padding="same")(inputs)
    return Activation('sigmoid')(logits)
def Upsample(tensor, size):
    """Bilinear upsampling"""
    # Wrapped in a Lambda so tf.image.resize becomes a Keras layer; ``size``
    # is (height, width).
    def _upsample(x, size):
        return tf.image.resize(images=x, size=size)
    return Lambda(lambda x: _upsample(x, size), output_shape=size)(tensor)
def ASPP(x, filter):
    """Atrous Spatial Pyramid Pooling (Keras): an image-level pooled branch
    plus four (dilated) conv branches, concatenated and fused to ``filter``
    channels."""
    shape = x.shape

    # Image-level branch: global average pool, 1x1 conv, upsample back.
    y1 = AveragePooling2D(pool_size=(shape[1], shape[2]))(x)
    y1 = Conv2D(filter, 1, padding="same")(y1)
    y1 = BatchNormalization()(y1)
    y1 = Activation("relu")(y1)
    y1 = UpSampling2D((shape[1], shape[2]), interpolation='bilinear')(y1)

    y2 = Conv2D(filter, 1, dilation_rate=1, padding="same", use_bias=False)(x)
    y2 = BatchNormalization()(y2)
    y2 = Activation("relu")(y2)

    y3 = Conv2D(filter, 3, dilation_rate=6, padding="same", use_bias=False)(x)
    y3 = BatchNormalization()(y3)
    y3 = Activation("relu")(y3)

    y4 = Conv2D(filter, 3, dilation_rate=12, padding="same", use_bias=False)(x)
    y4 = BatchNormalization()(y4)
    y4 = Activation("relu")(y4)

    y5 = Conv2D(filter, 3, dilation_rate=18, padding="same", use_bias=False)(x)
    y5 = BatchNormalization()(y5)
    y5 = Activation("relu")(y5)

    # Fuse the five branches back to ``filter`` channels.
    y = Concatenate()([y1, y2, y3, y4, y5])
    y = Conv2D(filter, 1, dilation_rate=1, padding="same", use_bias=False)(y)
    y = BatchNormalization()(y)
    y = Activation("relu")(y)

    return y
def build_model(shape):
    """Assemble DoubleU-Net: a VGG19 U-Net whose sigmoid mask gates the input
    of a second U-Net; the model output concatenates both masks."""
    inputs = Input(shape)
    x, skip_1 = encoder1(inputs)
    x = ASPP(x, 64)
    x = decoder1(x, skip_1)
    outputs1 = output_block(x)

    # Gate the original input with the first predicted mask.
    x = inputs * outputs1

    x, skip_2 = encoder2(x)
    x = ASPP(x, 64)
    x = decoder2(x, skip_1, skip_2)
    outputs2 = output_block(x)
    # Both masks are stacked along the channel axis in the final output.
    outputs = Concatenate()([outputs1, outputs2])

    model = Model(inputs, outputs)
    return model
if __name__ == "__main__":
    # Quick sanity check: build the network and print its summary.
    model = build_model((192, 256, 3))
    model.summary()
| 4,571 | 27.397516 | 103 | py |
2020-CBMS-DoubleU-Net | 2020-CBMS-DoubleU-Net-master/metrics.py | import os
import numpy as np
import cv2
import tensorflow as tf
from tensorflow.keras import backend as K
from tensorflow.keras.losses import binary_crossentropy
smooth = 1e-15
def dice_coef(y_true, y_pred):
    """Soft Dice coefficient over the flattened tensors (smoothed)."""
    yt = tf.keras.layers.Flatten()(y_true)
    yp = tf.keras.layers.Flatten()(y_pred)
    overlap = tf.reduce_sum(yt * yp)
    return (2. * overlap + smooth) / (tf.reduce_sum(yt) + tf.reduce_sum(yp) + smooth)
def dice_loss(y_true, y_pred):
    """Dice loss: one minus the Dice coefficient."""
    coefficient = dice_coef(y_true, y_pred)
    return 1.0 - coefficient
def iou(y_true, y_pred):
    """Soft intersection-over-union, computed in numpy via tf.numpy_function."""
    def f(y_true, y_pred):
        # Element-wise product as soft intersection; smooth avoids 0/0.
        intersection = (y_true * y_pred).sum()
        union = y_true.sum() + y_pred.sum() - intersection
        x = (intersection + smooth) / (union + smooth)
        x = x.astype(np.float32)
        return x
    return tf.numpy_function(f, [y_true, y_pred], tf.float32)
def bce_dice_loss(y_true, y_pred):
    """Combined loss: binary cross-entropy plus Dice loss."""
    bce = binary_crossentropy(y_true, y_pred)
    return bce + dice_loss(y_true, y_pred)
def focal_loss(y_true, y_pred):
    """Binary focal loss (alpha=0.25, gamma=2) evaluated from probabilities.

    Probabilities are clipped and converted back to logits so the loss can be
    computed in a numerically stable log1p/relu form.
    """
    alpha=0.25
    gamma=2
    def focal_loss_with_logits(logits, targets, alpha, gamma, y_pred):
        # Focal modulation: down-weight easy examples on both classes.
        weight_a = alpha * (1 - y_pred) ** gamma * targets
        weight_b = (1 - alpha) * y_pred ** gamma * (1 - targets)

        # Stable BCE-with-logits, scaled by the focal weights.
        return (tf.math.log1p(tf.exp(-tf.abs(logits))) + tf.nn.relu(-logits)) * (weight_a + weight_b) + logits * weight_b

    # Clip to (eps, 1-eps) before the logit transform to avoid log(0).
    y_pred = tf.clip_by_value(y_pred, tf.keras.backend.epsilon(), 1 - tf.keras.backend.epsilon())
    logits = tf.math.log(y_pred / (1 - y_pred))

    loss = focal_loss_with_logits(logits=logits, targets=y_true, alpha=alpha, gamma=gamma, y_pred=y_pred)

    # or reduce_sum and/or axis=-1
    return tf.reduce_mean(loss)
| 1,661 | 37.651163 | 121 | py |
2020-CBMS-DoubleU-Net | 2020-CBMS-DoubleU-Net-master/predict.py |
import os
import numpy as np
import cv2
import tensorflow as tf
from tensorflow.keras.models import load_model
from tensorflow.keras.utils import CustomObjectScope
from glob import glob
from tqdm import tqdm
from sklearn.model_selection import train_test_split
from utils import *
from train import tf_dataset
def read_image(x):
    """Read an image, median-center it around 127, scale to [0, 1], add batch axis."""
    img = cv2.imread(x, cv2.IMREAD_COLOR)
    img = np.clip(img - np.median(img) + 127, 0, 255)
    img = img / 255.0
    img = img.astype(np.float32)
    return np.expand_dims(img, axis=0)
def read_mask(y):
    """Read a mask as grayscale, scale to [0, 1], and add a channel axis."""
    mask = cv2.imread(y, cv2.IMREAD_GRAYSCALE)
    mask = mask.astype(np.float32) / 255.0
    return np.expand_dims(mask, axis=-1)
def mask_to_3d(mask):
    """Replicate a single-channel (H, W, 1) mask into a 3-channel (H, W, 3) array."""
    squeezed = np.squeeze(mask)
    stacked = np.stack([squeezed, squeezed, squeezed])
    return np.transpose(stacked, (1, 2, 0))
def parse(y_pred):
    """Cast a prediction map to float32 and append a trailing channel axis.

    (The original's expand_dims(axis=-1) immediately followed by [..., -1] is
    a no-op, so the net effect is exactly a dtype cast plus one new axis.)
    """
    y = np.asarray(y_pred).astype(np.float32)
    return np.expand_dims(y, axis=-1)
def evaluate_normal(model, x_data, y_data):
    """Run the model over (image, mask) path pairs and save side-by-side
    visualisations (input | ground truth | output1 | output2) to results/."""
    THRESHOLD = 0.5  # NOTE(review): unused — predictions are saved unthresholded.
    total = []  # NOTE(review): also unused.
    for i, (x, y) in tqdm(enumerate(zip(x_data, y_data)), total=len(x_data)):
        x = read_image(x)
        y = read_mask(y)
        _, h, w, _ = x.shape

        # The model outputs two stacked masks; take the last two channels.
        y_pred1 = parse(model.predict(x)[0][..., -2])
        y_pred2 = parse(model.predict(x)[0][..., -1])

        # White 10px separator column between the panels.
        line = np.ones((h, 10, 3)) * 255.0

        all_images = [
            x[0] * 255.0, line,
            mask_to_3d(y) * 255.0, line,
            mask_to_3d(y_pred1) * 255.0, line,
            mask_to_3d(y_pred2) * 255.0
        ]
        mask = np.concatenate(all_images, axis=1)
        cv2.imwrite(f"results/{i}.png", mask)
smooth = 1.
def dice_coef(y_true, y_pred):
    """Soft Dice coefficient over flattened inputs (smooth=1)."""
    yt = tf.keras.layers.Flatten()(y_true)
    yp = tf.keras.layers.Flatten()(y_pred)
    inter = tf.reduce_sum(yt * yp)
    return (2. * inter + smooth) / (tf.reduce_sum(yt) + tf.reduce_sum(yp) + smooth)
def dice_loss(y_true, y_pred):
    """Dice loss: one minus the Dice coefficient."""
    coefficient = dice_coef(y_true, y_pred)
    return 1.0 - coefficient
if __name__ == "__main__":
    # Fixed seeds for reproducibility.
    np.random.seed(42)
    tf.random.set_seed(42)

    create_dir("results/")

    batch_size = 8

    test_path = "../1/new_data/test/"
    test_x = sorted(glob(os.path.join(test_path, "image", "*.jpg")))
    test_y = sorted(glob(os.path.join(test_path, "mask", "*.jpg")))

    test_dataset = tf_dataset(test_x, test_y, batch=batch_size)
    # Round steps up so the final partial batch is included.
    test_steps = (len(test_x)//batch_size)
    if len(test_x) % batch_size != 0:
        test_steps += 1

    # Evaluate with the compiled metrics, then dump qualitative results.
    model = load_model_weight("files/model.h5")
    model.evaluate(test_dataset, steps=test_steps)
    evaluate_normal(model, test_x, test_y)
| 2,753 | 28.297872 | 102 | py |
2020-CBMS-DoubleU-Net | 2020-CBMS-DoubleU-Net-master/train.py |
import os
import numpy as np
import cv2
import tensorflow as tf
from tensorflow.keras.callbacks import *
from tensorflow.keras.optimizers import Adam, Nadam
from tensorflow.keras.metrics import *
from glob import glob
from sklearn.model_selection import train_test_split
from model import build_model
from utils import *
from metrics import *
def read_image(x):
    """Decode a byte path, read the image, median-center it, scale to [0, 1]."""
    path = x.decode()
    img = cv2.imread(path, cv2.IMREAD_COLOR)
    img = np.clip(img - np.median(img) + 127, 0, 255)
    img = img / 255.0
    return img.astype(np.float32)
def read_mask(y):
    """Decode a byte path, read the mask as grayscale in [0, 1], add channel axis."""
    path = y.decode()
    mask = cv2.imread(path, cv2.IMREAD_GRAYSCALE)
    mask = (mask / 255.0).astype(np.float32)
    return np.expand_dims(mask, axis=-1)
def parse_data(x, y):
    """tf.data map function: load an image/mask pair from path tensors.

    The mask is duplicated to two channels because the model predicts two
    stacked output masks.
    """
    def _parse(x, y):
        x = read_image(x)
        y = read_mask(y)
        y = np.concatenate([y, y], axis=-1)
        return x, y

    x, y = tf.numpy_function(_parse, [x, y], [tf.float32, tf.float32])
    # Static shapes must be restored after numpy_function erases them.
    x.set_shape([384, 512, 3])
    y.set_shape([384, 512, 2])
    return x, y
def tf_dataset(x, y, batch=8):
    """Build a shuffled, repeated, batched tf.data pipeline over (x, y) paths."""
    ds = tf.data.Dataset.from_tensor_slices((x, y))
    ds = ds.shuffle(buffer_size=32).map(map_func=parse_data)
    return ds.repeat().batch(batch)
if __name__ == "__main__":
    # Fixed seeds for reproducibility.
    np.random.seed(42)
    tf.random.set_seed(42)

    create_dir("files")

    train_path = "../1/new_data/train/"
    valid_path = "../1/new_data/valid/"

    ## Training
    train_x = sorted(glob(os.path.join(train_path, "image", "*.jpg")))
    train_y = sorted(glob(os.path.join(train_path, "mask", "*.jpg")))

    ## Shuffling (deterministic, seed 42)
    train_x, train_y = shuffling(train_x, train_y)

    ## Validation
    valid_x = sorted(glob(os.path.join(valid_path, "image", "*.jpg")))
    valid_y = sorted(glob(os.path.join(valid_path, "mask", "*.jpg")))

    model_path = "files/model.h5"
    batch_size = 16
    epochs = 300
    lr = 1e-4
    shape = (384, 512, 3)

    model = build_model(shape)
    metrics = [
        dice_coef,
        iou,
        Recall(),
        Precision()
    ]

    train_dataset = tf_dataset(train_x, train_y, batch=batch_size)
    valid_dataset = tf_dataset(valid_x, valid_y, batch=batch_size)

    model.compile(loss=dice_loss, optimizer=Adam(lr), metrics=metrics)

    callbacks = [
        ModelCheckpoint(model_path),
        ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=20),
        CSVLogger("files/data.csv"),
        TensorBoard(),
        EarlyStopping(monitor='val_loss', patience=50, restore_best_weights=False)
    ]

    # Round steps up so partial batches are included.
    train_steps = (len(train_x)//batch_size)
    valid_steps = (len(valid_x)//batch_size)

    if len(train_x) % batch_size != 0:
        train_steps += 1
    if len(valid_x) % batch_size != 0:
        valid_steps += 1

    # The tf.data pipeline already shuffles, hence shuffle=False here.
    model.fit(train_dataset,
        epochs=epochs,
        validation_data=valid_dataset,
        steps_per_epoch=train_steps,
        validation_steps=valid_steps,
        callbacks=callbacks,
        shuffle=False)
| 3,101 | 26.451327 | 82 | py |
UADAD | UADAD-main/utils.py | from math import ceil
import pandas as pd
import os
import sys
import numpy as np
import torch
import qgel
from sklearn import preprocessing
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import precision_score, recall_score, f1_score, roc_auc_score
from sklearn.metrics import confusion_matrix
from sklearn import metrics
def load_data(file_path, names=None):
    """Read a CSV file into a DataFrame, optionally forcing column names."""
    return pd.read_csv(file_path, names=names)
def one_hot_encoding(data, cols):
    """One-hot encode ``cols`` and append the dummies after the remaining columns.

    Side effect: ``cols`` are dropped from ``data`` in place.
    """
    dummies = [pd.get_dummies(data[c], prefix=c) for c in cols]
    data.drop(cols, axis=1, inplace=True)
    return pd.concat([data] + dummies, axis=1)
def label_encoding(data, cols):
    """Integer-encode ``cols`` with sklearn's LabelEncoder.

    The encoded columns are re-appended at the end of the frame; ``cols`` are
    dropped from ``data`` in place.
    """
    encoder = preprocessing.LabelEncoder()
    encoded = pd.DataFrame()
    for c in cols:
        codes = pd.DataFrame(encoder.fit_transform(data[c]), columns=[c])
        encoded = pd.concat([encoded, codes], axis=1)
    data.drop(cols, axis=1, inplace=True)
    return pd.concat([data, encoded], axis=1)
def gel_encoding(data, cols):
    """Graph-embedding encode categorical ``cols`` via qgel.

    The columns are first one-hot encoded, embedded to len(cols) dimensions
    with unsupervised qgel, and the embedding (integer-named columns) replaces
    the originals. ``data`` is mutated in place by the drop.
    """
    encode_data = data[cols]
    encoded_data = pd.DataFrame()
    for column in encode_data:
        new_data = pd.get_dummies(encode_data[column], prefix=column)
        encoded_data = pd.concat([encoded_data, new_data], axis=1)
    # NOTE(review): ``vectors`` and ``source_data_`` are unused here.
    embedding, vectors, source_data_ = qgel.qgel(one_hot_data = encoded_data,
                                        k = int(len(cols)),
                                        learning_method = 'unsupervised')
    data.drop(cols, axis=1, inplace=True)
    data = pd.concat([data, pd.DataFrame(embedding, columns=[x for x in range(0,int(len(cols)))])], axis=1)
    return data
def get_labels(data, name):
    """Extract the anomaly label for dataset ``name``.

    Side effect: the label column is dropped from ``data`` in place.

    Returns:
        A pandas Series or numpy array of 0/1 (or raw) labels, matching the
        original per-dataset return types.
    Raises:
        ValueError: for an unknown dataset name (the original died with an
        opaque UnboundLocalError instead).
    """
    if name == 'credit_card':
        label = data['Class']
        data.drop(['Class'], axis=1, inplace=True)
    elif name == 'arrhythmia':
        # Classes 3,4,5,7,8,9,10,14,15 are the anomalous arrhythmia types.
        anomaly_classes = [3, 4, 5, 7, 8, 9, 10, 14, 15]
        label = np.array([1 if i in anomaly_classes else 0 for i in data['class']])
        data.drop(['class'], axis=1, inplace=True)
    elif name == 'kdd':
        # Column 41 holds the attack type; "normal" is the negative class.
        label = np.where(data[41] == "normal", 0, 1)
        data.drop([41], axis=1, inplace=True)
    elif name == 'vehicle_claims':
        label = data['Label']
        data.drop(['Label'], axis=1, inplace=True)
    elif name == 'vehicle_insurance':
        label = data['FraudFound_P']
        data.drop(['FraudFound_P'], axis=1, inplace=True)
    elif name == 'car_insurance':
        label = np.where(data['fraud_reported'] == "Y", 1, 0)
        data.drop(['fraud_reported'], axis=1, inplace=True)
    else:
        raise ValueError(f"Unknown dataset name: {name!r}")
    return label
def get_normal_data(data, name):
    """Randomly sample len(data)/2 normal (label == 0) rows for dataset ``name``.

    Side effect: a temporary ``label`` column is added to ``data`` (as in the
    original implementation). The returned frame has that column dropped.
    """
    n_samples = int(len(data) / 2)

    if name == 'credit_card':
        label = data['Class']
    elif name == 'arrhythmia':
        anomaly_classes = [3, 4, 5, 7, 8, 9, 10, 14, 15]
        label = np.array([1 if i in anomaly_classes else 0 for i in data['class']])
    elif name == 'kdd':
        label = np.where(data[41] == "normal", 0, 1)
    elif name == 'vehicle_claims':
        label = data['Label']
    elif name == 'vehicle_insurance':
        label = data['FraudFound_P']
    elif name == 'car_insurance':
        label = np.where(data['fraud_reported'] == "Y", 1, 0)

    data['label'] = label
    normal = data[data['label'] == 0]
    sampled = normal.sample(n=n_samples)
    return sampled.drop('label', axis=1)
def get_scores(y_pred, y, scores):
    """Weighted precision/recall/F1 on hard predictions plus AUROC on raw scores."""
    precision = precision_score(y, y_pred, labels=[0], average='weighted')
    recall = recall_score(y, y_pred, average='weighted')
    f1 = f1_score(y, y_pred, average='weighted')
    auc = roc_auc_score(y, scores, average='weighted')
    return precision, recall, f1, auc
def get_confusion_matrix(y_pred, y):
    """Return (tn, fp, fn, tp). NOTE: predictions are passed first, as before."""
    counts = confusion_matrix(y_pred, y).ravel()
    tn, fp, fn, tp = counts
    return tn, fp, fn, tp
def normalize_cols(data):
    """Z-score each column (subtract the column mean, divide by its std)."""
    return (data - data.mean()) / data.std()
def merge_cols(data_1, data_2):
    """Concatenate two frames side by side (column-wise)."""
    merged = pd.concat([data_1, data_2], axis=1)
    return merged
def remove_cols(data, cols):
    """Drop ``cols`` from ``data`` in place and return the mutated frame."""
    data.drop(columns=cols, inplace=True)
    return data
def split_data(data, Y, split=0.7):
    """Split features and labels into train/test parts at fraction ``split``.

    NOTE(review): ``data`` is sliced with label-based ``.loc`` (inclusive, with
    a float bound) while ``Y`` is sliced positionally with int()/ceil(). If
    ``data``'s index is not the default RangeIndex the two splits will not line
    up row-for-row — verify against the callers before changing.
    """
    train = data.loc[:split*len(data),]
    test = data.loc[split*len(data)+1:,]
    Y_train = Y[:int(split*len(data)),]
    Y_test = Y[ceil(split*len(data)+1):,]
    return train, test, Y_train, Y_test
def fill_na(data):
    """Replace every NaN in the frame with 0 and return the new frame."""
    return data.fillna(value=0, axis=1)
def relative_euclidean_distance(x1, x2):
    # Per-row L2 distance between x1 and x2, scaled by the largest row norm of x1.
    num = torch.norm(x1 - x2, p=2, dim=1)
    denom = torch.norm(x1, p=2, dim=1)
    # NOTE(review): torch.max(denom) reduces over the whole batch, so every row
    # is divided by the single largest ||x1|| rather than its own norm. If a
    # per-row relative distance was intended (as in DAGMM), this should be
    # num / torch.clamp(denom, min=eps) — confirm before changing.
    return num / torch.max(denom)
def cosine_similarity(x1, x2, eps=1e-8):
    """Row-wise cosine similarity between ``x1`` and ``x2`` (shape [N, D]).

    Bug fix: the original called ``torch.max(dist_x1 * dist_x2, eps)`` with a
    Python float, which PyTorch parses as the ``dim`` overload and rejects with
    a TypeError. ``torch.clamp(..., min=eps)`` is the intended element-wise
    floor guarding against division by zero.
    """
    dot_prod = torch.sum(x1 * x2, dim=1)
    dist_x1 = torch.norm(x1, p=2, dim=1)
    dist_x2 = torch.norm(x2, p=2, dim=1)
    return dot_prod / torch.clamp(dist_x1 * dist_x2, min=eps)
| 5,431 | 34.045161 | 108 | py |
UADAD | UADAD-main/train_som.py | import os
import pickle
import gc
import sys
import argparse
import torch
from utils import *
import pandas as pd
from torch import optim
from torch.utils.data import DataLoader
from Code.datasets.car_insurance import Car_insurance
from Code.datasets.vehicle_claims import Vehicle_Claims
from Code.datasets.vehicle_insurance import Vehicle_Insurance
from Code.classic_ML.SOM import som_train, som_pred, som_embedding_data
from minisom import MiniSom
#read_inputs
def parse_args():
    """Parse the command-line options for the SOM anomaly-detection runner.

    Bug fix: ``--threshold``'s ``help`` was the float ``0.5``; argparse
    requires a string help text (a non-string crashes ``--help`` formatting).
    All defaults and types are unchanged.
    """
    parser = argparse.ArgumentParser(description='Anomaly Detection with unsupervised methods')
    parser.add_argument('--dataset', dest='dataset', help='training dataset', default='vehicle_claims', type=str)
    # NOTE: type=bool means any non-empty string (including "False") parses as
    # True; kept as-is for compatibility with existing invocations.
    parser.add_argument('--embedding', dest='embedding', help='True, False', default=False, type=bool)
    parser.add_argument('--encoding', dest='encoding', help='one_hot, label', default='label_encode', type=str)
    parser.add_argument('--numerical', dest='numerical', help='True False', default=False, type=bool)
    parser.add_argument('--somsize', dest='somsize', help='10, 20', default=10, type=int)
    parser.add_argument('--somlr', dest='somlr', help='0.1, 1', default=1, type=float)
    parser.add_argument('--somsigma', dest='somsigma', help='0.05, 0.1', default=0.05, type=float)
    parser.add_argument('--somiter', dest='somiter', help='100000', default=10000, type=int)
    parser.add_argument('--mode', dest='mode', help='train', default='train', type=str)
    parser.add_argument('--threshold', dest='threshold', help='anomaly percentile, e.g. 0.5', default=70, type=float)
    args = parser.parse_args()
    return args
# Unpack the CLI configuration into module-level settings used below.
args = parse_args()
embedding = args.embedding
encoding = args.encoding
numerical = args.numerical
mode = args.mode
# Model artifact path encodes the run configuration so repeated runs don't clash.
save_path = os.path.join("model", "som" + "_" + str(args.dataset) + "_" + str(args.encoding) + "_" + str(args.somsize) + "_" + str(embedding))
# Per-dataset configuration: numerical columns and train/test CSV locations,
# selected by the chosen encoding (label / one-hot / gel / raw-for-embedding).
if args.dataset == "car_insurance":
    numerical_cols = ['months_as_customer', 'age', 'policy_deductable', 'policy_annual_premium', 'insured_zip' ,'capital-gains',
    'capital-loss', 'incident_hour_of_the_day', 'number_of_vehicles_involved', 'bodily_injuries', 'witnesses', 'total_claim_amount',
    'injury_claim', 'property_claim', 'vehicle_claim', 'auto_year']
    if encoding == "label_encode":
        path = 'data/car_insurance/train_label.csv'
        test_path = 'data/car_insurance/test_label.csv'
        label_file = "data/car_insurance/train_label_Y.csv"
        test_label_file = "data/car_insurance/test_label_Y.csv"
    if encoding == "one_hot":
        path = 'data/car_insurance/train_OH.csv'
        # NOTE(review): this label_file has no 'data/car_insurance/' prefix,
        # unlike every other branch — confirm it is intentional.
        label_file = "train_OH_Y.csv"
        test_path = 'data/car_insurance/test_OH.csv'
        test_label_file = 'data/car_insurance/test_OH_Y.csv'
    if encoding == "gel_encode":
        path = 'data/car_insurance/train_gel.csv'
        label_file = "data/car_insurance/train_gel_Y.csv"
        test_path = 'data/car_insurance/test_gel.csv'
        test_label_file = "data/car_insurance/test_gel_Y.csv"
    if embedding:
        # Embedding layers consume the raw (unencoded) CSVs.
        path = 'data/car_insurance/train.csv'
        test_path = 'data/car_insurance/test.csv'
        test_label_file = 'test_Y.csv'
    dataset = Car_insurance(embedding_layer = embedding, encoding = encoding, path = path)
    test_dataset = Car_insurance(embedding_layer = embedding, encoding = encoding, path = test_path, label_file = test_label_file)
if args.dataset == "vehicle_claims":
    # NOTE(review): these column names (Year, WeekOfMonth, RepNumber, ...)
    # look like the insurance dataset's schema — confirm they match the
    # vehicle_claims CSV and were not swapped with vehicle_insurance below.
    numerical_cols = ['Year', 'WeekOfMonth', 'WeekOfMonthClaimed', 'RepNumber', 'DriverRating', 'Age']
    if encoding == "label_encode":
        path = 'data/vehicle_claims/train_label.csv'
        label_file = "data/vehicle_claims/train_label_Y.csv"
        test_path = 'data/vehicle_claims/test_label.csv'
        test_label_file = "data/vehicle_claims/test_label_Y.csv"
    if encoding == "one_hot":
        path = 'data/vehicle_claims/train_OH.csv'
        label_file = "data/vehicle_claims/train_OH_Y.csv"
        test_path = 'data/vehicle_claims/test_OH.csv'
        test_label_file = "data/vehicle_claims/test_OH_Y.csv"
    if encoding == "gel_encode":
        path = 'data/vehicle_claims/train_gel.csv'
        label_file = "data/vehicle_claims/train_gel_Y.csv"
        test_path = 'data/vehicle_claims/test_gel.csv'
        test_label_file = "data/vehicle_claims/test_gel_Y.csv"
    if embedding:
        path = 'data/vehicle_claims/train.csv'
        test_path = 'data/vehicle_claims/test.csv'
        test_label_file = 'test_Y.csv'
    dataset = Vehicle_Claims(embedding_layer = embedding, encoding = encoding, path = path)
    test_dataset = Vehicle_Claims(embedding_layer = embedding, encoding = encoding, path = test_path, label_file = test_label_file)
if args.dataset == "vehicle_insurance":
    # NOTE(review): Runned_Miles/Price/repair_cost look like vehicle_claims
    # columns — confirm numerical_cols were not swapped with the branch above.
    numerical_cols = ['Runned_Miles', 'Price', 'repair_cost', 'repair_hours']
    if encoding == "label_encode":
        path = 'data/vehicle_insurance/train_label.csv'
        label_file = "data/vehicle_insurance/train_label_Y.csv"
        test_path = 'data/vehicle_insurance/test_label.csv'
        test_label_file = "data/vehicle_insurance/test_label_Y.csv"
    if encoding == "one_hot":
        path = 'data/vehicle_insurance/train_OH.csv'
        label_file = "data/vehicle_insurance/train_OH_Y.csv"
        test_path = 'data/vehicle_insurance/test_OH.csv'
        test_label_file = "data/vehicle_insurance/test_OH_Y.csv"
    if encoding == "gel_encode":
        path = 'data/vehicle_insurance/train_gel.csv'
        label_file = "data/vehicle_insurance/train_gel_Y.csv"
        test_path = 'data/vehicle_insurance/test_gel.csv'
        test_label_file = "data/vehicle_insurance/test_gel_Y.csv"
    if embedding:
        path = 'data/vehicle_insurance/train.csv'
        test_path = 'data/vehicle_insurance/test.csv'
        test_label_file = 'test_Y.csv'
    dataset = Vehicle_Insurance(embedding_layer = embedding, encoding = encoding, path = path)
    test_dataset = Vehicle_Insurance(embedding_layer = embedding, encoding = encoding, path = test_path, label_file = test_label_file)
# Load the raw frames chosen by the branches above.
train_data = load_data(path)
test_data = load_data(test_path)

# Optionally restrict training to the numerical feature columns.
if args.numerical:
    train_data = train_data[numerical_cols]

print(save_path)

# Without an embedding layer, MiniSom consumes plain float tensors.
if not embedding:
    if args.dataset == 'vehicle_insurance':
        # PolicyNumber is an identifier, not a feature.
        train_data = train_data.drop('PolicyNumber', axis=1)
        test_data = test_data.drop('PolicyNumber', axis=1)
    train_data = torch.tensor(train_data.values.astype(np.float32))
    test_data = torch.tensor(test_data.values.astype(np.float32))

y_test = load_data(test_label_file)
def train():
    """Train a MiniSom on the configured dataset and pickle it to save_path.

    Uses the module-level ``dataset``/``train_data``/``args``/``save_path``
    globals set up during argument parsing.
    """
    if embedding:
        emb = dataset.embedding_sizes
        dataloader = DataLoader(dataset, batch_size = 32)
        som = MiniSom(x= args.somsize, y= args.somsize, input_len=dataset.input_dim, sigma=args.somsigma, learning_rate=args.somlr)
        for i, data in enumerate(dataloader):
            data = som_embedding_data(data[0], data[1], emb)
            data = data.detach().numpy()
            som.train_random(data, args.somiter)
    else:
        # BUG FIX: this branch previously rebound the name `dataset`, which
        # made `dataset` function-local *everywhere* in train() and caused an
        # UnboundLocalError in the `if embedding:` branch above.  Using a
        # distinct local name keeps the global readable.
        tensor_dataset = torch.utils.data.TensorDataset(train_data)
        dataloader = torch.utils.data.DataLoader(tensor_dataset, batch_size=32)
        som = MiniSom(x= args.somsize, y= args.somsize, input_len=train_data.shape[1], sigma=args.somsigma, learning_rate=args.somlr)
        for i, data in enumerate(dataloader):
            print(data[0].shape)
            som.train_random(data[0], args.somiter)
    print("SOM training done.")
    with open(save_path, 'wb') as outfile:
        pickle.dump(som, outfile)
def eval():
    """Score the test set with the pickled SOM, threshold the quantization
    errors, and print precision/recall/F1/AUROC plus the confusion matrix.

    Note: this function shadows the builtin ``eval``; it relies on the
    module-level ``save_path``/``dataset``/``test_dataset``/``args`` globals.
    """
    with open(save_path, 'rb') as infile:
        som = pickle.load(infile)
    if embedding:
        emb = dataset.embedding_sizes
        q_error = []
        y_test = []
        dataloader = DataLoader(test_dataset, batch_size = 512)
        for i, data in enumerate(dataloader):
            y_test = np.hstack((y_test, data[2]))
            # Map categorical codes through the embedding before scoring.
            data = som_embedding_data(data[0], data[1], emb)
            data = data.detach()
            error = som_pred(som, data)
            q_error = np.hstack((q_error, error))
        # NOTE(review): np.percentile expects a percentage in [0, 100];
        # (100-threshold)/100 yields a value near 1, i.e. roughly the 1st
        # percentile — this looks like it should be `100 - args.threshold`.
        # Confirm the intended threshold semantics.
        error_threshold = np.percentile(q_error, (100-args.threshold)/100)
        is_anomaly = q_error > error_threshold
        y_pred = np.multiply(is_anomaly, 1)
        p, r, f, a = get_scores(y_pred, y_test, q_error)
        tn, fp, fn, tp = get_confusion_matrix(y_pred, y_test)
        pd.DataFrame({'Label': y_test, 'Score': q_error}).to_csv((save_path + '.csv'), index=False)
    else:
        y_test = load_data(test_label_file)
        q_error = som_pred(som, test_data)
        # NOTE(review): same percentile-argument concern as above.
        error_threshold = np.percentile(q_error, (100-args.threshold)/100)
        is_anomaly = q_error > error_threshold
        y_pred = np.multiply(is_anomaly, 1)
        p, r, f, a = get_scores(y_pred, y_test, q_error)
        tn, fp, fn, tp = get_confusion_matrix(y_pred, y_test)
        pd.DataFrame({'Label': y_test.iloc[:,0], 'Score': q_error}).to_csv((save_path + '.csv'), index=False)
    print("Precision:", p, "Recall:", r, "F1 Score:", f, "AUROC:", a)
    print("True Positive:", tp, "False Positive:", fp, "True Negative:", tn, "False Negative:", fn)
if __name__ == '__main__':
    # Entry point: dispatch on the module-level `mode` flag.
    run = train if mode == 'train' else eval
    run()
| 9,368 | 44.26087 | 143 | py |
UADAD | UADAD-main/eval.py | import os
import argparse
import numpy as np
import pandas as pd
import torch
from utils import *
from torch.utils.data import DataLoader
from Code.unsupervised_methods.som_dagmm.model import DAGMM, SOM_DAGMM
from Code.unsupervised_methods.som_dagmm.compression_network import CompressionNetwork
from Code.unsupervised_methods.som_dagmm.estimation_network import EstimationNetwork
from Code.unsupervised_methods.som_dagmm.gmm import GMM, Mixture
from Code.unsupervised_methods.rsrae.rsrae import RSRAE
from Code.datasets.car_insurance import Car_insurance
from Code.datasets.vehicle_claims import Vehicle_Claims
from Code.datasets.vehicle_insurance import Vehicle_Insurance
#read_inputs
def parse_args():
    """Parse the command-line options for the evaluation script.

    Returns:
        argparse.Namespace with all evaluation options.
    """
    def _str2bool(value):
        # `type=bool` is an argparse footgun: bool("False") is True because
        # any non-empty string is truthy.  Parse the usual spellings instead
        # (backward compatible: "--embedding True" still yields True).
        if isinstance(value, bool):
            return value
        lowered = value.lower()
        if lowered in ('true', '1', 'yes', 'y'):
            return True
        if lowered in ('false', '0', 'no', 'n'):
            return False
        raise argparse.ArgumentTypeError('expected a boolean, got %r' % value)

    parser = argparse.ArgumentParser(description='Anomaly Detection with unsupervised methods')
    parser.add_argument('--dataset', dest='dataset', help='training dataset', default='vehicle_claims', type=str)
    parser.add_argument('--data', dest='data', help='type of data', default=False, type=_str2bool)
    parser.add_argument('--embedding', dest='embedding', help='True, False', default=False, type=_str2bool)
    parser.add_argument('--encoding', dest='encoding', help='one_hot, label', default='label_encode', type=str)
    parser.add_argument('--model', dest='model', help='som, dagmm, somdagmm, rsrae', default='dagmm', type=str)
    parser.add_argument('--numerical', dest='numerical', help='True False', default=False, type=_str2bool)
    parser.add_argument('--batch_size', dest='batch_size', help='32', default=32, type=int)
    parser.add_argument('--latent_dim', dest='latent_dim', help='1,2', default=2, type=int)
    parser.add_argument('--num_mixtures', dest='num_mixtures', help='1,2', default=2, type=int)
    parser.add_argument('--dim_embed', dest='dim_embed', help='1,2', default=4, type=int)
    parser.add_argument('--rsr_dim', dest='rsr_dim', help='1,2', default=10, type=int)
    parser.add_argument('--epoch', dest='epoch', help='1', default=1, type=int)
    # help must be a string; it was the float 0.5 before.
    parser.add_argument('--threshold', dest='threshold', help='0.5', default=0.5, type=float)
    parser.add_argument('--save_path', dest="save_path", help='models/<file_name>', type=str)
    parser.add_argument('--som_save_path', dest="som_save_path", help='models/<file_name>', type=str)
    args = parser.parse_args()
    return args
args = parse_args()
epochs = args.epoch
embedding = args.embedding
encoding = args.encoding
normal = args.data
batch_size = args.batch_size
numerical = args.numerical
save_path = args.save_path
#Test Datasets
if args.dataset == "car_insurance":
    if encoding == "label_encode":
        path = 'data/car_insurance/test_label.csv'
        label_file = "test_label_Y.csv"
    if encoding == "one_hot":
        path = 'data/car_insurance/test_OH.csv'
        label_file = "test_OH_Y.csv"
    if encoding == "gel_encode":
        path = 'data/car_insurance/test_gel.csv'
        label_file = "test_gel_Y.csv"
    if embedding:
        # Embedding mode consumes the raw (un-encoded) CSVs.
        path = 'data/car_insurance/test.csv'
        label_file = 'test_Y.csv'
    dataset = Car_insurance(embedding_layer = embedding, encoding = encoding, path = path, label_file=label_file)
if args.dataset == "vehicle_claims":
    path = 'data/vehicle_claims/test.csv'
    label_file = 'test_Y.csv'
    if encoding == "label_encode":
        path = 'data/vehicle_claims/test_label.csv'
        label_file = "test_label_Y.csv"
    if encoding == "one_hot":
        path = 'data/vehicle_claims/test_OH.csv'
        label_file = "test_OH_Y.csv"
    if encoding == "gel_encode":
        path = 'data/vehicle_claims/test_gel.csv'
        label_file = "test_gel_Y.csv"
    if embedding:
        path = 'data/vehicle_claims/test.csv'
        label_file = 'test_Y.csv'
    dataset = Vehicle_Claims(embedding_layer = embedding, encoding = encoding, path = path, label_file=label_file)
if args.dataset == "vehicle_insurance":
    if encoding == "label_encode":
        path = 'data/vehicle_insurance/test_label.csv'
        label_file = "test_label_Y.csv"
    if encoding == "one_hot":
        path = 'data/vehicle_insurance/test_OH.csv'
        label_file = "test_OH_Y.csv"
    if encoding == "gel_encode":
        path = 'data/vehicle_insurance/test_gel.csv'
        label_file = "test_gel_Y.csv"
    if embedding:
        path = 'data/vehicle_insurance/test.csv'
        label_file = 'test_Y.csv'
    dataset = Vehicle_Insurance(embedding_layer = embedding, encoding = encoding, path = path, label_file=label_file)
#Parameters for embedding layer initialization and numerical features
emb = None
if embedding:
    emb = dataset.embedding_sizes
input_dim = dataset.input_dim
output_dim = dataset.output_dim
if numerical:
    # Numerical-only mode: width equals the number of continuous columns.
    input_dim = output_dim = dataset.cont_cols.shape[1]
#DataLoader
dataloader = DataLoader(dataset, batch_size= batch_size)
# Accumulated anomaly scores and ground-truth labels across all batches.
score = []
label = []
if args.model == "dagmm":
    # NOTE(review): these imports shadow the som_dagmm classes imported at
    # the top of the file with the dagmm_self variants — intentional, but
    # easy to miss.
    from Code.unsupervised_methods.dagmm_self.model import DAGMM
    from Code.unsupervised_methods.dagmm_self.compression_network import CompressionNetwork
    from Code.unsupervised_methods.dagmm_self.estimation_network import EstimationNetwork
    from Code.unsupervised_methods.dagmm_self.gmm import GMM, Mixture
    compression = CompressionNetwork(embedding, numerical, input_dim, output_dim, emb, args.latent_dim)
    estimation = EstimationNetwork(args.dim_embed, args.num_mixtures)
    gmm = GMM(args.num_mixtures,args.dim_embed)
    mix = Mixture(args.dim_embed)
    net = DAGMM(compression, estimation, gmm)
    path = os.path.split(save_path)
    #net.load_state_dict(torch.load(save_path), strict=False)
    # The freshly built net above is replaced wholesale by the pickled model.
    net = torch.load(save_path)
    net.eval()
    for i, data in enumerate(dataloader):
        # data = (categorical tensor, continuous tensor, label)
        rec_data = torch.cat([data[0], data[1]], -1)
        if numerical:
            rec_data = data[1]
        out = net(data[0], data[1], rec_data)
        out = out.detach().numpy().reshape(-1)
        L = data[2].detach().numpy().reshape(-1)
        score = np.hstack((score, out))
        label = np.hstack((label, L))
    # NOTE(review): `score < threshold` labels everything *below* the
    # (100 - threshold)th percentile as anomalous — confirm the intended
    # direction of the DAGMM energy score.
    threshold = np.percentile(score, (100 - args.threshold), axis=0)
    y_pred = (score < threshold).astype(int)
    y_test = label
if args.model == "somdagmm":
    compression = CompressionNetwork(embedding, numerical, input_dim, output_dim, emb, args.latent_dim)
    estimation = EstimationNetwork(args.dim_embed, args.num_mixtures)
    gmm = GMM(args.num_mixtures,args.dim_embed)
    mix = Mixture(args.dim_embed)
    dagmm = DAGMM(compression, estimation, gmm)
    net = SOM_DAGMM(dagmm, embedding, numerical, emb)
    # The freshly built net above is replaced wholesale by the pickled model.
    net = torch.load(save_path)
    net.eval()
    for i, data in enumerate(dataloader):
        rec_data = torch.cat([data[0], data[1]], -1)
        if numerical:
            rec_data = data[1]
        out = net(data[0], data[1], rec_data, args.som_save_path)
        out = out.detach().numpy().reshape(-1)
        L = data[2].detach().numpy().reshape(-1)
        score = np.hstack((score, out))
        label = np.hstack((label, L))
    threshold = np.percentile(score, (100 - args.threshold), axis=0)
    print(threshold)
    y_pred = (score < threshold).astype(int)
    y_test = label
if args.model == "rsrae":
    net = RSRAE(embedding, numerical, input_dim, output_dim, emb, args.rsr_dim, args.latent_dim)
    net.load_state_dict(torch.load(save_path))
    #net = torch.load(save_path)
    net.eval()
    for i, data in enumerate(dataloader):
        enc, dec, latent, A = net(data[0], data[1])
        rec_data = torch.cat([data[0], data[1]], -1)
        if numerical:
            rec_data = data[1]
        # RSRAE anomaly score: relative reconstruction error.
        out = relative_euclidean_distance(rec_data, dec)
        out = out.detach().numpy().reshape(-1)
        L = data[2].detach().numpy().reshape(-1)
        score = np.hstack((score, out))
        label = np.hstack((label, L))
    threshold = np.percentile(score, args.threshold, axis=0)
    y_pred = (score > threshold).astype(int)
    y_test = label
#print(y_pred, score)
# Precision, Recall, F1
pd.DataFrame({'Score':score, 'Label':y_test}).to_csv((save_path + '.csv'), index=False)
p, r, f, a = get_scores(y_pred, y_test, score)
tn, fp, fn, tp = get_confusion_matrix(y_pred, y_test)
print("Precision:", p, "Recall:", r, "F1 Score:", f, "AUROC:", a)
print("True Positive:", tp, "False Positive:", fp, "True Negative:", tn, "False Negative:", fn)
| 8,306 | 42.265625 | 117 | py |
UADAD | UADAD-main/train.py | import os
import pickle
import gc
import sys
import argparse
import torch
from Code.datasets.vehicle_claims import Vehicle_Claims
from utils import *
from torch import optim
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from Code.unsupervised_methods.rsrae.rsrae import RSRLoss
from Code.unsupervised_methods.rsrae.rsrae import RSRAE
from Code.datasets.car_insurance import Car_insurance
from Code.datasets.vehicle_claims import Vehicle_Claims
from Code.datasets.vehicle_insurance import Vehicle_Insurance
#read_inputs
def parse_args():
    """Parse the command-line options for the training script.

    Returns:
        argparse.Namespace with all training options.
    """
    def _str2bool(value):
        # `type=bool` is an argparse footgun: bool("False") is True because
        # any non-empty string is truthy.  Parse the usual spellings instead
        # (backward compatible: "--embedding True" still yields True).
        if isinstance(value, bool):
            return value
        lowered = value.lower()
        if lowered in ('true', '1', 'yes', 'y'):
            return True
        if lowered in ('false', '0', 'no', 'n'):
            return False
        raise argparse.ArgumentTypeError('expected a boolean, got %r' % value)

    parser = argparse.ArgumentParser(description='Anomaly Detection with unsupervised methods')
    parser.add_argument('--dataset', dest='dataset', help='training dataset', default='vehicle_claims', type=str)
    parser.add_argument('--data', dest='data', help='type of data', default=False, type=_str2bool)
    parser.add_argument('--embedding', dest='embedding', help='True, False', default=False, type=_str2bool)
    parser.add_argument('--encoding', dest='encoding', help='one_hot, label', default='label_encode', type=str)
    parser.add_argument('--model', dest='model', help='som, dagmm, somdagmm, rsrae', default='dagmm', type=str)
    parser.add_argument('--numerical', dest='numerical', help='True False', default=False, type=_str2bool)
    parser.add_argument('--batch_size', dest='batch_size', help='32', default=32, type=int)
    parser.add_argument('--latent_dim', dest='latent_dim', help='1,2', default=2, type=int)
    parser.add_argument('--num_mixtures', dest='num_mixtures', help='1,2', default=2, type=int)
    parser.add_argument('--dim_embed', dest='dim_embed', help='1,2', default=4, type=int)
    parser.add_argument('--rsr_dim', dest='rsr_dim', help='1,2', default=10, type=int)
    parser.add_argument('--epoch', dest='epoch', help='1', default=1, type=int)
    parser.add_argument('--file_name', dest='file_name', help='model_data_embed_encode_latent_dim_parameters', type=str)
    parser.add_argument('--som_save_path', dest="som_save_path", help='models/<file_name>', type=str)
    args = parser.parse_args()
    return args
args = parse_args()
epochs = args.epoch
embedding = args.embedding
encoding = args.encoding
normal = args.data
batch_size = args.batch_size
numerical = args.numerical
som_save_path = args.som_save_path
save_path = os.path.join("model", args.file_name )
writer = SummaryWriter()
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print(device)
#Auditing Datasets
if args.dataset == "car_insurance":
    if encoding == "label_encode":
        path = 'data/car_insurance/train_label.csv'
        label_file = "train_label_Y.csv"
    if encoding == "one_hot":
        path = 'data/car_insurance/train_OH.csv'
        label_file = "train_OH_Y.csv"
    if encoding == "gel_encode":
        path = 'data/car_insurance/train_gel.csv'
        label_file = "train_gel_Y.csv"
    if embedding:
        # Embedding mode consumes the raw (un-encoded) CSV.
        path = 'data/car_insurance/train.csv'
    dataset = Car_insurance(embedding_layer = embedding, path = path, label_file=label_file)
if args.dataset == "vehicle_claims":
    if encoding == "label_encode":
        path = 'data/vehicle_claims/train_label.csv'
        label_file = "train_label_Y.csv"
    if encoding == "one_hot":
        path = 'data/vehicle_claims/train_OH.csv'
        label_file = "train_OH_Y.csv"
    if encoding == "gel_encode":
        path = 'data/vehicle_claims/train_gel.csv'
        label_file = "train_gel_Y.csv"
    if embedding:
        path = 'data/vehicle_claims/train.csv'
    dataset = Vehicle_Claims(embedding_layer = embedding, encoding = encoding, path = path, label_file=label_file)
if args.dataset == "vehicle_insurance":
    if encoding == "label_encode":
        path = 'data/vehicle_insurance/train_label.csv'
        label_file = "train_label_Y.csv"
    if encoding == "one_hot":
        path = 'data/vehicle_insurance/train_OH.csv'
        label_file = "train_OH_Y.csv"
    if encoding == "gel_encode":
        path = 'data/vehicle_insurance/train_gel.csv'
        label_file = "train_gel_Y.csv"
    if embedding:
        path = 'data/vehicle_insurance/train.csv'
    dataset = Vehicle_Insurance(embedding_layer = embedding, encoding = encoding, path = path, label_file=label_file)
#Parameters for embedding layer initialization and numerical features
emb = None
if embedding:
    emb = dataset.embedding_sizes
input_dim = dataset.input_dim
output_dim = dataset.output_dim
if numerical:
    # Numerical-only mode: width equals the number of continuous columns.
    input_dim = output_dim = dataset.cont_cols.shape[1]
#DataLoader
dataloader = DataLoader(dataset, batch_size= batch_size)
#Training Models
if args.model == "dagmm":
    from Code.unsupervised_methods.dagmm_self.model import DAGMM
    from Code.unsupervised_methods.dagmm_self.compression_network import CompressionNetwork
    from Code.unsupervised_methods.dagmm_self.estimation_network import EstimationNetwork
    from Code.unsupervised_methods.dagmm_self.gmm import GMM, Mixture
    compression = CompressionNetwork(embedding, numerical, input_dim, output_dim, emb, args.latent_dim)
    estimation = EstimationNetwork(args.dim_embed, args.num_mixtures)
    gmm = GMM(args.num_mixtures,args.dim_embed)
    # NOTE(review): `mix` is a standalone Mixture used only for gmm_loss; it
    # shares no parameters with `gmm` — confirm this is intended.
    mix = Mixture(args.dim_embed)
    net = DAGMM(compression, estimation, gmm)
    optimizer = optim.Adam(net.parameters(), lr=1e-3)
    for epoch in range(epochs):
        print('EPOCH {}:'.format(epoch + 1))
        running_loss = 0
        for i, data in enumerate(dataloader):
            # data = (categorical tensor, continuous tensor, label)
            rec_data = torch.cat([data[0], data[1]], -1)
            if numerical:
                rec_data = data[1]
            out = net(data[0], data[1], rec_data)
            optimizer.zero_grad()
            L_loss = compression.reconstruction_loss(data[0], data[1], rec_data)
            G_loss = mix.gmm_loss(out=out, L1=1, L2=0.05)
            print(L_loss, G_loss)
            loss = (L_loss + G_loss)/ len(data[1])
            loss.backward()
            optimizer.step()
            running_loss += loss.item()
        writer.add_scalar("Loss/train", running_loss, epoch)
        print(running_loss)
    writer.flush()
    # Whole-module pickle; eval.py loads this with torch.load(save_path).
    torch.save(net, save_path)
if args.model == "somdagmm":
    from Code.unsupervised_methods.som_dagmm.model import DAGMM, SOM_DAGMM
    from Code.unsupervised_methods.som_dagmm.compression_network import CompressionNetwork
    from Code.unsupervised_methods.som_dagmm.estimation_network import EstimationNetwork
    from Code.unsupervised_methods.som_dagmm.gmm import GMM, Mixture
    compression = CompressionNetwork(embedding, numerical, input_dim, output_dim, emb, args.latent_dim)
    estimation = EstimationNetwork(args.dim_embed, args.num_mixtures)
    gmm = GMM(args.num_mixtures,args.dim_embed)
    mix = Mixture(args.dim_embed)
    dagmm = DAGMM(compression, estimation, gmm)
    net = SOM_DAGMM(dagmm, embedding, numerical, emb)
    optimizer = optim.Adam(net.parameters(), lr=1e-3)
    for epoch in range(epochs):
        print('EPOCH {}:'.format(epoch + 1))
        running_loss = 0
        for i, data in enumerate(dataloader):
            rec_data = torch.cat([data[0], data[1]], -1)
            if numerical:
                rec_data = data[1]
            # som_save_path points at the pre-trained, pickled MiniSom.
            out = net(data[0], data[1], rec_data, som_save_path)
            optimizer.zero_grad()
            L_loss = compression.reconstruction_loss(data[0], data[1], rec_data)
            G_loss = mix.gmm_loss(out=out, L1=1, L2=0.05)
            loss = (L_loss + G_loss)/ len(data[1])
            print(L_loss, G_loss)
            loss.backward()
            optimizer.step()
            running_loss += loss.item()
        writer.add_scalar("Loss/train", running_loss, epoch)
        print(running_loss)
    writer.flush()
    torch.save(net, save_path)
if args.model == "rsrae":
    rsr = RSRLoss(0.1,0.1, args.rsr_dim, args.latent_dim).to(device)
    net = RSRAE(embedding, numerical, input_dim, output_dim, emb, args.rsr_dim, args.latent_dim)
    net.to(device)
    optimizer = optim.Adam(net.parameters(), lr=1e-3)
    for epoch in range(epochs):
        print('EPOCH {}:'.format(epoch + 1))
        running_loss = 0
        for i, data in enumerate(dataloader):
            optimizer.zero_grad()
            enc, dec, latent, A = net(data[0].to(device), data[1].to(device))
            rec_data = torch.cat([data[0], data[1]], -1).to(device)
            if numerical:
                rec_data = data[1]
            rec_loss = net.L21(dec,rec_data).to(device)
            rsr_loss = rsr(enc, A).to(device)
            loss = (rec_loss + rsr_loss)/ len(data[1])
            loss.backward()
            optimizer.step()
            running_loss += loss.item()
        writer.add_scalar("Loss/train", running_loss, epoch)
        print(running_loss)
    writer.flush()
    # state_dict save (unlike the DAGMM branches); eval.py uses
    # load_state_dict for this model accordingly.
    torch.save(net.state_dict(), save_path)
| 8,837 | 42.752475 | 120 | py |
UADAD | UADAD-main/Code/unsupervised_methods/rsrae/rsrae.py | import torch
from torch import nn
import torch.nn.functional as F
from fastai.layers import Embedding
from fastai.torch_core import Module
from typing import List, Tuple
class RSRLayer(nn.Module):
    """Robust Subspace Recovery layer.

    Holds a learnable (d x D) projection matrix ``A`` (orthogonally
    initialised) and maps each D-dimensional encoding in a batch onto the
    d-dimensional recovered subspace.
    """
    def __init__(self, d, D):
        super().__init__()
        self.d = d
        self.D = D
        self.A = nn.Parameter(torch.nn.init.orthogonal_(torch.empty(d, D)))

    def forward(self, z):
        # (batch, D) -> (batch, D, 1), left-multiply by A, drop trailing axis.
        column = z.view(z.size(0), self.D, 1)
        projected = torch.matmul(self.A, column)
        return projected.squeeze(2)
class RSRLoss(nn.Module):
    """RSR regulariser: projection residual plus an orthogonality penalty
    on the subspace matrix ``A`` (weighted by L1 and L2 respectively)."""
    def __init__(self, L1, L2, d, D):
        super().__init__()
        self.L1 = L1
        self.L2 = L2
        self.d = d
        self.D = D
        # Cached identity for the orthogonality term; moves with .to(device).
        self.register_buffer("Id", torch.eye(d))

    def forward(self, z, A):
        batch = z.size(0)
        # Project z into the subspace and lift it back: A^T (A z).
        down = torch.matmul(A, z.view(batch, self.D, 1))
        back = torch.matmul(A.T, down).squeeze(2)
        residual = torch.sum(torch.norm(z - back, p=2))
        # || A A^T - I ||^2 drives A towards an orthonormal row basis.
        gram_gap = torch.matmul(A, A.T) - self.Id
        orthogonality = torch.sum(torch.norm(gram_gap, p=2)) ** 2
        return self.L1 * residual + self.L2 * orthogonality
class EmbeddingLayer(Module):
    """Embeds each categorical column independently and concatenates the
    results along the feature axis.

    Args:
        emb_szs: one (cardinality, embedding_dim) pair per categorical
            column, in the same order as the columns of the input tensor.
    (fastai's ``Module`` base does not require an explicit super().__init__().)
    """
    def __init__(self, emb_szs: List[Tuple[int, int]]):
        self.embeddings = torch.nn.ModuleList([Embedding(in_sz, out_sz) for in_sz, out_sz in emb_szs])
    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # Look up column i in its own embedding table, then concatenate.
        x = [emb(x[..., i]) for i, emb in enumerate(self.embeddings)]
        x = torch.cat(x, dim=-1)
        return x
class RSRAE(nn.Module):
    """Autoencoder with a Robust Subspace Recovery bottleneck (RSRAE).

    forward() returns (encoded, decoded, latent, A) where `latent` is the
    RSR projection of the encoding and `A` is the subspace matrix.
    """
    def __init__(self, embedding, numerical, input_dim, output_dim, emb_szs, d, D):
        super().__init__()
        self.embedding = embedding
        self.numerical = numerical
        if self.embedding:
            self.emb_layer = EmbeddingLayer(emb_szs)
        # Encoder: input_dim -> 32 -> 64 -> D
        self.encoder = nn.Sequential(
            nn.Linear(input_dim, 32),
            nn.LeakyReLU(),
            nn.Linear(32, 64),
            nn.LeakyReLU(),
            nn.Linear(64, D)
        )
        self.rsr = RSRLayer(d, D)
        # Decoder: d -> D -> 64 -> 32 -> output_dim
        self.decoder = nn.Sequential(
            nn.Linear(d, D),
            nn.LeakyReLU(),
            nn.Linear(D, 64),
            nn.LeakyReLU(),
            nn.Linear(64, 32),
            nn.LeakyReLU(),
            nn.Linear(32, output_dim)
        )

    def _assemble_input(self, x_cat, x_cont):
        # Embed categoricals when configured; numerical-only mode drops them.
        if self.embedding:
            x_cat = self.emb_layer(x_cat)
        if self.numerical:
            return x_cont
        return torch.cat([x_cat, x_cont], -1)

    def forward(self, x_cat, x_cont):
        features = self._assemble_input(x_cat, x_cont)
        encoded = self.encoder(features)
        latent = self.rsr(encoded)
        # Decode from the L2-normalised latent vector.
        decoded = self.decoder(F.normalize(latent, p=2))
        return encoded, decoded, latent, self.rsr.A

    def L21(self, y_hat, y):
        # Sum of L2 norms of the residual (the pow(..., 1) is a no-op kept
        # for exact numerical parity with the original formulation).
        return torch.sum(torch.pow(torch.norm(y - y_hat, p=2), 1))
| 2,646 | 29.77907 | 102 | py |
UADAD | UADAD-main/Code/unsupervised_methods/som_dagmm/estimation_network.py | import torch
from torch import nn
class EstimationNetwork(nn.Module):
    """Estimation network: maps latent vectors to soft GMM mixture
    memberships (each row sums to 1 thanks to the final softmax)."""
    def __init__(self, dim_embed, num_mixtures):
        super().__init__()
        layers = [
            nn.Linear(dim_embed, 10),
            nn.Tanh(),
            nn.Dropout(p=0.5),
            nn.Linear(10, num_mixtures),
            nn.Softmax(dim=1),
        ]
        self.net = nn.Sequential(*layers)
    def forward(self, input):
        memberships = self.net(input)
        return memberships
UADAD | UADAD-main/Code/unsupervised_methods/som_dagmm/model.py | """Implements all the components of the DAGMM model."""
from pandas import NA
import torch
import numpy as np
from torch import nn
import pickle
from minisom import MiniSom
from Code.classic_ML.SOM import som_train
from fastai.layers import Embedding
from fastai.torch_core import Module
from typing import List, Tuple
eps = torch.autograd.Variable(torch.FloatTensor([1.e-8]), requires_grad=False)
class EmbeddingLayer(Module):
    """Per-column categorical embedder: column i of the input is looked up
    in table i and the embeddings are concatenated on the feature axis.
    ``emb_szs`` is a list of (cardinality, embedding_dim) pairs.
    (fastai's ``Module`` base does not require an explicit super().__init__().)
    """
    def __init__(self, emb_szs: List[Tuple[int, int]]):
        self.embeddings = torch.nn.ModuleList([Embedding(in_sz, out_sz) for in_sz, out_sz in emb_szs])
    def forward(self, x: torch.Tensor) -> torch.Tensor:
        x = [emb(x[..., i]) for i, emb in enumerate(self.embeddings)]
        x = torch.cat(x, dim=-1)
        return x
class SOM_DAGMM(nn.Module):
    """DAGMM augmented with Self-Organising-Map winner coordinates.

    For every batch the pre-trained SOM (pickled at ``save_path``) is
    queried for each sample's best-matching unit; the normalised (x, y)
    grid coordinate is appended to the DAGMM latent vector.
    """
    def __init__(self,dagmm, embedding, numerical, emb):
        super().__init__()
        self.dagmm = dagmm
        self.embedding = embedding
        self.numerical = numerical
        if self.embedding:
            self.emb_layer = EmbeddingLayer(emb)
    def forward(self, x_cat, x_cont, rec_data, save_path=None):
        if self.embedding:
            # Embed categoricals once here; the wrapped DAGMM receives the
            # already-embedded x_cat.
            x_cat = self.emb_layer(x_cat)
        if self.numerical:
            x = x_cont
        else:
            x = torch.cat([x_cat, x_cont], -1)
        x = x.detach().numpy()
        # NOTE: the SOM pickle is re-loaded on every forward call — correct
        # but wasteful; caching it would speed up training.
        with open(save_path, 'rb') as infile:
            som = pickle.load(infile)
        winners = [som.winner(i) for i in x]
        # Grid coordinates are divided by the hard-coded constant 10 —
        # presumably the SOM side length (args.somsize); TODO confirm.
        winners = torch.tensor([normalize_tuple(winners[i], 10) for i in range(len(winners))], dtype=torch.float32)
        return self.dagmm(x_cat, x_cont, rec_data, winners)
class DAGMM(nn.Module):
    def __init__(self, compression_module, estimation_module, gmm_module):
        """
        Args:
            compression_module (nn.Module): an autoencoder model that
                implements at least a function `self.encoder` to get the
                encoding of a given input.
            estimation_module (nn.Module): a FFNN model that estimates the
                membership of each input to each mixture of a GMM.
            gmm_module (nn.Module): a GMM model that implements its mixtures
                as a list of Mixture classes. The GMM model should implement
                the function `self._update_mixtures_parameters`.
        """
        super().__init__()
        self.compressor = compression_module
        self.estimator = estimation_module
        self.gmm = gmm_module

    def forward(self, x_cat, x_cont, rec_data, winners):
        """Return per-sample GMM energies for a batch.

        `winners` carries the normalised SOM coordinates appended to the
        latent vector by the SOM_DAGMM wrapper.
        """
        # Forward in the compression network.
        encoded = self.compressor.encode(x_cat, x_cont)
        decoded = self.compressor.decode(encoded)
        # Reconstruction-quality features for the estimation network.
        relative_ed = relative_euclidean_distance(rec_data, decoded)
        cosine_sim = cosine_similarity(rec_data, decoded)
        # Adding a dimension to prepare for concatenation.
        relative_ed = relative_ed.view(-1, 1)
        # BUG FIX: this previously read `relative_ed.view(-1, 1)`, silently
        # discarding the cosine similarity computed just above and feeding
        # the relative distance twice.
        cosine_sim = cosine_sim.view(-1, 1)
        latent_vectors = torch.cat([encoded, relative_ed, cosine_sim, winners], dim=1)
        # latent_vectors has shape [batch_size, dim_embedding + 2 + winners]
        # Updating the parameters of the mixture (EM-style, train mode only).
        if self.training:
            mixtures_affiliations = self.estimator(latent_vectors)
            # mixtures_affiliations has shape [batch_size, num_mixtures]
            self.gmm._update_mixtures_parameters(latent_vectors,
                                                 mixtures_affiliations)
        # Estimating the energy of the samples.
        return self.gmm(latent_vectors)
def relative_euclidean_distance(x1, x2, eps=1.e-8):
    """Per-sample relative distance ||x1 - x2|| / ||x1|| for
    [batch_size, dimension_embedding] tensors; returns shape [batch_size].

    The default `eps` was previously a module-level deprecated
    ``torch.autograd.Variable``; a plain float with ``clamp_min`` is
    numerically identical and keeps the function self-contained.
    """
    num = torch.norm(x1 - x2, p=2, dim=1)  # dim [batch_size]
    denom = torch.norm(x1, p=2, dim=1)  # dim [batch_size]
    return num / denom.clamp_min(eps)
def cosine_similarity(x1, x2, eps=1.e-8):
    """Per-sample cosine similarity for [batch_size, dimension_embedding]
    tensors; returns shape [batch_size].

    The default `eps` was previously a module-level deprecated
    ``torch.autograd.Variable``; a plain float with ``clamp_min`` is
    numerically identical and keeps the function self-contained.
    """
    dot_prod = torch.sum(x1 * x2, dim=1)  # dim [batch_size]
    dist_x1 = torch.norm(x1, p=2, dim=1)  # dim [batch_size]
    dist_x2 = torch.norm(x2, p=2, dim=1)  # dim [batch_size]
    return dot_prod / (dist_x1 * dist_x2).clamp_min(eps)
def normalize_tuple(x, norm_val):
    """Scale both members of a 2-tuple (a SOM grid coordinate) by norm_val."""
    first, second = x
    return (first / norm_val, second / norm_val)
| 4,471 | 36.266667 | 115 | py |
UADAD | UADAD-main/Code/unsupervised_methods/som_dagmm/gmm.py | """Implements a GMM model."""
import torch
import numpy as np
from torch import nn
class GMM(nn.Module):
    """Implements a Gaussian Mixture Model."""
    def __init__(self, num_mixtures, dimension_embedding):
        """Creates a Gaussian Mixture Model.
        Args:
            num_mixtures (int): the number of mixtures the model should have.
            dimension_embedding (int): the number of dimension of the embedding
                space (can also be thought as the input dimension of the model)
        """
        super().__init__()
        self.num_mixtures = num_mixtures
        self.dimension_embedding = dimension_embedding
        # (a stray debug `print(mixtures)` was removed here — it spammed
        # stdout on every model construction)
        mixtures = [Mixture(dimension_embedding) for _ in range(num_mixtures)]
        self.mixtures = nn.ModuleList(mixtures)
    def forward(self, inputs):
        """Return the DAGMM sample energy: -log of the summed unnormalised
        mixture likelihoods, shape [batch_size]."""
        out = None
        for mixture in self.mixtures:
            to_add = mixture(inputs, with_log=False)
            if out is None:
                out = to_add
            else:
                out += to_add
        return -torch.log(out)
    def _update_mixtures_parameters(self, samples, mixtures_affiliations):
        """
        Args:
            samples (Variable of shape [batch_size, dimension_embedding]):
                typically the input of the estimation network. The points
                in the embedding space.
            mixtures_affiliations (Variable of shape [batch_size, num_mixtures])
                the probability of affiliation of each sample to each mixture.
                Typically the output of the estimation network.
        """
        if not self.training:
            # This function should not be used when we are in eval mode.
            return
        for i, mixture in enumerate(self.mixtures):
            affiliations = mixtures_affiliations[:, i]
            mixture._update_parameters(samples, affiliations)
class Mixture(nn.Module):
    """A single (non-gradient-trained) Gaussian mixture component with
    weight Phi, mean mu and covariance Sigma, updated EM-style via
    `_update_parameters`."""
    def __init__(self, dimension_embedding):
        super().__init__()
        self.dimension_embedding = dimension_embedding
        # Phi: scalar mixture weight, randomly initialised in [0, 1).
        self.Phi = np.random.random([1])
        self.Phi = torch.from_numpy(self.Phi).float()
        self.Phi = nn.Parameter(self.Phi, requires_grad=False)
        # Mu is the center/mean of the mixtures.
        self.mu = 2.*np.random.random([dimension_embedding]) - 0.5
        self.mu = torch.from_numpy(self.mu).float()
        self.mu = nn.Parameter(self.mu, requires_grad=False)
        # Sigma encodes the shape of the gaussian 'bubble' of a given mixture.
        self.Sigma = np.eye(dimension_embedding, dimension_embedding)
        self.Sigma = torch.from_numpy(self.Sigma).float()
        self.Sigma = nn.Parameter(self.Sigma, requires_grad=False)
        # We'll use this to augment the diagonal of Sigma and make sure it is
        # inversible.
        self.eps_Sigma = torch.FloatTensor(
            np.diag([1.e-8 for _ in range(dimension_embedding)]))
    def forward(self, samples, with_log=True):
        """Samples has shape [batch_size, dimension_embedding]"""
        # TODO: cache the matrix inverse and determinant?
        # TODO: so ugly and probably inefficient: do we have to create those
        # new variables and conversions from numpy?
        batch_size, _ = samples.shape
        out_values = []
        inv_sigma = torch.inverse(self.Sigma)
        det_sigma = np.linalg.det(self.Sigma.data.cpu().numpy())
        det_sigma = torch.from_numpy(det_sigma.reshape([1])).float()
        det_sigma = torch.autograd.Variable(det_sigma)
        # Per-sample (unnormalised) Gaussian density; note the Python-level
        # loop detaches values from autograd (floats are extracted).
        for sample in samples:
            diff = (sample - self.mu).view(-1, 1)
            #det_sigma = torch.from_numpy(det_sigma).float()
            out = -0.5 * torch.mm(torch.mm(diff.view(1, -1), inv_sigma), diff)
            out = (self.Phi * torch.exp(out)) / (torch.sqrt(2. * np.pi * det_sigma))
            if with_log:
                out = -torch.log(out)
            out_values.append(float(out.data.cpu().numpy()))
        out = torch.autograd.Variable(torch.FloatTensor(out_values))
        return out
    def _update_parameters(self, samples, affiliations):
        """
        Args:
            samples (Variable of shape [batch_size, dimension_embedding]):
                typically the input of the estimation network. The points
                in the embedding space.
            mixtures_affiliations (Variable of shape [batch_size])
                the probability of affiliation of each sample to each mixture.
                Typically the output of the estimation network.
        """
        if not self.training:
            # This function should not be used when we are in eval mode.
            return
        batch_size, _ = samples.shape
        # Updating phi.
        phi = torch.mean(affiliations)
        self.Phi.data = phi.data
        # Updating mu.
        num = 0.
        for i in range(batch_size):
            z_i = samples[i, :]
            gamma_i = affiliations[i]
            num += gamma_i * z_i
        denom = torch.sum(affiliations)
        self.mu.data = (num / denom).data
        # Updating Sigma.
        mu = self.mu
        num = None
        for i in range(batch_size):
            z_i = samples[i, :]
            gamma_i = affiliations[i]
            diff = (z_i - mu).view(-1, 1)
            to_add = gamma_i * torch.mm(diff, diff.view(1, -1))
            if num is None:
                num = to_add
            else:
                num += to_add
        denom = torch.sum(affiliations)
        self.Sigma.data = (num / denom).data + self.eps_Sigma
    def gmm_loss (self, out, L1, L2):
        # term1: mean sample energy; term2: covariance-diagonal penalty.
        term1 = (L1 * torch.sum(out))/len(out)
        # NOTE(review): Sigma is (D, D) for a single component, so `k` here
        # is really D, and the loop adds the *same* full-diagonal sum k
        # times instead of one entry per iteration — looks like a
        # transcription of the multi-component DAGMM penalty; verify.
        k, D = self.Sigma.size()
        cov_diag = 0
        for i in range(k):
            cov_diag = cov_diag + torch.sum(1/(self.Sigma.diag() + 1e-8))
        term2 = L2 * cov_diag
        return (term1 + term2)
UADAD | UADAD-main/Code/unsupervised_methods/som_dagmm/compression_network.py | """Defines the compression network."""
import torch
from torch import nn
from fastai.layers import Embedding
from fastai.torch_core import Module
from typing import List, Tuple
class EmbeddingLayer(Module):
    """Per-column categorical embedder: column i of the input is looked up
    in table i and the embeddings are concatenated on the feature axis.
    ``emb_szs`` is a list of (cardinality, embedding_dim) pairs.
    (fastai's ``Module`` base does not require an explicit super().__init__().)
    """
    def __init__(self, emb_szs: List[Tuple[int, int]]):
        self.embeddings = torch.nn.ModuleList([Embedding(in_sz, out_sz) for in_sz, out_sz in emb_szs])
    def forward(self, x: torch.Tensor) -> torch.Tensor:
        x = [emb(x[..., i]) for i, emb in enumerate(self.embeddings)]
        x = torch.cat(x, dim=-1)
        return x
class CompressionNetwork(nn.Module ):
    """Defines a compression network (the DAGMM autoencoder).

    A small Tanh autoencoder: input_dim -> 10 -> latent_dim -> 10 ->
    output_dim, optionally preceded by a categorical embedding layer.
    """
    def __init__(self, embedding, numerical, input_dim, output_dim, emb_szs, latent_dim):
        super().__init__()
        self.embedding = embedding
        self.numerical = numerical
        if self.embedding:
            self.emb_layer = EmbeddingLayer(emb_szs)
        self.encoder = nn.Sequential(nn.Linear(input_dim, 10),
                                     nn.Tanh(),
                                     nn.Linear(10, latent_dim))
        self.decoder = nn.Sequential(nn.Linear(latent_dim, 10),
                                     nn.Tanh(),
                                     nn.Linear(10, output_dim))
        self._reconstruction_loss = nn.MSELoss()
    def forward(self, x_cat, x_cont):
        # Full round trip: (optional) embed, encode, decode.
        if self.embedding:
            x_cat = self.emb_layer(x_cat)
        if self.numerical:
            x = x_cont
        else:
            x = torch.cat([x_cat, x_cont], -1)
        encoded = self.encoder(x)
        decoded = self.decoder(encoded)
        return decoded
    def encode(self, x_cat, x_cont):
        # NOTE(review): unlike forward()/reconstruction_loss(), encode()
        # does NOT run x_cat through the embedding layer — in the SOM-DAGMM
        # pipeline the wrapper pre-embeds x_cat before calling the DAGMM, so
        # this is presumably deliberate; verify before reusing encode()
        # with raw categorical codes.
        if self.numerical:
            x = x_cont
        else:
            x = torch.cat([x_cat, x_cont], -1)
        return self.encoder(x)
    def decode(self, input):
        return self.decoder(input)
    def reconstruction_loss(self, x_cat, x_cont, rec_data):
        # MSE between the decoder output and `rec_data` (the model's
        # reconstruction target as assembled by the caller).
        if self.embedding:
            x_cat = self.emb_layer(x_cat)
        if self.numerical:
            x = x_cont
        else:
            x = torch.cat([x_cat, x_cont], -1)
        encoded = self.encoder(x)
        decoded = self.decoder(encoded)
        return self._reconstruction_loss(decoded, rec_data)
UADAD | UADAD-main/Code/unsupervised_methods/dagmm_self/estimation_network.py | import torch
from torch import nn
class EstimationNetwork(nn.Module):
    """Estimation network for DAGMM: a small Tanh MLP with dropout whose
    softmax output gives each sample's soft mixture membership."""
    def __init__(self, dim_embed, num_mixtures):
        super().__init__()
        stack = [
            nn.Linear(dim_embed, 10),
            nn.Tanh(),
            nn.Dropout(p=0.5),
            nn.Linear(10, num_mixtures),
            nn.Softmax(dim=1),
        ]
        self.net = nn.Sequential(*stack)
    def forward(self, input):
        affiliations = self.net(input)
        return affiliations
UADAD | UADAD-main/Code/unsupervised_methods/dagmm_self/model.py | """Implements all the components of the DAGMM model."""
import torch
import numpy as np
from torch import nn
eps = torch.autograd.Variable(torch.FloatTensor([1.e-8]), requires_grad=False)
class DAGMM(nn.Module):
    def __init__(self, compression_module, estimation_module, gmm_module):
        """
        Args:
            compression_module (nn.Module): an autoencoder model that
                implements at least a function `self.encoder` to get the
                encoding of a given input.
            estimation_module (nn.Module): a FFNN model that estimates the
                membership of each input to each mixture of a GMM.
            gmm_module (nn.Module): a GMM model that implements its mixtures
                as a list of Mixture classes. The GMM model should implement
                the function `self._update_mixtures_parameters`.
        """
        super().__init__()
        self.compressor = compression_module
        self.estimator = estimation_module
        self.gmm = gmm_module

    def forward(self, x_cat, x_cont, rec_data):
        """Return per-sample GMM energies for a batch."""
        # Forward in the compression network.
        encoded = self.compressor.encode(x_cat, x_cont)
        decoded = self.compressor.decode(encoded)
        # Reconstruction-quality features for the estimation network.
        relative_ed = relative_euclidean_distance(rec_data, decoded)
        cosine_sim = cosine_similarity(rec_data, decoded)
        # Adding a dimension to prepare for concatenation.
        relative_ed = relative_ed.view(-1, 1)
        # BUG FIX: this previously read `relative_ed.view(-1, 1)`, silently
        # discarding the cosine similarity computed just above and feeding
        # the relative distance twice.
        cosine_sim = cosine_sim.view(-1, 1)
        latent_vectors = torch.cat([encoded, relative_ed, cosine_sim], dim=1)
        # latent_vectors has shape [batch_size, dim_embedding + 2]
        # Updating the parameters of the mixture (EM-style, train mode only).
        if self.training:
            mixtures_affiliations = self.estimator(latent_vectors)
            # mixtures_affiliations has shape [batch_size, num_mixtures]
            self.gmm._update_mixtures_parameters(latent_vectors,
                                                 mixtures_affiliations)
        # Estimating the energy of the samples.
        return self.gmm(latent_vectors)
def relative_euclidean_distance(x1, x2, eps=1e-8):
    """Per-sample ||x1 - x2|| / max(||x1||, eps).

    x1 and x2 have shape [batch_size, dimension_embedding]; the result has
    shape [batch_size]. `eps` guards against division by zero.
    (Rewritten to take a plain float default instead of the module-level
    deprecated `torch.autograd.Variable`; numerically identical.)
    """
    num = torch.norm(x1 - x2, p=2, dim=1)    # dim [batch_size]
    denom = torch.norm(x1, p=2, dim=1)       # dim [batch_size]
    # Elementwise floor of the denominator; accepts float or tensor eps.
    return num / torch.max(denom, torch.as_tensor(eps, dtype=denom.dtype, device=denom.device))
def cosine_similarity(x1, x2, eps=1e-8):
    """Per-sample cosine similarity between rows of x1 and x2.

    x1 and x2 have shape [batch_size, dimension_embedding]; the result has
    shape [batch_size]. `eps` guards against division by zero.
    (Rewritten to take a plain float default instead of the module-level
    deprecated `torch.autograd.Variable`; numerically identical.)
    """
    dot_prod = torch.sum(x1 * x2, dim=1)     # dim [batch_size]
    dist_x1 = torch.norm(x1, p=2, dim=1)     # dim [batch_size]
    dist_x2 = torch.norm(x2, p=2, dim=1)     # dim [batch_size]
    denom = dist_x1 * dist_x2
    return dot_prod / torch.max(denom, torch.as_tensor(eps, dtype=denom.dtype, device=denom.device))
| 2,870 | 39.43662 | 78 | py |
UADAD | UADAD-main/Code/unsupervised_methods/dagmm_self/gmm.py | """Implements a GMM model."""
import torch
import numpy as np
from torch import nn
class GMM(nn.Module):
    """Gaussian Mixture Model built from independent Mixture sub-modules."""

    def __init__(self, num_mixtures, dimension_embedding):
        """Create a GMM with `num_mixtures` components over a
        `dimension_embedding`-dimensional embedding space."""
        super().__init__()
        self.num_mixtures = num_mixtures
        self.dimension_embedding = dimension_embedding
        self.mixtures = nn.ModuleList(
            [Mixture(dimension_embedding) for _ in range(num_mixtures)])

    def forward(self, inputs):
        # Sum the (non-log) densities of every component, then return the
        # negative log-likelihood ("energy") per sample.
        total = None
        for component in self.mixtures:
            contribution = component(inputs, with_log=False)
            total = contribution if total is None else total + contribution
        return -torch.log(total)

    def _update_mixtures_parameters(self, samples, mixtures_affiliations):
        """Update every component from its soft membership column.

        Args:
            samples: shape [batch_size, dimension_embedding] — points in the
                embedding space (typically the estimation network's input).
            mixtures_affiliations: shape [batch_size, num_mixtures] —
                per-sample membership probabilities (typically the
                estimation network's output).
        """
        # Parameter updates only make sense while training.
        if not self.training:
            return
        for idx, component in enumerate(self.mixtures):
            component._update_parameters(samples, mixtures_affiliations[:, idx])
class Mixture(nn.Module):
    """A single Gaussian component of the GMM.

    Phi (mixing weight), mu (mean) and Sigma (covariance) are stored as
    non-trainable nn.Parameters and updated via EM-style moment estimates in
    `_update_parameters`, not by backpropagation.
    """
    def __init__(self, dimension_embedding):
        super().__init__()
        self.dimension_embedding = dimension_embedding
        # Phi is the mixing coefficient, randomly initialised in [0, 1).
        self.Phi = np.random.random([1])
        self.Phi = torch.from_numpy(self.Phi).float()
        self.Phi = nn.Parameter(self.Phi, requires_grad=False)
        # Mu is the center/mean of the mixtures.
        self.mu = 2.*np.random.random([dimension_embedding]) - 0.5
        self.mu = torch.from_numpy(self.mu).float()
        self.mu = nn.Parameter(self.mu, requires_grad=False)
        # Sigma encodes the shape of the gaussian 'bubble' of a given mixture.
        self.Sigma = np.eye(dimension_embedding, dimension_embedding)
        self.Sigma = torch.from_numpy(self.Sigma).float()
        self.Sigma = nn.Parameter(self.Sigma, requires_grad=False)
        # We'll use this to augment the diagonal of Sigma and make sure it is
        # inversible.
        self.eps_Sigma = torch.FloatTensor(
            np.diag([1.e-8 for _ in range(dimension_embedding)]))
    def forward(self, samples, with_log=True):
        """Samples has shape [batch_size, dimension_embedding]

        Returns the per-sample (optionally negative-log) density scaled by Phi.
        NOTE(review): the output is rebuilt from Python floats at the end, so
        it is detached from the autograd graph — no gradients flow through.
        NOTE(review): the normaliser is sqrt(2*pi*det(Sigma)) rather than
        sqrt((2*pi)^D * det(Sigma)) of the full multivariate normal — confirm
        this is intentional.
        """
        # TODO: cache the matrix inverse and determinant?
        # TODO: so ugly and probably inefficient: do we have to create those
        # new variables and conversions from numpy?
        batch_size, _ = samples.shape
        out_values = []
        inv_sigma = torch.inverse(self.Sigma)
        det_sigma = np.linalg.det(self.Sigma.data.cpu().numpy())
        det_sigma = torch.from_numpy(det_sigma.reshape([1])).float()
        det_sigma = torch.autograd.Variable(det_sigma)
        for sample in samples:
            # Quadratic form -0.5 * d^T Sigma^-1 d (Mahalanobis-style).
            diff = (sample - self.mu).view(-1, 1)
            #det_sigma = torch.from_numpy(det_sigma).float()
            out = -0.5 * torch.mm(torch.mm(diff.view(1, -1), inv_sigma), diff)
            out = (self.Phi * torch.exp(out)) / (torch.sqrt(2. * np.pi * det_sigma))
            if with_log:
                out = -torch.log(out)
            out_values.append(float(out.data.cpu().numpy()))
        out = torch.autograd.Variable(torch.FloatTensor(out_values))
        return out
    def _update_parameters(self, samples, affiliations):
        """
        Args:
            samples (Variable of shape [batch_size, dimension_embedding]):
                typically the input of the estimation network. The points
                in the embedding space.
            mixtures_affiliations (Variable of shape [batch_size])
                the probability of affiliation of each sample to each mixture.
                Typically the output of the estimation network.
        """
        if not self.training:
            # This function should not be used when we are in eval mode.
            return
        batch_size, _ = samples.shape
        # Updating phi: mean affiliation of this component over the batch.
        phi = torch.mean(affiliations)
        self.Phi.data = phi.data
        # Updating mu: affiliation-weighted mean of the samples.
        num = 0.
        for i in range(batch_size):
            z_i = samples[i, :]
            gamma_i = affiliations[i]
            num += gamma_i * z_i
        denom = torch.sum(affiliations)
        self.mu.data = (num / denom).data
        # Updating Sigma: affiliation-weighted outer products around mu,
        # with a tiny diagonal added (eps_Sigma) to keep it invertible.
        mu = self.mu
        num = None
        for i in range(batch_size):
            z_i = samples[i, :]
            gamma_i = affiliations[i]
            diff = (z_i - mu).view(-1, 1)
            to_add = gamma_i * torch.mm(diff, diff.view(1, -1))
            if num is None:
                num = to_add
            else:
                num += to_add
        denom = torch.sum(affiliations)
        self.Sigma.data = (num / denom).data + self.eps_Sigma
    def gmm_loss (self, out, L1, L2):
        # Mean energy (weighted by L1) plus an L2-weighted covariance
        # regulariser penalising small diagonal entries of Sigma.
        # NOTE(review): the loop adds the same full-diagonal sum k times —
        # confirm whether per-row terms were intended instead.
        term1 = (L1 * torch.sum(out))/len(out)
        k, D = self.Sigma.size()
        cov_diag = 0
        for i in range(k):
            cov_diag = cov_diag + torch.sum(1/(self.Sigma.diag() + 1e-7))
        term2 = L2 * cov_diag
        return (term1 + term2)
| 5,845 | 36 | 84 | py |
UADAD | UADAD-main/Code/unsupervised_methods/dagmm_self/compression_network.py | """Defines the compression network."""
import torch
from torch import nn
from fastai.layers import Embedding
from fastai.torch_core import Module
from typing import List, Tuple
class EmbeddingLayer(Module):
    """Embeds each categorical column separately and concatenates the results."""

    def __init__(self, emb_szs: List[Tuple[int, int]]):
        # NOTE: fastai's Module is assumed to run nn.Module initialisation
        # automatically, which is why there is no explicit super().__init__().
        embs = [Embedding(n_in, n_out) for n_in, n_out in emb_szs]
        self.embeddings = torch.nn.ModuleList(embs)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # Column i of x feeds embedding i; outputs are joined on the last axis.
        pieces = [layer(x[..., col]) for col, layer in enumerate(self.embeddings)]
        return torch.cat(pieces, dim=-1)
class CompressionNetwork(nn.Module):
    """Autoencoder used as the compression half of a DAGMM model."""

    def __init__(self, embedding, numerical, input_dim, output_dim, emb_szs, latent_dim):
        super().__init__()
        self.embedding = embedding
        self.numerical = numerical
        if self.embedding:
            self.emb_layer = EmbeddingLayer(emb_szs)
        self.encoder = nn.Sequential(
            nn.Linear(input_dim, 10), nn.Tanh(), nn.Linear(10, latent_dim))
        self.decoder = nn.Sequential(
            nn.Linear(latent_dim, 10), nn.Tanh(), nn.Linear(10, output_dim))
        self._reconstruction_loss = nn.MSELoss()

    def _assemble(self, x_cat, x_cont):
        # Network input: continuous features alone, or categorical ++ continuous.
        if self.numerical:
            return x_cont
        return torch.cat([x_cat, x_cont], -1)

    def forward(self, x_cat, x_cont):
        """Full encode/decode pass; returns the reconstruction."""
        if self.embedding:
            x_cat = self.emb_layer(x_cat)
        return self.decoder(self.encoder(self._assemble(x_cat, x_cont)))

    def encode(self, x_cat, x_cont):
        """Return the latent code for the given inputs."""
        if self.embedding:
            x_cat = self.emb_layer(x_cat)
        return self.encoder(self._assemble(x_cat, x_cont))

    def decode(self, input):
        """Decode a latent code back to feature space."""
        return self.decoder(input)

    def reconstruction_loss(self, x_cat, x_cont, rec_data):
        """MSE between the reconstruction of (x_cat, x_cont) and rec_data."""
        if self.embedding:
            x_cat = self.emb_layer(x_cat)
        rebuilt = self.decoder(self.encoder(self._assemble(x_cat, x_cont)))
        return self._reconstruction_loss(rebuilt, rec_data)
UADAD | UADAD-main/Code/datasets/utils.py | from math import ceil
import pandas as pd
import os
import sys
import numpy as np
import torch
import qGEL
from sklearn import preprocessing
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import precision_score, recall_score, f1_score
from sklearn.metrics import confusion_matrix
from sklearn import metrics
def load_data(file_path, names=None):
    """Read a CSV file into a DataFrame, optionally overriding column names."""
    return pd.read_csv(file_path, names=names)
def one_hot_encoding(data, cols):
    """One-hot encode the listed columns; returns only the encoded frame.

    The input frame is not modified.
    """
    encoded = pd.DataFrame()
    for col in cols:
        dummies = pd.get_dummies(data[col], prefix=col)
        encoded = pd.concat([encoded, dummies], axis=1)
    return encoded
def label_encoding(data, cols):
    """Integer-encode each listed column with sklearn's LabelEncoder.

    Each column is fit independently (fit_transform re-fits the encoder per
    column); returns a new frame with the encoded columns only.
    """
    encoder = preprocessing.LabelEncoder()
    encoded = pd.DataFrame()
    for col in cols:
        codes = encoder.fit_transform(data[col])
        encoded = pd.concat([encoded, pd.DataFrame(codes, columns=[col])], axis=1)
    return encoded
def gel_encoding(data, cols):
    """One-hot encode `cols`, then reduce them to len(cols) dimensions via qGEL.

    Returns a new frame whose columns are the integers 0..len(cols)-1.
    """
    onehot = pd.DataFrame()
    for col in cols:
        onehot = pd.concat([onehot, pd.get_dummies(data[col], prefix=col)], axis=1)
    k = int(len(cols))
    embedding, vectors, source_data_ = qGEL.qgel(source_data_=onehot,
                                                 k=k,
                                                 learning_method='unsupervised')
    return pd.DataFrame(embedding, columns=[x for x in range(0, k)])
def get_labels(data, name):
    """Extract the ground-truth binary label for dataset `name`.

    Drops the label column from `data` IN PLACE and returns the labels
    (1 = anomaly/fraud, 0 = normal for the encoded datasets).
    """
    if name == 'credit_card':
        label = data['Class']
        data.drop(['Class'], axis=1, inplace=True)
    elif name == 'arrhythmia':
        # Classes listed here are considered anomalous; all others normal.
        anomaly_classes = {3, 4, 5, 7, 8, 9, 10, 14, 15}
        label = np.array([1 if v in anomaly_classes else 0 for v in data['class']])
        data.drop(['class'], axis=1, inplace=True)
    elif name == 'kdd':
        label = np.where(data[41] == "normal", 0, 1)
        data.drop([41], axis=1, inplace=True)
    elif name == 'vehicle_claims':
        label = data['Label']
        data.drop(['Label'], axis=1, inplace=True)
    elif name == 'vehicle_insurance':
        label = data['FraudFound_P']
        data.drop(['FraudFound_P'], axis=1, inplace=True)
    elif name == 'car_insurance':
        label = np.where(data['fraud_reported'] == "Y", 1, 0)
        data.drop(['fraud_reported'], axis=1, inplace=True)
    return label
def get_normal_data(data, name):
    """Randomly sample len(data)/2 rows from the normal portion of `data`.

    Adds a temporary 'label' column to `data` (left on the input frame) and
    returns the sampled normal rows without that helper column.
    """
    sample_len = len(data) / 2
    if name == 'credit_card':
        label = data['Class']
    elif name == 'arrhythmia':
        anomaly_classes = [3, 4, 5, 7, 8, 9, 10, 14, 15]
        label = np.array([1 if v in anomaly_classes else 0 for v in data['class']])
    elif name == 'kdd':
        label = np.where(data[41] == "normal", 0, 1)
    elif name == 'vehicle_claims':
        label = data['Label']
    elif name == 'vehicle_insurance':
        label = data['FraudFound_P']
    elif name == 'car_insurance':
        label = np.where(data['fraud_reported'] == "Y", 1, 0)
    data['label'] = label
    normal_data = data[data['label'] == 0]
    sampled = normal_data.sample(n=int(sample_len))
    return sampled.drop('label', axis=1)
def get_scores(y_pred, y):
    """Precision, recall, F1 and ROC-AUC for binary predictions.

    NOTE(review): precision/recall/f1 receive the predictions in the first
    (y_true) slot — confirm this argument order is intended.
    """
    precision = precision_score(y_pred, y, average='binary')
    recall = recall_score(y_pred, y, average='binary')
    f1 = f1_score(y_pred, y, average='binary')
    fpr, tpr, _thresholds = metrics.roc_curve(y, y_pred, pos_label=1)
    return precision, recall, f1, metrics.auc(fpr, tpr)
def get_confusion_matrix(y_pred, y):
    """Unpack the 2x2 confusion matrix into (tn, fp, fn, tp) counts.

    NOTE(review): predictions are passed as the first argument, which sklearn
    treats as y_true — the fp/fn counts are therefore swapped relative to the
    conventional (y_true, y_pred) call. Confirm this is intended.
    """
    tn, fp, fn, tp = confusion_matrix(y_pred, y).ravel()
    return tn, fp, fn, tp
def normalize_cols(data):
    """Min-max scale every column to [0, 1].

    Returns a new DataFrame; column labels become default integers.
    """
    scaler = MinMaxScaler(feature_range=(0, 1))
    return pd.DataFrame(scaler.fit_transform(data))
def merge_cols(data_1, data_2):
    """Concatenate two frames side by side (aligned on their indexes)."""
    return pd.concat([data_1, data_2], axis=1)
def remove_cols(data, cols):
    """Drop the given columns from `data` in place; returns the mutated frame."""
    data.drop(columns=cols, inplace=True)
    return data
def split_data(data, Y, split=0.7):
    """Positional train/test split of features and labels at fraction `split`.

    NOTE(review): `data` is sliced with `.loc` using FLOAT bounds
    (split*len(data)), which slices by index label (inclusive upper bound),
    while `Y` is sliced positionally with int()/ceil(). With a default
    RangeIndex the X and Y splits can be off by one row relative to each
    other — confirm alignment before relying on this.
    """
    train = data.loc[:split*len(data),]
    test = data.loc[split*len(data)+1:,]
    Y_train = Y[:int(split*len(data)),]
    Y_test = Y[ceil(split*len(data)+1):,]
    return train, test, Y_train, Y_test
def fill_na(data):
    """Replace every NaN in `data` with 0; returns a new frame."""
    return data.fillna(value=0, axis=1)
def relative_euclidean_distance(x1, x2, eps=1e-8):
    """Per-sample ||x1 - x2|| / max(||x1||, eps).

    FIX: the previous implementation divided by `torch.max(denom)` — the
    single largest norm in the whole batch — instead of each sample's own
    norm, and left debug `print` statements in. This now matches the
    per-sample definition used by the DAGMM model code. `eps` (new optional
    parameter) guards against division by zero.
    """
    num = torch.norm(x1 - x2, p=2, dim=1)
    denom = torch.norm(x1, p=2, dim=1)
    return num / denom.clamp(min=eps)
def cosine_similarity(x1, x2, eps=1e-8):
    """Per-sample cosine similarity between rows of x1 and x2.

    FIX: the previous return used `np.max(dist_x1*dist_x2, eps)`, which
    passes the float `eps` as numpy's `axis` argument and raises at runtime.
    The denominator is now floored elementwise at `eps` instead.
    """
    dot_prod = torch.sum(x1 * x2, dim=1)
    dist_x1 = torch.norm(x1, p=2, dim=1)
    dist_x2 = torch.norm(x2, p=2, dim=1)
    return dot_prod / (dist_x1 * dist_x2).clamp(min=eps)
UADAD | UADAD-main/Code/datasets/vehicle_claims.py | import os
import pandas as pd
from torch.utils.data import Dataset
import torch
import numpy as np
from utils import *
class Vehicle_Claims(Dataset):
    """Vehicle-claims tabular dataset yielding (categorical, continuous, label).

    Labels are read from a companion CSV next to `path`. When
    `embedding_layer` is True, category→index maps and embedding sizes are
    derived from the full labeled file so categorical columns are
    integer-coded for an EmbeddingLayer.
    """
    def __init__(self, path, encoding = "label_encode", embedding_layer = False, label_file="train_Y.csv"):
        path = path
        categorical_cols = ['Maker','Reg_year', ' Genmodel', 'Color', 'Bodytype', 'Engin_size', 'Gearbox', 'Fuel_type',
                            'Seat_num', 'Door_num', 'issue', 'issue_id', 'repair_complexity']
        cols_to_remove = [' Genmodel_ID', 'Adv_year', 'Adv_month', 'Adv_day', 'breakdown_date', 'repair_date', 'category_anomaly']
        numerical_cols = ['Runned_Miles', 'Price', 'repair_cost', 'repair_hours']
        data = load_data(path)
        #data = remove_cols(data, cols_to_remove)
        # Labels live in a separate file in the same directory as `path`.
        label = load_data(os.path.join(os.path.split(path)[0], label_file))
        self.label = label["Label"]
        self.cont_cols = data[numerical_cols]
        # Everything that is not numerical is treated as categorical.
        self.cat_cols = data.drop(numerical_cols, axis=1)
        self.input_dim = self.cat_cols.shape[1] + self.cont_cols.shape[1]
        self.output_dim = self.input_dim
        self.embed = embedding_layer
        if embedding_layer:
            # Build per-column category→index maps from the full labeled file
            # so the vocabulary covers categories absent from this split.
            embed_data = load_data(os.path.join(os.path.split(path)[0], "vehicle_claims_labeled.csv"))
            embed_data = remove_cols(embed_data, cols_to_remove)
            embed_data = remove_cols(embed_data, ['Label'])
            cat_cols = embed_data.drop(numerical_cols, axis=1).astype("category")
            embedded_cols = {n: len(col.cat.categories) for n,col in cat_cols.items() if (col.dtype == "category")}
            # Common heuristic: embedding width = min(50, (cardinality+1)//2).
            self.embedding_sizes = [(n_categories, min(50, (n_categories+1)//2)) for _,n_categories in embedded_cols.items()]
            embedded_col_names = embedded_cols.keys()
            embed = []
            for i, name in enumerate(embedded_col_names):
                embed_elem = {cat : n for n, cat in enumerate(cat_cols[name].cat.categories)}
                embed.append(embed_elem)
                self.cat_cols[name] = self.cat_cols[name].replace(embed_elem)
            self.input_dim = sum(i[1] for i in self.embedding_sizes) + (self.cont_cols).shape[1]
            self.output_dim = data.shape[1]
    def __len__(self):
        # Dataset length follows the label file.
        return(len(self.label))
    def __getitem__(self, idx):
        # NOTE(review): the whole continuous frame is re-normalized and both
        # frames re-cast on EVERY item access — consider hoisting this work
        # into __init__ (results appear idempotent after the first call).
        self.cont_cols = normalize_cols(self.cont_cols)
        cat_cols = (self.cat_cols.values.astype(np.float32))
        if self.embed:
            # Embedding layers need integer category indices, not floats.
            cat_cols = (self.cat_cols.values.astype(np.int32))
        cont_cols = (self.cont_cols.values.astype(np.float32))
        label = (self.label.astype(np.int32))
        return (cat_cols[idx], cont_cols[idx], label[idx])
| 2,739 | 46.241379 | 131 | py |
UADAD | UADAD-main/Code/datasets/vehicle_insurance.py | import os
import pandas as pd
from torch.utils.data import Dataset
import torch
import numpy as np
from utils import *
class Vehicle_Insurance(Dataset):
    """Vehicle-insurance fraud dataset yielding (categorical, continuous, label).

    Labels come from a companion CSV next to `path`; with
    `embedding_layer=True` categorical columns are integer-coded using
    vocabularies derived from the full "fraud_oracle.csv" file.
    """
    def __init__(self, path, encoding = "label_encode", embedding_layer = False, label_file="train_Y.csv"):
        path = path
        categorical_cols = ['Make', 'AccidentArea', 'Sex', 'MaritalStatus', 'Fault', 'PolicyType',
                            'VehicleCategory', 'Deductible', 'Days_Policy_Accident', 'Days_Policy_Claim',
                            'AgeOfVehicle', 'AgeOfPolicyHolder', 'PoliceReportFiled', 'WitnessPresent',
                            'AgentType', 'NumberOfSuppliments', 'AddressChange_Claim', 'VehiclePrice',
                            'PastNumberOfClaims', 'NumberOfCars', 'BasePolicy', 'Month', 'MonthClaimed',
                            'DayOfWeek', 'DayOfWeekClaimed']
        numerical_cols = ['Year', 'WeekOfMonth', 'WeekOfMonthClaimed', 'RepNumber', 'DriverRating', 'Age']
        data = load_data(path)
        # PolicyNumber is an identifier, not a feature.
        data = remove_cols(data, ['PolicyNumber'])
        label = load_data(os.path.join(os.path.split(path)[0], label_file))
        self.label = label["FraudFound_P"]
        self.cont_cols = data[numerical_cols]
        self.cat_cols = data.drop(numerical_cols, axis=1)
        self.input_dim = self.cat_cols.shape[1] + self.cont_cols.shape[1]
        self.output_dim = self.input_dim
        self.embed = embedding_layer
        if embedding_layer:
            # Category vocabularies are built from the full source file so
            # they cover values missing from this split.
            embed_data = load_data(os.path.join(os.path.split(path)[0], "fraud_oracle.csv"))
            embed_data = remove_cols(embed_data, ['FraudFound_P', 'PolicyNumber'])
            cat_cols = embed_data.drop(numerical_cols, axis=1).astype("category")
            embedded_cols = {n: len(col.cat.categories) for n,col in cat_cols.items() if (col.dtype == "category")}
            # Embedding width heuristic: min(50, (cardinality+1)//2).
            self.embedding_sizes = [(n_categories, min(50, (n_categories+1)//2)) for _,n_categories in embedded_cols.items()]
            embedded_col_names = embedded_cols.keys()
            embed = []
            for i, name in enumerate(embedded_col_names):
                embed_elem = {cat : n for n, cat in enumerate(cat_cols[name].cat.categories)}
                embed.append(embed_elem)
                self.cat_cols[name] = self.cat_cols[name].replace(embed_elem)
            self.input_dim = sum(i[1] for i in self.embedding_sizes) + (self.cont_cols).shape[1]
            self.output_dim = data.shape[1]
    def __len__(self):
        return(len(self.label))
    def __getitem__(self, idx):
        # NOTE(review): re-normalizes/re-casts the full frames on every item
        # access — consider hoisting into __init__.
        self.cont_cols = normalize_cols(self.cont_cols)
        cat_cols = (self.cat_cols.values.astype(np.float32))
        if self.embed:
            # Embedding layers need integer category indices.
            cat_cols = (self.cat_cols.values.astype(np.int32))
        cont_cols = (self.cont_cols.values.astype(np.float32))
        label = (self.label.astype(np.int32))
        return (cat_cols[idx], cont_cols[idx], label[idx])
| 2,885 | 53.45283 | 125 | py |
UADAD | UADAD-main/Code/datasets/car_insurance.py | import os
import pandas as pd
from torch.utils.data import Dataset
import torch
import numpy as np
from utils import *
class Car_insurance(Dataset):
    """Car-insurance claims dataset yielding (categorical, continuous, label).

    Labels come from a companion CSV next to `path`; with
    `embedding_layer=True` categorical columns are integer-coded using
    vocabularies derived from "insurance_claims.csv".
    """
    def __init__(self, path, encoding = "label_encode", embedding_layer = False, label_file="train_Y.csv"):
        path = path
        categorical_cols = ['policy_state', 'umbrella_limit', 'insured_sex', 'insured_education_level',
                            'insured_occupation', 'insured_hobbies', 'insured_relationship', 'incident_type',
                            'collision_type', 'incident_severity', 'authorities_contacted', 'incident_state', 'incident_city',
                            'property_damage', 'police_report_available', 'auto_make', 'auto_model']
        cols_to_remove = ['policy_number', 'policy_bind_date', 'policy_csl', 'incident_location', 'incident_date', '_c39']
        numerical_cols = ['months_as_customer', 'age', 'policy_deductable', 'policy_annual_premium', 'insured_zip' ,'capital-gains',
                          'capital-loss', 'incident_hour_of_the_day', 'number_of_vehicles_involved', 'bodily_injuries', 'witnesses', 'total_claim_amount',
                          'injury_claim', 'property_claim', 'vehicle_claim', 'auto_year']
        data = load_data(path)
        #data = remove_cols(data, cols_to_remove)
        label = load_data(os.path.join(os.path.split(path)[0], label_file))
        self.label = label["Label"]
        self.cont_cols = data[numerical_cols]
        self.cat_cols = data.drop(numerical_cols, axis=1)
        self.input_dim = self.cat_cols.shape[1] + self.cont_cols.shape[1]
        self.output_dim = self.input_dim
        self.embed = embedding_layer
        if embedding_layer:
            # Category vocabularies are built from the full source file.
            embed_data = load_data(os.path.join(os.path.split(path)[0], "insurance_claims.csv"))
            cat_cols = embed_data[categorical_cols].astype("category")
            embedded_cols = {n: len(col.cat.categories) for n,col in cat_cols.items() if (col.dtype == "category")}
            # Embedding width heuristic: min(50, (cardinality+1)//2).
            self.embedding_sizes = [(n_categories, min(50, (n_categories+1)//2)) for _,n_categories in embedded_cols.items()]
            embedded_col_names = embedded_cols.keys()
            embed = []
            for i, name in enumerate(embedded_col_names):
                embed_elem = {cat : n for n, cat in enumerate(cat_cols[name].cat.categories)}
                embed.append(embed_elem)
                self.cat_cols[name] = self.cat_cols[name].replace(embed_elem)
            self.input_dim = sum(i[1] for i in self.embedding_sizes) + (self.cont_cols).shape[1]
            self.output_dim = data.shape[1]
    def __len__(self):
        return(len(self.label))
    def __getitem__(self, idx):
        # NOTE(review): re-normalizes/re-casts the full frames on every item
        # access — consider hoisting into __init__.
        self.cont_cols = normalize_cols(self.cont_cols)
        cat_cols = (self.cat_cols.values.astype(np.float32))
        if self.embed:
            # Embedding layers need integer category indices.
            cat_cols = (self.cat_cols.values.astype(np.int32))
        cont_cols = (self.cont_cols.values.astype(np.float32))
        label = (self.label.astype(np.int32))
        return (cat_cols[idx], cont_cols[idx], label[idx])
UADAD | UADAD-main/Code/classic_ML/SOM.py | import numpy as np
from minisom import MiniSom
import torch
import numpy as np
from torch import nn
from fastai.layers import Embedding
from fastai.torch_core import Module
from typing import List, Tuple
class EmbeddingLayer(Module):
    """Per-column categorical embeddings concatenated along the last axis."""

    def __init__(self, emb_szs: List[Tuple[int, int]]):
        # NOTE: fastai's Module is assumed to run nn.Module initialisation
        # automatically, which is why there is no explicit super().__init__().
        self.embeddings = torch.nn.ModuleList(
            [Embedding(n_cat, n_dim) for n_cat, n_dim in emb_szs])

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        chunks = []
        for col, emb in enumerate(self.embeddings):
            chunks.append(emb(x[..., col]))
        return torch.cat(chunks, dim=-1)
def som_train(data, x=10, y=10, sigma=1, learning_rate=0.05, iters=10000):
    """Fit an x-by-y MiniSom self-organising map on `data` and return it."""
    print("SOM training started:")
    som = MiniSom(x=x, y=y, input_len=data.shape[1], sigma=sigma,
                  learning_rate=learning_rate)
    som.random_weights_init(data)
    som.train_random(data, iters)
    return som
def som_embedding_data(x_cat, x_cont, emb):
    """Embed categorical columns and concatenate them with continuous features.

    Args:
        x_cat: integer-coded categorical tensor, one column per feature.
        x_cont: continuous feature tensor.
        emb: list of (num_categories, embedding_dim) pairs for EmbeddingLayer.

    Returns:
        Tensor of embedded categorical features concatenated with `x_cont`
        along the last dimension.
    """
    # (Removed a commented-out debug block that dumped embedding weights to
    # 'emb_weights.npy'.)
    emb_layer = EmbeddingLayer(emb)
    x_cat = emb_layer(x_cat)
    return torch.cat([x_cat, x_cont], -1)
def som_pred(som_model, data):
    """Per-sample quantisation error of `data` w.r.t. a trained SOM.

    `data` is a torch tensor; it is converted to numpy before being passed
    to the SOM's `quantization` method.
    """
    arr = data.numpy()
    return np.linalg.norm(som_model.quantization(arr) - arr, axis=1)
CRB-active-3Ddet | CRB-active-3Ddet-main/setup.py | import os
import subprocess
from setuptools import find_packages, setup
from torch.utils.cpp_extension import BuildExtension, CUDAExtension
def get_git_commit_number():
    """Short (7-char) HEAD commit hash, or '0000000' outside a git checkout."""
    if not os.path.exists('.git'):
        return '0000000'
    proc = subprocess.run(['git', 'rev-parse', 'HEAD'], stdout=subprocess.PIPE)
    return proc.stdout.decode('utf-8')[:7]
def make_cuda_ext(name, module, sources):
    """Build a CUDAExtension named `module.name`.

    Source paths are interpreted relative to the module's package directory.
    """
    src_paths = [os.path.join(*module.split('.'), src) for src in sources]
    return CUDAExtension(name='%s.%s' % (module, name), sources=src_paths)
def write_version_to_file(version, target_file):
    """Write a `__version__ = "..."` line (plus newline) to `target_file`."""
    with open(target_file, 'w') as f:
        f.write('__version__ = "%s"\n' % version)
if __name__ == '__main__':
    # Stamp the current git commit into the package version and persist it.
    version = '0.5.2+%s' % get_git_commit_number()
    write_version_to_file(version, 'pcdet/version.py')
    # Package metadata plus the CUDA extension modules built at install time.
    setup(
        name='pcdet',
        version=version,
        description='OpenPCDet is a general codebase for 3D object detection from point cloud',
        install_requires=[
            'numpy<=1.20',
            'llvmlite',
            'numba',
            'tensorboardX',
            'easydict',
            'pyyaml',
            'scikit-image',
            'tqdm',
            'SharedArray',
            # 'spconv', # spconv has different names depending on the cuda version
        ],
        author='Shaoshuai Shi',
        author_email='shaoshuaics@gmail.com',
        license='Apache License 2.0',
        packages=find_packages(exclude=['tools', 'data', 'output']),
        cmdclass={
            'build_ext': BuildExtension,
        },
        ext_modules=[
            make_cuda_ext(
                name='iou3d_nms_cuda',
                module='pcdet.ops.iou3d_nms',
                sources=[
                    'src/iou3d_cpu.cpp',
                    'src/iou3d_nms_api.cpp',
                    'src/iou3d_nms.cpp',
                    'src/iou3d_nms_kernel.cu',
                ]
            ),
            make_cuda_ext(
                name='roiaware_pool3d_cuda',
                module='pcdet.ops.roiaware_pool3d',
                sources=[
                    'src/roiaware_pool3d.cpp',
                    'src/roiaware_pool3d_kernel.cu',
                ]
            ),
            make_cuda_ext(
                name='roipoint_pool3d_cuda',
                module='pcdet.ops.roipoint_pool3d',
                sources=[
                    'src/roipoint_pool3d.cpp',
                    'src/roipoint_pool3d_kernel.cu',
                ]
            ),
            make_cuda_ext(
                name='pointnet2_stack_cuda',
                module='pcdet.ops.pointnet2.pointnet2_stack',
                sources=[
                    'src/pointnet2_api.cpp',
                    'src/ball_query.cpp',
                    'src/ball_query_gpu.cu',
                    'src/group_points.cpp',
                    'src/group_points_gpu.cu',
                    'src/sampling.cpp',
                    'src/sampling_gpu.cu',
                    'src/interpolate.cpp',
                    'src/interpolate_gpu.cu',
                    'src/voxel_query.cpp',
                    'src/voxel_query_gpu.cu',
                    'src/vector_pool.cpp',
                    'src/vector_pool_gpu.cu'
                ],
            ),
            make_cuda_ext(
                name='pointnet2_batch_cuda',
                module='pcdet.ops.pointnet2.pointnet2_batch',
                sources=[
                    'src/pointnet2_api.cpp',
                    'src/ball_query.cpp',
                    'src/ball_query_gpu.cu',
                    'src/group_points.cpp',
                    'src/group_points_gpu.cu',
                    'src/interpolate.cpp',
                    'src/interpolate_gpu.cu',
                    'src/sampling.cpp',
                    'src/sampling_gpu.cu',
                ],
            ),
        ],
    )
| 3,960 | 31.467213 | 95 | py |
CRB-active-3Ddet | CRB-active-3Ddet-main/tools/test.py | import _init_path
import argparse
import datetime
import glob
import os
import re
import time
from pathlib import Path
import numpy as np
import torch
from tensorboardX import SummaryWriter
from eval_utils import eval_utils
from pcdet.config import cfg, cfg_from_list, cfg_from_yaml_file, log_config_to_file
from pcdet.datasets import build_dataloader
from pcdet.models import build_network
from pcdet.utils import common_utils
import wandb
# Pin GPU enumeration to PCI bus order so device indices are stable, and
# expose GPUs 0-4 to this process.
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"]="0,1,2,3,4"
# Synchronous CUDA launches: slower, but errors surface at the real call site.
os.environ["CUDA_LAUNCH_BLOCKING"]="1"
def parse_config():
    """Parse command-line arguments and populate the global `cfg`.

    Side effects: loads the YAML config into the shared `cfg` object, sets
    cfg.TAG / cfg.EXP_GROUP_PATH from the config path, and seeds RNGs.
    Returns (args, cfg).
    """
    parser = argparse.ArgumentParser(description='arg parser')
    parser.add_argument('--cfg_file', type=str, default='cfgs/active-kitti_models/pv_rcnn_active_random.yaml', help='specify the config for training')
    parser.add_argument('--batch_size', type=int, default=4, required=False, help='batch size for training')
    parser.add_argument('--workers', type=int, default=4, help='number of workers for dataloader')
    parser.add_argument('--extra_tag', type=str, default='select-100', help='extra tag for this experiment')
    parser.add_argument('--ckpt', type=str, default=None, help='checkpoint to start from')
    parser.add_argument('--launcher', choices=['none', 'pytorch', 'slurm'], default='none')
    parser.add_argument('--tcp_port', type=int, default=18888, help='tcp port for distrbuted training')
    parser.add_argument('--local_rank', type=int, default=0, help='local rank for distributed training')
    parser.add_argument('--set', dest='set_cfgs', default=None, nargs=argparse.REMAINDER,
                        help='set extra config keys if needed')
    parser.add_argument('--max_waiting_mins', type=int, default=0, help='max waiting minutes')
    parser.add_argument('--start_epoch', type=int, default=0, help='')
    parser.add_argument('--eval_tag', type=str, default='default', help='eval tag for this experiment')
    parser.add_argument('--eval_all', action='store_true', default=True, help='whether to evaluate all checkpoints')
    parser.add_argument('--eval_backbone', action='store_true', default=False, help='whether to evaluate all checkpoints')
    parser.add_argument('--ckpt_dir', type=str, default=None, help='specify a ckpt directory to be evaluated if needed')
    parser.add_argument('--save_to_file', action='store_true', default=False, help='')
    args = parser.parse_args()
    cfg_from_yaml_file(args.cfg_file, cfg)
    cfg.TAG = Path(args.cfg_file).stem
    cfg.EXP_GROUP_PATH = '/'.join(args.cfg_file.split('/')[1:-1])  # remove 'cfgs' and 'xxxx.yaml'
    # Fixed seed for reproducible evaluation runs.
    common_utils.set_random_seed(1024)
    if args.set_cfgs is not None:
        # Apply key=value overrides given on the command line after --set.
        cfg_from_list(args.set_cfgs, cfg)
    return args, cfg
def eval_single_ckpt(model, test_loader, args, eval_output_dir, logger, epoch_id, dist_test=False):
    """Load the checkpoint given by args.ckpt into `model` and evaluate it once."""
    # load checkpoint
    model.load_params_from_file(filename=args.ckpt, logger=logger, to_cpu=dist_test)
    model.cuda()
    # start evaluation
    eval_utils.eval_one_epoch(
        cfg, model, test_loader, epoch_id, logger, dist_test=dist_test,
        result_dir=eval_output_dir, save_to_file=args.save_to_file
    )
def get_no_evaluated_ckpt(ckpt_dir, ckpt_record_file, args):
    """Return (epoch_id, path) of the first checkpoint not yet recorded.

    Checkpoints in `ckpt_dir` are scanned in ascending epoch order; epochs
    already listed in `ckpt_record_file` or below args.start_epoch are
    skipped. Returns (-1, None) when nothing is left to evaluate.
    """
    candidates = glob.glob(os.path.join(ckpt_dir, '*checkpoint_epoch_*.pth'))
    # Order by the integer epoch embedded in the filename.
    candidates.sort(key=lambda p: int(p.split('_')[-1].split('.')[0]))
    with open(ckpt_record_file, 'r') as f:
        evaluated = [float(line.strip()) for line in f.readlines()]
    for path in candidates:
        matches = re.findall('checkpoint_epoch_(.*).pth', path)
        if len(matches) == 0:
            continue
        epoch_id = matches[-1]
        if "select" in epoch_id:  # checkpoints written during active-learning rounds
            epoch_id = epoch_id.split('_')[0]
        if 'optim' in epoch_id:
            continue
        if float(epoch_id) not in evaluated and int(float(epoch_id)) >= args.start_epoch:
            return epoch_id, path
    return -1, None
def repeat_eval_ckpt(model, test_loader, args, eval_output_dir, logger, ckpt_dir, dist_test=False):
    """Poll `ckpt_dir` and evaluate every new checkpoint as it appears.

    Rank 0 also mirrors metrics to TensorBoard and wandb. The loop exits
    once no new checkpoint shows up for args.max_waiting_mins minutes
    (after at least one evaluation attempt).
    """
    # evaluated ckpt record
    ckpt_record_file = eval_output_dir / ('eval_list_%s.txt' % cfg.DATA_CONFIG.DATA_SPLIT['test'])
    with open(ckpt_record_file, 'a'):
        pass
    # tensorboard log
    if cfg.LOCAL_RANK == 0:
        tb_log = SummaryWriter(log_dir=str(eval_output_dir / ('tensorboard_%s' % cfg.DATA_CONFIG.DATA_SPLIT['test'])))
        # wandb project name depends on the base dataset config (waymo/kitti).
        if cfg.DATA_CONFIG._BASE_CONFIG_.split('/')[-1].split('.')[0] == 'waymo_dataset':
            wandb.init(project=cfg.DATA_CONFIG._BASE_CONFIG_.split('/')[-1].split('.')[0] + '_test_select-{}'.format(cfg.ACTIVE_TRAIN.SELECT_NUMS), entity='user')
        elif cfg.DATA_CONFIG._BASE_CONFIG_.split('/')[-1].split('.')[0] == 'kitti_dataset':
            wandb.init(project=cfg.DATA_CONFIG._BASE_CONFIG_.split('/')[-1].split('.')[0] + '_test_select-{}'.format(cfg.ACTIVE_TRAIN.SELECT_NUMS), entity='user')
        else:
            raise NotImplementedError
        # Run name encodes dataset, model tag and the active-training setup.
        run_name_elements = [cfg.DATA_CONFIG._BASE_CONFIG_.split('/')[-1].split('.')[0]] + ['backbone' if args.eval_backbone else cfg.TAG] + [cfg.ACTIVE_TRAIN.PRE_TRAIN_EPOCH_NUMS] + [cfg.ACTIVE_TRAIN.SELECT_LABEL_EPOCH_INTERVAL] + [cfg.ACTIVE_TRAIN.PRE_TRAIN_SAMPLE_NUMS] + [cfg.ACTIVE_TRAIN.SELECT_NUMS]
        run_name_elements = '_'.join([str(i) for i in run_name_elements])
        wandb.run.name = run_name_elements
        wandb.config.update(args)
        wandb.config.update(cfg)
    total_time = 0
    first_eval = True
    while True:
        # check whether there is checkpoint which is not evaluated
        cur_epoch_id, cur_ckpt = get_no_evaluated_ckpt(ckpt_dir, ckpt_record_file, args)
        if cur_epoch_id == -1 or int(float(cur_epoch_id)) < args.start_epoch:
            # Nothing new yet: wait and (eventually) give up after the budget.
            wait_second = 30
            if cfg.LOCAL_RANK == 0:
                print('Wait %s seconds for next check (progress: %.1f / %d minutes): %s \r'
                      % (wait_second, total_time * 1.0 / 60, args.max_waiting_mins, ckpt_dir), end='', flush=True)
            time.sleep(wait_second)
            total_time += 30
            if total_time > args.max_waiting_mins * 60 and (first_eval is False):
                break
            continue
        total_time = 0
        first_eval = False
        model.load_params_from_file(filename=cur_ckpt, logger=logger, to_cpu=dist_test)
        model.cuda()
        # start evaluation
        cur_result_dir = eval_output_dir / ('epoch_%s' % cur_epoch_id) / cfg.DATA_CONFIG.DATA_SPLIT['test']
        tb_dict = eval_utils.eval_one_epoch(
            cfg, model, test_loader, cur_epoch_id, logger, dist_test=dist_test,
            result_dir=cur_result_dir, save_to_file=args.save_to_file
        )
        if cfg.LOCAL_RANK == 0:
            for key, val in tb_dict.items():
                tb_log.add_scalar(key, val, cur_epoch_id)
                wandb.log({key: val}, step=int(cur_epoch_id))
        # record this epoch which has been evaluated
        with open(ckpt_record_file, 'a') as f:
            print('%s' % cur_epoch_id, file=f)
        logger.info('Epoch %s has been evaluated' % cur_epoch_id)
def main():
    """Evaluation entry point.

    Parses the config, (optionally) initialises distributed testing, builds the
    test dataloader and the detection network, then either evaluates a single
    checkpoint (``--ckpt``) or keeps polling the checkpoint directory and
    evaluates every new checkpoint (``--eval_all``).
    """
    args, cfg = parse_config()
    # tag experiment outputs by the number of initially labelled (pre-train) samples
    args.extra_tag = 'select-{}'.format(cfg.ACTIVE_TRAIN.PRE_TRAIN_SAMPLE_NUMS)
    if args.launcher == 'none':
        dist_test = False
        total_gpus = 1
    else:
        # dispatch to common_utils.init_dist_pytorch / init_dist_slurm
        total_gpus, cfg.LOCAL_RANK = getattr(common_utils, 'init_dist_%s' % args.launcher)(
            args.tcp_port, args.local_rank, backend='nccl'
        )
        dist_test = True
    if args.batch_size is None:
        args.batch_size = cfg.OPTIMIZATION.BATCH_SIZE_PER_GPU
    else:
        # split the global batch size evenly across GPUs
        assert args.batch_size % total_gpus == 0, 'Batch size should match the number of gpus'
        args.batch_size = args.batch_size // total_gpus
    # backbone checkpoints (shared across active-learning strategies) live under
    # a dedicated 'backbone' directory; strategy-specific ones under cfg.TAG
    if args.eval_backbone:
        output_dir = cfg.ROOT_DIR / 'output' / cfg.EXP_GROUP_PATH / 'backbone' / args.extra_tag
        output_dir.mkdir(parents=True, exist_ok=True)
    else:
        output_dir = cfg.ROOT_DIR / 'output' / cfg.EXP_GROUP_PATH / cfg.TAG / args.extra_tag
        output_dir.mkdir(parents=True, exist_ok=True)
    eval_output_dir = output_dir / 'eval'
    if not args.eval_all:
        # derive the epoch id from the checkpoint filename for the result dir name
        num_list = re.findall(r'\d+', args.ckpt) if args.ckpt is not None else []
        epoch_id = num_list[-1] if num_list.__len__() > 0 else 'no_number'
        eval_output_dir = eval_output_dir / ('epoch_%s' % epoch_id) / cfg.DATA_CONFIG.DATA_SPLIT['test']
    else:
        eval_output_dir = eval_output_dir / 'eval_all_default'
    if args.eval_tag is not None:
        eval_output_dir = eval_output_dir / args.eval_tag
    eval_output_dir.mkdir(parents=True, exist_ok=True)
    log_file = eval_output_dir / ('log_eval_%s.txt' % datetime.datetime.now().strftime('%Y%m%d-%H%M%S'))
    logger = common_utils.create_logger(log_file, rank=cfg.LOCAL_RANK)
    if args.eval_backbone:
        logger.info('start evaluating backbones for {}...'.format(cfg.EXP_GROUP_PATH))
    # log to file
    logger.info('**********************Start logging**********************')
    gpu_list = os.environ['CUDA_VISIBLE_DEVICES'] if 'CUDA_VISIBLE_DEVICES' in os.environ.keys() else 'ALL'
    logger.info('CUDA_VISIBLE_DEVICES=%s' % gpu_list)
    if dist_test:
        logger.info('total_batch_size: %d' % (total_gpus * args.batch_size))
    for key, val in vars(args).items():
        logger.info('{:16} {}'.format(key, val))
    log_config_to_file(cfg, logger=logger)
    ckpt_dir = args.ckpt_dir if args.ckpt_dir is not None else output_dir / 'ckpt'
    test_set, test_loader, sampler = build_dataloader(
        dataset_cfg=cfg.DATA_CONFIG,
        class_names=cfg.CLASS_NAMES,
        batch_size=args.batch_size,
        dist=dist_test, workers=args.workers, logger=logger, training=False
    )
    model = build_network(model_cfg=cfg.MODEL, num_class=len(cfg.CLASS_NAMES), dataset=test_set)
    with torch.no_grad():
        if args.eval_all:
            repeat_eval_ckpt(model, test_loader, args, eval_output_dir, logger, ckpt_dir, dist_test=dist_test)
        else:
            # NOTE(review): epoch_id is only bound in the `not args.eval_all`
            # branch above, which is the only path that reaches here — but the
            # coupling is implicit; verify if this branch structure changes.
            eval_single_ckpt(model, test_loader, args, eval_output_dir, logger, epoch_id, dist_test=dist_test)
if __name__ == '__main__':
    main()
| 10,291 | 43.747826 | 305 | py |
CRB-active-3Ddet | CRB-active-3Ddet-main/tools/demo.py | import argparse
import glob
from pathlib import Path
try:
import open3d
from visual_utils import open3d_vis_utils as V
OPEN3D_FLAG = True
except:
import mayavi.mlab as mlab
from visual_utils import visualize_utils as V
OPEN3D_FLAG = False
import numpy as np
import torch
from pcdet.config import cfg, cfg_from_yaml_file
from pcdet.datasets import DatasetTemplate
from pcdet.models import build_network, load_data_to_gpu
from pcdet.utils import common_utils
class DemoDataset(DatasetTemplate):
    """Label-free dataset for the quick demo.

    Loads raw point clouds from a single file or from every ``*{ext}`` file in
    a directory, and runs them through the standard ``prepare_data`` pipeline.
    """

    def __init__(self, dataset_cfg, class_names, training=True, root_path=None, logger=None, ext='.bin'):
        """
        Args:
            root_path: Path to one point-cloud file, or a directory of them.
            dataset_cfg: dataset config node (cfg.DATA_CONFIG).
            class_names: detection class names.
            training: training-mode flag passed to DatasetTemplate.
            logger: optional logger.
            ext: point-cloud file extension, '.bin' or '.npy'.
        """
        super().__init__(
            dataset_cfg=dataset_cfg, class_names=class_names, training=training, root_path=root_path, logger=logger
        )
        self.root_path = root_path
        self.ext = ext
        # directory → all matching files; single file → one-element list
        data_file_list = glob.glob(str(root_path / f'*{self.ext}')) if self.root_path.is_dir() else [self.root_path]
        data_file_list.sort()
        self.sample_file_list = data_file_list

    def __len__(self):
        return len(self.sample_file_list)

    def __getitem__(self, index):
        # .bin files are assumed to store float32 (x, y, z, intensity) tuples
        if self.ext == '.bin':
            points = np.fromfile(self.sample_file_list[index], dtype=np.float32).reshape(-1, 4)
        elif self.ext == '.npy':
            points = np.load(self.sample_file_list[index])
        else:
            raise NotImplementedError
        input_dict = {
            'points': points,
            'frame_id': index,
        }
        # apply the template's point-feature encoding / voxelisation pipeline
        data_dict = self.prepare_data(data_dict=input_dict)
        return data_dict
def parse_config():
    """Parse demo command-line arguments and load the YAML config.

    Returns:
        (args, cfg): the parsed argparse namespace and the populated global cfg.
    """
    arg_parser = argparse.ArgumentParser(description='arg parser')
    arg_parser.add_argument('--cfg_file', type=str, default='cfgs/kitti_models/second.yaml',
                            help='specify the config for demo')
    arg_parser.add_argument('--data_path', type=str, default='demo_data',
                            help='specify the point cloud data file or directory')
    arg_parser.add_argument('--ckpt', type=str, default=None,
                            help='specify the pretrained model')
    arg_parser.add_argument('--ext', type=str, default='.bin',
                            help='specify the extension of your point cloud data file')

    parsed_args = arg_parser.parse_args()
    # load the YAML file into the shared global config object
    cfg_from_yaml_file(parsed_args.cfg_file, cfg)
    return parsed_args, cfg
def main():
    """Quick-demo entry point: run inference over the demo point clouds and
    visualise each frame's predicted boxes (open3d if available, else mayavi)."""
    args, cfg = parse_config()
    logger = common_utils.create_logger()
    logger.info('-----------------Quick Demo of OpenPCDet-------------------------')
    demo_dataset = DemoDataset(
        dataset_cfg=cfg.DATA_CONFIG, class_names=cfg.CLASS_NAMES, training=False,
        root_path=Path(args.data_path), ext=args.ext, logger=logger
    )
    logger.info(f'Total number of samples: \t{len(demo_dataset)}')
    model = build_network(model_cfg=cfg.MODEL, num_class=len(cfg.CLASS_NAMES), dataset=demo_dataset)
    model.load_params_from_file(filename=args.ckpt, logger=logger, to_cpu=True)
    model.cuda()
    model.eval()
    with torch.no_grad():
        for idx, data_dict in enumerate(demo_dataset):
            logger.info(f'Visualized sample index: \t{idx + 1}')
            # collate a single sample into a batch of size 1
            data_dict = demo_dataset.collate_batch([data_dict])
            load_data_to_gpu(data_dict)
            pred_dicts, _ = model.forward(data_dict)
            # drop the leading batch-index column of the point tensor before plotting
            V.draw_scenes(
                points=data_dict['points'][:, 1:], ref_boxes=pred_dicts[0]['pred_boxes'],
                ref_scores=pred_dicts[0]['pred_scores'], ref_labels=pred_dicts[0]['pred_labels']
            )
            if not OPEN3D_FLAG:
                # mayavi blocks per frame; open3d handles its own window loop
                mlab.show(stop=True)
    logger.info('Demo done.')
if __name__ == '__main__':
    main()
| 3,750 | 32.19469 | 118 | py |
CRB-active-3Ddet | CRB-active-3Ddet-main/tools/train.py | import _init_path
import argparse
import datetime
import glob
import os
from pathlib import Path
import torch
import torch.nn as nn
from tensorboardX import SummaryWriter
import wandb
from pcdet.config import cfg, cfg_from_list, cfg_from_yaml_file, log_config_to_file
from pcdet.datasets import build_dataloader, build_active_dataloader
from pcdet.models import build_network, model_fn_decorator
from pcdet.utils import common_utils
from train_utils.optimization import build_optimizer, build_scheduler
from train_utils.train_utils import train_model
from train_utils.train_st_utils import train_model_st
from train_utils.train_active_utils import train_model_active
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"]="0,1,2,3,4"
os.environ["CUDA_LAUNCH_BLOCKING"]="1"
# torch.cuda.set_device(0,1,2)
def parse_config():
    """Parse training command-line arguments and load the YAML config.

    Also derives cfg.TAG from the config filename, cfg.EXP_GROUP_PATH from its
    directory, and overrides args.extra_tag with the active-learning
    pre-train sample count.

    Returns:
        (args, cfg): parsed argparse namespace and the populated global cfg.
    """
    parser = argparse.ArgumentParser(description='arg parser')
    parser.add_argument('--cfg_file', type=str, default='cfgs/active-kitti_models/pv_rcnn_active_random.yaml', help='specify the config for training')
    parser.add_argument('--batch_size', type=int, default=2, required=False, help='batch size for training')
    parser.add_argument('--epochs', type=int, default=None, required=False, help='number of epochs to train for')
    parser.add_argument('--workers', type=int, default=6, help='number of workers for dataloader')
    parser.add_argument('--extra_tag', type=str, default='select-100', help='extra tag for this experiment')
    parser.add_argument('--ckpt', type=str, default=None, help='checkpoint to start from')
    parser.add_argument('--pretrained_model', type=str, default=None, help='pretrained_model')
    parser.add_argument('--launcher', choices=['none', 'pytorch', 'slurm'], default='none')
    parser.add_argument('--tcp_port', type=int, default=1888, help='tcp port for distrbuted training')
    parser.add_argument('--sync_bn', action='store_true', default=False, help='whether to use sync bn')
    parser.add_argument('--fix_random_seed', action='store_true', default=True, help='')
    parser.add_argument('--ckpt_save_interval', type=int, default=5, help='number of training epochs')
    parser.add_argument('--local_rank', type=int, default=1, help='local rank for distributed training')
    parser.add_argument('--max_ckpt_save_num', type=int, default=100, help='max number of saved checkpoint')
    parser.add_argument('--merge_all_iters_to_one_epoch', action='store_true', default=False, help='')
    parser.add_argument('--set', dest='set_cfgs', default=None, nargs=argparse.REMAINDER,
                        help='set extra config keys if needed')
    parser.add_argument('--max_waiting_mins', type=int, default=0, help='max waiting minutes')
    parser.add_argument('--start_epoch', type=int, default=0, help='')
    parser.add_argument('--num_epochs_to_eval', type=int, default=0, help='number of checkpoints to be evaluated')
    parser.add_argument('--save_to_file', action='store_true', default=False, help='')
    args = parser.parse_args()
    cfg_from_yaml_file(args.cfg_file, cfg)
    # the extra tag always reflects the pre-train sample budget, overriding --extra_tag
    args.extra_tag = 'select-{}'.format(cfg.ACTIVE_TRAIN.PRE_TRAIN_SAMPLE_NUMS)
    cfg.TAG = Path(args.cfg_file).stem
    cfg.EXP_GROUP_PATH = '/'.join(args.cfg_file.split('/')[1:-1])  # remove 'cfgs' and 'xxxx.yaml'
    if args.set_cfgs is not None:
        # apply `--set KEY VALUE ...` overrides on top of the YAML config
        cfg_from_list(args.set_cfgs, cfg)
    return args, cfg
def main():
    """Training entry point.

    Supports three regimes driven by the config: active training
    (cfg.ACTIVE_TRAIN, labelled + unlabelled loaders), self-training
    (cfg.SELF_TRAIN, source + target loaders), and plain supervised training.
    Sets up output/checkpoint/label directories, logging, wandb, the model,
    optimizer and LR schedulers, then dispatches to the matching train loop.
    """
    args, cfg = parse_config()
    if args.launcher == 'none':
        dist_train = False
        total_gpus = 1
    else:
        # dispatch to common_utils.init_dist_pytorch / init_dist_slurm
        total_gpus, cfg.LOCAL_RANK = getattr(common_utils, 'init_dist_%s' % args.launcher)(
            args.tcp_port, args.local_rank, backend='nccl'
        )
        dist_train = True
    if args.batch_size is None:
        args.batch_size = cfg.OPTIMIZATION.BATCH_SIZE_PER_GPU
    else:
        # per-GPU batch size is the global batch size split across GPUs
        assert args.batch_size % total_gpus == 0, 'Batch size should match the number of gpus'
        args.batch_size = args.batch_size // total_gpus
    args.epochs = cfg.ACTIVE_TRAIN.PRE_TRAIN_EPOCH_NUMS if args.epochs is None else args.epochs
    if args.fix_random_seed:
        common_utils.set_random_seed(666)
    output_dir = cfg.ROOT_DIR / 'output' / cfg.EXP_GROUP_PATH / cfg.TAG / args.extra_tag
    ckpt_dir = output_dir / 'ckpt'
    # add ps label and active label
    ps_label_dir = output_dir / 'ps_label'
    ps_label_dir.mkdir(parents=True, exist_ok=True)
    active_label_dir = output_dir / 'active_label'
    active_label_dir.mkdir(parents=True, exist_ok=True)
    # for all active methods, they share the same pretrained model weights
    backbone_dir = cfg.ROOT_DIR / 'output' / cfg.EXP_GROUP_PATH / 'backbone' / args.extra_tag / 'ckpt'
    backbone_dir.mkdir(parents=True, exist_ok=True)
    output_dir.mkdir(parents=True, exist_ok=True)
    ckpt_dir.mkdir(parents=True, exist_ok=True)
    log_file = output_dir / ('log_train_%s.txt' % datetime.datetime.now().strftime('%Y%m%d-%H%M%S'))
    logger = common_utils.create_logger(log_file, rank=cfg.LOCAL_RANK)
    # log to file
    logger.info('**********************Start logging**********************')
    gpu_list = os.environ['CUDA_VISIBLE_DEVICES'] if 'CUDA_VISIBLE_DEVICES' in os.environ.keys() else 'ALL'
    logger.info('CUDA_VISIBLE_DEVICES=%s' % gpu_list)
    if dist_train:
        logger.info('total_batch_size: %d' % (total_gpus * args.batch_size))
    for key, val in vars(args).items():
        logger.info('{:16} {}'.format(key, val))
    log_config_to_file(cfg, logger=logger)
    tb_log = SummaryWriter(log_dir=str(output_dir / 'tensorboard')) if cfg.LOCAL_RANK == 0 else None
    # only rank 0 archives the config and talks to wandb
    if cfg.LOCAL_RANK == 0:
        os.system('cp %s %s' % (args.cfg_file, output_dir))
        wandb.init(project=cfg.DATA_CONFIG._BASE_CONFIG_.split('/')[-1].split('.')[0] + '_train', entity="user", tags=args.cfg_file.split('/')[-1].split('.')[0])
        run_name_elements = [cfg.DATA_CONFIG._BASE_CONFIG_.split('/')[-1].split('.')[0]] + [cfg.TAG] + [cfg.ACTIVE_TRAIN.PRE_TRAIN_EPOCH_NUMS] + [cfg.ACTIVE_TRAIN.SELECT_LABEL_EPOCH_INTERVAL] + \
                            [cfg.ACTIVE_TRAIN.PRE_TRAIN_SAMPLE_NUMS] + [cfg.ACTIVE_TRAIN.SELECT_NUMS] + [datetime.datetime.now().strftime('%Y%m%d-%H%M%S')]
        run_name_elements = '_'.join([str(i) for i in run_name_elements])
        wandb.run.name = run_name_elements
        wandb.config.update(args)
        wandb.config.update(cfg)
    # -----------------------create dataloader & network & optimizer---------------------------
    # ---------------- Active Training ----------------
    if cfg.get('ACTIVE_TRAIN', None):
        labelled_set, unlabelled_set, labelled_loader, unlabelled_loader, \
        labelled_sampler, unlabelled_sampler = build_active_dataloader(
            cfg.DATA_CONFIG, cfg.CLASS_NAMES, args.batch_size,
            dist_train, workers=args.workers, logger=logger, training=True
        )
    else:
        source_set, source_loader, source_sampler = build_dataloader(
            dataset_cfg=cfg.DATA_CONFIG,
            class_names=cfg.CLASS_NAMES,
            batch_size=args.batch_size,
            dist=dist_train, workers=args.workers,
            logger=logger,
            training=True,
            merge_all_iters_to_one_epoch=args.merge_all_iters_to_one_epoch,
            total_epochs=args.epochs
        )
    if cfg.get('SELF_TRAIN', None):
        target_set, target_loader, target_sampler = build_dataloader(
            cfg.DATA_CONFIG_TAR, cfg.DATA_CONFIG_TAR.CLASS_NAMES, args.batch_size,
            dist_train, workers=args.workers, logger=logger, training=True
        )
    else:
        target_set = target_loader = target_sampler = None
    if cfg.get('ACTIVE_TRAIN', None):
        model = build_network(model_cfg=cfg.MODEL, num_class=len(cfg.CLASS_NAMES),
                              dataset=labelled_set)
    if args.sync_bn:
        model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)
    model.cuda()
    optimizer = build_optimizer(model, cfg.OPTIMIZATION)
    # load checkpoint if it is possible
    start_epoch = it = 0
    last_epoch = -1
    if args.pretrained_model is not None:
        model.load_params_from_file(filename=args.pretrained_model, to_cpu=dist_train, logger=logger)
    if args.ckpt is not None:
        it, start_epoch = model.load_params_with_optimizer(args.ckpt, to_cpu=dist_train, optimizer=optimizer, logger=logger)
        last_epoch = start_epoch + 1
    else:
        # no explicit checkpoint: resume from the most recent one in ckpt_dir, if any
        ckpt_list = glob.glob(str(ckpt_dir / '*checkpoint_epoch_*.pth'))
        if len(ckpt_list) > 0:
            ckpt_list.sort(key=os.path.getmtime)
            it, start_epoch = model.load_params_with_optimizer(
                ckpt_list[-1], to_cpu=dist_train, optimizer=optimizer, logger=logger
            )
            last_epoch = start_epoch + 1
    model.train()  # before wrap to DistributedDataParallel to support fixed some parameters
    logger.info('device count: {}'.format(torch.cuda.device_count()))
    if dist_train:
        model = nn.parallel.DistributedDataParallel(model, device_ids=[cfg.LOCAL_RANK % torch.cuda.device_count()])
    # else:
    #     model = nn.DataParallel(model, device_ids=[0,1,2])
    logger.info(model)
    # ---------------- Active Training ----------------
    # iterations per epoch depend on which loader drives the training regime
    if cfg.get('SELF_TRAIN', None):
        total_iters_each_epoch = len(target_loader) if not args.merge_all_iters_to_one_epoch \
            else len(target_loader) // args.epochs
    elif cfg.get('ACTIVE_TRAIN', None):
        total_iters_each_epoch = len(labelled_loader) if not args.merge_all_iters_to_one_epoch \
            else len(labelled_loader) // args.epochs
    else:
        total_iters_each_epoch = len(source_loader) if not args.merge_all_iters_to_one_epoch \
            else len(source_loader) // args.epochs
    lr_scheduler, lr_warmup_scheduler = build_scheduler(
        optimizer, total_iters_each_epoch=total_iters_each_epoch, total_epochs=args.epochs,
        last_epoch=last_epoch, optim_cfg=cfg.OPTIMIZATION
    )
    # select proper trainer
    train_func = train_model_st if cfg.get('SELF_TRAIN', None) else train_model
    if cfg.get('ACTIVE_TRAIN', None):
        train_func = train_model_active
    # -----------------------start training---------------------------
    logger.info('**********************Start training %s/%s(%s)**********************'
                % (cfg.EXP_GROUP_PATH, cfg.TAG, args.extra_tag))
    if cfg.get('ACTIVE_TRAIN', None):
        train_func(
            model,
            optimizer,
            labelled_loader,
            unlabelled_loader,
            model_func=model_fn_decorator(),
            lr_scheduler=lr_scheduler,
            optim_cfg=cfg.OPTIMIZATION,
            start_epoch=start_epoch,
            total_epochs=args.epochs,
            start_iter=it,
            rank=cfg.LOCAL_RANK,
            tb_log=tb_log,
            ckpt_save_dir=ckpt_dir,
            active_label_dir=active_label_dir,
            backbone_dir=backbone_dir,
            labelled_sampler=labelled_sampler,
            unlabelled_sampler=unlabelled_sampler,
            lr_warmup_scheduler=lr_warmup_scheduler,
            ckpt_save_interval=args.ckpt_save_interval,
            max_ckpt_save_num=args.max_ckpt_save_num,
            merge_all_iters_to_one_epoch=args.merge_all_iters_to_one_epoch,
            logger=logger,
            ema_model=None,
            dist_train=dist_train
        )
    else:
        train_func(
            model,
            optimizer,
            source_loader,
            target_loader,
            model_func=model_fn_decorator(),
            lr_scheduler=lr_scheduler,
            optim_cfg=cfg.OPTIMIZATION,
            start_epoch=start_epoch,
            total_epochs=args.epochs,
            start_iter=it,
            rank=cfg.LOCAL_RANK,
            tb_log=tb_log,
            ckpt_save_dir=ckpt_dir,
            ps_label_dir=ps_label_dir,
            source_sampler=source_sampler,
            target_sampler=target_sampler,
            lr_warmup_scheduler=lr_warmup_scheduler,
            ckpt_save_interval=args.ckpt_save_interval,
            max_ckpt_save_num=args.max_ckpt_save_num,
            merge_all_iters_to_one_epoch=args.merge_all_iters_to_one_epoch,
            logger=logger,
            ema_model=None
        )
    # if hasattr(train_set, 'use_shared_memory') and train_set.use_shared_memory:
    #     train_set.clean_shared_memory()
    logger.info('**********************End training %s/%s(%s)**********************\n\n\n'
                % (cfg.EXP_GROUP_PATH, cfg.TAG, args.extra_tag))
    # logger.info('**********************Start evaluation %s/%s(%s)**********************' %
    #             (cfg.EXP_GROUP_PATH, cfg.TAG, args.extra_tag))
    # test_set, test_loader, sampler = build_dataloader(
    #     dataset_cfg=cfg.DATA_CONFIG,
    #     class_names=cfg.CLASS_NAMES,
    #     batch_size=args.batch_size,
    #     dist=dist_train, workers=args.workers, logger=logger, training=False
    # )
    # eval_output_dir = output_dir / 'eval' / 'eval_with_train'
    # eval_output_dir.mkdir(parents=True, exist_ok=True)
    # args.start_epoch = max(args.epochs - args.num_epochs_to_eval, 0)  # Only evaluate the last args.num_epochs_to_eval epochs
    # repeat_eval_ckpt(
    #     model.module if dist_train else model,
    #     test_loader, args, eval_output_dir, logger, ckpt_dir,
    #     dist_test=dist_train
    # )
    # logger.info('**********************End evaluation %s/%s(%s)**********************' %
    #             (cfg.EXP_GROUP_PATH, cfg.TAG, args.extra_tag))
if __name__ == '__main__':
    main()
| 13,701 | 42.636943 | 195 | py |
CRB-active-3Ddet | CRB-active-3Ddet-main/tools/visualize.py | import _init_path
import argparse
import datetime
import glob
import os
import re
import time
from pathlib import Path
import pickle
import numpy as np
import torch
from tensorboardX import SummaryWriter
from eval_utils import eval_utils
from pcdet.config import cfg, cfg_from_list, cfg_from_yaml_file, log_config_to_file
from pcdet.datasets import build_dataloader
from pcdet.models import build_network
from pcdet.utils import common_utils
import wandb
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"]="0"
os.environ["CUDA_LAUNCH_BLOCKING"]="1"
def parse_config():
    """Parse visualisation command-line arguments and load the YAML config.

    Also derives cfg.TAG / cfg.EXP_GROUP_PATH from the config path, fixes the
    random seed, and overrides args.extra_tag with the pre-train sample count.

    Returns:
        (args, cfg): parsed argparse namespace and the populated global cfg.
    """
    parser = argparse.ArgumentParser(description='arg parser')
    parser.add_argument('--cfg_file', type=str, default='cfgs/active-kitti_models/pv_rcnn_active_montecarlo_from_scratch.yaml', help='specify the config for training')
    parser.add_argument('--batch_size', type=int, default=4, required=False, help='batch size for training')
    parser.add_argument('--workers', type=int, default=4, help='number of workers for dataloader')
    parser.add_argument('--extra_tag', type=str, default='select-100', help='extra tag for this experiment')
    parser.add_argument('--ckpt', type=str, default=None, help='checkpoint to start from')
    parser.add_argument('--launcher', choices=['none', 'pytorch', 'slurm'], default='none')
    parser.add_argument('--tcp_port', type=int, default=18888, help='tcp port for distrbuted training')
    parser.add_argument('--local_rank', type=int, default=0, help='local rank for distributed training')
    parser.add_argument('--set', dest='set_cfgs', default=None, nargs=argparse.REMAINDER,
                        help='set extra config keys if needed')
    parser.add_argument('--max_waiting_mins', type=int, default=1, help='max waiting minutes')
    parser.add_argument('--start_epoch', type=int, default=0, help='')
    parser.add_argument('--eval_tag', type=str, default='default', help='eval tag for this experiment')
    parser.add_argument('--eval_all', action='store_true', default=True, help='whether to evaluate all checkpoints')
    parser.add_argument('--eval_backbone', action='store_true', default=False, help='whether to evaluate all checkpoints')
    parser.add_argument('--ckpt_dir', type=str, default=None, help='specify a ckpt directory to be evaluated if needed')
    parser.add_argument('--save_to_file', action='store_true', default=False, help='')
    args = parser.parse_args()
    cfg_from_yaml_file(args.cfg_file, cfg)
    cfg.TAG = Path(args.cfg_file).stem
    cfg.EXP_GROUP_PATH = '/'.join(args.cfg_file.split('/')[1:-1])  # remove 'cfgs' and 'xxxx.yaml'
    # deterministic visualisation runs
    common_utils.set_random_seed(1024)
    args.extra_tag = 'select-{}'.format(cfg.ACTIVE_TRAIN.PRE_TRAIN_SAMPLE_NUMS)
    if args.set_cfgs is not None:
        cfg_from_list(args.set_cfgs, cfg)
    return args, cfg
def eval_single_ckpt(model, test_loader, args, eval_output_dir, logger, epoch_id, dist_test=False):
    """Load the checkpoint given by args.ckpt into `model` and evaluate it once.

    Args:
        model: the (un-initialised) detection network.
        test_loader: dataloader for the test split.
        args: parsed CLI namespace (uses .ckpt and .save_to_file).
        eval_output_dir: directory for evaluation artefacts.
        logger: logger instance.
        epoch_id: epoch label used for reporting only.
        dist_test: whether to load weights to CPU first (distributed mode).
    """
    # load checkpoint
    model.load_params_from_file(filename=args.ckpt, logger=logger, to_cpu=dist_test)
    model.cuda()
    # start evaluation
    eval_utils.eval_one_epoch(
        cfg, model, test_loader, epoch_id, logger, dist_test=dist_test,
        result_dir=eval_output_dir, save_to_file=args.save_to_file
    )
def get_no_evaluated_ckpt(ckpt_dir, ckpt_record_file, args):
    """Find the oldest checkpoint in *ckpt_dir* that has not been evaluated yet.

    Args:
        ckpt_dir: directory containing '*checkpoint_epoch_*.pth' files.
        ckpt_record_file: text file with one already-evaluated epoch id per line.
        args: namespace providing .start_epoch (epochs below it are skipped).

    Returns:
        (epoch_id, ckpt_path) for the first unevaluated checkpoint in
        modification-time order, or (-1, None) if none is pending.
    """
    ckpt_list = glob.glob(os.path.join(ckpt_dir, '*checkpoint_epoch_*.pth'))
    ckpt_list.sort(key=os.path.getmtime)
    # close the record file deterministically instead of leaking the handle
    with open(ckpt_record_file, 'r') as f:
        evaluated_ckpt_list = [float(x.strip()) for x in f.readlines()]

    for cur_ckpt in ckpt_list:
        # escape the dot so '.pth' is matched literally, not as "any char + pth"
        num_list = re.findall(r'checkpoint_epoch_(.*)\.pth', cur_ckpt)
        if len(num_list) == 0:
            continue

        epoch_id = num_list[-1]
        if 'select' in epoch_id:  # active training epochs, named '<epoch>_select...'
            epoch_id = epoch_id.split('_')[0]
        if 'optim' in epoch_id:  # skip optimizer-state snapshots
            continue
        if float(epoch_id) not in evaluated_ckpt_list and int(float(epoch_id)) >= args.start_epoch:
            return epoch_id, cur_ckpt
    return -1, None
def repeat_eval_ckpt(model, test_loader, args, eval_output_dir, logger, ckpt_dir, dist_test=False):
    """Poll *ckpt_dir* and visualise every checkpoint that has not yet been
    processed, logging scalar results to tensorboard and wandb against the
    cumulative number of actively-labelled bounding boxes.

    Exits after args.max_waiting_mins minutes with no new checkpoint (once at
    least one has been handled).
    """
    # evaluated ckpt record
    ckpt_record_file = eval_output_dir / ('vis_list_%s.txt' % cfg.DATA_CONFIG.DATA_SPLIT['test'])
    # touch the record file so later reads/appends always succeed
    with open(ckpt_record_file, 'a'):
        pass
    # tensorboard log
    if cfg.LOCAL_RANK == 0:
        tb_log = SummaryWriter(log_dir=str(eval_output_dir / ('tensorboard_vis_%s' % cfg.DATA_CONFIG.DATA_SPLIT['test'])))
        # wandb.init(project=cfg.DATA_CONFIG._BASE_CONFIG_.split('/')[-1].split('.')[0] + '_vis', entity='user')
        wandb.init(project=cfg.DATA_CONFIG._BASE_CONFIG_.split('/')[-1].split('.')[0] + '_vis_select-{}'.format(cfg.ACTIVE_TRAIN.SELECT_NUMS), entity='user')
        run_name_elements = [cfg.DATA_CONFIG._BASE_CONFIG_.split('/')[-1].split('.')[0]] + ['backbone' if args.eval_backbone else cfg.TAG] + [cfg.ACTIVE_TRAIN.PRE_TRAIN_EPOCH_NUMS] + [cfg.ACTIVE_TRAIN.SELECT_LABEL_EPOCH_INTERVAL] + [cfg.ACTIVE_TRAIN.PRE_TRAIN_SAMPLE_NUMS] + [cfg.ACTIVE_TRAIN.SELECT_NUMS]
        run_name_elements = '_'.join([str(i) for i in run_name_elements])
        wandb.run.name = run_name_elements
        wandb.config.update(args)
        wandb.config.update(cfg)
    total_time = 0
    first_eval = True
    while True:
        # check whether there is checkpoint which is not evaluated
        cur_epoch_id, cur_ckpt = get_no_evaluated_ckpt(ckpt_dir, ckpt_record_file, args)
        if cur_epoch_id == -1 or int(float(cur_epoch_id)) < args.start_epoch:
            wait_second = 30
            if cfg.LOCAL_RANK == 0:
                print('Wait %s seconds for next check (progress: %.1f / %d minutes): %s \r'
                      % (wait_second, total_time * 1.0 / 60, args.max_waiting_mins, ckpt_dir), end='', flush=True)
            time.sleep(wait_second)
            total_time += 30
            if total_time > args.max_waiting_mins * 60 and (first_eval is False):
                break
            continue
        total_time = 0
        first_eval = False
        # model.load_params_from_file(filename=cur_ckpt, logger=logger, to_cpu=dist_test)
        # model.cuda()
        # start evaluation
        cur_result_dir = eval_output_dir / ('epoch_%s' % cur_epoch_id) / cfg.DATA_CONFIG.DATA_SPLIT['test']
        tb_dict = eval_utils.vis_one_epoch(
            cfg, model, test_loader, cur_epoch_id, logger, dist_test=dist_test,
            result_dir=cur_result_dir, save_to_file=args.save_to_file
        )
        # count the boxes selected by active learning up to this epoch; the
        # filename layout 'selected_frames_epoch_<E>_..' puts <E> at field -3
        active_label_files = glob.glob(str(eval_output_dir.parents[2] / 'active_label' / 'selected_frames_epoch_*.pkl'))
        active_label_files = [file for file in active_label_files if int(file.split('.')[0].split('_')[-3]) <= int(cur_epoch_id)]
        num_bbox = 0
        for file in active_label_files:
            with open(file, 'rb') as f:
                records = pickle.load(f)
                num_bbox += sum([sum(i.values()) for i in records['selected_bbox']])
        num_bbox = int(num_bbox)
        if cfg.LOCAL_RANK == 0:
            for key, val in tb_dict.items():
                # x-axis is the annotation budget spent, not the epoch
                tb_log.add_scalar(key, val, num_bbox)
                wandb.log({key: val}, step=num_bbox)
        # record this epoch which has been evaluated
        with open(ckpt_record_file, 'a') as f:
            print('%s' % cur_epoch_id, file=f)
        logger.info('Epoch %s has been visualised' % cur_epoch_id)
def main():
    """Visualisation entry point: builds the test dataloader and repeatedly
    visualises checkpoints (``--eval_all``) or a single one (``--ckpt``).

    No network is constructed here — vis_one_epoch works from saved results,
    so `model` is passed as None.
    """
    args, cfg = parse_config()
    if args.launcher == 'none':
        dist_test = False
        total_gpus = 1
    else:
        total_gpus, cfg.LOCAL_RANK = getattr(common_utils, 'init_dist_%s' % args.launcher)(
            args.tcp_port, args.local_rank, backend='nccl'
        )
        dist_test = True
    if args.batch_size is None:
        args.batch_size = cfg.OPTIMIZATION.BATCH_SIZE_PER_GPU
    else:
        assert args.batch_size % total_gpus == 0, 'Batch size should match the number of gpus'
        args.batch_size = args.batch_size // total_gpus
    # backbone runs share one output tree; strategy-specific runs use cfg.TAG
    if args.eval_backbone:
        output_dir = cfg.ROOT_DIR / 'output' / cfg.EXP_GROUP_PATH / 'backbone' / args.extra_tag
        output_dir.mkdir(parents=True, exist_ok=True)
    else:
        output_dir = cfg.ROOT_DIR / 'output' / cfg.EXP_GROUP_PATH / cfg.TAG / args.extra_tag
        output_dir.mkdir(parents=True, exist_ok=True)
    eval_output_dir = output_dir / 'eval'
    if not args.eval_all:
        # single-checkpoint mode: derive the epoch id from the checkpoint name
        num_list = re.findall(r'\d+', args.ckpt) if args.ckpt is not None else []
        epoch_id = num_list[-1] if num_list.__len__() > 0 else 'no_number'
        eval_output_dir = eval_output_dir / ('epoch_%s' % epoch_id) / cfg.DATA_CONFIG.DATA_SPLIT['test']
    else:
        eval_output_dir = eval_output_dir / 'eval_all_default'
    if args.eval_tag is not None:
        eval_output_dir = eval_output_dir / args.eval_tag
    eval_output_dir.mkdir(parents=True, exist_ok=True)
    log_file = eval_output_dir / ('log_vis_%s.txt' % datetime.datetime.now().strftime('%Y%m%d-%H%M%S'))
    logger = common_utils.create_logger(log_file, rank=cfg.LOCAL_RANK)
    if args.eval_backbone:
        logger.info('start visualizing backbones for {}...'.format(cfg.EXP_GROUP_PATH))
    # log to file
    logger.info('**********************Start logging**********************')
    gpu_list = os.environ['CUDA_VISIBLE_DEVICES'] if 'CUDA_VISIBLE_DEVICES' in os.environ.keys() else 'ALL'
    logger.info('CUDA_VISIBLE_DEVICES=%s' % gpu_list)
    if dist_test:
        logger.info('total_batch_size: %d' % (total_gpus * args.batch_size))
    for key, val in vars(args).items():
        logger.info('{:16} {}'.format(key, val))
    log_config_to_file(cfg, logger=logger)
    ckpt_dir = args.ckpt_dir if args.ckpt_dir is not None else output_dir / 'ckpt'
    test_set, test_loader, sampler = build_dataloader(
        dataset_cfg=cfg.DATA_CONFIG,
        class_names=cfg.CLASS_NAMES,
        batch_size=args.batch_size,
        dist=dist_test, workers=args.workers, logger=logger, training=False
    )
    # no need model to visualise
    model = None
    with torch.no_grad():
        if args.eval_all:
            repeat_eval_ckpt(model, test_loader, args, eval_output_dir, logger, ckpt_dir, dist_test=dist_test)
        else:
            eval_single_ckpt(model, test_loader, args, eval_output_dir, logger, epoch_id, dist_test=dist_test)
if __name__ == '__main__':
    main()
| 10,415 | 43.323404 | 305 | py |
CRB-active-3Ddet | CRB-active-3Ddet-main/tools/eval_utils/eval_utils.py | import pickle
import time
import numpy as np
import torch
import tqdm
from pcdet.models import load_data_to_gpu
from pcdet.utils import common_utils
def statistics_info(cfg, ret_dict, metric, disp_dict):
    """Fold one batch's recall counts from `ret_dict` into the running
    `metric` totals and refresh the progress-bar summary in `disp_dict`.

    Both `metric` and `disp_dict` are mutated in place; nothing is returned.
    """
    thresh_list = cfg.MODEL.POST_PROCESSING.RECALL_THRESH_LIST
    for thresh in thresh_list:
        metric[f'recall_roi_{thresh}'] += ret_dict.get(f'roi_{thresh}', 0)
        metric[f'recall_rcnn_{thresh}'] += ret_dict.get(f'rcnn_{thresh}', 0)
    metric['gt_num'] += ret_dict.get('gt', 0)

    # display (roi hits, rcnn hits) / gt count at the lowest IoU threshold
    lowest = thresh_list[0]
    roi_hits = metric[f'recall_roi_{lowest}']
    rcnn_hits = metric[f'recall_rcnn_{lowest}']
    disp_dict[f'recall_{lowest}'] = '(%d, %d) / %d' % (roi_hits, rcnn_hits, metric['gt_num'])
def add_avg_performance(dataset, result_dict):
    """Append class-averaged metrics under 'Average/...' keys to *result_dict*.

    For KITTI, averages every per-class entry whose key contains both the
    metric type ('3d', 'bev', 'image', 'aos') and the difficulty ('easy',
    'moderate', 'hard'). For Waymo, averages per-class 'AP'/'APH' entries per
    difficulty level, excluding the sign class. An empty match averages to 0.

    Args:
        dataset: dataset object exposing .dataset_cfg['DATASET'].
        result_dict: per-class evaluation results; mutated in place.

    Returns:
        The same result_dict with the 'Average/...' keys added.

    Raises:
        NotImplementedError: for datasets other than Kitti/Waymo (TODO: NuScenes).
    """
    dataset_name = dataset.dataset_cfg['DATASET']
    if dataset_name == 'KittiDataset':
        avg_types = ['3d', 'bev', 'image', 'aos']
        difficulty_levels = ['easy', 'moderate', 'hard']
        # snapshot the keys so newly added 'Average/...' entries can never
        # leak into later averages
        base_keys = list(result_dict.keys())
        for metric_type in avg_types:
            for difficulty in difficulty_levels:
                new_key = 'Average/{}_{}_R40'.format(metric_type, difficulty)
                values = [result_dict[k] for k in base_keys
                          if metric_type in k and difficulty in k]
                result_dict[new_key] = sum(values) / len(values) if len(values) != 0 else 0
    elif dataset_name == 'WaymoDataset':
        avg_types = ['AP', 'APH']
        difficulty_levels = ['Level_1', 'Level_2']
        base_keys = list(result_dict.keys())
        for metric_type in avg_types:
            for difficulty in difficulty_levels:
                new_key = 'Average/{}_{}'.format(metric_type, difficulty)
                # exact match on the metric after the last '/', and do not
                # consider the sign class
                values = [result_dict[k] for k in base_keys
                          if metric_type.lower() == k.lower().split('/')[-1]
                          and difficulty.lower() in k.lower()
                          and 'sign' not in k.lower()]
                result_dict[new_key] = sum(values) / len(values) if len(values) != 0 else 0
    # TODO: NuScenes
    else:
        raise NotImplementedError
    return result_dict
def eval_one_epoch(cfg, model, dataloader, epoch_id, logger, dist_test=False, save_to_file=False, result_dir=None):
    """Run the model over the whole test split and compute detection metrics.

    Accumulates ROI/RCNN recall at each configured threshold, gathers
    per-frame prediction annotations, merges results across ranks when
    dist_test is set, dumps them to result.pkl, and runs the dataset's
    official evaluation.

    Returns:
        dict of scalar metrics (recall + dataset evaluation results, plus
        class-averaged entries) on rank 0; {} on other ranks.
    """
    result_dir.mkdir(parents=True, exist_ok=True)
    final_output_dir = result_dir / 'final_result' / 'data'
    if save_to_file:
        final_output_dir.mkdir(parents=True, exist_ok=True)
    metric = {
        'gt_num': 0,
    }
    for cur_thresh in cfg.MODEL.POST_PROCESSING.RECALL_THRESH_LIST:
        metric['recall_roi_%s' % str(cur_thresh)] = 0
        metric['recall_rcnn_%s' % str(cur_thresh)] = 0
    dataset = dataloader.dataset
    class_names = dataset.class_names
    det_annos = []
    logger.info('*************** EPOCH %s EVALUATION *****************' % epoch_id)
    if dist_test:
        num_gpus = torch.cuda.device_count()
        local_rank = cfg.LOCAL_RANK % num_gpus
        model = torch.nn.parallel.DistributedDataParallel(
            model,
            device_ids=[local_rank],
            broadcast_buffers=False
        )
    model.eval()
    if cfg.LOCAL_RANK == 0:
        progress_bar = tqdm.tqdm(total=len(dataloader), leave=True, desc='eval', dynamic_ncols=True)
    start_time = time.time()
    for i, batch_dict in enumerate(dataloader):
        load_data_to_gpu(batch_dict)
        with torch.no_grad():
            # flag the batch as test-time for the model's forward pass
            batch_dict['test'] = True
            pred_dicts, ret_dict = model(batch_dict)
        disp_dict = {}
        # accumulate recall counters and refresh the progress-bar string
        statistics_info(cfg, ret_dict, metric, disp_dict)
        annos = dataset.generate_prediction_dicts(
            batch_dict, pred_dicts, class_names,
            output_path=final_output_dir if save_to_file else None
        )
        det_annos += annos
        if cfg.LOCAL_RANK == 0:
            progress_bar.set_postfix(disp_dict)
            progress_bar.update()
    if cfg.LOCAL_RANK == 0:
        progress_bar.close()
    if dist_test:
        # gather annotations and per-rank metric dicts onto every rank
        rank, world_size = common_utils.get_dist_info()
        det_annos = common_utils.merge_results_dist(det_annos, len(dataset), tmpdir=result_dir / 'tmpdir')
        metric = common_utils.merge_results_dist([metric], world_size, tmpdir=result_dir / 'tmpdir')
    logger.info('*************** Performance of EPOCH %s *****************' % epoch_id)
    sec_per_example = (time.time() - start_time) / len(dataloader.dataset)
    logger.info('Generate label finished(sec_per_example: %.4f second).' % sec_per_example)
    if cfg.LOCAL_RANK != 0:
        # only rank 0 computes and reports the final metrics
        return {}
    ret_dict = {}
    if dist_test:
        # sum the counters collected from every rank into metric[0]
        for key, val in metric[0].items():
            for k in range(1, world_size):
                metric[0][key] += metric[k][key]
        metric = metric[0]
    gt_num_cnt = metric['gt_num']
    for cur_thresh in cfg.MODEL.POST_PROCESSING.RECALL_THRESH_LIST:
        # max(gt, 1) guards against division by zero on empty splits
        cur_roi_recall = metric['recall_roi_%s' % str(cur_thresh)] / max(gt_num_cnt, 1)
        cur_rcnn_recall = metric['recall_rcnn_%s' % str(cur_thresh)] / max(gt_num_cnt, 1)
        logger.info('recall_roi_%s: %f' % (cur_thresh, cur_roi_recall))
        logger.info('recall_rcnn_%s: %f' % (cur_thresh, cur_rcnn_recall))
        ret_dict['recall/roi_%s' % str(cur_thresh)] = cur_roi_recall
        ret_dict['recall/rcnn_%s' % str(cur_thresh)] = cur_rcnn_recall
    total_pred_objects = 0
    for anno in det_annos:
        total_pred_objects += anno['name'].__len__()
    logger.info('Average predicted number of objects(%d samples): %.3f'
                % (len(det_annos), total_pred_objects / max(1, len(det_annos))))
    with open(result_dir / 'result.pkl', 'wb') as f:
        pickle.dump(det_annos, f)
    # run the dataset's official evaluation protocol on the collected annos
    result_str, result_dict = dataset.evaluation(
        det_annos, class_names,
        eval_metric=cfg.MODEL.POST_PROCESSING.EVAL_METRIC,
        output_path=final_output_dir
    )
    logger.info(result_str)
    result_dict = add_avg_performance(dataset, result_dict)
    ret_dict.update(result_dict)
    logger.info('Result is save to %s' % result_dir)
    logger.info('****************Evaluation done.*****************')
    return ret_dict
def vis_one_epoch(cfg, model, dataloader, epoch_id, logger, dist_test=False, save_to_file=False, result_dir=None):
    """Re-run dataset evaluation on previously saved detection results.

    Unlike the regular eval loop, this function does NOT run the model:
    it loads the detections that an earlier evaluation pass dumped to
    ``result_dir / 'result.pkl'`` and feeds them straight into the
    dataset's evaluation routine. Useful for recomputing metrics (or
    visualizing) without repeating inference.

    Args:
        cfg: global config; only ``cfg.MODEL.POST_PROCESSING.EVAL_METRIC``
            is read here.
        model: unused — kept so the signature mirrors the inference-based
            eval function and callers can swap the two.
        dataloader: only ``dataloader.dataset`` is used (for class names
            and the ``evaluation`` method).
        epoch_id: unused in this variant (no per-epoch logging of recall).
        logger: logger for the evaluation summary.
        dist_test: unused — no inference is performed, so there is nothing
            to distribute.
        save_to_file: if True, create the ``final_result/data`` directory
            and pass it to the dataset evaluation as an output path.
        result_dir: pathlib.Path holding ``result.pkl`` from a previous run;
            also used as the output root.

    Returns:
        dict of evaluation metrics (including averaged performance added
        by ``add_avg_performance``). Empty-ish dict only if evaluation
        yields nothing.
    """
    result_dir.mkdir(parents=True, exist_ok=True)

    final_output_dir = result_dir / 'final_result' / 'data'
    if save_to_file:
        final_output_dir.mkdir(parents=True, exist_ok=True)

    dataset = dataloader.dataset
    class_names = dataset.class_names

    ret_dict = {}
    # Load detections produced by a previous evaluation pass.
    # NOTE(review): pickle.load is only safe on files this pipeline wrote
    # itself — do not point result_dir at untrusted data.
    with open(result_dir / 'result.pkl', 'rb') as f:
        det_annos = pickle.load(f)

    result_str, result_dict = dataset.evaluation(
        det_annos, class_names,
        eval_metric=cfg.MODEL.POST_PROCESSING.EVAL_METRIC,
        output_path=final_output_dir
    )

    logger.info(result_str)
    result_dict = add_avg_performance(dataset, result_dict)
    ret_dict.update(result_dict)

    logger.info('Result is saved to %s' % result_dir)
    logger.info('****************Evaluation done.*****************')
    return ret_dict
# This module is a library of evaluation helpers; there is intentionally
# no CLI entry point when executed directly.
if __name__ == '__main__':
    pass
| 10,481 | 39.16092 | 222 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.