repo stringlengths 1 99 | file stringlengths 13 215 | code stringlengths 12 59.2M | file_length int64 12 59.2M | avg_line_length float64 3.82 1.48M | max_line_length int64 12 2.51M | extension_type stringclasses 1
value |
|---|---|---|---|---|---|---|
DG-Font | DG-Font-main/train/train.py | from tqdm import trange
import torch.nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
from tools.utils import *
from tools.ops import compute_grad_gp, update_average, copy_norm_params, queue_data, dequeue_data, \
average_gradients, calc_adv_loss, calc_contrastive_loss, calc_recon_loss
def trainGAN(data_loader, networks, opts, epoch, args, additional):
    """Train the discriminator, generator and style encoder for one epoch.

    :param data_loader: DataLoader yielding (images, font-class labels)
    :param networks: dict of modules: 'D', 'G', 'C', 'G_EMA', 'C_EMA'
    :param opts: dict of optimizers: 'D', 'G', 'C'
    :param epoch: current epoch index (0-based)
    :param args: parsed arguments (iters, gpu, loss weights, ema_start, ...)
    :param additional: dict of extras; only additional['logger'] is used
    """
    # avg meter
    d_losses = AverageMeter()
    d_advs = AverageMeter()
    d_gps = AverageMeter()
    g_losses = AverageMeter()
    g_advs = AverageMeter()
    g_imgrecs = AverageMeter()
    g_rec = AverageMeter()
    moco_losses = AverageMeter()
    # set nets (unwrap .module when running under DistributedDataParallel)
    D = networks['D']
    G = networks['G'] if not args.distributed else networks['G'].module
    C = networks['C'] if not args.distributed else networks['C'].module
    G_EMA = networks['G_EMA'] if not args.distributed else networks['G_EMA'].module
    C_EMA = networks['C_EMA'] if not args.distributed else networks['C_EMA'].module
    # set opts
    d_opt = opts['D']
    g_opt = opts['G']
    c_opt = opts['C']
    # switch to train mode
    D.train()
    G.train()
    C.train()
    C_EMA.train()
    G_EMA.train()
    logger = additional['logger']
    # summary writer
    train_it = iter(data_loader)
    t_train = trange(0, args.iters, initial=0, total=args.iters)
    for i in t_train:
        try:
            imgs, y_org = next(train_it)
        except StopIteration:
            # Data loader exhausted before args.iters steps: restart it.
            # (Was a bare `except:`, which also hid genuine loading errors.)
            train_it = iter(data_loader)
            imgs, y_org = next(train_it)
        x_org = imgs
        # random permutation pairs every source image with a style reference
        x_ref_idx = torch.randperm(x_org.size(0))
        x_org = x_org.cuda(args.gpu)
        y_org = y_org.cuda(args.gpu)
        x_ref_idx = x_ref_idx.cuda(args.gpu)
        x_ref = x_org.clone()
        x_ref = x_ref[x_ref_idx]
        training_mode = 'GAN'
        ####################
        # BEGIN Train GANs #
        ####################
        with torch.no_grad():
            # generate fakes without tracking gradients for the D step
            y_ref = y_org.clone()
            y_ref = y_ref[x_ref_idx]
            s_ref = C.moco(x_ref)
            c_src, skip1, skip2 = G.cnt_encoder(x_org)
            x_fake, _ = G.decode(c_src, s_ref, skip1, skip2)
        # real images need grad enabled for the gradient penalty below
        x_ref.requires_grad_()
        d_real_logit, _ = D(x_ref, y_ref)
        d_fake_logit, _ = D(x_fake.detach(), y_ref)
        d_adv_real = calc_adv_loss(d_real_logit, 'd_real')
        d_adv_fake = calc_adv_loss(d_fake_logit, 'd_fake')
        d_adv = d_adv_real + d_adv_fake
        d_gp = args.w_gp * compute_grad_gp(d_real_logit, x_ref, is_patch=False)
        d_loss = d_adv + d_gp
        d_opt.zero_grad()
        # retain graph: d_gp.backward() reuses the d_real_logit graph
        d_adv_real.backward(retain_graph=True)
        d_gp.backward()
        d_adv_fake.backward()
        if args.distributed:
            average_gradients(D)
        d_opt.step()
        # Train G
        s_src = C.moco(x_org)
        s_ref = C.moco(x_ref)
        c_src, skip1, skip2 = G.cnt_encoder(x_org)
        x_fake, offset_loss = G.decode(c_src, s_ref, skip1, skip2)
        x_rec, _ = G.decode(c_src, s_src, skip1, skip2)
        g_fake_logit, _ = D(x_fake, y_ref)
        g_rec_logit, _ = D(x_rec, y_org)
        g_adv_fake = calc_adv_loss(g_fake_logit, 'g')
        g_adv_rec = calc_adv_loss(g_rec_logit, 'g')
        g_adv = g_adv_fake + g_adv_rec
        g_imgrec = calc_recon_loss(x_rec, x_org)
        # content consistency: re-encode the fake and match the source content
        c_x_fake, _, _ = G.cnt_encoder(x_fake)
        g_conrec = calc_recon_loss(c_x_fake, c_src)
        g_loss = args.w_adv * g_adv + args.w_rec * g_imgrec + args.w_rec * g_conrec + args.w_off * offset_loss
        g_opt.zero_grad()
        c_opt.zero_grad()
        g_loss.backward()
        if args.distributed:
            average_gradients(G)
            average_gradients(C)
        c_opt.step()
        g_opt.step()
        ##################
        # END Train GANs #
        ##################
        if epoch >= args.ema_start:
            # maintain exponential moving averages of G and C
            training_mode = training_mode + "_EMA"
            update_average(G_EMA, G)
            update_average(C_EMA, C)
        torch.cuda.synchronize()
        with torch.no_grad():
            if epoch >= args.separated:
                d_losses.update(d_loss.item(), x_org.size(0))
                d_advs.update(d_adv.item(), x_org.size(0))
                d_gps.update(d_gp.item(), x_org.size(0))
                g_losses.update(g_loss.item(), x_org.size(0))
                g_advs.update(g_adv.item(), x_org.size(0))
                g_imgrecs.update(g_imgrec.item(), x_org.size(0))
                g_rec.update(g_conrec.item(), x_org.size(0))
                moco_losses.update(offset_loss.item(), x_org.size(0))
            if (i + 1) % args.log_step == 0 and (args.gpu == 0 or args.gpu == '0'):
                summary_step = epoch * args.iters + i
                add_logs(args, logger, 'D/LOSS', d_losses.avg, summary_step)
                add_logs(args, logger, 'D/ADV', d_advs.avg, summary_step)
                add_logs(args, logger, 'D/GP', d_gps.avg, summary_step)
                add_logs(args, logger, 'G/LOSS', g_losses.avg, summary_step)
                add_logs(args, logger, 'G/ADV', g_advs.avg, summary_step)
                add_logs(args, logger, 'G/IMGREC', g_imgrecs.avg, summary_step)
                add_logs(args, logger, 'G/conrec', g_rec.avg, summary_step)
                add_logs(args, logger, 'C/OFFSET', moco_losses.avg, summary_step)
                print('Epoch: [{}/{}] [{}/{}] MODE[{}] Avg Loss: D[{d_losses.avg:.2f}] G[{g_losses.avg:.2f}] '.format(epoch + 1, args.epochs, i+1, args.iters,
                                                                                 training_mode, d_losses=d_losses, g_losses=g_losses))
    # copy normalization statistics into the EMA copies after the epoch
    copy_norm_params(G_EMA, G)
    copy_norm_params(C_EMA, C)
| 5,671 | 30.511111 | 158 | py |
DG-Font | DG-Font-main/validation/validation.py | import torch.nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
import torchvision.utils as vutils
import torch.nn.functional as F
import numpy as np
try:
    from tqdm import tqdm
except ImportError:
    # If tqdm is not available, provide a no-op stand-in. It accepts and
    # ignores tqdm's optional arguments (total=, desc=, ...) so call sites
    # need no changes.
    def tqdm(x, *args, **kwargs):
        return x
from scipy import linalg
from tools.utils import *
def validateUN(data_loader, networks, epoch, args, additional=None):
    """Generate reference-guided translation grids for qualitative validation.

    For every (source class, reference class) pair, translates source images
    to the reference style with the EMA generator/style-encoder and saves two
    image grids per pair (fixed samples and randomly drawn samples) under
    ``args.res_dir``. No metric is returned; this routine only writes images.
    """
    # set nets (unwrap .module when running under DistributedDataParallel)
    D = networks['D']
    G = networks['G'] if not args.distributed else networks['G'].module
    C = networks['C'] if not args.distributed else networks['C'].module
    C_EMA = networks['C_EMA'] if not args.distributed else networks['C_EMA'].module
    G_EMA = networks['G_EMA'] if not args.distributed else networks['G_EMA'].module
    # switch to eval mode (original comment said "train mode" by mistake)
    D.eval()
    G.eval()
    C.eval()
    C_EMA.eval()
    G_EMA.eval()
    # data loader
    val_dataset = data_loader['TRAINSET']
    val_loader = data_loader['VAL']
    # x_each_cls[i] holds up to args.val_num sample images of class i
    x_each_cls = []
    with torch.no_grad():
        val_tot_tars = torch.tensor(val_dataset.targets)
        for cls_idx in range(len(args.att_to_use)):
            # take the last args.val_num samples belonging to this class
            tmp_cls_set = (val_tot_tars == args.att_to_use[cls_idx]).nonzero()[-args.val_num:]
            tmp_ds = torch.utils.data.Subset(val_dataset, tmp_cls_set)
            tmp_dl = torch.utils.data.DataLoader(tmp_ds, batch_size=args.val_num, shuffle=False,
                                                 num_workers=0, pin_memory=True, drop_last=False)
            tmp_iter = iter(tmp_dl)
            tmp_sample = None
            for sample_idx in range(len(tmp_iter)):
                imgs, _ = next(tmp_iter)
                x_ = imgs
                if tmp_sample is None:
                    tmp_sample = x_.clone()
                else:
                    tmp_sample = torch.cat((tmp_sample, x_), 0)
            x_each_cls.append(tmp_sample)
    if epoch >= args.fid_start:
        # Reference guided
        with torch.no_grad():
            # Just a buffer image ( to make a grid )
            ones = torch.ones(1, x_each_cls[0].size(1), x_each_cls[0].size(2), x_each_cls[0].size(3)).cuda(args.gpu, non_blocking=True)
            for src_idx in range(len(args.att_to_use)):
                # fixed and randomly-drawn source batches for this class
                x_src = x_each_cls[src_idx][:args.val_batch, :, :, :].cuda(args.gpu, non_blocking=True)
                rnd_idx = torch.randperm(x_each_cls[src_idx].size(0))[:args.val_batch]
                x_src_rnd = x_each_cls[src_idx][rnd_idx].cuda(args.gpu, non_blocking=True)
                for ref_idx in range(len(args.att_to_use)):
                    # grids start with a buffer row followed by the sources
                    x_res_ema = torch.cat((ones, x_src), 0)
                    x_rnd_ema = torch.cat((ones, x_src_rnd), 0)
                    x_ref = x_each_cls[ref_idx][:args.val_batch, :, :, :].cuda(args.gpu, non_blocking=True)
                    rnd_idx = torch.randperm(x_each_cls[ref_idx].size(0))[:args.val_batch]
                    x_ref_rnd = x_each_cls[ref_idx][rnd_idx].cuda(args.gpu, non_blocking=True)
                    for sample_idx in range(args.val_batch):
                        # one grid row per reference image: translate every
                        # source image to this reference's style
                        x_ref_tmp = x_ref[sample_idx: sample_idx + 1].repeat((args.val_batch, 1, 1, 1))
                        c_src, skip1, skip2 = G_EMA.cnt_encoder(x_src)
                        s_ref = C_EMA(x_ref_tmp, sty=True)
                        x_res_ema_tmp,_ = G_EMA.decode(c_src, s_ref, skip1, skip2)
                        x_ref_tmp = x_ref_rnd[sample_idx: sample_idx + 1].repeat((args.val_batch, 1, 1, 1))
                        c_src, skip1, skip2 = G_EMA.cnt_encoder(x_src_rnd)
                        s_ref = C_EMA(x_ref_tmp, sty=True)
                        x_rnd_ema_tmp,_ = G_EMA.decode(c_src, s_ref, skip1, skip2)
                        # prepend the reference image to its row of results
                        x_res_ema_tmp = torch.cat((x_ref[sample_idx: sample_idx + 1], x_res_ema_tmp), 0)
                        x_res_ema = torch.cat((x_res_ema, x_res_ema_tmp), 0)
                        x_rnd_ema_tmp = torch.cat((x_ref_rnd[sample_idx: sample_idx + 1], x_rnd_ema_tmp), 0)
                        x_rnd_ema = torch.cat((x_rnd_ema, x_rnd_ema_tmp), 0)
                    vutils.save_image(x_res_ema, os.path.join(args.res_dir, '{}_EMA_{}_{}{}.jpg'.format(args.gpu, epoch+1, src_idx, ref_idx)), normalize=True,
                                      nrow=(x_res_ema.size(0) // (x_src.size(0) + 2) + 1))
                    vutils.save_image(x_rnd_ema, os.path.join(args.res_dir, '{}_RNDEMA_{}_{}{}.jpg'.format(args.gpu, epoch+1, src_idx, ref_idx)), normalize=True,
                                      nrow=(x_res_ema.size(0) // (x_src.size(0) + 2) + 1))
| 4,643 | 46.387755 | 161 | py |
FPConv | FPConv-master/tools/test_scannet.py | import torch
from torch.utils.data import DataLoader
import numpy as np
import argparse
import importlib
import os
import sys
import json
from utils.switchnorm import convert_sn
from datasets.scannet_dataset_rgb_test import ScannetDatasetWholeScene_evaluation
np.seterr(divide='ignore', invalid='ignore')
parser = argparse.ArgumentParser(description="Arg parser")
parser.add_argument("--gpu", type=str, default='6,7')
parser.add_argument("--batch_size", type=int, default=48)
parser.add_argument("--with_rgb", action='store_true', default=False)
parser.add_argument("--with_norm", action='store_true', default=False)
parser.add_argument("--use_sn", action='store_true', default=False)
parser.add_argument("--model", type=str, default='fpcnn_scannet_tiny_v3')
parser.add_argument("--weight_dir", type=str, default=None)
parser.add_argument("--save_dir", type=str, default=None)
parser.add_argument("--config", type=str, default='./config.json')
parser.add_argument("--skip_exist", type=bool, default=False)
parser.add_argument("--num_points", type=int, default=8192)
parser.add_argument("--mode", type=str, default='eval')
args = parser.parse_args()
os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
print(args)
# load config files
with open(args.config, 'r') as f:
_cfg = json.load(f)
print(_cfg)
NUM_CLASSES = 21
NUM_POINTS = args.num_points # 8192 # 10240 + 1024
SEM_LABELS = None
class_dict = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16, 24, 28, 33, 34, 36, 39]) # 21 (0: unknown)
def load_checkpoint(model, filename):
    """Load model weights from a checkpoint file.

    :param model: module whose state_dict will be populated
    :param filename: checkpoint path; file must contain 'epoch' and 'model_state'
    :return: the epoch stored in the checkpoint
    :raises FileNotFoundError: if ``filename`` does not exist; the path is
        included in the exception message (previously it was only printed)
    """
    if os.path.isfile(filename):
        print("==> Loading from checkpoint %s" % filename)
        checkpoint = torch.load(filename)
        epoch = checkpoint['epoch']
        model.load_state_dict(checkpoint['model_state'])
        print("==> Done")
        return epoch
    raise FileNotFoundError(filename)
def vote(predict, vote_num, pred, points_idx):
    ''' Accumulate batch class scores into the per-scene score table.

    :param predict: (pn,21) float, accumulated class scores per scene point
    :param vote_num: (pn,1) int, number of predictions accumulated per point
    :param pred: (bs,np,21) float, batch class scores
    :param points_idx: (bs,np) int, scene-point index of each batch point
    :return: updated (predict, vote_num)
    '''
    # Flatten the batch; note the original code shadowed the numpy alias
    # with a local variable named `np`.
    flat_idx = points_idx.reshape(-1)
    flat_pred = pred.reshape(-1, pred.shape[-1])
    # np.add.at is an unbuffered in-place add, so repeated point indices
    # accumulate correctly; replaces the original O(bs*np) Python loop.
    np.add.at(predict, flat_idx, flat_pred)
    np.add.at(vote_num, (flat_idx, 0), 1)
    return predict, vote_num
def write_to_file(path, probs):
    '''
    Persist per-point predictions for one scene.

    In 'test' mode the argmax class (skipping class 0 'unknown', mapped back
    through class_dict) is written as plain text, one label per line;
    otherwise the raw probabilities are saved as a .npy file.

    :param path: output path without extension
    :param probs: N,22
    '''
    suffix = '.txt' if args.mode == 'test' else '.npy'
    file_name = path + suffix
    if args.skip_exist and os.path.isfile(file_name):
        print(' -- file exists, skip', file_name)
        return
    if args.mode == 'test':
        labels = class_dict[np.argmax(probs[:, 1:], axis=1) + 1]
        with open(file_name, 'w') as f:
            f.write('\n'.join(str(label) for label in labels))
    else:
        np.save(file_name, probs)
    print(' -- save file to ====>', file_name)
def test(model, dst_loader, pn_list, scene_list):
    '''
    Whole-scene evaluation: accumulate (vote) block predictions per scene,
    optionally dump them to disk, and (outside 'test' mode) report accuracy
    and per-class IoU against SEM_LABELS.

    :param model: segmentation net; takes (B,N,C) points, returns (B,N,21) logits
    :param dst_loader: DataLoader over blocks of the current scene
    :param pn_list: sn (list => int), the number of points in a scene
    :param scene_list: sn (list => str), scene id
    '''
    model.eval()
    total_seen = 0
    total_correct = 0
    total_seen_class = [0] * NUM_CLASSES
    total_correct_class = [0] * NUM_CLASSES
    total_iou_deno_class = [0] * NUM_CLASSES
    scene_num = len(scene_list)
    for scene_index in range(scene_num):
        print(' ======= {}/{} ======= '.format(scene_index, scene_num))
        scene_id = scene_list[scene_index]
        point_num = pn_list[scene_index]
        predict = np.zeros((point_num, NUM_CLASSES), dtype=np.float32)  # pn,21
        # np.int was removed in NumPy 1.24; use an explicit width instead
        vote_num = np.zeros((point_num, 1), dtype=np.int64)  # pn,1
        for batch_data in dst_loader:
            pc, seg, smpw, pidx = batch_data
            pc = pc.cuda().float()
            pred = model(pc)  # B,N,C
            pred = torch.nn.functional.softmax(pred, dim=2)
            pred = pred.cpu().detach().numpy()
            pidx = pidx.numpy()  # B,N
            predict, vote_num = vote(predict, vote_num, pred, pidx)
        # average the accumulated scores by how often each point was seen
        predict = predict / vote_num
        if args.save_dir is not None:
            if not os.path.exists(args.save_dir):
                os.makedirs(args.save_dir)
            save_path = os.path.join(args.save_dir, '{}'.format(scene_id))
            write_to_file(save_path, predict)
        if args.mode != 'test':
            predict = np.argmax(predict[:, 1:], axis=1)  # pn
            predict += 1  # shift back: class 0 ('unknown') was excluded above
            labels = SEM_LABELS[scene_index]
            total_seen += np.sum(labels > 0)  # point_num
            total_correct += np.sum((predict == labels) & (labels > 0))
            print('accuracy: ', total_correct / total_seen)
            for l in range(NUM_CLASSES):
                total_seen_class[l] += np.sum((labels == l) & (labels > 0))
                total_correct_class[l] += np.sum((predict == l) & (labels == l))
                total_iou_deno_class[l] += np.sum(((predict == l) & (labels > 0)) | (labels == l))
    if args.mode != 'test':
        # np.float was removed in NumPy 1.24; use np.float64 explicitly
        IoU = np.array(total_correct_class[1:]) / (np.array(total_iou_deno_class[1:], dtype=np.float64) + 1e-6)
        print('eval point avg class IoU: %f' % (np.mean(IoU)))
        for i in range(IoU.shape[0]):
            print('Class %d : %.4f' % (i + 1, IoU[i]))
        print('eval accuracy: %f' % (total_correct / float(total_seen)))
        print('eval avg class acc: %f' % (np.mean(np.array(total_correct_class[1:]) / (np.array(total_seen_class[1:], dtype=np.float64) + 1e-6))))
if __name__ == '__main__':
    # number of extra per-point input features (beyond xyz)
    input_channels = 0
    if args.with_rgb: input_channels += 3
    if args.with_norm: input_channels += 3
    # Initialize Model and Data Loader
    MODEL = importlib.import_module('models.' + args.model)
    model = MODEL.get_model(num_class=NUM_CLASSES, input_channels=input_channels, num_pts=args.num_points)
    if args.use_sn:
        print(' --- use sn')
        # replace normalization layers with switchable normalization
        model = convert_sn(model)
    load_checkpoint(model, args.weight_dir)
    model.cuda()
    model = torch.nn.parallel.DataParallel(model)
    test_dst = ScannetDatasetWholeScene_evaluation(root=_cfg['scannet_pickle'],
                                                   scene_list_dir=_cfg['scene_list'],
                                                   split=args.mode,
                                                   block_points=NUM_POINTS,
                                                   with_rgb=args.with_rgb,
                                                   with_norm=args.with_norm)
    pn_list = test_dst.point_num
    scene_list = test_dst.scene_list
    # module-level SEM_LABELS is consumed by test() for accuracy/IoU
    SEM_LABELS = test_dst.semantic_labels_list
    test_loader = DataLoader(test_dst, batch_size=args.batch_size, shuffle=False, pin_memory=True, num_workers=0)
    with torch.no_grad():
        test(model, test_loader, pn_list, scene_list)
| 7,024 | 36.367021 | 139 | py |
FPConv | FPConv-master/tools/test_s3dis.py | import os, sys
import json
import numpy as np
import argparse
import importlib
import torch
from torch.utils.data import DataLoader
from datasets.s3dis_dataset_test import S3DISWholeScene_evaluation
np.seterr(divide='ignore', invalid='ignore')
parser = argparse.ArgumentParser(description="Arg parser")
parser.add_argument("--gpu", type=str, default='0')
parser.add_argument("--batch_size", type=int, default=12)
parser.add_argument("--model", type=str, default='fpcnn_s3dis')
parser.add_argument("--stride", type=float, default=0.5)
parser.add_argument("--block_size", type=float, default=2)
parser.add_argument("--test_area", type=int, default=5)
parser.add_argument("--num_pts", type=int, default=14564)
parser.add_argument("--weight_dir", type=str, default=None) # checkpoint path
parser.add_argument("--save_dir", type=str, default=None)
parser.add_argument("--config", type=str, default='./config.json')
args = parser.parse_args()
os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
print(args)
with open(args.config, 'r') as f:
_cfg = json.load(f)
print(_cfg)
SEM_LABELS = None
NUM_CLASSES = 13
NUM_POINTS = args.num_pts
class_name_path = os.path.join('utils/s3dis_meta/class_names.txt')
g_classes = [x.rstrip() for x in open(class_name_path)]
class_dict = np.arange(13)
if args.save_dir is not None:
os.makedirs(args.save_dir, exist_ok=True)
def load_checkpoint(model, filename):
    """Load model weights from a checkpoint file.

    :param model: module whose state_dict will be populated
    :param filename: checkpoint path; file must contain 'epoch' and 'model_state'
    :return: the epoch stored in the checkpoint
    :raises FileNotFoundError: if ``filename`` does not exist; the path is
        included in the exception message (previously the bare raise carried
        no information about which file was missing)
    """
    if os.path.isfile(filename):
        print("==> Loading from checkpoint %s" % filename)
        checkpoint = torch.load(filename)
        epoch = checkpoint['epoch']
        model.load_state_dict(checkpoint['model_state'])
        print("==> Done")
        return epoch
    raise FileNotFoundError(filename)
def vote(predict, pred, points_idx, vote_num_point):
    ''' Accumulate batch class scores into the per-scene score table.

    :param predict: (pn,21) float, accumulated class scores per scene point
    :param pred: (bs,np,21) float, batch class scores
    :param points_idx: (bs,np) int, scene-point index of each batch point
    :param vote_num_point: (pn, 1) times that points are overlapped.
        NOTE(review): the original implementation never updated this
        argument either; it is kept only for interface compatibility.
    :return: updated predict
    '''
    flat_idx = points_idx.reshape(-1)
    flat_pred = pred.reshape(-1, pred.shape[-1])
    # np.add.at is an unbuffered in-place add, so repeated point indices
    # accumulate correctly; replaces the original O(bs*np) Python loop
    # (which also shadowed the numpy alias with a local named `np`).
    np.add.at(predict, flat_idx, flat_pred)
    return predict
def write_to_file(path, labels):
    '''
    Save predicted labels as a .npy file (numpy appends the extension).

    :param path: path to save predicted label
    :param labels: n (list => int)
    '''
    np.save(path, labels)
def test(model, dst_loader, pn_list, scene_list):
    '''
    Whole-scene S3DIS evaluation: vote block predictions into per-scene
    scores, optionally save the argmax labels, and report accuracy and
    per-class IoU against SEM_LABELS.

    :param model: segmentation net; takes (B,N,C) points, returns (B,N,13) logits
    :param dst_loader: DataLoader over blocks of the current scene
    :param pn_list: sn (list => int), the number of points in a scene
    :param scene_list: sn (list => str), scene id
    '''
    model.eval()
    total_seen = 0
    total_correct = 0
    total_seen_class = [0] * NUM_CLASSES
    total_correct_class = [0] * NUM_CLASSES
    total_iou_deno_class = [0] * NUM_CLASSES
    scene_num = len(scene_list)
    for scene_index in range(scene_num):
        print(' ======= {}/{} ======= '.format(scene_index, scene_num))
        scene_id = scene_list[scene_index]
        point_num = pn_list[scene_index]
        predict = np.zeros((point_num, NUM_CLASSES), dtype=np.float32)  # pn,13
        vote_num_point = np.zeros((point_num, 1), dtype=np.float32)
        for batch_data in dst_loader:
            pc, seg, pidx = batch_data
            pc = pc.cuda().float()
            with torch.no_grad():
                pred = model(pc)  # B,N,C
            pred = torch.softmax(pred, dim=2)
            pred = pred.cpu().detach().numpy()
            pidx = pidx.numpy()  # B,N
            predict = vote(predict, pred, pidx, vote_num_point)
        predict = np.argmax(predict, axis=1)
        # Save predictions
        if args.save_dir is not None:
            save_path = os.path.join(args.save_dir, scene_id)
            write_to_file(save_path, predict)
            print('Save predicted label to {}.'.format(save_path))
        labels = SEM_LABELS[scene_index]
        total_seen += np.sum(labels >= 0)  # point_num
        total_correct += np.sum((predict == labels) & (labels >= 0))
        for l in range(NUM_CLASSES):
            total_seen_class[l] += np.sum((labels == l) & (labels >= 0))
            total_correct_class[l] += np.sum((predict == l)
                                             & (labels == l))
            total_iou_deno_class[l] += np.sum(
                ((predict == l) & (labels >= 0)) | (labels == l))
        print('Batch eval accuracy: %f' %
              (total_correct / float(total_seen)))
    # np.float was removed in NumPy 1.24; use np.float64 explicitly
    IoU = np.array(
        total_correct_class) / (np.array(total_iou_deno_class, dtype=np.float64) + 1e-6)
    print('eval point avg class IoU: %f' % (np.mean(IoU)))
    for i in range(IoU.shape[0]):
        print('%s : %.4f' % (g_classes[i], IoU[i]))
    print('eval accuracy: %f' % (total_correct / float(total_seen)))
    print('eval avg class acc: %f' % (np.mean(np.array(
        total_correct_class) / (np.array(total_seen_class, dtype=np.float64) + 1e-6))))
if __name__ == '__main__':
    # Initialize Model and Data Loader
    MODEL = importlib.import_module('models.' + args.model)
    # input_channels=6: xyz coordinates plus rgb color
    model = MODEL.get_model(num_class=NUM_CLASSES, input_channels=6)
    load_checkpoint(model, args.weight_dir)
    model = torch.nn.parallel.DataParallel(model)
    model.cuda()
    test_dst = S3DISWholeScene_evaluation(root=_cfg['s3dis_data_root'],
                                          split='test',
                                          test_area=args.test_area,
                                          block_points=NUM_POINTS,
                                          block_size=args.block_size,
                                          stride=args.stride,
                                          with_rgb=True)
    pn_list = test_dst.point_num
    scene_list = test_dst.scene_list
    # module-level SEM_LABELS is consumed by test() for accuracy/IoU
    SEM_LABELS = test_dst.semantic_labels_list
    test_loader = DataLoader(test_dst, batch_size=args.batch_size,
                             shuffle=False, pin_memory=True, num_workers=0)
    test(model, test_loader, pn_list, scene_list)
| 5,981 | 35.036145 | 87 | py |
FPConv | FPConv-master/tools/train_scannet.py | import torch
import torch.nn as nn
from torch.utils.data import DataLoader
import torch.distributed as dist
import os, sys
import argparse
import importlib
import numpy as np
import json
import tensorboard_logger as tb_log
from datasets.scannet_dataset_rgb import ScannetDataset, ScannetDatasetWholeScene
from utils.saver import Saver
from utils.switchnorm import convert_sn
np.seterr(divide='ignore', invalid='ignore')
parser = argparse.ArgumentParser(description="Arg parser")
parser.add_argument("--gpu", type=str, default='0,1')
parser.add_argument("--batch_size", type=int, default=12)
parser.add_argument("--epochs", type=int, default=300)
parser.add_argument('--workers', type=int, default=12)
parser.add_argument("--mode", type=str, default='train')
parser.add_argument("--model", type=str, default='fpcnn_scannet_tiny_v3')
parser.add_argument("--save_dir", type=str, default='logs/test_scannet_tiny')
parser.add_argument("--config", type=str, default='./config.json')
parser.add_argument("--use_sn", action='store_true', default=False)
parser.add_argument('--lr', type=float, default=0.01)
parser.add_argument('--lr_decay', type=float, default=0.1)
parser.add_argument('--lr_clip', type=float, default=0.000001)
parser.add_argument('--decay_step_list', type=list, default=[100, 200, 300])
parser.add_argument('--weight_decay', type=float, default=0.001)
parser.add_argument("--resume", type=str, default=None)
parser.add_argument("--sample_rate", type=float, default=None)
parser.add_argument("--with_rgb", action='store_true', default=False)
parser.add_argument("--with_norm", action='store_true', default=False)
parser.add_argument("--num_points", type=int, default=8192)
parser.add_argument("--accum", type=int, default=24)
args = parser.parse_args()
os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
# load config file
with open(args.config, 'r') as f:
_cfg = json.load(f)
NUM_CLASSES = 21
NUM_POINTS = args.num_points
saver = Saver(args.save_dir, max_files=100)
print(args)
print(_cfg)
def log_str(info):
    """Emit a log line to stdout (placeholder for a richer logger)."""
    print(info)
def load_checkpoint(model, filename):
    """Load model weights from a checkpoint file.

    :param model: module whose state_dict will be populated
    :param filename: checkpoint path; file must contain 'epoch' and 'model_state'
    :return: the epoch stored in the checkpoint
    :raises FileNotFoundError: if ``filename`` does not exist; the path is
        included in the exception message (previously the bare raise carried
        no information about which file was missing)
    """
    if os.path.isfile(filename):
        log_str("==> Loading from checkpoint %s" % filename)
        checkpoint = torch.load(filename)
        epoch = checkpoint['epoch']
        model.load_state_dict(checkpoint['model_state'])
        log_str("==> Done")
        return epoch
    raise FileNotFoundError(filename)
class CrossEntropyLossWithWeights(torch.nn.Module):
    """Per-point cross-entropy loss scaled by per-point sample weights."""

    def __init__(self):
        super().__init__()
        # reduction='none' keeps one loss value per point for weighting
        self.cross_entropy_loss = torch.nn.CrossEntropyLoss(reduction='none')

    def forward(self, predict, target, weights):
        """
        :param predict: (B,N,C)
        :param target: (B,N)
        :param weights: (B,N)
        :return: scalar mean of the weighted per-point losses
        """
        logits = predict.view(-1, NUM_CLASSES).contiguous()     # B*N, C
        labels = target.view(-1).contiguous().cuda().long()     # B*N
        point_w = weights.view(-1).contiguous().cuda().float()  # B*N
        per_point = self.cross_entropy_loss(logits, labels)     # B*N
        return torch.mean(per_point * point_w)
def train_one_epoch(model, dst_loader, optimizer, epoch, tb_log):
    """Train for one epoch with gradient accumulation.

    Gradients are accumulated over ``args.accum // args.batch_size``
    mini-batches before each optimizer step, emulating an effective batch
    size of ``args.accum``. Logs running accuracy/loss every 100 batches
    and epoch aggregates to tensorboard every 5 epochs.
    """
    model.train()
    loss_func = CrossEntropyLossWithWeights()
    # number of mini-batches whose gradients are accumulated per step
    repeat = args.accum // args.batch_size
    log_str(' --- train, accumulate gradients for {} times. Total bacth size is {}.'.format(repeat, args.accum))
    loss_list = []
    loss_temp_list = []
    correct_temp = 0
    seen_temp = 0
    total_correct = 0
    total_seen = 0
    optimizer.zero_grad()
    for it, batch in enumerate(dst_loader):
        point_set, semantic_seg, sample_weight = batch
        point_set = point_set.cuda().float()
        predict = model(point_set)  # B,N,C
        loss = loss_func(predict, semantic_seg, sample_weight)
        # scale so the accumulated gradient matches one large-batch step
        loss_norm = loss / repeat
        loss_norm.backward()
        # accumulate gradient; also flush on the final (possibly short) batch
        if (it + 1) % repeat == 0 or (it + 1) == len(dst_loader):
            torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)
            optimizer.step()
            optimizer.zero_grad()
        # 1. loss
        loss_list.append(loss.item())
        # 2. accuracy
        predict = torch.argmax(predict, dim=2).cpu().numpy()  # B,N
        semantic_seg = semantic_seg.numpy()
        correct = np.sum(predict == semantic_seg)
        batch_seen = predict.shape[0] * NUM_POINTS
        total_correct += correct
        total_seen += batch_seen
        # save temp data (windowed stats reset after each report)
        loss_temp_list.append(loss.item())
        correct_temp += correct
        seen_temp += batch_seen
        if (it + 1) % 100 == 0:
            log_str(' -- batch: {}/{} -- '.format(it+1, len(dst_loader)))
            log_str('accuracy: {:.4f}'.format(correct_temp / seen_temp))
            log_str('mean loss: {:.4f}'.format(np.mean(loss_temp_list)))
            loss_temp_list = []
            correct_temp = 0
            seen_temp = 0
    log_str(' -- epoch accuracy: {:.4f}'.format(total_correct / total_seen))
    log_str(' -- epoch mean loss: {:.4f}'.format(np.mean(loss_list)))
    if epoch % 5 == 0:
        tb_log.log_value('epoch oA', total_correct / total_seen, epoch)
        tb_log.log_value('epoch loss', np.mean(loss_list), epoch)
def eval_one_epoch(model, dst_loader, epoch, tb_log):
    """Evaluate on the validation loader and return mean IoU (classes 1..20).

    Class 0 ('unknown') and points with zero sample weight are excluded
    from all statistics. Scalars are also logged to tensorboard.
    """
    model.eval()
    total_correct = 0
    total_seen = 0
    total_seen_class = [0 for _ in range(NUM_CLASSES)]
    total_correct_class = [0 for _ in range(NUM_CLASSES)]
    total_iou_deno_class = [0 for _ in range(NUM_CLASSES)]
    loss_func = CrossEntropyLossWithWeights()
    loss_list = []
    with torch.no_grad():
        for it, batch in enumerate(dst_loader):
            batch_data, batch_label, batch_smpw = batch
            batch_data = batch_data.cuda().float()
            pred_val = model(batch_data)  # B,N,C
            loss = loss_func(pred_val, batch_label, batch_smpw)
            loss_list.append(loss.item())
            # convert to numpy array
            pred_val = torch.argmax(pred_val, dim=2).cpu().numpy()  # B,N
            batch_label = batch_label.numpy()
            batch_smpw = batch_smpw.numpy()
            correct = np.sum((pred_val == batch_label) & (batch_label>0) & (batch_smpw>0))
            total_correct += correct
            total_seen += np.sum((batch_label>0) & (batch_smpw>0))
            for l in range(NUM_CLASSES):
                total_seen_class[l] += np.sum((batch_label==l) & (batch_smpw>0))
                total_correct_class[l] += np.sum((pred_val==l) & (batch_label==l) & (batch_smpw>0))
                total_iou_deno_class[l] += np.sum(((pred_val==l) | (batch_label==l)) & (batch_smpw>0) & (batch_label>0))
    # np.float was removed in NumPy 1.24; use np.float64 explicitly
    IoU = np.array(total_correct_class[1:]) / (np.array(total_iou_deno_class[1:], dtype=np.float64) + 1e-6)
    log_str('eval point avg class IoU: %f' % (np.mean(IoU)))
    for i in range(IoU.shape[0]):
        log_str('Class %d : %.4f'%(i+1, IoU[i]))
    log_str('eval loss: %f'% (np.mean(loss_list)))
    log_str('eval accuracy: %f'% (total_correct / float(total_seen)))
    log_str('eval avg class acc: %f' % (np.mean(np.array(total_correct_class[1:]) / (np.array(total_seen_class[1:], dtype=np.float64) + 1e-6))))
    tb_log.log_value('Eval loss', np.mean(loss_list), epoch)
    tb_log.log_value('Eval mIoU', np.mean(IoU), epoch)
    tb_log.log_value('Eval oA', total_correct / float(total_seen), epoch)
    tb_log.log_value('Eval mA', np.mean(np.array(total_correct_class[1:]) / (np.array(total_seen_class[1:], dtype=np.float64) + 1e-6)), epoch)
    return np.mean(IoU)
def train(model, train_loader, eval_loader, tb_log, resume_epoch=0):
    """Full training loop: SGD + step-decay LR schedule + periodic eval.

    :param resume_epoch: epochs below this value are skipped; the first
        epoch at/after the resume point is evaluated once (no training) so
        the LR schedule and best-mIoU tracking stay in sync with the
        checkpoint.
    """
    # optimizer = torch.optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.weight_decay)
    optimizer = torch.optim.SGD(model.parameters(),
                                lr=args.lr,
                                momentum=0.98,
                                weight_decay=args.weight_decay,
                                nesterov=True)
    # init lr scheduler
    def lr_lbmd(cur_epoch):
        # multiply the decay factor once per milestone already passed,
        # clamped so the LR never drops below args.lr_clip
        cur_decay = 1
        for decay_step in args.decay_step_list:
            if cur_epoch >= decay_step:
                cur_decay = cur_decay * args.lr_decay
        return max(cur_decay, args.lr_clip / args.lr)
    lr_scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lbmd)
    best_miou = 0
    best_epoch = 0
    for epoch in range(args.epochs):
        # resume training epoch
        if epoch < resume_epoch: continue
        elif resume_epoch > 0:
            # first epoch after the resume point: evaluate the loaded
            # checkpoint once, advance the scheduler, then resume training
            log_str('====== resume epoch {} ======'.format(epoch))
            log_str('====== Evaluation ======')
            miou = eval_one_epoch(model, eval_loader, epoch, tb_log)
            if miou > best_miou:
                best_miou = miou
                best_epoch = epoch
            log_str(' === Best mIoU: {}, epoch {}. === '.format(best_miou, best_epoch))
            # NOTE(review): passing `epoch` to step() is deprecated in newer
            # PyTorch versions — confirm against the pinned torch release.
            lr_scheduler.step(epoch)
            resume_epoch = 0
            continue
        # training
        log_str('====== epoch {} ======'.format(epoch))
        train_one_epoch(model, train_loader, optimizer, epoch, tb_log)
        lr_scheduler.step(epoch)
        # evaluate model (every 20 epochs; every 5 once past epoch 220)
        if (epoch > 0 and epoch % 20 == 0) or \
            (epoch > 220 and epoch % 5 == 0):
            log_str('====== Evaluation ======')
            miou = eval_one_epoch(model, eval_loader, epoch, tb_log)
            if miou > best_miou:
                best_miou = miou
                best_epoch = epoch
                saver.save_checkpoint(model, epoch, 'pn2_best_epoch_{}'.format(epoch))
            log_str(' === Best mIoU: {}, epoch {}. === '.format(best_miou, best_epoch))
if __name__ == '__main__':
    # number of extra per-point input features (beyond xyz)
    input_channels = 0
    if args.with_rgb: input_channels += 3
    if args.with_norm: input_channels += 3
    print('model input_channel: {}.'.format(input_channels))
    # model init
    MODEL = importlib.import_module('models.' + args.model)
    model = MODEL.get_model(num_class=NUM_CLASSES, input_channels=input_channels, num_pts=args.num_points)
    if args.use_sn:
        print(' --- use sn')
        # BUGFIX: this file only does `from utils.switchnorm import convert_sn`,
        # so the name `utils` is never bound and `utils.convert_sn(model)`
        # raised NameError. Call the imported function directly (as
        # tools/test_scannet.py already does).
        model = convert_sn(model)
    # resume
    from_epoch = 0
    if args.resume:
        from_epoch = load_checkpoint(model, args.resume)
    model = nn.parallel.DataParallel(model)
    model.cuda()
    # init tb_log
    tb_log.configure(os.path.join(args.save_dir, 'tensorboard'))
    # eval dataloader
    eval_dst = ScannetDatasetWholeScene(root=_cfg['scannet_pickle'],
                                        npoints=NUM_POINTS,
                                        split='eval',
                                        with_norm=args.with_norm,
                                        with_rgb=args.with_rgb)
    eval_loader = DataLoader(eval_dst, batch_size=args.batch_size, shuffle=False, pin_memory=True, num_workers=0)
    # train dataloader
    train_dst = ScannetDataset(root=_cfg['scannet_pickle'],
                               npoints=NUM_POINTS,
                               split='train' if args.mode == 'train' else 'eval',
                               with_dropout=True,
                               with_norm=args.with_norm,
                               with_rgb=args.with_rgb,
                               sample_rate=args.sample_rate)
    train_loader = DataLoader(train_dst,
                              batch_size=args.batch_size,
                              shuffle=True,
                              pin_memory=True,
                              num_workers=args.workers,
                              drop_last=True)  # sync_bn will raise an unknown error with batch size of 1.
train(model, train_loader, eval_loader, tb_log, from_epoch) | 11,795 | 37.423453 | 137 | py |
FPConv | FPConv-master/tools/train_s3dis.py | import os, sys
import argparse
import importlib
import numpy as np
import json
import time
import tensorboard_logger as tb_log
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from datasets.s3dis_dataset import S3DIS
from utils.saver import Saver
np.seterr(divide='ignore', invalid='ignore')
parser = argparse.ArgumentParser(description="Arg parser")
parser.add_argument("--gpu", type=str, default='0,1,2,3')
parser.add_argument("--batch_size", type=int, default=8)
parser.add_argument("--epochs", type=int, default=101)
parser.add_argument('--workers', type=int, default=8)
parser.add_argument('--num_classes', type=int, default=13)
parser.add_argument('--eval_freq', type=int, default=10)
parser.add_argument('--start_eval_epoch', type=int, default=0)
parser.add_argument("--accum_steps", type=int, default=8)
parser.add_argument('--sample_rate_eval', type=float, default=1)
parser.add_argument('--sample_rate_train', type=float, default=0.5)
parser.add_argument('--num_pts', type=int, default=14564)
parser.add_argument('--block_size', type=float, default=2)
parser.add_argument('--test_area', type=int, default=5)
parser.add_argument("--model", type=str, default='fpcnn_s3dis')
parser.add_argument("--save_dir", type=str, default='logs/test_s3dis/')
parser.add_argument("--config", type=str, default='./config.json')
parser.add_argument('--bn_momentum', type=float, default=0.02)
parser.add_argument('--warmup_epochs', type=int, default=8)
parser.add_argument('--lr', type=float, default=0.01)
parser.add_argument('--lr_decay', type=float, default=0.5)
parser.add_argument('--lr_clip', type=float, default=0.000001)
parser.add_argument('--decay_step_list', type=list, default=[25, 50, 75])
parser.add_argument('--weight_decay', type=float, default=0.001)
parser.add_argument("--resume", type=str, default=None)
args = parser.parse_args()
os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
print(args)
with open(args.config, 'r') as f:
_cfg = json.load(f)
print(_cfg)
NUM_CLASSES = args.num_classes
NUM_POINTS = args.num_pts
saver = Saver(args.save_dir)
class WarmStart:
    """Linear learning-rate warm up.

    Sets the optimizer lr to 0 at construction and raises it linearly to
    the target value over ``steps`` calls to :meth:`step`; further calls
    leave the lr untouched.
    """
    def __init__(self, optimizer, steps, lr):
        '''
        steps: Warm up steps, if it is 0, warm up is not activated
        lr: target learning rate
        '''
        self.optimizer = optimizer
        self.steps = steps
        self.iter = 0
        if steps != 0:
            self.increment = lr / steps
            for param_group in self.optimizer.param_groups:
                param_group['lr'] = 0

    def step(self):
        self.iter += 1
        # '<=' (not '<') so the final warm-up step actually reaches the
        # target lr; the original stopped at (steps-1)/steps * lr.
        if self.iter <= self.steps:
            for param_group in self.optimizer.param_groups:
                param_group['lr'] = self.iter * self.increment
def log_str(info):
    """Write one log message to stdout."""
    print(info)
def reset_bn(model, momentum=args.bn_momentum):
    '''
    Overwrite the momentum of every 1-D/2-D batch-norm layer in `model`
    (default taken from the CLI `--bn_momentum` flag at definition time).
    '''
    bn_types = (nn.BatchNorm1d, nn.BatchNorm2d)
    for layer in model.modules():
        if isinstance(layer, bn_types):
            layer.momentum = momentum
def save_config(args, _cfg):
    '''
    Save configs currently using, along with the checkpoints, etc.
    Writes two JSON files into `args.save_dir`:
      args.txt    -- the parsed CLI arguments
      configs.txt -- the loaded JSON config dict
    '''
    targets = [
        (os.path.join(args.save_dir, 'args.txt'), args.__dict__),
        (os.path.join(args.save_dir, 'configs.txt'), _cfg),
    ]
    for path, payload in targets:
        with open(path, 'w') as fout:
            json.dump(payload, fout, indent=2)
def load_checkpoint(model, filename):
    """Restore model weights from a checkpoint file.

    :param model: module whose state dict is loaded in place
    :param filename: path to a checkpoint dict holding 'epoch' and 'model_state'
    :return: the epoch stored in the checkpoint (used to resume training)
    :raises FileNotFoundError: if `filename` does not exist; the exception now
        carries the offending path (the original raised it bare, hiding which
        path failed).
    """
    if not os.path.isfile(filename):
        raise FileNotFoundError(filename)
    print("==> Loading from checkpoint %s" % filename)
    checkpoint = torch.load(filename)
    epoch = checkpoint['epoch']
    model.load_state_dict(checkpoint['model_state'])
    print("==> Done")
    return epoch
class CrossEntropyLossWithWeights(torch.nn.Module):
    """Per-point cross-entropy averaged with per-point sample weights.

    The weights let the caller down-weight individual points (e.g. padded or
    low-confidence samples) before the mean is taken.
    """
    def __init__(self):
        super().__init__()
        # reduction='none' keeps per-point losses so they can be re-weighted.
        self.cross_entropy_loss = torch.nn.CrossEntropyLoss(reduction='none')
    def forward(self, predict, target, weights):
        """
        :param predict: (B,N,C) raw class scores
        :param target: (B,N) integer class labels
        :param weights: (B,N) per-point loss weights
        :return: scalar weighted mean loss
        """
        # Infer the class count from the prediction tensor itself instead of
        # the module-level NUM_CLASSES global -- same value for every caller
        # in this script, but removes the hidden coupling.
        num_classes = predict.size(-1)
        predict = predict.view(-1, num_classes).contiguous()  # B*N, C
        target = target.view(-1).contiguous().cuda().long()  # B*N
        weights = weights.view(-1).contiguous().cuda().float()  # B*N
        loss = self.cross_entropy_loss(predict, target)  # B*N
        loss *= weights
        loss = torch.mean(loss)
        return loss
def train_one_epoch(model, dst_loader, optimizer, epoch, tb_log, warmup=None):
    """Train `model` for one epoch with gradient accumulation.

    :param model: network returning (B, N, C) class scores
    :param dst_loader: DataLoader yielding (points, labels, sample_weights)
    :param optimizer: optimizer stepped once every `args.accum_steps` batches
    :param epoch: current epoch index (for logging and warm-up gating)
    :param tb_log: tensorboard logger
    :param warmup: WarmStart instance stepped together with the optimizer
        while epoch <= args.warmup_epochs
    """
    model.train()
    loss_func = CrossEntropyLossWithWeights()
    optimizer.zero_grad()
    loss_list = []
    total_correct = 0
    total_seen = 0
    start_time = time.time()
    for it, batch in enumerate(dst_loader):
        point_set, semantic_seg, sample_weight = batch
        point_set = point_set.cuda().float()
        predict = model(point_set) # B,N,C
        loss = loss_func(predict, semantic_seg, sample_weight)
        # NOTE(review): the loss is not divided by accum_steps, so accumulated
        # gradients are effectively accum_steps times larger than a single
        # large batch would produce -- confirm this is intentional.
        loss.backward()
        torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)
        # Step the optimizer only at the end of each accumulation window.
        # NOTE(review): a trailing partial window at epoch end is never applied
        # (gradients are cleared by the zero_grad at the next epoch's start).
        if (it + 1) % args.accum_steps == 0:
            optimizer.step()
            optimizer.zero_grad()
            if epoch <= args.warmup_epochs:
                warmup.step()
        # 1. loss
        loss_list.append(loss.item())
        # 2. accuracy
        predict = torch.argmax(predict, dim=2).cpu().numpy() # B,N
        semantic_seg = semantic_seg.numpy()
        correct = np.sum(predict == semantic_seg)
        batch_seen = predict.shape[0] * NUM_POINTS
        total_correct += correct
        total_seen += batch_seen
        # Periodic console + tensorboard progress report.
        if (it + 1) % 100 == 0:
            time_cost = time.time() - start_time
            log_str(' -- batch: {}/{} -- '.format(it + 1, len(dst_loader)))
            log_str('accuracy: {:.4f}'.format(total_correct / total_seen))
            log_str('mean loss: {:.4f}'.format(np.mean(loss_list)))
            log_str('time cost: {:.2f}'.format(time_cost))
            start_time = time.time()
            iternum = epoch * len(dst_loader) + it + 1
            tb_log.log_value('Train/IterAcc', total_correct /
                             total_seen, iternum)
            tb_log.log_value('Train/IterLoss', np.mean(loss_list), iternum)
            tb_log.log_value('Train/Learning rate',
                             optimizer.param_groups[0]['lr'], iternum)
    # End-of-epoch summary.
    log_str(' -- epoch accuracy: {:.4f}'.format(total_correct / total_seen))
    log_str(' -- epoch mean loss: {:.4f}'.format(np.mean(loss_list)))
    tb_log.log_value('Train/epoch oA', total_correct / total_seen, epoch)
    tb_log.log_value('Train/epoch loss', np.mean(loss_list), epoch)
    lr = optimizer.param_groups[0]['lr']
    tb_log.log_value('Learning rate', lr, epoch)
def eval_one_epoch(model, dst_loader, epoch, tb_log):
    """Run one evaluation pass and log oA / mA / mIoU / loss to tensorboard.

    :param model: network returning (B, N, C) class scores
    :param dst_loader: eval DataLoader yielding (points, labels, weights)
    :param epoch: current epoch index (logging only)
    :param tb_log: tensorboard logger
    :return: mean IoU over all classes
    """
    model.eval()
    loss_func = CrossEntropyLossWithWeights()
    total_correct = 0
    total_seen = 0
    total_seen_class = [0 for _ in range(NUM_CLASSES)]
    total_correct_class = [0 for _ in range(NUM_CLASSES)]
    total_iou_deno_class = [0 for _ in range(NUM_CLASSES)]
    loss_list = []
    with torch.no_grad():
        for it, batch in enumerate(dst_loader):
            batch_data, batch_label, batch_smpw = batch
            batch_data = batch_data.cuda().float()
            pred_val = model(batch_data)  # B,N,C
            loss = loss_func(pred_val, batch_label, batch_smpw)
            loss_list.append(loss.item())
            pred_val = torch.argmax(pred_val, dim=2).cpu().numpy()  # B,N
            batch_label = batch_label.numpy()
            batch_smpw = batch_smpw.numpy()
            correct = np.sum((pred_val == batch_label))
            total_correct += correct
            # Only points with positive sample weight count as "seen".
            total_seen += np.sum((batch_label >= 0) & (batch_smpw > 0))
            for l in range(NUM_CLASSES):
                total_seen_class[l] += np.sum((batch_label == l)
                                              & (batch_smpw > 0))
                total_correct_class[l] += np.sum((pred_val == l)
                                                 & (batch_label == l) & (batch_smpw > 0))
                total_iou_deno_class[l] += np.sum(
                    ((pred_val == l) | (batch_label == l)) & (batch_smpw > 0))
    # `np.float` was removed in NumPy 1.24 -- use float64 explicitly.
    # The epsilon belongs on the denominator (as in avg_acc below); the
    # original added it to the quotient, which still produced inf for a
    # class that never appears in this split.
    IoU = np.array(total_correct_class) / \
        (np.array(total_iou_deno_class, dtype=np.float64) + 1e-6)
    avg_acc = np.mean(np.array(total_correct_class) /
                      (np.array(total_seen_class, dtype=np.float64) + 1e-6))
    if np.mean(loss_list) > 20:
        # Checkpoint the (probably diverged) model for post-mortem debugging.
        saver.save_checkpoint(
            model, epoch, 'loss_explosion_epoch_{}'.format(epoch))
    log_str('eval point avg class IoU: %f' % (np.mean(IoU)))
    for i in range(IoU.shape[0]):
        log_str('Class %d : %.4f' % (i + 1, IoU[i]))
    log_str('eval accuracy: %f' % (total_correct / float(total_seen)))
    log_str('eval avg class acc: %f' % (avg_acc))
    tb_log.log_value('Eval/mIoU', np.mean(IoU), epoch)
    tb_log.log_value('Eval/oA', total_correct / float(total_seen), epoch)
    tb_log.log_value('Eval/mA', avg_acc, epoch)
    tb_log.log_value('Eval/Loss', np.mean(loss_list), epoch)
    return np.mean(IoU)
def train(model, train_loader, eval_loader, tb_log, resume_epoch=0):
    """Full training loop with warm-up, cosine annealing, resume and eval.

    :param model: network to train (already wrapped in DataParallel by caller)
    :param train_loader: training DataLoader
    :param eval_loader: evaluation DataLoader
    :param tb_log: tensorboard logger
    :param resume_epoch: first epoch to actually train; earlier epochs are
        skipped, and the resume epoch itself is only evaluated
    """
    optimizer = torch.optim.SGD(model.parameters(),
                                lr=args.lr,
                                momentum=0.98,
                                weight_decay=args.weight_decay,
                                nesterov=True)
    lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, args.epochs)
    # Warm-up is counted in optimizer steps (one per accumulation window).
    warmup_steps = int(args.warmup_epochs * len(train_loader) / args.accum_steps)
    warmup = WarmStart(optimizer, warmup_steps, args.lr)
    best_miou = 0
    best_epoch = 0
    for epoch in range(args.epochs):
        if epoch < resume_epoch:
            # Fast-forward past already-trained epochs.
            continue
        elif resume_epoch > 0:
            # First epoch after a resume: evaluate only, then fall back into
            # the normal training path from the next epoch on.
            log_str('====== resume epoch {} ======'.format(epoch))
            log_str('====== Evaluation ======')
            miou = eval_one_epoch(model, eval_loader, epoch, tb_log)
            if miou > best_miou:
                best_miou = miou
                best_epoch = epoch
            log_str(' === Best mIoU: {}, epoch {}. === '.format(
                best_miou, best_epoch))
            if epoch >= args.warmup_epochs:
                # NOTE(review): passing `epoch` to step() is the deprecated
                # scheduler API in recent PyTorch -- confirm target version.
                lr_scheduler.step(epoch)
            resume_epoch = 0
            continue
        log_str('====== epoch {} ======'.format(epoch))
        train_one_epoch(model, train_loader, optimizer, epoch, tb_log, warmup)
        # Cosine schedule takes over once warm-up epochs are done.
        if epoch >= args.warmup_epochs:
            lr_scheduler.step(epoch)
        # Evaluate on the configured frequency, and every 2 epochs late in training.
        if (epoch >= args.start_eval_epoch and epoch % args.eval_freq == 0) or \
                (epoch > 80 and epoch % 2 == 0):
            log_str('====== Evaluation ======')
            miou = eval_one_epoch(model, eval_loader, epoch, tb_log)
            if miou > best_miou:
                best_miou = miou
                best_epoch = epoch
                saver.save_checkpoint(
                    model, epoch, 'pn2_best_epoch_{}'.format(epoch))
            log_str(' === Best mIoU: {}, epoch {}. === '.format(
                best_miou, best_epoch))
if __name__ == '__main__':
    # Persist the exact configuration used for this run.
    save_config(args, _cfg)
    # Model module is selected by name, e.g. models/fpcnn_s3dis.py.
    MODEL = importlib.import_module('models.' + args.model)
    # xyz-normalized + rgb features; assumes the dataset's if_normal=True
    # layout -- TODO confirm against the S3DIS dataset class.
    input_channels = 6
    print('model input_channel: {}.'.format(input_channels))
    model = MODEL.get_model(num_class=NUM_CLASSES, input_channels=input_channels)
    reset_bn(model)
    # resume
    from_epoch = 0
    if args.resume:
        from_epoch = load_checkpoint(model, args.resume)
        print("resume from {}".format(from_epoch))
    model = nn.parallel.DataParallel(model)
    model.cuda()
    # init tb_log
    tb_log.configure(os.path.join(args.save_dir, 'tensorboard'))
    # Sample rate is the num of voting
    eval_dst = S3DIS(split='eval',
                     data_root=_cfg['s3dis_data_root'],
                     num_point=args.num_pts,
                     test_area=args.test_area,
                     block_size=args.block_size,
                     sample_rate=args.sample_rate_eval,
                     transform=None,
                     if_normal=True)
    eval_loader = DataLoader(eval_dst, batch_size=args.batch_size, shuffle=False, pin_memory=True, num_workers=args.workers)
    train_dst = S3DIS(split='train',
                      data_root=_cfg['s3dis_data_root'],
                      num_point=args.num_pts,
                      test_area=args.test_area,
                      block_size=args.block_size,
                      sample_rate=args.sample_rate_train,
                      transform=None,
                      if_normal=True)
    train_loader = DataLoader(train_dst, batch_size=args.batch_size, shuffle=True, pin_memory=True, num_workers=args.workers)
    # train model
    train(model, train_loader, eval_loader, tb_log, from_epoch)
# (dataset-export metadata row, commented out: "12,945 | 35.162011 | 125 | py")
# ==== FPConv | FPConv-master/models/fpcnn_scannet.py ====
import torch
import torch.nn as nn
from fpconv.pointnet2.pointnet2_modules import PointnetFPModule
import fpconv.pointnet2.pytorch_utils as pt_utils
from fpconv.base import AssemRes_BaseBlock
from fpconv.fpconv import FPConv4x4_BaseBlock, FPConv6x6_BaseBlock
# Network hyper-parameters (ScanNet variant).
NPOINT = 8192  # default input point count; overridable via get_model(num_pts=...)
NPOINTS = [NPOINT // 2, NPOINT // 8, NPOINT // 32, NPOINT // 128]  # per-stage FPS sizes
RADIUS = [0.1, 0.2, 0.4, 0.8, 1.6]  # ball-query radii per stage
NSAMPLE = [32, 32, 32, 32, 16]  # neighbors grouped per stage
MLPS = [[32,32], [64,64], [128,128], [256,256], [512,512]]  # encoder channels
FP_MLPS = [[64,64], [128,64], [256,128], [512,256]]  # feature-propagation (decoder) channels
CLS_FC = [64]  # classification head hidden sizes
DP_RATIO = 0.5  # NOTE(review): unused here; the head hard-codes nn.Dropout(0.5)
def get_model(num_class, input_channels=3, num_pts=None):
    """Factory: build an FPCNN_ScanNet segmentation network.

    `num_pts` falls back to the module-level NPOINT when not given.
    """
    pts = NPOINT if num_pts is None else num_pts
    return FPCNN_ScanNet(pts, num_class, input_channels)
class FPCNN_ScanNet(nn.Module):
    """FPConv encoder-decoder for ScanNet point-cloud segmentation.

    Encoder: one full-resolution FPConv6x6 block, then four downsampling
    stages (6x6 convs for the first two, 4x4 for the last two). Decoder:
    PointNet++ feature-propagation modules with skip connections, followed
    by a small per-point classification head.
    """
    def __init__(self, num_pts, num_class, input_channels, use_xyz=False):
        # input_channels: input feature channels (not include xyz)
        super().__init__()
        # Locals deliberately shadow the module-level defaults so the stage
        # sizes follow the caller-supplied point count.
        NPOINT = num_pts
        NPOINTS = [NPOINT // 2, NPOINT // 8, NPOINT // 32, NPOINT // 128]
        print(NPOINTS)
        self.SA_modules = nn.ModuleList()
        # Full-resolution stem (npoint=None => no downsampling).
        self.conv0 = AssemRes_BaseBlock(
            CONV_BASE=FPConv6x6_BaseBlock,
            npoint=None,
            radius=RADIUS[0],
            nsample=NSAMPLE[0],
            channel_list=[input_channels] + MLPS[0],
            use_xyz=use_xyz)
        channel_in = MLPS[0][-1]
        skip_channel_list = [channel_in]
        for k in range(NPOINTS.__len__()):
            mlps = [MLPS[k+1].copy()]
            channel_out = 0
            for idx in range(mlps.__len__()):
                mlps[idx] = [channel_in] + mlps[idx]
                channel_out += mlps[idx][-1]
            print(mlps[0], RADIUS[k], RADIUS[k+1])
            # Early (dense) stages use the larger 6x6 projection grid,
            # late (sparse) stages the cheaper 4x4 grid.
            if k < 2:
                self.SA_modules.append(
                    AssemRes_BaseBlock(
                        CONV_BASE=FPConv6x6_BaseBlock,
                        npoint=NPOINTS[k],
                        nsample=NSAMPLE[k],
                        radius=RADIUS[k],
                        channel_list=mlps[0],
                        nsample_ds=NSAMPLE[k+1],
                        radius_ds=RADIUS[k+1],
                        use_xyz=use_xyz))
            else:
                self.SA_modules.append(
                    AssemRes_BaseBlock(
                        CONV_BASE=FPConv4x4_BaseBlock,
                        npoint=NPOINTS[k],
                        nsample=NSAMPLE[k],
                        radius=RADIUS[k],
                        channel_list=mlps[0],
                        nsample_ds=NSAMPLE[k+1],
                        radius_ds=RADIUS[k+1],
                        use_xyz=use_xyz))
            skip_channel_list.append(channel_out)
            channel_in = channel_out
        # Decoder: one FP module per encoder stage, consuming skip features.
        self.FP_modules = nn.ModuleList()
        for k in range(FP_MLPS.__len__()):
            pre_channel = FP_MLPS[k + 1][-1] if k + 1 < len(FP_MLPS) else channel_out
            mlp = [pre_channel + skip_channel_list[k]] + FP_MLPS[k]
            print(mlp)
            self.FP_modules.append(PointnetFPModule(mlp=mlp))
        # Per-point classification head (dropout inserted after first conv).
        cls_layers = []
        pre_channel = FP_MLPS[0][-1]
        for k in range(0, CLS_FC.__len__()):
            cls_layers.append(pt_utils.Conv2d(pre_channel, CLS_FC[k], bn=True))
            pre_channel = CLS_FC[k]
        cls_layers.append(pt_utils.Conv2d(pre_channel, num_class, activation=None, bn=False))
        cls_layers.insert(1, nn.Dropout(0.5))
        self.cls_layer = nn.Sequential(*cls_layers)
    def _break_up_pc(self, pc):
        # Split a (B, N, 3+C) cloud into coordinates and (optional) features.
        xyz = pc[..., 0:3].contiguous()
        features = (
            pc[..., 3:].transpose(1, 2).contiguous()
            if pc.size(-1) > 3 else None)
        return xyz, features
    def forward(self, pointcloud: torch.cuda.FloatTensor):
        """Return per-point class scores of shape (B, N, num_class)."""
        xyz, features = self._break_up_pc(pointcloud)
        _, features = self.conv0(xyz, features)
        l_xyz, l_features = [xyz], [features]
        # Encoder: collect per-stage coordinates and features for the skips.
        for i in range(len(self.SA_modules)):
            li_xyz, li_features = self.SA_modules[i](l_xyz[i], l_features[i])
            l_xyz.append(li_xyz)
            l_features.append(li_features)
        # Decoder: propagate features back up, coarsest stage first.
        for i in range(-1, -(len(self.FP_modules) + 1), -1):
            l_features[i - 1] = self.FP_modules[i](
                l_xyz[i - 1], l_xyz[i], l_features[i - 1], l_features[i])
        fn_feats = l_features[0].unsqueeze(-1) # B, C, N, 1
        pred_cls = self.cls_layer(fn_feats).squeeze(-1).transpose(1, 2).contiguous() # B, N, C
        return pred_cls
# (dataset-export metadata row, commented out: "4,571 | 37.1 | 94 | py")
# ==== FPConv | FPConv-master/models/fpcnn_s3dis.py ====
import torch
import torch.nn as nn
from fpconv.pointnet2.pointnet2_modules import PointnetFPModule, PointnetSAModule
import fpconv.pointnet2.pytorch_utils as pt_utils
from fpconv.base import AssemRes_BaseBlock
from fpconv.fpconv import FPConv4x4_BaseBlock, FPConv6x6_BaseBlock
# Network hyper-parameters (S3DIS variant) -- wider than the ScanNet model.
NPOINTS = [8192, 2048, 512, 128]  # per-stage FPS sizes
RADIUS = [0.1, 0.2, 0.4, 0.8, 1.6]  # ball-query radii per stage
NSAMPLE = [32, 32, 32, 32, 16]  # neighbors grouped per stage
MLPS = [[64,64], [128,128], [256,256], [512,512], [1024,1024]]  # encoder channels
FP_MLPS = [[128,128], [256,128], [512,256], [1024,512]]  # decoder channels
CLS_FC = [128]  # classification head hidden sizes
DP_RATIO = 0.5  # NOTE(review): unused here; the head hard-codes nn.Dropout(0.5)
def get_model(num_class, input_channels=3):
    """Factory: build a Pointnet2SSG (FPConv) network for S3DIS segmentation."""
    return Pointnet2SSG(num_class=num_class, input_channels=input_channels)
class Pointnet2SSG(nn.Module):
    """FPConv encoder-decoder for S3DIS point-cloud segmentation.

    Same layout as the ScanNet model but with fixed stage sizes and wider
    channels. (Fix: the final `return` line of the original had a dataset
    metadata row fused onto it, making the module syntactically invalid;
    the junk is stripped here. Code is otherwise unchanged.)
    """
    def __init__(self, num_class, input_channels=3, use_xyz=False):
        # input_channels: input feature channels (not include xyz)
        super().__init__()
        print(NPOINTS)
        self.SA_modules = nn.ModuleList()
        # Full-resolution stem (npoint=None => no downsampling).
        self.conv0 = AssemRes_BaseBlock(
            CONV_BASE=FPConv6x6_BaseBlock,
            npoint=None,
            radius=RADIUS[0],
            nsample=NSAMPLE[0],
            channel_list=[input_channels] + MLPS[0],
            use_xyz=use_xyz)
        channel_in = MLPS[0][-1]
        skip_channel_list = [channel_in]
        for k in range(NPOINTS.__len__()):
            mlps = [MLPS[k+1].copy()]
            channel_out = 0
            for idx in range(mlps.__len__()):
                mlps[idx] = [channel_in] + mlps[idx]
                channel_out += mlps[idx][-1]
            print(mlps[0], RADIUS[k], RADIUS[k+1])
            # Dense stages use the 6x6 projection grid, sparse stages 4x4.
            if k < 2:
                self.SA_modules.append(
                    AssemRes_BaseBlock(
                        CONV_BASE=FPConv6x6_BaseBlock,
                        npoint=NPOINTS[k],
                        nsample=NSAMPLE[k],
                        radius=RADIUS[k],
                        channel_list=mlps[0],
                        nsample_ds=NSAMPLE[k+1],
                        radius_ds=RADIUS[k+1],
                        use_xyz=use_xyz))
            else:
                self.SA_modules.append(
                    AssemRes_BaseBlock(
                        CONV_BASE=FPConv4x4_BaseBlock,
                        npoint=NPOINTS[k],
                        nsample=NSAMPLE[k],
                        radius=RADIUS[k],
                        channel_list=mlps[0],
                        nsample_ds=NSAMPLE[k+1],
                        radius_ds=RADIUS[k+1],
                        use_xyz=use_xyz))
            skip_channel_list.append(channel_out)
            channel_in = channel_out
        # Decoder: one FP module per encoder stage, consuming skip features.
        self.FP_modules = nn.ModuleList()
        for k in range(FP_MLPS.__len__()):
            pre_channel = FP_MLPS[k + 1][-1] if k + 1 < len(FP_MLPS) else channel_out
            mlp = [pre_channel + skip_channel_list[k]] + FP_MLPS[k]
            print(mlp)
            self.FP_modules.append(PointnetFPModule(mlp=mlp))
        # Per-point classification head (dropout inserted after first conv).
        cls_layers = []
        pre_channel = FP_MLPS[0][-1]
        for k in range(0, CLS_FC.__len__()):
            cls_layers.append(pt_utils.Conv2d(pre_channel, CLS_FC[k], bn=True))
            pre_channel = CLS_FC[k]
        cls_layers.append(pt_utils.Conv2d(pre_channel, num_class, activation=None, bn=False))
        cls_layers.insert(1, nn.Dropout(0.5))
        self.cls_layer = nn.Sequential(*cls_layers)
    def _break_up_pc(self, pc):
        # Split a (B, N, 3+C) cloud into coordinates and (optional) features.
        xyz = pc[..., 0:3].contiguous()
        features = (
            pc[..., 3:].transpose(1, 2).contiguous()
            if pc.size(-1) > 3 else None
        )
        return xyz, features
    def forward(self, pointcloud: torch.cuda.FloatTensor):
        """Return per-point class scores of shape (B, N, num_class)."""
        xyz, features = self._break_up_pc(pointcloud)
        _, features = self.conv0(xyz, features)
        l_xyz, l_features = [xyz], [features]
        for i in range(len(self.SA_modules)):
            li_xyz, li_features = self.SA_modules[i](l_xyz[i], l_features[i])
            l_xyz.append(li_xyz)
            l_features.append(li_features)
        # Decoder: propagate features back up, coarsest stage first.
        for i in range(-1, -(len(self.FP_modules) + 1), -1):
            l_features[i - 1] = self.FP_modules[i](
                l_xyz[i - 1], l_xyz[i], l_features[i - 1], l_features[i]
            )
        fn_feats = l_features[0].unsqueeze(-1)  # B, C, N, 1
        pred_cls = self.cls_layer(fn_feats).squeeze(-1).transpose(1, 2).contiguous()  # B, N, C
        return pred_cls
# ==== FPConv | FPConv-master/fpconv/base.py ====
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.parameter import Parameter
from fpconv.pointnet2 import pointnet2_utils
from fpconv.pointnet2 import pytorch_utils as pt_utils
relu_alpha = 0.2  # negative slope shared by every LeakyReLU in this module
class PointNet(nn.Module):
    """Mini-PointNet: shared MLP over neighbors, then channel-wise max pool."""

    def __init__(self, mlp, pool='max', bn=True):
        super().__init__()
        self.mlp = pt_utils.SharedMLP(mlp, bn=bn, activation=nn.LeakyReLU(negative_slope=relu_alpha, inplace=True))

    def forward(self, pcd):
        '''
        :param pcd: B, C, npoint, nsample
        :return:
            new_pcd: B, C_new, npoint, 1
        '''
        feats = self.mlp(pcd)  # B, C_new, npoint, nsample
        # Collapse the neighbor axis to one summary vector per point.
        return F.max_pool2d(feats, kernel_size=[1, feats.size(3)])
class ProjWeightModule(nn.Module):
    """Predict per-neighbor projection weights onto a map_size x map_size grid."""

    def __init__(self, mlp_pn, mlp_wts, map_size, bn=True):
        super().__init__()
        grid_cells = map_size ** 2
        pn_channels = [3] + mlp_pn
        # Input to the weight head is local xyz (3) concatenated with the
        # PointNet summary feature; output is one weight per grid cell.
        wts_channels = [pn_channels[-1] + 3] + mlp_wts + [grid_cells]
        self.pn_layer = PointNet(pn_channels, bn=bn)
        self.wts_layer = pt_utils.SharedMLP(wts_channels,
                                            bn=bn,
                                            activation=nn.LeakyReLU(negative_slope=relu_alpha, inplace=True))

    def forward(self, xyz):
        '''
        :param xyz: B, 3, npoint, nsample <local coordinates>
        :return:
            proj_wts: B, map_len, npoint, nsample
        '''
        nbrs = xyz.size(3)
        summary = self.pn_layer(xyz)  # B, C_new, npoint, 1
        summary = summary.expand(-1, -1, -1, nbrs)  # broadcast to every neighbor
        fused = torch.cat([xyz, summary], dim=1)  # B, C_new+3, npoint, nsample
        return self.wts_layer(fused)
class PN_Block(nn.Module):
    """Pointwise (1x1) shared MLP over a B x C x N tensor."""

    def __init__(self, in_channel, out_channel, bn=True, activation=True):
        super().__init__()
        act = nn.LeakyReLU(negative_slope=relu_alpha, inplace=True) if activation else None
        self.conv = pt_utils.Conv2d(in_size=in_channel,
                                    out_size=out_channel,
                                    kernel_size=(1, 1),
                                    bn=bn,
                                    activation=act)

    def forward(self, pcd):
        '''
        :param pcd: B, C_in, npoint
        :return:
            new_pcd: B, C_out, npoint
        '''
        # Conv2d expects 4-D input: add and later strip a trailing unit axis.
        expanded = pcd.unsqueeze(-1)
        return self.conv(expanded).squeeze(-1)
class Pooling_Block(nn.Module):
    """Ball-query max-pooling followed by a pointwise MLP.

    Used as the projection shortcut of the residual blocks when the block
    downsamples the point set.
    """
    def __init__(self, radius, nsample, in_channel, out_channel, npoint=None, bn=True, activation=True):
        super().__init__()
        self.radius = radius
        self.nsample = nsample
        self.npoint = npoint  # only needed when new_xyz is not supplied
        self.conv = PN_Block(in_channel, out_channel, bn=bn, activation=activation)
    def forward(self, xyz, feats, new_xyz=None):
        '''
        :param xyz: B, N, 3 input coordinates
        :param feats: B, C_in, N input features
        :param new_xyz: B, np, 3 pooled centers; sampled by FPS when None
        :return:
            new_pcd: B, C_out, np
        '''
        if new_xyz is None:
            assert self.npoint is not None
            xyz_flipped = xyz.transpose(1, 2).contiguous() # B,3,npoint
            idx = pointnet2_utils.furthest_point_sample(xyz, self.npoint) # B,npoint
            new_xyz_flipped = pointnet2_utils.gather_operation(xyz_flipped, idx) # B,3,npoint
            new_xyz = new_xyz_flipped.transpose(1, 2).contiguous() # B,npoint,3
        # Group neighbor features around each center and max-pool them.
        idx = pointnet2_utils.ball_query(self.radius, self.nsample, xyz, new_xyz)
        gped_feats = pointnet2_utils.grouping_operation(feats, idx) # B,C,np,ns
        gped_feats = F.max_pool2d(gped_feats, kernel_size=[1, self.nsample]) # B,C,np,1
        gped_feats = gped_feats.squeeze(-1) # B,C,np
        return self.conv(gped_feats)
class Resnet_BaseBlock(nn.Module):
    """Bottleneck residual block built around a surface-projection conv.

    Main path:  pcd => 1x1 conv <relu+bn> => fpconv <relu+bn> => 1x1 conv <bn>
    Shortcut:   pcd => (max_pooling) => 1x1 conv <bn>  [projection shortcut,
                only when the block downsamples or changes channel count]
    """
    def __init__(self, FPCONV,
                 npoint, nsample, radius, in_channel, out_channel, bn=True, use_xyz=False):
        '''
        pcd => 1x1 conv <relu+bn> => tconv <relu+bn> => 1x1 conv <bn>
        shortcut: pcd => (max_pooling) => 1x1 conv <bn> [apply projection shortcut]
        :param FPCONV: conv class used for the middle layer
        :param npoint: set to None to ignore 'max_pooling' (keep resolution)
        :param nsample, radius: params related to grouper
        '''
        super().__init__()
        self.keep_pcd = npoint is None
        self.is_im = in_channel == out_channel  # identity-mapping shortcut possible
        self.mid_channel = out_channel // 2 # <Bottleneck Design Block>
        self.conv1 = PN_Block(in_channel=in_channel,
                              out_channel=self.mid_channel,
                              bn=bn)
        self.conv2 = FPCONV(npoint=npoint,
                            nsample=nsample,
                            radius=radius,
                            in_channel=self.mid_channel,
                            out_channel=self.mid_channel,
                            bn=bn,
                            use_xyz=use_xyz)
        self.conv3 = PN_Block(in_channel=self.mid_channel,
                              out_channel=out_channel,
                              bn=bn,
                              activation=False)
        # `sonv0` is the shortcut conv (name kept for checkpoint compatibility).
        if self.keep_pcd and not self.is_im:
            self.sonv0 = PN_Block(in_channel=in_channel,
                                  out_channel=out_channel,
                                  bn=bn,
                                  activation=False)
        elif not self.keep_pcd:
            self.sonv0 = Pooling_Block(radius=radius,
                                       nsample=nsample,
                                       in_channel=in_channel,
                                       out_channel=out_channel,
                                       bn=bn,
                                       activation=False)
    def forward(self, xyz, feats, new_xyz=None):
        """Return (new_xyz, feats) after the residual sum + LeakyReLU."""
        # An explicit new_xyz is only valid when this block downsamples.
        assert (self.keep_pcd and new_xyz is None) or not self.keep_pcd, 'invalid new_xyz.'
        new_feats = self.conv1(feats)
        new_xyz, new_feats = self.conv2(xyz, new_feats, new_xyz)
        new_feats = self.conv3(new_feats)
        shc_feats = feats
        if self.keep_pcd and not self.is_im: # if in != out, apply an additional projection mlp
            shc_feats = self.sonv0(shc_feats) # mlp
        if not self.keep_pcd: # not keep pcd, apply pooled shortcut aligned with fps centers
            shc_feats = self.sonv0(xyz, feats, new_xyz) # pooling + mlp
        new_feats = F.leaky_relu(shc_feats + new_feats, negative_slope=relu_alpha ,inplace=True)
        return new_xyz, new_feats
class AssemRes_BaseBlock(nn.Module):
    """Stack of residual FPConv blocks: only the first may downsample."""

    def __init__(self, CONV_BASE,
                 npoint, nsample, radius, channel_list, nsample_ds=None, radius_ds=None, bn=True, use_xyz=False):
        '''
        Apply downsample and conv on input pcd
        :param CONV_BASE: conv block class (FPConv4x4/6x6_BaseBlock)
        :param npoint: the number of points to sample (None keeps resolution)
        :param nsample: the number of neighbors to group when conv
        :param radius: radius of ball query to group neighbors
        :param channel_list: List<a, c, c, ...>, the elements from <1> to the last must be the same
        :param nsample_ds, radius_ds: grouping params for the non-downsampling
            blocks after the first; default to nsample/radius when omitted.
        '''
        super().__init__()
        if nsample_ds is None:
            nsample_ds = nsample
        if radius_ds is None:
            radius_ds = radius
        self.conv_blocks = nn.ModuleList()
        for i in range(len(channel_list) - 1):
            in_channel = channel_list[i]
            out_channel = channel_list[i+1]
            # Only the first block downsamples; later blocks keep the point
            # set and use the *_ds grouping parameters.
            self.conv_blocks.append(Resnet_BaseBlock(FPCONV=CONV_BASE,
                                                     npoint=npoint if i == 0 else None,
                                                     nsample=nsample if i == 0 else nsample_ds,
                                                     radius=radius if i == 0 else radius_ds,
                                                     in_channel=in_channel,
                                                     out_channel=out_channel,
                                                     bn=bn,
                                                     use_xyz=use_xyz))

    def forward(self, xyz, feats, new_xyz=None):
        """Run the residual blocks in sequence; returns the last (xyz, feats).

        NOTE(review): `new_xyz` is forwarded to *every* block; with more than
        one block and an explicit `new_xyz`, the later keep-resolution blocks
        would trip Resnet_BaseBlock's assertion. Callers in this repo pass
        new_xyz=None -- confirm before relying on the explicit path.
        """
        # The original used `enumerate` but never read the index.
        for block in self.conv_blocks:
            xyz, feats = block(xyz, feats, new_xyz)
        return xyz, feats
# (dataset-export metadata row, commented out: "8,204 | 40.649746 | 125 | py")
# ==== FPConv | FPConv-master/fpconv/fpconv.py ====
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.parameter import Parameter
from fpconv.pointnet2 import pointnet2_utils
from fpconv.pointnet2 import pytorch_utils as pt_utils
from fpconv import base
relu_alpha = 0.2  # negative slope shared by every LeakyReLU in this module
class FPConv4x4_BaseBlock(nn.Module):
    """Flat-projection conv on a 4x4 virtual grid (16 learned weights/neighbor).

    Neighbor features are scattered onto the grid via predicted, doubly
    normalized weights, then a (16,1)-kernel conv consumes the whole grid.
    (Fix: the original computed the projection matmul twice -- an unused
    `multi` local followed by an identical matmul; it is computed once here.)
    """

    def __init__(self, npoint, nsample, radius, in_channel, out_channel, bn=True, use_xyz=False):
        super().__init__()
        print('fpconv4x4 init:', npoint, nsample, radius, in_channel, out_channel)
        self.npoint = npoint
        self.nsample = nsample
        self.keep_pcd = npoint is None  # None => no FPS downsampling
        self.use_xyz = use_xyz
        self.grouper = pointnet2_utils.QueryAndGroupLocal(radius, nsample)
        # 4x4 grid => 16 projection weights per neighbor.
        self.wts_layer = base.ProjWeightModule(mlp_pn=[8,16], mlp_wts=[16], map_size=4, bn=bn)
        if use_xyz:
            in_channel += 3
        # A (16,1) kernel consumes the entire flattened grid in one step.
        self.proj_conv = pt_utils.Conv2d(in_size=in_channel,
                                         out_size=out_channel,
                                         kernel_size=(16,1),
                                         bn=bn,
                                         activation=nn.LeakyReLU(negative_slope=relu_alpha, inplace=True))

    def forward(self, xyz, features, new_xyz=None):
        '''
        :param xyz: B,N,3
        :param features: B,C,N
        :returns:
            new_xyz: B,np,3
            new_feats: B,C,np
        '''
        # sample new xyz via FPS unless centers were supplied / pcd is kept
        if not self.keep_pcd and new_xyz is None:
            xyz_flipped = xyz.transpose(1, 2).contiguous() # B,3,npoint
            idx = pointnet2_utils.furthest_point_sample(xyz, self.npoint) # B,npoint
            new_xyz_flipped = pointnet2_utils.gather_operation(xyz_flipped, idx) # B,3,npoint
            new_xyz = new_xyz_flipped.transpose(1, 2).contiguous() # B,npoint,3
        elif new_xyz is not None:
            self.npoint = new_xyz.size(1)
        else: # keep pcd
            new_xyz = xyz
            self.npoint = new_xyz.size(1)
        # get distribution vector
        grouped_xyz, grouped_feats = self.grouper(xyz, new_xyz, features)
        proj_wts = self.wts_layer(grouped_xyz) # B,ml,np,ns
        if self.use_xyz:
            grouped_feats = torch.cat([grouped_xyz, grouped_feats], dim=1)
        # normalize weights
        # normalize at dim 1 <ml>
        proj_wts2_ = proj_wts ** 2 # B, ml, np, ns
        proj_wts_sum = torch.sum(proj_wts2_, dim=1, keepdim=True) # B, 1, np, ns
        proj_wts_sum = torch.max(proj_wts_sum, torch.tensor(1e-8).cuda())
        proj_wts_sum = torch.sqrt(proj_wts_sum) # B, 1, np, ns
        proj_wts = proj_wts / proj_wts_sum
        # normalize at dim 3 <nsample>, clamped to >= 1 so sparse
        # neighborhoods are not amplified
        proj_wts_sum = torch.sum(proj_wts2_, dim=3, keepdim=True) # B,ml,np,1
        proj_wts_sum = torch.max(proj_wts_sum, torch.tensor(1e-8).cuda())
        proj_wts_sum = torch.sqrt(proj_wts_sum) # B, 1, np, ns
        proj_wts_sum = torch.max(proj_wts_sum, torch.tensor(1.0).cuda())
        proj_wts = proj_wts / proj_wts_sum # B,ml,np,ns
        # projection: scatter neighbor features onto the virtual grid.
        proj_wts = proj_wts.transpose(1,2) # B, np, ml, ns
        grouped_feats = grouped_feats.permute(0, 2, 3, 1) # B, C, np, bs => B, np, ns, C
        # (single matmul; the original also assigned an unused `multi` local)
        proj_feats = F.leaky_relu(proj_wts.matmul(grouped_feats), negative_slope=relu_alpha, inplace=True) # B, np, ml, C
        proj_feats = proj_feats.transpose(1,3) # B, C, ml, np
        # convolution
        proj_feats = self.proj_conv(proj_feats) # B, C_new, 1, np
        proj_feats = proj_feats.squeeze(2) # B, C_new, np
        return new_xyz, proj_feats
class FPConv6x6_BaseBlock(nn.Module):
    """Flat-projection conv on a 6x6 virtual grid with a learnable bias.

    Unlike the 4x4 variant, the projected grid is reshaped to 6x6 and
    consumed by a small stack of 3x3/3x3/2x2 Conv3d layers.
    (Fix: the original's final `return` line had a dataset metadata row fused
    onto it, making the module syntactically invalid; the junk is stripped
    here. Code is otherwise unchanged.)
    """

    def __init__(self, npoint, nsample, radius, in_channel, out_channel, bn=True, use_xyz=False):
        super().__init__()
        print('fpconv6x6 init:', npoint, nsample, radius, in_channel, out_channel)
        self.npoint = npoint
        self.map_size = 6
        self.map_len = self.map_size ** 2
        self.nsample = nsample
        self.keep_pcd = npoint is None  # None => no FPS downsampling
        self.use_xyz = use_xyz
        self.grouper = pointnet2_utils.QueryAndGroupLocal(radius, nsample)
        # 6x6 grid => 36 projection weights per neighbor.
        self.wts_layer = base.ProjWeightModule(mlp_pn=[8,16,16], mlp_wts=[16,32], map_size=6, bn=bn)
        if use_xyz:
            in_channel += 3
        # Learnable bias added to the projected grid before the activation.
        self.bias = Parameter(torch.Tensor(in_channel))
        mid_channel = in_channel
        # NOTE(review): the second layer's in_size is written as `in_channel`
        # but equals mid_channel (they are the same value here).
        self.proj_conv = nn.Sequential(
            pt_utils.Conv3d(in_size=in_channel,
                            out_size=mid_channel,
                            kernel_size=(3,3,1),
                            bn=bn,
                            activation=nn.LeakyReLU(negative_slope=relu_alpha, inplace=True)),
            pt_utils.Conv3d(in_size=in_channel,
                            out_size=mid_channel,
                            kernel_size=(3,3,1),
                            bn=bn,
                            activation=nn.LeakyReLU(negative_slope=relu_alpha, inplace=True)),
            pt_utils.Conv3d(in_size=mid_channel,
                            out_size=out_channel,
                            kernel_size=(2,2,1),
                            bn=bn,
                            activation=nn.LeakyReLU(negative_slope=relu_alpha, inplace=True)))
        self.reset_parameters()

    def reset_parameters(self):
        # Small negative bias so empty grid cells start slightly inhibited.
        nn.init.constant_(self.bias, -0.05)

    def forward(self, xyz, features, new_xyz=None):
        '''
        :param xyz: B,N,3
        :param features: B,C,N
        :returns:
            new_xyz: B,np,3
            new_feats: B,C,np
        '''
        # sample new xyz via FPS unless centers were supplied / pcd is kept
        if not self.keep_pcd and new_xyz is None:
            xyz_flipped = xyz.transpose(1, 2).contiguous() # B,3,npoint
            idx = pointnet2_utils.furthest_point_sample(xyz, self.npoint) # B,npoint
            new_xyz_flipped = pointnet2_utils.gather_operation(xyz_flipped, idx) # B,3,npoint
            new_xyz = new_xyz_flipped.transpose(1, 2).contiguous() # B,npoint,3
        elif new_xyz is not None:
            idx = None
            self.npoint = new_xyz.size(1)
        else:
            idx = None
            new_xyz = xyz
            self.npoint = new_xyz.size(1)
        # get distribution vector
        grouped_xyz, grouped_feats = self.grouper(xyz, new_xyz, features)
        proj_wts = self.wts_layer(grouped_xyz) # B,ml,np,ns
        if self.use_xyz:
            grouped_feats = torch.cat([grouped_xyz, grouped_feats], dim=1)
        # normalize weights
        # normalize at dim 1 <ml>
        proj_wts2_ = proj_wts ** 2 # B, ml, np, ns
        proj_wts_sum = torch.sum(proj_wts2_, dim=1, keepdim=True) # B, 1, np, ns
        proj_wts_sum = torch.max(proj_wts_sum, torch.tensor(1e-8).cuda())
        proj_wts_sum = torch.sqrt(proj_wts_sum) # B, 1, np, ns
        proj_wts = proj_wts / proj_wts_sum
        # normalize at dim 3 <nsample> (reuses proj_wts2_ from above),
        # clamped to >= 1 so sparse neighborhoods are not amplified
        proj_wts_sum = torch.sum(proj_wts2_, dim=3, keepdim=True) # B,ml,np,1
        proj_wts_sum = torch.max(proj_wts_sum, torch.tensor(1e-8).cuda())
        proj_wts_sum = torch.sqrt(proj_wts_sum) # B, 1, np, ns
        proj_wts_sum = torch.max(proj_wts_sum, torch.tensor(1.0).cuda())
        proj_wts = proj_wts / proj_wts_sum # B,ml,np,ns
        # projection: scatter neighbor features onto the virtual grid.
        proj_wts = proj_wts.transpose(1,2) # B, np, ml, ns
        grouped_feats = grouped_feats.permute(0, 2, 3, 1) # B, C, np, bs => B, np, ns, C
        proj_feats = F.leaky_relu(proj_wts.matmul(grouped_feats) + self.bias, negative_slope=relu_alpha, inplace=True) # B, np, ml, C
        # reshape projection features # B, np, ml, C => B, C, ms, ms, np
        bs = proj_feats.size(0)
        proj_feats = proj_feats.transpose(1, 3) # B, C, ml, np
        proj_feats = proj_feats.view(bs, -1, self.map_size, self.map_size, self.npoint).contiguous() # B, C, ms, ms, np
        # convolution
        proj_feats = self.proj_conv(proj_feats) # B, C_new, 1, 1, np
        proj_feats = proj_feats.squeeze(3).squeeze(2) # B, C_new, np
        return new_xyz, proj_feats
# ==== FPConv | FPConv-master/fpconv/pointnet2/setup.py ====
from setuptools import setup
from torch.utils.cpp_extension import BuildExtension, CUDAExtension
# Build the fused pointnet2 CUDA extension (ball query, grouping,
# interpolation, FPS sampling) as a single `pointnet2_cuda` module.
setup(
    name='pointnet2',
    ext_modules=[
        CUDAExtension('pointnet2_cuda', [
            'src/pointnet2_api.cpp',
            'src/ball_query.cpp',
            'src/ball_query_gpu.cu',
            'src/group_points.cpp',
            'src/group_points_gpu.cu',
            'src/interpolate.cpp',
            'src/interpolate_gpu.cu',
            'src/sampling.cpp',
            'src/sampling_gpu.cu',
        ],
        # debug symbols for host code, -O2 for device code
        extra_compile_args={'cxx': ['-g'],
                            'nvcc': ['-O2']})
    ],
    cmdclass={'build_ext': BuildExtension}
)
# (dataset-export metadata row, commented out: "679 | 27.333333 | 67 | py")
# ==== FPConv | FPConv-master/fpconv/pointnet2/pointnet2_utils.py ====
import torch
from torch.autograd import Variable
from torch.autograd import Function
import torch.nn as nn
from typing import Tuple
import pointnet2_cuda as pointnet2
class FurthestPointSampling(Function):
    """Autograd wrapper around the CUDA furthest-point-sampling kernel."""
    @staticmethod
    def forward(ctx, xyz: torch.Tensor, npoint: int) -> torch.Tensor:
        """
        Uses iterative furthest point sampling to select a set of npoint features that have the largest
        minimum distance
        :param ctx:
        :param xyz: (B, N, 3) where N > npoint
        :param npoint: int, number of features in the sampled set
        :return:
            output: (B, npoint) tensor containing the set
        """
        assert xyz.is_contiguous()
        B, N, _ = xyz.size()
        output = torch.cuda.IntTensor(B, npoint)
        # temp holds running min-distances, initialised to +inf-like values.
        temp = torch.cuda.FloatTensor(B, N).fill_(1e10)
        pointnet2.furthest_point_sampling_wrapper(B, N, npoint, xyz, temp, output)
        return output
    @staticmethod
    def backward(xyz, a=None):
        # Index selection is not differentiable; no gradients flow back.
        # (First positional arg is ctx despite the `xyz` name.)
        return None, None
# Functional alias used throughout the codebase.
furthest_point_sample = FurthestPointSampling.apply
class GatherOperation(Function):
    """Autograd wrapper for CUDA feature gathering by point index."""
    @staticmethod
    def forward(ctx, features: torch.Tensor, idx: torch.Tensor) -> torch.Tensor:
        """
        :param ctx:
        :param features: (B, C, N)
        :param idx: (B, npoint) index tensor of the features to gather
        :return:
            output: (B, C, npoint)
        """
        assert features.is_contiguous()
        assert idx.is_contiguous()
        B, npoint = idx.size()
        _, C, N = features.size()
        output = torch.cuda.FloatTensor(B, C, npoint)
        pointnet2.gather_points_wrapper(B, C, N, npoint, features, idx, output)
        # Saved for backward: scatter the gradient back to the source points.
        ctx.for_backwards = (idx, C, N)
        return output
    @staticmethod
    def backward(ctx, grad_out):
        idx, C, N = ctx.for_backwards
        B, npoint = idx.size()
        grad_features = Variable(torch.cuda.FloatTensor(B, C, N).zero_())
        grad_out_data = grad_out.data.contiguous()
        pointnet2.gather_points_grad_wrapper(B, C, N, npoint, grad_out_data, idx, grad_features.data)
        # No gradient w.r.t. the integer indices.
        return grad_features, None
# Functional alias used throughout the codebase.
gather_operation = GatherOperation.apply
class ThreeNN(Function):
    """Autograd wrapper for the CUDA 3-nearest-neighbor search."""
    @staticmethod
    def forward(ctx, unknown: torch.Tensor, known: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
        """
        Find the three nearest neighbors of unknown in known
        :param ctx:
        :param unknown: (B, N, 3)
        :param known: (B, M, 3)
        :return:
            dist: (B, N, 3) l2 distance to the three nearest neighbors
            idx: (B, N, 3) index of 3 nearest neighbors
        """
        assert unknown.is_contiguous()
        assert known.is_contiguous()
        B, N, _ = unknown.size()
        m = known.size(1)
        # Kernel writes squared distances; sqrt converts to true l2.
        dist2 = torch.cuda.FloatTensor(B, N, 3)
        idx = torch.cuda.IntTensor(B, N, 3)
        pointnet2.three_nn_wrapper(B, N, m, unknown, known, dist2, idx)
        return torch.sqrt(dist2), idx
    @staticmethod
    def backward(ctx, a=None, b=None):
        # Nearest-neighbor search is not differentiable.
        return None, None
# Functional alias used throughout the codebase.
three_nn = ThreeNN.apply
class ThreeInterpolate(Function):
    """Autograd wrapper for CUDA weighted 3-neighbor feature interpolation."""
    @staticmethod
    def forward(ctx, features: torch.Tensor, idx: torch.Tensor, weight: torch.Tensor) -> torch.Tensor:
        """
        Performs weight linear interpolation on 3 features
        :param ctx:
        :param features: (B, C, M) Features descriptors to be interpolated from
        :param idx: (B, n, 3) three nearest neighbors of the target features in features
        :param weight: (B, n, 3) weights
        :return:
            output: (B, C, N) tensor of the interpolated features
        """
        assert features.is_contiguous()
        assert idx.is_contiguous()
        assert weight.is_contiguous()
        B, c, m = features.size()
        n = idx.size(1)
        ctx.three_interpolate_for_backward = (idx, weight, m)
        output = torch.cuda.FloatTensor(B, c, n)
        pointnet2.three_interpolate_wrapper(B, c, m, n, features, idx, weight, output)
        return output
    @staticmethod
    def backward(ctx, grad_out: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
        """
        :param ctx:
        :param grad_out: (B, C, N) tensor with gradients of outputs
        :return:
            grad_features: (B, C, M) tensor with gradients of features
            None: (no gradient w.r.t. the integer indices)
            None: (no gradient w.r.t. the interpolation weights)
        """
        idx, weight, m = ctx.three_interpolate_for_backward
        B, c, n = grad_out.size()
        grad_features = Variable(torch.cuda.FloatTensor(B, c, m).zero_())
        grad_out_data = grad_out.data.contiguous()
        pointnet2.three_interpolate_grad_wrapper(B, c, n, m, grad_out_data, idx, weight, grad_features.data)
        return grad_features, None, None
# Functional alias used throughout the codebase.
three_interpolate = ThreeInterpolate.apply
class GroupingOperation(Function):
    """Autograd wrapper for CUDA neighbor-feature grouping."""
    @staticmethod
    def forward(ctx, features: torch.Tensor, idx: torch.Tensor) -> torch.Tensor:
        """
        :param ctx:
        :param features: (B, C, N) tensor of features to group
        :param idx: (B, npoint, nsample) tensor containing the indicies of features to group with
        :return:
            output: (B, C, npoint, nsample) tensor
        """
        assert features.is_contiguous()
        assert idx.is_contiguous()
        B, nfeatures, nsample = idx.size()
        _, C, N = features.size()
        output = torch.cuda.FloatTensor(B, C, nfeatures, nsample)
        pointnet2.group_points_wrapper(B, C, N, nfeatures, nsample, features, idx, output)
        # Saved for backward: scatter-add gradients back to source points.
        ctx.for_backwards = (idx, N)
        return output
    @staticmethod
    def backward(ctx, grad_out: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
        """
        :param ctx:
        :param grad_out: (B, C, npoint, nsample) tensor of the gradients of the output from forward
        :return:
            grad_features: (B, C, N) gradient of the features
        """
        idx, N = ctx.for_backwards
        B, C, npoint, nsample = grad_out.size()
        grad_features = Variable(torch.cuda.FloatTensor(B, C, N).zero_())
        grad_out_data = grad_out.data.contiguous()
        pointnet2.group_points_grad_wrapper(B, C, N, npoint, nsample, grad_out_data, idx, grad_features.data)
        # No gradient w.r.t. the integer indices.
        return grad_features, None
# Functional alias used throughout the codebase.
grouping_operation = GroupingOperation.apply
class BallQuery(Function):
    """Autograd wrapper around the CUDA fixed-radius neighbor search kernel."""
    @staticmethod
    def forward(ctx, radius: float, nsample: int, xyz: torch.Tensor, new_xyz: torch.Tensor) -> torch.Tensor:
        """
        :param ctx:
        :param radius: float, radius of the balls
        :param nsample: int, maximum number of features in the balls
        :param xyz: (B, N, 3) xyz coordinates of the features
        :param new_xyz: (B, npoint, 3) centers of the ball query
        :return:
            idx: (B, npoint, nsample) tensor with the indicies of the features that form the query balls
        """
        assert new_xyz.is_contiguous()
        assert xyz.is_contiguous()
        B, N, _ = xyz.size()
        npoint = new_xyz.size(1)
        # Zero-initialised index buffer; NOTE(review): slots the kernel does not
        # fill presumably stay 0 (i.e. refer to point 0) — confirm against the
        # CUDA implementation before relying on padding semantics.
        idx = torch.cuda.IntTensor(B, npoint, nsample).zero_()
        pointnet2.ball_query_wrapper(B, N, npoint, radius, nsample, new_xyz, xyz, idx)
        return idx
    @staticmethod
    def backward(ctx, a=None):
        # Index selection is not differentiable w.r.t. any input.
        return None, None, None, None
ball_query = BallQuery.apply
class QueryAndGroup(nn.Module):
    """Ball-query a neighbourhood around each centroid and gather its features.

    For every centroid, collects up to ``nsample`` points within ``radius``
    and returns their centroid-relative coordinates, optionally concatenated
    with the per-point feature descriptors.
    """
    def __init__(self, radius: float, nsample: int, use_xyz: bool = True):
        """
        :param radius: float, radius of ball
        :param nsample: int, maximum number of features to gather in the ball
        :param use_xyz: if True, prepend the 3 local-xyz channels to the grouped features
        """
        super().__init__()
        self.radius = radius
        self.nsample = nsample
        self.use_xyz = use_xyz
    def forward(self, xyz: torch.Tensor, new_xyz: torch.Tensor, features: torch.Tensor = None) -> Tuple[torch.Tensor]:
        """
        :param xyz: (B, N, 3) xyz coordinates of the features
        :param new_xyz: (B, npoint, 3) centroids
        :param features: (B, C, N) descriptors of the features
        :return:
            new_features: (B, 3 + C, npoint, nsample)
        """
        neighbour_idx = ball_query(self.radius, self.nsample, xyz, new_xyz)
        grouped_xyz = grouping_operation(xyz.transpose(1, 2).contiguous(), neighbour_idx)  # (B, 3, npoint, nsample)
        # Shift every group into its centroid's local coordinate frame.
        grouped_xyz -= new_xyz.transpose(1, 2).unsqueeze(-1)
        if features is None:
            assert self.use_xyz, "Cannot have not features and not use xyz as a feature!"
            return grouped_xyz
        grouped_features = grouping_operation(features, neighbour_idx)
        if not self.use_xyz:
            return grouped_features
        return torch.cat([grouped_xyz, grouped_features], dim=1)  # (B, C + 3, npoint, nsample)
class QueryAndGroupLocal(nn.Module):
    """Ball-query grouping that returns local coordinates and grouped features
    as two separate tensors (instead of concatenating them)."""
    def __init__(self, radius: float, nsample: int):
        """
        :param radius: float, radius of ball
        :param nsample: int, maximum number of features to gather in the ball
        """
        super().__init__()
        self.radius = radius
        self.nsample = nsample
    def forward(self, xyz: torch.Tensor, new_xyz: torch.Tensor, features: torch.Tensor = None) -> Tuple[torch.Tensor]:
        """
        :param xyz: (B, N, 3) xyz coordinates of the features
        :param new_xyz: (B, npoint, 3) centroids
        :param features: (B, C, N) descriptors of the features
        :return:
            grouped_xyz: (B, 3, npoint, nsample) centroid-local coordinates
            grouped_features: (B, C, npoint, nsample)
        """
        neighbour_idx = ball_query(self.radius, self.nsample, xyz, new_xyz)
        grouped_xyz = grouping_operation(xyz.transpose(1, 2).contiguous(), neighbour_idx)  # (B, 3, npoint, nsample)
        # Translate each group into its centroid's local frame.
        grouped_xyz -= new_xyz.transpose(1, 2).unsqueeze(-1)
        grouped_features = grouping_operation(features, neighbour_idx)
        return grouped_xyz, grouped_features
class QueryAndGroupXYZ(nn.Module):
    """Ball-query grouping that returns only centroid-local xyz coordinates."""
    def __init__(self, radius: float, nsample: int):
        """
        :param radius: float, radius of ball
        :param nsample: int, maximum number of points to gather in the ball
        """
        super().__init__()
        self.radius = radius
        self.nsample = nsample
    def forward(self, xyz: torch.Tensor, new_xyz: torch.Tensor, features: torch.Tensor = None) -> Tuple[torch.Tensor]:
        """
        :param xyz: (B, N, 3) xyz coordinates of the features
        :param new_xyz: (B, npoint, 3) centroids
        :param features: unused; kept for interface parity with QueryAndGroup
        :return:
            grouped_xyz: (B, 3, npoint, nsample) centroid-local coordinates
        """
        neighbour_idx = ball_query(self.radius, self.nsample, xyz, new_xyz)
        local_xyz = grouping_operation(xyz.transpose(1, 2).contiguous(), neighbour_idx)  # (B, 3, npoint, nsample)
        local_xyz -= new_xyz.transpose(1, 2).unsqueeze(-1)  # shift into local frame
        return local_xyz
class GroupAll(nn.Module):
    """Treat the entire point set as a single group (global set abstraction)."""
    def __init__(self, use_xyz: bool = True):
        super().__init__()
        self.use_xyz = use_xyz
    def forward(self, xyz: torch.Tensor, new_xyz: torch.Tensor, features: torch.Tensor = None):
        """
        :param xyz: (B, N, 3) xyz coordinates of the features
        :param new_xyz: ignored
        :param features: (B, C, N) descriptors of the features
        :return:
            new_features: (B, C + 3, 1, N)
        """
        grouped_xyz = xyz.transpose(1, 2).unsqueeze(2)  # (B, 3, 1, N)
        if features is None:
            return grouped_xyz
        grouped_features = features.unsqueeze(2)  # (B, C, 1, N)
        if self.use_xyz:
            return torch.cat([grouped_xyz, grouped_features], dim=1)  # (B, 3 + C, 1, N)
        return grouped_features
| 12,424 | 33.803922 | 118 | py |
FPConv | FPConv-master/fpconv/pointnet2/pointnet2_modules.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from . import pointnet2_utils
from . import pytorch_utils as pt_utils
from typing import List
class _PointnetSAModuleBase(nn.Module):
    """Shared skeleton for PointNet++ set-abstraction modules.

    Subclasses are expected to populate ``self.npoint``, ``self.groupers``
    and ``self.mlps`` (one grouper/MLP pair per scale).
    """
    def __init__(self):
        super().__init__()
        self.npoint = None
        self.groupers = None
        self.mlps = None
        self.pool_method = 'max_pool'
    def forward(self, xyz: torch.Tensor, features: torch.Tensor = None, new_xyz=None) -> (torch.Tensor, torch.Tensor):
        """
        :param xyz: (B, N, 3) tensor of the xyz coordinates of the features
        :param features: (B, N, C) tensor of the descriptors of the the features
        :param new_xyz: optional pre-computed centroids; when None they are
            chosen by furthest point sampling (self.npoint of them)
        :return:
            new_xyz: (B, npoint, 3) tensor of the new features' xyz
            new_features: (B, npoint, \sum_k(mlps[k][-1])) tensor of the new_features descriptors
        """
        new_features_list = []
        xyz_flipped = xyz.transpose(1, 2).contiguous()
        if new_xyz is None:
            # FPS picks npoint representative points, then gather their coords.
            # When npoint is None the grouper is GroupAll and no centroids are needed.
            new_xyz = pointnet2_utils.gather_operation(
                xyz_flipped,
                pointnet2_utils.furthest_point_sample(xyz, self.npoint)
            ).transpose(1, 2).contiguous() if self.npoint is not None else None
        for i in range(len(self.groupers)):
            new_features = self.groupers[i](xyz, new_xyz, features)  # (B, C, npoint, nsample)
            new_features = self.mlps[i](new_features)  # (B, mlp[-1], npoint, nsample)
            # Pool over the nsample dimension to get one descriptor per centroid.
            if self.pool_method == 'max_pool':
                new_features = F.max_pool2d(
                    new_features, kernel_size=[1, new_features.size(3)]
                )  # (B, mlp[-1], npoint, 1)
            elif self.pool_method == 'avg_pool':
                new_features = F.avg_pool2d(
                    new_features, kernel_size=[1, new_features.size(3)]
                )  # (B, mlp[-1], npoint, 1)
            else:
                raise NotImplementedError
            new_features = new_features.squeeze(-1)  # (B, mlp[-1], npoint)
            new_features_list.append(new_features)
        # Concatenate the per-scale descriptors along the channel axis.
        return new_xyz, torch.cat(new_features_list, dim=1)
class PointnetSAModuleMSG(_PointnetSAModuleBase):
    """Pointnet set abstraction layer with multiscale grouping"""
    def __init__(self, *, npoint: int, radii: List[float], nsamples: List[int], mlps: List[List[int]], bn: bool = True,
                 use_xyz: bool = True, pool_method='max_pool', instance_norm=False):
        """
        :param npoint: int
        :param radii: list of float, list of radii to group with
        :param nsamples: list of int, number of samples in each ball query
        :param mlps: list of list of int, spec of the pointnet before the global pooling for each scale
        :param bn: whether to use batchnorm
        :param use_xyz: if True, 3 xyz channels are prepended to each scale's input
        :param pool_method: max_pool / avg_pool
        :param instance_norm: whether to use instance_norm
        """
        super().__init__()
        assert len(radii) == len(nsamples) == len(mlps)
        self.npoint = npoint
        self.groupers = nn.ModuleList()
        self.mlps = nn.ModuleList()
        for i in range(len(radii)):
            radius = radii[i]
            nsample = nsamples[i]
            self.groupers.append(
                pointnet2_utils.QueryAndGroup(radius, nsample, use_xyz=use_xyz)
                if npoint is not None else pointnet2_utils.GroupAll(use_xyz)
            )
            # Bug fix: copy the spec before widening it. The original code did
            # `mlp_spec = mlps[i]` and then `mlp_spec[0] += 3`, mutating the
            # caller's nested list in place — a config reused to build a second
            # module would silently gain 3 extra input channels each time.
            mlp_spec = list(mlps[i])
            if use_xyz:
                mlp_spec[0] += 3
            self.mlps.append(pt_utils.SharedMLP(mlp_spec, bn=bn, instance_norm=instance_norm))
        self.pool_method = pool_method
class PointnetSAModule(PointnetSAModuleMSG):
    """Single-scale Pointnet set abstraction layer (one radius, one MLP)."""
    def __init__(self, *, mlp: List[int], npoint: int = None, radius: float = None, nsample: int = None,
                 bn: bool = True, use_xyz: bool = True, pool_method='max_pool', instance_norm=False):
        """
        :param mlp: list of int, spec of the pointnet before the global max_pool
        :param npoint: int, number of features
        :param radius: float, radius of ball
        :param nsample: int, number of samples in the ball query
        :param bn: whether to use batchnorm
        :param use_xyz: if True, 3 xyz channels are prepended to the MLP input
        :param pool_method: max_pool / avg_pool
        :param instance_norm: whether to use instance_norm
        """
        # Delegate to the multi-scale module with singleton lists.
        super().__init__(
            npoint=npoint,
            radii=[radius],
            nsamples=[nsample],
            mlps=[mlp],
            bn=bn,
            use_xyz=use_xyz,
            pool_method=pool_method,
            instance_norm=instance_norm,
        )
class PointnetFPModule(nn.Module):
    r"""Propigates the features of one set to another (feature propagation /
    upsampling stage of PointNet++) via inverse-distance weighted
    interpolation of the 3 nearest known points."""
    def __init__(self, *, mlp: List[int], bn: bool = True):
        """
        :param mlp: list of int, channel spec of the shared MLP applied after interpolation
        :param bn: whether to use batchnorm
        """
        super().__init__()
        self.mlp = pt_utils.SharedMLP(mlp, bn=bn)
    def forward(
            self, unknown: torch.Tensor, known: torch.Tensor, unknow_feats: torch.Tensor, known_feats: torch.Tensor
    ) -> torch.Tensor:
        """
        :param unknown: (B, n, 3) tensor of the xyz positions of the unknown features
        :param known: (B, m, 3) tensor of the xyz positions of the known features
        :param unknow_feats: (B, C1, n) tensor of the features to be propigated to
        :param known_feats: (B, C2, m) tensor of features to be propigated
        :return:
            new_features: (B, mlp[-1], n) tensor of the features of the unknown features
        """
        if known is not None:
            # Inverse-distance weighting over the 3 nearest known points;
            # the 1e-8 epsilon guards against division by zero for coincident points.
            dist, idx = pointnet2_utils.three_nn(unknown, known)
            dist_recip = 1.0 / (dist + 1e-8)
            norm = torch.sum(dist_recip, dim=2, keepdim=True)
            weight = dist_recip / norm
            interpolated_feats = pointnet2_utils.three_interpolate(known_feats, idx, weight)
        else:
            # No source positions: broadcast the (global) known features to every point.
            interpolated_feats = known_feats.expand(*known_feats.size()[0:2], unknown.size(1))
        if unknow_feats is not None:
            # Skip-link concat with the features already living on the target set.
            new_features = torch.cat([interpolated_feats, unknow_feats], dim=1)  # (B, C2 + C1, n)
        else:
            new_features = interpolated_feats
        # SharedMLP is built from 1x1 Conv2d, so add/remove a trailing unit dim.
        new_features = new_features.unsqueeze(-1)
        new_features = self.mlp(new_features)
        return new_features.squeeze(-1)
if __name__ == "__main__":
    # Import-only module; no demo or self-test to run.
    pass
| 6,338 | 38.61875 | 119 | py |
FPConv | FPConv-master/fpconv/pointnet2/pytorch_utils.py | import torch.nn as nn
from typing import List, Tuple
class SharedMLP(nn.Sequential):
    """Per-point shared MLP implemented as a stack of 1x1 Conv2d layers.

    ``args`` lists the channel widths: layer i maps args[i] -> args[i+1].
    With ``preact`` and ``first`` both set, the very first layer skips its
    norm and activation (pre-activation networks start with a bare conv).
    """
    def __init__(
            self,
            args: List[int],
            *,
            bn: bool = False,
            activation=nn.ReLU(inplace=True),
            preact: bool = False,
            first: bool = False,
            name: str = "",
            instance_norm: bool = False,):
        super().__init__()
        for i in range(len(args) - 1):
            self.add_module(
                name + 'layer{}'.format(i),
                Conv2d(
                    args[i],
                    args[i + 1],
                    # bn/activation are suppressed only when this is layer 0 of a
                    # pre-activation stack marked as the network's first block.
                    bn=(not first or not preact or (i != 0)) and bn,
                    activation=activation
                    if (not first or not preact or (i != 0)) else None,
                    preact=preact,
                    instance_norm=instance_norm
                )
            )
class _ConvBase(nn.Sequential):
    """Conv + optional norm + optional activation, in pre- or post-activation order.

    Registration order determines execution order in nn.Sequential:
      preact:  [bn] -> [activation] -> [instance norm] -> conv
      postact: conv -> [bn] -> [activation] -> [instance norm]
    Instance norm is only added when batch norm is disabled.
    """
    def __init__(
            self,
            in_size,
            out_size,
            kernel_size,
            stride,
            padding,
            activation,
            bn,
            init,
            conv=None,
            batch_norm=None,
            bias=True,
            preact=False,
            name="",
            instance_norm=False,
            instance_norm_func=None
    ):
        super().__init__()
        # The conv bias is redundant when followed by batch norm.
        bias = bias and (not bn)
        conv_unit = conv(
            in_size,
            out_size,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            bias=bias
        )
        init(conv_unit.weight)
        if bias:
            nn.init.constant_(conv_unit.bias, 0)
        if bn:
            # Pre-activation normalises the conv's input, post-activation its output.
            if not preact:
                bn_unit = batch_norm(out_size)
            else:
                bn_unit = batch_norm(in_size)
        if instance_norm:
            if not preact:
                in_unit = instance_norm_func(out_size, affine=False, track_running_stats=False)
            else:
                in_unit = instance_norm_func(in_size, affine=False, track_running_stats=False)
        if preact:
            if bn:
                self.add_module(name + 'bn', bn_unit)
            if activation is not None:
                self.add_module(name + 'activation', activation)
            if not bn and instance_norm:
                self.add_module(name + 'in', in_unit)
        self.add_module(name + 'conv', conv_unit)
        if not preact:
            if bn:
                self.add_module(name + 'bn', bn_unit)
            if activation is not None:
                self.add_module(name + 'activation', activation)
            if not bn and instance_norm:
                self.add_module(name + 'in', in_unit)
class _BNBase(nn.Sequential):
def __init__(self, in_size, batch_norm=None, name=""):
super().__init__()
self.add_module(name + "bn", batch_norm(in_size))
nn.init.constant_(self[0].weight, 1.0)
nn.init.constant_(self[0].bias, 0)
class BatchNorm1d(_BNBase):
    """BatchNorm1d with weight/bias initialised to 1/0 by _BNBase."""
    def __init__(self, in_size: int, *, name: str = ""):
        super().__init__(in_size, nn.BatchNorm1d, name)
class BatchNorm2d(_BNBase):
    """BatchNorm2d with weight/bias initialised to 1/0 by _BNBase."""
    def __init__(self, in_size: int, name: str = ""):
        super().__init__(in_size, nn.BatchNorm2d, name)
class BatchNorm3d(_BNBase):
    """BatchNorm3d with weight/bias initialised to 1/0 by _BNBase."""
    def __init__(self, in_size: int, name: str = ""):
        super().__init__(in_size, nn.BatchNorm3d, name)
class Conv1d(_ConvBase):
    """1-D convolution with optional batch/instance norm and activation."""
    def __init__(
            self,
            in_size: int,
            out_size: int,
            *,
            kernel_size: int = 1,
            stride: int = 1,
            padding: int = 0,
            activation=nn.ReLU(inplace=True),
            bn: bool = False,
            init=nn.init.kaiming_normal_,
            bias: bool = True,
            preact: bool = False,
            name: str = "",
            instance_norm=False
    ):
        # Bind the 1-D flavours of conv / norm onto the shared base.
        super().__init__(
            in_size, out_size, kernel_size, stride, padding, activation, bn, init,
            conv=nn.Conv1d, batch_norm=BatchNorm1d, bias=bias, preact=preact,
            name=name, instance_norm=instance_norm, instance_norm_func=nn.InstanceNorm1d,
        )
class Conv2d(_ConvBase):
    """2-D convolution with optional batch/instance norm and activation."""
    def __init__(
            self,
            in_size: int,
            out_size: int,
            *,
            kernel_size: Tuple[int, int] = (1, 1),
            stride: Tuple[int, int] = (1, 1),
            padding: Tuple[int, int] = (0, 0),
            activation=nn.ReLU(inplace=True),
            bn: bool = False,
            init=nn.init.kaiming_normal_,
            bias: bool = True,
            preact: bool = False,
            name: str = "",
            instance_norm=False
    ):
        # Bind the 2-D flavours of conv / norm onto the shared base.
        super().__init__(
            in_size, out_size, kernel_size, stride, padding, activation, bn, init,
            conv=nn.Conv2d, batch_norm=BatchNorm2d, bias=bias, preact=preact,
            name=name, instance_norm=instance_norm, instance_norm_func=nn.InstanceNorm2d,
        )
class Conv3d(_ConvBase):
    """3-D convolution with optional batch/instance norm and activation."""
    def __init__(
            self,
            in_size: int,
            out_size: int,
            *,
            kernel_size: Tuple[int, int, int] = (1, 1, 1),
            stride: Tuple[int, int, int] = (1, 1, 1),
            padding: Tuple[int, int, int] = (0, 0, 0),
            activation=nn.ReLU(inplace=True),
            bn: bool = False,
            init=nn.init.kaiming_normal_,
            bias: bool = True,
            preact: bool = False,
            name: str = "",
            instance_norm=False
    ):
        # Bind the 3-D flavours of conv / norm onto the shared base.
        super().__init__(
            in_size, out_size, kernel_size, stride, padding, activation, bn, init,
            conv=nn.Conv3d, batch_norm=BatchNorm3d, bias=bias, preact=preact,
            name=name, instance_norm=instance_norm, instance_norm_func=nn.InstanceNorm3d,
        )
class FC(nn.Sequential):
    """Fully-connected layer with optional batch norm and activation.

    Module order is bn -> activation -> fc when ``preact`` is True,
    otherwise fc -> bn -> activation.
    """
    def __init__(
            self,
            in_size: int,
            out_size: int,
            *,
            activation=nn.ReLU(inplace=True),
            bn: bool = False,
            init=None,
            preact: bool = False,
            name: str = ""
    ):
        """
        :param in_size: number of input features
        :param out_size: number of output features
        :param activation: activation module to append (None to skip)
        :param bn: add a BatchNorm1d layer; the linear bias is dropped when True
        :param init: optional initializer applied to the linear weight
        :param preact: if True, norm/activation run before the linear layer
        :param name: prefix for the registered sub-module names
        """
        super().__init__()
        fc = nn.Linear(in_size, out_size, bias=not bn)
        if init is not None:
            init(fc.weight)
        if not bn:
            # Bug fix: nn.init.constant was deprecated and removed from modern
            # PyTorch; the supported in-place API is nn.init.constant_.
            nn.init.constant_(fc.bias, 0)
        if preact:
            if bn:
                self.add_module(name + 'bn', BatchNorm1d(in_size))
            if activation is not None:
                self.add_module(name + 'activation', activation)
        self.add_module(name + 'fc', fc)
        if not preact:
            if bn:
                self.add_module(name + 'bn', BatchNorm1d(out_size))
            if activation is not None:
                self.add_module(name + 'activation', activation)
| 7,312 | 25.305755 | 95 | py |
FPConv | FPConv-master/datasets/s3dis_dataset.py | import os
import numpy as np
import sys
from torch.utils.data import Dataset
class S3DIS(Dataset):
    """S3DIS indoor-scene dataset: samples fixed-size point blocks from rooms.

    Each __getitem__ picks a random square block (block_size x block_size in
    the xy plane) from a room and returns num_point points, their labels and
    per-point class weights. Rooms are pre-loaded into memory from .npy files
    of shape N x 7 (xyz, rgb, label).
    """
    def __init__(self, split='train', data_root='trainval_fullarea', num_point=4096, test_area=5, block_size=1.0, sample_rate=1.0, transform=None, if_normal=True):
        super().__init__()
        print('Initiating DataLoader....')
        self.if_normal = if_normal
        self.num_point = num_point
        self.block_size = block_size
        self.transform = transform
        rooms = sorted(os.listdir(data_root))
        rooms = [room for room in rooms if 'Area_' in room]
        # Standard S3DIS protocol: hold out one area for testing.
        if split == 'train':
            rooms_split = [
                room for room in rooms if not 'Area_{}'.format(test_area) in room]
        else:
            rooms_split = [
                room for room in rooms if 'Area_{}'.format(test_area) in room]
        self.room_points, self.room_labels = [], []
        self.room_coord_min, self.room_coord_max = [], []
        num_point_all = []
        for room_name in rooms_split:
            room_path = os.path.join(data_root, room_name)
            room_data = np.load(room_path)  # xyzrgbl, N*7
            # xyzrgb, N*6; l, N
            points, labels = room_data[:, 0:6], room_data[:, 6]
            # Shift each room so its min corner sits at the origin.
            points[:, 0:3] -= np.amin(points, axis=0)[0:3]
            coord_min, coord_max = np.amin(points, axis=0)[
                :3], np.amax(points, axis=0)[:3]
            self.room_points.append(points), self.room_labels.append(labels)
            self.room_coord_min.append(
                coord_min), self.room_coord_max.append(coord_max)
            num_point_all.append(labels.size)
        # Generate label weights
        self.labelweights = self.__gen_labelweights(self.room_labels)
        # Sample rooms proportionally to their point count so that roughly
        # sample_rate of all points is covered per epoch.
        sample_prob = num_point_all / np.sum(num_point_all)
        num_iter = int(np.sum(num_point_all) * sample_rate / num_point)
        room_idxs = []
        for index in range(len(rooms_split)):
            room_idxs.extend(
                [index] * int(round(sample_prob[index] * num_iter)))
        self.room_idxs = np.array(room_idxs)
        # Fixed seed so the room visiting order is reproducible across runs.
        np.random.seed(123)
        np.random.shuffle(self.room_idxs)
        print('Num of labels: ', len(self.room_labels))
        print("Totally {} samples in {} set.".format(len(self.room_idxs), split))
    def __gen_labelweights(self, labels):
        """Compute per-class weights from the inverse (cube-root) class frequency."""
        labelweights = np.zeros(13)
        for seg in labels:
            tmp, _ = np.histogram(seg, range(14))
            labelweights += tmp
        labelweights = labelweights.astype(np.float32)
        labelweights = labelweights / np.sum(labelweights)
        # self.labelweights = 1/np.log(1.2+labelweights)
        return np.power(np.amax(labelweights) / labelweights, 1 / 3.0)
    def __getitem__(self, idx):
        """Return (points, labels, sample_weights) for one random block of a room."""
        room_idx = self.room_idxs[idx]
        points = self.room_points[room_idx]   # N * 6
        labels = self.room_labels[room_idx]   # N
        N_points = points.shape[0]
        # Re-draw block centers until the block contains enough points.
        while True:
            center = points[np.random.choice(N_points)][:3]
            block_min = center - [self.block_size /
                                  2.0, self.block_size / 2.0, 0]
            block_max = center + [self.block_size /
                                  2.0, self.block_size / 2.0, 0]
            point_idxs = np.where((points[:, 0] >= block_min[0]) & (points[:, 0] <= block_max[0]) & (
                points[:, 1] >= block_min[1]) & (points[:, 1] <= block_max[1]))[0]
            if point_idxs.size > 1024:
                break
        # Pad (with replacement) or subsample to exactly num_point points.
        if point_idxs.size >= self.num_point:
            selected_point_idxs = np.random.choice(
                point_idxs, self.num_point, replace=False)
        else:
            selected_point_idxs = np.random.choice(
                point_idxs, self.num_point, replace=True)
        # normalize
        selected_points = points[selected_point_idxs, :]  # num_point * 6
        # Center xy on the block; scale rgb to [0, 1].
        selected_points[:, 0] = selected_points[:, 0] - center[0]
        selected_points[:, 1] = selected_points[:, 1] - center[1]
        selected_points[:, 3:6] /= 255.0
        if self.if_normal:
            # Append room-normalized coordinates as channels 6..8.
            current_points = np.zeros((self.num_point, 9))  # num_point * 9
            current_points[:, 6] = selected_points[:, 0] / \
                self.room_coord_max[room_idx][0]
            current_points[:, 7] = selected_points[:, 1] / \
                self.room_coord_max[room_idx][1]
            current_points[:, 8] = selected_points[:, 2] / \
                self.room_coord_max[room_idx][2]
            current_points[:, 0:6] = selected_points
        else:
            current_points = selected_points
        current_labels = labels[selected_point_idxs]
        if self.transform is not None:
            current_points, current_labels = self.transform(
                current_points, current_labels)
        # Per-point loss weights looked up from the class-frequency table.
        sampleweights = self.labelweights[current_labels.astype(np.uint8)]
        return current_points, current_labels, sampleweights
    def __len__(self):
        # One sample per pre-computed room visit.
        return len(self.room_idxs)
if __name__ == '__main__':
    # Smoke test: load the dataset, print shapes/memory usage, time one epoch
    # of batches, then exit after the first batch.
    data_root = '/home/zizheng/data/s3dis/stanford_indoor3d_all_classes'
    num_point, test_area, block_size, sample_rate = 4096, 5, 1.0, 0.01
    import psutil
    print("Before loading, the memory usage is ", psutil.virtual_memory())
    point_data = S3DIS(split='train', data_root=data_root, num_point=num_point,
                       test_area=test_area, block_size=block_size, sample_rate=sample_rate, transform=None)
    print('point data size:', point_data.__len__())
    print('point data 0 shape:', point_data.__getitem__(0)[0].shape)
    print('point label 0 shape:', point_data.__getitem__(0)[1].shape)
    import torch
    import time
    import random
    # Seed every RNG so the demo is reproducible.
    manual_seed = 123
    random.seed(manual_seed)
    np.random.seed(manual_seed)
    torch.manual_seed(manual_seed)
    torch.cuda.manual_seed_all(manual_seed)
    print("After loading, the memory usage is ", psutil.virtual_memory())
    def worker_init_fn(worker_id):
        # Give each DataLoader worker a distinct, deterministic seed.
        random.seed(manual_seed + worker_id)
    train_loader = torch.utils.data.DataLoader(
        point_data, batch_size=32, shuffle=True, num_workers=16, pin_memory=True, worker_init_fn=worker_init_fn)
    for idx in range(4):
        end = time.time()
        for i, (points, target, weight) in enumerate(train_loader):
            print('time: {}/{}--{}'.format(i + 1,
                                           len(train_loader), time.time() - end))
            print('Size of points: ', points.size())
            points_np = points.cpu().data.numpy()
            points_np_block1 = points_np[0, ...]
            minp = points_np_block1[:, 0].min()
            maxp = points_np_block1[:, 0].max()
            print('weight is ', weight)
            print('Min in x is {}, Max in x is {}'.format(minp, maxp))
            print('Min in y is {}, Max in y is {}'.format(
                points_np_block1[:, 1].min(), points_np_block1[:, 1].max()))
            print("In loop, the memory usage is ", psutil.virtual_memory())
            # Stop after the first batch — this is only a smoke test.
            sys.exit(0)
| 7,008 | 42.265432 | 163 | py |
FPConv | FPConv-master/datasets/scannet_dataset_rgb_test.py | import pickle
import os
import sys
import numpy as np
import torch.utils.data as torch_data
class ScannetDatasetWholeScene_evaluation(torch_data.IterableDataset):
    # prepare to give prediction on each points
    """Whole-scene ScanNet iterator for evaluation.

    Iterating yields (point_set, labels, sample_weights, point_ids) blocks for
    one scene at a time: the scene is tiled into 2x2 m columns on a 0.5 m
    grid, small tiles are merged into their nearest neighbour, large tiles
    are split into chunks of exactly block_points points.
    """
    def __init__(self, root=None, scene_list_dir=None, split='test', num_class=21, block_points=10240, with_norm=True, with_rgb=True):
        super().__init__()
        print(' ---- load data from', root)
        self.block_points = block_points
        # Channel selection into the 9-column layout built in gen_batch_data:
        # 0-2 xyz, 3-5 scene-normalized xyz, 6-8 rgb.
        self.indices = [0, 1, 2]
        if with_norm: self.indices += [3, 4, 5]
        if with_rgb: self.indices += [6, 7, 8]
        print('load scannet <TEST> dataset <{}> with npoint {}, indices: {}.'.format(split, block_points, self.indices))
        self.point_num = []
        self.temp_data = []
        self.temp_index = 0
        self.now_index = 0
        data_filename = os.path.join(root, 'scannet_%s_rgb21c_pointid.pickle' % (split))
        # Four sequential pickle loads: points, labels, point ids, point counts.
        with open(data_filename, 'rb') as fp:
            self.scene_points_list = pickle.load(fp)
            self.semantic_labels_list = pickle.load(fp)
            self.scene_points_id = pickle.load(fp)
            self.scene_points_num = pickle.load(fp)
        file_path = os.path.join(scene_list_dir, 'scannetv2_{}.txt'.format(split))
        num_class = 21
        if split == 'test' or split == 'eval' or split == 'train':
            # Uniform label weights at evaluation time.
            self.labelweights = np.ones(num_class)
            for seg in self.semantic_labels_list:
                self.point_num.append(seg.shape[0])
            with open(file_path) as fl:
                self.scene_list = fl.read().splitlines()
        else:
            raise ValueError('split must be test or eval, {}'.format(split))
    def reset(self):
        """Clear cached blocks and rewind the scene cursor to the first scene."""
        self.temp_data = []
        self.temp_index = 0
        self.now_index = 0
    def __iter__(self):
        """Prepare the next scene's blocks (wrapping around after the last scene)."""
        if self.now_index >= len(self.scene_points_list):
            print(' ==== reset dataset index ==== ')
            self.reset()
        self.gen_batch_data()
        return self
    def chunks(self, l, n):
        """Yield successive n-sized chunks from l."""
        for i in range(0, len(l), n):
            yield l[i:i + n]
    def split_data(self, data, idx):
        """Return [data[idx[0]], data[idx[1]], ...] — fancy-indexed sub-arrays."""
        new_data = []
        for i in range(len(idx)):
            new_data += [data[idx[i]]]
        return new_data
    def nearest_dist(self, block_center, block_center_list):
        """Index of the center in block_center_list closest (L2) to block_center."""
        num_blocks = len(block_center_list)
        dist = np.zeros(num_blocks)
        for i in range(num_blocks):
            dist[i] = np.linalg.norm(block_center_list[i] - block_center, ord = 2) #i->j
        return np.argsort(dist)[0]
    def gen_batch_data(self):
        """Tile the current scene into fixed-size blocks cached in self.temp_data."""
        index = self.now_index
        self.now_index += 1
        self.temp_data = []
        self.temp_index = 0
        print(' ==== generate batch data of {} ==== '.format(self.scene_list[index]))
        delta = 0.5
        # if self.with_rgb:
        point_set_ini = self.scene_points_list[index]
        # else:
        #     point_set_ini = self.scene_points_list[index][:, 0:3]
        semantic_seg_ini = self.semantic_labels_list[index].astype(np.int32)
        coordmax = np.max(point_set_ini[:, 0:3],axis=0)
        coordmin = np.min(point_set_ini[:, 0:3],axis=0)
        nsubvolume_x = np.ceil((coordmax[0]-coordmin[0])/delta).astype(np.int32)
        nsubvolume_y = np.ceil((coordmax[1]-coordmin[1])/delta).astype(np.int32)
        point_sets = []
        semantic_segs = []
        sample_weights = []
        point_idxs = []
        block_center = []
        # 2x2 m columns on a 0.5 m grid (overlapping), with a 0.2 m margin
        # for point membership; mask marks the points strictly inside.
        for i in range(nsubvolume_x):
            for j in range(nsubvolume_y):
                curmin = coordmin+[i*delta,j*delta,0]
                curmax = curmin+[2,2,coordmax[2]-coordmin[2]]
                curchoice = np.sum((point_set_ini[:,0:3]>=(curmin-0.2))*(point_set_ini[:,0:3]<=(curmax+0.2)),axis=1)==3
                curchoice_idx = np.where(curchoice)[0]
                cur_point_set = point_set_ini[curchoice,:]
                cur_semantic_seg = semantic_seg_ini[curchoice]
                if len(cur_semantic_seg)==0:
                    continue
                mask = np.sum((cur_point_set[:,0:3]>=(curmin-0.001))*(cur_point_set[:,0:3]<=(curmax+0.001)),axis=1)==3
                sample_weight = self.labelweights[cur_semantic_seg]
                sample_weight *= mask # N
                point_sets.append(cur_point_set) # 1xNx3/6
                semantic_segs.append(cur_semantic_seg) # 1xN
                sample_weights.append(sample_weight) # 1xN
                point_idxs.append(curchoice_idx) #1xN
                block_center.append((curmin[0:2] + curmax[0:2]) / 2.0)
        # merge small blocks
        num_blocks = len(point_sets)
        block_idx = 0
        while block_idx < num_blocks:
            if point_sets[block_idx].shape[0] > self.block_points // 2:
                block_idx += 1
                continue
            # Pop the under-filled block and append its contents to the
            # remaining block whose center is nearest.
            small_block_data = point_sets[block_idx].copy()
            small_block_seg = semantic_segs[block_idx].copy()
            small_block_smpw = sample_weights[block_idx].copy()
            small_block_idxs = point_idxs[block_idx].copy()
            small_block_center = block_center[block_idx].copy()
            point_sets.pop(block_idx)
            semantic_segs.pop(block_idx)
            sample_weights.pop(block_idx)
            point_idxs.pop(block_idx)
            block_center.pop(block_idx)
            nearest_block_idx = self.nearest_dist(small_block_center, block_center)
            point_sets[nearest_block_idx] = np.concatenate((point_sets[nearest_block_idx], small_block_data), axis = 0)
            semantic_segs[nearest_block_idx] = np.concatenate((semantic_segs[nearest_block_idx], small_block_seg), axis = 0)
            sample_weights[nearest_block_idx] = np.concatenate((sample_weights[nearest_block_idx], small_block_smpw), axis = 0)
            point_idxs[nearest_block_idx] = np.concatenate((point_idxs[nearest_block_idx], small_block_idxs), axis = 0)
            num_blocks = len(point_sets)
        #divide large blocks
        num_blocks = len(point_sets)
        div_blocks = []
        div_blocks_seg = []
        div_blocks_smpw = []
        div_blocks_idxs = []
        div_blocks_center = []
        for block_idx in range(num_blocks):
            cur_num_pts = point_sets[block_idx].shape[0]
            point_idx_block = np.array([x for x in range(cur_num_pts)])
            # Pad with re-shuffled duplicates so the count divides block_points.
            if point_idx_block.shape[0]%self.block_points != 0:
                makeup_num = self.block_points - point_idx_block.shape[0]%self.block_points
                np.random.shuffle(point_idx_block)
                point_idx_block = np.concatenate((point_idx_block,point_idx_block[0:makeup_num].copy()))
            np.random.shuffle(point_idx_block)
            sub_blocks = list(self.chunks(point_idx_block, self.block_points))
            div_blocks += self.split_data(point_sets[block_idx], sub_blocks)
            div_blocks_seg += self.split_data(semantic_segs[block_idx], sub_blocks)
            div_blocks_smpw += self.split_data(sample_weights[block_idx], sub_blocks)
            div_blocks_idxs += self.split_data(point_idxs[block_idx], sub_blocks)
            div_blocks_center += [block_center[block_idx].copy() for i in range(len(sub_blocks))]
        for i in range(len(div_blocks)):
            # Build the 9-channel layout, then keep only self.indices columns.
            selected_points = div_blocks[i]
            point_set = np.zeros([self.block_points, 9])
            point_set[:, :3] = selected_points[:, :3]  # xyz
            for k in range(3):  # normalized_xyz
                point_set[:, 3 + k] = (selected_points[:, k] - coordmin[k]) / (coordmax[k] - coordmin[k])
            point_set[:, 6:] = selected_points[:, 3:] / 255.0  # rgb
            point_set = point_set[:, self.indices]
            self.temp_data.append((point_set, div_blocks_seg[i], div_blocks_smpw[i], div_blocks_idxs[i]))
    def __next__(self):
        """Return the next cached block; StopIteration ends the current scene."""
        if self.temp_index >= len(self.temp_data):
            raise StopIteration()
        else:
            idx = self.temp_index
            self.temp_index += 1
            return self.temp_data[idx]
| 8,118 | 42.417112 | 134 | py |
FPConv | FPConv-master/datasets/s3dis_dataset_test.py | import pickle
import os
import sys
import numpy as np
import torch.utils.data as torch_data
class S3DISWholeScene_evaluation(torch_data.IterableDataset):
# prepare to give prediction on each points
    def __init__(self, root=None, split='test', test_area=5, num_class=13, block_points=8192, block_size=1.5, stride=0.5, with_rgb=True):
        """Load every room of the held-out S3DIS test area into memory.

        :param root: directory of per-room .npy files (N x 7: xyz, rgb, label)
        :param split: split tag (only used downstream; rooms are picked by area)
        :param test_area: S3DIS area number whose rooms are evaluated
        :param num_class: number of semantic classes (13 for S3DIS)
        :param block_points: points per evaluation block
        :param block_size: xy edge length of each block (meters)
        :param stride: xy stride between block origins (meters)
        :param with_rgb: keep rgb columns in the loaded points
        """
        print('test area:', test_area)
        self.root = root
        self.split = split
        self.with_rgb = with_rgb
        self.block_points = block_points
        self.block_size = block_size
        self.stride = stride
        # Iteration state: cached blocks of the current scene and cursors.
        self.point_num = []
        self.temp_data = []
        self.temp_index = 0
        self.now_index = 0
        self.scene_points_list = []
        self.semantic_labels_list = []
        rooms = sorted(os.listdir(root))
        rooms = [room for room in rooms if 'Area_{}'.format(test_area) in room]
        for room_name in rooms:
            room_path = os.path.join(root, room_name)
            room_data = np.load(room_path)  # xyzrgbl, N*7
            # xyzrgb, N*6; l, N
            points, labels = room_data[:, 0:6], room_data[:, 6]
            # Shift each room so its min corner sits at the origin.
            points[:, 0:3] -= np.amin(points, axis=0)[0:3]
            self.scene_points_list.append(points)
            self.semantic_labels_list.append(labels)
        self.scene_list = [i.replace('.npy', '') for i in rooms]
        for seg in self.semantic_labels_list:
            self.point_num.append(seg.shape[0])
def reset(self):
self.temp_data = []
self.temp_index = 0
self.now_index = 0
def __iter__(self):
if self.now_index >= len(self.scene_points_list):
print(' ==== reset dataset index ==== ')
self.reset()
self.gen_batch_data()
return self
def chunks(self, l, n):
"""Yield successive n-sized chunks from l."""
for i in range(0, len(l), n):
yield l[i:i + n]
def split_data(self, data, idx):
new_data = []
for i in range(len(idx)):
new_data += [data[idx[i]]]
return new_data
def nearest_dist(self, block_center, block_center_list):
num_blocks = len(block_center_list)
dist = np.zeros(num_blocks)
for i in range(num_blocks):
dist[i] = np.linalg.norm(
block_center_list[i] - block_center, ord=2) # i->j
return np.argsort(dist)[0]
def gen_batch_data(self):
index = self.now_index
self.now_index += 1
self.temp_data = []
self.temp_index = 0
print(' ==== generate batch data of {} ==== '.format(
self.scene_list[index]))
delta = self.stride
if self.with_rgb:
point_set_ini = self.scene_points_list[index]
else:
point_set_ini = self.scene_points_list[index][:, 0:3]
semantic_seg_ini = self.semantic_labels_list[index].astype(np.int32)
coordmax = np.max(point_set_ini[:, 0:3], axis=0)
coordmin = np.min(point_set_ini[:, 0:3], axis=0)
nsubvolume_x = np.ceil(
(coordmax[0] - coordmin[0]) / delta).astype(np.int32)
nsubvolume_y = np.ceil(
(coordmax[1] - coordmin[1]) / delta).astype(np.int32)
point_sets = []
semantic_segs = []
sample_weights = []
point_idxs = []
block_center = []
for i in range(nsubvolume_x):
for j in range(nsubvolume_y):
curmin = coordmin + [i * delta, j * delta, 0]
curmax = curmin + [self.block_size, self.block_size, 0]
curchoice = np.where((point_set_ini[:, 0] >= curmin[0]) & (point_set_ini[:, 0] <= curmax[0]) & (
point_set_ini[:, 1] >= curmin[1]) & (point_set_ini[:, 1] <= curmax[1]))[0]
cur_point_set = point_set_ini[curchoice, :]
cur_semantic_seg = semantic_seg_ini[curchoice]
bc = (curmin[0:2] + curmax[0:2]) / 2.0
cur_point_set[:, 0] -= bc[0]
cur_point_set[:, 1] -= bc[1]
current_points = np.zeros((cur_point_set.shape[0], 9))
current_points[:, 6] = cur_point_set[:, 0] / coordmax[0]
current_points[:, 7] = cur_point_set[:, 1] / coordmax[1]
current_points[:, 8] = cur_point_set[:, 2] / coordmax[2]
current_points[:, 0:6] = cur_point_set
if len(cur_semantic_seg) == 0:
continue
point_sets.append(current_points) # 1xNx3/6
semantic_segs.append(cur_semantic_seg) # 1xN
point_idxs.append(curchoice) # 1xN
block_center.append(bc)
# merge small blocks
num_blocks = len(point_sets)
block_idx = 0
while block_idx < num_blocks:
if point_sets[block_idx].shape[0] > (self.block_points / 2):
block_idx += 1
continue
small_block_data = point_sets[block_idx].copy()
small_block_seg = semantic_segs[block_idx].copy()
small_block_idxs = point_idxs[block_idx].copy()
small_block_center = block_center[block_idx].copy()
point_sets.pop(block_idx)
semantic_segs.pop(block_idx)
point_idxs.pop(block_idx)
block_center.pop(block_idx)
nearest_block_idx = self.nearest_dist(
small_block_center, block_center)
point_sets[nearest_block_idx] = np.concatenate(
(point_sets[nearest_block_idx], small_block_data), axis=0)
semantic_segs[nearest_block_idx] = np.concatenate(
(semantic_segs[nearest_block_idx], small_block_seg), axis=0)
point_idxs[nearest_block_idx] = np.concatenate(
(point_idxs[nearest_block_idx], small_block_idxs), axis=0)
num_blocks = len(point_sets)
# divide large blocks
num_blocks = len(point_sets)
div_blocks = []
div_blocks_seg = []
# div_blocks_smpw = []
div_blocks_idxs = []
div_blocks_center = []
for block_idx in range(num_blocks):
cur_num_pts = point_sets[block_idx].shape[0]
point_idx_block = np.array([x for x in range(cur_num_pts)])
if point_idx_block.shape[0] % self.block_points != 0:
makeup_num = self.block_points - \
point_idx_block.shape[0] % self.block_points
np.random.shuffle(point_idx_block)
point_idx_block = np.concatenate(
(point_idx_block, point_idx_block[0:makeup_num].copy()))
np.random.shuffle(point_idx_block)
sub_blocks = list(self.chunks(point_idx_block, self.block_points))
div_blocks += self.split_data(point_sets[block_idx], sub_blocks)
div_blocks_seg += self.split_data(
semantic_segs[block_idx], sub_blocks)
div_blocks_idxs += self.split_data(
point_idxs[block_idx], sub_blocks)
div_blocks_center += [block_center[block_idx].copy()
for i in range(len(sub_blocks))]
for i in range(len(div_blocks)):
point_set = div_blocks[i]
if self.with_rgb:
point_set[:, 3:6] /= 255.0
self.temp_data.append(
(point_set, div_blocks_seg[i], div_blocks_idxs[i]))
def __next__(self):
if self.temp_index >= len(self.temp_data):
raise StopIteration()
else:
idx = self.temp_index
self.temp_index += 1
return self.temp_data[idx]
if __name__ == '__main__':
    # Smoke test: iterate the whole-scene evaluation dataset twice and print
    # batch shapes plus the x-extent of the first sample of each batch.
    dataset = S3DISWholeScene_evaluation(root='/home/zizheng/data/s3dis/stanford_indoor3d_all_classes',
                                         split='test',
                                         test_area=5,
                                         block_points=8192,
                                         with_rgb=True)
    os.environ["CUDA_VISIBLE_DEVICES"] = '3'
    batch_loader = torch_data.DataLoader(
        dataset, batch_size=12, shuffle=False, pin_memory=True, num_workers=0)
    for step, sample in enumerate(batch_loader):
        points, labels, idxs = sample
        print(points.shape)
        xs = points[0, :, 0].data.cpu().numpy()
        print(np.max(xs) - np.min(xs))
    for step, sample in enumerate(batch_loader):
        points, labels, idxs = sample
        print(points.shape)
| 8,454 | 38.143519 | 137 | py |
FPConv | FPConv-master/datasets/scannet_dataset_rgb.py | import pickle
import os
import sys
import numpy as np
import torch.utils.data as torch_data
class ScannetDataset(torch_data.Dataset):
    """ScanNet training/eval dataset yielding fixed-size crops of a scene.

    Each __getitem__ samples a 2m x 2m (full-height) box around a random
    point of a scene and returns ``npoints`` points with labels and per-point
    loss weights. Feature columns are selected via ``self.indices``:
    0-2 xyz, 3-5 normalized xyz (if ``with_norm``), 6-8 rgb (if ``with_rgb``).
    """
    def __init__(self, root=None, npoints=10240, split='train', with_dropout=False, with_norm=False, with_rgb=False, sample_rate=None):
        """Load the pickled scene list and precompute class weights.

        ``sample_rate`` (optional) controls how many crops per epoch are drawn
        from each scene, proportional to its point count.
        """
        super().__init__()
        print(' ---- load data from', root)
        self.npoints = npoints
        self.with_dropout = with_dropout
        self.indices = [0, 1, 2]
        if with_norm: self.indices += [3, 4, 5]
        if with_rgb: self.indices += [6, 7, 8]
        print('load scannet dataset <{}> with npoint {}, indices: {}.'.format(split, npoints, self.indices))
        data_filename = os.path.join(root, 'scannet_%s_rgb21c_pointid.pickle' % (split))
        with open(data_filename, 'rb') as fp:
            self.scene_points_list = pickle.load(fp)
            self.semantic_labels_list = pickle.load(fp)
            scene_points_id = pickle.load(fp)
            num_point_all = pickle.load(fp)
        if split == 'train':
            # Inverse-frequency class weights (cube-root tempered), 21 classes.
            labelweights = np.zeros(21)
            for seg in self.semantic_labels_list:
                tmp,_ = np.histogram(seg,range(22))
                labelweights += tmp
            labelweights = labelweights.astype(np.float32)
            labelweights = labelweights/np.sum(labelweights)
            # self.labelweights = 1/np.log(1.2+labelweights)
            self.labelweights = np.power(np.amax(labelweights[1:]) / labelweights, 1 / 3.0)
        elif split == 'eval' or split == 'test':
            self.labelweights = np.ones(21)
        else:
            raise ValueError('split must be train or eval.')
        if sample_rate is not None:
            # Repeat each scene index proportionally to its share of points.
            num_point = npoints
            sample_prob = num_point_all / np.sum(num_point_all)
            num_iter = int(np.sum(num_point_all) * sample_rate / num_point)
            room_idxs = []
            for index in range(len(self.scene_points_list)):
                repeat_times = round(sample_prob[index] * num_iter)
                repeat_times = int(max(repeat_times, 1))
                room_idxs.extend([index] * repeat_times)
            self.room_idxs = np.array(room_idxs)
            np.random.seed(123)
            np.random.shuffle(self.room_idxs)
        else:
            self.room_idxs = np.arange(len(self.scene_points_list))
        print("Totally {} samples in {} set.".format(len(self.room_idxs), split))

    def __getitem__(self, index):
        """Return (point_set, semantic_seg, sample_weight) for one random crop."""
        index = self.room_idxs[index]
        data_set = self.scene_points_list[index]
        point_set = data_set[:, :3]
        semantic_seg = self.semantic_labels_list[index].astype(np.int32)
        coordmax = np.max(point_set, axis=0)
        coordmin = np.min(point_set, axis=0)
        smpmin = np.maximum(coordmax-[2, 2, 3.0], coordmin)
        smpmin[2] = coordmin[2]
        smpsz = np.minimum(coordmax-smpmin,[2,2,3.0])
        smpsz[2] = coordmax[2]-coordmin[2]
        isvalid = False
        # randomly choose a point as center point and sample <n_points> points in the box area of center-point
        for i in range(10):
            curcenter = point_set[np.random.choice(len(semantic_seg),1)[0],:]
            curmin = curcenter - [1, 1, 1.5]
            curmax = curcenter + [1, 1, 1.5]
            curmin[2] = coordmin[2]
            curmax[2] = coordmax[2]
            # Points within the (slightly padded) crop box.
            curchoice = np.sum((point_set >= (curmin - 0.2)) * (point_set <= (curmax + 0.2)), axis=1) == 3
            cur_point_set = point_set[curchoice, :]
            cur_data_set = data_set[curchoice, :]
            cur_semantic_seg = semantic_seg[curchoice]
            if len(cur_semantic_seg) == 0:
                continue
            # mask marks points strictly inside the box (padding excluded);
            # only those contribute to the loss via sample_weight.
            mask = np.sum((cur_point_set >= (curmin - 0.01)) * (cur_point_set <= (curmax + 0.01)), axis=1) == 3
            # Voxel-occupancy check: accept crops that are >=70% labelled and
            # cover >=2% of the 31x31x62 voxel grid.
            vidx = np.ceil((cur_point_set[mask, :] - curmin) / (curmax - curmin) * [31.0, 31.0, 62.0])
            vidx = np.unique(vidx[:, 0] * 31.0 * 62.0 + vidx[:, 1] * 62.0 + vidx[:, 2])
            isvalid = np.sum(cur_semantic_seg > 0) / len(cur_semantic_seg) >= 0.7 and len(vidx) / 31.0 / 31.0 / 62.0 >= 0.02
            if isvalid:
                break
        choice = np.random.choice(len(cur_semantic_seg), self.npoints, replace=True)
        semantic_seg = cur_semantic_seg[choice]
        mask = mask[choice]
        sample_weight = self.labelweights[semantic_seg]
        sample_weight *= mask
        selected_points = cur_data_set[choice, :]  # np * 6, xyz + rgb
        point_set = np.zeros((self.npoints, 9))  # xyz, norm_xyz, rgb
        point_set[:, :3] = selected_points[:, :3]  # xyz
        for i in range(3):  # normalized_xyz
            point_set[:, 3 + i] = (selected_points[:, i] - coordmin[i]) / (coordmax[i] - coordmin[i])
        point_set[:, 6:] = selected_points[:, 3:] / 255.0  # rgb
        if self.with_dropout:
            # Random point dropout: overwrite a random fraction of points with
            # the first point and zero their loss weight.
            dropout_ratio = np.random.random() * 0.875 # 0 ~ 0.875
            drop_idx = np.where(np.random.random((self.npoints)) <= dropout_ratio)[0]
            point_set[drop_idx, :] = point_set[0, :]
            semantic_seg[drop_idx] = semantic_seg[0]
            sample_weight[drop_idx] *= 0
        point_set = point_set[:, self.indices]
        return point_set, semantic_seg, sample_weight

    def __len__(self):
        return len(self.room_idxs)
        # return len(self.scene_points_list)
class ScannetDatasetWholeScene(torch_data.IterableDataset):
    """Iterable ScanNet dataset covering every scene with a 2m x 2m grid.

    On each __next__, if the buffer of the current scene is exhausted, the
    next scene is tiled into full-height 2m x 2m columns; each column is
    sampled to ``npoints`` points and yielded as
    (point_set, semantic_seg, sample_weight).
    """
    def __init__(self, root=None, npoints=10240, split='train', with_norm=True, with_rgb=True):
        """Load the pickled scene list and precompute class weights."""
        super().__init__()
        print(' ---- load data from', root)
        self.npoints = npoints
        self.indices = [0, 1, 2]
        if with_norm: self.indices += [3, 4, 5]
        if with_rgb: self.indices += [6, 7, 8]
        print('load scannet <whole scene> dataset <{}> with npoint {}, indices: {}.'.format(split, npoints, self.indices))
        self.temp_data = []
        self.temp_index = 0
        self.now_index = 0
        data_filename = os.path.join(root, 'scannet_%s_rgb21c_pointid.pickle' % (split))
        with open(data_filename, 'rb') as fp:
            self.scene_points_list = pickle.load(fp)
            self.semantic_labels_list = pickle.load(fp)
        if split == 'train':
            # Inverse-frequency class weights (cube-root tempered), 21 classes.
            labelweights = np.zeros(21)
            for seg in self.semantic_labels_list:
                tmp,_ = np.histogram(seg,range(22))
                labelweights += tmp
            labelweights = labelweights.astype(np.float32)
            labelweights = labelweights/np.sum(labelweights)
            # self.labelweights = 1 / np.log(1.2 + labelweights)
            self.labelweights = np.power(np.amax(labelweights[1:]) / labelweights, 1 / 3.0)
        elif split == 'eval' or split == 'test':
            self.labelweights = np.ones(21)

    def get_data(self):
        """Pop and return the next buffered sample of the current scene."""
        idx = self.temp_index
        self.temp_index += 1
        return self.temp_data[idx]

    def reset(self):
        """Rewind to the first scene and clear the sample buffer."""
        self.temp_data = []
        self.temp_index = 0
        self.now_index = 0

    def __iter__(self):
        self.reset()
        return self

    def __next__(self):
        # Stop once all scenes are consumed and the buffer is drained.
        if self.now_index >= len(self.scene_points_list) and self.temp_index >= len(self.temp_data):
            raise StopIteration()
        if self.temp_index < len(self.temp_data):
            return self.get_data()
        # Buffer empty: tile the next scene into 2m x 2m columns.
        index = self.now_index
        self.now_index += 1
        self.temp_data = []
        self.temp_index = 0
        data_set_ini = self.scene_points_list[index]
        point_set_ini = data_set_ini[:,:3]
        semantic_seg_ini = self.semantic_labels_list[index].astype(np.int32)
        coordmax = np.max(point_set_ini,axis=0)
        coordmin = np.min(point_set_ini,axis=0)
        nsubvolume_x = np.ceil((coordmax[0]-coordmin[0])/2).astype(np.int32)
        nsubvolume_y = np.ceil((coordmax[1]-coordmin[1])/2).astype(np.int32)
        point_sets = list()
        semantic_segs = list()
        sample_weights = list()  # NOTE(review): never filled or used
        isvalid = False  # NOTE(review): never used
        for i in range(nsubvolume_x):
            for j in range(nsubvolume_y):
                curmin = coordmin+[i*2,j*2,0]
                curmax = coordmin+[(i+1)*2,(j+1)*2,coordmax[2]-coordmin[2]]
                # Points within the (slightly padded) column.
                curchoice = np.sum((point_set_ini>=(curmin-0.2))*(point_set_ini<=(curmax+0.2)),axis=1)==3
                cur_point_set = point_set_ini[curchoice,:]
                cur_data_set = data_set_ini[curchoice,:]
                cur_semantic_seg = semantic_seg_ini[curchoice]
                if len(cur_semantic_seg)==0:
                    continue
                # mask marks points strictly inside the column; padding points
                # get zero loss weight.
                mask = np.sum((cur_point_set >= (curmin - 0.001)) * (cur_point_set <= (curmax + 0.001)), axis=1) == 3
                choice = np.random.choice(len(cur_semantic_seg), self.npoints, replace=len(cur_semantic_seg) < self.npoints)
                semantic_seg = cur_semantic_seg[choice] # N
                mask = mask[choice]
                # Skip columns that are almost entirely padding.
                if sum(mask) / float(len(mask)) < 0.01:
                    continue
                sample_weight = self.labelweights[semantic_seg]
                sample_weight *= mask # N
                selected_points = cur_data_set[choice, :] # Nx6
                point_set = np.zeros([self.npoints, 9])
                point_set[:, :3] = selected_points[:, :3] # xyz
                for k in range(3): # normalized_xyz
                    point_set[:, 3 + k] = (selected_points[:, k] - coordmin[k]) / (coordmax[k] - coordmin[k])
                point_set[:, 6:] = selected_points[:, 3:] / 255.0 # rgb
                point_set = point_set[:, self.indices]
                self.temp_data.append((point_set, semantic_seg, sample_weight))
        return self.get_data()
| 9,696 | 43.278539 | 135 | py |
FPConv | FPConv-master/utils/saver.py | import os
import torch
class Saver():
    """Rotating checkpoint saver.

    Keeps at most ``max_files`` checkpoints on disk. The list of saved
    checkpoint paths (newest first) is persisted in ``<save_dir>/.saver_log``
    so rotation survives process restarts.
    """

    def __init__(self, save_dir, max_files=10):
        """
        Args:
            save_dir: directory for checkpoints; created if missing.
            max_files: maximum number of checkpoint files to keep on disk.
        """
        if not os.path.exists(save_dir):
            os.makedirs(save_dir)
        self.log_list = []
        self.save_dir = save_dir
        self.max_files = max_files
        self.saver_log_path = os.path.join(save_dir, '.saver_log')
        # Resume the rotation log from a previous run, if present.
        if os.path.isfile(self.saver_log_path):
            with open(self.saver_log_path, 'r') as f:
                self.log_list = f.read().splitlines()

    def save_checkpoint(self, model, epoch, ckpt_name, best=False):
        """Save ``model`` at ``epoch`` as ``<ckpt_name>.pth`` and rotate old files.

        ``best`` is accepted for interface compatibility but currently unused.
        """
        # Unwrap (Distributed)DataParallel so the checkpoint loads into a bare model.
        if isinstance(model, torch.nn.DataParallel) or isinstance(model, torch.nn.parallel.DistributedDataParallel):
            model_state = model.module.state_dict()
        else:
            model_state = model.state_dict()
        state = {'epoch': epoch, 'model_state': model_state}
        ckpt_name = '{}.pth'.format(ckpt_name)
        save_path = os.path.join(self.save_dir, ckpt_name)
        torch.save(state, save_path)
        # Newest first; delete the oldest file once the budget is exceeded.
        self.log_list.insert(0, save_path)
        if len(self.log_list) > self.max_files:
            pop_file = self.log_list.pop()
            # Guard against deleting the file we just wrote (same name re-saved).
            if pop_file != save_path and os.path.isfile(pop_file):
                os.remove(pop_file)
        # Persist the rotation log.
        with open(self.saver_log_path, 'w') as f:
            for log in self.log_list:
                f.write(log + '\n')

    def load_checkpoint(self, model, filename):
        """Load ``filename`` into ``model`` and return the stored epoch.

        Raises:
            FileNotFoundError: if ``filename`` does not exist.
        """
        if not os.path.isfile(filename):
            # Include the path in the exception for easier debugging.
            raise FileNotFoundError(filename)
        # BUGFIX: the original called the undefined name ``log_str`` here,
        # raising NameError on every successful load.
        print("==> Loading from checkpoint %s" % filename)
        checkpoint = torch.load(filename)
        epoch = checkpoint['epoch']
        model.load_state_dict(checkpoint['model_state'])
        print("==> Done")
        return epoch
FPConv | FPConv-master/utils/switchnorm.py | import torch
import torch.nn as nn
def convert_sn(module, momentum=0.95):
    """Recursively replace BatchNorm1d/2d/3d layers with SwitchNorm layers.

    Non-BatchNorm modules are returned as-is with their children converted
    in place. Note that the affine weights/running stats of the original
    BatchNorm layers are NOT copied — the SwitchNorm replacements start fresh.

    Args:
        module: root module to convert.
        momentum: running-stat momentum for the created SwitchNorm layers.

    Returns:
        The converted module tree.
    """
    module_output = module
    # BUGFIX: ``momentum`` was previously accepted (and recursed) but never
    # passed to the created SwitchNorm layers, so they silently used their
    # own class defaults.
    if isinstance(module, torch.nn.BatchNorm3d):
        module_output = SwitchNorm3d(module.num_features, momentum=momentum)
    elif isinstance(module, torch.nn.BatchNorm2d):
        module_output = SwitchNorm2d(module.num_features, momentum=momentum)
    elif isinstance(module, torch.nn.BatchNorm1d):
        module_output = SwitchNorm1d(module.num_features, momentum=momentum)
    for name, child in module.named_children():
        module_output.add_module(name, convert_sn(child, momentum))
    del module
    return module_output
class SwitchNorm1d(nn.Module):
    """Switchable Normalization for 3D inputs of shape (N, C, L).

    Blends instance-norm, layer-norm and (optionally) batch-norm statistics
    with learned softmax weights. Internally the input is unsqueezed to
    (N, C, L, 1) and handled like the 2D variant; the affine parameters keep
    the 4D (1, C, 1, 1) shape so existing checkpoints remain loadable.
    """
    def __init__(self, num_features, eps=1e-5, momentum=0.95, using_moving_average=True, using_bn=True,
                 last_gamma=False):
        """
        Args:
            num_features: number of channels C.
            eps: numerical-stability constant added to the variance.
            momentum: running-stat momentum (used when moving average is on).
            using_moving_average: EMA running stats if True, else accumulate sums.
            using_bn: include batch-norm statistics as a third branch.
            last_gamma: initialize the scale to 0 instead of 1.
        """
        super(SwitchNorm1d, self).__init__()
        self.eps = eps
        self.momentum = momentum
        self.using_moving_average = using_moving_average
        self.using_bn = using_bn
        self.last_gamma = last_gamma
        self.weight = nn.Parameter(torch.ones(1, num_features, 1, 1))
        self.bias = nn.Parameter(torch.zeros(1, num_features, 1, 1))
        # Softmax-mixed branch weights: [IN, LN, BN] or [IN, LN].
        if self.using_bn:
            self.mean_weight = nn.Parameter(torch.ones(3))
            self.var_weight = nn.Parameter(torch.ones(3))
        else:
            self.mean_weight = nn.Parameter(torch.ones(2))
            self.var_weight = nn.Parameter(torch.ones(2))
        if self.using_bn:
            self.register_buffer('running_mean', torch.zeros(1, num_features, 1))
            self.register_buffer('running_var', torch.zeros(1, num_features, 1))
        self.reset_parameters()

    def reset_parameters(self):
        """Zero running stats and (re-)initialize the affine parameters."""
        if self.using_bn:
            self.running_mean.zero_()
            self.running_var.zero_()
        if self.last_gamma:
            self.weight.data.fill_(0)
        else:
            self.weight.data.fill_(1)
        self.bias.data.zero_()

    def _check_input_dim(self, input):
        if input.dim() != 3:
            # BUGFIX: the message previously claimed "expected 4D input"
            # although the check requires a 3D (N, C, L) tensor.
            raise ValueError('expected 3D input (got {}D input)'
                             .format(input.dim()))

    def forward(self, x):
        """Normalize (N, C, L) input; returns a tensor of the same shape."""
        self._check_input_dim(x)
        x = x.unsqueeze(-1)  # treat as (N, C, L, 1) to share the 2D code path
        N, C, H, W = x.size()
        x = x.view(N, C, -1)
        # Instance-norm stats: per (sample, channel).
        mean_in = x.mean(-1, keepdim=True)
        var_in = x.var(-1, keepdim=True)
        # Layer-norm stats: per sample, pooled over channels via E[x^2]-E[x]^2.
        mean_ln = mean_in.mean(1, keepdim=True)
        temp = var_in + mean_in ** 2
        var_ln = temp.mean(1, keepdim=True) - mean_ln ** 2
        if self.using_bn:
            if self.training:
                # Batch-norm stats pooled over the batch; update running stats.
                mean_bn = mean_in.mean(0, keepdim=True)
                var_bn = temp.mean(0, keepdim=True) - mean_bn ** 2
                if self.using_moving_average:
                    self.running_mean.mul_(self.momentum)
                    self.running_mean.add_((1 - self.momentum) * mean_bn.data)
                    self.running_var.mul_(self.momentum)
                    self.running_var.add_((1 - self.momentum) * var_bn.data)
                else:
                    self.running_mean.add_(mean_bn.data)
                    self.running_var.add_(mean_bn.data ** 2 + var_bn.data)
            else:
                mean_bn = torch.autograd.Variable(self.running_mean)
                var_bn = torch.autograd.Variable(self.running_var)
        softmax = nn.Softmax(0)
        mean_weight = softmax(self.mean_weight)
        var_weight = softmax(self.var_weight)
        if self.using_bn:
            mean = mean_weight[0] * mean_in + mean_weight[1] * mean_ln + mean_weight[2] * mean_bn
            var = var_weight[0] * var_in + var_weight[1] * var_ln + var_weight[2] * var_bn
        else:
            mean = mean_weight[0] * mean_in + mean_weight[1] * mean_ln
            var = var_weight[0] * var_in + var_weight[1] * var_ln
        x = (x-mean) / (var+self.eps).sqrt()
        x = x.view(N, C, H, W)
        x = x * self.weight + self.bias
        return x.squeeze(-1)
class SwitchNorm2d(nn.Module):
    """Switchable Normalization for 4D inputs of shape (N, C, H, W).

    Blends instance-norm, layer-norm and (optionally) batch-norm statistics
    with learned softmax weights, then applies a per-channel affine transform.
    """
    def __init__(self, num_features, eps=1e-5, momentum=0.95, using_moving_average=True, using_bn=True,
                 last_gamma=False):
        # eps: stability constant; momentum: running-stat EMA factor;
        # using_bn: add a batch-norm branch; last_gamma: init scale to 0.
        super(SwitchNorm2d, self).__init__()
        self.eps = eps
        self.momentum = momentum
        self.using_moving_average = using_moving_average
        self.using_bn = using_bn
        self.last_gamma = last_gamma
        self.weight = nn.Parameter(torch.ones(1, num_features, 1, 1))
        self.bias = nn.Parameter(torch.zeros(1, num_features, 1, 1))
        # Softmax-mixed branch weights: [IN, LN, BN] or [IN, LN].
        if self.using_bn:
            self.mean_weight = nn.Parameter(torch.ones(3))
            self.var_weight = nn.Parameter(torch.ones(3))
        else:
            self.mean_weight = nn.Parameter(torch.ones(2))
            self.var_weight = nn.Parameter(torch.ones(2))
        if self.using_bn:
            self.register_buffer('running_mean', torch.zeros(1, num_features, 1))
            self.register_buffer('running_var', torch.zeros(1, num_features, 1))
        self.reset_parameters()

    def reset_parameters(self):
        """Zero running stats and (re-)initialize the affine parameters."""
        if self.using_bn:
            self.running_mean.zero_()
            self.running_var.zero_()
        if self.last_gamma:
            self.weight.data.fill_(0)
        else:
            self.weight.data.fill_(1)
        self.bias.data.zero_()

    def _check_input_dim(self, input):
        if input.dim() != 4:
            raise ValueError('expected 4D input (got {}D input)'
                             .format(input.dim()))

    def forward(self, x):
        """Normalize (N, C, H, W) input; returns a tensor of the same shape."""
        self._check_input_dim(x)
        N, C, H, W = x.size()
        x = x.view(N, C, -1)
        # Instance-norm stats: per (sample, channel).
        mean_in = x.mean(-1, keepdim=True)
        var_in = x.var(-1, keepdim=True)
        # Layer-norm stats: per sample, pooled over channels via E[x^2]-E[x]^2.
        mean_ln = mean_in.mean(1, keepdim=True)
        temp = var_in + mean_in ** 2
        var_ln = temp.mean(1, keepdim=True) - mean_ln ** 2
        if self.using_bn:
            if self.training:
                # Batch-norm stats pooled over the batch; update running stats.
                mean_bn = mean_in.mean(0, keepdim=True)
                var_bn = temp.mean(0, keepdim=True) - mean_bn ** 2
                if self.using_moving_average:
                    self.running_mean.mul_(self.momentum)
                    self.running_mean.add_((1 - self.momentum) * mean_bn.data)
                    self.running_var.mul_(self.momentum)
                    self.running_var.add_((1 - self.momentum) * var_bn.data)
                else:
                    self.running_mean.add_(mean_bn.data)
                    self.running_var.add_(mean_bn.data ** 2 + var_bn.data)
            else:
                # Eval: use accumulated running statistics.
                mean_bn = torch.autograd.Variable(self.running_mean)
                var_bn = torch.autograd.Variable(self.running_var)
        softmax = nn.Softmax(0)
        mean_weight = softmax(self.mean_weight)
        var_weight = softmax(self.var_weight)
        if self.using_bn:
            mean = mean_weight[0] * mean_in + mean_weight[1] * mean_ln + mean_weight[2] * mean_bn
            var = var_weight[0] * var_in + var_weight[1] * var_ln + var_weight[2] * var_bn
        else:
            mean = mean_weight[0] * mean_in + mean_weight[1] * mean_ln
            var = var_weight[0] * var_in + var_weight[1] * var_ln
        x = (x-mean) / (var+self.eps).sqrt()
        x = x.view(N, C, H, W)
        return x * self.weight + self.bias
class SwitchNorm3d(nn.Module):
    """Switchable Normalization for 5D inputs of shape (N, C, D, H, W).

    Blends instance-norm, layer-norm and (optionally) batch-norm statistics
    with learned softmax weights, then applies a per-channel affine transform.
    """
    def __init__(self, num_features, eps=1e-5, momentum=0.997, using_moving_average=True, using_bn=True,
                 last_gamma=False):
        # NOTE(review): default momentum here is 0.997 while the 1d/2d
        # variants use 0.95 — possibly intentional, confirm before unifying.
        super(SwitchNorm3d, self).__init__()
        self.eps = eps
        self.momentum = momentum
        self.using_moving_average = using_moving_average
        self.using_bn = using_bn
        self.last_gamma = last_gamma
        self.weight = nn.Parameter(torch.ones(1, num_features, 1, 1, 1))
        self.bias = nn.Parameter(torch.zeros(1, num_features, 1, 1, 1))
        # Softmax-mixed branch weights: [IN, LN, BN] or [IN, LN].
        if self.using_bn:
            self.mean_weight = nn.Parameter(torch.ones(3))
            self.var_weight = nn.Parameter(torch.ones(3))
        else:
            self.mean_weight = nn.Parameter(torch.ones(2))
            self.var_weight = nn.Parameter(torch.ones(2))
        if self.using_bn:
            self.register_buffer('running_mean', torch.zeros(1, num_features, 1))
            self.register_buffer('running_var', torch.zeros(1, num_features, 1))
        self.reset_parameters()

    def reset_parameters(self):
        """Zero running stats and (re-)initialize the affine parameters."""
        if self.using_bn:
            self.running_mean.zero_()
            self.running_var.zero_()
        if self.last_gamma:
            self.weight.data.fill_(0)
        else:
            self.weight.data.fill_(1)
        self.bias.data.zero_()

    def _check_input_dim(self, input):
        if input.dim() != 5:
            raise ValueError('expected 5D input (got {}D input)'
                             .format(input.dim()))

    def forward(self, x):
        """Normalize (N, C, D, H, W) input; returns a tensor of the same shape."""
        self._check_input_dim(x)
        N, C, D, H, W = x.size()
        x = x.view(N, C, -1)
        # Instance-norm stats: per (sample, channel).
        mean_in = x.mean(-1, keepdim=True)
        var_in = x.var(-1, keepdim=True)
        # Layer-norm stats: per sample, pooled over channels via E[x^2]-E[x]^2.
        mean_ln = mean_in.mean(1, keepdim=True)
        temp = var_in + mean_in ** 2
        var_ln = temp.mean(1, keepdim=True) - mean_ln ** 2
        if self.using_bn:
            if self.training:
                # Batch-norm stats pooled over the batch; update running stats.
                mean_bn = mean_in.mean(0, keepdim=True)
                var_bn = temp.mean(0, keepdim=True) - mean_bn ** 2
                if self.using_moving_average:
                    self.running_mean.mul_(self.momentum)
                    self.running_mean.add_((1 - self.momentum) * mean_bn.data)
                    self.running_var.mul_(self.momentum)
                    self.running_var.add_((1 - self.momentum) * var_bn.data)
                else:
                    self.running_mean.add_(mean_bn.data)
                    self.running_var.add_(mean_bn.data ** 2 + var_bn.data)
            else:
                # Eval: use accumulated running statistics.
                mean_bn = torch.autograd.Variable(self.running_mean)
                var_bn = torch.autograd.Variable(self.running_var)
        softmax = nn.Softmax(0)
        mean_weight = softmax(self.mean_weight)
        var_weight = softmax(self.var_weight)
        if self.using_bn:
            mean = mean_weight[0] * mean_in + mean_weight[1] * mean_ln + mean_weight[2] * mean_bn
            var = var_weight[0] * var_in + var_weight[1] * var_ln + var_weight[2] * var_bn
        else:
            mean = mean_weight[0] * mean_in + mean_weight[1] * mean_ln
            var = var_weight[0] * var_in + var_weight[1] * var_ln
        x = (x - mean) / (var + self.eps).sqrt()
        x = x.view(N, C, D, H, W)
        return x * self.weight + self.bias
MPMQA | MPMQA-master/evaluate.py | # Copyright(c) 2022 Liang Zhang
# E-Mail: <zhangliang00@ruc.edu.cn>
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import json
import time
import torch
from torch.nn.functional import normalize as norm
import deepspeed
import numpy as np
import torch.distributed as dist
from tqdm import tqdm
from math import ceil
# from torch.optim import Adam
from torch.utils.data import DataLoader
from collections import defaultdict
from utils import set_seed, get_logger, obj_to_cuda, load_ckpt, \
boardcast_str, gather_list, remove_repeat_sample, retrieval_eval, merge_recall, \
unique_index_and_value
from parser import get_base_parser
from dataset.mqa_dataset import get_mqa_loader
from dataset.mqa_page_contrast import MQAContrastDataset, mqa_contrast_collate_fn
from models.mqa_model import MQAT5Model
from models.utils import pad_features
from sklearn.metrics import precision_score, recall_score, f1_score
from collections import OrderedDict
from scripts.compute_metrics import compute_visual_answer_by_region_cls, compute_qa_score_by_region_cls
from nlgeval import NLGEval
# Tokens treated as punctuation and filtered out by remove_punc().
PUNCTUATIONS = ["''", "'", "``", "`", "-LRB-", "-RRB-", "-LCB-", "-RCB-", \
    ".", "?", "!", ",", ":", "-", "--", "...", ";"]
# NLG metric scorer; skipthoughts/glove backends disabled to avoid loading
# their heavy embedding models.
nlgeval = NLGEval(no_skipthoughts=True, no_glove=True)  # loads the models
def remove_punc(line):
    """Return *line* with trailing whitespace stripped and punctuation
    tokens (per PUNCTUATIONS) removed; tokens are split on single spaces."""
    tokens = line.rstrip().split(' ')
    kept = [tok for tok in tokens if tok not in PUNCTUATIONS]
    return ' '.join(kept)
def compute_visual_answer_metics(pred_related_regions, gt_regions, all_regions):
    """Precision/recall/F1 for predicted answer regions.

    For every instance, each candidate region id in ``all_regions`` is labeled
    1/0 by membership in the predicted / ground-truth sets. Scores are
    computed both per instance (then averaged: ``instance_*``) and over the
    pooled labels of all instances (``all_*``). Instances with an empty
    prediction set are counted and reported.
    """
    pooled_true = []
    pooled_pred = []
    sum_p = 0
    sum_r = 0
    sum_f1 = 0
    empty_pred_count = 0
    for region_ids, preds, gts in zip(all_regions, pred_related_regions, gt_regions):
        if not preds:
            empty_pred_count += 1
        # Binary labels per candidate region id.
        y_pred = [1 if rid in preds else 0 for rid in region_ids]
        y_true = [1 if rid in gts else 0 for rid in region_ids]
        sum_p += precision_score(y_true, y_pred, average='binary')
        sum_r += recall_score(y_true, y_pred, average='binary')
        sum_f1 += f1_score(y_true, y_pred, average='binary')
        pooled_true.extend(y_true)
        pooled_pred.extend(y_pred)
    n_instances = len(all_regions)
    metrics = {
        'instance_precision': sum_p / n_instances,
        'instance_recall': sum_r / n_instances,
        'instance_f1': sum_f1 / n_instances,
        'all_precision': precision_score(pooled_true, pooled_pred, average='binary'),
        'all_recall': recall_score(pooled_true, pooled_pred, average='binary'),
        'all_f1': f1_score(pooled_true, pooled_pred, average='binary')
    }
    if empty_pred_count > 0:
        print('#########################################')
        print(f'{empty_pred_count}/{len(all_regions)} instances has no predictions!!')
        print('#########################################')
    return metrics
def evaluate_page_contrast(args, model, page_contrast_dataset, logger, save_fn="temp.json"):
    """Distributed page-retrieval evaluation, one manual at a time.

    For each manual: encode all (question, page) pairs with the model's
    two-stream encoder across ranks, gather features/ids to every rank,
    then (rank 0 only) build the question-page similarity matrix — either a
    single dot product per pair ('global') or the model's tokenwise score
    ('tokenwise') — score retrieval, and dump per-manual predictions to
    ``<output_dir>/predict/<split>/page_contrast/<save_fn-stem>/<manual>/``.
    Rank 0 returns the merged metrics; other ranks return a zero defaultdict.
    """
    model.eval()
    manuals = page_contrast_dataset.manuals
    all_metrics = []
    # Strip the file extension from save_fn to use it as a directory name.
    save_fn = ''.join(save_fn.split('.')[:-1])
    qa2topimgs = OrderedDict()
    for i, manual_name in tqdm(enumerate(manuals), total=len(manuals)):
        torch.cuda.empty_cache()
        # Restrict the dataset to the current manual's pages/questions.
        page_contrast_dataset.set_manual(manual_name)
        sampler = torch.utils.data.DistributedSampler(
            page_contrast_dataset,
            num_replicas=dist.get_world_size(),
            rank=dist.get_rank(),
            shuffle=False
        )
        sampler = torch.utils.data.BatchSampler(
            sampler,
            batch_size=args.inf_batch_size,
            drop_last=False
        )
        dataloader = DataLoader(
            dataset=page_contrast_dataset,
            shuffle=False,
            batch_sampler=sampler,
            num_workers=args.n_workers,
            collate_fn=mqa_contrast_collate_fn
        )
        qaids = []
        dataids = []
        q_features = []
        page_features = []
        q_mask = []
        page_mask = []
        for j, batch in tqdm(enumerate(dataloader), total=len(dataloader)):
            batch = obj_to_cuda(batch)
            qaids.extend(batch['qaids'])
            dataids.extend(batch['dataids'])
            with torch.no_grad():
                # Tokenwise scoring needs per-token hidden states.
                return_hidden = (args.page_contrast_type == 'tokenwise')
                # On CUDA OOM, free the cache and retry once.
                try:
                    question_feature, context_feature = model.module.two_stream_encoding(return_hidden=return_hidden, **batch)
                except RuntimeError:
                    torch.cuda.empty_cache()
                    question_feature, context_feature = model.module.two_stream_encoding(return_hidden=return_hidden, **batch)
                # Optional projection head before L2-normalization.
                if args.page_contrast_module_type is not None:
                    question_feature = model.module.page_contrast_module(question_feature)
                    context_feature = model.module.page_contrast_module(context_feature)
                question_feature = norm(question_feature, dim=-1)
                context_feature = norm(context_feature, dim=-1)
            # Keep features on CPU to bound GPU memory during gathering.
            q_features.append(question_feature.detach().cpu())
            page_features.append(context_feature.detach().cpu())
            q_mask.append(batch['question_attn_mask'])
            page_mask.append(batch['context_attn_mask'])
        # All-gather ids/features across ranks (DistributedSampler may have
        # padded with duplicates, hence the dedup below).
        qaids = gather_list(qaids)
        dataids = gather_list(dataids)
        q_features = gather_list(q_features)
        page_features = gather_list(page_features)
        q_mask = gather_list(q_mask)
        page_mask = gather_list(page_mask)
        qaids, unique_qa_index = unique_index_and_value(qaids)
        dataids, unique_page_index = unique_index_and_value(dataids)
        if dist.get_rank() == 0:
            if args.page_contrast_type == 'global':
                # Single-vector features: cosine similarity via dot product
                # (features are already L2-normalized).
                q_features = torch.cat(q_features, dim=0)
                page_features = torch.cat(page_features, dim=0)
                q_features = q_features[unique_qa_index]
                page_features = page_features[unique_page_index]
                sim_matrix = torch.matmul(q_features, page_features.t())
                metrics, qa2topimg = retrieval_eval(sim_matrix.float(), qaids, dataids,
                    page_contrast_dataset.qaid2dataid, page_contrast_dataset.dataid2qaids, return_top_imgs=True)
            elif args.page_contrast_type == 'tokenwise':
                # Token-level features: pad to a common length, then use the
                # model's tokenwise similarity (both directions).
                q_features = pad_features(q_features)
                page_features = pad_features(page_features)
                q_mask = pad_features(q_mask)
                page_mask = pad_features(page_mask)
                q_features = q_features[unique_qa_index]
                q_mask = q_mask[unique_qa_index]
                page_features = page_features[unique_page_index]
                page_mask = page_mask[unique_page_index]
                # sim_matrix = torch.matmul(q_features, page_features.t())
                with torch.no_grad():
                    # Try on GPU first; fall back to CPU tensors on OOM.
                    try:
                        sim_matrix_qc, sim_matrix_cq = model.module.similarity_score(q_features.cuda(), page_features.cuda(), q_mask.cuda(), page_mask.cuda())
                    except RuntimeError:
                        torch.cuda.empty_cache()
                        sim_matrix_qc, sim_matrix_cq = model.module.similarity_score(q_features, page_features, q_mask, page_mask)
                sim_matrix_qc, sim_matrix_cq = sim_matrix_qc.float(), sim_matrix_cq.float()
                metrics, qa2topimg = retrieval_eval(sim_matrix_qc, qaids, dataids,
                    page_contrast_dataset.qaid2dataid, page_contrast_dataset.dataid2qaids, sim_matrix_cq, return_top_imgs=True)
                sim_matrix = sim_matrix_qc
                del sim_matrix_cq
            # Sanity checks: no qa id seen twice across manuals, and every
            # deduplicated qa id got a top-image prediction.
            assert len(qa2topimgs.keys() & qa2topimg.keys()) == 0
            assert len(qa2topimg.keys() & set(qaids)) == len(qa2topimg.keys()) == len(qaids)
            qa2topimgs.update(qa2topimg)
            print(f'Manual: {manual_name}')
            for metric, score in metrics.items():
                print(f'{metric}: {score:.3f}')
            all_metrics.append(metrics)
            # Dump per-manual artifacts for offline inspection.
            predict_dir = os.path.join(args.output_dir, 'predict', page_contrast_dataset.split, 'page_contrast', save_fn, manual_name)
            os.makedirs(predict_dir, exist_ok=True)
            with open(os.path.join(predict_dir, 'metrics.json'), 'w') as f:
                json.dump(metrics, f, indent=1)
            with open(os.path.join(predict_dir, 'qaids.json'), 'w') as f:
                json.dump(qaids, f, indent=1)
            with open(os.path.join(predict_dir, 'dataids.json'), 'w') as f:
                json.dump(dataids, f, indent=1)
            with open(os.path.join(predict_dir, 'qaid2dataid.json'), 'w') as f:
                json.dump(page_contrast_dataset.qaid2dataid, f, indent=1)
            with open(os.path.join(predict_dir, 'dataid2qaids.json'), 'w') as f:
                json.dump(page_contrast_dataset.dataid2qaids, f, indent=1)
            np.save(os.path.join(predict_dir, 'score_matrix.npy'), sim_matrix.detach().cpu().numpy())
        else:
            metrics = defaultdict(int)
    if dist.get_rank() == 0:
        # Average metrics over manuals and persist the summary.
        merged_metrics = merge_recall(all_metrics)
        predict_dir = os.path.join(args.output_dir, 'predict', page_contrast_dataset.split, 'page_contrast', save_fn)
        os.makedirs(predict_dir, exist_ok=True)
        path = os.path.join(predict_dir, 'all.json')
        with open(path, 'w') as f:
            json.dump(merged_metrics, f, indent=1)
        path = os.path.join(predict_dir, 'qa2topimgs.json')
        with open(path, 'w') as f:
            json.dump(qa2topimgs, f, indent=1)
        logger.info('Average page retrieval performance')
        for metric, score in merged_metrics.items():
            logger.info(f'{metric}: {score:.3f}')
        return merged_metrics
    else:
        return defaultdict(int)
def evaluate_visual_answer(args, model, val_loader, logger, sd_save_fn='temp.json', split='val'):
    """Distributed evaluation of visual-answer (related-region) prediction.

    Each rank runs inference on its shard of ``val_loader``; predictions are
    all-gathered and rank 0 dumps per-sample results and metrics under
    ``<output_dir>/predict/<split>/related_regions/``.

    Returns:
        dict: metrics on rank 0; an all-zero ``defaultdict(int)`` on other ranks.
    """
    model.eval()
    pred_related_regions = []
    gt_regions = []
    qa_ids = []
    all_regions = []
    for step, batch in tqdm(enumerate(val_loader), ncols=50, total=len(val_loader)):
        batch = obj_to_cuda(batch)
        with torch.no_grad():
            pred_related_region = model.module.visual_answer_inference(**batch)
        qa_ids.extend(batch['qa_ids'])
        pred_related_regions.extend(pred_related_region)
        gt_regions.extend(batch['related_regions'])
        # One candidate-region-id list per sample in the batch.
        all_regions.extend(list(batch['region_positions'][i].keys()) for i in range(len(batch['qa_ids'])))
    # Remove repeat samples
    # The DistributedSampler pads the last batch by repeating samples; truncate
    # to this rank's true share before gathering.
    N_samples = len(val_loader.dataset)
    samples_per_rank = ceil((N_samples-dist.get_rank())/dist.get_world_size())
    qa_ids = qa_ids[:samples_per_rank]
    pred_related_regions = pred_related_regions[:samples_per_rank]
    gt_regions = gt_regions[:samples_per_rank]
    all_regions = all_regions[:samples_per_rank]
    # Gather the per-rank shards onto every rank; only rank 0 processes them.
    qa_ids_list = [None] * dist.get_world_size()
    dist.all_gather_object(qa_ids_list, qa_ids)
    pred_related_regions_list = [None] * dist.get_world_size()
    dist.all_gather_object(pred_related_regions_list, pred_related_regions)
    gt_regions_list = [None] * dist.get_world_size()
    dist.all_gather_object(gt_regions_list, gt_regions)
    all_regions_list = [None] * dist.get_world_size()
    dist.all_gather_object(all_regions_list, all_regions)
    if dist.get_rank() == 0:
        qa_ids, pred_related_regions, gt_regions, all_regions = [], [], [], []
        gt_region_types, pred_region_types, all_region_types = [], [], []
        for i in range(dist.get_world_size()):
            qa_ids.extend(qa_ids_list[i])
            pred_related_regions.extend(pred_related_regions_list[i])
            gt_regions.extend(gt_regions_list[i])
            all_regions.extend(all_regions_list[i])
        # Map region ids to their semantic class; ids missing from rid2cls are dropped.
        for rids in gt_regions:
            gt_region_types.append([val_loader.dataset.rid2cls[r] for r in rids if r in val_loader.dataset.rid2cls])
        for rids in pred_related_regions:
            pred_region_types.append([val_loader.dataset.rid2cls[r] for r in rids if r in val_loader.dataset.rid2cls])
        for rids in all_regions:
            all_region_types.append([val_loader.dataset.rid2cls[r] for r in rids if r in val_loader.dataset.rid2cls])
        predict_items = []
        for qa_id, pred_related_region, gt_region, all_region, gt_region_type, pred_region_type, all_region_type in zip(qa_ids, pred_related_regions, gt_regions, all_regions, gt_region_types, pred_region_types, all_region_types):
            predict_items.append({
                'image_id': qa_id,
                'pred_regions': pred_related_region,
                'pred_region_cls': pred_region_type,
                'gt_regions': gt_region,
                'gt_region_cls': gt_region_type,
                'all_regions': all_region,
                'all_region_cls': all_region_type
            })
        predict_dir = os.path.join(args.output_dir, 'predict', split, 'related_regions')
        os.makedirs(predict_dir, exist_ok=True)
        path = os.path.join(predict_dir, sd_save_fn)
        with open(path, 'w') as f:
            json.dump(predict_items, f, indent=1)
        # Overall metrics plus a per-region-class breakdown.
        metrics = compute_visual_answer_metics(pred_related_regions, gt_regions, all_regions)
        cls_metrics = compute_visual_answer_by_region_cls(predict_items, is_print=False)
        cls_metrics['All'] = metrics
        path = os.path.join(predict_dir, sd_save_fn.replace('.json', '_metrics.json'))
        with open(path, 'w') as f:
            json.dump(cls_metrics, f, indent=1)
        for metric, score in metrics.items():
            logger.info(f'{metric}: {score:.3f}')
        return metrics
    else:
        return defaultdict(int)
def evaluate_question_answer(args, model, val_loader, logger, save_fn='temp.json', split='val'):
    """Distributed evaluation of free-form textual answer generation.

    Decodes answers greedily, or with beam search when ``args.beam_size > 1``,
    gathers predictions across ranks, and on rank 0 writes predictions and
    NLG metrics under ``<output_dir>/predict/<split>/``.

    Returns:
        dict: NLG metrics on rank 0; an all-zero ``defaultdict(int)`` elsewhere.
    """
    predictions = []
    questions = []
    gts = []
    gt_regions = []
    qa_ids = []
    predict_items = []
    image_paths = []
    model.eval()
    for step, batch in tqdm(enumerate(val_loader), ncols=50, total=len(val_loader)):
        batch = obj_to_cuda(batch)
        with torch.no_grad():
            if args.beam_size is None or args.beam_size <= 1:
                _, prediction = model.module.greedy_inference(**batch)
            else:
                _, prediction = model.module.beam_search(beam_size=args.beam_size, length_penalty=args.length_penalty, **batch)
        predictions.extend(prediction)
        qa_ids.extend(batch['qa_ids'])
        image_paths.extend(batch['image_paths'])
        questions.extend(model.module.tokenizer.batch_decode(batch['question_ids'], skip_special_tokens=True))
        gts.extend(model.module.tokenizer.batch_decode(batch['answer_ids'], skip_special_tokens=True))
        gt_regions.extend(batch['related_regions'])
    for qa_id, image_path, question, predict, gt, gt_region in zip(qa_ids, image_paths, questions, predictions, gts, gt_regions):
        predict_items.append({
            'image_id': qa_id,
            'image_path': image_path,
            'question': question,
            'caption': predict,
            'gt': gt,
            'gt_regions': gt_region,
            'gt_region_cls': [val_loader.dataset.rid2cls[r] for r in gt_region if r in val_loader.dataset.rid2cls]
        })
    # Remove repeat samples
    # Truncate the DistributedSampler's padding duplicates before gathering.
    N_samples = len(val_loader.dataset)
    samples_per_rank = ceil((N_samples-dist.get_rank())/dist.get_world_size())
    predict_items = predict_items[:samples_per_rank]
    predict_list = [None] * dist.get_world_size()
    dist.all_gather_object(predict_list, predict_items)
    if dist.get_rank() == 0:
        candidates = []
        for predict in predict_list:
            candidates.extend(predict)
        # assert len(candidates) == N_samples
        # Sort by image_id for a stable output order; ids may be non-numeric.
        try:
            candidates.sort(key=lambda x: int(x['image_id']))
        except:
            candidates.sort(key=lambda x: x['image_id'])
        predict_dir = os.path.join(args.output_dir, 'predict', split)
        os.makedirs(predict_dir, exist_ok=True)
        path = os.path.join(predict_dir, save_fn)
        with open(path, 'w') as f:
            json.dump(candidates, f, indent=1)
        all_predictions = [x['caption'] for x in candidates]
        all_answers = [x['gt'] for x in candidates]
        # Lowercase and strip punctuation before NLG scoring; nlgeval expects
        # references as a list of reference lists.
        all_predictions = [remove_punc(sent).lower() for sent in all_predictions]
        all_answers = [[remove_punc(sent).lower() for sent in all_answers]]
        metrics = nlgeval.compute_metrics(all_answers, all_predictions)
        metrics_divide_by_cls = compute_qa_score_by_region_cls(candidates, is_print=False)
        metrics_divide_by_cls['All'] = metrics
        path = os.path.join(predict_dir, save_fn.replace('.json', '_metrics.json'))
        with open(path, 'w') as f:
            json.dump(metrics_divide_by_cls, f, indent=1)
        for metric, score in metrics.items():
            logger.info(f'{metric}: {score:.3f}')
        return metrics
    else:
        return defaultdict(int)
def evaluate_ds(args, model, val_loader, logger, save_fn='temp.json', split='val'):
    """Run every enabled evaluation stage (page retrieval, visual answer,
    textual answer) and merge their metric dicts.

    Stages are toggled by ``args.page_contrast`` / ``args.visual_answer`` /
    ``args.text_answer``; the GPU cache is cleared between stages.
    """
    logger.info(f'Evaluating on {split} split...')
    metrics = defaultdict(int)
    if args.page_contrast:
        dataset = MQAContrastDataset(args, args.root, model.module.tokenizer, split)
        recall_metrics = evaluate_page_contrast(args, model, dataset, logger, save_fn=save_fn)
        metrics.update(recall_metrics)
        torch.cuda.empty_cache()
        if args.use_retrieved_qa2dataid:
            # Wait for all ranks, then let downstream QA use the retrieved
            # page mapping instead of the ground-truth one.
            dist.barrier()
            val_loader.dataset.set_use_retrieved_qa2dataid()
    if args.visual_answer:
        sd_metrics = evaluate_visual_answer(args, model, val_loader, logger, sd_save_fn=save_fn, split=split)
        metrics.update(sd_metrics)
        torch.cuda.empty_cache()
    if args.text_answer:
        qa_metrics = evaluate_question_answer(args, model, val_loader, logger, save_fn=save_fn, split=split)
        metrics.update(qa_metrics)
        torch.cuda.empty_cache()
    if dist.get_rank() == 0:
        for metric, score in metrics.items():
            logger.info(f'{metric}: {score:.3f}')
    return metrics
def main(args):
    """Evaluation entry point: set up logging/distributed state, build the
    model and data loaders, optionally load a checkpoint, then evaluate on
    each requested split."""
    set_seed(args.seed)
    torch.cuda.set_device(args.local_rank)
    os.makedirs(args.output_dir, exist_ok=True)
    logger = get_logger(os.path.join(args.output_dir, 'log.txt'))
    logger.info(args)
    if args.deepspeed:
        deepspeed.init_distributed()
    nowtime = None
    # Only rank 0 creates the timestamped run directory and saves the config.
    if not args.deepspeed or (args.deepspeed and dist.get_rank() == 0):
        nowtime = time.strftime('%Y-%m-%d-%H:%M:%S', time.localtime())
        os.makedirs(os.path.join(args.output_dir, 'eval_opt', nowtime), exist_ok=True)
        with open(os.path.join(args.output_dir, 'eval_opt', nowtime, 'config.json'), 'w') as f:
            json.dump(args.__dict__, f, indent=1, ensure_ascii=False)
        if args.deepspeed_config is not None:
            os.system(f'cp {args.deepspeed_config} {os.path.join(args.output_dir, "eval_opt", nowtime)}')
    if args.deepspeed:
        # Share rank 0's timestamp so all ranks log into the same directory.
        nowtime = boardcast_str(nowtime, src_rank=0)
        logger = get_logger(os.path.join(args.output_dir, 'eval_opt', nowtime, f'log.{dist.get_rank()}.txt'))
    else:
        logger = get_logger(os.path.join(args.output_dir, 'eval_opt', nowtime, f'log.txt'))
    logger.info(args)
    model = MQAT5Model(args, pretrained_dir=args.pretrained_dir)
    split2loader = OrderedDict()
    # args.eval_set may be a single split name or a list of them.
    if isinstance(args.eval_set, str):
        splits = [args.eval_set]
    else:
        splits = args.eval_set
    for split in splits:
        split2loader[split] = get_mqa_loader(args, root=args.root, tokenizer=model.tokenizer, batch_size=args.batch_size, split=split, num_workers=args.n_workers, eval_on_train=True)
    model.resize_token_embeddings()
    model.cuda()
    if args.deepspeed:
        model, _, _, _ = deepspeed.initialize(
            args=args,
            model=model,
            model_parameters=model.parameters()
        )
    if args.checkpoint:
        load_ckpt(args, args.checkpoint, model, logger=logger, load_module_only=True)
    for split, val_loader in split2loader.items():
        evaluate_ds(args, model, val_loader, logger, save_fn=args.save_fn, split=split)
if __name__ == '__main__':
    # Parse CLI args; values from --config override CLI defaults
    # (except local_rank, which must come from the launcher).
    parser = get_base_parser()
    parser.add_argument('--save_fn', type=str, default='temp.json')
    parser.add_argument('--sd_save_fn', type=str, default='temp.json')
    args = parser.parse_args()
    if args.config is not None:
        args_dict = json.load(open(args.config, 'r', encoding='utf-8'))
        for key, value in args_dict.items():
            if key == 'local_rank':
                continue
            setattr(args, key, value)
    main(args)
| 21,617 | 42.761134 | 229 | py |
MPMQA | MPMQA-master/utils.py | # Copyright(c) 2022 Liang Zhang
# E-Mail: <zhangliang00@ruc.edu.cn>
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import torch
import random
import logging
import argparse
import deepspeed
import numpy as np
from math import ceil
import torch.distributed as dist
from collections import OrderedDict, defaultdict
def obj_to_cuda(obj):
    """Recursively move every tensor inside a nested container to CUDA.

    Lists, tuples and dicts are rebuilt with the same structure; tensors are
    moved with ``.cuda()``; any other value is returned unchanged.

    Args:
        obj: tensor, list, tuple, dict, or arbitrary object.

    Returns:
        The same structure with all contained tensors on the current device.
    """
    if isinstance(obj, torch.Tensor):
        return obj.cuda()
    if isinstance(obj, list):
        return [obj_to_cuda(t) for t in obj]
    if isinstance(obj, tuple):
        # Bug fix: the original returned a bare generator expression here,
        # so tuple inputs silently became one-shot generators.
        return tuple(obj_to_cuda(t) for t in obj)
    if isinstance(obj, dict):
        return {key: obj_to_cuda(t) for key, t in obj.items()}
    return obj
def save_ckpt(args, model, optimizer, output_dir, epoch, logger):
    """Save a training checkpoint under ``<output_dir>/ckpts/checkpoint.<epoch>``.

    With DeepSpeed the engine's own sharded checkpointing is used (the path is
    split into load_dir + tag); otherwise model/optimizer state dicts and the
    epoch are saved with ``torch.save``.
    """
    os.makedirs(os.path.join(output_dir, 'ckpts'), exist_ok=True)
    ckpt_path = os.path.join(output_dir, 'ckpts', f'checkpoint.{epoch}')
    logger.info(f'Saving checkpoint {ckpt_path}...')
    if args.deepspeed:
        ckpt_path = ckpt_path.rstrip('/')
        tag = ckpt_path.split('/')[-1]
        load_dir = '/'.join(ckpt_path.split('/')[:-1])
        model.save_checkpoint(load_dir, tag)
    else:
        checkpoint = OrderedDict()
        checkpoint['model'] = model.state_dict()
        checkpoint['optimizer'] = optimizer.state_dict()
        checkpoint['epoch'] = epoch
        # Legacy serialization format for compatibility with older torch.load.
        torch.save(checkpoint, ckpt_path, _use_new_zipfile_serialization=False)
def load_ckpt(args, ckpt_path, model, optimizer=None, logger=None, load_module_only=False):
    """Restore a checkpoint saved by :func:`save_ckpt`.

    With DeepSpeed the path is split into load_dir + tag and the engine's
    checkpoint loader is used; ``load_module_only`` skips optimizer and LR
    scheduler state (used when fine-tuning / evaluating).
    """
    if logger is not None:
        logger.info(f'Loading model from {ckpt_path}')
    if args.deepspeed:
        ckpt_path = ckpt_path.rstrip('/')
        tag = ckpt_path.split('/')[-1]
        load_dir = '/'.join(ckpt_path.split('/')[:-1])
        model.load_checkpoint(load_dir, tag, load_module_only=load_module_only,
                              load_module_strict=False,
                              load_optimizer_states=(not load_module_only),
                              load_lr_scheduler_states=(not load_module_only))
    else:
        ckpt = torch.load(ckpt_path, map_location='cpu')
        model.load_state_dict(ckpt['model'])
        if optimizer is not None and not load_module_only:
            if logger is not None:
                logger.info(f'Loading optimizer')
            optimizer.load_state_dict(ckpt['optimizer'])
@torch.no_grad()
def retrieval_eval(score_matrix, txt_ids, img_ids, txt2img, img2txts, score_matrix_2=None, return_top_imgs=False):
    """Compute bidirectional retrieval recall@{1,3,5,10}.

    ``score_matrix`` is (num_txt, num_img): rows score a text query against all
    images. ``score_matrix_2``, if given, is used (transposed) for the
    image->text direction. ``txt2img`` maps each text id to its ground-truth
    image id; ``img2txts`` maps each image id to all matching text ids.
    Returns a metric dict, plus (optionally) each text's top-1 image id.
    """
    # image retrieval
    img2j = {i: j for j, i in enumerate(img_ids)}
    # Top-10 image columns per text row (fewer if there are <10 candidates).
    _, rank_txt = score_matrix.topk(min(10, score_matrix.size(1)), dim=1)
    txt2topimg = OrderedDict()
    topimgs = rank_txt[:, 0]
    for i, txt_id in enumerate(txt_ids):
        txt2topimg[txt_id] = img_ids[topimgs[i]]
    if score_matrix.size(1) < 10:
        print(f'WARNING: find {score_matrix.size(1)} candidate images, less than 10.')
    # Column index of each query's ground-truth image, broadcast to rank_txt's shape.
    gt_img_j = torch.LongTensor([img2j[txt2img[txt_id]]
                                for txt_id in txt_ids],
                                ).to(rank_txt.device
                                ).unsqueeze(1).expand_as(rank_txt)
    # Position of the ground truth within each query's top-k list.
    rank = (rank_txt == gt_img_j).nonzero()[:,1]
    if rank.numel():
        ir_r1 = (rank < 1).sum().item() / len(txt_ids)
        ir_r3 = (rank < 3).sum().item() / len(txt_ids)
        ir_r5 = (rank < 5).sum().item() / len(txt_ids)
        ir_r10 = (rank < 10).sum().item() / len(txt_ids)
    else:
        ir_r1, ir_r3, ir_r5, ir_r10 = 0, 0, 0, 0
    # text retrieval
    txt2i = {t: i for i, t in enumerate(txt_ids)}
    if score_matrix_2 is not None:
        score_matrix = score_matrix_2.t()
    _, rank_img = score_matrix.topk(min(10, score_matrix.size(0)), dim=0)
    if score_matrix.size(0) < 10:
        print(f'WARNING: find {score_matrix.size(0)} candidate txts, less than 10.')
    tr_r1, tr_r3, tr_r5, tr_r10 = 0, 0, 0, 0
    for j, img_id in enumerate(img_ids):
        # Best (smallest) rank over all texts matching this image; 10 = miss.
        gt_is = [txt2i[t] for t in img2txts[img_id]]
        ranks = [(rank_img[:, j] == i).nonzero() for i in gt_is]
        rank = min([10] + [r.item() for r in ranks if r.numel()])
        if rank < 1:
            tr_r1 += 1
        if rank < 3:
            tr_r3 += 1
        if rank < 5:
            tr_r5 += 1
        if rank < 10:
            tr_r10 += 1
    tr_r1 /= len(img_ids)
    tr_r3 /= len(img_ids)
    tr_r5 /= len(img_ids)
    tr_r10 /= len(img_ids)
    # tr_mean = (tr_r1 + tr_r5 + tr_r10) / 3
    # ir_mean = (ir_r1 + ir_r5 + ir_r10) / 3
    # Means use R@{1,3,5} (R@10 is excluded deliberately, see commented lines).
    tr_mean = (tr_r1 + tr_r3 + tr_r5) / 3
    ir_mean = (ir_r1 + ir_r3 + ir_r5) / 3
    r_mean = (tr_mean + ir_mean) / 2
    eval_log = {'qa2page_r1': ir_r1,
                'qa2page_r3': ir_r3,
                'qa2page_r5': ir_r5,
                'qa2page_r10': ir_r10,
                'qa2page_r_mean': ir_mean,
                'qa_nums': len(txt_ids),
                'page2qa_r1': tr_r1,
                'page2qa_r3': tr_r3,
                'page2qa_r5': tr_r5,
                'page2qa_r10': tr_r10,
                'page2qa_r_mean': tr_mean,
                'page_nums': len(img_ids),
                'r_mean': r_mean,
                }
    if return_top_imgs:
        return eval_log, txt2topimg
    else:
        return eval_log
def merge_recall(all_metrics):
    """Merge per-manual retrieval metrics into one weighted-average dict.

    Totals ``qa_nums``/``page_nums`` and averages each recall metric with a
    weighted mean, then recomputes the overall ``r_mean``.

    NOTE(review): qa2page scores are weighted by page_nums and page2qa scores
    by qa_nums — verify this cross-weighting is intended.
    """
    merged = defaultdict(int)
    total_qa = sum(m['qa_nums'] for m in all_metrics)
    total_pages = sum(m['page_nums'] for m in all_metrics)
    merged['qa_nums'] = total_qa
    merged['page_nums'] = total_pages
    qa2page_keys = ['qa2page_r1', 'qa2page_r3', 'qa2page_r5', 'qa2page_r10', 'qa2page_r_mean']
    page2qa_keys = ['page2qa_r1', 'page2qa_r3', 'page2qa_r5', 'page2qa_r10', 'page2qa_r_mean']
    for key in qa2page_keys:
        weighted_total = sum(m[key] * m['page_nums'] for m in all_metrics)
        merged[key] = weighted_total / total_pages
    for key in page2qa_keys:
        weighted_total = sum(m[key] * m['qa_nums'] for m in all_metrics)
        merged[key] = weighted_total / total_qa
    merged['r_mean'] = (merged['qa2page_r_mean'] + merged['page2qa_r_mean']) / 2
    return merged
def harmonic_mean(data):
    """Return the harmonic mean of *data*, or 0 if any value is zero."""
    if any(value == 0 for value in data):
        return 0
    reciprocal_sum = sum(1 / value for value in data)
    return len(data) / reciprocal_sum
def boardcast_str(s, src_rank=0):
    """Broadcast a picklable object from ``src_rank`` to every rank and
    return the received value (each rank gets ``src_rank``'s copy)."""
    object_list = [s]
    dist.broadcast_object_list(object_list=object_list, src=src_rank)
    return object_list[0]
def gather_list(list_to_gather):
    """All-gather a Python list across ranks and flatten the per-rank
    shards into a single list (order follows rank order)."""
    world_size = dist.get_world_size()
    per_rank = [None] * world_size
    dist.all_gather_object(per_rank, list_to_gather)
    flattened = []
    for rank_items in per_rank:
        flattened.extend(rank_items)
    return flattened
def gather_tensor(tensor_to_gather):
    """All-gather an object from every rank and return the per-rank list
    (one entry per rank, not flattened).

    NOTE(review): uses ``all_gather_object`` (pickling) rather than the
    tensor collective ``all_gather`` — confirm this is intentional for
    tensors, as pickled tensors land on their origin device.
    """
    results = [None] * dist.get_world_size()
    dist.all_gather_object(results, tensor_to_gather)
    return results
def remove_repeat_sample(list_to_process, N_samples):
    """Truncate this rank's results to its true share of ``N_samples``,
    dropping the duplicates the DistributedSampler padded in."""
    samples_per_rank = ceil((N_samples-dist.get_rank())/dist.get_world_size())
    return list_to_process[:samples_per_rank]
def unique_index_and_value(dataids):
    """Deduplicate *dataids* while preserving first-occurrence order.

    Args:
        dataids: iterable of hashable ids.

    Returns:
        tuple[list, list]: (unique values in order of first appearance,
        index of each value's first occurrence in *dataids*).
    """
    seen = set()
    unique_dataids = []
    unique_index = []
    for index, dataid in enumerate(dataids):
        if dataid not in seen:
            seen.add(dataid)
            unique_dataids.append(dataid)
            unique_index.append(index)
    # Fix: dropped the original's dead reassignment of the input parameter
    # (`dataids = unique_dataids`) right before the return.
    return unique_dataids, unique_index
def set_seed(seed):
    """Seed every RNG (python, hash, numpy, torch CPU/CUDA) and force
    deterministic cuDNN behavior for reproducible runs."""
    os.environ['PYTHONHASHSEED'] = str(seed)
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)  # covers every GPU in multi-GPU setups
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
def get_logger(filename=None):
    """Return the shared 'logger' logger, configured for console output and,
    if *filename* is given, an additional file handler on the root logger.

    NOTE(review): repeated calls with a filename keep appending file handlers
    to the root logger — confirm duplicate handlers are acceptable.
    """
    logger = logging.getLogger('logger')
    logger.setLevel(logging.DEBUG)
    logging.basicConfig(format='%(asctime)s - %(levelname)s - %(message)s',
                        datefmt='%m/%d/%Y %H:%M:%S',
                        level=logging.INFO)
    if filename is not None:
        handler = logging.FileHandler(filename)
        handler.setLevel(logging.DEBUG)
        handler.setFormatter(logging.Formatter('%(asctime)s:%(levelname)s: %(message)s'))
        logging.getLogger().addHandler(handler)
    return logger
| 8,686 | 35.965957 | 114 | py |
MPMQA | MPMQA-master/train.py | # Copyright(c) 2022 Liang Zhang
# E-Mail: <zhangliang00@ruc.edu.cn>
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import json
import time
import torch
import deepspeed
import torch.distributed as dist
from tqdm import tqdm
from utils import set_seed, get_logger, obj_to_cuda, save_ckpt, load_ckpt, boardcast_str, harmonic_mean
from parser import get_base_parser
from dataset.mqa_dataset import get_mqa_loader
from models.mqa_model import MQAT5Model
from evaluate import evaluate_ds
def _aggregate_val_score(args, metrics, val_metric):
    """Reduce validation metrics to a single scalar for model selection.

    A string ``val_metric`` selects one metric directly; a list is aggregated
    according to ``args.val_metric_aggregate`` ('harmonic_mean' or 'mean').
    """
    if isinstance(val_metric, str):
        return metrics[val_metric]
    scores = [metrics[n] for n in val_metric]
    if args.val_metric_aggregate == 'harmonic_mean':
        return harmonic_mean(scores)
    elif args.val_metric_aggregate == 'mean':
        return sum(scores) / len(scores)
    raise ValueError(f'Unknown val_metric_aggregate: {args.val_metric_aggregate}')
def train(args, model, train_loader, val_loader, test_loader, optimizer, logger, val_metric='ROUGE_L'):
    """Training loop with per-epoch validation, best-checkpoint tracking and a
    final test evaluation of the best checkpoint.

    Fix: the per-epoch validation-score aggregation previously hard-coded the
    harmonic mean for list metrics (its ``elif args.val_metric_aggregate ==
    'mean'`` branch was an unreachable sibling of the isinstance chain). Both
    the resume-time and per-epoch aggregations now share
    :func:`_aggregate_val_score`, matching the documented behavior.
    """
    logger.info('Start training')
    total_step = 0
    best_epoch = -1
    best_score = -1
    best_ckpt_path = None
    ckpt_dir = os.path.join(args.output_dir, 'ckpts')
    start_epoch = args.start_epoch
    if args.checkpoint:
        # Resume: reload the checkpoint and score it so it competes for "best".
        logger.info(f'Resume training from {args.checkpoint}')
        logger.info(f'Start epoch {start_epoch}')
        # assert args.start_epoch != 0
        load_ckpt(args, args.checkpoint, model, logger, load_module_only=args.load_module_only)
        metrics = evaluate_ds(args, model, val_loader, logger, save_fn=f'epoch_{start_epoch}.json', split='val')
        val_score = _aggregate_val_score(args, metrics, val_metric)
        if ((not args.deepspeed) or dist.get_rank()==0) and val_score > best_score:
            best_score = val_score
            best_epoch = start_epoch
            best_ckpt_path = os.path.join(ckpt_dir, f'checkpoint.{best_epoch}')
            logger.info(f'Epoch {best_epoch} get best {args.val_metric_aggregate} score {val_metric}: {best_score}')
            logger.info(f'Best checkpoint at {best_ckpt_path}')
    # use for schedule sampling
    total_step = (args.epoch - start_epoch) * len(train_loader)
    now_step = start_epoch * len(train_loader)
    for epoch in range(start_epoch, args.epoch):
        model.train()
        # model.eval() # for debug, DO NOT forget to remove
        # The region-feature extractor stays frozen in eval mode.
        model.module.roi_extractor.eval()
        # Set epoch must be called, otherwise the order of data in each epoch is the same
        train_loader.sampler.set_epoch(epoch)
        for step, batch in tqdm(enumerate(train_loader), ncols=50, total=len(train_loader)):
            batch = obj_to_cuda(batch)
            loss_dict = model(**batch, now_step=now_step, total_step=total_step)
            loss = loss_dict['loss']
            if args.deepspeed:
                model.backward(loss)
                model.step()
            else:
                optimizer.zero_grad()
                loss.backward()
                optimizer.step()
            now_step += 1
            if now_step % 100 == 0:
                if args.deepspeed:
                    logger.info(f'Epoch: {epoch+1}/{args.epoch}, step: {step}/{len(train_loader)}, loss: {float(loss.detach().cpu())}')
                else:
                    logger.info(f'Epoch: {epoch+1}/{args.epoch}, step: {step}/{len(train_loader)}, lr: {min(optimizer.get_lr())}-{max(optimizer.get_lr())}, loss: {float(loss.detach().cpu())}')
                # Log the individual loss components too.
                for loss_name, loss_value in loss_dict.items():
                    if loss_name == 'loss':
                        continue
                    logger.info(f'{loss_name}: {float(loss_value.detach().cpu())}')
            if args.debug:
                break
        save_ckpt(args, model, None, args.output_dir, epoch=epoch+1, logger=logger)
        metrics = evaluate_ds(args, model, val_loader, logger, save_fn=f'epoch_{epoch+1}.json', split='val')
        val_score = _aggregate_val_score(args, metrics, val_metric)
        if val_score > best_score and ((not args.deepspeed) or dist.get_rank()==0):
            best_score = val_score
            best_epoch = epoch+1
            best_ckpt_path = os.path.join(ckpt_dir, f'checkpoint.{best_epoch}')
            logger.info(f'Epoch {best_epoch} get best {args.val_metric_aggregate} score {val_metric}: {best_score}')
            logger.info(f'Best checkpoint at {best_ckpt_path}')
        # If the checkpoint of previous epoch did not perform best, remove it.
        if args.save_best_last and ((not args.deepspeed) or dist.get_rank()==0):
            previous_epoch = epoch
            previous_ckpt_path = os.path.join(ckpt_dir, f'checkpoint.{previous_epoch}')
            if previous_epoch > start_epoch and previous_ckpt_path != best_ckpt_path:
                logger.info(f'Remove {previous_ckpt_path} that does not preform best.')
                cmd = f'rm -r {previous_ckpt_path}'
                logger.info(f'Execute command: \n{cmd}')
                os.system(cmd)
    if (not args.deepspeed) or dist.get_rank() == 0:
        logger.info(f'Epoch {best_epoch} get best {args.val_metric_aggregate} mean score {val_metric}: {best_score}')
        logger.info(f'Load checkpoint {best_ckpt_path} to perform testing')
    # All ranks must agree on which checkpoint to load for the test pass.
    best_ckpt_path = boardcast_str(best_ckpt_path, src_rank=0)
    load_ckpt(args, best_ckpt_path, model, logger=logger, load_module_only=True)
    # Free training resources before the (memory-hungry) test evaluation.
    del train_loader
    del val_loader
    del optimizer
    torch.cuda.empty_cache()
    if args.deepspeed:
        metrics = evaluate_ds(args, model, test_loader, logger, save_fn=f'epoch_{best_epoch}.json', split='test')
def get_parameter_group(args, model):
    """Split model parameters into two optimizer groups: weight-decayed
    parameters and non-decayed ones (biases and LayerNorm weights)."""
    no_decay_markers = ('bias', 'LayerNorm.bias', 'LayerNorm.weight')
    decay_params, no_decay_params = [], []
    for name, param in model.named_parameters():
        if any(marker in name for marker in no_decay_markers):
            no_decay_params.append(param)
        else:
            decay_params.append(param)
    return [
        {'params': decay_params, 'weight_decay': args.weight_decay},
        {'params': no_decay_params, 'weight_decay': 0.0},
    ]
def main(args):
    """Training entry point: seed, set up the run directory and logging,
    build model and loaders, wrap with DeepSpeed if enabled, and train."""
    set_seed(args.seed)
    torch.cuda.set_device(args.local_rank)
    os.makedirs(args.output_dir, exist_ok=True)
    if args.deepspeed:
        deepspeed.init_distributed()
    nowtime = None
    # Saving arguments
    # Only rank 0 creates the timestamped run directory and dumps the config.
    if not args.deepspeed or (args.deepspeed and dist.get_rank() == 0):
        nowtime = time.strftime('%Y-%m-%d-%H:%M:%S', time.localtime())
        os.makedirs(os.path.join(args.output_dir, 'opt', nowtime), exist_ok=True)
        with open(os.path.join(args.output_dir, 'opt', nowtime, 'config.json'), 'w') as f:
            json.dump(args.__dict__, f, indent=1, ensure_ascii=False)
        if args.deepspeed_config is not None:
            os.system(f'cp {args.deepspeed_config} {os.path.join(args.output_dir, "opt", nowtime)}')
    if args.deepspeed:
        # Share rank 0's timestamp so every rank logs into the same directory.
        nowtime = boardcast_str(nowtime, src_rank=0)
        logger = get_logger(os.path.join(args.output_dir, 'opt', nowtime, f'log.{dist.get_rank()}.txt'))
    else:
        logger = get_logger(os.path.join(args.output_dir, 'opt', nowtime, f'log.txt'))
    logger.info(json.dumps(vars(args), indent=2))
    model = MQAT5Model(args, pretrained_dir=args.pretrained_dir)
    train_loader = get_mqa_loader(args, root=args.root, tokenizer=model.tokenizer, batch_size=args.batch_size, split='train', num_workers=args.n_workers)
    val_loader = get_mqa_loader(args, root=args.root, tokenizer=model.tokenizer, batch_size=args.batch_size, split='val', num_workers=args.n_workers)
    test_loader = get_mqa_loader(args, root=args.root, tokenizer=model.tokenizer, batch_size=args.inf_batch_size, split='test', num_workers=args.n_workers)
    model.resize_token_embeddings()
    model.cuda()
    if args.deepspeed:
        model, optimizer, _, _ = deepspeed.initialize(
            args=args,
            model=model,
            model_parameters=get_parameter_group(args, model)
        )
    train(args, model, train_loader, val_loader, test_loader, optimizer, logger, val_metric=args.val_metric)
if __name__ == '__main__':
    # Parse CLI args; values from --config override CLI defaults
    # (except local_rank, which must come from the launcher).
    parser = get_base_parser()
    args = parser.parse_args()
    if args.config is not None:
        args_dict = json.load(open(args.config, 'r', encoding='utf-8'))
        for key, value in args_dict.items():
            if key == 'local_rank':
                continue
            setattr(args, key, value)
    if args.debug:
        args.output_dir = "expr/debug"
main(args) | 9,408 | 45.122549 | 192 | py |
MPMQA | MPMQA-master/detector/setup.py | #!/usr/bin/env python
import glob
import os
from setuptools import find_packages, setup
import torch
from torch.utils.cpp_extension import CUDA_HOME, CppExtension, CUDAExtension
# Parse the (major, minor) PyTorch version and refuse to build on < 1.3.
torch_ver = [int(x) for x in torch.__version__.split(".")[:2]]
assert torch_ver >= [1, 3], "Requires PyTorch >= 1.3"
def get_extensions():
    """Collect the C++/CUDA sources under bua/caffe/modeling/layers/csrc and
    build the extension module spec for ``bua.caffe.modeling._C``.

    CUDA sources and nvcc flags are included when CUDA is available (or
    FORCE_CUDA=1); otherwise a CPU-only CppExtension is built.
    """
    this_dir = os.path.dirname(os.path.abspath(__file__))
    extensions_dir = os.path.join(this_dir, "bua","caffe", "modeling","layers", "csrc")
    main_source = os.path.join(extensions_dir, "vision.cpp")
    sources = glob.glob(os.path.join(extensions_dir, "**", "*.cpp"))
    source_cuda = glob.glob(os.path.join(extensions_dir, "**", "*.cu")) + glob.glob(
        os.path.join(extensions_dir, "*.cu")
    )
    sources = [main_source] + sources
    extension = CppExtension
    extra_compile_args = {"cxx": []}
    define_macros = []
    if (torch.cuda.is_available() and CUDA_HOME is not None) or os.getenv("FORCE_CUDA", "0") == "1":
        extension = CUDAExtension
        sources += source_cuda
        define_macros += [("WITH_CUDA", None)]
        extra_compile_args["nvcc"] = [
            "-DCUDA_HAS_FP16=1",
            "-D__CUDA_NO_HALF_OPERATORS__",
            "-D__CUDA_NO_HALF_CONVERSIONS__",
            "-D__CUDA_NO_HALF2_OPERATORS__",
        ]
    # NOTE(review): the glob results are already absolute, so this join is a
    # no-op for them (os.path.join returns an absolute second argument as-is).
    sources = [os.path.join(extensions_dir, s) for s in sources]
    include_dirs = [extensions_dir]
    ext_modules = [
        extension(
            "bua.caffe.modeling._C",
            sources,
            include_dirs=include_dirs,
            define_macros=define_macros,
            extra_compile_args=extra_compile_args,
        )
    ]
    return ext_modules
# Package definition: builds the native _C extension via torch's BuildExtension.
setup(
    name="bottom-up-attention.pytorch",
    packages=find_packages(exclude=("configs", "tests")),
    python_requires=">=3.6",
    ext_modules=get_extensions(),
    cmdclass={"build_ext": torch.utils.cpp_extension.BuildExtension},
)
| 1,911 | 27.537313 | 100 | py |
MPMQA | MPMQA-master/detector/train_det.py | import logging
import os
from collections import OrderedDict
import torch
from torch.nn.parallel import DistributedDataParallel
import detectron2.utils.comm as comm
import bua.d2.modeling.roi_heads
from bua import add_config
from detectron2.checkpoint import DetectionCheckpointer, PeriodicCheckpointer
from detectron2.config import get_cfg
from detectron2.data import (
MetadataCatalog,
build_detection_test_loader,
build_detection_train_loader,
)
from bua.d2 import (
build_detection_test_loader_with_attributes,
build_detection_train_loader_with_attributes
)
from detectron2.engine import default_argument_parser, default_setup, launch
from detectron2.evaluation import (
CityscapesInstanceEvaluator,
CityscapesSemSegEvaluator,
COCOEvaluator,
COCOPanopticEvaluator,
DatasetEvaluators,
LVISEvaluator,
PascalVOCDetectionEvaluator,
SemSegEvaluator,
inference_on_dataset,
print_csv_format,
)
from dataset import publaynet, balloon, vg
from detectron2.modeling import build_model
from detectron2.solver import build_lr_scheduler, build_optimizer
from detectron2.utils.events import EventStorage
logger = logging.getLogger("detectron2")
def get_evaluator(cfg, dataset_name, output_folder=None):
    """
    Create evaluator(s) for a given dataset.
    This uses the special metadata "evaluator_type" associated with each builtin dataset.
    For your own dataset, you can simply create an evaluator manually in your
    script and do not have to worry about the hacky if-else logic here.
    """
    if output_folder is None:
        output_folder = os.path.join(cfg.OUTPUT_DIR, "inference")
    evaluator_list = []
    evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type
    # Types that can stack multiple evaluators (sem_seg/coco/panoptic).
    if evaluator_type in ["sem_seg", "coco_panoptic_seg"]:
        evaluator_list.append(
            SemSegEvaluator(
                dataset_name,
                distributed=True,
                output_dir=output_folder,
            )
        )
    if evaluator_type in ["coco", "coco_panoptic_seg"]:
        evaluator_list.append(COCOEvaluator(dataset_name, output_dir=output_folder))
    if evaluator_type == "coco_panoptic_seg":
        evaluator_list.append(COCOPanopticEvaluator(dataset_name, output_folder))
    # Single-evaluator types return immediately.
    if evaluator_type == "cityscapes_instance":
        return CityscapesInstanceEvaluator(dataset_name)
    if evaluator_type == "cityscapes_sem_seg":
        return CityscapesSemSegEvaluator(dataset_name)
    if evaluator_type == "pascal_voc":
        return PascalVOCDetectionEvaluator(dataset_name)
    if evaluator_type == "lvis":
        return LVISEvaluator(dataset_name, cfg, True, output_folder)
    if len(evaluator_list) == 0:
        raise NotImplementedError(
            "no Evaluator for the dataset {} with the type {}".format(dataset_name, evaluator_type)
        )
    if len(evaluator_list) == 1:
        return evaluator_list[0]
    return DatasetEvaluators(evaluator_list)
def do_test(cfg, model):
    """Run inference on every dataset in ``cfg.DATASETS.TEST`` and return a
    mapping of dataset name -> evaluation results (unwrapped if only one)."""
    results = OrderedDict()
    for dataset_name in cfg.DATASETS.TEST:
        # Attribute heads need a loader that also carries attribute annotations.
        if cfg.MODEL.ATTRIBUTE_ON:
            data_loader = build_detection_test_loader_with_attributes(cfg, dataset_name)
        else:
            data_loader = build_detection_test_loader(cfg, dataset_name)
        evaluator = get_evaluator(
            cfg, dataset_name, os.path.join(cfg.OUTPUT_DIR, "inference", dataset_name)
        )
        results_i = inference_on_dataset(model, data_loader, evaluator)
        results[dataset_name] = results_i
        if comm.is_main_process():
            logger.info("Evaluation results for {} in csv format:".format(dataset_name))
            print_csv_format(results_i)
    if len(results) == 1:
        results = list(results.values())[0]
    return results
def do_train(cfg, model, resume=False):
    """Minimal detectron2 training loop (iteration-based): builds optimizer,
    scheduler and checkpointer, optionally resumes, and periodically
    checkpoints and evaluates per ``cfg.SOLVER`` / ``cfg.TEST`` settings."""
    model.train()
    optimizer = build_optimizer(cfg, model)
    scheduler = build_lr_scheduler(cfg, optimizer)
    os.makedirs(os.path.join(cfg.OUTPUT_DIR, 'ckpts'), exist_ok=True)
    checkpointer = DetectionCheckpointer(
        model, os.path.join(cfg.OUTPUT_DIR, 'ckpts'), optimizer=optimizer, scheduler=scheduler
    )
    # Resume from the last checkpoint's iteration, or start at 0.
    start_iter = (
        checkpointer.resume_or_load(cfg.MODEL.WEIGHTS, resume=resume).get("iteration", -1) + 1
    )
    max_iter = cfg.SOLVER.MAX_ITER
    periodic_checkpointer = PeriodicCheckpointer(
        checkpointer, cfg.SOLVER.CHECKPOINT_PERIOD, max_iter=max_iter
    )
    # writers = default_writers(cfg.OUTPUT_DIR, max_iter) if comm.is_main_process() else []
    # compared to "train_net.py", we do not support accurate timing and
    # precise BN here, because they are not trivial to implement in a small training loop
    if cfg.MODEL.ATTRIBUTE_ON:
        data_loader = build_detection_train_loader_with_attributes(cfg)
    else:
        data_loader = build_detection_train_loader(cfg)
    logger.info("Starting training from iteration {}".format(start_iter))
    with EventStorage(start_iter) as storage:
        for data, iteration in zip(data_loader, range(start_iter, max_iter)):
            # storage.iter = iteration
            loss_dict = model(data)
            losses = sum(loss_dict.values())
            assert torch.isfinite(losses).all(), loss_dict
            # Average losses across workers for logging only.
            loss_dict_reduced = {k: v.item() for k, v in comm.reduce_dict(loss_dict).items()}
            losses_reduced = sum(loss for loss in loss_dict_reduced.values())
            if comm.is_main_process():
                storage.put_scalars(total_loss=losses_reduced, **loss_dict_reduced)
            optimizer.zero_grad()
            losses.backward()
            optimizer.step()
            storage.put_scalar("lr", optimizer.param_groups[0]["lr"], smoothing_hint=False)
            scheduler.step()
            if (
                cfg.TEST.EVAL_PERIOD > 0
                and (iteration + 1) % cfg.TEST.EVAL_PERIOD == 0
                and iteration != max_iter - 1
            ):
                do_test(cfg, model)
                # Compared to "train_net.py", the test results are not dumped to EventStorage
                comm.synchronize()
            # if iteration - start_iter > 5 and (
            #     (iteration + 1) % 20 == 0 or iteration == max_iter - 1
            # ):
            #     for writer in writers:
            #         writer.write()
            periodic_checkpointer.step(iteration)
def setup(args):
    """
    Create configs and perform basic setups.
    """
    cfg = get_cfg()
    # Project-specific config keys must be registered before merging files.
    add_config(args, cfg)
    cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()
    default_setup(
        cfg, args
    )  # if you don't like any of the default setup, write your own setup code
    return cfg
def main(args):
    """Entry point: build the model, then either evaluate or train+evaluate."""
    cfg = setup(args)
    model = build_model(cfg)
    logger.info("Model:\n{}".format(model))
    if args.eval_only:
        # Evaluation-only mode: restore weights and run the test loop.
        DetectionCheckpointer(model, save_dir=cfg.OUTPUT_DIR).resume_or_load(
            cfg.MODEL.WEIGHTS, resume=args.resume
        )
        return do_test(cfg, model)
    # Wrap for multi-GPU training when launched with more than one process.
    if comm.get_world_size() > 1:
        model = DistributedDataParallel(
            model, device_ids=[comm.get_local_rank()], broadcast_buffers=False
        )
    do_train(cfg, model, resume=args.resume)
    return do_test(cfg, model)
if __name__ == "__main__":
    args = default_argument_parser().parse_args()
    # Run the detectron2-style ("d2") model variant; `add_config` dispatches
    # on this attribute. (Plain assignment instead of args.__setattr__.)
    args.mode = "d2"
    print("Command Line Args:", args)
    launch(
        main,
        args.num_gpus,
        num_machines=args.num_machines,
        machine_rank=args.machine_rank,
        dist_url=args.dist_url,
        args=(args,),
    )
import numpy as np
import cv2
import torch
import torch.nn as nn
from detectron2.config import get_cfg
from detectron2.modeling import build_model
from detectron2.structures import ImageList, Boxes
from detectron2.checkpoint import DetectionCheckpointer
import sys
sys.path.append('detector')
from bua.d2 import add_attribute_config
class ROIFeatExtractor(nn.Module):
    """
    Wraps a detectron2 detection model to extract per-region (ROI) features
    for externally supplied bounding boxes instead of running full detection.
    Supports both the standard detectron2 R-CNN head layout and the
    bottom-up-attention ("bua") layout, selected via the `bua` flag.
    """
    def __init__(self, model_cfg, weights, bua=False):
        """
        Args:
            model_cfg (str): path to a detectron2 config file.
            weights (str): path to the checkpoint to load.
            bua (bool): if True, use the bottom-up-attention head layout
                (requires the extra attribute-head config keys).
        """
        super().__init__()
        self.cfg = get_cfg()
        self.bua = bua
        if self.bua:
            # BUA configs reference extra keys; register them before merging.
            add_attribute_config(self.cfg)
        self.cfg.merge_from_file(model_cfg)
        self.cfg.MODEL.WEIGHTS = weights
        self.model = build_model(self.cfg)
        checkpointer = DetectionCheckpointer(self.model)
        checkpointer.load(self.cfg.MODEL.WEIGHTS)
    def preprocess_images(self, images):
        """Move, CHW-permute, normalize and batch raw HWC images."""
        images = [img.to(self.model.device) for img in images]
        images = [img.permute(2, 0, 1) for img in images]
        images = [(x - self.model.pixel_mean) / self.model.pixel_std for x in images]
        # Pad to the backbone's size divisibility and stack into one tensor.
        images = ImageList.from_tensors(images, self.model.backbone.size_divisibility)
        return images
    def convert_bbox(self, bboxes):
        """Wrap raw Nx4 tensors as detectron2 Boxes on the model device."""
        bboxes = [Boxes(b.to(self.model.device)) for b in bboxes]
        return bboxes
    def roi_align(self, grid_features, bboxes):
        """Pool per-box features from the backbone feature maps.

        Returns a tuple of tensors, one per image (split by box count).
        """
        nbbs = [len(b) for b in bboxes]
        if self.bua:
            # BUA: shared res5 transform, then global-average-pool per box.
            box_features = self.model.roi_heads._shared_roi_transform(
                [grid_features[f] for f in self.model.roi_heads.in_features],
                bboxes)
            box_features = box_features.mean(dim=[2, 3])
        else:
            # Standard heads: pooler + box head produce the FC features.
            grid_features = [grid_features[f] for f in self.model.roi_heads.box_in_features]
            box_features = self.model.roi_heads.box_pooler(grid_features, bboxes)
            box_features = self.model.roi_heads.box_head(box_features)
        box_features = box_features.split(nbbs)
        return box_features
    def forward(self, images, bboxes):
        """
        args:
            image - BGR image list [H x W x C, ]
            bboxes - Boxes of each image, list [N x 4,]
        Returns per-image ROI feature tensors for the given boxes.
        """
        images = self.preprocess_images(images)
        bboxes = self.convert_bbox(bboxes)
        grid_features = self.model.backbone(images.tensor)
        roi_features = self.roi_align(grid_features, bboxes)
        return roi_features
    def predict(self, roi_features):
        """Classify pooled ROI features.

        Returns (predicted class ids, raw logits) per image. The last logit
        column is the background class and is dropped before the argmax.
        """
        predictions = []
        logits = []
        for f in roi_features:
            logits.append(
                self.model.roi_heads.box_predictor(f)[0]
            )
        for l in logits:
            predictions.append(
                l[:, :-1].argmax(dim=-1)
            )
        return predictions, logits
if __name__ == '__main__':
    # Smoke test: extract ROI features for two sample images and check that
    # re-predicted classes match the cached detections.
    # extractor = ROIFeatExtractor('expr/vg-rcnn-3x/config.yaml', 'expr/vg-rcnn-3x/output/ckpts/model_final.pth')
    # NOTE: the original instantiated the misspelled name `ROIFeatExtrator`,
    # which raised NameError before anything ran; fixed to the real class.
    extractor = ROIFeatExtractor('expr/vg-bua/config.yaml', 'pretrained/bua-d2-frcn-r101.pth', bua=True)
    extractor.eval()
    images = [torch.from_numpy(cv2.imread('2344092.jpg'))]
    bboxes = [torch.from_numpy(np.load('expr/vg-rcnn-3x/output/result_samples/2344092/boxes.npy'))]
    classes = [torch.from_numpy(np.load('expr/vg-rcnn-3x/output/result_samples/2344092/classes.npy'))]
    images.append(torch.from_numpy(cv2.imread('data/VisualGenome/VG_100K/2368275.jpg')))
    bboxes.append(torch.from_numpy(np.load('expr/vg-rcnn-3x/output/result_samples/2368275/boxes.npy')))
    classes.append(torch.from_numpy(np.load('expr/vg-rcnn-3x/output/result_samples/2368275/classes.npy')))
    with torch.no_grad():
        roi_features = extractor(images, bboxes)
        predictions, logits = extractor.predict(roi_features)
    # Drop into the debugger only when a prediction disagrees with the cache.
    # (The original also had an unconditional breakpoint here, which stalled
    # every run; removed.)
    for c1, p1 in zip(classes, predictions):
        if not all(c1 == p1.detach().cpu()):
            import pdb; pdb.set_trace()
import os, io
import numpy as np
import copy
import torch
import logging
import pickle as cPickle
import itertools
import contextlib
from pycocotools.coco import COCO
from collections import OrderedDict
from fvcore.common.file_io import PathManager
import detectron2.utils.comm as comm
from detectron2.data import MetadataCatalog
from detectron2.evaluation.evaluator import DatasetEvaluator
from detectron2.data.datasets.coco import convert_to_coco_json
from detectron2.evaluation.coco_evaluation import instances_to_coco_json
from .vg_eval import vg_eval
class VGEvaluator(DatasetEvaluator):
    """
    Evaluate object proposal, instance detection
    outputs using VG's metrics and APIs.

    Results are written as PASCAL-VOC-style per-class detection files and
    scored with `vg_eval` (mean AP, weighted mean AP, per-class thresholds).
    """
    def __init__(self, dataset_name, cfg, distributed, output_dir=None):
        """
        Args:
            dataset_name (str): name of the dataset to be evaluated.
                It must have either the following corresponding metadata:
                "json_file": the path to the COCO format annotation
                Or it must be in detectron2's standard dataset format
                so it can be converted to COCO format automatically.
            cfg (CfgNode): config instance
            distributed (True): if True, will collect results from all ranks for evaluation.
                Otherwise, will evaluate the results in the current process.
            output_dir (str): optional, an output directory to dump all
                results predicted on the dataset. The dump contains two files:
                1. "instance_predictions.pth" a file in torch serialization
                   format that contains all the raw original predictions.
                2. "coco_instances_results.json" a json file in COCO's result
                   format.
        """
        self._tasks = self._tasks_from_config(cfg)
        self._distributed = distributed
        self._logger = logging.getLogger(__name__)
        self._cpu_device = torch.device("cpu")
        self._output_dir = output_dir
        self._metadata = MetadataCatalog.get(dataset_name)
        if not hasattr(self._metadata, "json_file"):
            # No COCO-format annotation registered: convert and cache one.
            self._logger.warning(f"json_file was not found in MetaDataCatalog for '{dataset_name}'")
            cache_path = os.path.join(output_dir, f"{dataset_name}_vg_format.json")
            self._metadata.json_file = cache_path
            convert_to_coco_json(dataset_name, cache_path)
        json_file = PathManager.get_local_path(self._metadata.json_file)
        # Silence pycocotools' prints while loading the annotations.
        with contextlib.redirect_stdout(io.StringIO()):
            self._coco_api = COCO(json_file)
        # Build the class vocabulary; each vocab line may hold comma-separated
        # synonyms that all map to the same index. Index 0 is background.
        self._classes = ['__background__']
        self._class_to_ind = {}
        self._class_to_ind[self._classes[0]] = 0
        with open(os.path.join('evaluation/objects_vocab.txt')) as f:
            count = 1
            for object in f.readlines():
                names = [n.lower().strip() for n in object.split(',')]
                self._classes.append(names[0])
                for n in names:
                    self._class_to_ind[n] = count
                count += 1
        # Load attributes
        self._attributes = ['__no_attribute__']
        self._attribute_to_ind = {}
        self._attribute_to_ind[self._attributes[0]] = 0
        with open(os.path.join('evaluation/attributes_vocab.txt')) as f:
            count = 1
            for att in f.readlines():
                names = [n.lower().strip() for n in att.split(',')]
                self._attributes.append(names[0])
                for n in names:
                    self._attribute_to_ind[n] = count
                count += 1
        self.roidb, self.image_index = self.gt_roidb(self._coco_api)
    def _tasks_from_config(self, cfg):
        """
        Returns:
            tuple[str]: tasks that can be evaluated under the given configuration.
        """
        tasks = ("bbox",)
        if cfg.MODEL.MASK_ON:
            tasks = tasks + ("segm",)
        if cfg.MODEL.KEYPOINT_ON:
            tasks = tasks + ("keypoints",)
        return tasks
    def gt_roidb(self, dataset):
        """Convert COCO-style annotations into the roidb format vg_eval expects.

        Returns (roidb, image_index) where each roidb entry holds XYXY boxes,
        gt class indices (background-shifted), and padded attribute ids.
        """
        roidb = []
        image_index = dataset.imgToAnns.keys()
        for img_index in dataset.imgToAnns:
            tmp_dict = {}
            num_objs = len(dataset.imgToAnns[img_index])
            bboxes = np.zeros((num_objs, 4), dtype=np.uint16)
            gt_attributes = np.zeros((num_objs, 16), dtype=np.int32)
            gt_classes = np.zeros((num_objs), dtype=np.int32)
            for ind, item in enumerate(dataset.imgToAnns[img_index]):
                bboxes[ind, :] = item['bbox']
                gt_classes[ind] = item['category_id'] + 1 # NOTE: +1 shifts past background (index 0)
                for j, attr in enumerate(item['attribute_ids']):
                    gt_attributes[ind, j] = attr
            # COCO boxes are XYWH; convert to XYXY in place.
            bboxes[:, 2] = bboxes[:, 2] + bboxes[:, 0]
            bboxes[:, 3] = bboxes[:, 3] + bboxes[:, 1]
            tmp_dict['boxes'] = bboxes
            tmp_dict['gt_attributes'] = gt_attributes
            tmp_dict['gt_classes'] = gt_classes
            roidb.append(tmp_dict)
        return roidb, image_index
    def reset(self):
        # Clears accumulated predictions before a new evaluation pass.
        self._predictions = []
    def process(self, inputs, outputs):
        """
        Args:
            inputs: the inputs to a COCO model (e.g., GeneralizedRCNN).
                It is a list of dict. Each dict corresponds to an image and
                contains keys like "height", "width", "file_name", "image_id".
            outputs: the outputs of a COCO model. It is a list of dicts with key
                "instances" that contains :class:`Instances`.
        """
        for input, output in zip(inputs, outputs):
            prediction = {"image_id": input["image_id"]}
            # TODO this is ugly
            if "instances" in output:
                instances = output["instances"].to(self._cpu_device)
                prediction["boxes"] = instances.pred_boxes.tensor.numpy()
                prediction["labels"] = instances.pred_classes.numpy()
                prediction["scores"] = instances.scores.numpy()
            self._predictions.append(prediction)
    def evaluate(self):
        """Gather predictions across ranks (if distributed) and run VG eval."""
        if self._distributed:
            comm.synchronize()
            self._predictions = comm.gather(self._predictions, dst=0)
            self._predictions = list(itertools.chain(*self._predictions))
            # Only the main process runs the actual evaluation.
            if not comm.is_main_process():
                return {}
        if len(self._predictions) == 0:
            self._logger.warning("[VGEvaluator] Did not receive valid predictions.")
            return {}
        if self._output_dir:
            PathManager.mkdirs(self._output_dir)
            file_path = os.path.join(self._output_dir, "instances_predictions.pth")
            with PathManager.open(file_path, "wb") as f:
                torch.save(self._predictions, f)
        self._results = OrderedDict()
        self._eval_vg()
        # Copy so the caller can do whatever with results
        return copy.deepcopy(self._results)
    def _eval_vg(self):
        # Dump per-class detection files, then score them.
        self.write_voc_results_file(self._predictions, output_dir=self._output_dir)
        self.do_python_eval(self._output_dir)
    def write_voc_results_file(self, predictions, output_dir):
        """Write one VOC-style detections file per class.

        Each line: image_id score x1 y1 x2 y2 (coordinates 1-based).
        """
        for cls_ind, cls in enumerate(self._classes):
            if cls == '__background__':
                continue
            print('Writing "{}" vg result file'.format(cls))
            filename = self.get_vg_results_file_template(output_dir).format(cls)
            with open(filename, 'wt') as f:
                for pred_ind, item in enumerate(predictions):
                    scores = item["scores"]
                    # Model labels are 0-based; +1 aligns with the
                    # background-shifted class indices.
                    labels = item["labels"]+1
                    bbox = item["boxes"]
                    if cls_ind not in labels:
                        continue
                    dets = bbox[labels==cls_ind]
                    scores = scores[labels==cls_ind]
                    for k in range(dets.shape[0]):
                        f.write('{:s} {:.3f} {:.1f} {:.1f} {:.1f} {:.1f}\n'.
                                format(str(item["image_id"]), scores[k],
                                       dets[k, 0] + 1, dets[k, 1] + 1,
                                       dets[k, 2] + 1, dets[k, 3] + 1))
    def get_vg_results_file_template(self, output_dir, pickle=True, eval_attributes = False):
        """Return the per-class detections file path template ({:s} = class)."""
        filename = 'detections_vg'+'_{:s}.txt'
        path = os.path.join(output_dir, filename)
        return path
    def do_python_eval(self, output_dir, pickle=True, eval_attributes = False):
        """Score the written detection files with vg_eval and print AP stats.

        Also derives a per-class detection threshold that maximizes F-score,
        and writes all thresholds to a text file.
        """
        # We re-use parts of the pascal voc python code for visual genome
        aps = []
        nposs = []
        thresh = []
        # The PASCAL VOC metric changed in 2010
        use_07_metric = False
        print('VOC07 metric? ' + ('Yes' if use_07_metric else 'No'))
        if not os.path.isdir(output_dir):
            os.mkdir(output_dir)
        # Load ground truth
        if eval_attributes:
            classes = self._attributes
        else:
            classes = self._classes
        for i, cls in enumerate(classes):
            if cls == '__background__' or cls == '__no_attribute__':
                continue
            filename = self.get_vg_results_file_template(output_dir).format(cls)
            rec, prec, ap, scores, npos = vg_eval(
                filename, self.roidb, self.image_index, i, ovthresh=0.5,
                use_07_metric=use_07_metric, eval_attributes=eval_attributes)
            # Determine per class detection thresholds that maximise f score
            if npos > 1 and not (type(prec) == int and type(rec) == int and prec+rec ==0):
                f = np.nan_to_num((prec * rec) / (prec + rec))
                thresh += [scores[np.argmax(f)]]
            else:
                thresh += [0]
            aps += [ap]
            nposs += [float(npos)]
            print('AP for {} = {:.4f} (npos={:,})'.format(cls, ap, npos))
            if pickle:
                # Persist the PR curve for this class.
                with open(os.path.join(output_dir, cls + '_pr.pkl'), 'wb') as f:
                    cPickle.dump({'rec': rec, 'prec': prec, 'ap': ap,
                                  'scores': scores, 'npos': npos}, f)
        # Set thresh to mean for classes with poor results
        thresh = np.array(thresh)
        avg_thresh = np.mean(thresh[thresh != 0])
        thresh[thresh == 0] = avg_thresh
        if eval_attributes:
            filename = 'attribute_thresholds_vg.txt'
        else:
            filename = 'object_thresholds_vg.txt'
        path = os.path.join(output_dir, filename)
        with open(path, 'wt') as f:
            for i, cls in enumerate(classes[1:]):
                f.write('{:s} {:.3f}\n'.format(cls, thresh[i]))
        # npos-weighted mean AP gives frequent classes more influence.
        weights = np.array(nposs)
        weights /= weights.sum()
        print('Mean AP = {:.4f}'.format(np.mean(aps)))
        print('Weighted Mean AP = {:.4f}'.format(np.average(aps, weights=weights)))
        print('Mean Detection Threshold = {:.3f}'.format(avg_thresh))
        print('Results computed with the **unofficial** PASCAL VOC Python eval code.')
        print('--------------------------------------------------------------')
from .d2 import add_attribute_config
from .caffe import add_bottom_up_attention_config
def add_config(args, cfg):
    """Register model-variant-specific config keys on `cfg`.

    Args:
        args: parsed CLI args; `args.mode` selects the variant
            ("caffe" or "d2").
        cfg (CfgNode): config to extend in place.

    Raises:
        Exception: if `args.mode` names an unsupported variant.
    """
    if args.mode == "caffe":
        add_bottom_up_attention_config(cfg, True)
    elif args.mode == "d2":
        add_attribute_config(cfg)
    else:
        # BUG FIX: the message previously read `args.model`, which does not
        # exist, so an AttributeError masked the intended error message.
        raise Exception("detection model not supported: {}".format(args.mode))
from . import visual_genome
import torch
from torch import nn
from torch.nn import functional as F
from detectron2.layers import ShapeSpec
from detectron2.modeling.roi_heads import (
build_box_head,
build_mask_head,
select_foreground_proposals,
ROI_HEADS_REGISTRY,
ROI_BOX_HEAD_REGISTRY,
ROIHeads,
Res5ROIHeads,
StandardROIHeads,
)
from detectron2.modeling.roi_heads.box_head import FastRCNNConvFCHead
from detectron2.modeling.roi_heads.fast_rcnn import FastRCNNOutputLayers
from detectron2.modeling.poolers import ROIPooler
from bua.caffe.modeling.box_regression import BUABox2BoxTransform
"""
roi head for mode detectron2
"""
@ROI_BOX_HEAD_REGISTRY.register()
class AttributeFastRCNNConvFCHead(FastRCNNConvFCHead):
    """
    FastRCNNConvFCHead variant whose forward returns both the final FC
    activation and the input to the last FC layer (used as the attribute
    feature), instead of only the final output.
    """
    def forward(self, x):
        # Run the conv(+norm+relu) stack first.
        for conv in self.conv_norm_relus:
            x = conv(x)
        penultimate = None
        if self.fcs:
            if x.dim() > 2:
                x = torch.flatten(x, start_dim=1)
            # Track the input of each FC layer; after the loop `penultimate`
            # holds the input to the last FC.
            for fc in self.fcs:
                penultimate = x
                x = F.relu(fc(penultimate))
        return x, penultimate
class AttributePredictor(nn.Module):
    """
    Attribute classification head: concatenates region features with an
    embedding of the predicted object class and scores every attribute,
    plus the corresponding multi-label training loss.
    """
    def __init__(self, cfg, input_dim):
        super().__init__()
        # fmt: off
        self.num_objs = cfg.MODEL.ROI_HEADS.NUM_CLASSES
        self.obj_embed_dim = cfg.MODEL.ROI_ATTRIBUTE_HEAD.OBJ_EMBED_DIM
        self.fc_dim = cfg.MODEL.ROI_ATTRIBUTE_HEAD.FC_DIM
        self.num_attributes = cfg.MODEL.ROI_ATTRIBUTE_HEAD.NUM_CLASSES
        self.max_attr_per_ins = cfg.INPUT.MAX_ATTR_PER_INS
        self.loss_weight = cfg.MODEL.ROI_ATTRIBUTE_HEAD.LOSS_WEIGHT
        # fmt: on
        # Object class embedding; one extra row for the background class.
        self.obj_embed = nn.Embedding(self.num_objs + 1, self.obj_embed_dim)
        input_dim += self.obj_embed_dim
        self.fc = nn.Sequential(
            nn.Linear(input_dim, self.fc_dim),
            nn.ReLU()
        )
        self.attr_score = nn.Linear(self.fc_dim, self.num_attributes)
        nn.init.normal_(self.attr_score.weight, std=0.01)
        nn.init.constant_(self.attr_score.bias, 0)
    def forward(self, x, obj_labels):
        """Score attributes from features `x` and integer object labels."""
        fused = torch.cat((x, self.obj_embed(obj_labels)), dim=1)
        return self.attr_score(self.fc(fused))
    def loss(self, score, label):
        """
        Multi-label attribute loss. Each instance carries up to
        `max_attr_per_ins` GT attribute ids (padded with -1); cross-entropy
        over its valid labels is averaged per instance, then averaged over
        instances that have at least one valid attribute.
        """
        num_ins = score.shape[0]
        # Repeat each instance's logits once per possible GT attribute slot.
        expanded = (
            score.unsqueeze(1)
            .expand(num_ins, self.max_attr_per_ins, self.num_attributes)
            .contiguous()
            .view(-1, self.num_attributes)
        )
        # Per-instance count of valid (>= 0) labels, aligned with `expanded`.
        inv_weights = (
            (label >= 0).sum(dim=1).repeat(self.max_attr_per_ins, 1).transpose(0, 1).flatten()
        )
        weights = inv_weights.float().reciprocal()
        # Instances with zero valid labels produce 1/0 = inf; zero them out.
        weights[weights > 1] = 0.
        n_valid = len((label >= 0).sum(dim=1).nonzero())
        flat_labels = label.view(-1)
        attr_loss = F.cross_entropy(expanded, flat_labels, reduction="none", ignore_index=-1)
        attr_loss = (attr_loss * weights).view(num_ins, -1).sum(dim=1)
        if n_valid > 0:
            attr_loss = attr_loss.sum() * self.loss_weight / n_valid
        else:
            attr_loss = attr_loss.sum() * 0.
        return {"loss_attr": attr_loss}
class AttributeROIHeads(ROIHeads):
    """
    Mixin extending ROIHeads with attribute prediction: scoring, the
    attribute loss, and raw foreground attribute scores.
    """
    def forward_attribute_score(self, box_features, obj_labels):
        """Compute attribute logits for the given features and class labels."""
        return self.attribute_predictor(box_features, obj_labels)
    def _foreground_attribute_scores(self, proposals, box_features):
        """Select foreground proposals and return (attr logits, attr labels)."""
        proposals, fg_selection = select_foreground_proposals(
            proposals, self.num_classes
        )
        fg_features = box_features[torch.cat(fg_selection, dim=0)]
        obj_labels = torch.cat([p.gt_classes for p in proposals])
        attr_labels = torch.cat([p.gt_attributes for p in proposals], dim=0)
        scores = self.attribute_predictor(fg_features, obj_labels)
        return scores, attr_labels
    def forward_attribute_loss(self, proposals, box_features):
        """Attribute loss over foreground proposals."""
        scores, labels = self._foreground_attribute_scores(proposals, box_features)
        return self.attribute_predictor.loss(scores, labels)
    def forward_attr(self, proposals, box_features):
        """Raw attribute logits over foreground proposals (no loss)."""
        scores, _ = self._foreground_attribute_scores(proposals, box_features)
        return scores
@ROI_HEADS_REGISTRY.register()
class AttributeRes5ROIHeads(AttributeROIHeads, Res5ROIHeads):
    """
    An extension of Res5ROIHeads to include attribute prediction.

    Besides training/inference, supports a feature-extraction mode
    (cfg.MODEL.BUA.EXTRACT_FEATS) whose return shape depends on
    cfg.MODEL.BUA.EXTRACTOR.MODE (1/3: pooled features [+ attributes];
    2: regressed boxes + class probabilities).
    """
    def __init__(self, cfg, input_shape):
        # super(Res5ROIHeads, self).__init__(cfg, input_shape) # d2 0.1.1
        super(Res5ROIHeads, self).__init__(cfg) # d2 0.2.1
        # added to fit d2 0.2.1
        self.in_features = cfg.MODEL.ROI_HEADS.IN_FEATURES
        assert len(self.in_features) == 1
        # fmt: off
        pooler_resolution = cfg.MODEL.ROI_BOX_HEAD.POOLER_RESOLUTION
        pooler_type = cfg.MODEL.ROI_BOX_HEAD.POOLER_TYPE
        pooler_scales = (1.0 / input_shape[self.in_features[0]].stride, )
        sampling_ratio = cfg.MODEL.ROI_BOX_HEAD.POOLER_SAMPLING_RATIO
        self.mask_on = cfg.MODEL.MASK_ON
        self.attribute_on = cfg.MODEL.BUA.ATTRIBUTE_ON
        self.extract_on = cfg.MODEL.BUA.EXTRACT_FEATS
        self.extractor_mode = cfg.MODEL.BUA.EXTRACTOR.MODE
        # fmt: on
        assert not cfg.MODEL.KEYPOINT_ON
        self.pooler = ROIPooler(
            output_size=pooler_resolution,
            scales=pooler_scales,
            sampling_ratio=sampling_ratio,
            pooler_type=pooler_type,
        )
        self.box2box_transform = BUABox2BoxTransform(weights=cfg.MODEL.ROI_BOX_HEAD.BBOX_REG_WEIGHTS)
        self.res5, out_channels = self._build_res5_block(cfg)
        self.box_predictor = FastRCNNOutputLayers(
            cfg, ShapeSpec(channels=out_channels, height=1, width=1)
        )
        if self.mask_on:
            self.mask_head = build_mask_head(
                cfg,
                ShapeSpec(channels=out_channels, width=pooler_resolution, height=pooler_resolution),
            )
        if self.attribute_on:
            self.attribute_predictor = AttributePredictor(cfg, out_channels)
    def forward(self, images, features, proposals, targets=None):
        """Run the res5 head; branch on training / extraction / inference."""
        del images
        if self.training:
            assert targets
            proposals = self.label_and_sample_proposals(proposals, targets)
        del targets
        proposal_boxes = [x.proposal_boxes for x in proposals]
        box_features = self._shared_roi_transform(
            [features[f] for f in self.in_features], proposal_boxes
        )
        # Global average pool the res5 output to one vector per box.
        feature_pooled = box_features.mean(dim=[2, 3])
        predictions = self.box_predictor(feature_pooled)
        if self.training:
            del features
            losses = self.box_predictor.losses(predictions, proposals)
            if self.mask_on:
                proposals, fg_selection_masks = select_foreground_proposals(
                    proposals, self.num_classes
                )
                mask_features = box_features[torch.cat(fg_selection_masks, dim=0)]
                del box_features
                losses.update(self.mask_head(mask_features, proposals))
            if self.attribute_on:
                losses.update(self.forward_attribute_loss(proposals, feature_pooled))
            return [], losses
        elif self.extract_on:
            pred_class_logits, pred_proposal_deltas = predictions
            # pred_class_logits = pred_class_logits[:, :-1]  # background is last
            # Hard class labels condition the attribute predictor.
            cls_lables = torch.argmax(pred_class_logits, dim=1)
            num_preds_per_image = [len(p) for p in proposals]
            if self.extractor_mode == 1 or self.extractor_mode == 3:
                if self.attribute_on:
                    attr_scores = self.forward_attribute_score(feature_pooled, cls_lables)
                    return proposal_boxes, self.predict_probs(pred_class_logits, num_preds_per_image), feature_pooled.split(num_preds_per_image, dim=0), attr_scores.split(num_preds_per_image, dim=0)
                else:
                    return proposal_boxes, self.predict_probs(pred_class_logits, num_preds_per_image), feature_pooled.split(num_preds_per_image, dim=0)
            elif self.extractor_mode == 2:
                return self.predict_boxes(proposals, pred_proposal_deltas, num_preds_per_image), self.predict_probs(pred_class_logits, num_preds_per_image)
            else:
                raise ValueError('BUA.EXTRATOR.MODE ERROR')
        else:
            pred_instances, _ = self.box_predictor.inference(predictions, proposals)
            pred_instances = self.forward_with_given_boxes(features, pred_instances)
            return pred_instances, {}
    def get_conv5_features(self, features):
        # Apply the res5 block to the (single) input feature map.
        features = [features[f] for f in self.in_features]
        return self.res5(features[0])
    def get_roi_features(self, features, proposals):
        """Return (res5 box features, pooled vectors, None) for proposals."""
        assert len(self.in_features) == 1
        features = [features[f] for f in self.in_features]
        box_features = self._shared_roi_transform(
            features, [x.proposal_boxes for x in proposals]
        )
        pooled_features = box_features.mean(dim=[2, 3])
        return box_features, pooled_features, None
    def predict_boxes(self, proposals, pred_proposal_deltas, num_preds_per_image):
        """
        Returns:
            list[Tensor]: A list of Tensors of predicted class-specific or class-agnostic boxes
                for each image. Element i has shape (Ri, K * B) or (Ri, B), where Ri is
                the number of predicted objects for image i and B is the box dimension (4 or 5)
        """
        # Always use 1 image per worker during inference since this is the
        # standard when reporting inference time in papers.
        box_type = type(proposals[0].proposal_boxes)
        # cat(..., dim=0) concatenates over all images in the batch
        proposals = box_type.cat([p.proposal_boxes for p in proposals])
        num_pred = len(proposals)
        B = proposals.tensor.shape[1]
        K = pred_proposal_deltas.shape[1] // B
        boxes = self.box2box_transform.apply_deltas(
            pred_proposal_deltas,
            proposals.tensor,
        )
        return boxes.view(num_pred, K * B).split(num_preds_per_image, dim=0)
    def predict_probs(self, pred_class_logits, num_preds_per_image):
        """
        Returns:
            list[Tensor]: A list of Tensors of predicted class probabilities for each image.
                Element i has shape (Ri, K + 1), where Ri is the number of predicted objects
                for image i.
        """
        probs = F.softmax(pred_class_logits, dim=-1)
        probs = probs[:, :-1] # background is last
        return probs.split(num_preds_per_image, dim=0)
@ROI_HEADS_REGISTRY.register()
class AttributeStandardROIHeads(AttributeROIHeads, StandardROIHeads):
    """
    An extension of StandardROIHeads to include attribute prediction.
    """
    def __init__(self, cfg, input_shape):
        super(StandardROIHeads, self).__init__(cfg, input_shape)
        self._init_box_head(cfg, input_shape)
        self._init_mask_head(cfg, input_shape)
        self._init_keypoint_head(cfg, input_shape)
    def _init_box_head(self, cfg, input_shape):
        """Build pooler, box head, box predictor and (optionally) attribute head."""
        # fmt: off
        pooler_resolution = cfg.MODEL.ROI_BOX_HEAD.POOLER_RESOLUTION
        pooler_scales = tuple(1.0 / input_shape[k].stride for k in self.in_features)
        sampling_ratio = cfg.MODEL.ROI_BOX_HEAD.POOLER_SAMPLING_RATIO
        pooler_type = cfg.MODEL.ROI_BOX_HEAD.POOLER_TYPE
        self.train_on_pred_boxes = cfg.MODEL.ROI_BOX_HEAD.TRAIN_ON_PRED_BOXES
        # NOTE(review): reads cfg.MODEL.ATTRIBUTE_ON here while the Res5 head
        # reads cfg.MODEL.BUA.ATTRIBUTE_ON — confirm both keys are kept in sync.
        self.attribute_on = cfg.MODEL.ATTRIBUTE_ON
        # fmt: on
        in_channels = [input_shape[f].channels for f in self.in_features]
        assert len(set(in_channels)) == 1, in_channels
        in_channels = in_channels[0]
        self.box_pooler = ROIPooler(
            output_size=pooler_resolution,
            scales=pooler_scales,
            sampling_ratio=sampling_ratio,
            pooler_type=pooler_type,
        )
        self.box_head = build_box_head(
            cfg,
            ShapeSpec(
                channels=in_channels, height=pooler_resolution, width=pooler_resolution
            ),
        )
        self.box_predictor = FastRCNNOutputLayers(cfg, self.box_head.output_shape)
        if self.attribute_on:
            self.attribute_predictor = AttributePredictor(
                cfg, self.box_head.output_shape.channels
            )
    def _forward_box(self, features, proposals):
        """Box branch: returns losses (training) or (instances, kept indices)."""
        features = [features[f] for f in self.in_features]
        box_features = self.box_pooler(features, [x.proposal_boxes for x in proposals])
        # The attribute-aware box head returns (last FC output, penultimate).
        box_features, _ = self.box_head(box_features)
        predictions = self.box_predictor(box_features)
        if self.training:
            if self.train_on_pred_boxes:
                # BUG FIX: `Boxes` was never imported at module level, so this
                # branch raised NameError; import it locally here.
                from detectron2.structures import Boxes
                with torch.no_grad():
                    pred_boxes = self.box_predictor.predict_boxes_for_gt_classes(
                        predictions, proposals
                    )
                    for proposals_per_image, pred_boxes_per_image in zip(
                        proposals, pred_boxes
                    ):
                        proposals_per_image.proposal_boxes = Boxes(pred_boxes_per_image)
            losses = self.box_predictor.losses(predictions, proposals)
            if self.attribute_on:
                losses.update(self.forward_attribute_loss(proposals, box_features))
                del box_features
            return losses
        else:
            # Single-image inference: unwrap the per-image lists.
            pred_instances, r_indices = self.box_predictor.inference(
                predictions, proposals
            )
            return pred_instances[0], r_indices[0]
    def get_conv5_features(self, features):
        assert len(self.in_features) == 1
        features = [features[f] for f in self.in_features]
        return features[0]
    def get_roi_features(self, features, proposals):
        """Return (pooled grid features, fc7, fc6) for the given proposals."""
        features = [features[f] for f in self.in_features]
        box_features = self.box_pooler(features, [x.proposal_boxes for x in proposals])
        fc7, fc6 = self.box_head(box_features)
        return box_features, fc7, fc6
import logging
import operator
import torch.utils.data
from detectron2.utils.comm import get_world_size
from detectron2.data import samplers
from detectron2.data.build import get_detection_dataset_dicts, worker_init_reset_seed, trivial_batch_collator
from detectron2.data.common import AspectRatioGroupedDataset, DatasetFromList, MapDataset
from .dataset_mapper import AttributeDatasetMapper
"""
data_loader for mode detectron2
"""
def build_detection_train_loader_with_attributes(cfg, mapper=None):
    """
    Build the training DataLoader. Mirrors detectron2's
    build_detection_train_loader, except the default mapper is
    AttributeDatasetMapper so per-instance attributes are loaded.

    Args:
        cfg (CfgNode): full training config.
        mapper (callable, optional): dataset-dict -> model-input mapper;
            defaults to AttributeDatasetMapper(cfg, True).

    Returns:
        an iterable yielding batches of `images_per_worker` mapped samples.
    """
    num_workers = get_world_size()
    images_per_batch = cfg.SOLVER.IMS_PER_BATCH
    assert (
        images_per_batch % num_workers == 0
    ), "SOLVER.IMS_PER_BATCH ({}) must be divisible by the number of workers ({}).".format(
        images_per_batch, num_workers
    )
    assert (
        images_per_batch >= num_workers
    ), "SOLVER.IMS_PER_BATCH ({}) must be larger than the number of workers ({}).".format(
        images_per_batch, num_workers
    )
    images_per_worker = images_per_batch // num_workers
    # NOTE above is added
    dataset_dicts = get_detection_dataset_dicts(
        cfg.DATASETS.TRAIN,
        filter_empty=cfg.DATALOADER.FILTER_EMPTY_ANNOTATIONS,
        min_keypoints=cfg.MODEL.ROI_KEYPOINT_HEAD.MIN_KEYPOINTS_PER_IMAGE
        if cfg.MODEL.KEYPOINT_ON
        else 0,
        proposal_files=cfg.DATASETS.PROPOSAL_FILES_TRAIN if cfg.MODEL.LOAD_PROPOSALS else None,
    )
    dataset = DatasetFromList(dataset_dicts, copy=False)
    if mapper is None:
        # Default mapper also loads attribute annotations.
        mapper = AttributeDatasetMapper(cfg, True)
    dataset = MapDataset(dataset, mapper)
    sampler_name = cfg.DATALOADER.SAMPLER_TRAIN
    logger = logging.getLogger(__name__)
    logger.info("Using training sampler {}".format(sampler_name))
    if sampler_name == "TrainingSampler":
        sampler = samplers.TrainingSampler(len(dataset))
    elif sampler_name == "RepeatFactorTrainingSampler":
        sampler = samplers.RepeatFactorTrainingSampler(
            dataset_dicts, cfg.DATALOADER.REPEAT_THRESHOLD
        )
    else:
        raise ValueError("Unknown training sampler: {}".format(sampler_name))
    if cfg.DATALOADER.ASPECT_RATIO_GROUPING:
        # Group images of similar aspect ratio into batches (less padding).
        # The DataLoader yields single samples; AspectRatioGroupedDataset
        # performs the actual batching.
        data_loader = torch.utils.data.DataLoader(
            dataset,
            sampler=sampler,
            num_workers=cfg.DATALOADER.NUM_WORKERS,
            batch_sampler=None,
            collate_fn=operator.itemgetter(0),
            worker_init_fn=worker_init_reset_seed,
        )
        data_loader = AspectRatioGroupedDataset(data_loader, images_per_worker)
    else:
        batch_sampler = torch.utils.data.sampler.BatchSampler(
            sampler, images_per_worker, drop_last=True
        )
        data_loader = torch.utils.data.DataLoader(
            dataset,
            num_workers=cfg.DATALOADER.NUM_WORKERS,
            batch_sampler=batch_sampler,
            collate_fn=trivial_batch_collator,
            worker_init_fn=worker_init_reset_seed,
        )
    return data_loader
def build_detection_test_loader_with_attributes(cfg, dataset_name, mapper=None):
    """
    Build a single-image-per-batch test DataLoader for `dataset_name`,
    defaulting to AttributeDatasetMapper so attributes are available.
    """
    proposal_files = None
    if cfg.MODEL.LOAD_PROPOSALS:
        # Pick the proposal file matching this dataset's position in TEST.
        ds_index = list(cfg.DATASETS.TEST).index(dataset_name)
        proposal_files = [cfg.DATASETS.PROPOSAL_FILES_TEST[ds_index]]
    dataset_dicts = get_detection_dataset_dicts(
        [dataset_name],
        filter_empty=False,
        proposal_files=proposal_files,
    )
    if mapper is None:
        mapper = AttributeDatasetMapper(cfg, False)
    dataset = MapDataset(DatasetFromList(dataset_dicts), mapper)
    sampler = samplers.InferenceSampler(len(dataset))
    # Batch size 1 for inference; keep every sample.
    batch_sampler = torch.utils.data.sampler.BatchSampler(sampler, 1, drop_last=False)
    return torch.utils.data.DataLoader(
        dataset,
        num_workers=cfg.DATALOADER.NUM_WORKERS,
        batch_sampler=batch_sampler,
        collate_fn=trivial_batch_collator,
    )
import copy
import logging
import numpy as np
import torch
from fvcore.common.file_io import PathManager
from PIL import Image
from detectron2.data import detection_utils as utils
from detectron2.data import transforms as T
from detectron2.data import DatasetMapper
from detectron2.structures import (
BitMasks,
Boxes,
BoxMode,
Instances,
Keypoints,
PolygonMasks,
polygons_to_bitmask,
)
"""
data mapper for mode detecrton2
"""
def annotations_to_instances_with_attributes(annos,
                                             image_size,
                                             mask_format="polygon",
                                             load_attributes=False,
                                             max_attr_per_ins=16):
    """
    Extend the function annotations_to_instances() to support attributes.

    Args:
        annos (list[dict]): per-instance annotations in detectron2
            dataset-dict format ("bbox", "bbox_mode", "category_id",
            optionally "segmentation", "keypoints", "attribute_ids").
        image_size (tuple[int, int]): (height, width).
        mask_format (str): "polygon" or "bitmask".
        load_attributes (bool): if True, attach `gt_attributes`.
        max_attr_per_ins (int): attribute slots per instance; unused
            slots are padded with -1.

    Returns:
        Instances: with gt_boxes, gt_classes and, when present,
        gt_masks / gt_keypoints / gt_attributes.
    """
    boxes = [BoxMode.convert(obj["bbox"], obj["bbox_mode"], BoxMode.XYXY_ABS) for obj in annos]
    target = Instances(image_size)
    boxes = target.gt_boxes = Boxes(boxes)
    boxes.clip(image_size)
    classes = [obj["category_id"] for obj in annos]
    classes = torch.tensor(classes, dtype=torch.int64)
    target.gt_classes = classes
    if len(annos) and "segmentation" in annos[0]:
        segms = [obj["segmentation"] for obj in annos]
        if mask_format == "polygon":
            masks = PolygonMasks(segms)
        else:
            assert mask_format == "bitmask", mask_format
            masks = []
            for segm in segms:
                if isinstance(segm, list):
                    # polygon
                    masks.append(polygons_to_bitmask(segm, *image_size))
                elif isinstance(segm, dict):
                    # COCO RLE. BUG FIX: `mask_util` was never imported in
                    # this module, so this branch raised NameError; import
                    # it locally here.
                    import pycocotools.mask as mask_util
                    masks.append(mask_util.decode(segm))
                elif isinstance(segm, np.ndarray):
                    assert segm.ndim == 2, "Expect segmentation of 2 dimensions, got {}.".format(
                        segm.ndim
                    )
                    # mask array
                    masks.append(segm)
                else:
                    raise ValueError(
                        "Cannot convert segmentation of type '{}' to BitMasks!"
                        "Supported types are: polygons as list[list[float] or ndarray],"
                        " COCO-style RLE as a dict, or a full-image segmentation mask "
                        "as a 2D ndarray.".format(type(segm))
                    )
            masks = BitMasks(
                torch.stack([torch.from_numpy(np.ascontiguousarray(x)) for x in masks])
            )
        target.gt_masks = masks
    if len(annos) and "keypoints" in annos[0]:
        kpts = [obj.get("keypoints", []) for obj in annos]
        target.gt_keypoints = Keypoints(kpts)
    if len(annos) and load_attributes:
        # Pad each instance's attribute ids to max_attr_per_ins with -1.
        attributes = -torch.ones((len(annos), max_attr_per_ins), dtype=torch.int64)
        for idx, anno in enumerate(annos):
            if "attribute_ids" in anno:
                for jdx, attr_id in enumerate(anno["attribute_ids"]):
                    attributes[idx, jdx] = attr_id
        target.gt_attributes = attributes
    return target
class AttributeDatasetMapper(DatasetMapper):
    """
    Extend DatasetMapper to support attributes.
    """
    def __init__(self, cfg, is_train=True):
        """
        Args:
            cfg (CfgNode): full config; attribute options are read from
                MODEL.BUA.ATTRIBUTE_ON and INPUT.MAX_ATTR_PER_INS.
            is_train (bool): training mode enables cropping/augmentation.
        """
        super().__init__(cfg, is_train)
        # fmt: off
        self.attribute_on = cfg.MODEL.BUA.ATTRIBUTE_ON
        self.max_attr_per_ins = cfg.INPUT.MAX_ATTR_PER_INS
        # fmt: on
        # NOTE Added to fit d202 (detectron2 0.2): rebuild the transform
        # pipeline state that older DatasetMapper versions set up themselves.
        if cfg.INPUT.CROP.ENABLED and is_train:
            self.crop_gen = T.RandomCrop(cfg.INPUT.CROP.TYPE, cfg.INPUT.CROP.SIZE)
        else:
            self.crop_gen = None
        self.tfm_gens = utils.build_transform_gen(cfg, is_train)
        self.load_proposals = cfg.MODEL.LOAD_PROPOSALS
        self.mask_on = cfg.MODEL.MASK_ON
        self.keypoint_on = cfg.MODEL.KEYPOINT_ON
        self.mask_format = cfg.INPUT.MASK_FORMAT
# NOTE ok
def __call__(self, dataset_dict):
dataset_dict = copy.deepcopy(dataset_dict)
# NOTE Added to fit d202
image = utils.read_image(dataset_dict["file_name"], format=self.image_format) # image_format
# image = utils.read_image(dataset_dict["file_name"], format=self.img_format) # image_format
utils.check_image_size(dataset_dict, image)
if "annotations" not in dataset_dict:
image, transforms = T.apply_transform_gens(
([self.crop_gen] if self.crop_gen else []) + self.tfm_gens, image
)
else:
if self.crop_gen:
crop_tfm = utils.gen_crop_transform_with_instance(
self.crop_gen.get_crop_size(image.shape[:2]),
image.shape[:2],
np.random.choice(dataset_dict["annotations"]),
)
image = crop_tfm.apply_image(image)
image, transforms = T.apply_transform_gens(self.tfm_gens, image)
if self.crop_gen:
transforms = crop_tfm + transforms
image_shape = image.shape[:2]
dataset_dict["image"] = torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1)))
if self.load_proposals:
utils.transform_proposals(
dataset_dict, image_shape, transforms, self.min_box_side_len, self.proposal_topk
)
if not self.is_train:
dataset_dict.pop("annotations", None)
dataset_dict.pop("sem_seg_file_name", None)
return dataset_dict
if "annotations" in dataset_dict:
for anno in dataset_dict["annotations"]:
if not self.mask_on:
anno.pop("segmentation", None)
if not self.keypoint_on:
anno.pop("keypoints", None)
if not self.attribute_on:
anno.pop("attribute_ids")
annos = [
utils.transform_instance_annotations(
obj, transforms, image_shape, keypoint_hflip_indices=self.keypoint_hflip_indices
)
for obj in dataset_dict.pop("annotations")
if obj.get("iscrowd", 0) == 0
]
instances = annotations_to_instances_with_attributes(
annos, image_shape, mask_format=self.mask_format,
load_attributes=self.attribute_on, max_attr_per_ins=self.max_attr_per_ins
)
if self.crop_gen and instances.has("gt_masks"):
instances.gt_boxes = instances.gt_masks.get_bounding_boxes()
dataset_dict["instances"] = utils.filter_empty_instances(instances)
if "sem_seg_file_name" in dataset_dict:
with PathManager.open(dataset_dict.pop("sem_seg_file_name"), "rb") as f:
sem_seg_gt = Image.open(f)
sem_seg_gt = np.asarray(sem_seg_gt, dtype="uint8")
sem_seg_gt = transforms.apply_segmentation(sem_seg_gt)
sem_seg_gt = torch.as_tensor(sem_seg_gt.astype("long"))
dataset_dict["sem_seg"] = sem_seg_gt
return dataset_dict
| 7,189 | 38.505495 | 100 | py |
MPMQA | MPMQA-master/detector/bua/caffe/config.py | # -*- coding: utf-8 -*-
from detectron2.config import CfgNode as CN
def add_bottom_up_attention_config(cfg, caffe=False):
    """
    Register the bottom-up-attention options under ``cfg.MODEL.BUA`` (in place).

    Args:
        cfg: the base detectron2 config node to extend.
        caffe (bool): whether the caffe-converted variant of the model is used.
    """
    cfg.MODEL.BUA = CN()
    bua = cfg.MODEL.BUA
    bua.CAFFE = caffe
    bua.RESNET_VERSION = 1
    bua.ATTRIBUTE_ON = False
    bua.EXTRACT_FEATS = False

    bua.RPN = CN()
    # out_channels of conv for bottom-up-attentions RPN.
    bua.RPN.CONV_OUT_CHANNELS = 512

    bua.EXTRACTOR = CN()
    # EXTRACTOR.MODE {1: extract roi features, 2: extract bbox only ,3: extract roi features by gt_bbox}
    bua.EXTRACTOR.MODE = 1
    # config of postprocessing in extractor
    bua.EXTRACTOR.MIN_BOXES = 10
    bua.EXTRACTOR.MAX_BOXES = 100
    bua.EXTRACTOR.CONF_THRESH = 0.2
    bua.EXTRACTOR.OUTPUT_DIR = ".output/"

    bua.ATTRIBUTE = CN()
    bua.ATTRIBUTE.NUM_CLASSES = 401
| 969 | 25.944444 | 104 | py |
MPMQA | MPMQA-master/detector/bua/caffe/postprocessing.py |
import numpy as np
import torch
from detectron2.structures import Instances
from modeling.layers.nms import nms # BC-compat
def extractor_postprocess(boxes, scores, features_pooled, input_per_image, extractor):
    """
    Post-process raw detector outputs into a bounded set of region features.

    For each foreground class (column index >= 1), class-wise NMS (IoU 0.3) is
    run and each surviving box's best-so-far class score is accumulated into
    ``max_conf``. Boxes with ``max_conf >= CONF_THRESH`` are kept; the number of
    kept boxes is then clamped into ``[MIN_BOXES, MAX_BOXES]`` by taking the
    top-scoring boxes.

    Args:
        boxes (Tensor): Rx4 boxes in the resized-image coordinate frame.
        scores (Tensor): RxC per-class scores; the loop skips column 0
            (presumably the background class — confirm against the model head).
        features_pooled (Tensor): RxD pooled ROI features, row-aligned with `boxes`.
        input_per_image (dict): must contain "im_scale", the resize factor applied
            to the original image; boxes are divided by it so the returned boxes
            are in original-image coordinates.
        extractor: config node providing MIN_BOXES / MAX_BOXES / CONF_THRESH.

    Returns:
        (image_feat, image_bboxes): features and boxes (original-image
        coordinates) of the kept regions.
    """
    MIN_BOXES = extractor.MIN_BOXES
    MAX_BOXES = extractor.MAX_BOXES
    CONF_THRESH = extractor.CONF_THRESH

    cur_device = scores.device

    # Map boxes back to original-image coordinates before NMS.
    dets = boxes / input_per_image["im_scale"]

    max_conf = torch.zeros((scores.shape[0])).to(cur_device)

    for cls_ind in range(1, scores.shape[1]):
        cls_scores = scores[:, cls_ind]
        keep = nms(dets, cls_scores, 0.3)
        # Track, per box, the highest class score among NMS survivors.
        max_conf[keep] = torch.where(cls_scores[keep] > max_conf[keep],
                                        cls_scores[keep],
                                        max_conf[keep])

    keep_boxes = torch.nonzero(max_conf >= CONF_THRESH).flatten()
    # Clamp the number of kept boxes into [MIN_BOXES, MAX_BOXES] by score rank.
    if len(keep_boxes) < MIN_BOXES:
        keep_boxes = torch.argsort(max_conf, descending=True)[:MIN_BOXES]
    elif len(keep_boxes) > MAX_BOXES:
        keep_boxes = torch.argsort(max_conf, descending=True)[:MAX_BOXES]
    # keep_boxes = torch.argsort(max_conf, descending=True)[:100]
    # feat_list.append(feats[i][keep_boxes])
    image_feat = features_pooled[keep_boxes]
    image_bboxes = dets[keep_boxes]

    return image_feat, image_bboxes
MPMQA | MPMQA-master/detector/bua/caffe/modeling/box_regression.py |
import math
import torch
from detectron2.structures import Boxes
from typing import List, Tuple, Union
# Value for clamping large dw and dh predictions. The heuristic is that we clamp
# such that dw and dh are no larger than what would transform a 16px box into a
# 1000px box (based on a small anchor, 16px, and a typical image size, 1000px).
_DEFAULT_SCALE_CLAMP = math.log(1000.0 / 16)
__all__ = ["BUABoxes", "BUABox2BoxTransform"]
class BUABoxes(Boxes):
    """
    A list of N boxes stored as an Nx4 float tensor, following the
    bottom-up-attention ("+1" pixel) box convention.

    Behaves like detectron2's :class:`Boxes` (indexing, ``to(device)``,
    iteration, ...), but ``clip`` / ``nonempty`` reproduce the original
    bottom-up-attention network semantics, which treat a box's width as
    ``x2 - x1 + 1`` (and similarly for height).

    Attributes:
        tensor: float matrix of Nx4.
    """

    BoxSizeType = Union[List[int], Tuple[int, int]]

    def __init__(self, tensor: torch.Tensor):
        super().__init__(tensor)

    def clip(self, box_size: BoxSizeType) -> None:
        """
        Clamp (in place) x coordinates into [0, width - 1] and y coordinates
        into [0, height - 1], matching the bottom-up-attention network
        (hence the -1, unlike detectron2's own ``clip``).

        Args:
            box_size (height, width): The clipping box's size.
        """
        assert torch.isfinite(self.tensor).all(), "Box tensor contains infinite or NaN!"
        h, w = box_size
        largest_x = w - 1
        largest_y = h - 1
        self.tensor[:, 0].clamp_(min=0, max=largest_x)
        self.tensor[:, 1].clamp_(min=0, max=largest_y)
        self.tensor[:, 2].clamp_(min=0, max=largest_x)
        self.tensor[:, 3].clamp_(min=0, max=largest_y)

    def nonempty(self, threshold: int = 0) -> torch.Tensor:
        """
        Find boxes that are non-empty, using the "+1" width/height convention
        of the bottom-up-attention network. A box is empty when either side
        (computed as ``hi - lo + 1``) is no larger than ``threshold``.

        Returns:
            Tensor: a bool vector, True for each non-empty box.
        """
        t = self.tensor
        side_x = t[:, 2] - t[:, 0] + 1
        side_y = t[:, 3] - t[:, 1] + 1
        return (side_x > threshold) & (side_y > threshold)

    def filter_boxes(self):
        """Return a bool mask of boxes with strictly positive extent (x2 > x1 and y2 > y1)."""
        t = self.tensor
        return (t[:, 3] > t[:, 1]) & (t[:, 2] > t[:, 0])

    def __getitem__(self, item: Union[int, slice, torch.BoolTensor]) -> "Boxes":
        """
        Index into the box list; always returns a :class:`BUABoxes`.

        Supports ``boxes[3]`` (single box), ``boxes[2:10]`` (slice) and
        ``boxes[mask]`` with a bool tensor of length ``len(boxes)``.
        The result may share storage with this object.
        """
        if isinstance(item, int):
            return BUABoxes(self.tensor[item].view(1, -1))
        selected = self.tensor[item]
        assert selected.dim() == 2, "Indexing on Boxes with {} failed to return a matrix!".format(item)
        return BUABoxes(selected)
class BUABox2BoxTransform(object):
    """
    The classic R-CNN box-to-box transform, parameterized by 4 deltas
    (dx, dy, dw, dh): the box center is shifted by (dx * width, dy * height)
    and the sides are scaled by exp(dw), exp(dh). This variant uses the
    bottom-up-attention "+1" width/height convention.
    """

    def __init__(self, weights, scale_clamp=_DEFAULT_SCALE_CLAMP):
        """
        Args:
            weights (4-element tuple): scaling factors applied to the
                (dx, dy, dw, dh) deltas; treated as hyperparameters.
            scale_clamp (float): predicted dw/dh are clamped to be
                <= scale_clamp before exponentiation.
        """
        self.weights = weights
        self.scale_clamp = scale_clamp

    def get_deltas(self, src_boxes, target_boxes):
        """
        Compute the deltas (dx, dy, dw, dh) that map ``src_boxes`` onto
        ``target_boxes``; i.e. ``self.apply_deltas(deltas, src_boxes)`` is
        ``target_boxes`` unless a delta is too large and gets clamped.

        Args:
            src_boxes (Tensor): source boxes, e.g., object proposals
            target_boxes (Tensor): target of the transformation, e.g.,
                ground-truth boxes.
        """
        assert isinstance(src_boxes, torch.Tensor), type(src_boxes)
        assert isinstance(target_boxes, torch.Tensor), type(target_boxes)

        def centers_and_sizes(b):
            # "+1" convention: a degenerate box still has width/height 1.
            width = b[:, 2] - b[:, 0] + 1
            height = b[:, 3] - b[:, 1] + 1
            return b[:, 0] + 0.5 * width, b[:, 1] + 0.5 * height, width, height

        src_cx, src_cy, src_w, src_h = centers_and_sizes(src_boxes)
        tgt_cx, tgt_cy, tgt_w, tgt_h = centers_and_sizes(target_boxes)

        wx, wy, ww, wh = self.weights
        deltas = torch.stack(
            (
                wx * (tgt_cx - src_cx) / src_w,
                wy * (tgt_cy - src_cy) / src_h,
                ww * torch.log(tgt_w / src_w),
                wh * torch.log(tgt_h / src_h),
            ),
            dim=1,
        )
        assert (src_w > 0).all().item(), "Input boxes to Box2BoxTransform are not valid!"
        return deltas

    def apply_deltas(self, deltas, boxes):
        """
        Apply transformation ``deltas`` (dx, dy, dw, dh) to ``boxes``.

        Args:
            deltas (Tensor): transformation deltas of shape (N, k*4), where k >= 1.
                deltas[i] represents k potentially different class-specific
                box transformations for the single box boxes[i].
            boxes (Tensor): boxes to transform, of shape (N, 4)
        """
        assert torch.isfinite(deltas).all().item(), "Box regression deltas become infinite or NaN!"
        boxes = boxes.to(deltas.dtype)

        widths = boxes[:, 2] - boxes[:, 0] + 1
        heights = boxes[:, 3] - boxes[:, 1] + 1
        ctr_x = boxes[:, 0] + 0.5 * widths
        ctr_y = boxes[:, 1] + 0.5 * heights

        wx, wy, ww, wh = self.weights
        dx = deltas[:, 0::4] / wx
        dy = deltas[:, 1::4] / wy
        # Clamp before exp() so huge predictions cannot overflow.
        dw = torch.clamp(deltas[:, 2::4] / ww, max=self.scale_clamp)
        dh = torch.clamp(deltas[:, 3::4] / wh, max=self.scale_clamp)

        pred_ctr_x = dx * widths[:, None] + ctr_x[:, None]
        pred_ctr_y = dy * heights[:, None] + ctr_y[:, None]
        pred_w = torch.exp(dw) * widths[:, None]
        pred_h = torch.exp(dh) * heights[:, None]

        pred_boxes = torch.zeros_like(deltas)
        pred_boxes[:, 0::4] = pred_ctr_x - 0.5 * pred_w  # x1
        pred_boxes[:, 1::4] = pred_ctr_y - 0.5 * pred_h  # y1
        pred_boxes[:, 2::4] = pred_ctr_x + 0.5 * pred_w  # x2
        pred_boxes[:, 3::4] = pred_ctr_y + 0.5 * pred_h  # y2
        return pred_boxes
return pred_boxes | 7,861 | 40.378947 | 99 | py |
MPMQA | MPMQA-master/detector/bua/caffe/modeling/fast_rcnn.py |
import logging
import numpy as np
import torch
from fvcore.nn import giou_loss, smooth_l1_loss
from torch import nn
from torch.nn import functional as F
from detectron2.layers import cat
from detectron2.structures import Instances
from detectron2.utils.events import get_event_storage
from detectron2.modeling.roi_heads import select_foreground_proposals
from detectron2.modeling.roi_heads.fast_rcnn import fast_rcnn_inference, fast_rcnn_inference_single_image, FastRCNNOutputs
from .layers.nms import batched_nms
from .box_regression import BUABoxes
logger = logging.getLogger(__name__)
"""
Shape shorthand in this module:
N: number of images in the minibatch
R: number of ROIs, combined over all images, in the minibatch
Ri: number of ROIs in image i
    K: number of foreground classes. E.g., there are 80 foreground classes in COCO.
Naming convention:
deltas: refers to the 4-d (dx, dy, dw, dh) deltas that parameterize the box2box
transform (see :class:`box_regression.Box2BoxTransform`).
pred_class_logits: predicted class scores in [-inf, +inf]; use
softmax(pred_class_logits) to estimate P(class).
gt_classes: ground-truth classification labels in [0, K], where [0, K) represent
foreground object classes and K represents the background class.
pred_proposal_deltas: predicted box2box transform deltas for transforming proposals
to detection box predictions.
gt_proposal_deltas: ground-truth box2box transform deltas
"""
class FastRCNNOutputs:
    """
    An internal implementation that stores information about outputs of a Fast R-CNN head,
    and provides methods that are used to decode the outputs of a Fast R-CNN head.

    NOTE: this was copied from a newer detectron2; the helper names the upstream
    version relied on (``Boxes``, ``cross_entropy``, ``nonzero_tuple``,
    ``_log_classification_stats``) are not imported in this module and are
    replaced here with locally available equivalents.
    """

    def __init__(
        self,
        box2box_transform,
        pred_class_logits,
        pred_proposal_deltas,
        proposals,
        smooth_l1_beta=0.0,
        box_reg_loss_type="smooth_l1",
    ):
        """
        Args:
            box2box_transform (Box2BoxTransform/Box2BoxTransformRotated):
                box2box transform instance for proposal-to-detection transformations.
            pred_class_logits (Tensor): A tensor of shape (R, K + 1) storing the predicted class
                logits for all R predicted object instances.
                Each row corresponds to a predicted object instance.
            pred_proposal_deltas (Tensor): A tensor of shape (R, K * B) or (R, B) for
                class-specific or class-agnostic regression. It stores the predicted deltas that
                transform proposals into final box detections.
                B is the box dimension (4 or 5).
                When B is 4, each row is [dx, dy, dw, dh (, ....)].
                When B is 5, each row is [dx, dy, dw, dh, da (, ....)].
            proposals (list[Instances]): A list of N Instances, where Instances i stores the
                proposals for image i, in the field "proposal_boxes".
                When training, each Instances must have ground-truth labels
                stored in the field "gt_classes" and "gt_boxes".
                The total number of all instances must be equal to R.
            smooth_l1_beta (float): The transition point between L1 and L2 loss in
                the smooth L1 loss function. When set to 0, the loss becomes L1. When
                set to +inf, the loss becomes constant 0.
            box_reg_loss_type (str): Box regression loss type. One of: "smooth_l1", "giou"
        """
        self.box2box_transform = box2box_transform
        self.num_preds_per_image = [len(p) for p in proposals]
        self.pred_class_logits = pred_class_logits
        self.pred_proposal_deltas = pred_proposal_deltas
        self.smooth_l1_beta = smooth_l1_beta
        self.box_reg_loss_type = box_reg_loss_type

        self.image_shapes = [x.image_size for x in proposals]

        if len(proposals):
            box_type = type(proposals[0].proposal_boxes)
            # cat(..., dim=0) concatenates over all images in the batch
            self.proposals = box_type.cat([p.proposal_boxes for p in proposals])
            assert (
                not self.proposals.tensor.requires_grad
            ), "Proposals should not require gradients!"

            # "gt_classes" exists if and only if training. But other gt fields may
            # not necessarily exist in training for images that have no groundtruth.
            if proposals[0].has("gt_classes"):
                self.gt_classes = cat([p.gt_classes for p in proposals], dim=0)

                # If "gt_boxes" does not exist, the proposals must be all negative and
                # should not be included in regression loss computation.
                # Here we just use proposal_boxes as an arbitrary placeholder because its
                # value won't be used in self.box_reg_loss().
                gt_boxes = [
                    p.gt_boxes if p.has("gt_boxes") else p.proposal_boxes for p in proposals
                ]
                self.gt_boxes = box_type.cat(gt_boxes)
        else:
            # BUGFIX: upstream used plain `Boxes`, which is not imported in this
            # module; use the locally imported BUABoxes subclass instead.
            self.proposals = BUABoxes(torch.zeros(0, 4, device=self.pred_proposal_deltas.device))
        self._no_instances = len(self.proposals) == 0  # no instances found

    def _log_accuracy(self):
        """
        Log classification accuracy metrics to EventStorage
        (same statistics as BUADetection2FastRCNNOutputs._log_accuracy).
        """
        num_instances = self.gt_classes.numel()
        if num_instances == 0:
            # nothing to log; avoid division by zero
            return
        pred_classes = self.pred_class_logits.argmax(dim=1)
        bg_class_ind = self.pred_class_logits.shape[1] - 1

        fg_inds = (self.gt_classes >= 0) & (self.gt_classes < bg_class_ind)
        num_fg = fg_inds.nonzero().numel()
        fg_gt_classes = self.gt_classes[fg_inds]
        fg_pred_classes = pred_classes[fg_inds]

        num_false_negative = (fg_pred_classes == bg_class_ind).nonzero().numel()
        num_accurate = (pred_classes == self.gt_classes).nonzero().numel()
        fg_num_accurate = (fg_pred_classes == fg_gt_classes).nonzero().numel()

        storage = get_event_storage()
        storage.put_scalar("fast_rcnn/cls_accuracy", num_accurate / num_instances)
        if num_fg > 0:
            storage.put_scalar("fast_rcnn/fg_cls_accuracy", fg_num_accurate / num_fg)
            storage.put_scalar("fast_rcnn/false_negative", num_false_negative / num_fg)

    def softmax_cross_entropy_loss(self):
        """
        Deprecated. Compute the softmax cross-entropy classification loss
        (mean over all proposals).
        """
        # BUGFIX: upstream called the unimported `_log_classification_stats` /
        # `cross_entropy`; use the local helper and F.cross_entropy instead.
        self._log_accuracy()
        return F.cross_entropy(self.pred_class_logits, self.gt_classes, reduction="mean")

    def box_reg_loss(self):
        """
        Deprecated. Compute the box regression loss (smooth-L1 or GIoU),
        normalized by the total number of proposals.
        """
        if self._no_instances:
            return 0.0 * self.pred_proposal_deltas.sum()

        box_dim = self.proposals.tensor.size(1)  # 4 or 5
        cls_agnostic_bbox_reg = self.pred_proposal_deltas.size(1) == box_dim
        device = self.pred_proposal_deltas.device

        bg_class_ind = self.pred_class_logits.shape[1] - 1
        # Box delta loss is only computed between the prediction for the gt class k
        # (if 0 <= k < bg_class_ind) and the target; there is no loss defined on predictions
        # for non-gt classes and background.
        # Empty fg_inds should produce a valid loss of zero because reduction=sum.
        # BUGFIX: upstream used the unimported `nonzero_tuple`.
        fg_inds = torch.nonzero(
            (self.gt_classes >= 0) & (self.gt_classes < bg_class_ind), as_tuple=True
        )[0]
        if cls_agnostic_bbox_reg:
            # pred_proposal_deltas only corresponds to foreground class for agnostic
            gt_class_cols = torch.arange(box_dim, device=device)
        else:
            # pred_proposal_deltas for class k are located in columns [b * k : b * k + b],
            # where b is the dimension of box representation (4 or 5)
            # Note that compared to Detectron1,
            # we do not perform bounding box regression for background classes.
            gt_class_cols = box_dim * self.gt_classes[fg_inds, None] + torch.arange(
                box_dim, device=device
            )

        if self.box_reg_loss_type == "smooth_l1":
            gt_proposal_deltas = self.box2box_transform.get_deltas(
                self.proposals.tensor, self.gt_boxes.tensor
            )
            loss_box_reg = smooth_l1_loss(
                self.pred_proposal_deltas[fg_inds[:, None], gt_class_cols],
                gt_proposal_deltas[fg_inds],
                self.smooth_l1_beta,
                reduction="sum",
            )
        elif self.box_reg_loss_type == "giou":
            fg_pred_boxes = self.box2box_transform.apply_deltas(
                self.pred_proposal_deltas[fg_inds[:, None], gt_class_cols],
                self.proposals.tensor[fg_inds],
            )
            # `giou_loss` comes from fvcore.nn (added to the module imports).
            loss_box_reg = giou_loss(
                fg_pred_boxes,
                self.gt_boxes.tensor[fg_inds],
                reduction="sum",
            )
        else:
            raise ValueError(f"Invalid bbox reg loss type '{self.box_reg_loss_type}'")

        # Normalize by the total number of regions (R), not only foreground
        # regions, so each training example has equal influence.
        loss_box_reg = loss_box_reg / self.gt_classes.numel()
        return loss_box_reg

    def losses(self):
        """
        Deprecated. Return {"loss_cls", "loss_box_reg"}.
        """
        return {"loss_cls": self.softmax_cross_entropy_loss(), "loss_box_reg": self.box_reg_loss()}

    def predict_boxes(self):
        """
        Deprecated. Decode the predicted deltas against the proposals and split
        the result per image.
        """
        pred = self.box2box_transform.apply_deltas(self.pred_proposal_deltas, self.proposals.tensor)
        return pred.split(self.num_preds_per_image, dim=0)

    def predict_probs(self):
        """
        Deprecated. Return per-image softmax class probabilities, shape (Ri, K + 1).
        """
        probs = F.softmax(self.pred_class_logits, dim=-1)
        return probs.split(self.num_preds_per_image, dim=0)
class BUACaffeFastRCNNOutputs(object):
    """
    A class that stores information about outputs of a Fast R-CNN head.

    Caffe-compatible variant: `predict_boxes` first rescales the proposals by
    1/image_scale, so decoded detections are in original-image coordinates.
    """

    def __init__(
        self, box2box_transform, pred_class_logits, pred_proposal_deltas, proposals, smooth_l1_beta, image_scales, attr_on=False
    ):
        """
        Args:
            box2box_transform (Box2BoxTransform/Box2BoxTransformRotated):
                box2box transform instance for proposal-to-detection transformations.
            pred_class_logits (Tensor): A tensor of shape (R, K + 1) storing the predicted class
                logits for all R predicted object instances.
                Each row corresponds to a predicted object instance.
            pred_proposal_deltas (Tensor): A tensor of shape (R, K * B) or (R, B) for
                class-specific or class-agnostic regression. It stores the predicted deltas that
                transform proposals into final box detections.
                B is the box dimension (4 or 5).
                When B is 4, each row is [dx, dy, dw, dh (, ....)].
                When B is 5, each row is [dx, dy, dw, dh, da (, ....)].
            proposals (list[Instances]): A list of N Instances, where Instances i stores the
                proposals for image i, in the field "proposal_boxes".
                When training, each Instances must have ground-truth labels
                stored in the field "gt_classes" and "gt_boxes".
            smooth_l1_beta (float): The transition point between L1 and L2 loss in
                the smooth L1 loss function. When set to 0, the loss becomes L1. When
                set to +inf, the loss becomes constant 0.
            image_scales (list[float]): per-image resize factors used to map boxes
                back to the original-image resolution.
            attr_on (bool): whether the attribute head is enabled (stored only;
                not used by the methods below).
        """
        self.box2box_transform = box2box_transform
        self.num_preds_per_image = [len(p) for p in proposals]
        self.pred_class_logits = pred_class_logits
        self.pred_proposal_deltas = pred_proposal_deltas
        self.smooth_l1_beta = smooth_l1_beta
        self.image_scales = image_scales
        self.attr_on = attr_on

        box_type = type(proposals[0].proposal_boxes)
        # cat(..., dim=0) concatenates over all images in the batch
        self.proposals = box_type.cat([p.proposal_boxes for p in proposals])
        assert not self.proposals.tensor.requires_grad, "Proposals should not require gradients!"
        self.image_shapes = [x.image_size for x in proposals]

        # The following fields should exist only when training.
        if proposals[0].has("gt_boxes"):
            self.gt_boxes = box_type.cat([p.gt_boxes for p in proposals])
            assert proposals[0].has("gt_classes")
            self.gt_classes = cat([p.gt_classes for p in proposals], dim=0)

    def fast_rcnn_inference(self, boxes, scores, image_shapes, image_scales, score_thresh, nms_thresh, topk_per_image):
        """
        Call `fast_rcnn_inference_single_image` for all images.

        Args:
            boxes (list[Tensor]): A list of Tensors of predicted class-specific or class-agnostic
                boxes for each image. Element i has shape (Ri, K * 4) if doing
                class-specific regression, or (Ri, 4) if doing class-agnostic
                regression, where Ri is the number of predicted objects for image i.
                This is compatible with the output of :meth:`predict_boxes`.
            scores (list[Tensor]): A list of Tensors of predicted class scores for each image.
                Element i has shape (Ri, K + 1), where Ri is the number of predicted objects
                for image i. Compatible with the output of :meth:`predict_probs`.
            image_shapes (list[tuple]): A list of (height, width) tuples for each image in
                the batch (``Instances.image_size``).
            image_scales (list[float]): per-image resize factors.
            score_thresh (float): Only return detections with a confidence score exceeding this
                threshold.
            nms_thresh (float): The threshold to use for box non-maximum suppression. Value in [0, 1].
            topk_per_image (int): The number of top scoring detections to return. Set < 0 to return
                all detections.

        Returns:
            instances: (list[Instances]): A list of N instances, one for each image in the batch,
                that stores the topk most confidence detections.
            kept_indices: (list[Tensor]): A list of 1D tensor of length of N, each element indicates
                the corresponding boxes/scores index in [0, Ri) from the input, for image i.
        """
        result_per_image = [
            self.fast_rcnn_inference_single_image(
                boxes_per_image, scores_per_image, image_shape, image_scale, score_thresh, nms_thresh, topk_per_image
            )
            for scores_per_image, boxes_per_image, image_shape, image_scale in zip(scores, boxes, image_shapes, image_scales)
        ]
        return tuple(list(x) for x in zip(*result_per_image))

    def fast_rcnn_inference_single_image(
        self, boxes, scores, image_shape, image_scale, score_thresh, nms_thresh, topk_per_image
    ):
        """
        Single-image inference. Return bounding-box detection results by thresholding
        on scores and applying non-maximum suppression (NMS).

        Args:
            Same as `fast_rcnn_inference`, but with boxes, scores, shape and scale
            for one image.

        Returns:
            Same as `fast_rcnn_inference`, but for only one image.
        """
        # Column 0 of the scores and the first 4 box columns belong to the
        # background class; drop them before per-class filtering.
        scores = scores[:, 1:]
        boxes = boxes[:, 4:]
        num_bbox_reg_classes = boxes.shape[1] // 4
        # Convert to Boxes to use the `clip` function ...
        # Boxes are in original-image coordinates here, so clip against the
        # original size (image_shape is (height, width) divided by the scale).
        boxes = BUABoxes(boxes.reshape(-1, 4))
        boxes.clip((image_shape[0]/image_scale, image_shape[1]/image_scale))
        boxes = boxes.tensor.view(-1, num_bbox_reg_classes, 4)  # R x C x 4

        # Filter results based on detection scores
        filter_mask = scores > score_thresh  # R x K
        # R' x 2. First column contains indices of the R predictions;
        # Second column contains indices of classes.
        filter_inds = filter_mask.nonzero()
        if num_bbox_reg_classes == 1:
            # class-agnostic regression: one box shared by all classes
            boxes = boxes[filter_inds[:, 0], 0]
        else:
            boxes = boxes[filter_mask]
        scores = scores[filter_mask]

        # Apply per-class NMS
        keep = batched_nms(boxes, scores, filter_inds[:, 1], nms_thresh)
        if topk_per_image >= 0:
            keep = keep[:topk_per_image]
        boxes, scores, filter_inds = boxes[keep], scores[keep], filter_inds[keep]

        result = Instances(image_shape)
        result.pred_boxes = BUABoxes(boxes)
        result.scores = scores
        result.pred_classes = filter_inds[:, 1]
        return result, filter_inds[:, 0]

    def predict_boxes(self):
        """
        Returns:
            list[Tensor]: A list of Tensors of predicted class-specific or class-agnostic boxes
                for each image. Element i has shape (Ri, K * B) or (Ri, B), where Ri is
                the number of predicted objects for image i and B is the box dimension (4 or 5)
        """
        # Always use 1 image per worker during inference since this is the
        # standard when reporting inference time in papers.
        # NOTE: this rescales self.proposals *in place* back to original-image
        # coordinates; it is only correct because inference uses batch size 1
        # and this method is called once per batch.
        self.proposals.scale(1.0/self.image_scales[0], 1.0/self.image_scales[0])
        num_pred = len(self.proposals)
        B = self.proposals.tensor.shape[1]
        K = self.pred_proposal_deltas.shape[1] // B
        boxes = self.box2box_transform.apply_deltas(
            self.pred_proposal_deltas,
            self.proposals.tensor,
        )
        return boxes.view(num_pred, K * B).split(self.num_preds_per_image, dim=0)

    def predict_probs(self):
        """
        Returns:
            list[Tensor]: A list of Tensors of predicted class probabilities for each image.
                Element i has shape (Ri, K + 1), where Ri is the number of predicted objects
                for image i.
        """
        probs = F.softmax(self.pred_class_logits, dim=-1)
        return probs.split(self.num_preds_per_image, dim=0)

    def inference(self, score_thresh, nms_thresh, topk_per_image):
        """
        Decode the stored logits/deltas and run per-image NMS inference.

        Args:
            score_thresh (float): same as fast_rcnn_inference.
            nms_thresh (float): same as fast_rcnn_inference.
            topk_per_image (int): same as fast_rcnn_inference.
        Returns:
            list[Instances]: same as fast_rcnn_inference.
            list[Tensor]: same as fast_rcnn_inference.
        """
        boxes = self.predict_boxes()
        scores = self.predict_probs()
        image_shapes = self.image_shapes
        image_scales = self.image_scales

        return self.fast_rcnn_inference(
            boxes, scores, image_shapes, image_scales, score_thresh, nms_thresh, topk_per_image
        )
class BUACaffeFastRCNNOutputLayers(nn.Module):
    """
    Output heads of the caffe-style bottom-up-attention Fast R-CNN:
      (1) classification scores,
      (2) proposal-to-detection box regression deltas,
      (3) optionally, attribute scores (when ``attr_on``).
    """

    def __init__(self, input_size, num_classes, cls_agnostic_bbox_reg, box_dim=4, attr_on=False, num_attr_classes=401):
        """
        Args:
            input_size (int): channels, or (channels, height, width)
            num_classes (int): number of classes predicted by the score head.
                NOTE(review): unlike detectron2's head there is no "+1" here,
                so num_classes presumably already includes background — confirm
                against the converted caffe weights.
            cls_agnostic_bbox_reg (bool): whether to use class agnostic for bbox regression
            box_dim (int): the dimension of bounding boxes.
                Example box dimensions: 4 for regular XYXY boxes and 5 for rotated XYWHA boxes
            attr_on (bool): whether to build the attribute head.
            num_attr_classes (int): number of attribute classes.
        """
        super(BUACaffeFastRCNNOutputLayers, self).__init__()

        if not isinstance(input_size, int):
            input_size = np.prod(input_size)
        self.attr_on = attr_on

        self.cls_score = nn.Linear(input_size, num_classes)
        reg_classes = 1 if cls_agnostic_bbox_reg else num_classes
        self.bbox_pred = nn.Linear(input_size, reg_classes * box_dim)

        nn.init.normal_(self.cls_score.weight, std=0.01)
        nn.init.normal_(self.bbox_pred.weight, std=0.001)
        nn.init.constant_(self.cls_score.bias, 0)
        nn.init.constant_(self.bbox_pred.bias, 0)

        if self.attr_on:
            # Attribute head: embed the argmax class, concat with the ROI
            # feature, then a 2-layer MLP producing attribute probabilities.
            self.cls_embed = nn.Embedding(num_classes, 256)
            self.attr_linear1 = nn.Linear(input_size + 256, 512)
            self.attr_linear2 = nn.Linear(512, num_attr_classes)

            nn.init.normal_(self.cls_embed.weight, std=0.01)
            nn.init.normal_(self.attr_linear1.weight, std=0.01)
            nn.init.normal_(self.attr_linear2.weight, std=0.01)
            nn.init.constant_(self.attr_linear1.bias, 0)
            nn.init.constant_(self.attr_linear2.bias, 0)

    def forward(self, x, proposal_boxes=None):
        """
        Args:
            x (Tensor): per-ROI features, (R, input_size) or (R, C, H, W)
                (higher-rank inputs are flattened from dim 1).
            proposal_boxes: unused; kept for interface compatibility.

        Returns:
            (scores, proposal_deltas) or, when attr_on,
            (scores, proposal_deltas, attr_score).
        """
        feats = torch.flatten(x, start_dim=1) if x.dim() > 2 else x
        scores = self.cls_score(feats)
        proposal_deltas = self.bbox_pred(feats)

        if not self.attr_on:
            return scores, proposal_deltas

        # Labels from the classifier's argmax (no gt available at inference).
        top_classes = torch.argmax(scores, dim=1)
        # Embed those labels and concatenate with the ROI features.
        class_embeddings = self.cls_embed(top_classes)
        attr_input = cat([feats, class_embeddings], dim=1)
        # Two-layer attribute MLP with softmax output.
        hidden = self.attr_linear1(attr_input)
        attr_score = F.softmax(self.attr_linear2(F.relu(hidden)), dim=-1)
        return scores, proposal_deltas, attr_score
class BUADetection2FastRCNNOutputs(FastRCNNOutputs):
"""
A class that stores information about outputs of a Fast R-CNN head.
"""
def __init__(
self, box2box_transform, pred_class_logits, pred_proposal_deltas, proposals, smooth_l1_beta, attr_on=False, pred_attribute_logits=None, num_attr_classes=400, gt_attributes=None
):
"""
Args:
box2box_transform (Box2BoxTransform/Box2BoxTransformRotated):
box2box transform instance for proposal-to-detection transformations.
pred_class_logits (Tensor): A tensor of shape (R, K + 1) storing the predicted class
logits for all R predicted object instances.
Each row corresponds to a predicted object instance.
pred_proposal_deltas (Tensor): A tensor of shape (R, K * B) or (R, B) for
class-specific or class-agnostic regression. It stores the predicted deltas that
transform proposals into final box detections.
B is the box dimension (4 or 5).
When B is 4, each row is [dx, dy, dw, dh (, ....)].
When B is 5, each row is [dx, dy, dw, dh, da (, ....)].
pred_attribute_logits (Tensor:) A tensor of shape (R, C) storing the predicted attribute
logits for all R predicted object instances.
proposals (list[Instances]): A list of N Instances, where Instances i stores the
proposals for image i, in the field "proposal_boxes".
When training, each Instances must have ground-truth labels
stored in the field "gt_classes" and "gt_boxes".
smooth_l1_beta (float): The transition point between L1 and L2 loss in
the smooth L1 loss function. When set to 0, the loss becomes L1. When
set to +inf, the loss becomes constant 0.
"""
self.attr_on = attr_on
self.box2box_transform = box2box_transform
self.num_preds_per_image = [len(p) for p in proposals]
self.pred_class_logits = pred_class_logits
self.pred_proposal_deltas = pred_proposal_deltas
if self.attr_on:
self.pred_attribute_logits = pred_attribute_logits
self.gt_attributes = gt_attributes
self.smooth_l1_beta = smooth_l1_beta
box_type = type(proposals[0].proposal_boxes)
# cat(..., dim=0) concatenates over all images in the batch
self.proposals = box_type.cat([p.proposal_boxes for p in proposals])
assert not self.proposals.tensor.requires_grad, "Proposals should not require gradients!"
self.image_shapes = [x.image_size for x in proposals]
self.num_attr_classes = num_attr_classes
# The following fields should exist only when training.
if proposals[0].has("gt_boxes"):
self.gt_boxes = box_type.cat([p.gt_boxes for p in proposals])
assert proposals[0].has("gt_classes")
self.gt_classes = cat([p.gt_classes for p in proposals], dim=0)
def _log_accuracy(self):
"""
Log the accuracy metrics to EventStorage.
"""
num_instances = self.gt_classes.numel()
pred_classes = self.pred_class_logits.argmax(dim=1)
bg_class_ind = self.pred_class_logits.shape[1] - 1
fg_inds = (self.gt_classes >= 0) & (self.gt_classes < bg_class_ind)
num_fg = fg_inds.nonzero().numel()
fg_gt_classes = self.gt_classes[fg_inds]
fg_pred_classes = pred_classes[fg_inds]
num_false_negative = (fg_pred_classes == bg_class_ind).nonzero().numel()
num_accurate = (pred_classes == self.gt_classes).nonzero().numel()
fg_num_accurate = (fg_pred_classes == fg_gt_classes).nonzero().numel()
storage = get_event_storage()
storage.put_scalar("fast_rcnn/cls_accuracy", num_accurate / num_instances)
if num_fg > 0:
storage.put_scalar("fast_rcnn/fg_cls_accuracy", fg_num_accurate / num_fg)
storage.put_scalar("fast_rcnn/false_negative", num_false_negative / num_fg)
def softmax_cross_entropy_loss(self):
"""
Compute the softmax cross entropy loss for box classification.
Returns:
scalar Tensor
"""
self._log_accuracy()
return F.cross_entropy(self.pred_class_logits, self.gt_classes, reduction="mean")
    def smooth_l1_loss(self):
        """
        Compute the smooth L1 loss for box regression.

        Only foreground proposals (gt class k with 0 <= k < bg_class_ind)
        contribute; the loss is summed over them and normalized by the total
        number of proposals R (see the long comment near the end for why).
        Returns:
            scalar Tensor
        """
        # Regression targets: deltas mapping each proposal onto its matched gt box.
        gt_proposal_deltas = self.box2box_transform.get_deltas(
            self.proposals.tensor, self.gt_boxes.tensor
        )
        box_dim = gt_proposal_deltas.size(1)  # 4 or 5
        # If the predictor emits exactly one box per proposal, it is class-agnostic.
        cls_agnostic_bbox_reg = self.pred_proposal_deltas.size(1) == box_dim
        device = self.pred_proposal_deltas.device
        # Background is by convention the last column of the class logits.
        bg_class_ind = self.pred_class_logits.shape[1] - 1
        # Box delta loss is only computed between the prediction for the gt class k
        # (if 0 <= k < bg_class_ind) and the target; there is no loss defined on predictions
        # for non-gt classes and background.
        # Empty fg_inds produces a valid loss of zero as long as the size_average
        # arg to smooth_l1_loss is False (otherwise it uses torch.mean internally
        # and would produce a nan loss).
        fg_inds = torch.nonzero((self.gt_classes >= 0) & (self.gt_classes < bg_class_ind)).squeeze(
            1
        )
        if cls_agnostic_bbox_reg:
            # pred_proposal_deltas only corresponds to foreground class for agnostic
            gt_class_cols = torch.arange(box_dim, device=device)
        else:
            fg_gt_classes = self.gt_classes[fg_inds]
            # pred_proposal_deltas for class k are located in columns [b * k : b * k + b],
            # where b is the dimension of box representation (4 or 5)
            # Note that compared to Detectron1,
            # we do not perform bounding box regression for background classes.
            gt_class_cols = box_dim * fg_gt_classes[:, None] + torch.arange(box_dim, device=device)
        # Advanced indexing gathers, per foreground row, the box_dim columns
        # belonging to that row's gt class.
        loss_box_reg = smooth_l1_loss(
            self.pred_proposal_deltas[fg_inds[:, None], gt_class_cols],
            gt_proposal_deltas[fg_inds],
            self.smooth_l1_beta,
            reduction="sum",
        )
        # The loss is normalized using the total number of regions (R), not the number
        # of foreground regions even though the box regression loss is only defined on
        # foreground regions. Why? Because doing so gives equal training influence to
        # each foreground example. To see how, consider two different minibatches:
        # (1) Contains a single foreground region
        # (2) Contains 100 foreground regions
        # If we normalize by the number of foreground regions, the single example in
        # minibatch (1) will be given 100 times as much influence as each foreground
        # example in minibatch (2). Normalizing by the total number of regions, R,
        # means that the single example in minibatch (1) and each of the 100 examples
        # in minibatch (2) are given equal influence.
        loss_box_reg = loss_box_reg / self.gt_classes.numel()
        return loss_box_reg
    def attribute_loss(self):
        """
        Compute the attribute classification loss over foreground boxes.

        Each box carries up to 16 ground-truth attribute slots (unfilled slots
        are -1 and ignored by cross-entropy); the per-slot losses of a box are
        down-weighted by that box's number of valid attributes, then averaged
        over boxes that received any attribute loss.
        Returns:
            scalar Tensor
        """
        fg_gt_attributes = self.gt_attributes
        n_boxes = self.pred_attribute_logits.shape[0]
        # NOTE(review): mutates self.pred_attribute_logits in place, so calling
        # this method twice on the same instance would re-expand the tensor.
        self.pred_attribute_logits = self.pred_attribute_logits.unsqueeze(1)
        # Replicate each box's logits once per attribute slot (16 appears to be
        # the max number of gt attributes per box — TODO confirm against the
        # dataset mapper), flattening to (n_boxes * 16, num_attr_classes).
        self.pred_attribute_logits = self.pred_attribute_logits.expand(n_boxes, 16, self.num_attr_classes).contiguous().view(-1, self.num_attr_classes)
        # Per-slot weight = 1 / (number of valid attributes of the owning box).
        inv_per_box_weights = (
            (fg_gt_attributes >= 0).sum(dim=1).repeat(16, 1).transpose(0, 1).flatten()
        )
        per_box_weights = inv_per_box_weights.float().reciprocal()
        # Boxes with zero valid attributes yield reciprocal inf (> 1): zero them out.
        per_box_weights[per_box_weights > 1] = 0.0
        fg_gt_attributes = fg_gt_attributes.view(-1)
        # ignore_index=-1 skips the padded (unfilled) attribute slots.
        attributes_loss = 0.5 * F.cross_entropy(
            self.pred_attribute_logits, fg_gt_attributes, reduction="none", ignore_index=-1
        )
        # Weighted sum of slot losses per box.
        attributes_loss = (attributes_loss * per_box_weights).view(n_boxes, -1).sum(dim=1)
        n_valid_boxes = len(attributes_loss.nonzero())
        if n_valid_boxes > 0:
            attributes_loss = (attributes_loss / n_valid_boxes).sum()
        else:
            # Keep the graph connected with a zero loss when no box contributed.
            attributes_loss = (attributes_loss * 0.0).sum()
        return attributes_loss
def losses(self):
"""
Compute the default losses for box head in Fast(er) R-CNN,
with softmax cross entropy loss and smooth L1 loss.
Returns:
A dict of losses (scalar tensors) containing keys "loss_cls" and "loss_box_reg".
"""
return {
"loss_cls": self.softmax_cross_entropy_loss(),
"loss_box_reg": self.smooth_l1_loss(),
"loss_attr": self.attribute_loss() if self.attr_on else 0.,
}
def predict_boxes(self):
"""
Returns:
list[Tensor]: A list of Tensors of predicted class-specific or class-agnostic boxes
for each image. Element i has shape (Ri, K * B) or (Ri, B), where Ri is
the number of predicted objects for image i and B is the box dimension (4 or 5)
"""
num_pred = len(self.proposals)
B = self.proposals.tensor.shape[1]
K = self.pred_proposal_deltas.shape[1] // B
boxes = self.box2box_transform.apply_deltas(
self.pred_proposal_deltas.view(num_pred * K, B),
self.proposals.tensor.unsqueeze(1).expand(num_pred, K, B).reshape(-1, B),
)
return boxes.view(num_pred, K * B).split(self.num_preds_per_image, dim=0)
def predict_probs(self):
"""
Returns:
list[Tensor]: A list of Tensors of predicted class probabilities for each image.
Element i has shape (Ri, K + 1), where Ri is the number of predicted objects
for image i.
"""
probs = F.softmax(self.pred_class_logits, dim=-1)
return probs.split(self.num_preds_per_image, dim=0)
def inference(self, score_thresh, nms_thresh, topk_per_image):
"""
Args:
score_thresh (float): same as fast_rcnn_inference.
nms_thresh (float): same as fast_rcnn_inference.
topk_per_image (int): same as fast_rcnn_inference.
Returns:
list[Instances]: same as fast_rcnn_inference.
list[Tensor]: same as fast_rcnn_inference.
"""
boxes = self.predict_boxes()
scores = self.predict_probs()
image_shapes = self.image_shapes
return fast_rcnn_inference(
boxes, scores, image_shapes, score_thresh, nms_thresh, topk_per_image
)
class BUADetectron2FastRCNNOutputLayers(nn.Module):
    """
    Two linear layers for predicting Fast R-CNN outputs:
    (1) proposal-to-detection box regression deltas
    (2) classification scores
    When ``attr_on`` is set, an additional attribute head (class embedding +
    two linear layers) predicts attribute logits per box.
    """
    def __init__(self, input_size, num_classes, cls_agnostic_bbox_reg, box_dim=4, attr_on=False, num_attr_classes=400):
        """
        Args:
            input_size (int): channels, or (channels, height, width)
            num_classes (int): number of foreground classes
            cls_agnostic_bbox_reg (bool): whether to use class agnostic for bbox regression
            box_dim (int): the dimension of bounding boxes.
                Example box dimensions: 4 for regular XYXY boxes and 5 for rotated XYWHA boxes
            attr_on (bool): whether to build and run the attribute head
            num_attr_classes (int): number of attribute classes
        """
        super(BUADetectron2FastRCNNOutputLayers, self).__init__()
        self.attr_on = attr_on
        self.num_classes = num_classes
        self.num_attr_classes = num_attr_classes
        if not isinstance(input_size, int):
            input_size = np.prod(input_size)
        # The prediction layer for num_classes foreground classes and one background class
        # (hence + 1)
        self.cls_score = nn.Linear(input_size, num_classes + 1)
        num_bbox_reg_classes = 1 if cls_agnostic_bbox_reg else num_classes
        self.bbox_pred = nn.Linear(input_size, num_bbox_reg_classes * box_dim)
        nn.init.normal_(self.cls_score.weight, std=0.01)
        nn.init.normal_(self.bbox_pred.weight, std=0.001)
        for l in [self.cls_score, self.bbox_pred]:
            nn.init.constant_(l.bias, 0)
        if self.attr_on:
            # Attribute head: embed the (predicted or gt) class, concatenate
            # with the box feature, then two linear layers to attribute logits.
            self.cls_embed = nn.Embedding(num_classes + 1, 256)
            self.attr_linear1 = nn.Linear(input_size + 256, 512)
            self.attr_linear2 = nn.Linear(512, num_attr_classes)
            # nn.init.normal_(self.cls_embed.weight, std=0.01)
            nn.init.normal_(self.attr_linear1.weight, std=0.01)
            nn.init.normal_(self.attr_linear2.weight, std=0.01)
            nn.init.constant_(self.attr_linear1.bias, 0)
            nn.init.constant_(self.attr_linear2.bias, 0)
    def forward(self, x, proposal_boxes=None):
        """
        Args:
            x (Tensor): per-region features; flattened to (R, input_size) if needed.
            proposal_boxes (list[Instances] | None): required when training with
                the attribute head (used to select foreground proposals).
        Returns:
            (scores, proposal_deltas) when attr_on is False; otherwise
            (scores, proposal_deltas, attr_score, gt_attributes-or-None).
        """
        if x.dim() > 2:
            x = torch.flatten(x, start_dim=1)
        scores = self.cls_score(x)
        proposal_deltas = self.bbox_pred(x)
        if self.attr_on:
            if self.training:
                assert proposal_boxes is not None, "Proposals are None while attr=True"
                proposals, fg_selection_attributes = select_foreground_proposals(proposal_boxes, self.num_classes)
                attribute_features = x[torch.cat(fg_selection_attributes, dim=0)]
                cls_labels = torch.cat([prop.gt_classes for prop in proposals])
            else:
                # get labels and indices of proposals with foreground
                cls_labels = torch.argmax(scores, dim=1)
                attribute_features = x
            # get embeddings of indices using gt cls labels
            cls_embed_out = self.cls_embed(cls_labels)
            # concat with fc7 feats
            concat_attr = cat([attribute_features, cls_embed_out], dim=1)
            # pass through attr head layers
            fc_attr = self.attr_linear1(concat_attr)
            attr_score = self.attr_linear2(F.relu(fc_attr))
            # gt attributes only exist (and are only defined) in training mode.
            if self.training:
                gt_attrs = cat([p.gt_attributes for p in proposals], dim=0)
            else:
                gt_attrs = None
            return scores, proposal_deltas, attr_score, gt_attrs
        return scores, proposal_deltas
MPMQA | MPMQA-master/detector/bua/caffe/modeling/rpn_outputs.py |
import itertools
import logging
import numpy as np
import torch
import torch.nn.functional as F
from fvcore.nn import smooth_l1_loss
from detectron2.layers import cat
from detectron2.structures import Instances, pairwise_iou
from detectron2.utils.events import get_event_storage
from detectron2.modeling.sampling import subsample_labels
from .box_regression import BUABoxes
from .layers.nms import batched_nms
def find_top_bua_rpn_proposals(
    proposals,
    pred_objectness_logits,
    images,
    nms_thresh,
    pre_nms_topk,
    post_nms_topk,
    min_box_side_len,
    training,
):
    """
    For each feature map, select the `pre_nms_topk` highest scoring proposals,
    apply NMS, clip proposals, and remove small boxes. Return the `post_nms_topk`
    highest scoring proposals among all the feature maps if `training` is True,
    otherwise, returns the highest `post_nms_topk` scoring proposals for each
    feature map.
    Args:
        proposals (list[Tensor]): A list of L tensors. Tensor i has shape (N, Hi*Wi*A, 4).
            All proposal predictions on the feature maps.
        pred_objectness_logits (list[Tensor]): A list of L tensors. Tensor i has shape (N, Hi*Wi*A).
        images (ImageList): Input images as an :class:`ImageList`.
        nms_thresh (float): IoU threshold to use for NMS
        pre_nms_topk (int): number of top k scoring proposals to keep before applying NMS.
            When RPN is run on multiple feature maps (as in FPN) this number is per
            feature map.
        post_nms_topk (int): number of top k scoring proposals to keep after applying NMS.
            When RPN is run on multiple feature maps (as in FPN) this number is total,
            over all feature maps.
        min_box_side_len (float): minimum proposal box side length in pixels (absolute units
            wrt input images).
        training (bool): True if proposals are to be used in training, otherwise False.
            This arg exists only to support a legacy bug; look for the "NB: Legacy bug ..."
            comment.
    Returns:
        proposals (list[Instances]): list of N Instances. The i-th Instances
            stores post_nms_topk object proposals for image i.
    """
    image_sizes = images.image_sizes  # in (h, w) order
    image_scales = images.image_scales
    device = proposals[0].device
    # 1. Concat all levels together
    all_scores = []
    all_proposals = []
    level_ids = []
    for level_id, proposals_i, logits_i in zip(
        itertools.count(), proposals, pred_objectness_logits
    ):
        Hi_Wi_A = logits_i.shape[1]
        all_proposals.append(proposals_i)
        all_scores.append(logits_i)
        # Remember which pyramid level each anchor came from (used by per-level NMS).
        level_ids.append(torch.full((Hi_Wi_A,), level_id, dtype=torch.int64, device=device))
    all_scores = cat(all_scores, dim=1)
    all_proposals = cat(all_proposals, dim=1)
    level_ids = cat(level_ids, dim=0)
    # 2. For each image: clip/filter boxes, choose the pre_nms_topk proposals,
    # run per-level NMS, and keep the post_nms_topk results.
    results = []
    for n, image_size in enumerate(image_sizes):
        boxes = BUABoxes(all_proposals[n])
        scores_per_img = all_scores[n]
        boxes.clip(image_size)
        keep = boxes.filter_boxes()
        boxes = boxes[keep]
        scores_per_img = scores_per_img[keep]
        lvl = level_ids[keep]
        # filter empty boxes (threshold is scaled to the resized image)
        keep = boxes.nonempty(threshold=min_box_side_len*image_scales[n])
        if keep.sum().item() != len(boxes):
            boxes, scores_per_img, lvl = boxes[keep], scores_per_img[keep], lvl[keep]
        # choose the pre_nms_topk highest-scoring proposals
        Hi_Wi_A = scores_per_img.shape[0]
        num_proposals_i = min(pre_nms_topk, Hi_Wi_A)
        scores_per_img, idx = scores_per_img.sort(descending=True, dim=0)
        topk_scores_i = scores_per_img[:num_proposals_i]
        topk_idx = idx[:num_proposals_i]
        topk_boxes_i = boxes[topk_idx, :]
        lvl_i = lvl[topk_idx]
        # NMS is done independently per pyramid level via the lvl_i "category" ids.
        keep = batched_nms(topk_boxes_i.tensor, topk_scores_i, lvl_i, nms_thresh)
        # In Detectron1, there was different behavior during training vs. testing.
        # During training, topk is over the proposals from *all* images in the training batch.
        # During testing, it is over the proposals for each image separately.
        # As a result, the training behavior becomes batch-dependent,
        # and the configuration "POST_NMS_TOPK_TRAIN" end up relying on the batch size.
        # This bug is addressed in Detectron2 to make the behavior independent of batch size.
        keep = keep[:post_nms_topk]
        res = Instances(image_size)
        res.proposal_boxes = topk_boxes_i[keep]
        res.objectness_logits = topk_scores_i[keep]
        results.append(res)
    return results
class BUARPNOutputs(object):
    """
    Bundles the RPN's per-batch predictions with the machinery to compute
    training losses (:meth:`losses`) and to decode proposals for the ROI heads
    (:meth:`predict_proposals`, :meth:`predict_objectness_logits`).

    Unlike stock Detectron2, objectness here uses a 2-channel-per-anchor
    softmax formulation (see the /2 in :meth:`losses` and the softmax in
    :meth:`predict_objectness_logits`).
    """
    def __init__(
        self,
        box2box_transform,
        anchor_matcher,
        batch_size_per_image,
        positive_fraction,
        images,
        pred_objectness_logits,
        pred_anchor_deltas,
        anchors,
        boundary_threshold=0,
        gt_boxes=None,
        smooth_l1_beta=0.0,
    ):
        """
        Args:
            box2box_transform (Box2BoxTransform): :class:`Box2BoxTransform` instance for
                anchor-proposal transformations.
            anchor_matcher (Matcher): :class:`Matcher` instance for matching anchors to
                ground-truth boxes; used to determine training labels.
            batch_size_per_image (int): number of proposals to sample when training
            positive_fraction (float): target fraction of sampled proposals that should be positive
            images (ImageList): :class:`ImageList` instance representing N input images
            pred_objectness_logits (list[Tensor]): A list of L elements.
                Element i is a tensor of shape (N, A, Hi, Wi) representing
                the predicted objectness logits for anchors.
            pred_anchor_deltas (list[Tensor]): A list of L elements. Element i is a tensor of shape
                (N, A*4, Hi, Wi) representing the predicted "deltas" used to transform anchors
                to proposals.
            anchors (list[list[Boxes]]): A list of N elements. Each element is a list of L
                Boxes. The Boxes at (n, l) stores the entire anchor array for feature map l in image
                n (i.e. the cell anchors repeated over all locations in feature map (n, l)).
            boundary_threshold (int): if >= 0, then anchors that extend beyond the image
                boundary by more than boundary_thresh are not used in training. Set to a very large
                number or < 0 to disable this behavior. Only needed in training.
            gt_boxes (list[Boxes], optional): A list of N elements. Element i a Boxes storing
                the ground-truth ("gt") boxes for image i.
            smooth_l1_beta (float): The transition point between L1 and L2 loss in
                the smooth L1 loss function. When set to 0, the loss becomes L1. When
                set to +inf, the loss becomes constant 0.
        """
        self.box2box_transform = box2box_transform
        self.anchor_matcher = anchor_matcher
        self.batch_size_per_image = batch_size_per_image
        self.positive_fraction = positive_fraction
        self.pred_objectness_logits = pred_objectness_logits
        self.pred_anchor_deltas = pred_anchor_deltas
        self.anchors = anchors
        self.gt_boxes = gt_boxes
        self.num_feature_maps = len(pred_objectness_logits)
        self.num_images = len(images)
        self.image_sizes = images.image_sizes
        self.boundary_threshold = boundary_threshold
        self.smooth_l1_beta = smooth_l1_beta
    def _get_ground_truth(self):
        """
        Match anchors to ground-truth boxes to produce training targets.
        Returns:
            gt_objectness_logits: list of N tensors. Tensor i is a vector whose length is the
                total number of anchors in image i (i.e., len(anchors[i])). Label values are
                in {-1, 0, 1}, with meanings: -1 = ignore; 0 = negative class; 1 = positive class.
            gt_anchor_deltas: list of N tensors. Tensor i has shape (len(anchors[i]), 4).
        """
        gt_objectness_logits = []
        gt_anchor_deltas = []
        # Concatenate anchors from all feature maps into a single Boxes per image
        anchors = [BUABoxes.cat(anchors_i) for anchors_i in self.anchors]
        for image_size_i, anchors_i, gt_boxes_i in zip(self.image_sizes, anchors, self.gt_boxes):
            """
            image_size_i: (h, w) for the i-th image
            anchors_i: anchors for i-th image
            gt_boxes_i: ground-truth boxes for i-th image
            """
            match_quality_matrix = pairwise_iou(gt_boxes_i, anchors_i)
            matched_idxs, gt_objectness_logits_i = self.anchor_matcher(match_quality_matrix)
            if self.boundary_threshold >= 0:
                # Discard anchors that go out of the boundaries of the image
                # NOTE: This is legacy functionality that is turned off by default in Detectron2
                anchors_inside_image = anchors_i.inside_box(image_size_i, self.boundary_threshold)
                gt_objectness_logits_i[~anchors_inside_image] = -1
            if len(gt_boxes_i) == 0:
                # These values won't be used anyway since the anchor is labeled as background
                gt_anchor_deltas_i = torch.zeros_like(anchors_i.tensor)
            else:
                # TODO wasted computation for ignored boxes
                matched_gt_boxes = gt_boxes_i[matched_idxs]
                gt_anchor_deltas_i = self.box2box_transform.get_deltas(
                    anchors_i.tensor, matched_gt_boxes.tensor
                )
            gt_objectness_logits.append(gt_objectness_logits_i)
            gt_anchor_deltas.append(gt_anchor_deltas_i)
        return gt_objectness_logits, gt_anchor_deltas
    def losses(self):
        """
        Return the losses from a set of RPN predictions and their associated ground-truth.
        Returns:
            dict[loss name -> loss value]: A dict mapping from loss name to loss value.
                Loss names are: `loss_rpn_cls` for objectness classification and
                `loss_rpn_loc` for proposal localization.
        """
        def resample(label):
            """
            Randomly sample a subset of positive and negative examples by overwriting
            the label vector to the ignore value (-1) for all elements that are not
            included in the sample.
            """
            pos_idx, neg_idx = subsample_labels(
                label, self.batch_size_per_image, self.positive_fraction, 0
            )
            # Fill with the ignore label (-1), then set positive and negative labels
            label.fill_(-1)
            label.scatter_(0, pos_idx, 1)
            label.scatter_(0, neg_idx, 0)
            return label
        gt_objectness_logits, gt_anchor_deltas = self._get_ground_truth()
        """
        gt_objectness_logits: list of N tensors. Tensor i is a vector whose length is the
            total number of anchors in image i (i.e., len(anchors[i]))
        gt_anchor_deltas: list of N tensors. Tensor i has shape (len(anchors[i]), B),
            where B is the box dimension
        """
        # Collect all objectness labels and delta targets over feature maps and images
        # The final ordering is L, N, H, W, A from slowest to fastest axis.
        # NOTE: the logits carry 2 channels per anchor (softmax objectness),
        # hence the division by 2 when counting anchors per feature map.
        num_anchors_per_map = [int(np.prod(x.shape[1:])/2) for x in self.pred_objectness_logits]
        num_anchors_per_image = sum(num_anchors_per_map)
        # Stack to: (N, num_anchors_per_image)
        gt_objectness_logits = torch.stack(
            [resample(label) for label in gt_objectness_logits], dim=0
        )
        # Log the number of positive/negative anchors per-image that's used in training
        num_pos_anchors = (gt_objectness_logits == 1).sum().item()
        num_neg_anchors = (gt_objectness_logits == 0).sum().item()
        storage = get_event_storage()
        storage.put_scalar("rpn/num_pos_anchors", num_pos_anchors / self.num_images)
        storage.put_scalar("rpn/num_neg_anchors", num_neg_anchors / self.num_images)
        assert gt_objectness_logits.shape[1] == num_anchors_per_image
        # Split to tuple of L tensors, each with shape (N, num_anchors_per_map)
        gt_objectness_logits = torch.split(gt_objectness_logits, num_anchors_per_map, dim=1)
        # Concat from all feature maps
        gt_objectness_logits = cat([x.flatten() for x in gt_objectness_logits], dim=0)
        # Stack to: (N, num_anchors_per_image, B)
        gt_anchor_deltas = torch.stack(gt_anchor_deltas, dim=0)
        assert gt_anchor_deltas.shape[1] == num_anchors_per_image
        B = gt_anchor_deltas.shape[2]  # box dimension (4 or 5)
        # Split to tuple of L tensors, each with shape (N, num_anchors_per_image)
        gt_anchor_deltas = torch.split(gt_anchor_deltas, num_anchors_per_map, dim=1)
        # Concat from all feature maps
        gt_anchor_deltas = cat([x.reshape(-1, B) for x in gt_anchor_deltas], dim=0)
        # Collect all objectness logits and delta predictions over feature maps
        # and images to arrive at the same shape as the labels and targets
        # The final ordering is L, N, H, W, 2A from slowest to fastest axis.
        pred_objectness_logits = cat(
            [
                # Reshape: (N, 2A, Hi, Wi) -> (N, Hi, Wi, 2A) -> (N*Hi*Wi*A, 2)
                x.permute(0, 2, 3, 1).reshape(-1, 2)
                for x in self.pred_objectness_logits
            ],
            dim=0,
        )
        pred_anchor_deltas = cat(
            [
                # Reshape: (N, A*B, Hi, Wi) -> (N, A, B, Hi, Wi) -> (N, Hi, Wi, A, B)
                # -> (N*Hi*Wi*A, B)
                x.view(x.shape[0], -1, B, x.shape[-2], x.shape[-1])
                .permute(0, 3, 4, 1, 2)
                .reshape(-1, B)
                for x in self.pred_anchor_deltas
            ],
            dim=0,
        )
        objectness_loss, localization_loss = bua_rpn_losses(
            gt_objectness_logits,
            gt_anchor_deltas,
            pred_objectness_logits,
            pred_anchor_deltas,
            self.smooth_l1_beta,
        )
        # Normalize both sums by the number of sampled anchors across the batch.
        normalizer = 1.0 / (self.batch_size_per_image * self.num_images)
        loss_cls = objectness_loss * normalizer  # cls: classification loss
        loss_loc = localization_loss * normalizer  # loc: localization loss
        losses = {"loss_rpn_cls": loss_cls, "loss_rpn_loc": loss_loc}
        return losses
    def predict_proposals(self):
        """
        Transform anchors into proposals by applying the predicted anchor deltas.
        Returns:
            proposals (list[Tensor]): A list of L tensors. Tensor i has shape
                (N, Hi*Wi*A, B), where B is box dimension (4 or 5).
        """
        proposals = []
        # Transpose anchors from images-by-feature-maps (N, L) to feature-maps-by-images (L, N)
        anchors = list(zip(*self.anchors))
        # For each feature map
        for anchors_i, pred_anchor_deltas_i in zip(anchors, self.pred_anchor_deltas):
            B = anchors_i[0].tensor.size(1)
            N, _, Hi, Wi = pred_anchor_deltas_i.shape
            # Reshape: (N, A*B, Hi, Wi) -> (N, A, B, Hi, Wi) -> (N, Hi, Wi, A, B) -> (N*Hi*Wi*A, B)
            pred_anchor_deltas_i = (
                pred_anchor_deltas_i.view(N, -1, B, Hi, Wi).permute(0, 3, 4, 1, 2).reshape(-1, B)
            )
            # Concatenate all anchors to shape (N*Hi*Wi*A, B)
            # type(anchors_i[0]) is Boxes (B = 4) or RotatedBoxes (B = 5)
            anchors_i = type(anchors_i[0]).cat(anchors_i)
            proposals_i = self.box2box_transform.apply_deltas(
                pred_anchor_deltas_i, anchors_i.tensor
            )
            # Append feature map proposals with shape (N, Hi*Wi*A, B)
            proposals.append(proposals_i.view(N, -1, B))
        return proposals
    def predict_objectness_logits(self):
        """
        Return objectness logits in the same format as the proposals returned by
        :meth:`predict_proposals`.

        The 2-channel softmax is applied and only the foreground channel is kept.
        Returns:
            pred_objectness_logits (list[Tensor]): A list of L tensors. Tensor i has shape
                (N, Hi*Wi*A).
        """
        pred_objectness_logits = [
            # Reshape: (N, 2A, Hi, Wi) -> (N, 2, A, Hi, Wi) -> (N, Hi, Wi, 1, A) -> (N, Hi*Wi*A)
            F.softmax(score.view(score.shape[0], 2, int(float(score.shape[1]) / float(2)), score.shape[2], score.shape[3]), dim=1)[:, 1:, :, :, :]\
            .permute(0, 3, 4, 1, 2).reshape(self.num_images, -1)
            for score in self.pred_objectness_logits
        ]
        return pred_objectness_logits
def bua_rpn_losses(
    gt_objectness_logits,
    gt_anchor_deltas,
    pred_objectness_logits,
    pred_anchor_deltas,
    smooth_l1_beta,
):
    """
    Compute the (unnormalized) RPN objectness and localization losses.
    Args:
        gt_objectness_logits (Tensor): shape (N,), each element in {-1, 0, 1} representing
            ground-truth objectness labels with: -1 = ignore; 0 = not object; 1 = object.
        gt_anchor_deltas (Tensor): shape (N, box_dim), row i represents ground-truth
            box2box transform targets (dx, dy, dw, dh) or (dx, dy, dw, dh, da) that map anchor i to
            its matched ground-truth box.
        pred_objectness_logits (Tensor): shape (N, 2), each element is a predicted objectness
            logit (background/foreground pair for the softmax formulation).
        pred_anchor_deltas (Tensor): shape (N, box_dim), each row is a predicted box2box
            transform (dx, dy, dw, dh) or (dx, dy, dw, dh, da)
        smooth_l1_beta (float): The transition point between L1 and L2 loss in
            the smooth L1 loss function. When set to 0, the loss becomes L1. When
            set to +inf, the loss becomes constant 0.
    Returns:
        objectness_loss, localization_loss, both unnormalized (summed over samples).
    """
    # Localization loss is only defined on anchors labeled foreground (1).
    pos_masks = gt_objectness_logits == 1
    localization_loss = smooth_l1_loss(
        pred_anchor_deltas[pos_masks], gt_anchor_deltas[pos_masks], smooth_l1_beta, reduction="sum"
    )
    # Anchors labeled -1 are ignored; labels 0/1 take part in the objectness loss.
    valid_masks = gt_objectness_logits >= 0
    objectness_loss = F.cross_entropy(
        pred_objectness_logits[valid_masks],
        gt_objectness_logits[valid_masks].to(torch.long),
        reduction="sum",
    )
    return objectness_loss, localization_loss
MPMQA | MPMQA-master/detector/bua/caffe/modeling/rcnn.py |
import logging, os
import torch
from torch import nn
import torch.nn.functional as F
from detectron2.structures import ImageList
from detectron2.utils.logger import log_first_n
from detectron2.modeling.backbone import build_backbone
from detectron2.modeling.postprocessing import detector_postprocess
from detectron2.modeling.proposal_generator import build_proposal_generator
from detectron2.modeling.roi_heads import build_roi_heads
from detectron2.modeling.meta_arch import META_ARCH_REGISTRY
# from models.bua_caffe.postprocessing import extractor_postprocess
#from utils import save_features
__all__ = ["GeneralizedBUARCNN"]
@META_ARCH_REGISTRY.register()
class GeneralizedBUARCNN(nn.Module):
    """
    Generalized R-CNN. Any models that contains the following three components:
    1. Per-image feature extraction (aka backbone)
    2. Region proposal generation
    3. Per-region feature extraction and prediction
    """
    def __init__(self, cfg):
        super().__init__()
        self.device = torch.device(cfg.MODEL.DEVICE)
        # True when running weights converted from the original Caffe release;
        # in that mode inference skips Detectron2's detector_postprocess.
        self.bua_caffe = cfg.MODEL.BUA.CAFFE
        self.resnet_version = cfg.MODEL.BUA.RESNET_VERSION
        self.backbone = build_backbone(cfg)
        self.in_features = cfg.MODEL.RPN.IN_FEATURES
        self.proposal_generator = build_proposal_generator(cfg, self.backbone.output_shape())
        self.roi_heads = build_roi_heads(cfg, self.backbone.output_shape())
        assert len(cfg.MODEL.PIXEL_MEAN) == len(cfg.MODEL.PIXEL_STD)
        # When set, inference returns raw ROI features instead of detections.
        self.extract_on = cfg.MODEL.BUA.EXTRACT_FEATS
        self.extractor = cfg.MODEL.BUA.EXTRACTOR
        self.to(self.device)
    def forward(self, batched_inputs):
        """
        Args:
            batched_inputs: a list, batched outputs of :class:`DatasetMapper` .
                Each item in the list contains the inputs for one image.
                For now, each item in the list is a dict that contains:
                * image: Tensor, image in (C, H, W) format.
                * instances (optional): groundtruth :class:`Instances`
                * proposals (optional): :class:`Instances`, precomputed proposals.
                Other information that's included in the original dicts, such as:
                * "height", "width" (int): the output resolution of the model, used in inference.
                  See :meth:`postprocess` for details.
        Returns:
            list[dict]:
                Each dict is the output for one input image.
                The dict contains one key "instances" whose value is a :class:`Instances`.
                The :class:`Instances` object has the following keys:
                "pred_boxes", "pred_classes", "scores", "pred_masks", "pred_keypoints"
        """
        if not self.training:
            return self.inference(batched_inputs)
        images = self.preprocess_image(batched_inputs)
        if "instances" in batched_inputs[0]:
            gt_instances = [x["instances"].to(self.device) for x in batched_inputs]
        elif "targets" in batched_inputs[0]:
            # Backward-compat path for the old "targets" key.
            log_first_n(
                logging.WARN, "'targets' in the model inputs is now renamed to 'instances'!", n=10
            )
            gt_instances = [x["targets"].to(self.device) for x in batched_inputs]
        else:
            gt_instances = None
        features = self.backbone(images.tensor)
        if self.resnet_version == 2:
            # ResNet v2 style: apply res5's norm + ReLU to the backbone outputs
            # before they reach the proposal generator / ROI heads.
            for f in features:
                out = self.roi_heads.res5[0].norm(features[f])
                features[f] = F.relu_(out)
        if self.proposal_generator:
            proposals, proposal_losses = self.proposal_generator(images, features, gt_instances)
        else:
            # Use precomputed proposals when no proposal generator is configured.
            assert "proposals" in batched_inputs[0]
            proposals = [x["proposals"].to(self.device) for x in batched_inputs]
            proposal_losses = {}
        _, detector_losses = self.roi_heads(images, features, proposals, gt_instances)
        losses = {}
        losses.update(detector_losses)
        losses.update(proposal_losses)
        return losses
    def inference(self, batched_inputs, detected_instances=None, do_postprocess=True):
        """
        Run inference on the given inputs.
        Args:
            batched_inputs (list[dict]): same as in :meth:`forward`
            detected_instances (None or list[Instances]): if not None, it
                contains an `Instances` object per image. The `Instances`
                object contains "pred_boxes" and "pred_classes" which are
                known boxes in the image.
                The inference will then skip the detection of bounding boxes,
                and only predict other per-ROI outputs.
            do_postprocess (bool): whether to apply post-processing on the outputs.
        Returns:
            same as in :meth:`forward`.
        """
        assert not self.training
        images = self.preprocess_image(batched_inputs)
        features = self.backbone(images.tensor)
        if self.resnet_version == 2:
            # Same ResNet v2 norm + ReLU fixup as in :meth:`forward`.
            for f in features:
                out = self.roi_heads.res5[0].norm(features[f])
                features[f] = F.relu_(out)
        if detected_instances is None:
            if self.proposal_generator:
                proposals, _ = self.proposal_generator(images, features, None)
            else:
                assert "proposals" in batched_inputs[0]
                proposals = [x["proposals"].to(self.device) for x in batched_inputs]
            if self.extract_on:
                # Feature-extraction mode: return ROI features directly.
                return self.roi_heads(images, features, proposals, None)
            else:
                results, _ = self.roi_heads(images, features, proposals, None)
        else:
            # Boxes are given; only compute the per-ROI outputs.
            detected_instances = [x.to(self.device) for x in detected_instances]
            results = self.roi_heads.forward_with_given_boxes(features, detected_instances)
        if do_postprocess:
            processed_results = []
            for results_per_image, input_per_image, image_size in zip(
                results, batched_inputs, images.image_sizes
            ):
                height = input_per_image.get("height", image_size[0])
                width = input_per_image.get("width", image_size[1])
                if not self.bua_caffe:
                    # Rescale outputs to the original image resolution.
                    results_per_image = detector_postprocess(results_per_image, height, width)
                processed_results.append({"instances": results_per_image})
            return processed_results
        else:
            return results
    def preprocess_image(self, batched_inputs):
        """
        Normalize, pad and batch the input images.
        """
        images = [x["image"].to(self.device) for x in batched_inputs]
        # Keep the resize scale of each image; proposal filtering uses it.
        image_scales = [x["im_scale"] for x in batched_inputs]
        images = ImageList.from_tensors(images, self.backbone.size_divisibility)
        images.image_scales = image_scales
        return images
| 6,893 | 39.552941 | 98 | py |
MPMQA | MPMQA-master/detector/bua/caffe/modeling/rpn.py |
from typing import Dict, List
import torch
import torch.nn as nn
import torch.nn.functional as F
from detectron2.modeling import RPN_HEAD_REGISTRY
from detectron2.layers import ShapeSpec
from detectron2.modeling.proposal_generator import build_rpn_head
from detectron2.modeling.proposal_generator.build import PROPOSAL_GENERATOR_REGISTRY
from detectron2.modeling.anchor_generator import build_anchor_generator
from .box_regression import BUABox2BoxTransform
from detectron2.modeling.matcher import Matcher
from .rpn_outputs import BUARPNOutputs, find_top_bua_rpn_proposals
import copy
@RPN_HEAD_REGISTRY.register()
class StandardBUARPNHead(nn.Module):
    """
    RPN head with a shared 3x3 conv producing a hidden state, followed by two
    sibling 1x1 convs: one predicting objectness logits (two channels per
    anchor, for the softmax formulation) and one predicting box2box deltas
    that deform each anchor into an object proposal.
    """
    def __init__(self, cfg, input_shape: List[ShapeSpec]):
        super().__init__()
        # The standard RPN head is shared across all feature levels.
        hidden_channels = cfg.MODEL.BUA.RPN.CONV_OUT_CHANNELS
        channel_counts = [s.channels for s in input_shape]
        assert len(set(channel_counts)) == 1, "Each level must have the same channel!"
        feat_channels = channel_counts[0]
        # The head must see exactly the anchor layout the anchor generator uses.
        # NOTE: building the generator here assumes construction has no side effects.
        anchor_generator = build_anchor_generator(cfg, input_shape)
        anchors_per_cell = anchor_generator.num_cell_anchors
        box_dim = anchor_generator.box_dim
        assert (
            len(set(anchors_per_cell)) == 1
        ), "Each level must have the same number of cell anchors"
        anchors_per_cell = anchors_per_cell[0]
        # Shared 3x3 conv for the hidden representation.
        self.conv = nn.Conv2d(feat_channels, hidden_channels, kernel_size=3, stride=1, padding=1)
        # 1x1 conv emitting two objectness channels per anchor.
        self.objectness_logits = nn.Conv2d(hidden_channels, anchors_per_cell * 2, kernel_size=1, stride=1)
        # 1x1 conv emitting box_dim delta channels per anchor.
        self.anchor_deltas = nn.Conv2d(
            hidden_channels, anchors_per_cell * box_dim, kernel_size=1, stride=1
        )
        for layer in [self.conv, self.objectness_logits, self.anchor_deltas]:
            nn.init.normal_(layer.weight, std=0.01)
            nn.init.constant_(layer.bias, 0)
    def forward(self, features):
        """
        Args:
            features (list[Tensor]): list of feature maps
        Returns:
            (list[Tensor], list[Tensor]): per-level objectness logits and
            per-level anchor deltas.
        """
        hidden = [F.relu(self.conv(fmap)) for fmap in features]
        pred_objectness_logits = [self.objectness_logits(t) for t in hidden]
        pred_anchor_deltas = [self.anchor_deltas(t) for t in hidden]
        return pred_objectness_logits, pred_anchor_deltas
@PROPOSAL_GENERATOR_REGISTRY.register()
class BUARPN(nn.Module):
    """
    Region Proposal Network, introduced by the Faster R-CNN paper.

    Bottom-up-attention (BUA) variant: uses a BUA-specific RPN head,
    ``BUABox2BoxTransform`` and ``BUARPNOutputs`` for loss/proposal decoding.
    """
    def __init__(self, cfg, input_shape: Dict[str, ShapeSpec]):
        super().__init__()
        # fmt: off
        self.min_box_side_len = cfg.MODEL.PROPOSAL_GENERATOR.MIN_SIZE
        self.in_features = cfg.MODEL.RPN.IN_FEATURES
        self.nms_thresh = cfg.MODEL.RPN.NMS_THRESH
        self.batch_size_per_image = cfg.MODEL.RPN.BATCH_SIZE_PER_IMAGE
        self.positive_fraction = cfg.MODEL.RPN.POSITIVE_FRACTION
        self.smooth_l1_beta = cfg.MODEL.RPN.SMOOTH_L1_BETA
        self.loss_weight = cfg.MODEL.RPN.LOSS_WEIGHT
        # fmt: on
        # Map from self.training state to train/test settings
        self.pre_nms_topk = {
            True: cfg.MODEL.RPN.PRE_NMS_TOPK_TRAIN,
            False: cfg.MODEL.RPN.PRE_NMS_TOPK_TEST,
        }
        self.post_nms_topk = {
            True: cfg.MODEL.RPN.POST_NMS_TOPK_TRAIN,
            False: cfg.MODEL.RPN.POST_NMS_TOPK_TEST,
        }
        self.boundary_threshold = cfg.MODEL.RPN.BOUNDARY_THRESH
        self.anchor_generator = build_anchor_generator(
            cfg, [input_shape[f] for f in self.in_features]
        )
        self.box2box_transform = BUABox2BoxTransform(weights=cfg.MODEL.RPN.BBOX_REG_WEIGHTS)
        self.anchor_matcher = Matcher(
            cfg.MODEL.RPN.IOU_THRESHOLDS, cfg.MODEL.RPN.IOU_LABELS, allow_low_quality_matches=True
        )
        self.rpn_head = build_rpn_head(cfg, [input_shape[f] for f in self.in_features])
    def forward(self, images, features, gt_instances=None):
        """
        Args:
            images (ImageList): input images of length `N`
            features (dict[str: Tensor]): input data as a mapping from feature
                map name to tensor. Axis 0 represents the number of images `N` in
                the input data; axes 1-3 are channels, height, and width, which may
                vary between feature maps (e.g., if a feature pyramid is used).
            gt_instances (list[Instances], optional): a length `N` list of `Instances`s.
                Each `Instances` stores ground-truth instances for the corresponding image.
        Returns:
            proposals: list[Instances] or None
            loss: dict[Tensor] (empty dict at inference time)
        """
        gt_boxes = [x.gt_boxes for x in gt_instances] if gt_instances is not None else None
        del gt_instances
        features = [features[f] for f in self.in_features]
        pred_objectness_logits, pred_anchor_deltas = self.rpn_head(features)
        anchors_in_image = self.anchor_generator(features)
        # One copy of the per-level anchors per image in the batch
        # (len(features[0]) == N).
        # TODO: The anchors only depend on the feature map shape; there's probably
        # an opportunity for some optimizations (e.g., caching anchors).
        anchors = [copy.deepcopy(anchors_in_image) for _ in range(len(features[0]))]
        outputs = BUARPNOutputs(
            self.box2box_transform,
            self.anchor_matcher,
            self.batch_size_per_image,
            self.positive_fraction,
            images,
            pred_objectness_logits,
            pred_anchor_deltas,
            anchors,
            self.boundary_threshold,
            gt_boxes,
            self.smooth_l1_beta,
        )
        if self.training:
            losses = {k: v * self.loss_weight for k, v in outputs.losses().items()}
        else:
            losses = {}
        with torch.no_grad():
            # Find the top proposals by applying NMS and removing boxes that
            # are too small. The proposals are treated as fixed for approximate
            # joint training with roi heads. This approach ignores the derivative
            # w.r.t. the proposal boxes' coordinates that are also network
            # responses, so is approximate.
            proposals = find_top_bua_rpn_proposals(
                outputs.predict_proposals(),
                outputs.predict_objectness_logits(),
                images,
                self.nms_thresh,
                self.pre_nms_topk[self.training],
                self.post_nms_topk[self.training],
                self.min_box_side_len,
                self.training,
            )
            # For RPN-only models, the proposals are the final output and we return them in
            # high-to-low confidence order.
            # For end-to-end models, the RPN proposals are an intermediate state
            # and this sorting is actually not needed. But the cost is negligible.
            # inds = [p.objectness_logits.sort(descending=True)[1] for p in proposals]
            # proposals = [p[ind] for p, ind in zip(proposals, inds)]
        return proposals, losses
import torch
import torch.functional as F
from detectron2.layers import ShapeSpec, batched_nms, cat, cross_entropy, nonzero_tuple
from fvcore.nn import giou_loss, smooth_l1_loss
from detectron2.modeling.box_regression import Box2BoxTransform
from detectron2.structures import Boxes
class FastRCNNOutputs:
    """
    An internal implementation that stores information about outputs of a Fast R-CNN head,
    and provides methods that are used to decode the outputs of a Fast R-CNN head.
    """
    def __init__(
        self,
        box2box_transform,
        pred_class_logits,
        pred_proposal_deltas,
        proposals,
        smooth_l1_beta=0.0,
        box_reg_loss_type="smooth_l1",
    ):
        """
        Args:
            box2box_transform (Box2BoxTransform/Box2BoxTransformRotated):
                box2box transform instance for proposal-to-detection transformations.
            pred_class_logits (Tensor): A tensor of shape (R, K + 1) storing the predicted class
                logits for all R predicted object instances.
                Each row corresponds to a predicted object instance.
            pred_proposal_deltas (Tensor): A tensor of shape (R, K * B) or (R, B) for
                class-specific or class-agnostic regression. It stores the predicted deltas that
                transform proposals into final box detections.
                B is the box dimension (4 or 5).
                When B is 4, each row is [dx, dy, dw, dh (, ....)].
                When B is 5, each row is [dx, dy, dw, dh, da (, ....)].
            proposals (list[Instances]): A list of N Instances, where Instances i stores the
                proposals for image i, in the field "proposal_boxes".
                When training, each Instances must have ground-truth labels
                stored in the field "gt_classes" and "gt_boxes".
                The total number of all instances must be equal to R.
            smooth_l1_beta (float): The transition point between L1 and L2 loss in
                the smooth L1 loss function. When set to 0, the loss becomes L1. When
                set to +inf, the loss becomes constant 0.
            box_reg_loss_type (str): Box regression loss type. One of: "smooth_l1", "giou"
        """
        self.box2box_transform = box2box_transform
        self.num_preds_per_image = [len(p) for p in proposals]
        self.pred_class_logits = pred_class_logits
        self.pred_proposal_deltas = pred_proposal_deltas
        self.smooth_l1_beta = smooth_l1_beta
        self.box_reg_loss_type = box_reg_loss_type
        self.image_shapes = [x.image_size for x in proposals]
        if len(proposals):
            box_type = type(proposals[0].proposal_boxes)
            # cat(..., dim=0) concatenates over all images in the batch
            self.proposals = box_type.cat([p.proposal_boxes for p in proposals])
            assert (
                not self.proposals.tensor.requires_grad
            ), "Proposals should not require gradients!"
            # "gt_classes" exists if and only if training. But other gt fields may
            # not necessarily exist in training for images that have no groundtruth.
            if proposals[0].has("gt_classes"):
                self.gt_classes = cat([p.gt_classes for p in proposals], dim=0)
                # If "gt_boxes" does not exist, the proposals must be all negative and
                # should not be included in regression loss computation.
                # Here we just use proposal_boxes as an arbitrary placeholder because its
                # value won't be used in self.box_reg_loss().
                gt_boxes = [
                    p.gt_boxes if p.has("gt_boxes") else p.proposal_boxes for p in proposals
                ]
                self.gt_boxes = box_type.cat(gt_boxes)
        else:
            self.proposals = Boxes(torch.zeros(0, 4, device=self.pred_proposal_deltas.device))
        self._no_instances = len(self.proposals) == 0  # no instances found
    def softmax_cross_entropy_loss(self):
        """
        Deprecated. Mean cross-entropy over all R predictions.
        """
        # NOTE: the original called ``_log_classification_stats(...)`` here, but
        # that helper is neither defined nor imported in this module, so the
        # call raised ``NameError``. The logging call has been removed.
        return cross_entropy(self.pred_class_logits, self.gt_classes, reduction="mean")
    def box_reg_loss(self):
        """
        Deprecated. Box regression loss (smooth L1 or GIoU), normalized by the
        total number of regions R (``self.gt_classes.numel()``).
        """
        if self._no_instances:
            # Keep the graph connected so DDP does not complain about unused params.
            return 0.0 * self.pred_proposal_deltas.sum()
        box_dim = self.proposals.tensor.size(1)  # 4 or 5
        cls_agnostic_bbox_reg = self.pred_proposal_deltas.size(1) == box_dim
        device = self.pred_proposal_deltas.device
        bg_class_ind = self.pred_class_logits.shape[1] - 1
        # Box delta loss is only computed between the prediction for the gt class k
        # (if 0 <= k < bg_class_ind) and the target; there is no loss defined on predictions
        # for non-gt classes and background.
        # Empty fg_inds should produce a valid loss of zero because reduction=sum.
        fg_inds = nonzero_tuple((self.gt_classes >= 0) & (self.gt_classes < bg_class_ind))[0]
        if cls_agnostic_bbox_reg:
            # pred_proposal_deltas only corresponds to foreground class for agnostic
            gt_class_cols = torch.arange(box_dim, device=device)
        else:
            # pred_proposal_deltas for class k are located in columns [b * k : b * k + b],
            # where b is the dimension of box representation (4 or 5)
            # Note that compared to Detectron1,
            # we do not perform bounding box regression for background classes.
            gt_class_cols = box_dim * self.gt_classes[fg_inds, None] + torch.arange(
                box_dim, device=device
            )
        if self.box_reg_loss_type == "smooth_l1":
            gt_proposal_deltas = self.box2box_transform.get_deltas(
                self.proposals.tensor, self.gt_boxes.tensor
            )
            loss_box_reg = smooth_l1_loss(
                self.pred_proposal_deltas[fg_inds[:, None], gt_class_cols],
                gt_proposal_deltas[fg_inds],
                self.smooth_l1_beta,
                reduction="sum",
            )
        elif self.box_reg_loss_type == "giou":
            fg_pred_boxes = self.box2box_transform.apply_deltas(
                self.pred_proposal_deltas[fg_inds[:, None], gt_class_cols],
                self.proposals.tensor[fg_inds],
            )
            loss_box_reg = giou_loss(
                fg_pred_boxes,
                self.gt_boxes.tensor[fg_inds],
                reduction="sum",
            )
        else:
            raise ValueError(f"Invalid bbox reg loss type '{self.box_reg_loss_type}'")
        # Normalize by the total number of regions (fg + bg), not just fg.
        loss_box_reg = loss_box_reg / self.gt_classes.numel()
        return loss_box_reg
    def losses(self):
        """
        Deprecated. Returns both Fast R-CNN losses keyed by name.
        """
        return {"loss_cls": self.softmax_cross_entropy_loss(), "loss_box_reg": self.box_reg_loss()}
    def predict_boxes(self):
        """
        Deprecated. Decoded boxes of shape (Ri, K * B), split per image.
        """
        pred = self.box2box_transform.apply_deltas(self.pred_proposal_deltas, self.proposals.tensor)
        return pred.split(self.num_preds_per_image, dim=0)
    def predict_probs(self):
        """
        Deprecated. Class probabilities of shape (Ri, K + 1), split per image.
        """
        # BUGFIX: the module-level ``F`` is bound to ``torch.functional`` (see the
        # imports), which has no ``softmax``; call ``torch.nn.functional``
        # explicitly instead.
        probs = torch.nn.functional.softmax(self.pred_class_logits, dim=-1)
        return probs.split(self.num_preds_per_image, dim=0)
# -*- coding: utf-8 -*-
import numpy as np
import torch
import torch.nn as nn
from torch.nn import functional as F
from detectron2.utils.events import get_event_storage
from detectron2.modeling import ROI_HEADS_REGISTRY, ROIHeads
from detectron2.structures import Boxes, Instances, pairwise_iou
from detectron2.modeling.sampling import subsample_labels
from detectron2.modeling.poolers import ROIPooler
from detectron2.modeling.backbone.resnet import BottleneckBlock
from detectron2.modeling.proposal_generator.proposal_utils import add_ground_truth_to_proposals
from detectron2.layers import get_norm, BatchNorm2d
from .fast_rcnn import BUACaffeFastRCNNOutputs, BUACaffeFastRCNNOutputLayers, BUADetection2FastRCNNOutputs, BUADetectron2FastRCNNOutputLayers
from .box_regression import BUABox2BoxTransform
from .backbone import BottleneckBlockv2
def make_stage(block_class, num_blocks, first_stride, **kwargs):
    """
    Create a resnet stage by creating many blocks.

    Args:
        block_class (class): a subclass of ResNetBlockBase
        num_blocks (int): number of blocks in the stage.
        first_stride (int): the stride of the first block. The other blocks will
            have stride=1. Ignored (forced to 1) when ``kwargs["dilation"] > 1``.
        kwargs: other arguments passed to the block constructor; must include
            "dilation", "in_channels" and "out_channels".

    Returns:
        list[nn.Module]: a list of block module.
    """
    stage = []
    for block_index in range(num_blocks):
        # Dilated stages never downsample, regardless of first_stride.
        if kwargs["dilation"] > 1:
            first_stride = 1
        stride = first_stride if block_index == 0 else 1
        stage.append(block_class(stride=stride, **kwargs))
        # Subsequent blocks consume the previous block's output channels.
        kwargs["in_channels"] = kwargs["out_channels"]
    return stage
@ROI_HEADS_REGISTRY.register()
class BUACaffeRes5ROIHeads(ROIHeads):
    """
    The ROIHeads in a typical "C4" R-CNN model, where
    the box and mask head share the cropping and
    the per-region feature computation by a Res5 block.

    Caffe-weights variant of the bottom-up-attention heads: besides standard
    detection it can optionally predict per-box attributes (``attr_on``) and
    return pooled res5 features for feature extraction (``extract_on``).
    """
    def __init__(self, cfg, input_shape):
        # super().__init__(cfg, input_shape)
        super().__init__(cfg)
        self.in_features = cfg.MODEL.ROI_HEADS.IN_FEATURES
        self.feature_strides = {k: v.stride for k, v in input_shape.items()}
        self.cls_agnostic_bbox_reg = cfg.MODEL.ROI_BOX_HEAD.CLS_AGNOSTIC_BBOX_REG
        self.smooth_l1_beta = cfg.MODEL.ROI_BOX_HEAD.SMOOTH_L1_BETA
        # C4 heads consume exactly one feature map (typically "res4").
        assert len(self.in_features) == 1
        # fmt: off
        pooler_resolution = cfg.MODEL.ROI_BOX_HEAD.POOLER_RESOLUTION
        pooler_type = cfg.MODEL.ROI_BOX_HEAD.POOLER_TYPE
        pooler_scales = (1.0 / self.feature_strides[self.in_features[0]], )
        sampling_ratio = cfg.MODEL.ROI_BOX_HEAD.POOLER_SAMPLING_RATIO
        self.resnet_version = cfg.MODEL.BUA.RESNET_VERSION
        self.attr_on = cfg.MODEL.BUA.ATTRIBUTE_ON
        self.extract_on = cfg.MODEL.BUA.EXTRACT_FEATS
        self.num_attr_classes = cfg.MODEL.BUA.ATTRIBUTE.NUM_CLASSES
        self.extractor_mode = cfg.MODEL.BUA.EXTRACTOR.MODE
        self.test_score_thresh = cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST
        self.test_nms_thresh = cfg.MODEL.ROI_HEADS.NMS_THRESH_TEST
        self.test_detections_per_img = cfg.TEST.DETECTIONS_PER_IMAGE
        self.pooler = ROIPooler(
            output_size=pooler_resolution,
            scales=pooler_scales,
            sampling_ratio=sampling_ratio,
            pooler_type=pooler_type,
        )
        self.box2box_transform = BUABox2BoxTransform(weights=cfg.MODEL.ROI_BOX_HEAD.BBOX_REG_WEIGHTS)
        self.res5, out_channels = self._build_res5_block(cfg)
        # Pre-activation (v2) ResNets need one extra BN applied after res5.
        if self.resnet_version == 2:
            self.res5_bn = BatchNorm2d(out_channels, eps=2e-5)
        self.box_predictor = BUACaffeFastRCNNOutputLayers(
            out_channels, self.num_classes, self.cls_agnostic_bbox_reg, attr_on=self.attr_on, num_attr_classes=self.num_attr_classes
        )
    def _build_res5_block(self, cfg):
        """Build the res5 stage (3 bottleneck blocks); returns (stage, out_channels)."""
        # fmt: off
        stage_channel_factor = 2 ** 3 # res5 is 8x res2
        num_groups = cfg.MODEL.RESNETS.NUM_GROUPS
        width_per_group = cfg.MODEL.RESNETS.WIDTH_PER_GROUP
        bottleneck_channels = num_groups * width_per_group * stage_channel_factor
        out_channels = cfg.MODEL.RESNETS.RES2_OUT_CHANNELS * stage_channel_factor
        stride_in_1x1 = cfg.MODEL.RESNETS.STRIDE_IN_1X1
        norm = cfg.MODEL.RESNETS.NORM
        dilation = cfg.MODEL.RESNETS.RES5_DILATION
        assert not cfg.MODEL.RESNETS.DEFORM_ON_PER_STAGE[-1], \
            "Deformable conv is not yet supported in res5 head."
        # fmt: on
        blocks = make_stage(
            BottleneckBlock if self.resnet_version == 1 else BottleneckBlockv2,
            3,
            first_stride=2,
            in_channels=out_channels // 2,
            bottleneck_channels=bottleneck_channels,
            out_channels=out_channels,
            num_groups=num_groups,
            norm=norm,
            stride_in_1x1=stride_in_1x1,
            dilation=dilation,
        )
        return nn.Sequential(*blocks), out_channels
    def _shared_roi_transform(self, features, boxes):
        """ROI-pool `boxes` from `features`, then run them through res5.

        For v2 (pre-activation) ResNets the first block is unrolled by hand so
        the trailing BN + ReLU can be applied after the whole stage.
        """
        x = self.pooler(features, boxes)
        if self.resnet_version == 2:
            out = self.res5[0].conv1(x)
            out = self.res5[0].conv2(out)
            out = self.res5[0].conv3(out)
            if self.res5[0].shortcut is not None:
                shortcut = self.res5[0].shortcut(x)
            else:
                shortcut = x
            out += shortcut
            out = self.res5[1:](out)
            return F.relu_(self.res5_bn(out))
        return self.res5(x)
    def forward(self, images, features, proposals, targets=None):
        """
        See :class:`ROIHeads.forward`.

        At inference with ``extract_on``: mode 1/3 returns
        (proposal_boxes, class probs, pooled features[, attr scores]);
        mode 2 returns (predicted boxes, class probs).
        """
        image_scales = images.image_scales
        del images
        if self.training:
            proposals = self.label_and_sample_proposals(proposals, targets)
        del targets
        proposal_boxes = [x.proposal_boxes for x in proposals]
        box_features = self._shared_roi_transform(
            [features[f] for f in self.in_features], proposal_boxes
        )
        feature_pooled = box_features.mean(dim=[2, 3]) # pooled to 1x1
        if self.attr_on:
            pred_class_logits, pred_proposal_deltas, attr_scores = self.box_predictor(feature_pooled, proposals)
        else:
            pred_class_logits, pred_proposal_deltas = self.box_predictor(feature_pooled, proposals)
        # Keep the pooled features alive only if they must be returned below.
        if not self.extract_on:
            del feature_pooled
        outputs = BUACaffeFastRCNNOutputs(
            self.box2box_transform,
            pred_class_logits,
            pred_proposal_deltas,
            proposals,
            self.smooth_l1_beta,
            image_scales
        )
        if self.training:
            del features
            losses = outputs.losses()
            return [], losses
        else:
            if self.extract_on:
                num_preds_per_image = [len(p) for p in proposals]
                if self.extractor_mode == 1 or self.extractor_mode == 3:
                    if self.attr_on:
                        return proposal_boxes, outputs.predict_probs(), feature_pooled.split(num_preds_per_image, dim=0), attr_scores.split(num_preds_per_image, dim=0)
                    else:
                        return proposal_boxes, outputs.predict_probs(), feature_pooled.split(num_preds_per_image, dim=0)
                elif self.extractor_mode == 2:
                    return outputs.predict_boxes(), outputs.predict_probs()
                else:
                    # NOTE(review): "EXTRATOR" typo is in the original message.
                    raise ValueError('BUA.EXTRATOR.MODE ERROR')
            pred_instances, _ = outputs.inference(
                self.test_score_thresh, self.test_nms_thresh, self.test_detections_per_img
            )
            return pred_instances, {}
@ROI_HEADS_REGISTRY.register()
class BUADetectron2Res5ROIHeads(ROIHeads):
    """
    The ROIHeads in a typical "C4" R-CNN model, where
    the box and mask head share the cropping and
    the per-region feature computation by a Res5 block.

    Detectron2-weights variant of the bottom-up-attention heads: besides
    standard detection it can optionally predict per-box attributes
    (``attr_on``) and return pooled res5 features (``extract_on``).
    """
    def __init__(self, cfg, input_shape):
        # super().__init__(cfg, input_shape)
        super().__init__(cfg)
        self.in_features = cfg.MODEL.ROI_HEADS.IN_FEATURES
        self.feature_strides = {k: v.stride for k, v in input_shape.items()}
        self.cls_agnostic_bbox_reg = cfg.MODEL.ROI_BOX_HEAD.CLS_AGNOSTIC_BBOX_REG
        self.smooth_l1_beta = cfg.MODEL.ROI_BOX_HEAD.SMOOTH_L1_BETA
        self.positive_sample_fraction = cfg.MODEL.ROI_HEADS.POSITIVE_FRACTION
        # C4 heads consume exactly one feature map (typically "res4").
        assert len(self.in_features) == 1
        # fmt: off
        pooler_resolution = cfg.MODEL.ROI_BOX_HEAD.POOLER_RESOLUTION
        pooler_type = cfg.MODEL.ROI_BOX_HEAD.POOLER_TYPE
        pooler_scales = (1.0 / self.feature_strides[self.in_features[0]], )
        sampling_ratio = cfg.MODEL.ROI_BOX_HEAD.POOLER_SAMPLING_RATIO
        self.resnet_version = cfg.MODEL.BUA.RESNET_VERSION
        self.attr_on = cfg.MODEL.BUA.ATTRIBUTE_ON
        self.extract_on = cfg.MODEL.BUA.EXTRACT_FEATS
        self.num_attr_classes = cfg.MODEL.BUA.ATTRIBUTE.NUM_CLASSES
        self.extractor_mode = cfg.MODEL.BUA.EXTRACTOR.MODE
        self.test_score_thresh = cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST
        self.test_nms_thresh = cfg.MODEL.ROI_HEADS.NMS_THRESH_TEST
        self.test_detections_per_img = cfg.TEST.DETECTIONS_PER_IMAGE
        self.pooler = ROIPooler(
            output_size=pooler_resolution,
            scales=pooler_scales,
            sampling_ratio=sampling_ratio,
            pooler_type=pooler_type,
        )
        self.box2box_transform = BUABox2BoxTransform(weights=cfg.MODEL.ROI_BOX_HEAD.BBOX_REG_WEIGHTS)
        self.res5, out_channels = self._build_res5_block(cfg)
        # Pre-activation (v2) ResNets need one extra BN applied after res5.
        if self.resnet_version == 2:
            self.res5_bn = BatchNorm2d(out_channels, eps=2e-5)
        self.box_predictor = BUADetectron2FastRCNNOutputLayers(
            out_channels, self.num_classes, self.cls_agnostic_bbox_reg,
            attr_on=self.attr_on, num_attr_classes=self.num_attr_classes
        )
    def _sample_proposals(self, matched_idxs, matched_labels, gt_classes, gt_attributes):
        """
        Based on the matching between N proposals and M groundtruth,
        sample the proposals and set their classification labels.
        Args:
            matched_idxs (Tensor): a vector of length N, each is the best-matched
                gt index in [0, M) for each proposal.
            matched_labels (Tensor): a vector of length N, the matcher's label
                (one of cfg.MODEL.ROI_HEADS.IOU_LABELS) for each proposal.
            gt_classes (Tensor): a vector of length M.
            gt_attributes (Tensor): an (M, 16) tensor of attribute labels.
        Returns:
            Tensor: a vector of indices of sampled proposals. Each is in [0, N).
            Tensor: a vector of the same length, the classification label for
                each sampled proposal. Each sample is labeled as either a category in
                [0, num_classes) or the background (num_classes).
            Tensor: the (num_sampled, 16) attribute labels of the sampled proposals.
        """
        has_gt = gt_classes.numel() > 0
        # Get the corresponding GT for each proposal
        if has_gt:
            gt_classes = gt_classes[matched_idxs]
            gt_attributes = gt_attributes[matched_idxs, :]
            # Label unmatched proposals (0 label from matcher) as background (label=num_classes)
            gt_classes[matched_labels == 0] = self.num_classes
            # Label ignore proposals (-1 label)
            gt_classes[matched_labels == -1] = -1
        else:
            gt_classes = torch.zeros_like(matched_idxs) + self.num_classes
            # BUGFIX: the original assigned to a misspelled name
            # (``gt_clagt_attributes``), which left ``gt_attributes`` bound to
            # the empty ground-truth tensor and crashed on the indexing below.
            # Also allocate on the inputs' device instead of hard-coding .cuda().
            gt_attributes = -torch.ones(
                (len(matched_idxs), 16), dtype=torch.int64, device=matched_idxs.device
            )
        sampled_fg_idxs, sampled_bg_idxs = subsample_labels(
            gt_classes, self.batch_size_per_image, self.positive_sample_fraction, self.num_classes
        )
        sampled_idxs = torch.cat([sampled_fg_idxs, sampled_bg_idxs], dim=0)
        return sampled_idxs, gt_classes[sampled_idxs], gt_attributes[sampled_idxs]
    def _build_res5_block(self, cfg):
        """Build the res5 stage (3 bottleneck blocks); returns (stage, out_channels)."""
        # fmt: off
        stage_channel_factor = 2 ** 3 # res5 is 8x res2
        num_groups = cfg.MODEL.RESNETS.NUM_GROUPS
        width_per_group = cfg.MODEL.RESNETS.WIDTH_PER_GROUP
        bottleneck_channels = num_groups * width_per_group * stage_channel_factor
        out_channels = cfg.MODEL.RESNETS.RES2_OUT_CHANNELS * stage_channel_factor
        stride_in_1x1 = cfg.MODEL.RESNETS.STRIDE_IN_1X1
        norm = cfg.MODEL.RESNETS.NORM
        dilation = cfg.MODEL.RESNETS.RES5_DILATION
        assert not cfg.MODEL.RESNETS.DEFORM_ON_PER_STAGE[-1], \
            "Deformable conv is not yet supported in res5 head."
        # fmt: on
        blocks = make_stage(
            BottleneckBlock if self.resnet_version == 1 else BottleneckBlockv2,
            3,
            first_stride=2,
            in_channels=out_channels // 2,
            bottleneck_channels=bottleneck_channels,
            out_channels=out_channels,
            num_groups=num_groups,
            norm=norm,
            stride_in_1x1=stride_in_1x1,
            dilation=dilation,
        )
        return nn.Sequential(*blocks), out_channels
    def _shared_roi_transform(self, features, boxes):
        """ROI-pool `boxes` from `features`, then run them through res5.

        For v2 (pre-activation) ResNets the first block is unrolled by hand so
        the trailing BN + ReLU can be applied after the whole stage.
        """
        x = self.pooler(features, boxes)
        if self.resnet_version == 2:
            out = self.res5[0].conv1(x)
            out = self.res5[0].conv2(out)
            out = self.res5[0].conv3(out)
            if self.res5[0].shortcut is not None:
                shortcut = self.res5[0].shortcut(x)
            else:
                shortcut = x
            out += shortcut
            out = self.res5[1:](out)
            return F.relu_(self.res5_bn(out))
        return self.res5(x)
    @torch.no_grad()
    def label_and_sample_proposals(self, proposals, targets):
        """
        Prepare some proposals to be used to train the ROI heads.
        It performs box matching between `proposals` and `targets`, and assigns
        training labels to the proposals.
        It returns ``self.batch_size_per_image`` random samples from proposals and groundtruth
        boxes, with a fraction of positives that is no larger than
        ``self.positive_sample_fraction``.
        Args:
            See :meth:`ROIHeads.forward`
        Returns:
            list[Instances]:
                length `N` list of `Instances`s containing the proposals
                sampled for training. Each `Instances` has the following fields:
                - proposal_boxes: the proposal boxes
                - gt_boxes: the ground-truth box that the proposal is assigned to
                  (this is only meaningful if the proposal has a label > 0; if label = 0
                  then the ground-truth box is random)
                Other fields such as "gt_classes", "gt_masks", that's included in `targets`.
        """
        gt_boxes = [x.gt_boxes for x in targets]
        # Augment proposals with ground-truth boxes.
        # In the case of learned proposals (e.g., RPN), when training starts
        # the proposals will be low quality due to random initialization.
        # It's possible that none of these initial
        # proposals have high enough overlap with the gt objects to be used
        # as positive examples for the second stage components (box head,
        # cls head, mask head). Adding the gt boxes to the set of proposals
        # ensures that the second stage components will have some positive
        # examples from the start of training. For RPN, this augmentation improves
        # convergence and empirically improves box AP on COCO by about 0.5
        # points (under one tested configuration).
        if self.proposal_append_gt:
            proposals = add_ground_truth_to_proposals(gt_boxes, proposals)
        proposals_with_gt = []
        num_fg_samples = []
        num_bg_samples = []
        for proposals_per_image, targets_per_image in zip(proposals, targets):
            has_gt = len(targets_per_image) > 0
            match_quality_matrix = pairwise_iou(
                targets_per_image.gt_boxes, proposals_per_image.proposal_boxes
            )
            matched_idxs, matched_labels = self.proposal_matcher(match_quality_matrix)
            sampled_idxs, gt_classes, gt_attributes = self._sample_proposals(
                matched_idxs, matched_labels, targets_per_image.gt_classes, targets_per_image.gt_attributes
            )
            # Set target attributes of the sampled proposals:
            proposals_per_image = proposals_per_image[sampled_idxs]
            proposals_per_image.gt_classes = gt_classes
            proposals_per_image.gt_attributes = gt_attributes
            # We index all the attributes of targets that start with "gt_"
            # and have not been added to proposals yet (="gt_classes").
            if has_gt:
                sampled_targets = matched_idxs[sampled_idxs]
                # NOTE: here the indexing waste some compute, because heads
                # like masks, keypoints, etc, will filter the proposals again,
                # (by foreground/background, or number of keypoints in the image, etc)
                # so we essentially index the data twice.
                for (trg_name, trg_value) in targets_per_image.get_fields().items():
                    if trg_name.startswith("gt_") and not proposals_per_image.has(trg_name):
                        proposals_per_image.set(trg_name, trg_value[sampled_targets])
            else:
                gt_boxes = Boxes(
                    targets_per_image.gt_boxes.tensor.new_zeros((len(sampled_idxs), 4))
                )
                proposals_per_image.gt_boxes = gt_boxes
            num_bg_samples.append((gt_classes == self.num_classes).sum().item())
            num_fg_samples.append(gt_classes.numel() - num_bg_samples[-1])
            proposals_with_gt.append(proposals_per_image)
        # Log the number of fg/bg samples that are selected for training ROI heads
        storage = get_event_storage()
        storage.put_scalar("roi_head/num_fg_samples", np.mean(num_fg_samples))
        storage.put_scalar("roi_head/num_bg_samples", np.mean(num_bg_samples))
        return proposals_with_gt
    def forward(self, images, features, proposals, targets=None):
        """
        See :class:`ROIHeads.forward`.

        At inference with ``extract_on``: mode 1/3 returns
        (proposal_boxes, class probs, pooled features[, attr probs]);
        mode 2 returns (predicted boxes, class probs).
        """
        # image_scales = images.image_scales
        del images
        if self.training:
            proposals = self.label_and_sample_proposals(proposals, targets)
        del targets
        proposal_boxes = [x.proposal_boxes for x in proposals]
        box_features = self._shared_roi_transform(
            [features[f] for f in self.in_features], proposal_boxes
        )
        feature_pooled = box_features.mean(dim=[2, 3]) # pooled to 1x1
        if self.attr_on:
            pred_class_logits, pred_proposal_deltas, pred_attribute_logits, gt_attributes = self.box_predictor(feature_pooled, proposals)
        else:
            pred_class_logits, pred_proposal_deltas = self.box_predictor(feature_pooled, proposals)
        # Keep the pooled features alive only if they must be returned below.
        if not self.extract_on:
            del feature_pooled
        if self.attr_on:
            outputs = BUADetection2FastRCNNOutputs(
                self.box2box_transform,
                pred_class_logits,
                pred_proposal_deltas,
                proposals,
                self.smooth_l1_beta,
                self.attr_on,
                pred_attribute_logits=pred_attribute_logits,
                num_attr_classes=self.num_attr_classes,
                gt_attributes=gt_attributes,
            )
        else:
            outputs = BUADetection2FastRCNNOutputs(
                self.box2box_transform,
                pred_class_logits,
                pred_proposal_deltas,
                proposals,
                self.smooth_l1_beta,
                self.attr_on,
            )
        if self.training:
            del features
            losses = outputs.losses()
            return [], losses
        else:
            if self.extract_on:
                num_preds_per_image = [len(p) for p in proposals]
                if self.extractor_mode == 1 or self.extractor_mode == 3:
                    if self.attr_on:
                        return proposal_boxes, outputs.predict_probs(), feature_pooled.split(num_preds_per_image, dim=0), F.softmax(pred_attribute_logits, dim=-1).split(num_preds_per_image, dim=0)
                    else:
                        return proposal_boxes, outputs.predict_probs(), feature_pooled.split(num_preds_per_image, dim=0)
                elif self.extractor_mode == 2:
                    return outputs.predict_boxes(), outputs.predict_probs()
                else:
                    # NOTE(review): "EXTRATOR" typo is in the original message.
                    raise ValueError('BUA.EXTRATOR.MODE ERROR')
            pred_instances, _ = outputs.inference(
                self.test_score_thresh, self.test_nms_thresh, self.test_detections_per_img
            )
            return pred_instances, {}
import fvcore.nn.weight_init as weight_init
from torch import nn
import torch.nn.functional as F
from detectron2.layers import Conv2d, FrozenBatchNorm2d, get_norm, BatchNorm2d
from detectron2.modeling import BACKBONE_REGISTRY, ResNet, make_stage
from detectron2.modeling.backbone.resnet import BottleneckBlock, DeformBottleneckBlock, ResNetBlockBase
from .layers.wrappers import Conv2dv2
# Names exported by ``from <this module> import *``.
__all__ = ["BUABasicStem", "BUABasicStemv2", "build_bua_resnet_backbone"]
class BUABasicStem(nn.Module):
    """
    ResNet v1 stem: a 7x7 stride-2 conv (with configurable norm), in-place
    ReLU, then a ceil-mode 3x3 stride-2 max pool — overall stride 4.
    """
    def __init__(self, in_channels=3, out_channels=64, norm="BN"):
        """
        Args:
            norm (str or callable): a callable that takes the number of
                channels and return a `nn.Module`, or a pre-defined string
                (one of {"FrozenBN", "BN", "GN"}).
        """
        super().__init__()
        self.conv1 = Conv2d(
            in_channels, out_channels,
            kernel_size=7, stride=2, padding=3, bias=False,
            norm=get_norm(norm, out_channels),
        )
        weight_init.c2_msra_fill(self.conv1)
    def forward(self, x):
        # conv -> in-place ReLU -> ceil-mode max pool.
        hidden = F.relu_(self.conv1(x))
        return F.max_pool2d(hidden, kernel_size=3, stride=2, padding=0, ceil_mode=True)
    @property
    def out_channels(self):
        return self.conv1.out_channels
    @property
    def stride(self):
        # stride-2 conv followed by stride-2 max pool.
        return 4
class BUABasicStemv2(nn.Module):
    """
    Pre-activation (v2) stem: BatchNorm on the raw input, then a 7x7 stride-2
    conv with its own BatchNorm, in-place ReLU, and a ceil-mode 3x3 stride-2
    max pool — overall stride 4.
    """
    def __init__(self, in_channels=3, out_channels=64, norm="BN"):
        """
        Args:
            norm (str or callable): accepted for interface parity with
                :class:`BUABasicStem`, but unused — this variant always uses
                ``BatchNorm2d(eps=2e-5)``.
        """
        super().__init__()
        # Normalize the input before the first conv (pre-activation layout).
        self.norm = BatchNorm2d(in_channels, eps=2e-5)
        self.conv1 = Conv2d(
            in_channels, out_channels,
            kernel_size=7, stride=2, padding=3, bias=False,
            norm=BatchNorm2d(out_channels, eps=2e-5),
        )
        weight_init.c2_msra_fill(self.conv1)
    def forward(self, x):
        hidden = F.relu_(self.conv1(self.norm(x)))
        return F.max_pool2d(hidden, kernel_size=3, stride=2, padding=0, ceil_mode=True)
    @property
    def out_channels(self):
        return self.conv1.out_channels
    @property
    def stride(self):
        # stride-2 conv followed by stride-2 max pool.
        return 4
@BACKBONE_REGISTRY.register()
def build_bua_resnet_backbone(cfg, input_shape):
    """
    Create a ResNet instance from config.

    Chooses a v1 or pre-activation (v2) stem/bottleneck depending on
    ``cfg.MODEL.BUA.RESNET_VERSION``, builds stages res2..res5 as requested by
    ``cfg.MODEL.RESNETS.OUT_FEATURES``, and freezes stages up to
    ``cfg.MODEL.BACKBONE.FREEZE_AT``.

    Returns:
        ResNet: a :class:`ResNet` instance.
    """
    # need registration of new blocks/stems?
    norm = cfg.MODEL.RESNETS.NORM
    if cfg.MODEL.BUA.RESNET_VERSION == 2:
        # NOTE: the v2 stem ignores `norm` and always uses BatchNorm2d(eps=2e-5).
        stem = BUABasicStemv2(
            in_channels=input_shape.channels,
            out_channels=cfg.MODEL.RESNETS.STEM_OUT_CHANNELS,
        )
    else:
        stem = BUABasicStem(
            in_channels=input_shape.channels,
            out_channels=cfg.MODEL.RESNETS.STEM_OUT_CHANNELS,
            norm=norm,
        )
    freeze_at = cfg.MODEL.BACKBONE.FREEZE_AT
    if freeze_at >= 1:
        # Freeze the stem: stop gradients and convert its BN to FrozenBN.
        for p in stem.parameters():
            p.requires_grad = False
        stem = FrozenBatchNorm2d.convert_frozen_batchnorm(stem)
    # fmt: off
    out_features = cfg.MODEL.RESNETS.OUT_FEATURES
    depth = cfg.MODEL.RESNETS.DEPTH
    num_groups = cfg.MODEL.RESNETS.NUM_GROUPS
    width_per_group = cfg.MODEL.RESNETS.WIDTH_PER_GROUP
    bottleneck_channels = num_groups * width_per_group
    in_channels = cfg.MODEL.RESNETS.STEM_OUT_CHANNELS
    out_channels = cfg.MODEL.RESNETS.RES2_OUT_CHANNELS
    stride_in_1x1 = cfg.MODEL.RESNETS.STRIDE_IN_1X1
    res5_dilation = cfg.MODEL.RESNETS.RES5_DILATION
    deform_on_per_stage = cfg.MODEL.RESNETS.DEFORM_ON_PER_STAGE
    deform_modulated = cfg.MODEL.RESNETS.DEFORM_MODULATED
    deform_num_groups = cfg.MODEL.RESNETS.DEFORM_NUM_GROUPS
    # fmt: on
    assert res5_dilation in {1, 2}, "res5_dilation cannot be {}.".format(res5_dilation)
    # Block counts per stage for the supported ResNet depths.
    num_blocks_per_stage = {50: [3, 4, 6, 3], 101: [3, 4, 23, 3], 152: [3, 8, 36, 3]}[depth]
    stages = []
    # Avoid creating variables without gradients
    # It consumes extra memory and may cause allreduce to fail
    out_stage_idx = [{"res2": 2, "res3": 3, "res4": 4, "res5": 5}[f] for f in out_features]
    max_stage_idx = max(out_stage_idx)
    for idx, stage_idx in enumerate(range(2, max_stage_idx + 1)):
        # res5 may be dilated instead of strided (for dense prediction heads).
        dilation = res5_dilation if stage_idx == 5 else 1
        first_stride = 1 if idx == 0 or (stage_idx == 5 and dilation == 2) else 2
        stage_kargs = {
            "num_blocks": num_blocks_per_stage[idx],
            "first_stride": first_stride,
            "in_channels": in_channels,
            "bottleneck_channels": bottleneck_channels,
            "out_channels": out_channels,
            "num_groups": num_groups,
            "norm": norm,
            "stride_in_1x1": stride_in_1x1,
            "dilation": dilation,
        }
        if deform_on_per_stage[idx]:
            stage_kargs["block_class"] = DeformBottleneckBlock
            stage_kargs["deform_modulated"] = deform_modulated
            stage_kargs["deform_num_groups"] = deform_num_groups
        else:
            stage_kargs["block_class"] = BottleneckBlock if cfg.MODEL.BUA.RESNET_VERSION == 1 else BottleneckBlockv2
        blocks = make_stage(**stage_kargs)
        # Channel widths double from one stage to the next.
        in_channels = out_channels
        out_channels *= 2
        bottleneck_channels *= 2
        if freeze_at >= stage_idx:
            for block in blocks:
                block.freeze()
        stages.append(blocks)
    return ResNet(stem, stages, out_features=out_features)
class BottleneckBlockv2(ResNetBlockBase):
    """Pre-activation ("v2") bottleneck residual block.

    Unlike the classic bottleneck, normalization + ReLU happen *before* each
    convolution: ``Conv2dv2`` applies its ``norm``/``activation`` to the conv
    *input*, and ``self.norm`` pre-activates the raw block input in
    :meth:`forward`. Consequently each BatchNorm below is sized to the
    corresponding conv's input channels, not its output channels.
    """
    def __init__(
        self,
        in_channels,
        out_channels,
        *,
        bottleneck_channels,
        stride=1,
        num_groups=1,
        norm="BN",
        stride_in_1x1=False,
        dilation=1,
    ):
        """
        Args:
            norm (str or callable): a callable that takes the number of
                channels and return a `nn.Module`, or a pre-defined string
                (one of {"FrozenBN", "BN", "GN"}).
                NOTE(review): accepted here for signature parity with
                BottleneckBlock but never read -- every norm in this block
                is a hard-coded BatchNorm2d(eps=2e-5).
            stride_in_1x1 (bool): when stride==2, whether to put stride in the
                first 1x1 convolution or the bottleneck 3x3 convolution.
        """
        super().__init__(in_channels, out_channels, stride)
        # 1x1 projection shortcut only when channel counts differ; applied to
        # the *pre-activated* input (x_2) in forward().
        if in_channels != out_channels:
            self.shortcut = Conv2dv2(
                in_channels,
                out_channels,
                kernel_size=1,
                stride=stride,
                bias=False,
                norm=None,
            )
        else:
            self.shortcut = None
        # The original MSRA ResNet models have stride in the first 1x1 conv
        # The subsequent fb.torch.resnet and Caffe2 ResNe[X]t implementations have
        # stride in the 3x3 conv
        stride_1x1, stride_3x3 = (stride, 1) if stride_in_1x1 else (1, stride)
        # conv1 carries no norm/activation of its own: the shared self.norm +
        # relu in forward() pre-activate its input.
        self.conv1 = Conv2dv2(
            in_channels,
            bottleneck_channels,
            kernel_size=1,
            stride=stride_1x1,
            bias=False,
            norm=None,
        )
        # BN/ReLU here run on conv2's input (conv1's output: bottleneck_channels).
        self.conv2 = Conv2dv2(
            bottleneck_channels,
            bottleneck_channels,
            kernel_size=3,
            stride=stride_3x3,
            padding=1 * dilation,
            bias=False,
            groups=num_groups,
            dilation=dilation,
            norm=BatchNorm2d(bottleneck_channels, eps=2e-5),
            activation=F.relu_,
        )
        # BN/ReLU here run on conv3's input (conv2's output: bottleneck_channels).
        self.conv3 = Conv2dv2(
            bottleneck_channels,
            out_channels,
            kernel_size=1,
            bias=False,
            norm=BatchNorm2d(bottleneck_channels, eps=2e-5),
            activation=F.relu_,
        )
        for layer in [self.conv1, self.conv2, self.conv3, self.shortcut]:
            if layer is not None:  # shortcut can be None
                weight_init.c2_msra_fill(layer)
        # Pre-activation of the block input, shared by conv1 and the shortcut.
        self.norm = BatchNorm2d(in_channels, eps=2e-5)
        # Zero-initialize the last normalization in each residual branch,
        # so that at the beginning, the residual branch starts with zeros,
        # and each residual block behaves like an identity.
        # See Sec 5.1 in "Accurate, Large Minibatch SGD: Training ImageNet in 1 Hour":
        # "For BN layers, the learnable scaling coefficient γ is initialized
        # to be 1, except for each residual block's last BN
        # where γ is initialized to be 0."
        # nn.init.constant_(self.conv3.norm.weight, 0)
        # TODO this somehow hurts performance when training GN models from scratch.
        # Add it as an option when we need to use this code to train a backbone.
    def forward(self, x):
        """Pre-activation residual forward: BN -> ReLU -> 3 convs + shortcut."""
        x_2 = self.norm(x)
        x_2 = F.relu_(x_2)
        out = self.conv1(x_2)
        # out = F.relu_(out)
        out = self.conv2(out)
        # out = F.relu_(out)
        out = self.conv3(out)
        if self.shortcut is not None:
            # Projection shortcut consumes the pre-activated input ...
            shortcut = self.shortcut(x_2)
        else:
            # ... while the identity shortcut uses the raw input (v2 design).
            shortcut = x
        out += shortcut
        # out = F.relu_(out)
        return out
return out | 9,404 | 33.076087 | 116 | py |
MPMQA | MPMQA-master/detector/bua/caffe/modeling/layers/nms.py |
# from ._utils import _C
from bua.caffe.modeling import _C
from apex import amp
import torch
# Only valid with fp32 inputs - give AMP the hint
nms = amp.float_function(_C.nms)
# nms.__doc__ = """
# This function performs Non-maximum suppresion"""
# NOTE: In order to be consistent with bottom-up-attention, we nms core function from maskrcnn-benchmark
def batched_nms(boxes, scores, idxs, iou_threshold):
    """
    Same as torchvision.ops.boxes.batched_nms, but safer.

    Args:
        boxes (Tensor[N, 4]): boxes in (x1, y1, x2, y2) format.
        scores (Tensor[N]): per-box scores.
        idxs (Tensor[N]): category index of each box; NMS is applied
            independently per category.
        iou_threshold (float): IoU above which overlapping boxes of the
            same category are suppressed.

    Returns:
        Tensor: int64 indices of kept boxes, sorted by decreasing score.
    """
    assert boxes.shape[-1] == 4
    # NOTE(review): inputs are moved to CPU unconditionally -- presumably to
    # bound GPU memory on huge inputs; confirm callers expect CPU results.
    boxes = boxes.cpu()
    scores = scores.cpu()
    # TODO may need better strategy.
    # Investigate after having a fully-cuda NMS op.
    if len(boxes) < 40000:
        # Small inputs: one offset-based batched NMS call handles all classes.
        return box_ops_batched_nms(boxes, scores, idxs, iou_threshold)
    # Large inputs: run NMS independently per class to limit peak memory.
    result_mask = scores.new_zeros(scores.size(), dtype=torch.bool)
    # `class_id` (not `id`) to avoid shadowing the builtin.
    for class_id in torch.unique(idxs).cpu().tolist():
        mask = (idxs == class_id).nonzero().view(-1)
        keep = nms(boxes[mask], scores[mask], iou_threshold)
        result_mask[mask[keep]] = True
    keep = result_mask.nonzero().view(-1)
    keep = keep[scores[keep].argsort(descending=True)]
    return keep
def box_ops_batched_nms(boxes, scores, idxs, iou_threshold):
    """
    Batched non-maximum suppression: boxes carrying different category
    indices never suppress each other.

    Args:
        boxes (Tensor[N, 4]): boxes in (x1, y1, x2, y2) format.
        scores (Tensor[N]): score of each box.
        idxs (Tensor[N]): category index of each box.
        iou_threshold (float): overlap threshold above which boxes of the
            same category are discarded.

    Returns:
        Tensor: int64 indices of the kept boxes, in decreasing score order.
    """
    if boxes.numel() == 0:
        return torch.empty((0,), dtype=torch.int64, device=boxes.device)
    # Shift each class's boxes into a disjoint coordinate range so a single
    # NMS call cannot mix categories: the per-box offset is proportional to
    # its class index and strictly larger than any real coordinate.
    per_class_offset = idxs.to(boxes) * (boxes.max() + 1)
    shifted_boxes = boxes + per_class_offset[:, None]
    return nms(shifted_boxes, scores, iou_threshold)
MPMQA | MPMQA-master/detector/bua/caffe/modeling/layers/wrappers.py | import math
import torch
from torch.nn.modules.utils import _ntuple
class Conv2dv2(torch.nn.Conv2d):
    """
    :class:`torch.nn.Conv2d` variant that can run a normalization layer and
    an activation function on its *input* before convolving (pre-activation).
    """
    def __init__(self, *args, **kwargs):
        """
        Extra keyword arguments on top of ``torch.nn.Conv2d``'s:

        Args:
            norm (nn.Module, optional): normalization applied to the input.
            activation (callable(Tensor) -> Tensor): activation applied to
                the (normalized) input, before the convolution itself.
        """
        norm_layer = kwargs.pop("norm", None)
        act_fn = kwargs.pop("activation", None)
        super().__init__(*args, **kwargs)
        self.norm = norm_layer
        self.activation = act_fn

    def forward(self, x):
        if self.training and x.numel() == 0:
            # https://github.com/pytorch/pytorch/issues/12013
            assert not isinstance(
                self.norm, torch.nn.SyncBatchNorm
            ), "SyncBatchNorm does not support empty inputs!"
        if self.norm is not None:
            x = self.norm(x)
        if self.activation is not None:
            x = self.activation(x)
        return super().forward(x)
MPMQA | MPMQA-master/detector/bua/caffe/dataloader/dataset_mapper.py |
import copy
import logging
import numpy as np
import torch
import cv2
from detectron2.data import detection_utils as utils
from detectron2.data import transforms as T
from .transform_gen import ResizeShortestEdge
from .detection_utils import annotations_to_instances
"""
This file contains the default mapping that's applied to "dataset dicts".
"""
__all__ = ["DatasetMapper"]
def build_transform_gen(cfg, is_train):
    """
    Create a list of :class:`TransformGen` from config.
    Now it includes resizing and flipping.

    Returns:
        list[TransformGen]
    """
    if is_train:
        min_size, max_size = cfg.INPUT.MIN_SIZE_TRAIN, cfg.INPUT.MAX_SIZE_TRAIN
    else:
        min_size, max_size = cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MAX_SIZE_TEST
    tfm_gens = [ResizeShortestEdge(min_size, max_size, cfg.MODEL.PIXEL_MEAN)]
    if is_train:
        logging.getLogger(__name__).info("TransformGens used in training: " + str(tfm_gens))
    return tfm_gens
class DatasetMapper:
    """
    A callable which takes a dataset dict in Detectron2 Dataset format,
    and map it into a format used by the model.
    This is the default callable to be used to map your dataset dict into training data.
    You may need to follow it to implement your own one for customized logic.
    The callable currently does the following:
    1. Read the image from "file_name"
    2. Applies cropping/geometric transforms to the image and annotations
    3. Prepare data and annotations to Tensor and :class:`Instances`
    """
    def __init__(self, cfg, is_train=True):
        # Optional random crop, training only.
        if cfg.INPUT.CROP.ENABLED and is_train:
            self.crop_gen = T.RandomCrop(cfg.INPUT.CROP.TYPE, cfg.INPUT.CROP.SIZE)
            logging.getLogger(__name__).info("CropGen used in training: " + str(self.crop_gen))
        else:
            self.crop_gen = None
        self.tfm_gens = build_transform_gen(cfg, is_train)
        # fmt: off
        self.img_format = cfg.INPUT.FORMAT
        self.mask_on = cfg.MODEL.MASK_ON
        self.mask_format = cfg.INPUT.MASK_FORMAT
        self.keypoint_on = cfg.MODEL.KEYPOINT_ON
        self.load_proposals = cfg.MODEL.LOAD_PROPOSALS
        # fmt: on
        if self.keypoint_on and is_train:
            # Flip only makes sense in training
            self.keypoint_hflip_indices = utils.create_keypoint_hflip_indices(cfg.DATASETS.TRAIN)
        else:
            self.keypoint_hflip_indices = None
        if self.load_proposals:
            self.min_box_side_len = cfg.MODEL.PROPOSAL_GENERATOR.MIN_SIZE
            self.proposal_topk = (
                cfg.DATASETS.PRECOMPUTED_PROPOSAL_TOPK_TRAIN
                if is_train
                else cfg.DATASETS.PRECOMPUTED_PROPOSAL_TOPK_TEST
            )
        self.is_train = is_train
    def __call__(self, dataset_dict):
        """
        Args:
            dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format.
        Returns:
            dict: a format that builtin models in detectron2 accept
        """
        dataset_dict = copy.deepcopy(dataset_dict)  # it will be modified by code below
        # USER: Write your own image loading if it's not from a file
        # image = utils.read_image(dataset_dict["file_name"], format=self.img_format)
        # NOTE(review): cv2.imread returns BGR and None on failure; no size
        # check is done here (check_image_size is commented out) -- confirm
        # inputs are always readable.
        image = cv2.imread(dataset_dict["file_name"])
        h, w = image.shape[:2]
        # utils.check_image_size(dataset_dict, image)
        if "annotations" not in dataset_dict:
            # No annotations: apply (optional crop +) resize transforms directly.
            image, transforms = T.apply_transform_gens(
                ([self.crop_gen] if self.crop_gen else []) + self.tfm_gens, image
            )
        else:
            # Crop around an instance if there are instances in the image.
            # USER: Remove if you don't use cropping
            if self.crop_gen:
                crop_tfm = utils.gen_crop_transform_with_instance(
                    self.crop_gen.get_crop_size(image.shape[:2]),
                    image.shape[:2],
                    np.random.choice(dataset_dict["annotations"]),
                )
                image = crop_tfm.apply_image(image)
            image, transforms = T.apply_transform_gens(self.tfm_gens, image)
            if self.crop_gen:
                # Prepend the crop so annotation transforms see crop-then-resize.
                transforms = crop_tfm + transforms
        image_shape = image.shape[:2]  # h, w
        # Pytorch's dataloader is efficient on torch.Tensor due to shared-memory,
        # but not efficient on large generic data structures due to the use of pickle & mp.Queue.
        # Therefore it's important to use torch.Tensor.
        dataset_dict["image"] = torch.as_tensor(image.transpose(2, 0, 1).astype("float32"))
        # Ratio of transformed height to original height; presumably used
        # downstream to rescale predicted boxes -- confirm against consumer.
        dataset_dict["im_scale"] = float(image_shape[0])/ float(h)
        # Can use uint8 if it turns out to be slow some day
        # USER: Remove if you don't use pre-computed proposals.
        if self.load_proposals:
            utils.transform_proposals(
                dataset_dict, image_shape, transforms, self.min_box_side_len, self.proposal_topk
            )
        if not self.is_train:
            # Inference: ground truth is dropped and processing stops here.
            dataset_dict.pop("annotations", None)
            dataset_dict.pop("sem_seg_file_name", None)
            return dataset_dict
        if "annotations" in dataset_dict:
            # USER: Modify this if you want to keep them for some reason.
            for anno in dataset_dict["annotations"]:
                if not self.mask_on:
                    anno.pop("segmentation", None)
                if not self.keypoint_on:
                    anno.pop("keypoints", None)
            # USER: Implement additional transformations if you have other types of data
            # Crowd annotations (iscrowd != 0) are filtered out here.
            annos = [
                utils.transform_instance_annotations(
                    obj, transforms, image_shape
                )
                for obj in dataset_dict.pop("annotations")
                if obj.get("iscrowd", 0) == 0
            ]
            instances = annotations_to_instances(
                annos, image_shape, mask_format=self.mask_format
            )
            # Create a tight bounding box from masks, useful when image is cropped
            if self.crop_gen and instances.has("gt_masks"):
                instances.gt_boxes = instances.gt_masks.get_bounding_boxes()
            dataset_dict["instances"] = utils.filter_empty_instances(instances)
        return dataset_dict
| 6,394 | 37.757576 | 97 | py |
MPMQA | MPMQA-master/detector/bua/caffe/dataloader/detection_utils.py | # -*- coding: utf-8 -*-
"""
Common data processing utilities that are used in a
typical object detection data pipeline.
"""
import torch
from detectron2.structures import (
Boxes,
BoxMode,
Instances,
)
def transform_instance_annotations(
    annotation, transforms, image_size, *, keypoint_hflip_indices=None
):
    """
    Apply transforms to the bounding-box annotation of a single instance.

    Only the box is transformed here (via ``transforms.apply_box``); any
    "attributes" field is left as-is and segmentation/keypoint fields are
    not touched.

    Args:
        annotation (dict): dict of instance annotations for a single instance.
            It will be modified in-place.
        transforms (TransformList):
        image_size (tuple): the height, width of the transformed image
        keypoint_hflip_indices (ndarray[int]): unused; kept for signature
            compatibility with detectron2's version of this function.

    Returns:
        dict:
            the same input dict with its "bbox" transformed according to
            `transforms` and "bbox_mode" set to XYXY_ABS.
    """
    bbox = BoxMode.convert(annotation["bbox"], annotation["bbox_mode"], BoxMode.XYXY_ABS)
    # Note that bbox is 1d (per-instance bounding box)
    annotation["bbox"] = transforms.apply_box([bbox])[0]
    annotation["bbox_mode"] = BoxMode.XYXY_ABS
    # (Removed a dead no-op: `annotation["attributes"] = annotation["attributes"]`.)
    return annotation
def annotations_to_instances(annos, image_size, mask_format="polygon"):
    """
    Build an :class:`Instances` object from per-instance annotation dicts.

    Args:
        annos (list[dict]): a list of instance annotations in one image, each
            element for one instance.
        image_size (tuple): height, width
        mask_format: unused here; kept for API compatibility.

    Returns:
        Instances:
            with fields "gt_boxes" (clipped to the image), "gt_classes" and
            "gt_attributes" (instances without an "attributes" entry get a
            vector of sixteen -1s).
    """
    target = Instances(image_size)

    box_list = [BoxMode.convert(a["bbox"], a["bbox_mode"], BoxMode.XYXY_ABS) for a in annos]
    target.gt_boxes = Boxes(box_list)
    target.gt_boxes.clip(image_size)

    target.gt_classes = torch.tensor([a["category_id"] for a in annos], dtype=torch.int64)

    # Fall back to a 16-dim "-1" attribute vector when absent.
    attr_list = [a.get("attributes", [-1] * 16) for a in annos]
    target.gt_attributes = torch.tensor(attr_list, dtype=torch.int64)
    return target
MPMQA | MPMQA-master/dataset/mqa_page_contrast.py | # Copyright(c) 2022 Liang Zhang
# E-Mail: <zhangliang00@ruc.edu.cn>
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import cv2
import torch
from torch.nn.utils.rnn import pad_sequence
from collections import defaultdict
from torch.utils.data import DataLoader
from transformers import T5TokenizerFast
import torch.distributed as dist
from .utils import pad_2d_mask
from .mqa_dataset import MQADataset
import sys
sys.path.insert(0, '../')
from parser import get_base_parser
class MQAContrastDataset(MQADataset):
    """Page-retrieval variant of :class:`MQADataset`.

    Iterates the QA pairs of one manual at a time (see :meth:`set_manual`),
    pairing each question with its page's image/token/box/segment inputs for
    contrastive question-page training.
    """
    def __init__(self, args, root, tokenizer, split='train'):
        super().__init__(args, root, tokenizer, split, task='retrieval')
        # Invert the qa -> page mapping so each page knows its QA ids.
        self.dataid2qaids = defaultdict(list)
        for qaid, dataid in self.qaid2dataid.items():
            self.dataid2qaids[dataid].append(qaid)
        # Group pages and QA pairs by manual name (2nd path component of
        # image_filename).
        self.manual2dataids = defaultdict(list)
        self.manual2qaids = defaultdict(list)
        for dataid, datum in enumerate(self.data):
            name = datum['image_filename'].split('/')[1]
            self.manual2dataids[name].append(dataid)
            self.manual2qaids[name].extend(self.dataid2qaids[dataid])
        self.manuals = list(self.manual2dataids.keys())
        print(f'Total {len(self.manuals)} manuals')
        self.now_manual = self.manuals[0]
    def get_page(self, dataid):
        """Build one page's inputs: image tensor, context token ids, per-token
        boxes, per-token segment (semantic-class) ids and an attention mask.

        Raises:
            ValueError: if the page contains no regions at all.
        """
        data_dict = self.data[dataid]
        image_path = os.path.join(self.root, data_dict['image_filename'])
        img = cv2.imread(image_path)
        img = torch.from_numpy(img)
        tokens, bboxes, segment_ids = [], [], []
        for region in data_dict['bounding_boxes']:
            region_bbox = region['shape']
            semantic_class = region['structure']
            # One semantic-class token per region, followed by its OCR tokens.
            tokens.extend(self.tokenizer.encode(self.SEMANTIC_CLS2TOKEN[semantic_class], add_special_tokens=False))
            bboxes.append(self.convert_bbox(region_bbox))
            segment_ids.append(self.SEMANTIC_CLS2ID[semantic_class])
            if 'ocr_info' in region:
                for ocr_region in region['ocr_info']:
                    ocr_word = ocr_region['word']
                    ocr_tokens = self.tokenizer.encode(ocr_word, add_special_tokens=False)
                    n_tokens = len(ocr_tokens)
                    tokens.extend(ocr_tokens)
                    # Every subword of a word shares the word's box and segment.
                    bboxes.extend([self.convert_bbox(ocr_region['bbox'])] * n_tokens)
                    segment_ids.extend([self.SEMANTIC_CLS2ID[semantic_class]] * n_tokens)
        assert len(tokens) == len(bboxes) == len(segment_ids)
        if len(bboxes) == 0:
            # Fix: was a leftover `import pdb;pdb.set_trace()` debugger hook.
            raise ValueError(f'Page {image_path} contains no regions; cannot build retrieval inputs')
        context_ids = torch.tensor(tokens)
        context_attn_mask = torch.ones(len(context_ids), dtype=torch.int64)
        bboxes = torch.stack(bboxes, dim=0)
        segment_ids = torch.tensor(segment_ids)
        return img, context_ids, bboxes, segment_ids, context_attn_mask
    def __getitem__(self, idx):
        """Return the idx-th QA pair of the current manual, with its question
        encoding and the full inputs of the page it belongs to."""
        qaid = self.manual2qaids[self.now_manual][idx]
        dataid = self.qaid2dataid[qaid]
        qa_pair = self.qa_pairs[qaid]
        question = qa_pair['question']['text']
        question_dict = self.tokenizer(question, return_tensors='pt')
        question_ids, question_attn_mask = question_dict['input_ids'].squeeze(dim=0), question_dict['attention_mask'].squeeze(dim=0)
        # All question tokens share the dedicated "Question" segment id.
        question_segment_ids = torch.zeros_like(question_ids)
        question_segment_ids.fill_(self.SEMANTIC_CLS2ID['Question'])
        img, context_ids, bboxes, segment_ids, context_attn_masks = self.get_page(dataid)
        return {
            "qaids": qaid,
            "dataids": dataid,
            "imgs": img,
            "bboxes": bboxes,
            "question_ids": question_ids,
            "question_attn_mask": question_attn_mask,
            "question_segment_ids": question_segment_ids,
            "context_ids": context_ids,
            "context_attn_mask": context_attn_masks,
            "segment_ids": segment_ids
        }
    def set_manual(self, manual_name):
        """Select which manual subsequent __getitem__/__len__ calls refer to."""
        self.now_manual = manual_name
    def __len__(self):
        return len(self.manual2qaids[self.now_manual])
def mqa_contrast_collate_fn(dict_list):
    """Collate retrieval samples: pad every 1-d token/mask tensor field to the
    batch maximum; all other fields (qaids, dataids, imgs, bboxes) are kept
    as plain Python lists."""
    batch_dict = defaultdict(list)
    for sample in dict_list:
        for key, value in sample.items():
            batch_dict[key].append(value)
    padded_keys = (
        'question_ids', 'question_attn_mask', 'question_segment_ids',
        'context_ids', 'context_attn_mask', 'segment_ids',
    )
    for key in padded_keys:
        batch_dict[key] = pad_sequence(batch_dict[key], batch_first=True, padding_value=0)
    return batch_dict
MPMQA | MPMQA-master/dataset/utils.py | # Copyright(c) 2022 Liang Zhang
# E-Mail: <zhangliang00@ruc.edu.cn>
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import jsonlines
def read_jsonl(path):
    """Read jsonlines file into python list
    args:
        path - directory of the jsonlines file
    return:
        jsonlines file content in List
    """
    with open(path, 'r', encoding='utf-8') as fp:
        return list(jsonlines.Reader(fp))
def divide_box_grid(box, row_col):
    """Split a box into a uniform grid of sub-boxes.

    args:
        box - (x1, y1, x2, y2) coordinates of the box
        row_col - int or (rows, cols) pair; an int n means an n x n grid
    return:
        list of torch tensors, one (x1, y1, x2, y2) per grid cell, ordered
        with the first grid index varying slowest
    NOTE(review): row_col[0] partitions the x/width axis and row_col[1] the
    y/height axis -- identical to the original behavior; confirm intent.
    """
    if isinstance(row_col, int):
        row_col = (row_col, row_col)
    n_rows, n_cols = row_col
    with torch.no_grad():
        x1, y1, x2, y2 = box[0], box[1], box[2], box[3]
        cell_w = (x2 - x1) / n_rows
        cell_h = (y2 - y1) / n_cols
        return [
            torch.tensor([
                x1 + r * cell_w, y1 + c * cell_h,
                x1 + (r + 1) * cell_w, y1 + (c + 1) * cell_h,
            ])
            for r in range(n_rows)
            for c in range(n_cols)
        ]
def pad_2d_mask(mask_list, padding_value=0):
    """Perform padding, convert a list of 2-d masks into batch

    Each mask is copied into the top-left corner of a (max_n x max_n) slice;
    vacant cells hold ``padding_value``.

    args:
        mask_list [N x N,] - list of 2-d masks (typically square; sizes may differ)
        padding_value - the value to occupy vacancy
    return:
        batch_mask [B x Max_N x Max_N, ] - padded batch mask
    """
    batch_size = len(mask_list)
    # Largest extent over both dimensions. Also fixes the original
    # `n, n = mask.shape`, which silently kept only the column count and
    # would mis-place a non-square mask.
    max_len = max(max(m.size(0), m.size(1)) for m in mask_list)
    batch_mask = torch.full(
        (batch_size, max_len, max_len), padding_value, dtype=mask_list[0].dtype
    )
    for i, mask in enumerate(mask_list):
        rows, cols = mask.shape
        batch_mask[i, :rows, :cols] = mask
    return batch_mask
def get_sub_batch(batch, start_idx=0, end_idx=1):
    """Get part of a Dict batch, use for debugging
    args:
        batch - a dict that contains a batch of data
        start_idx - start id of the sub_batch
        end_idx - end id of the sub_batch (exclusive)
    return:
        sub_batch - dict with every value sliced to [start_idx, end_idx)
    """
    assert start_idx < end_idx
    return {key: value[start_idx:end_idx] for key, value in batch.items()}
| 2,864 | 30.483516 | 86 | py |
MPMQA | MPMQA-master/dataset/mqa_dataset.py | # Copyright(c) 2022 Liang Zhang
# E-Mail: <zhangliang00@ruc.edu.cn>
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import cv2
import time
import json
import torch
import random
import numpy as np
from torch.nn.utils.rnn import pad_sequence
from collections import defaultdict
from torch.utils.data import Dataset, DataLoader
from .utils import read_jsonl, pad_2d_mask, divide_box_grid
from transformers import T5TokenizerFast
import torch.distributed as dist
from .const import VRM_SEMANTIC_TOKENS, VRM_SEMANTIC_CLS2ID, VRM_SEMANTIC_CLS2TOKEN, \
VRM_FINE_GRAIN_CLS
class MQADataset(Dataset):
    """Manual-page QA dataset.

    Each sample pairs a question with its page: the page image, OCR token
    ids, per-token boxes and segment (semantic-class) ids, the answer token
    sequence for teacher forcing, and region-level relevance labels.

    Args:
        args: experiment options (reads .mask, .va_type, .max_page_len,
            .use_retrieved_qa2dataid / .retrieved_qa2dataid).
        root: dataset root holding data/<split>.jsonl and the page images.
        tokenizer: HuggingFace T5-style tokenizer (<pad> is the decoder
            start token).
        split: dataset split name ('train', ...).
        task: 'qa' (default) or 'retrieval'.
    """
    def __init__(self, args, root, tokenizer, split='train', task='qa'):
        super().__init__()
        self.args = args
        self.root = root
        self.tokenizer = tokenizer
        self.split = split
        self.task = task
        data_path = os.path.join(root, 'data', f'{split}.jsonl')
        self.data = read_jsonl(data_path)
        # region id -> semantic class, over every page.
        self.rid2cls = {}
        for data_dict in self.data:
            for region in data_dict['bounding_boxes']:
                self.rid2cls[region['id']] = region['structure']
        self.qa_pairs, self.qaid2dataid = self.get_qa_pairs()
        self.set_const()
        # Register the semantic-class tokens (and optionally <mask>) with the
        # tokenizer so they survive encoding as single tokens.
        special_tokens = self.SEMANTIC_TOKENS.copy()
        if self.args.mask:
            special_tokens.append('<mask>')
        self.tokenizer.add_special_tokens({"additional_special_tokens": special_tokens})
        print('Special tokens:')
        print(self.tokenizer.SPECIAL_TOKENS_ATTRIBUTES)
        self.subwords = list(self.tokenizer.get_vocab().keys())
        print(f'Total subwords: {len(self.subwords)}')
    def set_use_retrieved_qa2dataid(self):
        """Replace the gold question->page mapping with a retrieved one loaded
        from args.retrieved_qa2dataid[split] (for end-to-end evaluation)."""
        assert self.args.use_retrieved_qa2dataid
        with open(self.args.retrieved_qa2dataid[self.split], 'r') as f:
            retrieved_qa2dataid = json.load(f)
        retrieved_qa2dataid = {int(key): value for key, value in retrieved_qa2dataid.items()}
        print(f'Evaluate QA/sd with retrieved pages in {self.args.retrieved_qa2dataid[self.split]}')
        # The retrieved mapping must cover exactly the same QA ids.
        assert len(retrieved_qa2dataid.keys() & self.qaid2dataid.keys()) == len(self.qaid2dataid.keys()) == len(retrieved_qa2dataid.keys())
        self.qaid2dataid = retrieved_qa2dataid
    def set_const(self):
        """Bind the VRM semantic-class vocabulary used by this dataset."""
        self.SEMANTIC_TOKENS = VRM_SEMANTIC_TOKENS
        self.SEMANTIC_CLS2ID = VRM_SEMANTIC_CLS2ID
        self.SEMANTIC_CLS2TOKEN = VRM_SEMANTIC_CLS2TOKEN
        self.FINE_GRAIN_CLS = VRM_FINE_GRAIN_CLS
    def get_qa_pairs(self):
        """Flatten per-page qa_data into a list, recording each QA's page id."""
        qaid = 0
        qa_pairs = []
        qaid2dataid = {}
        for dataid, item in enumerate(self.data):
            for qa_item in item['qa_data']:
                qa_pairs.append(qa_item)
                qaid2dataid[qaid] = dataid
                qaid += 1
        print(f'Total {len(qa_pairs)} qa pairs')
        return qa_pairs, qaid2dataid
    def convert_bbox(self, d):
        """Convert an {x, y, width, height} dict to an (x1, y1, x2, y2) tensor."""
        x1, y1, w, h = d['x'], d['y'], d['width'], d['height']
        x2, y2 = x1 + w, y1 + h
        return torch.tensor([x1, y1, x2, y2])
    def merge_bbox(self, box_list):
        """Return the smallest (x1, y1, x2, y2) box enclosing all given boxes."""
        x1, y1, x2, y2 = 1e9, 1e9, -1, -1
        for box in box_list:
            x1 = min(x1, box[0])
            y1 = min(y1, box[1])
            x2 = max(x2, box[2])
            y2 = max(y2, box[3])
        return torch.tensor([x1, y1, x2, y2])
    def __getitem__(self, idx):
        """Build one training sample (question + page + answer + labels).

        Raises:
            ValueError: if the page contains no regions at all.
        """
        qa_pair = self.qa_pairs[idx]
        dataid = self.qaid2dataid[idx]
        question = qa_pair['question']['text']
        answer = qa_pair['answer']['text']
        relevant_rids = qa_pair['answer']['relevant']
        question_dict = self.tokenizer(question, return_tensors='pt')
        question_ids, question_attn_mask = question_dict['input_ids'].squeeze(dim=0), question_dict['attention_mask'].squeeze(dim=0)
        # All question tokens share the dedicated "Question" segment id.
        question_segment_ids = torch.zeros_like(question_ids)
        question_segment_ids.fill_(self.SEMANTIC_CLS2ID['Question'])
        # T5 treat <pad> as start token
        # Default: No start token is inserted in position 0
        answer_all = self.tokenizer('<pad>'+answer, return_tensors='pt').input_ids.squeeze(dim=0)
        answer_ids = answer_all[:-1]
        answer_labels = answer_all[1:]
        # Causal (lower-triangular) mask for teacher-forced decoding.
        answer_attn_mask = torch.tril(torch.ones((len(answer_ids), len(answer_ids)), dtype=question_attn_mask.dtype))
        region_positions = defaultdict(list)
        data_dict = self.data[dataid]
        image_path = os.path.join(self.root, data_dict['image_filename'])
        img = cv2.imread(image_path)
        # Retry transient read failures (e.g. networked storage) up to 10x.
        retry = 0
        while img is None:
            time.sleep(1)
            img = cv2.imread(image_path)
            retry += 1
            if retry > 10:
                assert img is not None, f'Retrying to read {image_path} for 10 times but failed'
        img = torch.from_numpy(img)
        bboxes = []
        segment_ids = []
        region_ids = []
        related_region_labels = []
        tokens = []
        mlm_labels = []  # for whole word mlm
        for r, region in enumerate(data_dict['bounding_boxes']):
            is_related_region = int((region['id'] in relevant_rids))
            # region_positions[rid] = [start, end) token span of the region.
            region_positions[region['id']].append(len(tokens))
            region_ids.append(region['id'])
            region_bbox = region['shape']
            semantic_class = region['structure']
            # One semantic-class token per region, followed by its OCR tokens.
            tokens.extend(self.tokenizer.encode(self.SEMANTIC_CLS2TOKEN[semantic_class], add_special_tokens=False))
            bboxes.append(self.convert_bbox(region_bbox))
            segment_ids.append(self.SEMANTIC_CLS2ID[semantic_class])
            related_region_labels.append(is_related_region)
            if 'ocr_info' in region:
                for ocr_region in region['ocr_info']:
                    ocr_word = ocr_region['word']
                    ocr_tokens = self.tokenizer.encode(ocr_word, add_special_tokens=False)
                    n_tokens = len(ocr_tokens)
                    tokens.extend(ocr_tokens)
                    # Every subword of a word shares the word's box and segment.
                    bboxes.extend([self.convert_bbox(ocr_region['bbox'])] * n_tokens)
                    segment_ids.extend([self.SEMANTIC_CLS2ID[semantic_class]] * n_tokens)
                    # 'tokenwise' supervises every token; 'global' supervises
                    # only the region token (-1 is ignored by the loss).
                    if self.args.va_type == 'tokenwise':
                        related_region_labels.extend([is_related_region] * n_tokens)
                    elif self.args.va_type == 'global':
                        related_region_labels.extend([-1] * n_tokens)
            region_positions[region['id']].append(len(tokens))
        assert len(tokens) == len(bboxes) == len(segment_ids) == len(related_region_labels), "length mismatch"
        if len(bboxes) == 0:
            # Fix: was a leftover `import pdb;pdb.set_trace()` debugger hook.
            raise ValueError(f'No regions found for page {image_path}; cannot build model inputs')
        context_ids = torch.tensor(tokens)
        context_attn_mask = torch.ones(len(context_ids), dtype=question_attn_mask.dtype)
        bboxes = torch.stack(bboxes, dim=0)
        segment_ids = torch.tensor(segment_ids)
        related_region_labels = torch.tensor(related_region_labels)
        mlm_labels = torch.tensor(mlm_labels)
        qa_ids = qa_pair['id']
        # Page-side sequences are truncated to args.max_page_len.
        return {
            'qa_ids': qa_ids,
            'image_paths': image_path,
            'imgs': img,
            'question_ids': question_ids,
            'question_attn_mask': question_attn_mask,
            'question_segment_ids': question_segment_ids,
            'answer_ids': answer_ids,
            'answer_attn_mask': answer_attn_mask,
            'answer_labels': answer_labels,
            'context_ids': context_ids[:self.args.max_page_len],
            'context_attn_mask': context_attn_mask[:self.args.max_page_len],
            'bboxes': bboxes[:self.args.max_page_len],
            'segment_ids': segment_ids[:self.args.max_page_len],
            'related_region_labels': related_region_labels[:self.args.max_page_len],
            'region_positions': region_positions,
            'related_regions': relevant_rids,
            'mlm_labels': mlm_labels[:self.args.max_page_len]
        }
    def __len__(self):
        return len(self.qa_pairs)
def mqa_collate_fn(dict_list):
    """Collate QA samples: zero-pad the 1-d token/label tensors, pad the 2-d
    answer attention masks, pad relevance labels with -1 (ignore index), and
    keep every other field as a plain Python list."""
    batch_dict = defaultdict(list)
    for sample in dict_list:
        for key, value in sample.items():
            batch_dict[key].append(value)
    zero_padded = (
        'question_ids', 'question_attn_mask', 'question_segment_ids',
        'answer_ids', 'answer_labels',
        'context_ids', 'context_attn_mask', 'segment_ids',
    )
    for key in zero_padded:
        batch_dict[key] = pad_sequence(batch_dict[key], batch_first=True, padding_value=0)
    batch_dict['answer_attn_mask'] = pad_2d_mask(batch_dict['answer_attn_mask'], padding_value=0)
    batch_dict['related_region_labels'] = pad_sequence(
        batch_dict['related_region_labels'], batch_first=True, padding_value=-1
    )
    return batch_dict
def get_mqa_loader(args, root, tokenizer, batch_size, split='train', num_workers=4, eval_on_train=False):
    """Build a DataLoader over :class:`MQADataset`.

    Under deepspeed a DistributedSampler is used (shuffling only for real
    training epochs); otherwise plain shuffling/drop_last apply to training.
    """
    dataset = MQADataset(args, root, tokenizer, split)
    is_training = (split == 'train') and not eval_on_train
    sampler = None
    if hasattr(args, 'deepspeed') and args.deepspeed:
        sampler = torch.utils.data.DistributedSampler(
            dataset,
            num_replicas=dist.get_world_size(),
            rank=dist.get_rank(),
            shuffle=is_training,
        )
    return DataLoader(
        dataset=dataset,
        sampler=sampler,
        batch_size=batch_size,
        num_workers=num_workers,
        collate_fn=mqa_collate_fn,
        shuffle=(is_training and sampler is None),
        drop_last=is_training,
    )
| 11,026 | 42.413386 | 139 | py |
MPMQA | MPMQA-master/models/utils.py | # Copyright(c) 2022 Liang Zhang
# E-Mail: <zhangliang00@ruc.edu.cn>
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.nn as nn
import torch.distributed as dist
class MLP(nn.Module):
    """Two-layer perceptron with an optional residual connection.

    Args:
        d_in: input feature size.
        d_hidden: hidden layer size.
        d_out: output feature size (must equal d_in when res=True).
        non_linear: activation name; only 'relu' is supported.
        res: if True, add the input back onto the output.

    Raises:
        ValueError: for an unsupported ``non_linear`` value (the original
            silently skipped creating ``self.activate`` and crashed later
            in forward()).
    """
    def __init__(self, d_in, d_hidden, d_out, non_linear='relu', res=False):
        super().__init__()
        self.linear_1 = nn.Linear(d_in, d_hidden)
        self.linear_2 = nn.Linear(d_hidden, d_out)
        if non_linear == 'relu':
            self.activate = nn.ReLU()
        else:
            # Fail fast instead of deferring an AttributeError to forward().
            raise ValueError(f"Unsupported non_linear activation: {non_linear!r}")
        self.res = res
        if self.res:
            assert d_in == d_out
    def forward(self, x):
        """Apply linear -> ReLU -> linear (+ residual when enabled)."""
        out = self.linear_2(self.activate(self.linear_1(x)))
        return out + x if self.res else out
class NCELoss(nn.Module):
    """InfoNCE loss over a similarity matrix whose diagonal holds the
    positive pairs; optionally averaged over both matching directions.

    Args:
        t: softmax temperature (similarities are divided by it).
        bidirectional: if True, average the i->j and j->i losses.
    """
    def __init__(self, t=1.0, bidirectional=False):
        super().__init__()
        self.t = t
        self.bidirectional = bidirectional
    def get_loss(self, sim_matrix):
        """Mean negative log-softmax of the (temperature-scaled) diagonal."""
        log_probs = torch.nn.functional.log_softmax(sim_matrix / self.t, dim=-1)
        return (-torch.diag(log_probs)).mean()
    def forward(self, sim_matrix_ij, sim_matrix_ji=None):
        if not self.bidirectional:
            return self.get_loss(sim_matrix_ij)
        if sim_matrix_ji is None:
            sim_matrix_ji = sim_matrix_ij.t()
        return (self.get_loss(sim_matrix_ij) + self.get_loss(sim_matrix_ji)) / 2
class AllGather(torch.autograd.Function):
    """Differentiable all-gather: concatenates same-shape tensors from every
    rank along dim 0 and, in backward, routes each rank its own grad slice."""

    @staticmethod
    def forward(ctx, tensor):
        world_size = dist.get_world_size()
        gathered = [torch.empty_like(tensor) for _ in range(world_size)]
        dist.all_gather(gathered, tensor)
        # Remember where this rank's rows land in the concatenated output.
        ctx.rank = dist.get_rank()
        ctx.batch_size = tensor.shape[0]
        return torch.cat(gathered, dim=0)

    @staticmethod
    def backward(ctx, grad_output):
        start = ctx.batch_size * ctx.rank
        local_grad = grad_output[start:start + ctx.batch_size]
        return (local_grad, None)
class AllGatherBatch(torch.autograd.Function):
    """All-gather for variable-length batches: gathers tensors as python
    objects, zero-pads them to a common shape via ``pad_features``, and in
    backward slices this rank's (unpadded) gradient back out."""

    @staticmethod
    def forward(ctx, tensor):
        world_size = dist.get_world_size()
        gathered = [None] * world_size
        dist.all_gather_object(gathered, tensor)
        # Remember this rank's row span and original sequence length so the
        # padding can be stripped off again in backward.
        ctx.rank = dist.get_rank()
        ctx.batch_size = tensor.shape[0]
        ctx.length = tensor.shape[1]
        return pad_features(gathered)

    @staticmethod
    def backward(ctx, grad_output):
        start = ctx.batch_size * ctx.rank
        local_grad = grad_output[start:start + ctx.batch_size, :ctx.length]
        return (local_grad, None)
def pad_features(tensors):
    """Stack a list of batched tensors along dim 0, zero-padding every
    trailing dimension to the largest size found across the list.

    dtype, device and requires_grad are copied from this rank's own tensor.
    Assumes all tensors share the same rank (number of dims) and that only
    dim 1 varies in the copied region.
    """
    own = tensors[dist.get_rank()]
    total_batch = sum(t.shape[0] for t in tensors)
    padded_shape = [total_batch]
    for axis in range(1, len(tensors[0].shape)):
        padded_shape.append(max(t.shape[axis] for t in tensors))
    padded = torch.zeros(padded_shape, device=own.device, dtype=own.dtype,
                         requires_grad=own.requires_grad)
    offset = 0
    for t in tensors:
        padded[offset:offset + t.size(0), :t.size(1)] = t
        offset += t.size(0)
    return padded
MPMQA | MPMQA-master/models/mqa_model.py | # Copyright(c) 2022 Liang Zhang
# E-Mail: <zhangliang00@ruc.edu.cn>
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import OrderedDict
import torch
import torch.nn as nn
import torch.distributed as dist
import random
from detector.ROIFeatExtractor import ROIFeatExtractor
from models.utils import NCELoss, AllGather, AllGatherBatch, pad_features, MLP
from transformers import T5ForConditionalGeneration, T5TokenizerFast
from transformers.generation_beam_search import BeamSearchScorer
from transformers.modeling_outputs import BaseModelOutputWithPastAndCrossAttentions
from torch.nn.functional import normalize as norm
class MQAT5Model(nn.Module):
    """T5-based multimodal manual-QA model.

    Encodes a manual page (tokens + segment ids + ROI appearance features +
    normalized box locations) together with a question, then supports:
    textual answer generation (greedy / beam search), salient-region
    ("visual answer") detection, and global or tokenwise question/page
    contrastive objectives.
    """
    def __init__(self, args, pretrained_dir='t5-base'):
        super().__init__()
        self.args = args
        # Frozen ROI feature extractor; kept in eval mode and run under
        # no_grad in get_context_embedding.
        self.roi_extractor = ROIFeatExtractor(args.roi_config, args.roi_model, args.roi_bua)
        self.roi_extractor.eval()
        self.t5 = T5ForConditionalGeneration.from_pretrained(pretrained_dir)
        self.encoder = self.t5.get_encoder()
        self.decoder = self.t5.get_decoder()
        self.lm_head = self.t5.get_output_embeddings()
        self.tokenizer = T5TokenizerFast.from_pretrained(pretrained_dir)
        self.model_dim = self.t5.model_dim
        self.embed_token = self.t5.shared
        # 11 segment types; index 0 is padding.
        self.segment_embedding = nn.Embedding(11, self.model_dim, padding_idx=0)
        # 2048-d ROI features -- presumably from the detector backbone; TODO confirm.
        self.apperance_embedding = nn.Linear(2048, self.model_dim)
        self.location_embedding = nn.Linear(4, self.model_dim)
        if args.visual_answer:
            # Per-token binary (salient / not salient) classifier head.
            if args.va_module_type == 'map':
                self.saliency_detector = nn.Linear(self.model_dim, 2)
            elif args.va_module_type == 'linear':
                self.saliency_detector = nn.Sequential(
                    nn.Linear(self.model_dim, self.model_dim),
                    nn.Linear(self.model_dim, 2)
                )
            elif args.va_module_type == 'mlp':
                self.saliency_detector = nn.Sequential(
                    MLP(self.model_dim, self.model_dim, self.model_dim),
                    nn.Linear(self.model_dim, 2)
                )
            else:
                raise NotImplementedError
        if args.page_contrast:
            # Optional projection head applied before the contrastive loss.
            if args.page_contrast_module_type == 'linear':
                self.page_contrast_module = nn.Linear(self.model_dim, self.model_dim)
            elif args.page_contrast_module_type == 'mlp':
                self.page_contrast_module = MLP(self.model_dim, self.model_dim, self.model_dim, res=True)
            elif args.page_contrast_module_type is None:
                pass
            else:
                raise NotImplementedError
        self.max_dec_len = args.max_dec_len
        # ce_loss ignores pad label 0; bce_loss is (despite its name) a
        # CrossEntropyLoss over 2 classes ignoring label -1.
        self.ce_loss = nn.CrossEntropyLoss(ignore_index=0, reduction='mean')
        self.bce_loss = nn.CrossEntropyLoss(ignore_index=-1, reduction='mean', label_smoothing=self.args.va_label_smoothing)
        self.nce_loss = NCELoss(t=args.page_contrast_t, bidirectional=args.page_contrast_bidirection)

    def resize_token_embeddings(self):
        """Resize the T5 embedding table to the (possibly extended) tokenizer."""
        self.t5.resize_token_embeddings(len(self.tokenizer))

    def norm_bboxes(self, bboxes):
        """Normalize (N, 4) xyxy boxes by their own width/height.

        NOTE(review): this divides by the box's own w/h (so x_max/w etc.),
        not by the page size -- presumably intentional; confirm against the
        data pipeline.
        """
        with torch.no_grad():
            x_min, y_min, x_max, y_max = bboxes[:, 0], bboxes[:, 1], bboxes[:, 2], bboxes[:, 3]
            w = x_max - x_min
            h = y_max - y_min
            normed_bboxes = torch.stack([x_min / w, y_min / h, x_max / w, y_max / h], dim=1)
        return normed_bboxes

    def get_direction(self, region_boxes):
        """Return an (N, N) matrix of quantized pairwise direction labels
        between region box centers (8 sectors of pi/4); diagonal is -1."""
        box_centers = torch.stack([region_boxes[:,0]+region_boxes[:,2],
                                   region_boxes[:,1]+region_boxes[:,3]], dim=1) / 2
        relative = norm(box_centers.unsqueeze(dim=0) - box_centers.unsqueeze(dim=1), dim=2)
        angle_upper = torch.acos(relative[:,:,0])
        # 0.999 keeps the lower half-plane angle strictly below 2*pi.
        angle_bottom = angle_upper + 0.999 * torch.pi
        angle = torch.where(relative[:,:,1]>0, angle_upper, angle_bottom)
        direction_labels = (angle * 4 / torch.pi).long()
        # Force the diagonal (self-pairs) to -1.
        direction_labels = direction_labels - (direction_labels.diag()+1).diag_embed()
        return direction_labels

    def combine_embedding_and_mask(self, question_embeddings, question_mask, context_embeddings, context_mask):
        """Concatenate each sample's question and context embeddings back to
        back (dropping inter-sample padding) and build the joint mask."""
        batch_size = question_mask.size(0)
        question_lengths = question_mask.sum(dim=-1)
        context_lengths = context_mask.sum(dim=-1)
        total_lengths = question_lengths + context_lengths
        max_len = total_lengths.max()
        total_mask = torch.zeros((batch_size, max_len),
                                 dtype=question_mask.dtype, device=question_mask.device)
        total_embeddings = torch.zeros((batch_size, max_len, self.model_dim),
                                       dtype=question_embeddings.dtype, device=question_embeddings.device)
        for i in range(batch_size):
            q_length_i = question_lengths[i]
            c_length_i = context_lengths[i]
            total_embeddings[i, :q_length_i] = question_embeddings[i, :q_length_i]
            total_embeddings[i, q_length_i:q_length_i+c_length_i] = context_embeddings[i, :c_length_i]
            total_mask[i, :total_lengths[i]] = 1
        return total_embeddings, total_mask

    def divide_embedding(self, all_embeddings, question_mask, context_mask):
        """Inverse of combine_embedding_and_mask: split joint hidden states
        back into per-sample question and context tensors (zero-padded)."""
        batch_size = all_embeddings.size(0)
        question_lengths = question_mask.sum(dim=-1)
        context_lengths = context_mask.sum(dim=-1)
        q_max_len = question_lengths.max()
        c_max_len = context_lengths.max()
        question_embeddings = torch.zeros((batch_size, q_max_len, self.model_dim),
                                          dtype=all_embeddings.dtype, device=all_embeddings.device)
        context_embeddings = torch.zeros((batch_size, c_max_len, self.model_dim),
                                         dtype=all_embeddings.dtype, device=all_embeddings.device)
        for i in range(batch_size):
            q_length_i = question_lengths[i]
            c_length_i = context_lengths[i]
            question_embeddings[i, :q_length_i] = all_embeddings[i, :q_length_i]
            context_embeddings[i, :c_length_i] = all_embeddings[i, q_length_i:q_length_i+c_length_i]
        return question_embeddings, context_embeddings

    def get_question_embedding(self, question_ids, question_segment_ids):
        """Token embedding + segment embedding for the question."""
        question_embed = self.embed_token(question_ids)
        question_segment_embed = self.segment_embedding(question_segment_ids)
        question_embed = question_embed + question_segment_embed
        return question_embed

    def get_context_embedding(self, context_ids, imgs, bboxes, segment_ids):
        """Token + segment + ROI appearance + box location embeddings for the page."""
        context_embed = self.embed_token(context_ids)
        segment_embed = self.segment_embedding(segment_ids)
        context_embed += segment_embed
        # apperance_embed: ROI features are extracted without gradients and
        # cast to the projection layer's dtype before projection.
        with torch.no_grad():
            roi_features = self.roi_extractor.float()(imgs, bboxes)
        roi_features = [f.type(self.apperance_embedding.weight.dtype) for f in roi_features]
        apperance_embed_list = [self.apperance_embedding(f) for f in roi_features]
        apperance_embed = torch.zeros_like(context_embed)
        for i, embed in enumerate(apperance_embed_list):
            apperance_embed[i, :len(embed)] = embed
        context_embed += apperance_embed
        # location_embed: projected normalized box coordinates.
        normed_bboxes = [self.norm_bboxes(bbox).type(self.location_embedding.weight.dtype) for bbox in bboxes]
        location_embed_list = [self.location_embedding(b) for b in normed_bboxes]
        location_embed = torch.zeros_like(context_embed)
        for i, embed in enumerate(location_embed_list):
            location_embed[i, :len(embed)] = embed
        context_embed += location_embed
        return context_embed

    def get_embeddings_and_mask(self, question_ids, context_ids, imgs, bboxes,
                                question_attn_mask, context_attn_mask,
                                segment_ids, question_segment_ids, **kwargs):
        """Build the joint (question + page) input embeddings and mask."""
        question_embed = self.get_question_embedding(question_ids, question_segment_ids)
        context_embed = self.get_context_embedding(context_ids, imgs, bboxes, segment_ids)
        input_embeds, attn_mask = self.combine_embedding_and_mask(question_embed, question_attn_mask, context_embed, context_attn_mask)
        return input_embeds, attn_mask

    def context_hidden_weight(self, question_hidden, question_mask, context_hidden, context_mask, method, **kwargs):
        """Reweight context hidden states ('hard' weighting only) and rejoin
        them with the question hidden states."""
        if method == 'hard':
            context_weights = kwargs['context_weights']
            context_hidden = context_hidden * context_weights
            all_hidden, attn_mask = self.combine_embedding_and_mask(question_hidden, question_mask, context_hidden, context_mask)
            return all_hidden, attn_mask
        else:
            raise NotImplementedError

    def beam_search(self, beam_size, question_ids, context_ids, imgs, bboxes,
                    question_attn_mask, context_attn_mask,
                    segment_ids, question_segment_ids, **kwargs):
        """Generate answers with beam search; returns (token ids, decoded strings)."""
        batch_size = question_ids.size(0)
        beam_scorer = BeamSearchScorer(batch_size, beam_size, device=question_ids.device, **kwargs)
        input_embeds, attn_mask = self.get_embeddings_and_mask(
            question_ids, context_ids, imgs, bboxes,
            question_attn_mask, context_attn_mask,
            segment_ids, question_segment_ids, **kwargs)
        encoder = self.t5.get_encoder()
        encoder_outputs = encoder(
            inputs_embeds=input_embeds,
            attention_mask=attn_mask,
            output_attentions=False,
            output_hidden_states=False,
        )
        # Expand encoder states and masks to one copy per beam.
        encoder_outputs["last_hidden_state"] = encoder_outputs.last_hidden_state.repeat_interleave(
            beam_size, dim=0)
        attn_mask = attn_mask.repeat_interleave(
            beam_size, dim=0
        )
        # T5 uses <pad> (id 0) as the decoder start token.
        decoder_input_ids = torch.zeros((batch_size, 1), dtype=question_ids.dtype, device=question_ids.device)
        decoder_input_ids = decoder_input_ids.repeat_interleave(
            beam_size, dim=0
        )
        outputs = self.t5.beam_search(
            encoder_outputs=encoder_outputs, attention_mask=attn_mask, input_ids=decoder_input_ids, beam_scorer=beam_scorer, max_length=self.max_dec_len
        )
        predictions = self.tokenizer.batch_decode(outputs, skip_special_tokens=True)
        return outputs, predictions

    def greedy_inference(self, question_ids, context_ids, imgs, bboxes,
                         question_attn_mask, context_attn_mask,
                         segment_ids, question_segment_ids, **kwargs):
        """Greedy decoding with KV-cache reuse; returns (token ids, strings)."""
        input_embeds, attn_mask = self.get_embeddings_and_mask(
            question_ids, context_ids, imgs, bboxes,
            question_attn_mask, context_attn_mask,
            segment_ids, question_segment_ids, **kwargs)
        batch_size = input_embeds.size(0)
        # <pad> as start token
        decoder_input_ids = torch.zeros((batch_size, 1), dtype=question_ids.dtype, device=question_ids.device)
        out = self.t5(inputs_embeds=input_embeds, attention_mask=attn_mask,
                      decoder_input_ids=decoder_input_ids, return_dict=True, use_cache=True)
        past_key_values = out.past_key_values
        encoder_outputs = (out.encoder_last_hidden_state,)
        outputs = []
        logits = out.logits
        outputs.append(logits.argmax(dim=-1))
        for i in range(self.max_dec_len-1):
            out = self.t5(encoder_outputs=encoder_outputs, attention_mask=attn_mask, past_key_values=past_key_values,
                          decoder_input_ids=outputs[-1], use_cache=True)
            past_key_values = out.past_key_values
            logits = out.logits
            outputs.append(logits.argmax(dim=-1))
        outputs = torch.cat(outputs, dim=1)
        predictions = self.tokenizer.batch_decode(outputs, skip_special_tokens=True)
        return outputs, predictions

    def mlm_inference(self, question_ids, context_ids, imgs, bboxes,
                      question_attn_mask, context_attn_mask,
                      segment_ids, question_segment_ids, mlm_labels, **kwargs):
        """Score masked-LM predictions on context tokens; returns (correct, total).

        NOTE(review): ``self.mlm_head`` is not defined in __init__ in this
        file -- presumably attached elsewhere; verify before calling.
        """
        input_embeds, attn_mask = self.get_embeddings_and_mask(
            question_ids, context_ids, imgs, bboxes,
            question_attn_mask, context_attn_mask,
            segment_ids, question_segment_ids, **kwargs)
        encoder = self.t5.get_encoder()
        encoder_outputs = encoder(
            inputs_embeds=input_embeds,
            attention_mask=attn_mask,
            output_attentions=False,
            output_hidden_states=True,
        )
        encoder_last_hidden_states = encoder_outputs.last_hidden_state
        _, context_hidden_state = self.divide_embedding(encoder_last_hidden_states, question_attn_mask, context_attn_mask)
        if self.t5.config.tie_word_embeddings:
            # Same rescaling T5 applies before its tied LM head.
            context_hidden_state = context_hidden_state * (self.model_dim**-0.5)
        probs = self.mlm_head(context_hidden_state).argmax(dim=-1)
        probs = probs.flatten()
        labels = mlm_labels.flatten()
        indices = torch.where(labels)[0]
        acc_num = (probs[indices] == labels[indices]).sum()
        total_num = len(indices)
        return float(acc_num), float(total_num)

    def visual_answer_inference(self, question_ids, context_ids, imgs, bboxes,
                                question_attn_mask, context_attn_mask,
                                segment_ids, question_segment_ids, region_positions, **kwargs):
        """Predict related regions per sample: rank regions by saliency score,
        keep all above 0.5 and at least ``args.min_va`` regions."""
        input_embeds, attn_mask = self.get_embeddings_and_mask(
            question_ids, context_ids, imgs, bboxes,
            question_attn_mask, context_attn_mask,
            segment_ids, question_segment_ids, **kwargs)
        encoder = self.t5.get_encoder()
        encoder_outputs = encoder(
            inputs_embeds=input_embeds,
            attention_mask=attn_mask,
            output_attentions=False,
            output_hidden_states=True,
        )
        encoder_last_hidden_states = encoder_outputs.last_hidden_state
        question_hidden_state, context_hidden_state = self.divide_embedding(encoder_last_hidden_states, question_attn_mask, context_attn_mask)
        saliency_probs = self.saliency_detector(context_hidden_state).softmax(dim=-1)
        def aggregate_score(scores, method='mean'):
            # 'mean' averages token scores; 'first' uses the region's first token.
            if method == 'mean':
                return scores.mean()
            elif method == 'first':
                if len(scores) == 0:
                    return torch.tensor(0.0)
                else:
                    return scores[0]
        pred_related_regions = [[] for _ in range(len(saliency_probs))]
        for i, saliency_prob in enumerate(saliency_probs):
            region_score_list = []
            for region_id, region_position in region_positions[i].items():
                token_probs = saliency_prob[region_position[0]:region_position[1], 1]
                method = 'mean' if self.args.va_type=='tokenwise' else 'first'
                region_prob = aggregate_score(token_probs, method)
                region_score_list.append((region_id, region_prob))
            region_score_list.sort(key=lambda x: x[1], reverse=True)
            nums = 0
            for (region_id, score) in region_score_list:
                if score >= 0.5:
                    nums += 1
                    pred_related_regions[i].append(region_id)
                elif nums < self.args.min_va:
                    nums += 1
                    pred_related_regions[i].append(region_id)
                else:
                    break
        return pred_related_regions

    def get_global_indices(self, region_positions):
        """Return, per sample, the start token index of every region."""
        batchsize = len(region_positions)
        global_indices = [[] for _ in range(batchsize)]
        for i in range(batchsize):
            for region_id, region_position in region_positions[i].items():
                global_indices[i].append(region_position[0])
        return global_indices

    def cross_encoding(self, question_ids, context_ids, imgs, bboxes,
                       question_attn_mask, context_attn_mask,
                       segment_ids, question_segment_ids, **kwargs):
        """Jointly encode question + page; returns (hidden states, mask)."""
        input_embeds, attn_mask = self.get_embeddings_and_mask(
            question_ids, context_ids, imgs, bboxes,
            question_attn_mask, context_attn_mask,
            segment_ids, question_segment_ids, **kwargs)
        encoder = self.t5.get_encoder()
        encoder_outputs = encoder(
            inputs_embeds=input_embeds,
            attention_mask=attn_mask,
            output_attentions=False,
            output_hidden_states=False,
        )
        encoder_last_hidden_states = encoder_outputs.last_hidden_state
        return encoder_last_hidden_states, attn_mask

    def encoding_question(self, question_ids, question_attn_mask, question_segment_ids, return_hidden=False):
        """Encode the question alone; returns full hiddens or the first token's."""
        question_embeddings = self.get_question_embedding(question_ids, question_segment_ids)
        encoder = self.t5.get_encoder()
        encoder_outputs = encoder(
            inputs_embeds=question_embeddings,
            attention_mask=question_attn_mask,
            output_attentions=False,
            output_hidden_states=False,
        )
        encoder_last_hidden_states = encoder_outputs.last_hidden_state
        if return_hidden:
            return encoder_last_hidden_states
        else:
            return encoder_last_hidden_states[:, 0]

    def encoding_context(self, context_ids, context_attn_mask, imgs, bboxes, segment_ids, return_hidden=False):
        """Encode the page alone; returns full hiddens or the first token's."""
        context_embeddings = self.get_context_embedding(context_ids, imgs, bboxes, segment_ids)
        encoder = self.t5.get_encoder()
        encoder_outputs = encoder(
            inputs_embeds=context_embeddings,
            attention_mask=context_attn_mask,
            output_attentions=False,
            output_hidden_states=False,
        )
        encoder_last_hidden_states = encoder_outputs.last_hidden_state
        if return_hidden:
            return encoder_last_hidden_states
        else:
            return encoder_last_hidden_states[:, 0]

    def two_stream_encoding(self, question_ids, question_attn_mask, question_segment_ids,
                            context_ids, context_attn_mask, imgs, bboxes, segment_ids, return_hidden=False, **kwargs):
        """Encode question and page in separate streams (for contrastive losses)."""
        question_features = self.encoding_question(question_ids, question_attn_mask, question_segment_ids, return_hidden)
        context_features = self.encoding_context(context_ids, context_attn_mask, imgs, bboxes, segment_ids, return_hidden)
        return question_features, context_features

    def similarity_score(self, question_hiddens, context_hiddens, question_attn_mask=None, context_attn_mask=None):
        """Tokenwise max-mean similarity between question and context hiddens.

        Returns (b1, b2) question->context scores and (b2, b1)
        context->question scores, respecting each sample's mask length.
        """
        # BUGFIX: original asserted question_hiddens' rank twice and never
        # checked context_hiddens.
        assert len(question_hiddens.shape) == len(context_hiddens.shape) == 3
        assert len(question_attn_mask.shape) == len(context_attn_mask.shape) == 2
        assert len(question_attn_mask) == len(question_hiddens)
        assert len(context_attn_mask) == len(context_hiddens)
        b1, t1, d = question_hiddens.shape
        b2, t2, d = context_hiddens.shape
        score_matrix = torch.zeros((b1, b2), device=question_hiddens.device, dtype=question_hiddens.dtype, requires_grad=False)
        score_matrix_2 = torch.zeros((b2, b1), device=question_hiddens.device, dtype=question_hiddens.dtype, requires_grad=False)
        # token_score_matrix: b1 x b2 x t1 x t2
        token_score_matrix = torch.einsum('ind,jmd->ijnm', question_hiddens, context_hiddens)
        for i in range(b1):
            for j in range(b2):
                # t1 x t2, cropped to the unpadded lengths
                token_score_matrix_ij = token_score_matrix[i, j]
                token_score_matrix_ij = token_score_matrix_ij[:question_attn_mask[i].sum(), :context_attn_mask[j].sum()]
                score = token_score_matrix_ij.max(dim=-1)[0].mean(dim=0)
                score_2 = token_score_matrix_ij.t().max(dim=-1)[0].mean(dim=0)
                score_matrix[i, j] = score_matrix[i, j].clone() + score.clone()
                score_matrix_2[j, i] = score_matrix_2[j, i].clone() + score_2.clone()
        score_matrix.requires_grad_(True)
        score_matrix_2.requires_grad_(True)
        return score_matrix, score_matrix_2

    def pad_features(self, tensors):
        """Like the module-level pad_features but takes dtype/device from the
        first tensor instead of this rank's (no torch.distributed needed)."""
        # tensors: B x T x D
        shapes = [t.shape for t in tensors]
        total_batch = sum([s[0] for s in shapes])
        dtype = tensors[0].dtype
        device = tensors[0].device
        requires_grad = tensors[0].requires_grad
        padded_shape = [total_batch]
        for i in range(1, len(shapes[0])):
            padded_size_i = 0
            for s in shapes:
                padded_size_i = max(padded_size_i, s[i])
            padded_shape.append(padded_size_i)
        padded_tensor = torch.zeros(padded_shape, device=device, dtype=dtype, requires_grad=requires_grad)
        b_start = 0
        for i, tensor in enumerate(tensors):
            padded_tensor[b_start:b_start+tensor.size(0), :tensor.size(1)] = tensor
            b_start += tensor.size(0)
        return padded_tensor

    def forward_page_contrast_global(self, question_hiddens, context_hiddens, compute_loss=True):
        """Global (CLS-token) question/page contrastive objective, gathered
        across all ranks."""
        question_features = question_hiddens[:, 0]
        context_features = context_hiddens[:, 0]
        if self.args.page_contrast_module_type is not None:
            question_features = self.page_contrast_module(question_features)
            context_features = self.page_contrast_module(context_features)
        question_features = norm(question_features, dim=-1)
        context_features = norm(context_features, dim=-1)
        question_features = AllGather.apply(question_features)
        context_features = AllGather.apply(context_features)
        dist.barrier()
        score_matrix = torch.matmul(question_features, context_features.t())
        if compute_loss:
            pc_loss = self.nce_loss(score_matrix)
            return pc_loss, score_matrix
        else:
            return score_matrix

    def forward_page_contrast_tokenwise(self, question_hiddens, context_hiddens, question_attn_mask, context_attn_mask, compute_loss=True):
        """Tokenwise question/page contrastive objective, gathered across ranks."""
        if self.args.page_contrast_module_type is not None:
            question_hiddens = self.page_contrast_module(question_hiddens)
            context_hiddens = self.page_contrast_module(context_hiddens)
        question_features = norm(question_hiddens, dim=-1)
        context_features = norm(context_hiddens, dim=-1)
        question_features = AllGatherBatch.apply(question_features)
        context_features = AllGatherBatch.apply(context_features)
        _question_attn_mask = AllGatherBatch.apply(question_attn_mask)
        _context_attn_mask = AllGatherBatch.apply(context_attn_mask)
        dist.barrier()
        score_matrix_qc, score_matrix_cq = self.similarity_score(question_features, context_features, _question_attn_mask, _context_attn_mask)
        if compute_loss:
            tpc_loss = self.nce_loss(score_matrix_qc, score_matrix_cq)
            return tpc_loss, score_matrix_qc, score_matrix_cq
        else:
            return score_matrix_qc, score_matrix_cq

    def forward_salient_detection(self, encoder_last_hidden_states, question_attn_mask, context_attn_mask, related_region_labels=None, compute_loss=True):
        """Per-token saliency logits over context tokens (+ optional CE loss)."""
        _, context_hidden_state = self.divide_embedding(encoder_last_hidden_states, question_attn_mask, context_attn_mask)
        saliency_logits = self.saliency_detector(context_hidden_state)
        if compute_loss:
            saliency_logits_reshaped = saliency_logits.reshape(-1, saliency_logits.size(-1))
            sd_loss = self.bce_loss(saliency_logits_reshaped, related_region_labels.flatten())
            return sd_loss, saliency_logits
        else:
            return saliency_logits

    def forward_sep_qa(self, question_hiddens, context_hiddens, question_attn_mask, context_attn_mask,
                       answer_ids, answer_attn_mask, answer_labels):
        """QA loss from separately-encoded streams, rejoined before decoding."""
        encoder_last_hidden_states, attn_mask = \
            self.combine_embedding_and_mask(question_hiddens, question_attn_mask, context_hiddens, context_attn_mask)
        return self.forward_text_answer(encoder_last_hidden_states, attn_mask, answer_ids, answer_attn_mask, answer_labels)

    def forward_text_answer(self, encoder_last_hidden_states, attn_mask,
                            answer_ids, answer_attn_mask, answer_labels,
                            question_attn_mask=None, context_attn_mask=None, related_region_labels=None,
                            saliency_logits=None, region_positions=None, now_step=None, total_step=None):
        """Teacher-forced answer generation loss (cross-entropy over tokens).

        The trailing optional arguments are accepted for interface
        compatibility but unused here.
        """
        assert len(encoder_last_hidden_states) > 0
        decoder_outputs = self.decoder(encoder_hidden_states=encoder_last_hidden_states, encoder_attention_mask=attn_mask,
                                       input_ids=answer_ids, attention_mask=answer_attn_mask, return_dict=True)
        decoder_last_hidden_states = decoder_outputs.last_hidden_state
        if self.t5.config.tie_word_embeddings:
            # Rescale, matching T5's tied-embedding LM head convention.
            decoder_last_hidden_states = decoder_last_hidden_states * (self.model_dim**-0.5)
        logits = self.lm_head(decoder_last_hidden_states)
        logits = logits.reshape(-1, logits.size(-1))
        labels = answer_labels.flatten()
        qa_loss = self.ce_loss(logits, labels)
        return qa_loss

    def forward(self, question_ids, answer_ids, context_ids, imgs, bboxes,
                question_attn_mask, answer_attn_mask, context_attn_mask,
                answer_labels, segment_ids, question_segment_ids, related_region_labels, **kwargs):
        """Training forward pass; returns a dict of enabled losses plus their sum."""
        loss_dict = {}
        # Cases of separatly encoding question and page
        if self.args.page_contrast:
            question_hiddens, context_hiddens = self.two_stream_encoding(question_ids, question_attn_mask, question_segment_ids,
                                                                         context_ids, context_attn_mask, imgs, bboxes, segment_ids, return_hidden=True)
        # Jointly encoding
        if not self.args.no_cross:
            encoder_last_hidden_states, attn_mask = self.cross_encoding(question_ids, context_ids, imgs, bboxes,
                                                                        question_attn_mask, context_attn_mask,
                                                                        segment_ids, question_segment_ids, **kwargs)
        if self.args.page_contrast:
            if self.args.page_contrast_type == 'global':
                pc_loss, _ = self.forward_page_contrast_global(question_hiddens, context_hiddens)
                loss_dict['loss_pc'] = pc_loss
            elif self.args.page_contrast_type == 'tokenwise':
                tpc_loss, _, _ = self.forward_page_contrast_tokenwise(question_hiddens, context_hiddens, question_attn_mask, context_attn_mask)
                loss_dict['loss_tpc'] = tpc_loss
            else:
                raise NotImplementedError
        # Calculate visual answering loss
        if self.args.visual_answer:
            va_loss, saliency_logits = self.forward_salient_detection(encoder_last_hidden_states, question_attn_mask, context_attn_mask, related_region_labels)
            loss_dict['loss_va'] = va_loss
        # Calculate question answer loss
        if self.args.text_answer:
            region_positions = kwargs.get('region_positions', None)
            now_step = kwargs.get('now_step', None)
            total_step = kwargs.get('total_step', None)
            if not self.args.visual_answer:
                saliency_logits = None
            qa_loss = self.forward_text_answer(encoder_last_hidden_states, attn_mask, answer_ids, answer_attn_mask, answer_labels,
                                               question_attn_mask, context_attn_mask, related_region_labels, saliency_logits, region_positions, now_step, total_step)
            loss_dict['loss_qa'] = qa_loss
        # Calculate total loss
        loss = 0.0
        for _, loss_value in loss_dict.items():
            loss += loss_value
        loss_dict['loss'] = loss
        return loss_dict
| 28,473 | 50.397112 | 159 | py |
chatgpt-refusals | chatgpt-refusals-main/bert_results.py | import os
import torch
from transformers import BertTokenizerFast, BertForSequenceClassification, Trainer, TrainingArguments
import data_processing
class TextDataset(torch.utils.data.Dataset):
    """Torch dataset pairing tokenizer encodings with integer labels."""

    def __init__(self, encodings, labels):
        self.encodings = encodings
        self.labels = labels

    def __len__(self):
        return len(self.labels)

    def __getitem__(self, idx):
        # One tensor per tokenizer field, plus the label under 'labels'.
        sample = {}
        for field, values in self.encodings.items():
            sample[field] = torch.tensor(values[idx])
        sample['labels'] = torch.tensor(self.labels[idx])
        return sample
class TextClassification:
    """Tokenizes texts and encodes labels into a TextDataset for inference."""

    def __init__(self, X, y, label_encoder):
        self.tokenizer = BertTokenizerFast.from_pretrained('bert-base-cased')
        self.label_encoder = label_encoder
        # Map string labels to the integer ids used at training time.
        self.y = self.label_encoder.transform(y)
        self.dataset = self._prepare_dataset(X, self.y)

    def _prepare_dataset(self, X, y):
        """Tokenize the texts and pair them with their encoded labels."""
        tokenized = self.tokenizer(X, truncation=True, padding='longest', max_length=512)
        return TextDataset(tokenized, y)
def inference(X, y, text_source):
    """Run the fine-tuned BERT classifier on (X, y) and return the predicted
    label strings (decoded via the label encoder saved at training time)."""
    # Load the label encoder that was saved during training
    label_encoder = torch.load(f'bert_assets/{text_source}/label_encoder.pth')
    # Tokenize and wrap the inputs for the Trainer.
    classifier = TextClassification(X, y, label_encoder)
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = BertForSequenceClassification.from_pretrained(f'bert_assets/{text_source}').to(device)
    trainer = Trainer(model=model, args=TrainingArguments(output_dir='bert_assets'))
    raw = trainer.predict(classifier.dataset)
    class_indices = raw.predictions.argmax(-1)
    return classifier.label_encoder.inverse_transform(class_indices)
def evaluate_model(dataset, text_source):
    """Load a labeled dataset, run inference on its test portion, and print
    the classification accuracy."""
    filepath = f'data/{dataset}.json'
    print(f'Classifying {text_source}s in {filepath}...')
    X, y = data_processing.preprocess_data(filepath, text_source)
    # The combined dataset is split; the Quora set is evaluation-only.
    if dataset == 'all_hand_labeled':
        _, _, X_test, _, _, y_test = data_processing.split_data(X, y)
    elif dataset == 'quora_insincere_hand_labeled':
        X_test, y_test = X, y
    predictions = inference(X_test, y_test, text_source)
    n_correct = sum(pred == true for pred, true in zip(predictions, y_test))
    accuracy = n_correct / len(y_test)
    print(f'Accuracy: {accuracy*100:.2f}%')
if __name__ == '__main__':
    # Disable tokenizers parallelism to avoid a warning
    os.environ["TOKENIZERS_PARALLELISM"] = "false"
    runs = [('all_hand_labeled', 'response'),
            ('quora_insincere_hand_labeled', 'prompt')]
    for idx, (dataset_name, source) in enumerate(runs):
        if idx:
            print()
        evaluate_model(dataset_name, source)
| 2,872 | 35.367089 | 101 | py |
21cmVAE | 21cmVAE-main/VeryAccurateEmulator/emulator.py | import h5py
import tensorflow as tf
from tqdm.keras import TqdmCallback
import numpy as np
from VeryAccurateEmulator import __path__
import VeryAccurateEmulator.preprocess as pp
PATH = __path__[0] + "/"
def _gen_model(in_dim, hidden_dims, out_dim, activation_func, name=None):
    """
    Generate a new keras model.

    Parameters
    ----------
    in_dim : int or None
        Dimension of the input layer. Use None when the model follows
        another model (e.g. a decoder in an autoencoder).
    hidden_dims : list of ints
        Dimensions of the hidden layers (may be empty).
    out_dim : int
        Dimension of the output layer.
    activation_func : str or instance of tf.keras.activations
        Activation applied in the hidden layers; must be keras-recognizable.
    name : str or None
        Optional model name. Default : None.

    Returns
    -------
    model : tf.keras.Model
        The generated keras model.
    """
    model_layers = []
    if in_dim is not None:
        model_layers.append(tf.keras.Input(shape=(in_dim,)))
    # Iterating an empty list is a no-op, so no length check is needed.
    for dim in hidden_dims:
        model_layers.append(tf.keras.layers.Dense(dim, activation=activation_func))
    model_layers.append(tf.keras.layers.Dense(out_dim))
    return tf.keras.Sequential(model_layers, name=name)
def relative_mse_loss(signal_train):
    """
    Build the loss: MSE divided by the squared signal amplitude (the square
    of the paper's FoM), in units of the training-set standard deviation.

    Parameters
    ----------
    signal_train : np.ndarray
        Training signals.

    Returns
    -------
    loss_function : callable
        Keras-compatible loss taking (y_true, y_pred).
    """
    def loss_function(y_true, y_pred):
        # Undo the preprocessing offset to recover the signal and its
        # amplitude in units of the training standard deviation.
        offset = tf.convert_to_tensor(
            np.mean(signal_train, axis=0) / np.std(signal_train)
        )
        restored_signal = y_true + offset
        amplitude = tf.math.reduce_max(
            tf.abs(restored_signal), axis=1, keepdims=False
        )
        mse = tf.keras.metrics.mean_squared_error(y_true, y_pred)
        return mse / tf.keras.backend.square(amplitude)

    return loss_function
NU_0 = 1420405751.7667  # Hz, rest frequency of 21-cm line


def redshift2freq(z):
    """
    Convert redshift to frequency.

    Parameters
    ----------
    z : float or np.ndarray
        The redshift or array of redshifts to convert.

    Returns
    -------
    nu : float or np.ndarray
        The corresponding frequency or array of frequencies in MHz.
    """
    return NU_0 / (1 + z) / 1e6  # MHz


def freq2redshift(nu):
    """
    Convert frequency to redshift.

    Parameters
    ----------
    nu : float or np.ndarray
        The frequency or array of frequencies in MHz to convert. The input
        is NOT modified (the previous implementation mutated array inputs
        in place via ``nu *= 1e6``).

    Returns
    -------
    z : float or np.ndarray
        The corresponding redshift or array of redshifts.
    """
    nu_hz = nu * 1e6  # new object; leaves the caller's array untouched
    return NU_0 / nu_hz - 1
def error(
    true_signal, pred_signal, relative=True, nu_arr=None, flow=None, fhigh=None
):
    """
    Compute the error (Eq. 1 in the paper) given the true and predicted
    signal(s).

    Parameters
    ----------
    true_signal : np.ndarray
        The true signal(s): temperatures over redshift/frequency. For
        multiple signals, one signal per row.
    pred_signal : np.ndarray
        The predicted signal(s). Must have the same shape as true_signal.
    relative : bool
        If True, return the error in % of the signal amplitude (within the
        selected band); if False, return it in mK. Default : True.
    nu_arr : np.ndarray or None
        Frequency array matching the signals; required when flow/fhigh are
        given. Default : None.
    flow : float or None
        Lower bound of the frequency band to evaluate in. Default : None.
    fhigh : float or None
        Upper bound of the frequency band to evaluate in. Default : None.

    Returns
    -------
    err : np.ndarray
        The computed errors, shape (n_signals,).

    Raises
    ------
    ValueError :
        If flow or fhigh is given without nu_arr.
    """
    # Compare with `is not None` so a (pathological) band edge of 0 is still
    # honored; the original truthiness test ignored it.
    band_requested = flow is not None or fhigh is not None
    if band_requested and nu_arr is None:
        raise ValueError(
            "No frequency array is given, cannot compute error in specified "
            "frequency band."
        )
    # Promote single signals to 2-d so all reductions are over axis 1.
    pred_signal = np.atleast_2d(pred_signal)
    true_signal = np.atleast_2d(true_signal)
    if band_requested:
        # Boolean mask keeps the index 1-d; the original flow-only and
        # fhigh-only branches used a (k, 1) argwhere index, which produced
        # mis-shaped (n, 1) errors.
        band = np.ones(len(nu_arr), dtype=bool)
        if flow is not None:
            band &= nu_arr >= flow
        if fhigh is not None:
            band &= nu_arr <= fhigh
        pred_signal = pred_signal[:, band]
        true_signal = true_signal[:, band]
    err = np.sqrt(np.mean((pred_signal - true_signal) ** 2, axis=1))
    if relative:  # give error as fraction of amplitude in the desired band
        err /= np.max(np.abs(true_signal), axis=1)
        err *= 100  # %
    return err
# Default architecture (hidden layer widths) and redshift grid used by
# 21cmVAE; redshifts span z = 5-50 in steps of 0.1.
hidden_dims = [288, 352, 288, 224]
redshifts = np.linspace(5, 50, 451)

# Load the bundled train/val/test splits at import time.  NOTE(review):
# this file I/O runs on import and requires dataset_21cmVAE.h5 to be
# present next to the package.
with h5py.File(PATH + "dataset_21cmVAE.h5", "r") as hf:
    par_train = hf["par_train"][:]
    par_val = hf["par_val"][:]
    par_test = hf["par_test"][:]
    signal_train = hf["signal_train"][:]
    signal_val = hf["signal_val"][:]
    signal_test = hf["signal_test"][:]
class DirectEmulator:
    def __init__(
        self,
        par_train=par_train,
        par_val=par_val,
        par_test=par_test,
        signal_train=signal_train,
        signal_val=signal_val,
        signal_test=signal_test,
        hidden_dims=hidden_dims,
        activation_func="relu",
        redshifts=redshifts,
        frequencies=None,
    ):
        """
        The direct emulator class. This class provides the user interface for
        building, training, and using a Direct Emulator such as 21cmVAE.
        The default parameters are the ones used by 21cmVAE.

        Parameters
        ----------
        par_train : np.ndarray
            Parameters in training set.
        par_val : np.ndarray
            Parameters in validation set.
        par_test : np.ndarray
            Parameters in test set.
        signal_train : np.ndarray
            Signals in training set.
        signal_val : np.ndarray
            Signals in validation set.
        signal_test : np.ndarray
            Signals in test set.
        hidden_dims : list of ints
            List of dimensions of the hidden layers. Should be an empty list
            if there are no hidden layers.
        activation_func : str or instance of tf.keras.activations
            Activation function between hidden layers. Must be recognizable by
            keras.
        redshifts : np.ndarray or None
            Array of redshifts corresponding to the signals used.
        frequencies : np.ndarray or None
            Array of frequencies corresponding to the signals used.

        Attributes
        ----------
        par_train, par_val, par_test : np.ndarray
            Parameters in the training/validation/test sets.
        signal_train, signal_val, signal_test : np.ndarray
            Signals in the training/validation/test sets.
        par_labels : list of str
            The names of the astrophysical parameters.
        emulator : tf.keras.Model
            The emulator.
        redshifts : np.ndarray or None
            Array of redshifts corresponding to the signals used.
        frequencies : np.ndarray or None
            Array of frequencies corresponding to the signals used.

        Methods
        -------
        load_model : load an existing model.
        train : train the emulator.
        predict : use the emulator to predict global signals from
            astrophysical input parameters.
        test_error : compute the test set error of the emulator.
        save : save the class instance with all attributes.
        """
        self.par_train = par_train
        self.par_val = par_val
        self.par_test = par_test
        self.signal_train = signal_train
        self.signal_val = signal_val
        self.signal_test = signal_test
        # Fixed ordering of the seven astrophysical input parameters.
        self.par_labels = [
            "fstar",
            "Vc",
            "fx",
            "tau",
            "alpha",
            "nu_min",
            "Rmfp",
        ]
        self.emulator = _gen_model(
            self.par_train.shape[-1],
            hidden_dims,
            self.signal_train.shape[-1],
            activation_func,
            name="emulator",
        )
        # Derive whichever of the redshift/frequency arrays was not supplied.
        if frequencies is None:
            if redshifts is not None:
                frequencies = redshift2freq(redshifts)
        elif redshifts is None:
            redshifts = freq2redshift(frequencies)
        self.redshifts = redshifts
        self.frequencies = frequencies

    def load_model(self, model_path=PATH + "models/emulator.h5"):
        """
        Load a saved model. The default parameter is the path to the saved
        state of 21cmVAE as described in the paper.

        Parameters
        ----------
        model_path : str
            The path to the saved model.

        Raises
        ------
        IOError : if model_path does not point to a valid model.
        """
        # The custom loss must be re-registered for keras to deserialize it.
        custom_obj = {"loss_function": relative_mse_loss(self.signal_train)}
        self.emulator = tf.keras.models.load_model(
            model_path, custom_objects=custom_obj
        )

    def train(self, epochs, callbacks=None, verbose="tqdm"):
        """
        Train the emulator.

        Parameters
        ----------
        epochs : int
            Number of epochs to train for.
        callbacks : list of tf.keras.callbacks.Callback or None
            Callbacks to pass to the training loop. Default : None
        verbose : 0, 1, 2, or "tqdm"
            Verbosity mode. 0 = silent, 1 = progress bar, 2 = one line per
            epoch, "tqdm" = use progress bar from tqdm. Default : "tqdm"

        Returns
        -------
        loss : list of floats
            Training set losses.
        val_loss : list of floats
            Validation set losses.
        """
        # Copy to avoid mutating the caller's list and to fix the classic
        # mutable-default-argument bug: the old default `callbacks=[]` was
        # shared across calls and accumulated a TqdmCallback on every call.
        callbacks = [] if callbacks is None else list(callbacks)
        X_train = pp.par_transform(self.par_train, self.par_train)
        X_val = pp.par_transform(self.par_val, self.par_train)
        y_train = pp.preproc(self.signal_train, self.signal_train)
        y_val = pp.preproc(self.signal_val, self.signal_train)
        if verbose == "tqdm":
            callbacks.append(TqdmCallback())
            verbose = 0
        hist = self.emulator.fit(
            x=X_train,
            y=y_train,
            batch_size=256,
            epochs=epochs,
            validation_data=(X_val, y_val),
            validation_batch_size=256,
            callbacks=callbacks,
            verbose=verbose,
        )
        loss = hist.history["loss"]
        val_loss = hist.history["val_loss"]
        return loss, val_loss

    def predict(self, params):
        """
        Predict a (set of) global signal(s) from astrophysical parameters.

        Parameters
        ----------
        params : np.ndarray
            The values of the astrophysical parameters. Must be in the order
            given by the attribute par_labels. To predict a set of global
            signals, input a 2d-array where each row correspond to a different
            set of parameters.

        Returns
        -------
        pred : np.ndarray
            The predicted global signal(s).
        """
        transformed_params = pp.par_transform(params, self.par_train)
        proc_pred = self.emulator.predict(transformed_params)
        pred = pp.unpreproc(proc_pred, self.signal_train)
        # Collapse the batch dimension for single-signal predictions.
        if pred.shape[0] == 1:
            return pred[0, :]
        else:
            return pred

    def test_error(self, relative=True, flow=None, fhigh=None):
        """
        Compute the error of the emulator for each signal in the test set.

        Parameters
        ----------
        relative : bool
            Whether to compute the error in % relative to the signal amplitude
            (True) or in mK (False). Default : True.
        flow : float or None
            The lower bound of the frequency band to compute the error in.
            Default : None.
        fhigh : float or None
            The upper bound of the frequency band to compute the error in.
            Default : None.

        Returns
        -------
        err : np.ndarray
            The computed errors.
        """
        err = error(
            self.signal_test,
            self.predict(self.par_test),
            relative=relative,
            nu_arr=self.frequencies,
            flow=flow,
            fhigh=fhigh,
        )
        return err

    def save(self):
        """Save the class instance with all attributes (not implemented)."""
        raise NotImplementedError("Not implemented yet.")
class AutoEncoder(tf.keras.models.Model):
    def __init__(
        self,
        signal_train=signal_train,
        enc_hidden_dims=None,
        dec_hidden_dims=None,
        latent_dim=9,
        activation_func="relu",
    ):
        """
        Helper class that controls the autoencoder for the autoencoder-based
        emulator.

        Parameters
        ----------
        signal_train : np.ndarray
            The signals in the training set. Default : the signals defined in
            the file "dataset_21cmVAE.h5", used by 21cmVAE
        enc_hidden_dims : list of ints or None
            The dimensions of the hidden layers of the encoder. None means no
            hidden layers. Default : None
        dec_hidden_dims : list of ints or None
            The dimensions of the hidden layers of the decoder. None means no
            hidden layers. Default : None
        latent_dim : int
            The dimension of the latent layer. Default : 9
        activation_func : str or instance of tf.keras.activations
            Activation function between hidden layers. Must be recognizable by
            keras. Default : "relu"

        Attributes
        ----------
        encoder : tf.keras.Model
            The encoder of the autoencoder.
        decoder : tf.keras.Model
            The decoder of the autoencoder.

        Methods
        -------
        call : use the autoencoder to reconstruct the input.
        """
        super().__init__()
        # None is used as a sentinel instead of a mutable [] default
        # (equivalent behavior, but avoids sharing a list between calls).
        if enc_hidden_dims is None:
            enc_hidden_dims = []
        if dec_hidden_dims is None:
            dec_hidden_dims = []
        self.encoder = _gen_model(
            signal_train.shape[-1],
            enc_hidden_dims,
            latent_dim,
            activation_func,
            name="encoder",
        )
        self.decoder = _gen_model(
            None,
            dec_hidden_dims,
            signal_train.shape[-1],
            activation_func,
            name="decoder",
        )

    def call(self, signals):
        """
        Reconstruct the given input with the autoencoder.

        Parameters
        ----------
        signals : np.ndarray
            The signals to reconstruct with the autoencoder.

        Returns
        -------
        reconstructed : np.ndarray
            The reconstructed signals.
        """
        reconstructed = self.decoder(self.encoder(signals))
        return reconstructed
# default parameters
# These are the architecture defaults for the autoencoder-based emulator
# (latent size, encoder/decoder/emulator hidden-layer widths) used as the
# keyword defaults of AutoEncoderEmulator below.
latent_dim = 9
enc_hidden_dims = [352]
dec_hidden_dims = [32, 352]
em_hidden_dims = [352, 352, 352, 224]
class AutoEncoderEmulator:
    def __init__(
        self,
        par_train=par_train,
        par_val=par_val,
        par_test=par_test,
        signal_train=signal_train,
        signal_val=signal_val,
        signal_test=signal_test,
        latent_dim=latent_dim,
        enc_hidden_dims=enc_hidden_dims,
        dec_hidden_dims=dec_hidden_dims,
        em_hidden_dims=em_hidden_dims,
        activation_func="relu",
        redshifts=redshifts,
        frequencies=None,
    ):
        """
        The autoencoder-based emulator class. This class provides the user
        interface for building, training, and using an autoencoder-based
        emulator, as described in Appendix A of the paper.
        The default parameters are the ones used in Appendix A.

        Parameters
        ----------
        par_train, par_val, par_test : np.ndarray
            Parameters in the training/validation/test sets.
        signal_train, signal_val, signal_test : np.ndarray
            Signals in the training/validation/test sets.
        latent_dim : int
            The dimension of the latent layer.
        enc_hidden_dims : list of ints
            The dimensions of the hidden layers of the encoder.
        dec_hidden_dims : list of ints
            The dimensions of the hidden layers of the decoder.
        em_hidden_dims : list of ints
            The dimensions of the hidden layers of the emulator.
        activation_func : str or instance of tf.keras.activations
            Activation function between hidden layers. Must be recognizable by
            keras.
        redshifts : np.ndarray or None
            Array of redshifts corresponding to the signals used.
        frequencies : np.ndarray or None
            Array of frequencies corresponding to the signals used.

        Attributes
        ----------
        par_train, par_val, par_test : np.ndarray
            Parameters in the training/validation/test sets.
        signal_train, signal_val, signal_test : np.ndarray
            Signals in the training/validation/test sets.
        par_labels : list of str
            The names of the astrophysical parameters.
        autoencoder : AutoEncoder
            An instance of the AutoEncoder class defined in this module.
        emulator : tf.keras.Model
            The emulator.
        redshifts : np.ndarray or None
            Array of redshifts corresponding to the signals used.
        frequencies : np.ndarray or None
            Array of frequencies corresponding to the signals used.

        Methods
        -------
        load_model : load an existing model.
        train : train the autoencoder and the emulator.
        predict : use the emulator and decoder to predict global signals from
            astrophysical input parameters.
        test_error : compute the test set error of the autoencoder or the
            autoencoder-based emulator.
        save : save the class instance with all attributes.
        """
        self.par_train = par_train
        self.par_val = par_val
        self.par_test = par_test
        self.signal_train = signal_train
        self.signal_val = signal_val
        self.signal_test = signal_test
        # Fixed ordering of the seven astrophysical input parameters.
        self.par_labels = [
            "fstar",
            "Vc",
            "fx",
            "tau",
            "alpha",
            "nu_min",
            "Rmfp",
        ]
        # Derive whichever of the redshift/frequency arrays was not supplied.
        if frequencies is None:
            if redshifts is not None:
                frequencies = redshift2freq(redshifts)
        elif redshifts is None:
            redshifts = freq2redshift(frequencies)
        self.redshifts = redshifts
        self.frequencies = frequencies
        autoencoder = AutoEncoder(
            self.signal_train,
            enc_hidden_dims,
            dec_hidden_dims,
            latent_dim,
            activation_func,
        )
        # Build the autoencoder so its weights and shapes are defined.
        autoencoder.build((None, self.signal_train.shape[-1]))
        self.autoencoder = autoencoder
        # The emulator maps astrophysical parameters to the latent space.
        # (Typo fixed: the model name used to read "ae_emualtor".)
        self.emulator = _gen_model(
            self.par_train.shape[-1],
            em_hidden_dims,
            latent_dim,
            activation_func,
            name="ae_emulator",
        )

    # Default directory holding the pretrained Appendix A models.
    AE_PATH = PATH + "models/autoencoder_based_emulator/"

    def load_model(
        self,
        emulator_path=AE_PATH + "ae_emulator.h5",
        encoder_path=AE_PATH + "encoder.h5",
        decoder_path=AE_PATH + "decoder.h5",
    ):
        """
        Load a saved model. Default parameters are the paths to the models used
        in Appendix A of the paper.

        Parameters
        ----------
        emulator_path : str
            The path to the saved emulator.
        encoder_path : str
            The path to the saved encoder.
        decoder_path : str
            The path to the saved decoder.

        Raises
        ------
        IOError : if model_path does not point to a valid model.
        """
        self.emulator = tf.keras.models.load_model(emulator_path)
        encoder = tf.keras.models.load_model(encoder_path)
        decoder = tf.keras.models.load_model(decoder_path)
        autoencoder = AutoEncoder(signal_train=self.signal_train)
        autoencoder.encoder = encoder
        autoencoder.decoder = decoder
        # build autoencoder by calling it on a batch of data
        _ = autoencoder(pp.preproc(self.signal_test, self.signal_train))
        self.autoencoder = autoencoder

    def train(self, epochs, ae_callbacks=None, em_callbacks=None, verbose="tqdm"):
        """
        Train the autoencoder and the emulator.

        Parameters
        ----------
        epochs : int
            Number of epochs to train for.
        ae_callbacks : list of tf.keras.callbacks.Callback or None
            Callbacks to pass to the training method of the autoencoder.
            Default : None
        em_callbacks : list of tf.keras.callbacks.Callback or None
            Callbacks to pass to the training method of the emulator.
            Default : None
        verbose : 0, 1, 2, or "tqdm"
            Verbosity mode. 0 = silent, 1 = progress bar, 2 = one line per
            epoch, "tqdm" = use progress bar from tqdm. Default : "tqdm"

        Returns
        -------
        ae_loss : list of floats
            Training set losses for the autoencoder.
        ae_val_loss : list of floats
            Validation set losses for the autoencoder.
        loss : list of floats
            Training set losses for the emulator.
        val_loss : list of floats
            Validation set losses for the emulator.
        """
        # Copy to avoid mutating the callers' lists and to fix the
        # mutable-default-argument bug: the old `=[]` defaults were shared
        # across calls and accumulated a TqdmCallback on every call.
        ae_callbacks = [] if ae_callbacks is None else list(ae_callbacks)
        em_callbacks = [] if em_callbacks is None else list(em_callbacks)
        y_train = pp.preproc(self.signal_train, self.signal_train)
        y_val = pp.preproc(self.signal_val, self.signal_train)
        if verbose == "tqdm":
            ae_callbacks.append(TqdmCallback())
            em_callbacks.append(TqdmCallback())
            verbose = 0
        hist = self.autoencoder.fit(
            x=y_train,
            y=y_train,
            batch_size=256,
            epochs=epochs,
            validation_data=(y_val, y_val),
            callbacks=ae_callbacks,
            verbose=verbose,
        )
        ae_loss = hist.history["loss"]
        ae_val_loss = hist.history["val_loss"]
        X_train = pp.par_transform(self.par_train, self.par_train)
        X_val = pp.par_transform(self.par_val, self.par_train)
        # The emulator is trained to reproduce the encoder's latent vectors.
        y_train = self.autoencoder.encoder.predict(y_train)
        y_val = self.autoencoder.encoder.predict(y_val)
        hist = self.emulator.fit(
            x=X_train,
            y=y_train,
            batch_size=256,
            epochs=epochs,
            validation_data=(X_val, y_val),
            callbacks=em_callbacks,
            verbose=verbose,
        )
        loss = hist.history["loss"]
        val_loss = hist.history["val_loss"]
        return ae_loss, ae_val_loss, loss, val_loss

    def predict(self, params):
        """
        Predict a (set of) global signal(s) from astrophysical parameters.

        Parameters
        ----------
        params : np.ndarray
            The values of the astrophysical parameters. Must be in the order
            given by the attribute par_labels. To predict a set of global
            signals, input a 2d-array where each row correspond to a different
            set of parameters.

        Returns
        -------
        pred : np.ndarray
            The predicted global signal(s).
        """
        transformed_params = pp.par_transform(params, self.par_train)
        em_pred = self.emulator.predict(transformed_params)
        decoded = self.autoencoder.decoder.predict(em_pred)
        pred = pp.unpreproc(decoded, self.signal_train)
        # Collapse the batch dimension for single-signal predictions.
        if pred.shape[0] == 1:
            return pred[0, :]
        else:
            return pred

    def test_error(
        self, use_autoencoder=False, relative=True, flow=None, fhigh=None
    ):
        """
        Compute the error of the autoencoder or the autoencoder-based emulator
        for each signal in the test set.

        Parameters
        ----------
        use_autoencoder : bool
            Compute the errors of the autoencoder (True) or the emulator
            (False). Default : False
        relative : bool
            Whether to compute the error in % relative to the signal amplitude
            (True) or in mK (False). Default : True.
        flow : float or None
            The lower bound of the frequency band to compute the error in.
            Default : None.
        fhigh : float or None
            The upper bound of the frequency band to compute the error in.
            Default : None.

        Returns
        -------
        err : np.ndarray
            The computed errors.
        """
        if use_autoencoder:
            pred = pp.unpreproc(
                self.autoencoder(
                    pp.preproc(self.signal_test, self.signal_train)
                ),
                self.signal_train,
            )
        else:
            pred = self.predict(self.par_test)
        err = error(
            self.signal_test,
            pred,
            relative=relative,
            nu_arr=self.frequencies,
            flow=flow,
            fhigh=fhigh,
        )
        return err
| 26,244 | 30.132859 | 79 | py |
21cmVAE | 21cmVAE-main/tests/test_emulator.py | import h5py
import numpy as np
import tensorflow as tf
from VeryAccurateEmulator import emulator, __path__
import VeryAccurateEmulator.preprocess as pp
# Path to the dataset bundled with the package; only the training-set
# signals are needed by these tests.
FILE = __path__[0] + "/dataset_21cmVAE.h5"
with h5py.File(FILE, "r") as hf:
    signal_train = hf["signal_train"][:]
def test_gen_model():
    """Each generated layer must have the requested output dimension."""
    input_dim, layer_dims, output_dim = 7, [32, 64, 256], 451
    model = emulator._gen_model(input_dim, layer_dims, output_dim, "relu")
    expected_dims = layer_dims + [output_dim]
    assert len(model.layers) == len(expected_dims)
    for layer, expected in zip(model.layers, expected_dims):
        assert layer.output_shape[-1] == expected
def test_relative_mse_loss():
    """The custom loss equals MSE normalized by the squared amplitude."""
    loss = emulator.relative_mse_loss(signal_train)
    truth = tf.convert_to_tensor(pp.preproc(signal_train[:10], signal_train))
    guess = tf.convert_to_tensor(pp.preproc(signal_train[-10:], signal_train))
    # Hand-computed reference value of the loss.
    amp = tf.convert_to_tensor(
        np.max(np.abs(signal_train[:10] / np.std(signal_train)), axis=1)
    )
    expected = tf.keras.metrics.mean_squared_error(truth, guess)
    expected = expected / tf.keras.backend.square(amp)
    assert np.allclose(expected.numpy(), loss(truth, guess).numpy())
def test_z_nu():
    """Frequency and redshift conversions must be inverses of each other."""
    redshift = 30
    roundtrip = emulator.freq2redshift(emulator.redshift2freq(redshift))
    assert np.isclose(redshift, roundtrip)
def test_error():
    """A signal compared with itself must have exactly zero error.

    The redshift/frequency arrays previously computed at the top of this
    test were never used and have been removed.
    """
    assert np.allclose(
        emulator.error(signal_train, signal_train), np.zeros(len(signal_train))
    )
# direct emulator class
# Module-level fixture: a DirectEmulator loaded with the pretrained 21cmVAE
# weights, shared by the tests below.
direm = emulator.DirectEmulator()
direm.load_model()
def test_predict():
    """Prediction of a single test signal and of a batch of signals."""
    single_pars = direm.par_test[0]
    single_pred = direm.predict(single_pars)
    expected = direm.signal_test[0]
    assert single_pred.shape == expected.shape
    # the emulator has a max error of 1.84 %
    rms = np.sqrt(np.mean((single_pred - expected) ** 2))
    assert rms / np.max(np.abs(expected)) < 0.02
    # vectorized call
    batch_pred = direm.predict(direm.par_test[:10])
    assert batch_pred.shape == (10, expected.shape[0])
    assert batch_pred[0].shape == single_pred.shape
    assert np.allclose(batch_pred[0], single_pred, atol=5e-5)
def test_test_error():
    """Mean/median test-set errors match Table 1 of Bye et al. (2021).

    NOTE(review): a later function in this file reuses the name
    test_test_error, which shadows this test at import time.
    """
    rel_err = direm.test_error()
    assert rel_err.shape == (direm.signal_test.shape[0],)
    # compare to table 1 in Bye et al. (2021)
    assert np.allclose(rel_err.mean(), 0.34, atol=1e-2)
    assert np.allclose(np.median(rel_err), 0.29, atol=1e-2)
    abs_err = direm.test_error(relative=False)
    assert np.allclose(abs_err.mean(), 0.54, atol=1e-2)
    assert np.allclose(np.median(abs_err), 0.50, atol=1e-2)
# autoencoder-based emulator class
# Module-level fixture: an AutoEncoderEmulator loaded with the pretrained
# Appendix A models, shared by the tests below.
ae_em = emulator.AutoEncoderEmulator()
ae_em.load_model()
def test_predict_ae():
    """Autoencoder-based prediction, single signal and batched."""
    single_pars = ae_em.par_test[0]
    single_pred = ae_em.predict(single_pars)
    expected = ae_em.signal_test[0]
    assert single_pred.shape == expected.shape
    # error should be less than 5 % in all cases
    rms = np.sqrt(np.mean((single_pred - expected) ** 2))
    assert rms / np.max(np.abs(expected)) < 0.05
    # vectorized call
    batch_pred = ae_em.predict(ae_em.par_test[:10])
    assert batch_pred.shape == (10, expected.shape[0])
    assert batch_pred[0].shape == single_pred.shape
    assert np.allclose(batch_pred[0], single_pred, atol=5e-5)
def test_test_error_ae():
    """Autoencoder-based emulator errors match Appendix A of Bye et al. (2021).

    Renamed from test_test_error: the original name collided with the
    direct-emulator test defined earlier in this file, so only one of the
    two functions was ever collected by pytest.
    """
    err = ae_em.test_error()
    # Check shape against ae_em (the original referenced direm; both hold the
    # same test set, but ae_em is the object under test here).
    assert err.shape == (ae_em.signal_test.shape[0],)
    # compare to appendix A in Bye et al. (2021)
    assert np.allclose(err.mean(), 0.39, atol=1e-2)
    assert np.allclose(np.median(err), 0.35, atol=1e-2)
    err_ae = ae_em.test_error(use_autoencoder=True)
    assert np.allclose(err_ae.mean(), 0.33, atol=1e-2)
    assert np.allclose(np.median(err_ae), 0.29, atol=1e-2)
| 3,644 | 30.973684 | 79 | py |
StyleMask | StyleMask-master/run_inference.py | import os
import datetime
import random
import sys
import argparse
from argparse import Namespace
import torch
from torch import nn
import numpy as np
import warnings
from tqdm import tqdm
# Silence library warnings and avoid writing .pyc files for this run.
warnings.filterwarnings("ignore")
sys.dont_write_bytecode = True
# Fix the RNG seed for reproducible sampling.
seed = 0
random.seed(seed)
import face_alignment
from libs.models.StyleGAN2.model import Generator as StyleGAN2Generator
from libs.models.mask_predictor import MaskPredictor
from libs.utilities.utils import make_noise, generate_image, generate_new_stylespace, save_image, save_grid, get_files_frompath
from libs.utilities.stylespace_utils import decoder
from libs.configs.config_models import stylegan2_ffhq_1024
from libs.utilities.utils_inference import preprocess_image, invert_image
from libs.utilities.image_utils import image_to_tensor
from libs.models.inversion.psp import pSp
class Inference_demo():
    """Face-reenactment inference.

    Loads a pretrained StyleGAN2 generator, the mask network and
    (optionally) the e4e inversion encoder, then reenacts source/target
    pairs and saves the results under the configured output path.
    """

    def __init__(self, args):
        """Store the run configuration and create the output directory.

        Parameters
        ----------
        args : dict
            Parsed command-line arguments (see main()).
        """
        self.args = args
        self.device = 'cuda'
        self.output_path = args['output_path']
        self.masknet_path = args['masknet_path']
        self.image_resolution = args['image_resolution']
        self.dataset = args['dataset']
        self.source_path = args['source_path']
        self.target_path = args['target_path']
        self.num_pairs = args['num_pairs']
        self.save_grid = args['save_grid']
        self.save_image = args['save_image']
        self.resize_image = args['resize_image']
        # (Removed unused local `arguments_json` from the original code;
        # the arguments were never actually written to disk.)
        if not os.path.exists(self.output_path):
            os.makedirs(self.output_path, exist_ok=True)

    def load_models(self, inversion):
        """Load the StyleGAN2 generator, the mask network and, if needed,
        the e4e inversion encoder.

        Parameters
        ----------
        inversion : bool
            Whether real-image inversion models (face alignment + e4e
            encoder) should also be loaded.
        """
        self.face_pool = torch.nn.AdaptiveAvgPool2d((256, 256))
        if self.dataset == 'ffhq' and self.image_resolution == 1024:
            self.generator_path = stylegan2_ffhq_1024['gan_weights']
            self.channel_multiplier = stylegan2_ffhq_1024['channel_multiplier']
            self.split_sections = stylegan2_ffhq_1024['split_sections']
            self.stylespace_dim = stylegan2_ffhq_1024['stylespace_dim']
        else:
            print('Incorrect dataset type {} and image resolution {}'.format(self.dataset, self.image_resolution))
            # Without a valid config the attributes below are undefined;
            # the original code would crash with AttributeError here.
            sys.exit(1)
        if os.path.exists(self.generator_path):
            print('----- Load generator from {} -----'.format(self.generator_path))
            self.G = StyleGAN2Generator(self.image_resolution, 512, 8, channel_multiplier=self.channel_multiplier)
            self.G.load_state_dict(torch.load(self.generator_path)['g_ema'], strict=True)
            self.G.cuda().eval()
            # use truncation
            self.truncation = 0.7
            self.trunc = self.G.mean_latent(4096).detach().clone()
        else:
            print('Please download the pretrained model for StyleGAN2 generator and save it into ./pretrained_models path')
            sys.exit(1)
        if os.path.exists(self.masknet_path):
            print('----- Load mask network from {} -----'.format(self.masknet_path))
            ckpt = torch.load(self.masknet_path, map_location=torch.device('cpu'))
            self.num_layers_control = ckpt['num_layers_control']
            self.mask_net = nn.ModuleDict({})
            for layer_idx in range(self.num_layers_control):
                network_name_str = 'network_{:02d}'.format(layer_idx)
                # One MaskPredictor per controlled StyleGAN layer; its
                # input/output/inner widths all match that layer's
                # style-space dimensionality.
                stylespace_dim_layer = self.split_sections[layer_idx]
                network_module = MaskPredictor(
                    stylespace_dim_layer, stylespace_dim_layer,
                    inner_dim=stylespace_dim_layer)
                self.mask_net.update({network_name_str: network_module})
            self.mask_net.load_state_dict(ckpt['mask_net'])
            self.mask_net.cuda().eval()
        else:
            print('Please download the pretrained model for Mask network and save it into ./pretrained_models path')
            sys.exit(1)
        if inversion:
            self.fa = face_alignment.FaceAlignment(face_alignment.LandmarksType._2D, device='cuda')
            ### Load inversion model only when the input is image. ###
            self.encoder_path = stylegan2_ffhq_1024['e4e_inversion_model']
            print('----- Load e4e encoder from {} -----'.format(self.encoder_path))
            ckpt = torch.load(self.encoder_path, map_location='cpu')
            opts = ckpt['opts']
            opts['output_size'] = self.image_resolution
            opts['checkpoint_path'] = self.encoder_path
            opts['device'] = 'cuda'
            opts['channel_multiplier'] = self.channel_multiplier
            opts['dataset'] = self.dataset
            opts = Namespace(**opts)
            self.encoder = pSp(opts)
            self.encoder.cuda().eval()

    def load_samples(self, filepath):
        """Resolve a source/target specification into samples.

        Parameters
        ----------
        filepath : str or None
            None (draw random Z codes), a directory of images or .npy
            latent codes, or a single image / .npy file.

        Returns
        -------
        (samples, inversion) : samples is a list of image paths, a list of
            random Z tensors, or a tensor of loaded latent codes;
            inversion is True when the samples are real images that must
            be inverted first.
        """
        inversion = False
        if filepath is None:
            # No path given: draw random latent codes in Z space.
            samples = []
            for i in range(self.num_pairs):
                samples.append(make_noise(1, 512))
        else:
            if os.path.isdir(filepath):
                ## Check if files inside directory are images. Else check if latent codes
                samples = get_files_frompath(filepath, ['*.png', '*.jpg'])
                if len(samples) == 0:
                    samples = get_files_frompath(filepath, ['*.npy'])
                    if len(samples) == 0:
                        print('Please specify correct path: folder with images (.png, .jpg) or latent codes (.npy)')
                        sys.exit(1)
                    z_codes = []
                    for file_ in samples:
                        z_codes.append(torch.from_numpy(np.load(file_)).cuda())
                    z_codes = torch.cat(z_codes).unsqueeze(0)
                    samples = z_codes
                else:
                    inversion = True  # invert real images
            elif os.path.isfile(filepath):
                head, tail = os.path.split(filepath)
                ext = tail.split('.')[-1]
                # Check if file is image
                if ext == 'png' or ext == 'jpg':
                    samples = [filepath]
                    inversion = True
                elif ext == 'npy':
                    z_codes = torch.from_numpy(np.load(filepath)).unsqueeze(1)
                    samples = z_codes
                else:
                    print('Wrong path. Expected file image (.png, .jpg) or latent code (.npy)')
                    sys.exit(1)
            else:
                print('Wrong path. Expected file image (.png, .jpg) or latent code (.npy)')
                sys.exit(1)
        return samples, inversion

    def reenact_pair(self, source_code, target_code):
        """Reenact one source/target pair.

        Returns
        -------
        (source_img, target_img, reenacted_img) tensors.
        """
        with torch.no_grad():
            # Style-space representation of the source identity.
            source_img, style_source, w_source, noise_source = generate_image(
                self.G, source_code, self.truncation, self.trunc,
                self.image_resolution, self.split_sections,
                input_is_latent=self.input_is_latent, return_latents=True,
                resize_image=self.resize_image)
            # Style-space representation of the target pose/expression.
            target_img, style_target, w_target, noise_target = generate_image(
                self.G, target_code, self.truncation, self.trunc,
                self.image_resolution, self.split_sections,
                input_is_latent=self.input_is_latent, return_latents=True,
                resize_image=self.resize_image)
            # Predict, per controlled layer, a mask from the style difference.
            masks_per_layer = []
            for layer_idx in range(self.num_layers_control):
                network_name_str = 'network_{:02d}'.format(layer_idx)
                style_diff = style_source[layer_idx] - style_target[layer_idx]
                masks_per_layer.append(self.mask_net[network_name_str](style_diff))
            mask = torch.cat(masks_per_layer, dim=1)
            style_source = torch.cat(style_source, dim=1)
            style_target = torch.cat(style_target, dim=1)
            # Blend source/target styles through the predicted mask, then
            # decode the blended style space back into an image.
            new_style_space = generate_new_stylespace(
                style_source, style_target, mask,
                num_layers_control=self.num_layers_control)
            new_style_space = list(torch.split(
                tensor=new_style_space,
                split_size_or_sections=self.split_sections, dim=1))
            reenacted_img = decoder(self.G, new_style_space, w_source, noise_source, resize_image=self.resize_image)
        return source_img, target_img, reenacted_img

    def check_paths(self):
        """Validate that the source and target paths are of the same kind.

        NOTE(review): assert-based validation is stripped when Python runs
        with -O; raising ValueError would be more robust — kept as asserts
        to preserve the original exception type.
        """
        assert type(self.target_path) == type(self.source_path), \
            "Source path and target path should have the same type, None, files (.png, .jpg or .npy) or directories with files of type .png, .jpg or .npy"
        if self.source_path is not None and self.target_path is not None:
            if os.path.isdir(self.source_path):
                assert os.path.isdir(self.target_path), \
                    "Source path and target path should have the same type, None, files (.png, .jpg or .npy) or directories with files of type .png, .jpg or .npy"
            if os.path.isfile(self.source_path):
                assert os.path.isfile(self.target_path), \
                    "Source path and target path should have the same type, None, files (.png, .jpg or .npy) or directories with files of type .png, .jpg or .npy"

    def run(self):
        """Load samples and models, reenact each pair, and save results."""
        self.check_paths()
        source_samples, inversion = self.load_samples(self.source_path)
        target_samples, inversion = self.load_samples(self.target_path)
        assert len(source_samples) == len(target_samples), "Number of source samples should be the same with target samples"
        self.load_models(inversion)
        self.num_pairs = len(source_samples)
        print('Reenact {} pairs'.format(self.num_pairs))
        for i in tqdm(range(self.num_pairs)):
            if inversion:  # Real image
                # Preprocess and invert real images into the W+ latent space using Encoder4Editing method
                cropped_image = preprocess_image(source_samples[i], self.fa, save_filename=None)
                source_img = image_to_tensor(cropped_image).unsqueeze(0).cuda()
                inv_image, source_code = invert_image(source_img, self.encoder, self.G, self.truncation, self.trunc)
                cropped_image = preprocess_image(target_samples[i], self.fa)
                target_img = image_to_tensor(cropped_image).unsqueeze(0).cuda()
                inv_image, target_code = invert_image(target_img, self.encoder, self.G, self.truncation, self.trunc)
                self.input_is_latent = True
            else:  # synthetic latent code
                source_code = source_samples[i].cuda()
                target_code = target_samples[i].cuda()
                if self.source_path is not None:
                    if source_code.ndim == 2:
                        self.input_is_latent = False  # Z space 1 X 512
                    elif source_code.ndim == 3:
                        # W+ codes are taken as-is: disable truncation.
                        self.truncation = 1.0
                        self.trunc = None
                        self.input_is_latent = True  # W space 1 X 18 X 512
                else:
                    self.input_is_latent = False  # Z space
            source_img, target_img, reenacted_img = self.reenact_pair(source_code, target_code)
            if self.save_grid:
                save_grid(source_img, target_img, reenacted_img, os.path.join(self.output_path, 'grid_{:04d}.png').format(i))
            if self.save_image:
                save_image(reenacted_img, os.path.join(self.output_path, '{:04d}.png').format(i))
def main():
    """
    Inference script.

    Options:
        ######### General ###########
        --output_path : path to save output images
        --source_path : It can be either an image file, or a latent code or a directory with images or latent codes or None.
            If source path is None then it will generate a random latent code.
        --target_path : It can be either an image file, or a latent code or a directory with images or latent codes or None.
            If target path is None then it will generate a random latent code.
        --masknet_path : path to pretrained model for mask network
        --dataset : dataset (ffhq)
        --image_resolution : image resolution (1024)
        --num_pairs : number of pairs to reenact

        ########## Visualization ##########
        --save_grid : Generate figure with source, target and reenacted image
        --save_image : Save only the reenacted image
        --resize_image : Resize image from 1024 to 256

    Example:
        python run_inference.py --output_path ./results --save_grid
    """
    # Description fixed: this is the inference entry point, not training.
    parser = argparse.ArgumentParser(description="inference script")
    ######### General #########
    parser.add_argument('--output_path', type=str, required=True, help="path to save output images")
    parser.add_argument('--source_path', type=str, default=None, help='path to source samples (latent codes or images)')
    parser.add_argument('--target_path', type=str, default=None, help='path to target samples (latent codes or images)')
    parser.add_argument('--masknet_path', type=str, default='./pretrained_models/mask_network_1024.pt', help="path to pretrained model for mask network")
    parser.add_argument('--dataset', type=str, default='ffhq', help="dataset")
    parser.add_argument('--image_resolution', type=int, default=1024, help="image resolution")
    parser.add_argument('--num_pairs', type=int, default=4, help="number of random pairs to reenact")
    parser.add_argument('--save_grid', dest='save_grid', action='store_true', help="Generate figure with source, target and reenacted image")
    parser.set_defaults(save_grid=False)
    parser.add_argument('--save_image', dest='save_image', action='store_true', help="Save only the reenacted image")
    parser.set_defaults(save_image=False)
    parser.add_argument('--resize_image', dest='resize_image', action='store_true', help="Resize image from 1024 to 256")
    parser.set_defaults(resize_image=False)
    # Parse given arguments
    args = parser.parse_args()
    args = vars(args)  # convert to dictionary
    inf = Inference_demo(args)
    inf.run()
| 12,669 | 39.479233 | 162 | py |
StyleMask | StyleMask-master/extract_statistics.py | """
Script to extract the npy file with the min, max values of facial pose parameters (yaw, pitch, roll, jaw and expressions)
1. Generate a set of random synthetic images
2. Use DECA model to extract the facial shape and the corresponding parameters
3. Calculate min, max values
"""
import os
import glob
import numpy as np
from PIL import Image
import torch
from torch.nn import functional as F
import matplotlib.pyplot as plt
import json
import cv2
from tqdm import tqdm
import argparse
from torchvision import utils as torch_utils
import warnings
warnings.filterwarnings("ignore")
from libs.configs.config_models import *
from libs.utilities.utils import make_noise, make_path, calculate_shapemodel
from libs.DECA.estimate_DECA import DECA_model
from libs.models.StyleGAN2.model import Generator as StyleGAN2Generator
def extract_stats(statistics):
	"""Print and return the [min, max] range of every facial-parameter column.

	`statistics` is an (num_samples, num_params) array where the first four
	columns are yaw, pitch, roll and jaw, and the remaining columns are
	expression coefficients. Returns a list of [min, max] pairs, one per column.
	"""
	fixed_labels = {0: 'yaw', 1: 'pitch', 2: 'roll', 3: 'jaw'}
	ranges = []
	# Iterate per-parameter: transpose to (num_params, num_samples).
	for idx, column in enumerate(np.transpose(statistics, (1, 0))):
		low = np.amin(column)
		high = np.amax(column)
		label = fixed_labels.get(idx, 'exp_{:02d}'.format(idx))
		print('{}/{} Min {:.2f} Max {:.2f}'.format(label, idx, low, high))
		ranges.append([low, high])
	return ranges
if __name__ == '__main__':
	# Generate `num_images` random StyleGAN2 faces, run DECA on each, and save
	# the per-image pose/expression parameter vectors plus their global ranges.
	num_images = 2000
	image_resolution = 1024
	dataset = 'FFHQ'
	output_path = './{}_stats'.format(dataset)
	make_path(output_path)
	gan_weights = stylegan2_ffhq_1024['gan_weights']
	channel_multiplier = stylegan2_ffhq_1024['channel_multiplier']
	print('----- Load generator from {} -----'.format(gan_weights))
	truncation = 0.7
	generator = StyleGAN2Generator(image_resolution, 512, 8, channel_multiplier= channel_multiplier)
	generator.load_state_dict(torch.load(gan_weights)['g_ema'], strict = True)
	generator.cuda().eval()
	# Mean latent used as the truncation anchor.
	trunc = generator.mean_latent(4096).detach().clone()
	shape_model = DECA_model('cuda')
	# DECA expects 256x256 input; downsample the 1024x1024 generator output.
	face_pool = torch.nn.AdaptiveAvgPool2d((256, 256))
	statistics = []
	with torch.no_grad():
		for i in tqdm(range(num_images)):
			z = make_noise(1, 512).cuda()
			source_img = generator([z], return_latents = False, truncation = truncation, truncation_latent = trunc, input_is_latent = False)[0]
			source_img = face_pool(source_img)
			params_source, angles_source = calculate_shapemodel(shape_model, source_img)
			yaw = angles_source[:,0][0].detach().cpu().numpy()
			pitch = angles_source[:,1][0].detach().cpu().numpy()
			roll = angles_source[:, 2][0].detach().cpu().numpy()
			exp = params_source['alpha_exp'][0].detach().cpu().numpy()
			jaw = params_source['pose'][0, 3].detach().cpu().numpy()
			# Layout: [yaw, pitch, roll, jaw, 50 expression coefficients].
			tmp = np.zeros(54)
			tmp[0] = yaw
			tmp[1] = pitch
			tmp[2] = roll
			tmp[3] = jaw
			tmp[4:] = exp
			# np.save(os.path.join(output_path, '{:07d}.npy'.format(i)), tmp)
			statistics.append(tmp)
	statistics = np.asarray(statistics)
	np.save(os.path.join(output_path, 'stats_all.npy'), statistics)
	ranges = extract_stats(statistics)
	np.save(os.path.join(output_path, 'ranges_{}.npy'.format(dataset)), ranges)
| 3,152 | 26.181034 | 134 | py |
StyleMask | StyleMask-master/libs/trainer.py | """
"""
import os
import json
import torch
import time
import numpy as np
import pdb
import cv2
import wandb
from torch import autograd
from torch import nn
from torch.utils.data import DataLoader
from tqdm import tqdm
from libs.utilities.utils import *
from libs.utilities.image_utils import *
from libs.DECA.estimate_DECA import DECA_model
from libs.models.StyleGAN2.model import Generator as StyleGAN2Generator
from libs.models.mask_predictor import MaskPredictor
from libs.utilities.stylespace_utils import decoder
from libs.configs.config_models import stylegan2_ffhq_1024
from libs.criteria.losses import Losses
from libs.criteria import id_loss
from libs.criteria.lpips.lpips import LPIPS
from libs.utilities.utils_inference import generate_grid_image, calculate_evaluation_metrics
from libs.utilities.dataloader import CustomDataset_validation
class Trainer(object):
	def __init__(self, args):
		"""Store arguments, create the experiment output directories and persist the config.

		args: dict of training hyper-parameters (see initialize_arguments for keys).
		"""
		self.args = args
		self.initialize_arguments(args)
		################# Initialize output paths #################
		make_path(self.output_path)
		self.log_dir = os.path.join(self.output_path, 'logs')
		make_path(self.log_dir)
		self.models_dir = os.path.join(self.output_path, 'models')
		make_path(self.models_dir)
		####################################################################
		# save arguments file with params
		save_arguments_json(args, self.output_path, 'arguments.json')
def initialize_arguments(self, args):
self.output_path = args['experiment_path']
self.use_wandb = args['use_wandb']
self.log_images_wandb = args['log_images_wandb']
self.project_wandb = args['project_wandb']
self.resume_training_model = args['resume_training_model']
self.image_resolution = args['image_resolution']
self.dataset_type = args['dataset_type']
self.synthetic_dataset_path = args['synthetic_dataset_path']
self.lr = args['lr']
self.num_layers_control = args['num_layers_control']
self.max_iter = args['max_iter']
self.batch_size = args['batch_size']
self.test_batch_size = args['test_batch_size']
self.workers = args['workers']
# Weights
self.lambda_identity = args['lambda_identity']
self.lambda_perceptual = args['lambda_perceptual']
self.lambda_shape = args['lambda_shape']
self.use_recurrent_cycle_loss = args['use_recurrent_cycle_loss']
self.steps_per_log = args['steps_per_log']
self.steps_per_save_models = args['steps_per_save_models']
self.steps_per_evaluation = args['steps_per_evaluation']
self.validation_pairs = args['validation_pairs']
self.num_pairs_log = args['num_pairs_log']
# if self.num_pairs_log > self.validation_pairs:
# self.num_pairs_log = self.validation_pairs
	def load_models(self):
		"""Instantiate DECA, the loss modules, the StyleGAN2 generator and the per-layer mask networks."""
		################## Initialize models #################
		print('-- Load DECA model ')
		self.deca = DECA_model('cuda')
		self.id_loss_ = id_loss.IDLoss().cuda().eval()
		self.lpips_loss = LPIPS(net_type='alex').cuda().eval()
		self.losses = Losses()
		####################################################################
		self.face_pool = torch.nn.AdaptiveAvgPool2d((256, 256))
		# Only the ffhq/1024 configuration is supported; anything else is reported but not fatal here.
		if self.dataset_type == 'ffhq' and self.image_resolution == 1024:
			self.generator_path = stylegan2_ffhq_1024['gan_weights']
			self.channel_multiplier = stylegan2_ffhq_1024['channel_multiplier']
			self.split_sections = stylegan2_ffhq_1024['split_sections']
			self.stylespace_dim = stylegan2_ffhq_1024['stylespace_dim']
			self.exp_ranges = np.load(stylegan2_ffhq_1024['expression_ranges'])
		else:
			print('Incorect dataset type {} and image resolution {}'.format(self.dataset_type, self.image_resolution))
		# One mask network per controlled style layer; default to all layers.
		if self.num_layers_control is not None:
			self.num_nets = self.num_layers_control
		else:
			self.num_nets = len(self.split_sections)
		print('-- Load generator from {} '.format(self.generator_path))
		self.G = StyleGAN2Generator(self.image_resolution, 512, 8, channel_multiplier= self.channel_multiplier)
		if self.image_resolution == 256:
			self.G.load_state_dict(torch.load(self.generator_path)['g_ema'], strict = False)
		else:
			self.G.load_state_dict(torch.load(self.generator_path)['g_ema'], strict = True)
		self.G.cuda().eval()
		self.truncation = 0.7
		self.trunc = self.G.mean_latent(4096).detach().clone()
		print('-- Initialize mask predictor.')
		self.mask_net = nn.ModuleDict({})
		for layer_idx in range(self.num_nets):
			network_name_str = 'network_{:02d}'.format(layer_idx)
			# Net info: each mask net maps a layer's style-difference vector to a mask of the same size.
			stylespace_dim_layer = self.split_sections[layer_idx]
			input_dim = stylespace_dim_layer
			output_dim = stylespace_dim_layer
			inner_dim = stylespace_dim_layer
			network_module = MaskPredictor(input_dim, output_dim, inner_dim = inner_dim)
			self.mask_net.update({network_name_str: network_module})
			out_text = 'Network {}: ----> input_dim:{} - output_dim:{}'.format(layer_idx, input_dim, output_dim)
			print(out_text)
	def configure_dataset(self):
		"""Build the synthetic validation dataset and its (non-shuffled) dataloader."""
		self.test_dataset = CustomDataset_validation(synthetic_dataset_path = self.synthetic_dataset_path, validation_pairs = self.validation_pairs)
		self.test_dataloader = DataLoader(self.test_dataset,
										batch_size=self.test_batch_size ,
										shuffle=False,
										num_workers=int(self.workers),
										drop_last=True)
def start_from_checkpoint(self):
step = 0
if self.resume_training_model is not None:
if os.path.isfile(self.resume_training_model):
print('Resuming training from {}'.format(self.resume_training_model))
state_dict = torch.load(self.resume_training_model)
if 'step' in state_dict:
step = state_dict['step']
if 'mask_net' in state_dict:
self.mask_net.load_state_dict(state_dict['mask_net'])
return step
	def get_shifted_image(self, style_source, style_target, w, noise):
		"""Blend source and target style spaces through the predicted masks and decode the reenacted image.

		style_source / style_target: per-layer lists of style tensors.
		w, noise: latent and noise inputs of the SOURCE image, reused for decoding.
		Returns (reenacted image, new per-layer style space list).
		"""
		# Generate shift: each mask net sees the per-layer style difference.
		masks_per_layer = []
		for layer_idx in range(self.num_nets):
			network_name_str = 'network_{:02d}'.format(layer_idx)
			style_source_idx = style_source[layer_idx]
			style_target_idx = style_target[layer_idx]
			styles = style_source_idx - style_target_idx
			mask_idx = self.mask_net[network_name_str](styles)
			masks_per_layer.append(mask_idx)
		# Flatten the per-layer lists into single style vectors.
		style_source = torch.cat(style_source, dim=1)
		style_target = torch.cat(style_target, dim=1)
		mask = torch.cat(masks_per_layer, dim=1)
		new_style_space = generate_new_stylespace(style_source, style_target, mask, self.num_layers_control)
		# Re-split into the generator's per-layer sections before decoding.
		new_style_space = list(torch.split(tensor=new_style_space, split_size_or_sections=self.split_sections, dim=1))
		imgs_shifted = decoder(self.G, new_style_space, w, noise, resize_image = True)
		return imgs_shifted, new_style_space
	def train(self):
		"""Main training loop: sample source/target latents, reenact, and optimize the mask networks.

		Only the mask networks are trained; DECA and the generator stay frozen
		(their forward passes for source/target run under no_grad).
		"""
		self.load_models()
		if self.use_wandb:
			#########################
			config = self.args
			wandb.init(
				project= self.project_wandb,
				notes="",
				tags=["debug"],
				config=config,
			)
			name = self.output_path.split('/')[-1]
			wandb.run.name = name
			wandb.watch(self.mask_net, log="all", log_freq=500)
			#######################
		self.configure_dataset()
		# NOTE(review): `device` is computed but unused; the code below assumes CUDA.
		device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
		self.G.cuda().eval()
		self.mask_net.train().cuda()
		optimizer = torch.optim.Adam(self.mask_net.parameters(), lr=self.lr, weight_decay=5e-4)
		self.truncation = 0.7
		# Recompute the truncation anchor from a fresh batch of latents.
		latent_in = torch.randn(4096, 512).cuda()
		self.trunc = self.G.style(latent_in).mean(0, keepdim=True)
		input_is_latent = False
		recovered_step = self.start_from_checkpoint()
		if recovered_step != 0:
			print('Resume training from {}'.format(recovered_step))
		list_loss = []
		for step in range(recovered_step, self.max_iter):
			loss_dict = {}
			self.G.zero_grad()
			source_z = make_noise(self.batch_size, 512, None).cuda()
			target_z = make_noise(self.batch_size, 512, None).cuda()
			with torch.no_grad():
				######## Source images ########
				imgs_source, style_source, w_source, noise_source = generate_image(self.G, source_z, self.truncation, self.trunc, self.image_resolution, self.split_sections,
					input_is_latent = input_is_latent, return_latents= True, resize_image = True)
				params_source, angles_source = calculate_shapemodel(self.deca, imgs_source)
				######## Target images ########
				imgs_target, style_target, w_target, noise_target = generate_image(self.G, target_z, self.truncation, self.trunc, self.image_resolution, self.split_sections,
					input_is_latent = input_is_latent, return_latents= True, resize_image = True)
				params_target, angles_target = calculate_shapemodel(self.deca, imgs_target)
			######## Generate reenacted image between source and target images ########
			imgs_shifted, new_style_space = self.get_shifted_image(style_source, style_target, w_source, noise_source)
			params_shifted, angles_shifted = calculate_shapemodel(self.deca, imgs_shifted)
			loss, loss_dict = self.calculate_loss(params_source, params_shifted, params_target, imgs_source, imgs_shifted)
			if self.use_recurrent_cycle_loss:
				########## Recurrent Cycle loss ##########
				with torch.no_grad():
					### Generate a new random target image ###
					target_z_cycle = make_noise(self.batch_size, 512, None).cuda()
					imgs_target_cycle, style_target_cycle, w_target_cycle, noise_target_cycle = generate_image(self.G, target_z_cycle, self.truncation, self.trunc, self.image_resolution, self.split_sections,
						input_is_latent = input_is_latent, return_latents= True, resize_image = True)
					params_target_cycle, angles_target_cycle = calculate_shapemodel(self.deca, imgs_target_cycle)
				#### Reenact source image into the facial pose of target_z_cycle ####
				imgs_shifted_hat, new_style_space_hat = self.get_shifted_image(style_source, style_target_cycle, w_source, noise_source)
				params_shifted_hat, angles_shifted_hat = calculate_shapemodel(self.deca, imgs_shifted_hat)
				#####################################################################
				#### Reenact initial shifted image into the facial pose of target_z_cycle ####
				imgs_shifted_hat_2, new_style_space_hat_2 = self.get_shifted_image(new_style_space, style_target_cycle, w_source, noise_source)
				params_shifted_hat_2, angles_shifted_hat_2 = calculate_shapemodel(self.deca, imgs_shifted_hat_2)
				loss_cycle, loss_dict = self.calculate_recurrent_loss(params_source, params_target_cycle, params_shifted_hat,
					params_shifted_hat_2, imgs_source, imgs_shifted_hat, imgs_shifted_hat_2, loss_dict)
				loss += loss_cycle
			############## Total loss ##############
			list_loss.append(loss.data.item())
			self.mask_net.zero_grad()
			loss.backward()
			optimizer.step()
			######### Evaluate #########
			if step % self.steps_per_log == 0:
				out_text = '[step {}]'.format(step)
				for key, value in loss_dict.items():
					out_text += (' | {}: {:.2f}'.format(key, value))
				out_text += '| Mean Loss {:.2f}'.format(np.mean(np.array(list_loss)))
				print(out_text)
			if step % self.steps_per_save_models == 0 and step > 0:
				self.save_model(step)
			if step % self.steps_per_evaluation == 0:
				self.evaluate_model_reenactment(step)
			# Reset the running-mean loss window every 500 steps.
			if step % 500 == 0 and step > 0:
				list_loss = []
			if self.use_wandb:
				wandb.log({
					'step': step,
				})
				wandb.log(loss_dict)
	def calculate_loss(self, params_source, params_shifted, params_target, imgs_source, imgs_shifted):
		"""Compute shape (pose/expression), identity and perceptual losses for one reenacted batch.

		Ground truth shape = target pose/expression combined with the SOURCE identity shape.
		Returns (total loss tensor, dict of scalar loss values).
		"""
		loss_dict = {}
		loss = 0
		############## Shape Loss ##############
		if self.lambda_shape !=0:
			coefficients_gt = {}
			coefficients_gt['pose'] = params_target['pose']
			coefficients_gt['exp'] = params_target['alpha_exp']
			coefficients_gt['cam'] = params_source['cam']
			# Fix the camera to a canonical frontal view so landmarks are comparable.
			# NOTE(review): this mutates params_source['cam'] in place.
			coefficients_gt['cam'][:,:] = 0.
			coefficients_gt['cam'][:,0] = 8
			coefficients_gt['shape'] = params_source['alpha_shp']
			landmarks2d_gt, landmarks3d_gt, shape_gt = self.deca.calculate_shape(coefficients_gt)
			coefficients_reen = {}
			coefficients_reen['pose'] = params_shifted['pose']
			coefficients_reen['shape'] = params_shifted['alpha_shp']
			coefficients_reen['exp'] = params_shifted['alpha_exp']
			coefficients_reen['cam'] = params_shifted['cam']
			coefficients_reen['cam'][:,:] = 0.
			coefficients_reen['cam'][:,0] = 8
			landmarks2d_reenacted, landmarks3d_reenacted, shape_reenacted = self.deca.calculate_shape(coefficients_reen)
			loss_shape = self.lambda_shape * self.losses.calculate_shape_loss(shape_gt, shape_reenacted, normalize = False)
			loss_mouth = self.lambda_shape * self.losses.calculate_mouth_loss(landmarks2d_gt, landmarks2d_reenacted)
			loss_eye = self.lambda_shape * self.losses.calculate_eye_loss(landmarks2d_gt, landmarks2d_reenacted)
			loss_dict['loss_shape'] = loss_shape.data.item()
			loss_dict['loss_eye'] = loss_eye.data.item()
			loss_dict['loss_mouth'] = loss_mouth.data.item()
			loss += loss_mouth
			loss += loss_shape
			loss += loss_eye
		####################################################
		############## Identity losses ##############
		if self.lambda_identity != 0:
			loss_identity = self.lambda_identity * self.id_loss_(imgs_shifted, imgs_source.detach())
			loss_dict['loss_identity'] = loss_identity.data.item()
			loss += loss_identity
		if self.lambda_perceptual != 0:
			# LPIPS here expects images in [0, 255].
			imgs_source_255 = tensor_to_255(imgs_source)
			imgs_shifted_255 = tensor_to_255(imgs_shifted)
			loss_perceptual = self.lambda_perceptual * self.lpips_loss(imgs_shifted_255, imgs_source_255.detach())
			loss_dict['loss_perceptual'] = loss_perceptual.data.item()
			loss += loss_perceptual
		loss_dict['loss'] = loss.data.item()
		return loss, loss_dict
	def calculate_recurrent_loss(self, params_source, params_target_cycle, params_shifted_hat, params_shifted_hat_2, imgs_source, imgs_shifted_hat, imgs_shifted_hat_2, loss_dict):
		"""Cycle-consistency losses: both one-hop and two-hop reenactments must match the
		cycle-target pose/expression, the source identity, and each other perceptually.

		Appends its scalar values into the caller-provided loss_dict and returns (loss, loss_dict).
		"""
		loss = 0
		############## Shape Loss ##############
		if self.lambda_shape > 0:
			# 1: one-hop reenactment (source -> cycle target) vs. ground truth.
			coefficients_gt = {}
			coefficients_gt['pose'] = params_target_cycle['pose']
			coefficients_gt['exp'] = params_target_cycle['alpha_exp']
			coefficients_gt['cam'] = params_source['cam']
			# Canonical frontal camera (in-place mutation, as in calculate_loss).
			coefficients_gt['cam'][:,:] = 0.
			coefficients_gt['cam'][:,0] = 8
			coefficients_gt['shape'] = params_source['alpha_shp']
			landmarks2d_gt, landmarks3d_gt, shape_gt = self.deca.calculate_shape(coefficients_gt)
			coefficients_reen = {}
			coefficients_reen['pose'] = params_shifted_hat['pose']
			coefficients_reen['shape'] = params_shifted_hat['alpha_shp']
			coefficients_reen['exp'] = params_shifted_hat['alpha_exp']
			coefficients_reen['cam'] = params_shifted_hat['cam']
			coefficients_reen['cam'][:,:] = 0.
			coefficients_reen['cam'][:,0] = 8
			landmarks2d_reenacted, landmarks3d_reenacted, shape_reenacted = self.deca.calculate_shape(coefficients_reen)
			loss_shape = self.lambda_shape * self.losses.calculate_shape_loss(shape_gt, shape_reenacted, normalize = False)
			loss_mouth = self.lambda_shape * self.losses.calculate_mouth_loss(landmarks2d_gt, landmarks2d_reenacted)
			loss_eye = self.lambda_shape * self.losses.calculate_eye_loss(landmarks2d_gt, landmarks2d_reenacted)
			# 2: two-hop reenactment (shifted -> cycle target) vs. the same ground truth.
			coefficients_gt = {}
			coefficients_gt['pose'] = params_target_cycle['pose']
			coefficients_gt['exp'] = params_target_cycle['alpha_exp']
			coefficients_gt['cam'] = params_source['cam']
			coefficients_gt['cam'][:,:] = 0.
			coefficients_gt['cam'][:,0] = 8
			coefficients_gt['shape'] = params_source['alpha_shp']
			landmarks2d_gt, landmarks3d_gt, shape_gt = self.deca.calculate_shape(coefficients_gt)
			coefficients_reen = {}
			coefficients_reen['pose'] = params_shifted_hat_2['pose']
			coefficients_reen['shape'] = params_shifted_hat_2['alpha_shp']
			coefficients_reen['exp'] = params_shifted_hat_2['alpha_exp']
			coefficients_reen['cam'] = params_shifted_hat_2['cam']
			coefficients_reen['cam'][:,:] = 0.
			coefficients_reen['cam'][:,0] = 8
			landmarks2d_reenacted, landmarks3d_reenacted, shape_reenacted = self.deca.calculate_shape(coefficients_reen)
			loss_shape += self.lambda_shape * self.losses.calculate_shape_loss(shape_gt, shape_reenacted, normalize = False)
			loss_mouth += self.lambda_shape * self.losses.calculate_mouth_loss(landmarks2d_gt, landmarks2d_reenacted)
			loss_eye += self.lambda_shape * self.losses.calculate_eye_loss(landmarks2d_gt, landmarks2d_reenacted)
			loss_dict['loss_shape_cycle'] = loss_shape.data.item()
			loss_dict['loss_eye_cycle'] = loss_eye.data.item()
			loss_dict['loss_mouth_cycle'] = loss_mouth.data.item()
			loss += loss_mouth
			loss += loss_shape
			loss += loss_eye
		############## Identity losses ##############
		if self.lambda_identity != 0:
			loss_identity = self.lambda_identity * self.id_loss_(imgs_shifted_hat, imgs_source.detach())
			loss_identity += self.lambda_identity* self.id_loss_(imgs_shifted_hat_2, imgs_source.detach())
			loss_dict['loss_identity_cycle'] = loss_identity.data.item()
			loss += loss_identity
		if self.lambda_perceptual != 0:
			# The two reenactments target the same pose, so they should match perceptually.
			imgs_shifted_hat_255 = tensor_to_255(imgs_shifted_hat)
			imgs_shifted_hat_2_255 = tensor_to_255(imgs_shifted_hat_2)
			loss_perceptual = self.lambda_perceptual * self.lpips_loss(imgs_shifted_hat_255, imgs_shifted_hat_2_255)
			loss_dict['loss_perceptual_cycle'] = loss_perceptual.data.item()
			loss += loss_perceptual
		loss_dict['loss_cycle'] = loss.data.item()
		return loss, loss_dict
def save_model(self, step):
state_dict = {
'step': step,
'mask_net': self.mask_net.state_dict(),
'num_layers_control': self.num_layers_control
}
checkpoint_path = os.path.join(self.models_dir, 'mask_net_{:06d}.pt'.format(step))
torch.save(state_dict, checkpoint_path)
'Evaluate models for face reenactment and save reenactment figure'
	def evaluate_model_reenactment(self, step):
		"""Run cross-reenactment on the validation pairs, report expression/pose/CSIM metrics
		and save a source/target/reenacted grid image for the first num_pairs_log pairs."""
		input_is_latent = False
		self.mask_net.eval()
		exp_error = 0; pose_error = 0; csim_total = 0; count = 0
		counter_logs = 0
		source_images = torch.zeros((self.num_pairs_log, 3, 256, 256))
		target_images = torch.zeros((self.num_pairs_log, 3, 256, 256))
		reenacted_images = torch.zeros((self.num_pairs_log, 3, 256, 256))
		for batch_idx, batch in enumerate(tqdm(self.test_dataloader)):
			with torch.no_grad():
				sample_batch = batch
				source_w = sample_batch['source_w'].cuda()
				target_w = sample_batch['target_w'].cuda()
				imgs_source, style_source, w_source, noise_source = generate_image(self.G, source_w, self.truncation, self.trunc, self.image_resolution, self.split_sections,
					input_is_latent = input_is_latent, return_latents= True, resize_image = True)
				params_source, angles_source = calculate_shapemodel(self.deca, imgs_source)
				imgs_target, style_target, w_target, noise_target = generate_image(self.G, target_w, self.truncation, self.trunc, self.image_resolution, self.split_sections,
					input_is_latent = input_is_latent, return_latents= True, resize_image = True)
				params_target, angles_target = calculate_shapemodel(self.deca, imgs_target)
				imgs_shifted, new_style_space = self.get_shifted_image(style_source, style_target, w_source, noise_source)
				params_shifted, angles_shifted = calculate_shapemodel(self.deca, imgs_shifted)
				csim, pose, exp = calculate_evaluation_metrics(params_shifted, params_target, angles_shifted, angles_target, imgs_shifted, imgs_source, self.id_loss_, self.exp_ranges)
				exp_error += exp
				csim_total += csim
				pose_error += pose
				count += 1
				# Collect the first num_pairs_log triplets for the logging grid.
				if counter_logs < self.num_pairs_log:
					if (self.num_pairs_log - counter_logs) % source_w.shape[0] == 0:
						source_images[counter_logs:counter_logs+source_w.shape[0]] = imgs_source.detach().cpu()
						target_images[counter_logs:counter_logs+source_w.shape[0]] = imgs_target.detach().cpu()
						reenacted_images[counter_logs:counter_logs+source_w.shape[0]] = imgs_shifted.detach().cpu()
					else:
						# Last partially-filled slot: take only the remaining pairs.
						num = self.num_pairs_log - counter_logs
						source_images[counter_logs:counter_logs+num] = imgs_source[:num].detach().cpu()
						target_images[counter_logs:counter_logs+num] = imgs_target[:num].detach().cpu()
						reenacted_images[counter_logs:counter_logs+num] = imgs_shifted[:num].detach().cpu()
					counter_logs += source_w.shape[0]
		sample = generate_grid_image(source_images, target_images, reenacted_images)
		save_image(sample, os.path.join(self.log_dir, '{:06d}.png'.format(step)))
		if self.use_wandb and self.log_images_wandb:
			image_array = sample.detach().cpu().numpy()
			image_array = np.transpose(image_array, (1, 2, 0))
			images = wandb.Image(image_array)
			wandb.log({"images": images})
		print('*************** Validation ***************')
		print('Expression Error: {:.4f}\t Pose Error: {:.2f}\t CSIM: {:.2f}'.format(exp_error/count, pose_error/count, csim_total/count))
		print('*************** Validation ***************')
		if self.use_wandb:
			wandb.log({
				'expression_error': exp_error/count,
				'pose_error': pose_error/count,
				'csim': csim_total/count,
			})
		self.mask_net.train()
| 20,832 | 40.5 | 192 | py |
StyleMask | StyleMask-master/libs/models/mask_predictor.py | import torch
from torch import nn
class MaskPredictor(nn.Module):
	"""Two-layer MLP that maps a style-difference vector to a soft blending mask in (0, 1)."""

	def __init__(self, input_dim, output_dim, inner_dim=1024):
		super(MaskPredictor, self).__init__()
		# Keep Linear layers at indices 0 and 2 — initilization() relies on that layout.
		layers = [
			nn.Linear(input_dim, inner_dim, bias=True),
			nn.ReLU(),
			nn.Linear(inner_dim, output_dim, bias=True),
		]
		self.masknet = nn.Sequential(*layers)
		self.initilization()

	def initilization(self):
		# Small-variance normal init keeps the predicted masks near uniform at the start.
		for linear_idx in (0, 2):
			torch.nn.init.normal_(self.masknet[linear_idx].weight, mean=0.0, std=0.01)

	def forward(self, input):
		return torch.nn.Sigmoid()(self.masknet(input))
| 628 | 21.464286 | 74 | py |
StyleMask | StyleMask-master/libs/models/inversion/psp.py | """
This file defines the core research contribution
"""
import math
import matplotlib
matplotlib.use('Agg')
import torch
from torch import nn
import torchvision.transforms as transforms
import os
from libs.models.inversion import psp_encoders
def get_keys(d, name):
	"""Return the sub-state-dict of entries whose keys start with ``name``, prefix stripped.

	Accepts either a raw state dict or a checkpoint dict wrapping one under 'state_dict'.
	The stripped prefix includes the '.' separator after ``name``.
	"""
	source = d.get('state_dict', d)
	prefix_len = len(name)
	filtered = {}
	for key, value in source.items():
		if key[:prefix_len] == name:
			filtered[key[prefix_len + 1:]] = value
	return filtered
class pSp(nn.Module):
	"""e4e-style inversion wrapper: encodes a real image into W+ latents, shifted by the average latent."""

	def __init__(self, opts):
		super(pSp, self).__init__()
		self.opts = opts
		# Number of style vectors grows with output resolution: 2*log2(size) - 2.
		self.opts.n_styles = int(math.log(self.opts.output_size, 2)) * 2 - 2
		self.n_styles = self.opts.n_styles
		# Define architecture and load the pretrained encoder weights.
		self.encoder = psp_encoders.Encoder4Editing(50, 'ir_se', self.opts)
		checkpoint = torch.load(self.opts.checkpoint_path, map_location='cpu')
		self.encoder.load_state_dict(get_keys(checkpoint, 'encoder'), strict=True)
		self.__load_latent_avg(checkpoint)

	def forward(self, real_image, randomize_noise=False, inject_latent=None, return_latents=False, alpha=None, average_code=False, input_is_full=False):
		codes = self.encoder(real_image)
		if self.latent_avg is None:
			return codes
		avg = self.latent_avg.repeat(codes.shape[0], 1, 1)
		# 2-D codes get only the first style's average added; 3-D codes get the full W+ average.
		if codes.ndim == 2:
			return codes + avg[:, 0, :]
		return codes + avg

	def __load_latent_avg(self, ckpt, repeat=None):
		if 'latent_avg' not in ckpt:
			self.latent_avg = None
			return
		self.latent_avg = ckpt['latent_avg'].to(self.opts.device)
		if repeat is not None:
			self.latent_avg = self.latent_avg.repeat(repeat, 1)
StyleMask | StyleMask-master/libs/models/inversion/psp_encoders.py | from enum import Enum
import math
import numpy as np
import torch
from torch import nn
from torch.nn import Conv2d, BatchNorm2d, PReLU, Sequential, Module
from libs.models.inversion.helpers import get_blocks, bottleneck_IR, bottleneck_IR_SE, _upsample_add
from libs.models.StyleGAN2.model import EqualLinear, ScaledLeakyReLU, EqualConv2d
class ProgressiveStage(Enum):
    """e4e progressive training stages: stage DeltaK unlocks the K-th delta branch;
    Inference enables all deltas."""
    WTraining = 0
    Delta1Training = 1
    Delta2Training = 2
    Delta3Training = 3
    Delta4Training = 4
    Delta5Training = 5
    Delta6Training = 6
    Delta7Training = 7
    Delta8Training = 8
    Delta9Training = 9
    Delta10Training = 10
    Delta11Training = 11
    Delta12Training = 12
    Delta13Training = 13
    Delta14Training = 14
    Delta15Training = 15
    Delta16Training = 16
    Delta17Training = 17
    Inference = 18
class GradualStyleBlock(Module):
    """Reduce a `spatial` x `spatial` feature map to a single style vector via strided convs."""

    def __init__(self, in_c, out_c, spatial):
        super(GradualStyleBlock, self).__init__()
        self.out_c = out_c
        self.spatial = spatial
        # Each stride-2 conv halves the spatial size; log2(spatial) convs reach 1x1.
        num_pools = int(np.log2(spatial))
        layers = [Conv2d(in_c, out_c, kernel_size=3, stride=2, padding=1), nn.LeakyReLU()]
        for _ in range(num_pools - 1):
            layers.append(Conv2d(out_c, out_c, kernel_size=3, stride=2, padding=1))
            layers.append(nn.LeakyReLU())
        self.convs = nn.Sequential(*layers)
        self.linear = EqualLinear(out_c, out_c, lr_mul=1)

    def forward(self, x):
        features = self.convs(x)
        flat = features.view(-1, self.out_c)
        return self.linear(flat)
class GradualStyleEncoder(Module):
    """pSp FPN-style encoder: an IR(-SE) backbone whose coarse/middle/fine feature maps
    are mapped to one W+ style vector per generator layer."""
    def __init__(self, num_layers, mode='ir', opts=None):
        super(GradualStyleEncoder, self).__init__()
        assert num_layers in [50, 100, 152], 'num_layers should be 50,100, or 152'
        assert mode in ['ir', 'ir_se'], 'mode should be ir or ir_se'
        blocks = get_blocks(num_layers)
        if mode == 'ir':
            unit_module = bottleneck_IR
        elif mode == 'ir_se':
            unit_module = bottleneck_IR_SE
        self.input_layer = Sequential(Conv2d(3, 64, (3, 3), 1, 1, bias=False),
                                      BatchNorm2d(64),
                                      PReLU(64))
        modules = []
        for block in blocks:
            for bottleneck in block:
                modules.append(unit_module(bottleneck.in_channel,
                                           bottleneck.depth,
                                           bottleneck.stride))
        self.body = Sequential(*modules)
        self.styles = nn.ModuleList()
        log_size = int(math.log(opts.output_size, 2))
        self.style_count = 2 * log_size - 2
        # Styles 0..2 come from coarse features, 3..6 from middle, the rest from fine.
        self.coarse_ind = 3
        self.middle_ind = 7
        for i in range(self.style_count):
            if i < self.coarse_ind:
                style = GradualStyleBlock(512, 512, 16)
            elif i < self.middle_ind:
                style = GradualStyleBlock(512, 512, 32)
            else:
                style = GradualStyleBlock(512, 512, 64)
            self.styles.append(style)
        # 1x1 lateral convs that project backbone features to 512 channels for the FPN merge.
        self.latlayer1 = nn.Conv2d(256, 512, kernel_size=1, stride=1, padding=0)
        self.latlayer2 = nn.Conv2d(128, 512, kernel_size=1, stride=1, padding=0)

    def forward(self, x):
        x = self.input_layer(x)
        latents = []
        modulelist = list(self.body._modules.values())
        # Tap intermediate backbone activations at fixed block indices.
        for i, l in enumerate(modulelist):
            x = l(x)
            if i == 6:
                c1 = x
            elif i == 20:
                c2 = x
            elif i == 23:
                c3 = x
        for j in range(self.coarse_ind):
            latents.append(self.styles[j](c3))
        p2 = _upsample_add(c3, self.latlayer1(c2))
        for j in range(self.coarse_ind, self.middle_ind):
            latents.append(self.styles[j](p2))
        p1 = _upsample_add(p2, self.latlayer2(c1))
        for j in range(self.middle_ind, self.style_count):
            latents.append(self.styles[j](p1))
        out = torch.stack(latents, dim=1)
        return out
class Encoder4Editing(Module):
    """e4e encoder: predicts a base W code plus per-layer deltas, unlocked progressively
    according to `progressive_stage`."""
    def __init__(self, num_layers, mode='ir', opts=None):
        super(Encoder4Editing, self).__init__()
        assert num_layers in [50, 100, 152], 'num_layers should be 50,100, or 152'
        assert mode in ['ir', 'ir_se'], 'mode should be ir or ir_se'
        blocks = get_blocks(num_layers)
        if mode == 'ir':
            unit_module = bottleneck_IR
        elif mode == 'ir_se':
            unit_module = bottleneck_IR_SE
        self.input_layer = Sequential(Conv2d(3, 64, (3, 3), 1, 1, bias=False),
                                      BatchNorm2d(64),
                                      PReLU(64))
        modules = []
        for block in blocks:
            for bottleneck in block:
                modules.append(unit_module(bottleneck.in_channel,
                                           bottleneck.depth,
                                           bottleneck.stride))
        self.body = Sequential(*modules)
        self.styles = nn.ModuleList()
        log_size = int(math.log(opts.output_size, 2))
        self.style_count = 2 * log_size - 2
        # Styles 0..2 from coarse features, 3..6 from middle, the rest from fine.
        self.coarse_ind = 3
        self.middle_ind = 7
        for i in range(self.style_count):
            if i < self.coarse_ind:
                style = GradualStyleBlock(512, 512, 16)
            elif i < self.middle_ind:
                style = GradualStyleBlock(512, 512, 32)
            else:
                style = GradualStyleBlock(512, 512, 64)
            self.styles.append(style)
        self.latlayer1 = nn.Conv2d(256, 512, kernel_size=1, stride=1, padding=0)
        self.latlayer2 = nn.Conv2d(128, 512, kernel_size=1, stride=1, padding=0)
        # Inference stage = all delta branches active.
        self.progressive_stage = ProgressiveStage.Inference

    def get_deltas_starting_dimensions(self):
        ''' Get a list of the initial dimension of every delta from which it is applied '''
        return list(range(self.style_count))  # Each dimension has a delta applied to it

    def set_progressive_stage(self, new_stage: ProgressiveStage):
        self.progressive_stage = new_stage
        print('Changed progressive stage to: ', new_stage)

    def forward(self, x):
        x = self.input_layer(x)
        modulelist = list(self.body._modules.values())
        # Tap intermediate backbone activations at fixed block indices.
        for i, l in enumerate(modulelist):
            x = l(x)
            if i == 6:
                c1 = x
            elif i == 20:
                c2 = x
            elif i == 23:
                c3 = x
        # Infer main W and duplicate it across all style_count layers.
        w0 = self.styles[0](c3)
        w = w0.repeat(self.style_count, 1, 1).permute(1, 0, 2)
        stage = self.progressive_stage.value
        features = c3
        for i in range(1, min(stage + 1, self.style_count)):  # Infer additional deltas
            if i == self.coarse_ind:
                p2 = _upsample_add(c3, self.latlayer1(c2))  # FPN's middle features
                features = p2
            elif i == self.middle_ind:
                p1 = _upsample_add(p2, self.latlayer2(c1))  # FPN's fine features
                features = p1
            delta_i = self.styles[i](features)
            w[:, i] += delta_i
        return w
class BackboneEncoderUsingLastLayerIntoW(Module):
    """Simpler pSp encoder: pools the backbone's last feature map into a single W code,
    then repeats it for every generator layer (W, not W+)."""
    def __init__(self, num_layers, mode='ir', opts=None):
        super(BackboneEncoderUsingLastLayerIntoW, self).__init__()
        print('Using BackboneEncoderUsingLastLayerIntoW')
        assert num_layers in [50, 100, 152], 'num_layers should be 50,100, or 152'
        assert mode in ['ir', 'ir_se'], 'mode should be ir or ir_se'
        blocks = get_blocks(num_layers)
        if mode == 'ir':
            unit_module = bottleneck_IR
        elif mode == 'ir_se':
            unit_module = bottleneck_IR_SE
        self.input_layer = Sequential(Conv2d(3, 64, (3, 3), 1, 1, bias=False),
                                      BatchNorm2d(64),
                                      PReLU(64))
        self.output_pool = torch.nn.AdaptiveAvgPool2d((1, 1))
        self.linear = EqualLinear(512, 512, lr_mul=1)
        modules = []
        for block in blocks:
            for bottleneck in block:
                modules.append(unit_module(bottleneck.in_channel,
                                           bottleneck.depth,
                                           bottleneck.stride))
        self.body = Sequential(*modules)
        log_size = int(math.log(opts.output_size, 2))
        self.style_count = 2 * log_size - 2

    def forward(self, x):
        x = self.input_layer(x)
        x = self.body(x)
        x = self.output_pool(x)
        x = x.view(-1, 512)
        x = self.linear(x)
        # Repeat the single W code for every style layer: (batch, style_count, 512).
        return x.repeat(self.style_count, 1, 1).permute(1, 0, 2)
# Consultation encoder
class ResidualEncoder(Module):
    """Consultation encoder: predicts per-pixel scale and shift condition maps (64x64, 512ch)
    from an input image, for feature-level modulation of the generator."""
    def __init__(self, opts=None):
        super(ResidualEncoder, self).__init__()
        self.conv_layer1 = Sequential(Conv2d(3, 32, (3, 3), 1, 1, bias=False),
                                      BatchNorm2d(32),
                                      PReLU(32))
        self.conv_layer2 = Sequential(*[bottleneck_IR(32,48,2), bottleneck_IR(48,48,1), bottleneck_IR(48,48,1)])
        self.conv_layer3 = Sequential(*[bottleneck_IR(48,64,2), bottleneck_IR(64,64,1), bottleneck_IR(64,64,1)])
        self.condition_scale3 = nn.Sequential(
                    EqualConv2d(64, 512, 3, stride=1, padding=1, bias=True ),
                    ScaledLeakyReLU(0.2),
                    EqualConv2d(512, 512, 3, stride=1, padding=1, bias=True ))
        self.condition_shift3 = nn.Sequential(
                    EqualConv2d(64, 512, 3, stride=1, padding=1, bias=True ),
                    ScaledLeakyReLU(0.2),
                    EqualConv2d(512, 512, 3, stride=1, padding=1, bias=True ))

    def get_deltas_starting_dimensions(self):
        ''' Get a list of the initial dimension of every delta from which it is applied '''
        # NOTE(review): self.style_count is never assigned on this class, so calling this
        # raises AttributeError — looks copied from the W+ encoders; confirm intended value.
        return list(range(self.style_count))  # Each dimension has a delta applied to it

    def forward(self, x):
        # Returns [scale, shift], each upsampled to 64x64.
        conditions = []
        feat1 = self.conv_layer1(x)
        feat2 = self.conv_layer2(feat1)
        feat3 = self.conv_layer3(feat2)
        scale = self.condition_scale3(feat3)
        scale = torch.nn.functional.interpolate(scale, size=(64,64) , mode='bilinear')
        conditions.append(scale.clone())
        shift = self.condition_shift3(feat3)
        shift = torch.nn.functional.interpolate(shift, size=(64,64) , mode='bilinear')
        conditions.append(shift.clone())
        return conditions
# ADA
class ResidualAligner(Module):
    """U-Net-style aligner over a 6-channel input (presumably two stacked RGB
    images — TODO confirm against the caller), producing a 3-channel residual.

    Encoder downsamples 3 times (16 -> 32 -> 48 -> 64 channels); the decoder
    upsamples back, concatenating the matching encoder feature at each step.
    """

    def __init__(self, opts=None):
        super(ResidualAligner, self).__init__()
        self.conv_layer1 = Sequential(Conv2d(6, 16, (3, 3), 1, 1, bias=False),
                                      BatchNorm2d(16),
                                      PReLU(16))
        self.conv_layer2 = Sequential(*[bottleneck_IR(16,32,2), bottleneck_IR(32,32,1), bottleneck_IR(32,32,1)])
        self.conv_layer3 = Sequential(*[bottleneck_IR(32,48,2), bottleneck_IR(48,48,1), bottleneck_IR(48,48,1)])
        self.conv_layer4 = Sequential(*[bottleneck_IR(48,64,2), bottleneck_IR(64,64,1), bottleneck_IR(64,64,1)])
        # Decoder stages take [upsampled deep feature ; skip feature] concatenations.
        self.dconv_layer1 = Sequential(*[bottleneck_IR(112,64,1), bottleneck_IR(64,32,1), bottleneck_IR(32,32,1)])
        self.dconv_layer2 = Sequential(*[bottleneck_IR(64,32,1), bottleneck_IR(32,16,1), bottleneck_IR(16,16,1)])
        self.dconv_layer3 = Sequential(*[bottleneck_IR(32,16,1), bottleneck_IR(16,3,1), bottleneck_IR(3,3,1)])

    def forward(self, x):
        feat1 = self.conv_layer1(x)
        feat2 = self.conv_layer2(feat1)
        feat3 = self.conv_layer3(feat2)
        feat4 = self.conv_layer4(feat3)
        # Fixed target sizes (64/128/256) — assumes a 256x256 input; TODO confirm.
        feat4 = torch.nn.functional.interpolate(feat4, size=(64,64) , mode='bilinear')
        dfea1 = self.dconv_layer1(torch.cat((feat4, feat3),1))
        dfea1 = torch.nn.functional.interpolate(dfea1, size=(128,128) , mode='bilinear')
        dfea2 = self.dconv_layer2(torch.cat( (dfea1, feat2),1))
        dfea2 = torch.nn.functional.interpolate(dfea2, size=(256,256) , mode='bilinear')
        dfea3 = self.dconv_layer3(torch.cat( (dfea2, feat1),1))
        res_aligned = dfea3
        return res_aligned
| 12,262 | 37.806962 | 115 | py |
StyleMask | StyleMask-master/libs/models/inversion/helpers.py | from collections import namedtuple
import torch
from torch.nn import Conv2d, BatchNorm2d, PReLU, ReLU, Sigmoid, MaxPool2d, AdaptiveAvgPool2d, Sequential, Module, Linear
import torch.nn.functional as F
"""
ArcFace implementation from [TreB1eN](https://github.com/TreB1eN/InsightFace_Pytorch)
"""
class Flatten(Module):
    """Collapse every non-batch dimension: (N, ...) -> (N, prod(...))."""

    def forward(self, input):
        batch = input.size(0)
        return input.view(batch, -1)
def l2_norm(input, axis=1):
    """Return `input` scaled to unit L2 norm along dimension `axis`."""
    length = torch.norm(input, 2, axis, True)  # keepdim so division broadcasts
    normalized = input / length
    return normalized
# Lightweight config record consumed by get_block / the IR modules below.
class Bottleneck(namedtuple('Block', ['in_channel', 'depth', 'stride'])):
    """ A named tuple describing a ResNet block. """
def get_block(in_channel, depth, num_units, stride=2):
    """Build one ResNet stage: a strided bottleneck then unit-stride ones."""
    stage = [Bottleneck(in_channel, depth, stride)]
    stage.extend(Bottleneck(depth, depth, 1) for _ in range(num_units - 1))
    return stage
def get_blocks(num_layers):
    """Return the four-stage Bottleneck configuration for ResNet-{50,100,152}.

    Only the number of units per stage differs between depths; the channel
    progression (64 -> 64 -> 128 -> 256 -> 512) is shared.
    """
    units_per_stage = {
        50: (3, 4, 14, 3),
        100: (3, 13, 30, 3),
        152: (3, 8, 36, 3),
    }
    if num_layers not in units_per_stage:
        raise ValueError("Invalid number of layers: {}. Must be one of [50, 100, 152]".format(num_layers))
    u1, u2, u3, u4 = units_per_stage[num_layers]
    blocks = [
        get_block(in_channel=64, depth=64, num_units=u1),
        get_block(in_channel=64, depth=128, num_units=u2),
        get_block(in_channel=128, depth=256, num_units=u3),
        get_block(in_channel=256, depth=512, num_units=u4),
    ]
    return blocks
class SEModule(Module):
    """Squeeze-and-Excitation: reweights channels by a learned gating vector."""

    def __init__(self, channels, reduction):
        super(SEModule, self).__init__()
        self.avg_pool = AdaptiveAvgPool2d(1)  # "squeeze": global spatial average
        # "Excite": bottleneck MLP implemented as 1x1 convolutions.
        self.fc1 = Conv2d(channels, channels // reduction, kernel_size=1, padding=0, bias=False)
        self.relu = ReLU(inplace=True)
        self.fc2 = Conv2d(channels // reduction, channels, kernel_size=1, padding=0, bias=False)
        self.sigmoid = Sigmoid()

    def forward(self, x):
        module_input = x
        x = self.avg_pool(x)
        x = self.fc1(x)
        x = self.relu(x)
        x = self.fc2(x)
        x = self.sigmoid(x)  # per-channel gate in (0, 1)
        return module_input * x
class bottleneck_IR(Module):
    """ArcFace "improved residual" unit: BN -> 3x3 conv -> PReLU -> strided
    3x3 conv -> BN, added to a stride-matching shortcut."""

    def __init__(self, in_channel, depth, stride):
        super(bottleneck_IR, self).__init__()
        if in_channel == depth:
            # Channel counts already match: shortcut only adjusts the stride.
            self.shortcut_layer = MaxPool2d(1, stride)
        else:
            # Project channels (and stride) with a 1x1 conv + BN.
            self.shortcut_layer = Sequential(
                Conv2d(in_channel, depth, (1, 1), stride, bias=False),
                BatchNorm2d(depth)
            )
        self.res_layer = Sequential(
            BatchNorm2d(in_channel),
            Conv2d(in_channel, depth, (3, 3), (1, 1), 1, bias=False), PReLU(depth),
            Conv2d(depth, depth, (3, 3), stride, 1, bias=False), BatchNorm2d(depth)
        )

    def forward(self, x):
        shortcut = self.shortcut_layer(x)
        res = self.res_layer(x)
        return res + shortcut
class bottleneck_IR_SE(Module):
    """bottleneck_IR variant with a Squeeze-and-Excitation block appended to
    the residual branch (reduction factor 16)."""

    def __init__(self, in_channel, depth, stride):
        super(bottleneck_IR_SE, self).__init__()
        if in_channel == depth:
            # Channel counts match: shortcut only adjusts the stride.
            self.shortcut_layer = MaxPool2d(1, stride)
        else:
            self.shortcut_layer = Sequential(
                Conv2d(in_channel, depth, (1, 1), stride, bias=False),
                BatchNorm2d(depth)
            )
        self.res_layer = Sequential(
            BatchNorm2d(in_channel),
            Conv2d(in_channel, depth, (3, 3), (1, 1), 1, bias=False),
            PReLU(depth),
            Conv2d(depth, depth, (3, 3), stride, 1, bias=False),
            BatchNorm2d(depth),
            SEModule(depth, 16)  # channel re-weighting on the residual path
        )

    def forward(self, x):
        shortcut = self.shortcut_layer(x)
        res = self.res_layer(x)
        return res + shortcut
class SeparableConv2d(torch.nn.Module):
    """Depthwise-separable conv: per-channel spatial filter, then 1x1 mixing."""

    def __init__(self, in_channels, out_channels, kernel_size, bias=False):
        super(SeparableConv2d, self).__init__()
        # groups=in_channels gives one independent spatial filter per channel.
        self.depthwise = Conv2d(in_channels, in_channels, kernel_size=kernel_size, groups=in_channels, bias=bias, padding=1)
        self.pointwise = Conv2d(in_channels, out_channels, kernel_size=1, bias=bias)

    def forward(self, x):
        spatial = self.depthwise(x)
        mixed = self.pointwise(spatial)
        return mixed
def _upsample_add(x, y):
"""Upsample and add two feature maps.
Args:
x: (Variable) top feature map to be upsampled.
y: (Variable) lateral feature map.
Returns:
(Variable) added feature map.
Note in PyTorch, when input size is odd, the upsampled feature map
with `F.upsample(..., scale_factor=2, mode='nearest')`
maybe not equal to the lateral feature map size.
e.g.
original input size: [N,_,15,15] ->
conv2d feature map size: [N,_,8,8] ->
upsampled feature map size: [N,_,16,16]
So we choose bilinear upsample which supports arbitrary output sizes.
"""
_, _, H, W = y.size()
return F.interpolate(x, size=(H, W), mode='bilinear', align_corners=True) + y
class SeparableBlock(Module):
    """Predict a per-sample separable convolution kernel from a feature vector.

    Two linear heads produce an "in" and an "out" factor per spatial tap; their
    per-tap outer product forms a rank-1 kernel which is then refined along
    both channel axes by small linear layers.
    Output shape: (batch, kernel_channels_out, kernel_channels_in, k, k).
    """

    def __init__(self, input_size, kernel_channels_in, kernel_channels_out, kernel_size):
        super(SeparableBlock, self).__init__()
        self.input_size = input_size
        self.kernel_size = kernel_size
        self.kernel_channels_in = kernel_channels_in
        self.kernel_channels_out = kernel_channels_out
        self.make_kernel_in = Linear(input_size, kernel_size * kernel_size * kernel_channels_in)
        self.make_kernel_out = Linear(input_size, kernel_size * kernel_size * kernel_channels_out)
        self.kernel_linear_in = Linear(kernel_channels_in, kernel_channels_in)
        self.kernel_linear_out = Linear(kernel_channels_out, kernel_channels_out)

    def forward(self, features):
        features = features.view(-1, self.input_size)
        # (batch, k, k, 1, C_in) and (batch, k, k, C_out, 1)
        kernel_in = self.make_kernel_in(features).view(-1, self.kernel_size, self.kernel_size, 1, self.kernel_channels_in)
        kernel_out = self.make_kernel_out(features).view(-1, self.kernel_size, self.kernel_size, self.kernel_channels_out, 1)
        # Per-tap outer product -> (batch, k, k, C_out, C_in)
        kernel = torch.matmul(kernel_out, kernel_in)
        # Refine along C_in, swap the channel axes, refine along C_out.
        kernel = self.kernel_linear_in(kernel).permute(0, 1, 2, 4, 3)
        kernel = self.kernel_linear_out(kernel)
        # -> (batch, C_out, C_in, k, k), the layout of a conv weight.
        kernel = kernel.permute(0, 4, 3, 1, 2)
        return kernel
| 5,916 | 30.473404 | 120 | py |
StyleMask | StyleMask-master/libs/models/StyleGAN2/model.py | import math
import random
import torch
from torch import nn
from torch.nn import functional as F
from .op import FusedLeakyReLU, fused_leaky_relu, upfirdn2d
class PixelNorm(nn.Module):
    """Normalize each feature vector to unit RMS across the channel dim."""

    def __init__(self):
        super().__init__()

    def forward(self, input):
        mean_square = torch.mean(input ** 2, dim=1, keepdim=True)
        return input * torch.rsqrt(mean_square + 1e-8)
def make_kernel(k):
    """Build a normalized 2-D FIR kernel; 1-D taps become their outer product."""
    kernel = torch.tensor(k, dtype=torch.float32)
    if kernel.ndim == 1:
        kernel = kernel[None, :] * kernel[:, None]
    kernel /= kernel.sum()  # normalize to unit DC gain
    return kernel
class Upsample(nn.Module):
    """FIR upsampling: zero-stuff by `factor`, then low-pass with `kernel`."""

    def __init__(self, kernel, factor=2):
        super().__init__()
        self.factor = factor
        # Scale by factor^2 to compensate for the energy spread over the
        # zeros inserted during upsampling.
        kernel = make_kernel(kernel) * (factor ** 2)
        self.register_buffer('kernel', kernel)
        # Asymmetric padding so the output is exactly factor * input size.
        p = kernel.shape[0] - factor
        pad0 = (p + 1) // 2 + factor - 1
        pad1 = p // 2
        self.pad = (pad0, pad1)

    def forward(self, input):
        out = upfirdn2d(input, self.kernel, up=self.factor, down=1, pad=self.pad)
        return out
class Downsample(nn.Module):
    """FIR downsampling: low-pass with `kernel`, then subsample by `factor`."""

    def __init__(self, kernel, factor=2):
        super().__init__()
        self.factor = factor
        kernel = make_kernel(kernel)
        self.register_buffer('kernel', kernel)
        # Padding so the output is exactly input size / factor.
        p = kernel.shape[0] - factor
        pad0 = (p + 1) // 2
        pad1 = p // 2
        self.pad = (pad0, pad1)

    def forward(self, input):
        out = upfirdn2d(input, self.kernel, up=1, down=self.factor, pad=self.pad)
        return out
class Blur(nn.Module):
    """Apply the FIR `kernel` at unchanged resolution (used around the
    up/downsampling convolutions)."""

    def __init__(self, kernel, pad, upsample_factor=1):
        super().__init__()
        kernel = make_kernel(kernel)
        if upsample_factor > 1:
            # Compensate for the zero-stuffing done by the preceding
            # transposed convolution.
            kernel = kernel * (upsample_factor ** 2)
        self.register_buffer('kernel', kernel)
        self.pad = pad

    def forward(self, input):
        out = upfirdn2d(input, self.kernel, pad=self.pad)
        return out
class EqualConv2d(nn.Module):
    """Conv2d with equalized learning rate.

    Weights are sampled from N(0, 1) and rescaled at runtime by
    1/sqrt(fan_in), so every layer sees gradients of comparable magnitude.
    """

    def __init__(
        self, in_channel, out_channel, kernel_size, stride=1, padding=0, bias=True
    ):
        super().__init__()

        self.weight = nn.Parameter(
            torch.randn(out_channel, in_channel, kernel_size, kernel_size)
        )
        fan_in = in_channel * kernel_size ** 2
        self.scale = 1 / math.sqrt(fan_in)

        self.stride = stride
        self.padding = padding

        self.bias = nn.Parameter(torch.zeros(out_channel)) if bias else None

    def forward(self, input):
        scaled_weight = self.weight * self.scale
        return F.conv2d(
            input,
            scaled_weight,
            bias=self.bias,
            stride=self.stride,
            padding=self.padding,
        )

    def __repr__(self):
        shape = self.weight.shape
        return (
            '{}({}, {},'.format(self.__class__.__name__, shape[1], shape[0]) +
            ' {}, stride={}, padding={})'.format(shape[2], self.stride, self.padding)
        )
class EqualLinear(nn.Module):
    """Linear layer with equalized learning rate (StyleGAN2).

    Weights are stored pre-divided by `lr_mul` and rescaled by
    `(1/sqrt(in_dim)) * lr_mul` on every forward pass, equalizing the
    effective learning rate across layers. With activation='fused_lrelu'
    the bias add and leaky ReLU run in the fused CUDA op.

    Args:
        in_dim / out_dim: feature sizes.
        bias: whether to learn a bias (initialized to `bias_init`).
        lr_mul: learning-rate multiplier for this layer.
        activation: None or 'fused_lrelu'.
    """

    def __init__(
        self, in_dim, out_dim, bias=True, bias_init=0, lr_mul=1, activation=None
    ):
        super().__init__()

        self.weight = nn.Parameter(torch.randn(out_dim, in_dim).div_(lr_mul))

        if bias:
            self.bias = nn.Parameter(torch.zeros(out_dim).fill_(bias_init))
        else:
            self.bias = None

        self.activation = activation

        self.scale = (1 / math.sqrt(in_dim)) * lr_mul
        self.lr_mul = lr_mul

    def forward(self, input):
        # Fix: the original unconditionally evaluated `self.bias * self.lr_mul`,
        # which raised TypeError whenever the layer was built with bias=False.
        bias = self.bias * self.lr_mul if self.bias is not None else None

        if self.activation:
            out = F.linear(input, self.weight * self.scale)
            out = fused_leaky_relu(out, bias)
        else:
            out = F.linear(input, self.weight * self.scale, bias=bias)

        return out

    def __repr__(self):
        return (
            '{}({}, {})'.format(self.__class__.__name__, self.weight.shape[1], self.weight.shape[0])
        )
class ScaledLeakyReLU(nn.Module):
    """Leaky ReLU scaled by sqrt(2) to keep activation variance roughly unit."""

    def __init__(self, negative_slope=0.2):
        super().__init__()
        self.negative_slope = negative_slope

    def forward(self, input):
        activated = F.leaky_relu(input, negative_slope=self.negative_slope)
        return math.sqrt(2) * activated
class ModulatedConv2d(nn.Module):
    """StyleGAN2 modulated convolution.

    The style vector scales the conv weight per input channel; with
    `demodulate` set, the weight is renormalized per output channel so the
    output keeps roughly unit variance. Implemented as a grouped convolution
    with one group per batch sample, giving each sample its own weight.
    """

    def __init__(
        self,
        in_channel,
        out_channel,
        kernel_size,
        style_dim,
        demodulate=True,
        upsample=False,
        downsample=False,
        blur_kernel=[1, 3, 3, 1],
    ):
        super().__init__()

        self.eps = 1e-8
        self.kernel_size = kernel_size
        self.in_channel = in_channel
        self.out_channel = out_channel
        self.upsample = upsample
        self.downsample = downsample

        if upsample:
            factor = 2
            # Blur pads chosen so the conv_transpose output lands on exactly 2x size.
            p = (len(blur_kernel) - factor) - (kernel_size - 1)
            pad0 = (p + 1) // 2 + factor - 1
            pad1 = p // 2 + 1
            self.blur = Blur(blur_kernel, pad=(pad0, pad1), upsample_factor=factor)

        if downsample:
            factor = 2
            p = (len(blur_kernel) - factor) + (kernel_size - 1)
            pad0 = (p + 1) // 2
            pad1 = p // 2
            self.blur = Blur(blur_kernel, pad=(pad0, pad1))

        fan_in = in_channel * kernel_size ** 2
        self.scale = 1 / math.sqrt(fan_in)  # equalized learning rate
        self.padding = kernel_size // 2

        self.weight = nn.Parameter(
            torch.randn(1, out_channel, in_channel, kernel_size, kernel_size)
        )

        # bias_init=1 so an untrained style leaves the weight unscaled.
        self.modulation = EqualLinear(style_dim, in_channel, bias_init=1)

        self.demodulate = demodulate

    def __repr__(self):
        return (
            '{}({}, {}, {}, '.format(self.__class__.__name__, self.in_channel, self.out_channel, self.kernel_size) +
            'upsample={}, downsample={})'.format(self.upsample, self.downsample)
        )

    def forward(self, input, style):
        batch, in_channel, height, width = input.shape

        # Per-sample, per-input-channel scales derived from the style vector.
        style = self.modulation(style).view(batch, 1, in_channel, 1, 1)
        weight = self.scale * self.weight * style

        if self.demodulate:
            # Renormalize each output filter to unit L2 norm per sample.
            demod = torch.rsqrt(weight.pow(2).sum([2, 3, 4]) + 1e-8)
            weight = weight * demod.view(batch, self.out_channel, 1, 1, 1)

        # Fold the batch into the filter dim for grouped convolution.
        weight = weight.view(
            batch * self.out_channel, in_channel, self.kernel_size, self.kernel_size
        )

        if self.upsample:
            input = input.view(1, batch * in_channel, height, width)
            # conv_transpose2d expects (in, out, kH, kW) per group.
            weight = weight.view(
                batch, self.out_channel, in_channel, self.kernel_size, self.kernel_size
            )
            weight = weight.transpose(1, 2).reshape(
                batch * in_channel, self.out_channel, self.kernel_size, self.kernel_size
            )
            out = F.conv_transpose2d(input, weight, padding=0, stride=2, groups=batch)
            _, _, height, width = out.shape
            out = out.view(batch, self.out_channel, height, width)
            out = self.blur(out)  # smooth the checkerboard from stride-2 transpose

        elif self.downsample:
            input = self.blur(input)  # anti-alias before subsampling
            _, _, height, width = input.shape
            input = input.view(1, batch * in_channel, height, width)
            out = F.conv2d(input, weight, padding=0, stride=2, groups=batch)
            _, _, height, width = out.shape
            out = out.view(batch, self.out_channel, height, width)

        else:
            input = input.view(1, batch * in_channel, height, width)
            out = F.conv2d(input, weight, padding=self.padding, groups=batch)
            _, _, height, width = out.shape
            out = out.view(batch, self.out_channel, height, width)

        return out
class NoiseInjection(nn.Module):
    """Add per-pixel noise scaled by a single learned weight (initialized 0)."""

    def __init__(self):
        super().__init__()
        self.weight = nn.Parameter(torch.zeros(1))

    def forward(self, image, noise=None):
        if noise is None:
            # Fresh single-channel noise map matching the image's spatial
            # size, broadcast across channels.
            batch, _, height, width = image.shape
            noise = image.new_empty(batch, 1, height, width).normal_()
        return image + self.weight * noise
class ConstantInput(nn.Module):
    """Learned constant tensor serving as the generator's first-layer input."""

    def __init__(self, channel, size=4):
        super().__init__()
        self.input = nn.Parameter(torch.randn(1, channel, size, size))

    def forward(self, input):
        # `input` is only consulted for its batch size.
        batch = input.shape[0]
        return self.input.repeat(batch, 1, 1, 1)
class StyledConv(nn.Module):
    """One StyleGAN2 synthesis layer: modulated conv -> noise -> fused LReLU."""

    def __init__(
        self,
        in_channel,
        out_channel,
        kernel_size,
        style_dim,
        upsample=False,
        blur_kernel=[1, 3, 3, 1],
        demodulate=True,
    ):
        super().__init__()

        self.conv = ModulatedConv2d(
            in_channel,
            out_channel,
            kernel_size,
            style_dim,
            upsample=upsample,
            blur_kernel=blur_kernel,
            demodulate=demodulate,
        )

        self.noise = NoiseInjection()
        # self.bias = nn.Parameter(torch.zeros(1, out_channel, 1, 1))
        # self.activate = ScaledLeakyReLU(0.2)
        # FusedLeakyReLU includes the bias add (compiled CUDA op).
        self.activate = FusedLeakyReLU(out_channel)

    def forward(self, input, style, noise=None):
        out = self.conv(input, style)
        out = self.noise(out, noise=noise)
        # out = out + self.bias
        out = self.activate(out)

        return out
class ToRGB(nn.Module):
    """Project features to 3-channel RGB and add the upsampled skip image."""

    def __init__(self, in_channel, style_dim, upsample=True, blur_kernel=[1, 3, 3, 1]):
        super().__init__()

        if upsample:
            self.upsample = Upsample(blur_kernel)

        # 1x1 modulated conv without demodulation, per the StyleGAN2 design.
        self.conv = ModulatedConv2d(in_channel, 3, 1, style_dim, demodulate=False)
        self.bias = nn.Parameter(torch.zeros(1, 3, 1, 1))

    def forward(self, input, style, skip=None):
        out = self.conv(input, style)
        out = out + self.bias

        if skip is not None:
            # Accumulate the lower-resolution RGB output (skip connection).
            skip = self.upsample(skip)
            out = out + skip

        return out
class Generator(nn.Module):
    """StyleGAN2 generator: mapping MLP plus progressive synthesis network.

    `forward` accepts a list of one or two latent codes (z by default, or w
    when input_is_latent=True), optionally applies truncation toward
    `truncation_latent`, and runs the synthesis stack: a learned constant
    4x4 input, then per-resolution pairs of StyledConvs with a ToRGB skip.
    """

    def __init__(
        self,
        size,
        style_dim,
        n_mlp,
        channel_multiplier=2,
        blur_kernel=[1, 3, 3, 1],
        lr_mlp=0.01,
    ):
        super().__init__()

        self.size = size

        self.style_dim = style_dim

        # Mapping network: PixelNorm then n_mlp equalized linears with a
        # reduced learning rate (lr_mlp).
        layers = [PixelNorm()]

        for i in range(n_mlp):
            layers.append(
                EqualLinear(
                    style_dim, style_dim, lr_mul=lr_mlp, activation='fused_lrelu'
                )
            )

        self.style = nn.Sequential(*layers)

        # Channel width per output resolution.
        self.channels = {
            4: 512,
            8: 512,
            16: 512,
            32: 512,
            64: 256 * channel_multiplier,
            128: 128 * channel_multiplier,
            256: 64 * channel_multiplier,
            512: 32 * channel_multiplier,
            1024: 16 * channel_multiplier,
        }

        self.input = ConstantInput(self.channels[4])
        self.conv1 = StyledConv(
            self.channels[4], self.channels[4], 3, style_dim, blur_kernel=blur_kernel
        )
        self.to_rgb1 = ToRGB(self.channels[4], style_dim, upsample=False)

        self.log_size = int(math.log(size, 2))
        self.num_layers = (self.log_size - 2) * 2 + 1

        self.convs = nn.ModuleList()
        self.upsamples = nn.ModuleList()
        self.to_rgbs = nn.ModuleList()
        self.noises = nn.Module()

        in_channel = self.channels[4]

        # Fixed per-layer noise buffers (used when randomize_noise=False).
        for layer_idx in range(self.num_layers):
            res = (layer_idx + 5) // 2
            shape = [1, 1, 2 ** res, 2 ** res]
            self.noises.register_buffer('noise_{}'.format(layer_idx), torch.randn(*shape))

        # Two StyledConvs (first upsampling) plus one ToRGB per resolution.
        for i in range(3, self.log_size + 1):
            out_channel = self.channels[2 ** i]

            self.convs.append(
                StyledConv(
                    in_channel,
                    out_channel,
                    3,
                    style_dim,
                    upsample=True,
                    blur_kernel=blur_kernel,
                )
            )

            self.convs.append(
                StyledConv(
                    out_channel, out_channel, 3, style_dim, blur_kernel=blur_kernel
                )
            )

            self.to_rgbs.append(ToRGB(out_channel, style_dim))

            in_channel = out_channel

        # Number of w vectors consumed by one full forward pass.
        self.n_latent = self.log_size * 2 - 2

    def make_noise(self):
        """Return freshly sampled per-layer noise tensors (on CUDA)."""
        # device = self.input.input.device

        noises = [torch.randn(1, 1, 2 ** 2, 2 ** 2).cuda()]

        for i in range(3, self.log_size + 1):
            for _ in range(2):
                noises.append(torch.randn(1, 1, 2 ** i, 2 ** i).cuda())

        return noises

    def mean_latent(self, n_latent):
        """Estimate the mean w by mapping n_latent random z vectors."""
        latent_in = torch.randn(
            n_latent, self.style_dim).cuda()
        latent = self.style(latent_in).mean(0, keepdim=True)

        return latent

    def get_latent(self, input):
        """Map a z code to w via the mapping network."""
        return self.style(input)

    def forward(
        self,
        styles,
        return_latents=False,
        return_features=False,
        inject_index=None,
        truncation=1,
        truncation_latent=None,
        input_is_latent=False,
        noise=None,
        randomize_noise=False
    ):
        # Map z -> w unless the caller already supplies w latents.
        if not input_is_latent:
            styles = [self.style(s) for s in styles]

        if noise is None:
            if randomize_noise:
                noise = [None] * self.num_layers  # NoiseInjection samples fresh noise
            else:
                noise = [getattr(self.noises, 'noise_{}'.format(i)) for i in range(self.num_layers)]

        # Truncation trick: pull each w toward the mean latent.
        if truncation < 1:
            style_t = []

            for style in styles:
                style_t.append(
                    truncation_latent + truncation * (style - truncation_latent)
                )

            styles = style_t

        if len(styles) < 2:
            # Single code: broadcast it to all layers (unless already W+).
            inject_index = self.n_latent

            if styles[0].ndim < 3:
                latent = styles[0].unsqueeze(1).repeat(1, inject_index, 1)

            else:
                latent = styles[0]

        else:
            # Style mixing: first code up to inject_index, second afterwards.
            if inject_index is None:
                inject_index = random.randint(1, self.n_latent - 1)

            latent = styles[0].unsqueeze(1).repeat(1, inject_index, 1)
            latent2 = styles[1].unsqueeze(1).repeat(1, self.n_latent - inject_index, 1)

            latent = torch.cat([latent, latent2], 1)

        out = self.input(latent)
        out = self.conv1(out, latent[:, 0], noise=noise[0])

        skip = self.to_rgb1(out, latent[:, 1])

        i = 1
        count = 1  # (unused)
        # Walk conv pairs with their noise maps and the per-resolution ToRGB.
        for conv1, conv2, noise1, noise2, to_rgb in zip(
            self.convs[::2], self.convs[1::2], noise[1::2], noise[2::2], self.to_rgbs
        ):
            out = conv1(out, latent[:, i], noise=noise1)
            out = conv2(out, latent[:, i + 1], noise=noise2)
            skip = to_rgb(out, latent[:, i + 2], skip)
            i += 2

        image = skip

        if return_latents:
            return image, latent

        else:
            return image, None
class ConvLayer(nn.Sequential):
    """EqualConv2d with optional FIR-blur downsampling and fused activation.

    Built as a Sequential of [optional Blur] -> EqualConv2d -> [activation].
    When activate and bias are both set, the bias lives inside FusedLeakyReLU
    rather than the conv itself.
    """

    def __init__(
        self,
        in_channel,
        out_channel,
        kernel_size,
        downsample=False,
        blur_kernel=[1, 3, 3, 1],
        bias=True,
        activate=True,
    ):
        layers = []

        if downsample:
            # Anti-alias blur before the stride-2 convolution.
            factor = 2
            p = (len(blur_kernel) - factor) + (kernel_size - 1)
            pad0 = (p + 1) // 2
            pad1 = p // 2

            layers.append(Blur(blur_kernel, pad=(pad0, pad1)))

            stride = 2
            self.padding = 0

        else:
            stride = 1
            self.padding = kernel_size // 2

        layers.append(
            EqualConv2d(
                in_channel,
                out_channel,
                kernel_size,
                padding=self.padding,
                stride=stride,
                # Bias is applied inside FusedLeakyReLU when activating.
                bias=bias and not activate,
            )
        )

        if activate:
            if bias:
                layers.append(FusedLeakyReLU(out_channel))

            else:
                layers.append(ScaledLeakyReLU(0.2))

        super().__init__(*layers)
class ResBlock(nn.Module):
    """Residual downsampling block: two convs plus a 1x1 skip, scaled by 1/sqrt(2)."""

    def __init__(self, in_channel, out_channel, blur_kernel=[1, 3, 3, 1]):
        super().__init__()

        self.conv1 = ConvLayer(in_channel, in_channel, 3)
        self.conv2 = ConvLayer(in_channel, out_channel, 3, downsample=True)
        self.skip = ConvLayer(
            in_channel, out_channel, 1, downsample=True, activate=False, bias=False
        )

    def forward(self, input):
        residual = self.conv2(self.conv1(input))
        shortcut = self.skip(input)
        # Divide by sqrt(2) so the sum keeps the activation variance unchanged.
        return (residual + shortcut) / math.sqrt(2)
class Discriminator(nn.Module):
    """StyleGAN2 discriminator: ResBlock pyramid + minibatch stddev + MLP head."""

    def __init__(self, size, channel_multiplier=2, blur_kernel=[1, 3, 3, 1]):
        super().__init__()

        channels = {
            4: 512,
            8: 512,
            16: 512,
            32: 512,
            64: 256 * channel_multiplier,
            128: 128 * channel_multiplier,
            256: 64 * channel_multiplier,
            512: 32 * channel_multiplier,
            1024: 16 * channel_multiplier,
        }

        convs = [ConvLayer(3, channels[size], 1)]

        log_size = int(math.log(size, 2))

        in_channel = channels[size]

        # One downsampling ResBlock per halving from `size` down to 4.
        for i in range(log_size, 2, -1):
            out_channel = channels[2 ** (i - 1)]

            convs.append(ResBlock(in_channel, out_channel, blur_kernel))

            in_channel = out_channel

        self.convs = nn.Sequential(*convs)

        self.stddev_group = 4
        self.stddev_feat = 1

        # +1 channel for the minibatch-stddev feature appended in forward().
        self.final_conv = ConvLayer(in_channel + 1, channels[4], 3)
        self.final_linear = nn.Sequential(
            EqualLinear(channels[4] * 4 * 4, channels[4], activation='fused_lrelu'),
            EqualLinear(channels[4], 1),
        )

    def forward(self, input):
        out = self.convs(input)

        batch, channel, height, width = out.shape
        # Minibatch standard deviation: per-group stddev averaged over
        # features and space, appended as one extra constant channel.
        group = min(batch, self.stddev_group)
        stddev = out.view(
            group, -1, self.stddev_feat, channel // self.stddev_feat, height, width
        )
        stddev = torch.sqrt(stddev.var(0, unbiased=False) + 1e-8)
        # NOTE(review): `keepdims` relies on torch's NumPy-compat alias for
        # `keepdim` — confirm the minimum supported torch version provides it.
        stddev = stddev.mean([2, 3, 4], keepdims=True).squeeze(2)
        stddev = stddev.repeat(group, 1, height, width)
        out = torch.cat([out, stddev], 1)

        out = self.final_conv(out)

        out = out.view(batch, -1)
        out = self.final_linear(out)

        return out
class Encoder(nn.Module):
    """Convolutional encoder from an image to W+ latents.

    A discriminator-style ResBlock pyramid reduces the image to 4x4, and a
    final 4x4 conv (no padding) produces n_latents * w_dim values, reshaped
    to (batch, n_latents, w_dim).
    """

    def __init__(self, size, w_dim=512):
        super().__init__()

        channels = {
            4: 512,
            8: 512,
            16: 512,
            32: 512,
            64: 256,
            128: 128,
            256: 64,
            512: 32,
            1024: 16
        }

        self.w_dim = w_dim
        log_size = int(math.log(size, 2))
        # One w per synthesis layer, matching Generator.n_latent.
        self.n_latents = log_size*2 - 2

        convs = [ConvLayer(3, channels[size], 1)]

        in_channel = channels[size]
        for i in range(log_size, 2, -1):
            out_channel = channels[2 ** (i - 1)]
            convs.append(ResBlock(in_channel, out_channel))
            in_channel = out_channel

        # 4x4 conv with no padding collapses the spatial dims to 1x1.
        convs.append(EqualConv2d(in_channel, self.n_latents*self.w_dim, 4, padding=0, bias=False))

        self.convs = nn.Sequential(*convs)

    def forward(self, input):
        out = self.convs(input)
        # print('Encoder weights: {:.5f}'.format( torch.sum(torch.abs( self.convs[0][0].weight.data )) ))
        return out.view(len(input), self.n_latents, self.w_dim)
StyleMask | StyleMask-master/libs/models/StyleGAN2/convert_weight.py | import argparse
import os
import sys
import pickle
import math
import torch
import numpy as np
from torchvision import utils
from models.StyleGAN2.model import Generator, Discriminator
def convert_modconv(vars, source_name, target_name, flip=False):
    """Convert one TF StyleGAN2 modulated-conv layer to PyTorch state-dict entries.

    `vars` is the TF variable dict of the pickled network; values are pulled
    out by evaluating them in the active TF session. `flip` spatially flips
    the conv weight (used for the Conv0_up layers, which the PyTorch port
    implements with transposed convolution).
    """
    weight = vars[source_name + '/weight'].value().eval()
    mod_weight = vars[source_name + '/mod_weight'].value().eval()
    mod_bias = vars[source_name + '/mod_bias'].value().eval()
    noise = vars[source_name + '/noise_strength'].value().eval()
    bias = vars[source_name + '/bias'].value().eval()

    dic = {
        # TF stores conv weights HWIO; ModulatedConv2d expects (1, O, I, H, W).
        'conv.weight': np.expand_dims(weight.transpose((3, 2, 0, 1)), 0),
        'conv.modulation.weight': mod_weight.transpose((1, 0)),
        # +1 matches EqualLinear(bias_init=1) used by ModulatedConv2d.modulation.
        'conv.modulation.bias': mod_bias + 1,
        'noise.weight': np.array([noise]),
        'activate.bias': bias,
    }

    dic_torch = {}

    for k, v in dic.items():
        dic_torch[target_name + '.' + k] = torch.from_numpy(v)

    if flip:
        dic_torch[target_name + '.conv.weight'] = torch.flip(
            dic_torch[target_name + '.conv.weight'], [3, 4]
        )

    return dic_torch
def convert_conv(vars, source_name, target_name, bias=True, start=0):
    """Convert a plain TF conv layer to PyTorch Sequential entries.

    `start` is the index of the conv inside the target Sequential (the bias
    lands on the following FusedLeakyReLU module at index start+1).
    """
    weight = vars[source_name + '/weight'].value().eval()

    dic = {'weight': weight.transpose((3, 2, 0, 1))}  # HWIO -> OIHW

    if bias:
        dic['bias'] = vars[source_name + '/bias'].value().eval()

    dic_torch = {}

    dic_torch[target_name + '.{}.weight'.format(start)] = torch.from_numpy(dic['weight'])

    if bias:
        dic_torch[target_name + '.{}.bias'.format(start + 1)] = torch.from_numpy(dic['bias'])

    return dic_torch
def convert_torgb(vars, source_name, target_name):
    """Convert a TF ToRGB layer (modulated 1x1 conv + bias) to PyTorch entries."""
    weight = vars[source_name + '/weight'].value().eval()
    mod_weight = vars[source_name + '/mod_weight'].value().eval()
    mod_bias = vars[source_name + '/mod_bias'].value().eval()
    bias = vars[source_name + '/bias'].value().eval()

    dic = {
        # HWIO -> (1, O, I, H, W) to match ModulatedConv2d's weight layout.
        'conv.weight': np.expand_dims(weight.transpose((3, 2, 0, 1)), 0),
        'conv.modulation.weight': mod_weight.transpose((1, 0)),
        # +1 matches EqualLinear(bias_init=1) in the modulation layer.
        'conv.modulation.bias': mod_bias + 1,
        'bias': bias.reshape((1, 3, 1, 1)),
    }

    dic_torch = {}

    for k, v in dic.items():
        dic_torch[target_name + '.' + k] = torch.from_numpy(v)

    return dic_torch
def convert_dense(vars, source_name, target_name):
    """Convert a TF dense layer to PyTorch linear entries (weight transposed)."""
    weight = vars[source_name + '/weight'].value().eval()
    bias = vars[source_name + '/bias'].value().eval()

    dic = {'weight': weight.transpose((1, 0)), 'bias': bias}

    dic_torch = {}

    for k, v in dic.items():
        dic_torch[target_name + '.' + k] = torch.from_numpy(v)

    return dic_torch
def update(state_dict, new):
    """Copy every entry of `new` into `state_dict`, validating key and shape.

    Raises KeyError for unknown keys and ValueError on a shape mismatch so a
    bad TF->PyTorch mapping fails loudly instead of corrupting the checkpoint.
    """
    for key, tensor in new.items():
        if key not in state_dict:
            raise KeyError(key + ' is not found')

        if tensor.shape != state_dict[key].shape:
            raise ValueError('Shape mismatch: {} vs {}'.format(tensor.shape, state_dict[key].shape))

        state_dict[key] = tensor
def discriminator_fill_statedict(statedict, vars, size):
    """Populate a PyTorch Discriminator state dict from TF variables.

    Walks from the full resolution down to 8x8, mapping each TF block's
    FromRGB / Conv0 / Conv1_down / Skip layers onto the corresponding
    ResBlock, then converts the 4x4 head.
    """
    log_size = int(math.log(size, 2))

    update(statedict, convert_conv(vars, '{}x{}/FromRGB'.format(size, size), 'convs.0'))

    conv_i = 1

    for i in range(log_size - 2, 0, -1):
        reso = 4 * 2 ** i
        update(statedict, convert_conv(vars, '{}x{}/Conv0'.format(reso, reso), 'convs.{}.conv1'.format(conv_i)))
        # start=1 because ResBlock's downsampling layers begin with a Blur.
        update(statedict, convert_conv(vars, '{}x{}/Conv1_down'.format(reso, reso), 'convs.{}.conv2'.format(conv_i), start=1))
        update(statedict, convert_conv(vars, '{}x{}/Skip'.format(reso, reso), 'convs.{}.skip'.format(conv_i), start=1, bias=False))
        conv_i += 1

    update(statedict, convert_conv(vars, '4x4/Conv', 'final_conv'))
    update(statedict, convert_dense(vars, '4x4/Dense0', 'final_linear.0'))
    update(statedict, convert_dense(vars, 'Output', 'final_linear.1'))

    return statedict
def fill_statedict(state_dict, vars, size):
    """Populate a PyTorch Generator state dict from TF StyleGAN2 variables.

    Converts, in order: the 8-layer mapping network, the learned constant
    input, all ToRGB layers, the first styled conv, the per-resolution conv
    pairs (Conv0_up flipped for the transposed-conv port), and the fixed
    noise buffers.
    """
    log_size = int(math.log(size, 2))

    # Mapping network (style.0 is PixelNorm, so dense i -> style.{i+1}).
    for i in range(8):
        update(state_dict, convert_dense(vars, 'G_mapping/Dense{}'.format(i), 'style.{}'.format(i + 1)))

    # Learned 4x4 constant input.
    update(
        state_dict,
        {
            'input.input': torch.from_numpy(
                vars['G_synthesis/4x4/Const/const'].value().eval()
            )
        },
    )

    # ToRGB layers at every resolution.
    update(state_dict, convert_torgb(vars, 'G_synthesis/4x4/ToRGB', 'to_rgb1'))

    for i in range(log_size - 2):
        reso = 4 * 2 ** (i + 1)
        update(
            state_dict,
            convert_torgb(vars, 'G_synthesis/{}x{}/ToRGB'.format(reso, reso), 'to_rgbs.{}'.format(i)),
        )

    # First styled conv at 4x4, then the upsample/refine pair per resolution.
    update(state_dict, convert_modconv(vars, 'G_synthesis/4x4/Conv', 'conv1'))

    conv_i = 0

    for i in range(log_size - 2):
        reso = 4 * 2 ** (i + 1)
        update(
            state_dict,
            convert_modconv(
                vars,
                'G_synthesis/{}x{}/Conv0_up'.format(reso, reso),
                'convs.{}'.format(conv_i),
                # Flip because the PyTorch port realizes upsampling with
                # conv_transpose2d.
                flip=True,
            ),
        )
        update(
            state_dict,
            convert_modconv(
                vars,
                'G_synthesis/{}x{}/Conv1'.format(reso, reso),
                'convs.{}'.format(conv_i + 1)
            ),
        )
        conv_i += 2

    # Fixed per-layer noise buffers.
    for i in range(0, (log_size - 2) * 2 + 1):
        update(
            state_dict,
            {
                'noises.noise_{}'.format(i): torch.from_numpy(
                    vars['G_synthesis/noise{}'.format(i)].value().eval()
                )
            },
        )

    return state_dict
if __name__ == '__main__':
    device = 'cuda'

    parser = argparse.ArgumentParser()
    parser.add_argument('--repo', type=str, required=True)  # path to the official TF StyleGAN2 repo (for dnnlib)
    parser.add_argument('--gen', action='store_true')       # also convert the trainable generator
    parser.add_argument('--disc', action='store_true')      # also convert the discriminator
    parser.add_argument('path', metavar='PATH')             # TF .pkl checkpoint to convert

    args = parser.parse_args()

    sys.path.append(args.repo)

    import dnnlib
    from dnnlib import tflib

    tflib.init_tf()

    # NOTE: pickle.load executes arbitrary code — only convert trusted checkpoints.
    with open(args.path, 'rb') as f:
        generator, discriminator, g_ema = pickle.load(f)

    size = g_ema.output_shape[2]

    g = Generator(size, 512, 8)
    state_dict = g.state_dict()
    state_dict = fill_statedict(state_dict, g_ema.vars, size)

    g.load_state_dict(state_dict)

    latent_avg = torch.from_numpy(g_ema.vars['dlatent_avg'].value().eval())

    ckpt = {'g_ema': state_dict, 'latent_avg': latent_avg}

    if args.gen:
        g_train = Generator(size, 512, 8)
        g_train_state = g_train.state_dict()
        g_train_state = fill_statedict(g_train_state, generator.vars, size)
        ckpt['g'] = g_train_state

    if args.disc:
        disc = Discriminator(size)
        d_state = disc.state_dict()
        d_state = discriminator_fill_statedict(d_state, discriminator.vars, size)
        ckpt['d'] = d_state

    name = os.path.splitext(os.path.basename(args.path))[0]
    torch.save(ckpt, name + '.pt')

    # Sanity check: run the same z batch through TF and PyTorch and save a
    # side-by-side grid (TF row, PyTorch row, difference row).
    batch_size = {256: 16, 512: 9, 1024: 4}
    n_sample = batch_size.get(size, 25)

    g = g.to(device)

    z = np.random.RandomState(0).randn(n_sample, 512).astype('float32')

    with torch.no_grad():
        img_pt, _ = g([torch.from_numpy(z).to(device)], truncation=0.5, truncation_latent=latent_avg.to(device))

    Gs_kwargs = dnnlib.EasyDict()
    Gs_kwargs.randomize_noise = False
    img_tf = g_ema.run(z, None, **Gs_kwargs)
    img_tf = torch.from_numpy(img_tf).to(device)

    img_diff = ((img_pt + 1) / 2).clamp(0.0, 1.0) - ((img_tf.to(device) + 1) / 2).clamp(0.0, 1.0)

    img_concat = torch.cat((img_tf, img_pt, img_diff), dim=0)
    utils.save_image(img_concat, name + '.png', nrow=n_sample, normalize=True, range=(-1, 1))
| 7,718 | 29.152344 | 131 | py |
StyleMask | StyleMask-master/libs/models/StyleGAN2/op/upfirdn2d.py | import os
import torch
from torch.autograd import Function
from torch.utils.cpp_extension import load
module_path = os.path.dirname(__file__)
upfirdn2d_op = load(
'upfirdn2d',
sources=[
os.path.join(module_path, 'upfirdn2d.cpp'),
os.path.join(module_path, 'upfirdn2d_kernel.cu'),
],
)
class UpFirDn2dBackward(Function):
    """Backward pass for UpFirDn2d, itself differentiable (double backward).

    The gradient of upfirdn2d is another upfirdn2d with up/down swapped, the
    flipped kernel, and adjusted padding (g_pad, precomputed in the forward
    pass of UpFirDn2d).
    """

    @staticmethod
    def forward(
        ctx, grad_output, kernel, grad_kernel, up, down, pad, g_pad, in_size, out_size
    ):

        up_x, up_y = up
        down_x, down_y = down
        g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1 = g_pad

        grad_output = grad_output.reshape(-1, out_size[0], out_size[1], 1)

        # Adjoint op: roles of up and down are swapped, kernel pre-flipped.
        grad_input = upfirdn2d_op.upfirdn2d(
            grad_output,
            grad_kernel,
            down_x,
            down_y,
            up_x,
            up_y,
            g_pad_x0,
            g_pad_x1,
            g_pad_y0,
            g_pad_y1,
        )
        grad_input = grad_input.view(in_size[0], in_size[1], in_size[2], in_size[3])

        ctx.save_for_backward(kernel)

        pad_x0, pad_x1, pad_y0, pad_y1 = pad

        # Stash the original op's geometry for the double-backward pass.
        ctx.up_x = up_x
        ctx.up_y = up_y
        ctx.down_x = down_x
        ctx.down_y = down_y
        ctx.pad_x0 = pad_x0
        ctx.pad_x1 = pad_x1
        ctx.pad_y0 = pad_y0
        ctx.pad_y1 = pad_y1
        ctx.in_size = in_size
        ctx.out_size = out_size

        return grad_input

    @staticmethod
    def backward(ctx, gradgrad_input):
        kernel, = ctx.saved_tensors

        gradgrad_input = gradgrad_input.reshape(-1, ctx.in_size[2], ctx.in_size[3], 1)

        # Double backward replays the original forward upfirdn2d.
        gradgrad_out = upfirdn2d_op.upfirdn2d(
            gradgrad_input,
            kernel,
            ctx.up_x,
            ctx.up_y,
            ctx.down_x,
            ctx.down_y,
            ctx.pad_x0,
            ctx.pad_x1,
            ctx.pad_y0,
            ctx.pad_y1,
        )
        # gradgrad_out = gradgrad_out.view(ctx.in_size[0], ctx.out_size[0], ctx.out_size[1], ctx.in_size[3])
        gradgrad_out = gradgrad_out.view(
            ctx.in_size[0], ctx.in_size[1], ctx.out_size[0], ctx.out_size[1]
        )

        return gradgrad_out, None, None, None, None, None, None, None, None
class UpFirDn2d(Function):
    """Autograd wrapper around the compiled upfirdn2d CUDA op.

    Input is (batch, channel, in_h, in_w); internally reshaped to the op's
    (N, H, W, 1) layout. The forward pass also precomputes the padding
    needed by the gradient op (g_pad).
    """

    @staticmethod
    def forward(ctx, input, kernel, up, down, pad):
        up_x, up_y = up
        down_x, down_y = down
        pad_x0, pad_x1, pad_y0, pad_y1 = pad

        kernel_h, kernel_w = kernel.shape
        batch, channel, in_h, in_w = input.shape
        ctx.in_size = input.shape

        input = input.reshape(-1, in_h, in_w, 1)

        # Save the kernel and its flip (the flip is the gradient's kernel).
        ctx.save_for_backward(kernel, torch.flip(kernel, [0, 1]))

        out_h = (in_h * up_y + pad_y0 + pad_y1 - kernel_h) // down_y + 1
        out_w = (in_w * up_x + pad_x0 + pad_x1 - kernel_w) // down_x + 1
        ctx.out_size = (out_h, out_w)

        ctx.up = (up_x, up_y)
        ctx.down = (down_x, down_y)
        ctx.pad = (pad_x0, pad_x1, pad_y0, pad_y1)

        # Padding for the adjoint (gradient) upfirdn2d.
        g_pad_x0 = kernel_w - pad_x0 - 1
        g_pad_y0 = kernel_h - pad_y0 - 1
        g_pad_x1 = in_w * up_x - out_w * down_x + pad_x0 - up_x + 1
        g_pad_y1 = in_h * up_y - out_h * down_y + pad_y0 - up_y + 1

        ctx.g_pad = (g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1)

        out = upfirdn2d_op.upfirdn2d(
            input, kernel, up_x, up_y, down_x, down_y, pad_x0, pad_x1, pad_y0, pad_y1
        )
        # out = out.view(major, out_h, out_w, minor)
        out = out.view(-1, channel, out_h, out_w)

        return out

    @staticmethod
    def backward(ctx, grad_output):
        kernel, grad_kernel = ctx.saved_tensors

        grad_input = UpFirDn2dBackward.apply(
            grad_output,
            kernel,
            grad_kernel,
            ctx.up,
            ctx.down,
            ctx.pad,
            ctx.g_pad,
            ctx.in_size,
            ctx.out_size,
        )

        return grad_input, None, None, None, None
def upfirdn2d(input, kernel, up=1, down=1, pad=(0, 0)):
    """Upsample by `up`, FIR-filter with `kernel`, then downsample by `down`.

    `pad` = (pad0, pad1) is applied identically on the x and y axes.
    Dispatches to the compiled CUDA op via the UpFirDn2d autograd Function.
    """
    out = UpFirDn2d.apply(
        input, kernel, (up, up), (down, down), (pad[0], pad[1], pad[0], pad[1])
    )

    return out
def upfirdn2d_native(
    input, kernel, up_x, up_y, down_x, down_y, pad_x0, pad_x1, pad_y0, pad_y1
):
    """Pure-PyTorch upsample -> FIR filter -> downsample.

    `input` is (batch, in_h, in_w, minor); the flipped kernel is applied
    via conv2d after zero-insertion upsampling and (possibly negative)
    padding. Negative pads become crops.
    """
    _, in_h, in_w, minor = input.shape
    kernel_h, kernel_w = kernel.shape

    # Zero-insertion upsampling: interleave (up - 1) zeros after each sample.
    x = input.view(-1, in_h, 1, in_w, 1, minor)
    x = F.pad(x, [0, 0, 0, up_x - 1, 0, 0, 0, up_y - 1])
    x = x.view(-1, in_h * up_y, in_w * up_x, minor)

    # Apply positive padding with F.pad, then crop any negative padding.
    x = F.pad(
        x, [0, 0, max(pad_x0, 0), max(pad_x1, 0), max(pad_y0, 0), max(pad_y1, 0)]
    )
    y_lo, y_hi = max(-pad_y0, 0), x.shape[1] - max(-pad_y1, 0)
    x_lo, x_hi = max(-pad_x0, 0), x.shape[2] - max(-pad_x1, 0)
    x = x[:, y_lo:y_hi, x_lo:x_hi, :]

    # Filter every plane independently with the flipped kernel.
    x = x.permute(0, 3, 1, 2)
    x = x.reshape(
        [-1, 1, in_h * up_y + pad_y0 + pad_y1, in_w * up_x + pad_x0 + pad_x1]
    )
    w = torch.flip(kernel, [0, 1]).view(1, 1, kernel_h, kernel_w)
    x = F.conv2d(x, w)
    x = x.reshape(
        -1,
        minor,
        in_h * up_y + pad_y0 + pad_y1 - kernel_h + 1,
        in_w * up_x + pad_x0 + pad_x1 - kernel_w + 1,
    )
    x = x.permute(0, 2, 3, 1)

    # Strided slicing performs the final downsampling.
    return x[:, ::down_y, ::down_x, :]
| 5,186 | 26.590426 | 108 | py |
StyleMask | StyleMask-master/libs/models/StyleGAN2/op/fused_act.py | import os
import torch
from torch import nn
from torch.autograd import Function
from torch.utils.cpp_extension import load
# JIT-compile (or fetch from torch's extension cache) the fused
# bias + activation CUDA kernel that ships next to this file.
module_path = os.path.dirname(__file__)
fused = load(
    'fused',
    sources=[
        os.path.join(module_path, 'fused_bias_act.cpp'),
        os.path.join(module_path, 'fused_bias_act_kernel.cu'),
    ],
)
class FusedLeakyReLUFunctionBackward(Function):
    """Backward pass of the fused bias + leaky-ReLU op.

    Implemented as its own autograd Function so that double backward
    (e.g. gradient-penalty terms) also runs through the CUDA kernel.
    """
    @staticmethod
    def forward(ctx, grad_output, out, negative_slope, scale):
        ctx.save_for_backward(out)
        ctx.negative_slope = negative_slope
        ctx.scale = scale
        empty = grad_output.new_empty(0)
        # The 3/1 flags select the gradient path in the CUDA kernel
        # (presumably act=leaky_relu, grad=1 — see fused_bias_act.cpp).
        grad_input = fused.fused_bias_act(
            grad_output, empty, out, 3, 1, negative_slope, scale
        )
        # Bias gradient: sum over every dimension except channels (dim 1).
        dim = [0]
        if grad_input.ndim > 2:
            dim += list(range(2, grad_input.ndim))
        grad_bias = grad_input.sum(dim).detach()
        return grad_input, grad_bias
    @staticmethod
    def backward(ctx, gradgrad_input, gradgrad_bias):
        out, = ctx.saved_tensors
        gradgrad_out = fused.fused_bias_act(
            gradgrad_input, gradgrad_bias, out, 3, 1, ctx.negative_slope, ctx.scale
        )
        # Gradients only w.r.t. grad_output; none for out/slope/scale.
        return gradgrad_out, None, None, None
class FusedLeakyReLUFunction(Function):
    """Autograd Function for the fused (bias add + scaled leaky ReLU) kernel."""
    @staticmethod
    def forward(ctx, input, bias, negative_slope, scale):
        empty = input.new_empty(0)
        # 3/0 flags: forward evaluation of the activation (see fused_bias_act.cpp).
        out = fused.fused_bias_act(input, bias, empty, 3, 0, negative_slope, scale)
        # Only the output is needed to recompute gradients of leaky ReLU.
        ctx.save_for_backward(out)
        ctx.negative_slope = negative_slope
        ctx.scale = scale
        return out
    @staticmethod
    def backward(ctx, grad_output):
        out, = ctx.saved_tensors
        grad_input, grad_bias = FusedLeakyReLUFunctionBackward.apply(
            grad_output, out, ctx.negative_slope, ctx.scale
        )
        return grad_input, grad_bias, None, None
class FusedLeakyReLU(nn.Module):
    """Leaky ReLU with a learnable per-channel bias, fused into one kernel."""

    def __init__(self, channel, negative_slope=0.2, scale=2 ** 0.5):
        super().__init__()
        # One learnable bias value per channel, initialized to zero.
        self.bias = nn.Parameter(torch.zeros(channel))
        self.negative_slope = negative_slope
        self.scale = scale

    def forward(self, input):
        # Delegate to the functional wrapper around the fused CUDA op.
        return fused_leaky_relu(input, self.bias, self.negative_slope, self.scale)
def fused_leaky_relu(input, bias, negative_slope=0.2, scale=2 ** 0.5):
    """Functional form of FusedLeakyReLU (bias add + scaled leaky ReLU)."""
    out = FusedLeakyReLUFunction.apply(input, bias, negative_slope, scale)
    return out
| 2,379 | 26.356322 | 83 | py |
StyleMask | StyleMask-master/libs/utilities/image_utils.py | import torch
import numpy as np
import cv2
import torchvision
import os
" Read image from path"
def read_image_opencv(image_path):
    """Load an image from disk and return it as an RGB uint8 numpy array."""
    bgr = cv2.imread(image_path, cv2.IMREAD_COLOR)  # OpenCV decodes in BGR order
    rgb = cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB)
    return rgb.astype('uint8')
" image numpy array to tensor [-1,1] range "
def image_to_tensor(image):
    """Convert an HxWx3 uint8 numpy image into a CHW float tensor in [-1, 1]."""
    max_val = 1
    min_val = -1
    if image.shape[0] > 256:
        # NOTE(review): `image_resize` is not defined or imported in this
        # file (only torch_image_resize is) — confirm where it comes from.
        image, _ = image_resize(image, 256)
    chw = np.transpose(image, (2, 0, 1))
    scaled = torch.tensor(chw).float().div(255.0)
    return scaled * (max_val - min_val) + min_val
def tensor_to_255(image):
    """Map a tensor from [-1, 1] to the [0, 255] intensity range (returns a copy)."""
    lo = -1
    hi = 1
    out = image.clone()
    out.clamp_(min=lo, max=hi)
    # The tiny epsilon in the divisor mirrors the original scaling exactly.
    out.sub_(lo).div_(hi - lo + 1e-5)
    return out.mul(255.0).add(0.0)
def torch_image_resize(image, width = None, height = None):
    """Resize a CHW tensor to the given width OR height, keeping aspect ratio.

    Exactly one of `width` / `height` should be given; if both are None the
    input is returned unchanged. When `width` is None the image is scaled so
    its height equals `height` (and vice versa).

    Changes vs. original: removed the unused `scale` local and the trailing
    dataset artifact; behavior is otherwise identical.
    """
    (h, w) = image.shape[1:]
    if width is None and height is None:
        # Nothing requested — return the original tensor untouched.
        return image
    if width is None:
        # Fix the height; scale the width by the same ratio.
        r = height / float(h)
        dim = (height, int(w * r))
    else:
        # Fix the width; scale the height by the same ratio.
        r = width / float(w)
        dim = (int(h * r), width)
    # interpolate expects a batch dimension, so add and remove one.
    image = image.unsqueeze(0)
    image = torch.nn.functional.interpolate(image, size=dim, mode='bilinear')
    return image.squeeze(0)
StyleMask | StyleMask-master/libs/utilities/stylespace_utils.py | import torch
import numpy as np
from torch.nn import functional as F
import os
import math
def conv_warper(layer, input, style, noise):
    """Run one StyleGAN2 styled-conv `layer` with an externally supplied
    per-channel style code instead of the layer's own modulation output.

    `style` is the already-computed modulation vector (output of
    conv.modulation); `noise` is this layer's noise map.
    """
    # the conv should change
    conv = layer.conv
    batch, in_channel, height, width = input.shape
    style = style.view(batch, 1, in_channel, 1, 1)
    # Modulation: scale every input channel of the weight by the style code.
    weight = conv.scale * conv.weight * style
    if conv.demodulate:
        # Demodulation: normalize each output filter to unit L2 norm.
        demod = torch.rsqrt(weight.pow(2).sum([2, 3, 4]) + 1e-8)
        weight = weight * demod.view(batch, conv.out_channel, 1, 1, 1)
    # Fold the batch into the filter dimension for a grouped convolution
    # (one group per sample, so each sample uses its own modulated weights).
    weight = weight.view(
        batch * conv.out_channel, in_channel, conv.kernel_size, conv.kernel_size
    )
    if conv.upsample:
        # Upsampling path: grouped transposed conv followed by a blur.
        input = input.view(1, batch * in_channel, height, width)
        weight = weight.view(
            batch, conv.out_channel, in_channel, conv.kernel_size, conv.kernel_size
        )
        weight = weight.transpose(1, 2).reshape(
            batch * in_channel, conv.out_channel, conv.kernel_size, conv.kernel_size
        )
        out = F.conv_transpose2d(input, weight, padding=0, stride=2, groups=batch)
        _, _, height, width = out.shape
        out = out.view(batch, conv.out_channel, height, width)
        out = conv.blur(out)
    elif conv.downsample:
        # Downsampling path: blur first, then grouped strided conv.
        input = conv.blur(input)
        _, _, height, width = input.shape
        input = input.view(1, batch * in_channel, height, width)
        out = F.conv2d(input, weight, padding=0, stride=2, groups=batch)
        _, _, height, width = out.shape
        out = out.view(batch, conv.out_channel, height, width)
    else:
        # Same-resolution path.
        input = input.view(1, batch * in_channel, height, width)
        out = F.conv2d(input, weight, padding=conv.padding, groups=batch)
        _, _, height, width = out.shape
        out = out.view(batch, conv.out_channel, height, width)
    # Noise injection and activation come from the wrapped layer.
    out = layer.noise(out, noise=noise)
    out = layer.activate(out)
    return out
def decoder(G, style_space, latent, noise, resize_image = True):
    """Synthesize an image from a StyleGAN2 generator `G` driven by explicit
    per-layer stylespace codes.

    `style_space` holds one modulation vector per styled conv (in layer
    order), `latent` is the W+ code (still used by the to_rgb layers), and
    `noise` the per-layer noise maps. Returns a (optionally 256x256-pooled)
    image tensor.
    """
    # an decoder warper for G
    out = G.input(latent)
    out = conv_warper(G.conv1, out, style_space[0], noise[0])
    skip = G.to_rgb1(out, latent[:, 1])
    i = 1
    # Each resolution block consumes two styled convs and one to_rgb.
    for conv1, conv2, noise1, noise2, to_rgb in zip(
        G.convs[::2], G.convs[1::2], noise[1::2], noise[2::2], G.to_rgbs
    ):
        out = conv_warper(conv1, out, style_space[i], noise=noise1)
        out = conv_warper(conv2, out, style_space[i+1], noise=noise2)
        skip = to_rgb(out, latent[:, i + 2], skip)
        i += 2
    image = skip
    if resize_image:
        # Downscale to the 256x256 working resolution used elsewhere.
        face_pool = torch.nn.AdaptiveAvgPool2d((256, 256))
        image = face_pool(image)
    return image
def encoder(G, noise, truncation, truncation_latent, size = 256, input_is_latent = False):
    """Map a latent code through G's style machinery and collect the
    per-layer stylespace codes (modulation outputs).

    Returns (style_space, latent, noise): the list of per-conv style
    vectors, the expanded W+ latent, and G's fixed noise buffers.
    """
    style_space = []
    # an encoder warper for G
    inject_index = None
    if not input_is_latent:
        # Z-space input: push it through the mapping network first.
        inject_index = G.n_latent
        styles = [noise]
        styles = [G.style(s) for s in styles]
    else:
        styles = [noise]
    n_latent = int(math.log(size, 2))* 2 - 2
    if truncation < 1:
        # Truncation trick: pull styles toward the mean latent.
        style_t = []
        for style in styles:
            style_t.append(
                truncation_latent + truncation * (style - truncation_latent)
            )
        styles = style_t
    if len(styles) < 2:
        inject_index = n_latent
        if styles[0].ndim < 3:
            # Broadcast a single W vector to all n_latent layers (W+).
            latent = styles[0].unsqueeze(1).repeat(1, inject_index, 1)
        else:
            latent = styles[0]
    else:
        # Style-mixing branch. NOTE(review): unreachable here since `styles`
        # always has length 1 above, and `random` is not imported in this
        # file — confirm before enabling mixing.
        if inject_index is None:
            inject_index = random.randint(1, n_latent - 1)
        latent = styles[0].unsqueeze(1).repeat(1, inject_index, 1)
        latent2 = styles[1].unsqueeze(1).repeat(1, n_latent - inject_index, 1)
        latent = torch.cat([latent, latent2], 1)
    noise = [getattr(G.noises, 'noise_{}'.format(i)) for i in range(G.num_layers)]
    # Stylespace = modulation of the W+ code at every styled conv.
    style_space.append(G.conv1.conv.modulation(latent[:, 0]))
    i = 1
    for conv1, conv2, noise1, noise2, to_rgb in zip(
        G.convs[::2], G.convs[1::2], noise[1::2], noise[2::2], G.to_rgbs
    ):
        style_space.append(conv1.conv.modulation(latent[:, i]))
        style_space.append(conv2.conv.modulation(latent[:, i+1]))
        i += 2
    return style_space, latent, noise
| 3,714 | 27.576923 | 90 | py |
StyleMask | StyleMask-master/libs/utilities/dataloader.py | """
"""
import torch
import os
import glob
import cv2
import numpy as np
from torchvision import transforms, utils
from PIL import Image
from torch.utils.data import Dataset
from libs.utilities.utils import make_noise
np.random.seed(0)
class CustomDataset_validation(Dataset):
    """Validation dataset of (source, target) latent-code pairs.

    Codes are either loaded from a saved .npy file or sampled fresh with
    make_noise (and then persisted so the run is reproducible).
    """
    def __init__(self, synthetic_dataset_path = None, validation_pairs = None, shuffle = True):
        """
        Args:
            synthetic_dataset_path: path to synthetic latent codes. If None generate random
            num_samples: how many samples for validation
        """
        self.shuffle = shuffle
        self.validation_pairs = validation_pairs
        self.synthetic_dataset_path = synthetic_dataset_path
        if self.synthetic_dataset_path is not None:
            # Load pre-generated codes; the first half become sources, the
            # second half targets.
            z_codes = np.load(self.synthetic_dataset_path)
            z_codes = torch.from_numpy(z_codes)
            if self.validation_pairs is not None:
                self.num_samples = 2 * self.validation_pairs
                if z_codes.shape[0] > self.num_samples:
                    z_codes = z_codes[:self.num_samples]
                else:
                    # Not enough codes for the requested pair count — shrink.
                    self.num_samples = z_codes.shape[0]
                    self.validation_pairs = int(self.num_samples/2)
            else:
                self.validation_pairs = int(z_codes.shape[0]/2)
                self.num_samples = 2 * self.validation_pairs
            self.fixed_source_w = z_codes[:self.validation_pairs, :]
            self.fixed_target_w = z_codes[self.validation_pairs:2*self.validation_pairs, :]
        else:
            # No file given: sample fresh 512-d codes for both halves.
            self.fixed_source_w = make_noise(self.validation_pairs, 512, None)
            self.fixed_target_w = make_noise(self.validation_pairs, 512, None)
            # Save random generated latent codes
            save_path = './libs/configs/random_latent_codes_{}.npy'.format(self.validation_pairs)
            z_codes = torch.cat((self.fixed_source_w, self.fixed_target_w), dim = 0)
            np.save(save_path, z_codes.detach().cpu().numpy())
        # NOTE(review): this transform is defined but not used by __getitem__.
        self.transform = transforms.Compose([
                            transforms.Resize((256, 256)),
                            transforms.ToTensor(),
                            transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])])
    def __len__(self):
        # One dataset item = one (source, target) pair.
        return self.validation_pairs
    def __getitem__(self, index):
        source_w = self.fixed_source_w[index]
        target_w = self.fixed_target_w[index]
        sample = {
            'source_w': source_w,
            'target_w': target_w
        }
        return sample
| 2,169 | 29.138889 | 92 | py |
StyleMask | StyleMask-master/libs/utilities/utils.py | import os
import numpy as np
import torch
from torchvision import utils as torch_utils
import glob
from datetime import datetime
import json
from libs.utilities.stylespace_utils import encoder, decoder
def make_path(filepath):
    """Create directory *filepath* (and parents) if it does not exist.

    Relies on exist_ok=True, which is already a no-op for existing
    directories — the previous exists()-then-create guard was redundant
    and introduced a check-then-act race.
    """
    os.makedirs(filepath, exist_ok = True)
def save_arguments_json(args, save_path, filename):
    """Serialize the run arguments (a JSON-serializable dict) to
    save_path/filename.

    The original computed a timestamp string that was never written
    anywhere; that dead code (and the pointless `stat_dict` alias) has
    been removed — the file contents are unchanged.
    """
    out_json = os.path.join(save_path, filename)
    with open(out_json, 'w') as out:
        json.dump(args, out)
def get_files_frompath(path, types):
    """Return the sorted list of files under *path* matching any of the
    glob patterns in *types* (e.g. ['*.png', '*.jpg'])."""
    matched = []
    for pattern in types:
        matched += glob.glob(os.path.join(path, pattern))
    matched.sort()
    return matched
def make_noise(batch, dim, truncation=None):
    """Sample a [batch, *dim] tensor of standard Gaussian noise.

    `dim` may be an int or a list of ints. With a truncation value other
    than None/1.0 a truncated sampler is used instead.
    """
    dims = [dim] if isinstance(dim, int) else dim
    if truncation is None or truncation == 1.0:
        return torch.randn([batch] + dims)
    # NOTE(review): `truncated_noise` is not defined or imported in this
    # file — this branch would raise NameError; confirm its origin.
    return torch.from_numpy(truncated_noise([batch] + dims, truncation)).to(torch.float)
def calculate_shapemodel(deca_model, images, image_space = 'gan'):
    """Extract DECA 3DMM parameters from a batch of images.

    Returns ({'pose', 'alpha_exp', 'alpha_shp', 'cam'}, angles) where
    `angles` are yaw/pitch/roll in degrees, moved to the GPU.
    image_space='gan' means `images` are in [-1, 1] and are first
    rescaled to [0, 255] as expected by the DECA wrapper.
    """
    img_tmp = images.clone()
    if image_space == 'gan':
        # invert image from [-1,1] to [0,255]
        min_val = -1; max_val = 1
        img_tmp.clamp_(min=min_val, max=max_val)
        img_tmp.add_(-min_val).div_(max_val - min_val + 1e-5)
        img_tmp = img_tmp.mul(255.0)
    p_tensor, alpha_shp_tensor, alpha_exp_tensor, angles, cam = deca_model.extract_DECA_params(img_tmp) # params dictionary
    out_dict = {}
    out_dict['pose'] = p_tensor
    out_dict['alpha_exp'] = alpha_exp_tensor
    out_dict['alpha_shp'] = alpha_shp_tensor
    out_dict['cam'] = cam
    return out_dict, angles.cuda()
def generate_image(G, latent_code, truncation, trunc, image_resolution, split_sections, input_is_latent = False, return_latents = False, resize_image = True):
    """Generate an image from `latent_code` with StyleGAN2 generator `G`
    and also return its stylespace decomposition.

    Returns (img, style_space, w, noise). Note the generator is run once
    for the image and `encoder` re-derives the per-layer style codes.
    NOTE(review): `split_sections` is accepted but unused here.
    """
    img, _ = G([latent_code], return_latents = return_latents, truncation = truncation, truncation_latent = trunc, input_is_latent = input_is_latent)
    style_space, w, noise = encoder(G, latent_code, truncation, trunc, size = image_resolution, input_is_latent = input_is_latent)
    if resize_image:
        # Pool down to the 256x256 working resolution.
        face_pool = torch.nn.AdaptiveAvgPool2d((256, 256))
        img = face_pool(img)
    return img, style_space, w, noise
def generate_new_stylespace(style_source, style_target, mask, num_layers_control = None):
    """Blend two stylespace codes: use `style_target` where `mask` is 1
    and `style_source` where it is 0.

    If `num_layers_control` is given, only the first mask.shape[1]
    channels are blended; the remaining channels stay from the source.
    """
    if num_layers_control is None:
        return mask * style_target + (1 - mask) * style_source
    blended = style_source.clone()
    k = mask.shape[1]
    blended[:, :k] = mask * style_target[:, :k] + (1 - mask) * style_source[:, :k]
    return blended
def save_image(image, save_image_dir):
    """Write *image* to disk, rescaling values from [-1, 1] for display.

    The original bound the call's None return to an unused `grid`
    variable; that dead binding is removed.
    NOTE(review): `range=` was renamed `value_range=` in newer
    torchvision — confirm the pinned version still accepts it.
    """
    torch_utils.save_image(
        image,
        save_image_dir,
        normalize=True,
        range=(-1, 1),
    )
def save_grid(source_img, target_img, reenacted_img, save_path):
    """Save source | target | reenacted side by side as a single image."""
    panels = [
        source_img.squeeze(0),
        target_img.squeeze(0),
        reenacted_img.squeeze(0),
    ]
    # Concatenating along width reproduces the 3-panel layout.
    grid_image = torch.cat(panels, dim=2)
    save_image(grid_image, save_path)
| 3,116 | 32.880435 | 158 | py |
StyleMask | StyleMask-master/libs/utilities/utils_inference.py | import os
import numpy as np
import torch
from torchvision import utils as torch_utils
import cv2
from skimage import io
from libs.utilities.image_utils import read_image_opencv, torch_image_resize
from libs.utilities.ffhq_cropping import align_crop_image
def calculate_evaluation_metrics(params_shifted, params_target, angles_shifted, angles_target, imgs_shifted, imgs_source, id_loss_, exp_ranges):
    """Compute reenactment metrics for a single sample (batch index 0).

    Returns (csim, pose, exp_error):
      csim      - cosine identity similarity between reenacted and source.
      pose      - mean absolute yaw/pitch/roll error in degrees.
      exp_error - mean absolute error over jaw + 20 expression coefficients,
                  each normalized to [0, 1] by the ranges in `exp_ranges`.
    """
    ############ Evaluation ############
    yaw_reenacted = angles_shifted[:,0][0].detach().cpu().numpy()
    pitch_reenacted = angles_shifted[:,1][0].detach().cpu().numpy()
    roll_reenacted = angles_shifted[:,2][0].detach().cpu().numpy()
    exp_reenacted = params_shifted['alpha_exp'][0].detach().cpu().numpy()
    # Jaw opening is the 4th entry of the DECA pose vector.
    jaw_reenacted = params_shifted['pose'][0, 3].detach().cpu().numpy()
    yaw_target = angles_target[:,0][0].detach().cpu().numpy()
    pitch_target = angles_target[:,1][0].detach().cpu().numpy()
    roll_target = angles_target[:,2][0].detach().cpu().numpy()
    exp_target = params_target['alpha_exp'][0].detach().cpu().numpy()
    jaw_target = params_target['pose'][0, 3].detach().cpu().numpy()
    exp_error = []
    num_expressions = 20
    # exp_ranges[3] holds (min, max) for the jaw coefficient;
    # exp_ranges[4:] hold the per-expression-coefficient ranges.
    max_range = exp_ranges[3][1]
    min_range = exp_ranges[3][0]
    jaw_target = (jaw_target - min_range)/(max_range-min_range)
    jaw_reenacted = (jaw_reenacted - min_range)/(max_range-min_range)
    exp_error.append(abs(jaw_reenacted - jaw_target))
    for j in range(num_expressions):
        max_range = exp_ranges[j+4][1]
        min_range = exp_ranges[j+4][0]
        # Normalize both coefficients to [0, 1] before comparing.
        target = (exp_target[j] - min_range)/(max_range-min_range)
        reenacted = (exp_reenacted[j] - min_range)/(max_range-min_range)
        exp_error.append(abs(reenacted - target) )
    exp_error = np.mean(exp_error)
    pose = (abs(yaw_reenacted-yaw_target) + abs(pitch_reenacted-pitch_target) + abs(roll_reenacted-roll_target))/3
    ################################################
    ###### CSIM ######
    # id_loss_ returns (1 - cosine similarity), so invert it back.
    loss_identity = id_loss_(imgs_shifted, imgs_source)
    csim = 1 - loss_identity.data.item()
    return csim, pose, exp_error
def generate_grid_image(source, target, reenacted):
    """Stack a batch of (source, target, reenacted) 256x256 triplets into
    one grid tensor: one row per sample, three columns."""
    num_images = source.shape[0]  # batch size
    width = 256
    height = 256
    grid_image = torch.zeros((3, num_images * height, 3 * width))
    for row in range(num_images):
        top = row * height
        bottom = top + height
        grid_image[:, top:bottom, :width] = source[row]
        grid_image[:, top:bottom, width:2 * width] = target[row]
        grid_image[:, top:bottom, 2 * width:] = reenacted[row]
    if grid_image.shape[1] > 1000:  # keep tall grids at a viewable size
        grid_image = torch_image_resize(grid_image, height=800)
    return grid_image
" Crop images using facial landmarks like FFHQ "
def preprocess_image(image_path, landmarks_est, save_filename = None):
    """Load an image, detect facial landmarks, and crop/align it FFHQ-style.

    Exits the process if alignment fails; optionally writes the aligned
    crop (BGR) to `save_filename`.
    """
    image = read_image_opencv(image_path)
    # First detected face's landmarks drive the alignment.
    landmarks = landmarks_est.get_landmarks(image)[0]
    landmarks = np.asarray(landmarks)
    img = align_crop_image(image, landmarks)
    if img is not None and save_filename is not None:
        # OpenCV writes BGR, so convert back before saving.
        cv2.imwrite(save_filename, cv2.cvtColor(img.copy(), cv2.COLOR_RGB2BGR))
    if img is not None:
        return img
    else:
        print('Error with image preprocessing')
        exit()
" Invert real image into the latent space of StyleGAN2 "
def invert_image(image, encoder, generator, truncation, trunc, save_path = None, save_name = None):
    """Invert a real image into W+ with `encoder`, then reconstruct it
    with the StyleGAN2 `generator`.

    If both save_path and save_name are given, the reconstruction is
    written as <save_name>.png and the first latent code as <save_name>.npy.
    Returns (inverted_images, latent_codes).
    """
    with torch.no_grad():
        latent_codes = encoder(image)
        inverted_images, _ = generator(
            [latent_codes],
            input_is_latent=True,
            return_latents=False,
            truncation=truncation,
            truncation_latent=trunc,
        )
        if save_path is not None and save_name is not None:
            torch_utils.save_image(
                inverted_images,
                os.path.join(save_path, '{}.png'.format(save_name)),
                normalize=True,
                range=(-1, 1),
            )
            # Latent code
            latent_code = latent_codes[0].detach().cpu().numpy()
            np.save(os.path.join(save_path, '{}.npy'.format(save_name)), latent_code)
    return inverted_images, latent_codes
StyleMask | StyleMask-master/libs/criteria/losses.py | import torch
import numpy as np
"""
Calculate shape losses
"""
class Losses():
    """Landmark / shape losses for reenactment training (68-point layout).

    Changes vs. original: the eye/mouth losses computed unused `.clone()`
    normalized copies (dead code, now removed), and the shape loss built a
    fresh local L1Loss per call instead of reusing self.criterion_l1.
    Numerical behavior is unchanged.
    """
    def __init__(self):
        self.criterion_mse = torch.nn.MSELoss()
        self.criterion_l1 = torch.nn.L1Loss()
        # DECA landmarks live on a 224x224 image plane.
        self.image_deca_size = 224

    def calculate_pixel_wise_loss(self, images_shifted, images):
        """L1 reconstruction loss between generated and target images."""
        return self.criterion_l1(images, images_shifted)

    def calculate_shape_loss(self, shape_gt, shape_reenacted, normalize = False):
        """L1 distance between two landmark sets, optionally scaled by 1/200.

        NOTE(review): the divisor 200 is hard-coded (not
        self.image_deca_size) — confirm that is intended.
        """
        if normalize:
            return self.criterion_l1(shape_gt / 200, shape_reenacted / 200)
        return self.criterion_l1(shape_gt, shape_reenacted)

    def calculate_eye_loss(self, shape_gt, shape_reenacted):
        """Match eye-opening distances between the two landmark sets.

        Each pair indexes opposing eye landmarks; the loss compares the
        per-pair absolute distances rather than raw positions.
        """
        eye_pairs = [(36, 39), (37, 41), (38, 40), (42, 45), (43, 47), (44, 46)]
        loss = 0
        for p0, p1 in eye_pairs:
            d_gt = abs(shape_gt[:, p0, :] - shape_gt[:, p1, :])
            d_e = abs(shape_reenacted[:, p0, :] - shape_reenacted[:, p1, :])
            loss += self.criterion_l1(d_gt, d_e)
        return loss / len(eye_pairs)

    def calculate_mouth_loss(self, shape_gt, shape_reenacted):
        """Match mouth-opening distances between the two landmark sets."""
        mouth_pairs = [(48, 54), (49, 59), (50, 58), (51, 57), (52, 56), (53, 55), (60, 64), (61, 67), (62, 66), (63, 65)]
        loss = 0
        for p0, p1 in mouth_pairs:
            d_gt = abs(shape_gt[:, p0, :] - shape_gt[:, p1, :])
            d_e = abs(shape_reenacted[:, p0, :] - shape_reenacted[:, p1, :])
            loss += self.criterion_l1(d_gt, d_e)
        return loss / len(mouth_pairs)
| 2,137 | 32.40625 | 116 | py |
StyleMask | StyleMask-master/libs/criteria/model_irse.py | from torch.nn import Linear, Conv2d, BatchNorm1d, BatchNorm2d, PReLU, Dropout, Sequential, Module
from .helpers import get_blocks, Flatten, bottleneck_IR, bottleneck_IR_SE, l2_norm
"""
Modified Backbone implementation from [TreB1eN](https://github.com/TreB1eN/InsightFace_Pytorch)
"""
class Backbone(Module):
    """IR-SE ResNet face-embedding backbone (ArcFace-style).

    Produces an L2-normalized 512-d embedding for 112x112 or 224x224
    RGB inputs.
    """
    def __init__(self, input_size, num_layers, mode='ir', drop_ratio=0.4, affine=True):
        super(Backbone, self).__init__()
        assert input_size in [112, 224], "input_size should be 112 or 224"
        assert num_layers in [50, 100, 152], "num_layers should be 50, 100 or 152"
        assert mode in ['ir', 'ir_se'], "mode should be ir or ir_se"
        blocks = get_blocks(num_layers)
        # 'ir' = plain IR units, 'ir_se' = IR units with squeeze-excitation.
        if mode == 'ir':
            unit_module = bottleneck_IR
        elif mode == 'ir_se':
            unit_module = bottleneck_IR_SE
        self.input_layer = Sequential(Conv2d(3, 64, (3, 3), 1, 1, bias=False),
                                      BatchNorm2d(64),
                                      PReLU(64))
        # The final feature map is 7x7 for 112 inputs and 14x14 for 224.
        if input_size == 112:
            self.output_layer = Sequential(BatchNorm2d(512),
                                           Dropout(drop_ratio),
                                           Flatten(),
                                           Linear(512 * 7 * 7, 512),
                                           BatchNorm1d(512, affine=affine))
        else:
            self.output_layer = Sequential(BatchNorm2d(512),
                                           Dropout(drop_ratio),
                                           Flatten(),
                                           Linear(512 * 14 * 14, 512),
                                           BatchNorm1d(512, affine=affine))
        # Flatten the nested stage/unit configuration into one Sequential body.
        modules = []
        for block in blocks:
            for bottleneck in block:
                modules.append(unit_module(bottleneck.in_channel,
                                           bottleneck.depth,
                                           bottleneck.stride))
        self.body = Sequential(*modules)
    def forward(self, x):
        x = self.input_layer(x)
        x = self.body(x)
        x = self.output_layer(x)
        # Embeddings are compared with cosine similarity, hence unit norm.
        return l2_norm(x)
def IR_50(input_size):
    """Build an IR-50 face-embedding backbone (plain IR units)."""
    return Backbone(input_size, num_layers=50, mode='ir', drop_ratio=0.4, affine=False)
def IR_101(input_size):
    """Build an IR-101 face-embedding backbone (plain IR units)."""
    return Backbone(input_size, num_layers=100, mode='ir', drop_ratio=0.4, affine=False)
def IR_152(input_size):
    """Build an IR-152 face-embedding backbone (plain IR units)."""
    return Backbone(input_size, num_layers=152, mode='ir', drop_ratio=0.4, affine=False)
def IR_SE_50(input_size):
    """Build an IR-SE-50 backbone (IR units with squeeze-excitation)."""
    return Backbone(input_size, num_layers=50, mode='ir_se', drop_ratio=0.4, affine=False)
def IR_SE_101(input_size):
    """Build an IR-SE-101 backbone (IR units with squeeze-excitation)."""
    return Backbone(input_size, num_layers=100, mode='ir_se', drop_ratio=0.4, affine=False)
def IR_SE_152(input_size):
    """Build an IR-SE-152 backbone (IR units with squeeze-excitation)."""
    return Backbone(input_size, num_layers=152, mode='ir_se', drop_ratio=0.4, affine=False)
| 2,821 | 32.2 | 97 | py |
StyleMask | StyleMask-master/libs/criteria/l2_loss.py | import torch
# Shared mean-reduced MSE criterion used by l2_loss below.
l2_criterion = torch.nn.MSELoss(reduction='mean')
def l2_loss(real_images, generated_images):
    """Mean squared error between real and generated image batches."""
    return l2_criterion(real_images, generated_images)
| 181 | 19.222222 | 54 | py |
StyleMask | StyleMask-master/libs/criteria/helpers.py | from collections import namedtuple
import torch
from torch.nn import Conv2d, BatchNorm2d, PReLU, ReLU, Sigmoid, MaxPool2d, AdaptiveAvgPool2d, Sequential, Module
"""
ArcFace implementation from [TreB1eN](https://github.com/TreB1eN/InsightFace_Pytorch)
"""
class Flatten(Module):
    """Collapse every dimension after the batch dimension into one."""
    def forward(self, input):
        batch = input.size(0)
        return input.view(batch, -1)
def l2_norm(input, axis=1):
    """Project *input* onto the unit L2 sphere along dimension *axis*."""
    return input / input.norm(2, axis, keepdim=True)
class Bottleneck(namedtuple('Block', ['in_channel', 'depth', 'stride'])):
    """ A named tuple describing one ResNet unit: (in_channel, depth, stride). """
def get_block(in_channel, depth, num_units, stride=2):
    """Return one ResNet stage: a strided unit followed by (num_units - 1)
    stride-1 units at the same depth."""
    units = [Bottleneck(in_channel, depth, stride)]
    units.extend(Bottleneck(depth, depth, 1) for _ in range(num_units - 1))
    return units
def get_blocks(num_layers):
    """Return the four-stage block configuration for an IR-50/100/152 net."""
    # Per-depth unit counts for the four stages.
    stage_units = {
        50: (3, 4, 14, 3),
        100: (3, 13, 30, 3),
        152: (3, 8, 36, 3),
    }
    if num_layers not in stage_units:
        raise ValueError("Invalid number of layers: {}. Must be one of [50, 100, 152]".format(num_layers))
    u1, u2, u3, u4 = stage_units[num_layers]
    return [
        get_block(in_channel=64, depth=64, num_units=u1),
        get_block(in_channel=64, depth=128, num_units=u2),
        get_block(in_channel=128, depth=256, num_units=u3),
        get_block(in_channel=256, depth=512, num_units=u4),
    ]
class SEModule(Module):
    """Squeeze-and-Excitation channel attention: global pool -> bottleneck
    MLP (as 1x1 convs) -> sigmoid gate applied per channel."""
    def __init__(self, channels, reduction):
        super(SEModule, self).__init__()
        self.avg_pool = AdaptiveAvgPool2d(1)
        self.fc1 = Conv2d(channels, channels // reduction, kernel_size=1, padding=0, bias=False)
        self.relu = ReLU(inplace=True)
        self.fc2 = Conv2d(channels // reduction, channels, kernel_size=1, padding=0, bias=False)
        self.sigmoid = Sigmoid()

    def forward(self, x):
        # Per-channel gate in (0, 1), broadcast over the spatial dims.
        gate = self.sigmoid(self.fc2(self.relu(self.fc1(self.avg_pool(x)))))
        return x * gate
class bottleneck_IR(Module):
    """IR-ResNet residual unit: BN -> 3x3 conv -> PReLU -> strided 3x3
    conv -> BN, added to a (possibly projected) shortcut."""
    def __init__(self, in_channel, depth, stride):
        super(bottleneck_IR, self).__init__()
        if in_channel == depth:
            # Same width: the shortcut only needs spatial subsampling.
            self.shortcut_layer = MaxPool2d(1, stride)
        else:
            # Width change: 1x1 projection + BN on the shortcut.
            self.shortcut_layer = Sequential(
                Conv2d(in_channel, depth, (1, 1), stride, bias=False),
                BatchNorm2d(depth),
            )
        self.res_layer = Sequential(
            BatchNorm2d(in_channel),
            Conv2d(in_channel, depth, (3, 3), (1, 1), 1, bias=False),
            PReLU(depth),
            Conv2d(depth, depth, (3, 3), stride, 1, bias=False),
            BatchNorm2d(depth),
        )

    def forward(self, x):
        return self.res_layer(x) + self.shortcut_layer(x)
class bottleneck_IR_SE(Module):
    """IR residual unit with a squeeze-excitation gate on the residual path."""
    def __init__(self, in_channel, depth, stride):
        super(bottleneck_IR_SE, self).__init__()
        if in_channel == depth:
            # Same width: shortcut only subsamples spatially.
            self.shortcut_layer = MaxPool2d(1, stride)
        else:
            # Width change: 1x1 projection + BN on the shortcut.
            self.shortcut_layer = Sequential(
                Conv2d(in_channel, depth, (1, 1), stride, bias=False),
                BatchNorm2d(depth),
            )
        self.res_layer = Sequential(
            BatchNorm2d(in_channel),
            Conv2d(in_channel, depth, (3, 3), (1, 1), 1, bias=False),
            PReLU(depth),
            Conv2d(depth, depth, (3, 3), stride, 1, bias=False),
            BatchNorm2d(depth),
            SEModule(depth, 16),
        )

    def forward(self, x):
        return self.res_layer(x) + self.shortcut_layer(x)
| 3,556 | 28.641667 | 112 | py |
StyleMask | StyleMask-master/libs/criteria/id_loss.py | import torch
from torch import nn
from .model_irse import Backbone
import os
import torch.backends.cudnn as cudnn
class IDLoss(nn.Module):
    """Identity loss = 1 - cosine similarity of ArcFace (IR-SE-50) embeddings.

    Loads pretrained ir_se50 weights from disk and exits the process if
    the checkpoint is missing.
    """
    def __init__(self, pretrained_model_path = './pretrained_models/model_ir_se50.pth'):
        super(IDLoss, self).__init__()
        print('Loading ResNet ArcFace for identity loss')
        self.facenet = Backbone(input_size=112, num_layers=50, drop_ratio=0.6, mode='ir_se')
        if not os.path.exists(pretrained_model_path):
            print('ir_se50 model does not exist in {}'.format(pretrained_model_path))
            exit()
        self.facenet.load_state_dict(torch.load(pretrained_model_path))
        self.face_pool = torch.nn.AdaptiveAvgPool2d((112, 112))
        self.facenet.eval()
        self.criterion = nn.CosineSimilarity(dim=1, eps=1e-6)
    def extract_feats(self, x, crop = True):
        """Return face embeddings; optionally crop to the inner face region
        before pooling to the 112x112 input size the network expects."""
        if crop:
            x = x[:, :, 35:223, 32:220]  # Crop interesting region
        x = self.face_pool(x)
        x_feats = self.facenet(x)
        return x_feats
    def forward(self, y_hat, y, crop = True):
        """Mean (1 - cosine similarity) between embeddings of y_hat and y.

        Target embeddings are detached, so gradients flow only through y_hat.
        """
        n_samples = y.shape[0]
        y_feats = self.extract_feats(y, crop)
        y_hat_feats = self.extract_feats(y_hat, crop)
        cosine_sim = self.criterion(y_hat_feats, y_feats.detach())
        loss = 1 - cosine_sim
        loss = torch.mean(loss)
        return loss
| 1,349 | 37.571429 | 92 | py |
StyleMask | StyleMask-master/libs/criteria/lpips/lpips.py | import torch
import torch.nn as nn
from .networks import get_network, LinLayers
from .utils import get_state_dict
class LPIPS(nn.Module):
    r"""Creates a criterion that measures https://github.com/eladrich/pixel2style2pixel
    Learned Perceptual Image Patch Similarity (LPIPS).

    Distance = learned linear re-weighting of squared differences between
    normalized backbone activations at several layers.

    Arguments:
        net_type (str): the network type to compare the features:
                        'alex' | 'squeeze' | 'vgg'. Default: 'alex'.
        version (str): the version of LPIPS. Default: 0.1.
    """
    def __init__(self, net_type: str = 'alex', version: str = '0.1'):
        assert version in ['0.1'], 'v0.1 is only supported now'
        super(LPIPS, self).__init__()
        # pretrained network
        self.net = get_network(net_type).to("cuda")
        # linear layers (weights downloaded from the official LPIPS repo)
        self.lin = LinLayers(self.net.n_channels_list).to("cuda")
        self.lin.load_state_dict(get_state_dict(net_type, version))
    def forward(self, x: torch.Tensor, y: torch.Tensor):
        # Per-layer normalized activations for both inputs.
        feat_x, feat_y = self.net(x), self.net(y)
        diff = [(fx - fy) ** 2 for fx, fy in zip(feat_x, feat_y)]
        # 1x1 conv weighting, then spatial mean per layer.
        res = [l(d).mean((2, 3), True) for d, l in zip(diff, self.lin)]
        # Sum over layers, averaged over the batch.
        return torch.sum(torch.cat(res, 0)) / x.shape[0]
| 1,220 | 33.885714 | 87 | py |
StyleMask | StyleMask-master/libs/criteria/lpips/utils.py | from collections import OrderedDict
import torch
def normalize_activation(x, eps=1e-10):
    """Scale *x* to unit L2 norm across the channel dimension (dim=1).

    A small constant is added both under the sqrt (1e-9) and to the
    divisor (`eps`) so all-zero activations do not produce NaNs.
    Leftover commented-out pdb debugging code has been removed; the
    computation is unchanged.
    """
    norm_factor = torch.sqrt(torch.sum(x ** 2, dim=1, keepdim=True) + 1e-9)
    return x / (norm_factor + eps)
def get_state_dict(net_type: str = 'alex', version: str = '0.1'):
    """Download the official LPIPS linear-layer weights and rename the
    keys to match this implementation's LinLayers module."""
    # build url
    url = 'https://raw.githubusercontent.com/richzhang/PerceptualSimilarity/' \
        + f'master/lpips/weights/v{version}/{net_type}.pth'
    # download (cached by torch.hub after the first fetch)
    old_state_dict = torch.hub.load_state_dict_from_url(
        url, progress=True,
        map_location=None if torch.cuda.is_available() else torch.device('cpu')
    )
    # rename keys: strip the 'lin' prefix and 'model.' scope
    new_state_dict = OrderedDict()
    for key, val in old_state_dict.items():
        new_key = key
        new_key = new_key.replace('lin', '')
        new_key = new_key.replace('model.', '')
        new_state_dict[new_key] = val
    return new_state_dict
| 1,033 | 28.542857 | 79 | py |
StyleMask | StyleMask-master/libs/criteria/lpips/networks.py | from typing import Sequence
from itertools import chain
import torch
import torch.nn as nn
from torchvision import models
from .utils import normalize_activation
def get_network(net_type: str):
    """Instantiate the LPIPS feature backbone named by *net_type*."""
    factories = {'alex': AlexNet, 'squeeze': SqueezeNet, 'vgg': VGG16}
    if net_type not in factories:
        raise NotImplementedError('choose net_type from [alex, squeeze, vgg].')
    return factories[net_type]()
class LinLayers(nn.ModuleList):
    """One frozen 1x1-conv head per feature level; each collapses its
    channels to a single per-pixel LPIPS distance weight."""
    def __init__(self, n_channels_list: Sequence[int]):
        heads = [
            nn.Sequential(nn.Identity(), nn.Conv2d(nc, 1, 1, 1, 0, bias=False))
            for nc in n_channels_list
        ]
        super(LinLayers, self).__init__(heads)
        # Weights come from the pretrained LPIPS checkpoint and stay frozen.
        for param in self.parameters():
            param.requires_grad = False
class BaseNet(nn.Module):
    """Base class for LPIPS feature extractors.

    Subclasses set `self.layers` (a torchvision features Sequential),
    `self.target_layers` (1-based indices whose outputs are collected)
    and `self.n_channels_list`.
    """
    def __init__(self):
        super(BaseNet, self).__init__()
        # register buffer: per-channel statistics used to standardize inputs
        # (values from the original LPIPS implementation).
        self.register_buffer(
            'mean', torch.Tensor([-.030, -.088, -.188])[None, :, None, None])
        self.register_buffer(
            'std', torch.Tensor([.458, .448, .450])[None, :, None, None])
    def set_requires_grad(self, state: bool):
        # Toggles grad on parameters; buffers are included for symmetry.
        for param in chain(self.parameters(), self.buffers()):
            param.requires_grad = state
    def z_score(self, x: torch.Tensor):
        return (x - self.mean) / self.std
    def forward(self, x: torch.Tensor):
        """Return the normalized activations at each target layer, in order."""
        x = self.z_score(x)
        output = []
        # Enumerate from 1 so indices line up with target_layers.
        for i, (_, layer) in enumerate(self.layers._modules.items(), 1):
            x = layer(x)
            if i in self.target_layers:
                output.append(normalize_activation(x))
            if len(output) == len(self.target_layers):
                # All requested activations collected — stop early.
                break
        return output
class SqueezeNet(BaseNet):
    """SqueezeNet-1.1 feature extractor for LPIPS (downloads pretrained
    ImageNet weights on first use)."""
    def __init__(self):
        super(SqueezeNet, self).__init__()
        self.layers = models.squeezenet1_1(True).features
        # 1-based indices of the activations LPIPS compares.
        self.target_layers = [2, 5, 8, 10, 11, 12, 13]
        self.n_channels_list = [64, 128, 256, 384, 384, 512, 512]
        # Frozen: the backbone is only used as a fixed feature extractor.
        self.set_requires_grad(False)
class AlexNet(BaseNet):
    """AlexNet feature extractor for LPIPS (downloads pretrained
    ImageNet weights on first use)."""
    def __init__(self):
        super(AlexNet, self).__init__()
        self.layers = models.alexnet(True).features
        # 1-based indices of the activations LPIPS compares.
        self.target_layers = [2, 5, 8, 10, 12]
        self.n_channels_list = [64, 192, 384, 256, 256]
        # Frozen: the backbone is only used as a fixed feature extractor.
        self.set_requires_grad(False)
class VGG16(BaseNet):
    """VGG-16 feature extractor for LPIPS (downloads pretrained ImageNet
    weights on first use)."""
    def __init__(self):
        super(VGG16, self).__init__()
        self.layers = models.vgg16(True).features
        # 1-based indices of the relu1_2 ... relu5_3 activations.
        self.target_layers = [4, 9, 16, 23, 30]
        self.n_channels_list = [64, 128, 256, 512, 512]
        # Frozen: only used as a fixed feature extractor.
        self.set_requires_grad(False)
StyleMask | StyleMask-master/libs/DECA/estimate_DECA.py | """
"""
import torch
import numpy as np
import cv2
import os
from .decalib.deca import DECA
from .decalib.datasets import datasets
from .decalib.utils import util
from .decalib.utils.config import cfg as deca_cfg
from .decalib.utils.rotation_converter import *
class DECA_model():
    """Thin wrapper around DECA for batched 3DMM parameter extraction."""
    def __init__(self, device):
        deca_cfg.model.use_tex = False
        dir_path = os.path.dirname(os.path.realpath(__file__))
        models_path = os.path.join(dir_path, 'data')
        if not os.path.exists(models_path):
            # DECA's pretrained assets must be downloaded manually.
            print('Please download the required data for DECA model. See Readme.')
            exit()
        self.deca = DECA(config = deca_cfg, device=device)
        # Handles face detection / cropping before encoding.
        self.data = datasets.TestData()
    'Batch torch tensor'
    def extract_DECA_params(self, images):
        """Encode a batch of images (values in [0, 255]) with DECA.

        Returns (pose[B,6], shape[B,100], expression[B,50],
        angles[B,3] in degrees, cam[B,3]). Samples whose preprocessing
        fails are marked with angles of -180.
        """
        p_tensor = torch.zeros(images.shape[0], 6).cuda()
        alpha_shp_tensor = torch.zeros(images.shape[0], 100).cuda()
        alpha_exp_tensor = torch.zeros(images.shape[0], 50).cuda()
        angles = torch.zeros(images.shape[0], 3).cuda()
        cam = torch.zeros(images.shape[0], 3).cuda()
        for batch in range(images.shape[0]):
            # Crop/align each image; error_flag marks detection failures.
            image_prepro, error_flag = self.data.get_image_tensor(images[batch].clone())
            if not error_flag:
                codedict = self.deca.encode(image_prepro.unsqueeze(0).cuda())
                # First 3 pose entries are the global rotation (axis-angle).
                pose = codedict['pose'][:,:3]
                pose = rad2deg(batch_axis2euler(pose))
                p_tensor[batch] = codedict['pose'][0]
                alpha_shp_tensor[batch] = codedict['shape'][0]
                alpha_exp_tensor[batch] = codedict['exp'][0]
                cam[batch] = codedict['cam'][0]
                angles[batch] = pose
            else:
                # Sentinel angles signal that this sample failed preprocessing.
                angles[batch][0] = -180
                angles[batch][1] = -180
                angles[batch][2] = -180
        return p_tensor, alpha_shp_tensor, alpha_exp_tensor, angles, cam
    def calculate_shape(self, coefficients, image = None, save_path = None, prefix = None):
        """Decode 3DMM coefficients into 2D/3D landmarks and mesh points."""
        landmarks2d, landmarks3d, points = self.deca.decode(coefficients)
        return landmarks2d, landmarks3d, points
| 2,153 | 36.137931 | 94 | py |
StyleMask | StyleMask-master/libs/DECA/decalib/deca.py | # -*- coding: utf-8 -*-
#
# Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is
# holder of all proprietary rights on this computer program.
# Using this computer program means that you agree to the terms
# in the LICENSE file included with this software distribution.
# Any use not explicitly granted by the LICENSE is prohibited.
#
# Copyright©2019 Max-Planck-Gesellschaft zur Förderung
# der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute
# for Intelligent Systems. All rights reserved.
#
# For comments or questions, please email us at deca@tue.mpg.de
# For commercial licensing contact, please contact ps-license@tuebingen.mpg.de
import os, sys
import torch
import torchvision
import torch.nn.functional as F
import torch.nn as nn
import numpy as np
from time import time
from skimage.io import imread
import cv2
import pickle
from .utils.renderer import SRenderY
from .models.encoders import ResnetEncoder
from .models.FLAME import FLAME, FLAMETex
from .models.decoders import Generator
from .utils import util
from .utils.rotation_converter import batch_euler2axis
from .datasets import datasets
from .utils.config import cfg
torch.backends.cudnn.benchmark = True
class DECA(object):
    """Inference wrapper around the DECA face-reconstruction model.

    Couples the FLAME statistical head model with ResNet encoders (coarse
    parameters + detail code), a detail-displacement generator and a
    differentiable renderer. Provides encode/decode helpers plus
    visualization and OBJ/PLY export.
    """
    def __init__(self, config=None, device='cuda'):
        if config is None:
            self.cfg = cfg
        else:
            self.cfg = config
        self.device = device
        self.image_size = self.cfg.dataset.image_size
        self.uv_size = self.cfg.model.uv_size
        self._create_model(self.cfg.model)
        self._setup_renderer(self.cfg.model)

    def _setup_renderer(self, model_cfg):
        """Build the renderer and load fixed UV-space assets from disk."""
        self.render = SRenderY(self.image_size, obj_filename=model_cfg.topology_path, uv_size=model_cfg.uv_size).to(self.device)
        # face mask for rendering details
        mask = imread(model_cfg.face_eye_mask_path).astype(np.float32)/255.; mask = torch.from_numpy(mask[:,:,0])[None,None,:,:].contiguous()
        self.uv_face_eye_mask = F.interpolate(mask, [model_cfg.uv_size, model_cfg.uv_size]).to(self.device)
        mask = imread(model_cfg.face_mask_path).astype(np.float32)/255.; mask = torch.from_numpy(mask[:,:,0])[None,None,:,:].contiguous()
        self.uv_face_mask = F.interpolate(mask, [model_cfg.uv_size, model_cfg.uv_size]).to(self.device)
        # displacement correction
        fixed_dis = np.load(model_cfg.fixed_displacement_path)
        self.fixed_uv_dis = torch.tensor(fixed_dis).float().to(self.device)
        # mean texture
        mean_texture = imread(model_cfg.mean_tex_path).astype(np.float32)/255.; mean_texture = torch.from_numpy(mean_texture.transpose(2,0,1))[None,:,:,:].contiguous()
        self.mean_texture = F.interpolate(mean_texture, [model_cfg.uv_size, model_cfg.uv_size]).to(self.device)
        # dense mesh template, for save detail mesh
        self.dense_template = np.load(model_cfg.dense_template_path, allow_pickle=True, encoding='latin1').item()

    def _create_model(self, model_cfg):
        """Instantiate encoders/decoders, load the pretrained checkpoint and
        switch everything to eval mode. Exits if the checkpoint is missing."""
        # set up parameters
        self.n_param = model_cfg.n_shape+model_cfg.n_tex+model_cfg.n_exp+model_cfg.n_pose+model_cfg.n_cam+model_cfg.n_light
        self.n_detail = model_cfg.n_detail
        self.n_cond = model_cfg.n_exp + 3 # exp + jaw pose
        self.num_list = [model_cfg.n_shape, model_cfg.n_tex, model_cfg.n_exp, model_cfg.n_pose, model_cfg.n_cam, model_cfg.n_light]
        self.param_dict = {i:model_cfg.get('n_' + i) for i in model_cfg.param_list}
        # encoders
        self.E_flame = ResnetEncoder(outsize=self.n_param).to(self.device)
        self.E_detail = ResnetEncoder(outsize=self.n_detail).to(self.device)
        # decoders
        self.flame = FLAME(model_cfg).to(self.device)
        if model_cfg.use_tex:
            self.flametex = FLAMETex(model_cfg).to(self.device)
        self.D_detail = Generator(latent_dim=self.n_detail+self.n_cond, out_channels=1, out_scale=model_cfg.max_z, sample_mode = 'bilinear').to(self.device)
        # resume model
        model_path = self.cfg.pretrained_modelpath
        if os.path.exists(model_path):
            print(f'trained model found. Load {model_path}')
            checkpoint = torch.load(model_path)
            self.checkpoint = checkpoint
            util.copy_state_dict(self.E_flame.state_dict(), checkpoint['E_flame'])
            util.copy_state_dict(self.E_detail.state_dict(), checkpoint['E_detail'])
            util.copy_state_dict(self.D_detail.state_dict(), checkpoint['D_detail'])
        else:
            print(f'please check model path: {model_path}')
            exit()
        # eval mode
        self.E_flame.eval()
        self.E_detail.eval()
        self.D_detail.eval()

    def decompose_code(self, code, num_dict):
        ''' Convert a flattened parameter vector to a dictionary of parameters
        code_dict.keys() = ['shape', 'tex', 'exp', 'pose', 'cam', 'light']
        '''
        code_dict = {}
        start = 0
        for key in num_dict:
            end = start+int(num_dict[key])
            code_dict[key] = code[:, start:end]
            start = end
            if key == 'light':
                # Lighting is stored as 9 spherical-harmonics RGB coefficients.
                code_dict[key] = code_dict[key].reshape(code_dict[key].shape[0], 9, 3)
        return code_dict

    def displacement2normal(self, uv_z, coarse_verts, coarse_normals):
        ''' Convert displacement map into detail normal map
        '''
        batch_size = uv_z.shape[0]
        uv_coarse_vertices = self.render.world2uv(coarse_verts).detach()
        uv_coarse_normals = self.render.world2uv(coarse_normals).detach()
        # Restrict displacement to the face/eye region, then offset the coarse
        # surface along its normals (plus a fixed per-vertex correction).
        uv_z = uv_z*self.uv_face_eye_mask
        uv_detail_vertices = uv_coarse_vertices + uv_z*uv_coarse_normals + self.fixed_uv_dis[None,None,:,:]*uv_coarse_normals.detach()
        dense_vertices = uv_detail_vertices.permute(0,2,3,1).reshape([batch_size, -1, 3])
        uv_detail_normals = util.vertex_normals(dense_vertices, self.render.dense_faces.expand(batch_size, -1, -1))
        uv_detail_normals = uv_detail_normals.reshape([batch_size, uv_coarse_vertices.shape[2], uv_coarse_vertices.shape[3], 3]).permute(0,3,1,2)
        return uv_detail_normals

    def displacement2vertex(self, uv_z, coarse_verts, coarse_normals):
        ''' Convert displacement map into detail vertices
        '''
        batch_size = uv_z.shape[0]
        uv_coarse_vertices = self.render.world2uv(coarse_verts).detach()
        uv_coarse_normals = self.render.world2uv(coarse_normals).detach()
        uv_z = uv_z*self.uv_face_eye_mask
        uv_detail_vertices = uv_coarse_vertices + uv_z*uv_coarse_normals + self.fixed_uv_dis[None,None,:,:]*uv_coarse_normals.detach()
        dense_vertices = uv_detail_vertices.permute(0,2,3,1).reshape([batch_size, -1, 3])
        detail_faces = self.render.dense_faces
        return dense_vertices, detail_faces

    def visofp(self, normals):
        ''' visibility of keypoints, based on the normal direction
        '''
        normals68 = self.flame.seletec_3d68(normals)
        # A keypoint is visible when its normal points towards the camera
        # (negative z, with a small 0.1 tolerance).
        vis68 = (normals68[:,:,2:] < 0.1).float()
        return vis68

    # NOTE(review): unlike decode_deca, encode is NOT wrapped in no_grad —
    # presumably to allow optimizing through the encoders; confirm before
    # adding the decorator back.
    def encode(self, images):
        """Run both encoders and split the flat parameter vector into a
        code dictionary (plus the detail code and the input images)."""
        batch_size = images.shape[0]
        parameters = self.E_flame(images)
        detailcode = self.E_detail(images)
        codedict = self.decompose_code(parameters, self.param_dict)
        codedict['detail'] = detailcode
        codedict['images'] = images
        return codedict

    @torch.no_grad()
    def decode_deca(self, codedict):
        """Full decode: geometry, detail normals, rendering, landmark
        projection and texture extraction. Returns (opdict, visdict)."""
        images = codedict['images']
        batch_size = images.shape[0]
        ## decode
        verts, landmarks2d, landmarks3d = self.flame(shape_params=codedict['shape'], expression_params=codedict['exp'], pose_params=codedict['pose'])
        uv_z = self.D_detail(torch.cat([codedict['pose'][:,3:], codedict['exp'], codedict['detail']], dim=1))
        if self.cfg.model.use_tex:
            albedo = self.flametex(codedict['tex'])
        else:
            albedo = torch.zeros([batch_size, 3, self.uv_size, self.uv_size], device=images.device)
        ## projection (orthographic; y is flipped, then mapped to pixel coords)
        landmarks2d = util.batch_orth_proj(landmarks2d, codedict['cam'])[:,:,:2]; landmarks2d[:,:,1:] = -landmarks2d[:,:,1:]; landmarks2d = landmarks2d*self.image_size/2 + self.image_size/2
        landmarks3d = util.batch_orth_proj(landmarks3d, codedict['cam']); landmarks3d[:,:,1:] = -landmarks3d[:,:,1:]; landmarks3d = landmarks3d*self.image_size/2 + self.image_size/2
        trans_verts = util.batch_orth_proj(verts, codedict['cam']); trans_verts[:,:,1:] = -trans_verts[:,:,1:]
        ## rendering
        ops = self.render(verts, trans_verts, albedo, codedict['light'])
        uv_detail_normals = self.displacement2normal(uv_z, verts, ops['normals'])
        uv_shading = self.render.add_SHlight(uv_detail_normals, codedict['light'])
        uv_texture = albedo*uv_shading
        landmarks3d_vis = self.visofp(ops['transformed_normals'])
        landmarks3d = torch.cat([landmarks3d, landmarks3d_vis], dim=2)
        ## render shape
        shape_images = self.render.render_shape(verts, trans_verts)
        detail_normal_images = F.grid_sample(uv_detail_normals, ops['grid'], align_corners=False)*ops['alpha_images']
        shape_detail_images = self.render.render_shape(verts, trans_verts, detail_normal_images=detail_normal_images)
        ## extract texture
        ## TODO: current resolution 256x256, support higher resolution, and add visibility
        uv_pverts = self.render.world2uv(trans_verts)
        uv_gt = F.grid_sample(images, uv_pverts.permute(0,2,3,1)[:,:,:,:2], mode='bilinear')
        if self.cfg.model.use_tex:
            ## TODO: poisson blending should give better-looking results
            uv_texture_gt = uv_gt[:,:3,:,:]*self.uv_face_eye_mask + (uv_texture[:,:3,:,:]*(1-self.uv_face_eye_mask)*0.7)
        else:
            uv_texture_gt = uv_gt[:,:3,:,:]*self.uv_face_eye_mask + (torch.ones_like(uv_gt[:,:3,:,:])*(1-self.uv_face_eye_mask)*0.7)
        ## output
        opdict = {
            'vertices': verts,
            'normals': ops['normals'],
            'transformed_vertices': trans_verts,
            'landmarks2d': landmarks2d,
            'landmarks3d': landmarks3d,
            'uv_detail_normals': uv_detail_normals,
            'uv_texture_gt': uv_texture_gt,
            'displacement_map': uv_z+self.fixed_uv_dis[None,None,:,:],
        }
        if self.cfg.model.use_tex:
            opdict['albedo'] = albedo
            opdict['uv_texture'] = uv_texture
        visdict = {
            'inputs': images,
            'landmarks2d': util.tensor_vis_landmarks(images, landmarks2d, isScale=False),
            'landmarks3d': util.tensor_vis_landmarks(images, landmarks3d, isScale=False),
            'shape_images': shape_images,
            'shape_detail_images': shape_detail_images
        }
        if self.cfg.model.use_tex:
            visdict['rendered_images'] = ops['images']
        return opdict, visdict

    def decode(self, codedict):
        """Lightweight decode: FLAME geometry plus orthographic projection of
        landmarks/vertices to pixel coordinates (no rendering)."""
        # fix: removed unused locals (`images = codedict['shape']`, batch_size)
        # that misleadingly aliased the shape code as "images".
        verts, landmarks2d, landmarks3d = self.flame(shape_params=codedict['shape'], expression_params=codedict['exp'], pose_params=codedict['pose'])
        landmarks2d = util.batch_orth_proj(landmarks2d, codedict['cam'])[:,:,:2]; landmarks2d[:,:,1:] = -landmarks2d[:,:,1:]; landmarks2d = landmarks2d*self.image_size/2 + self.image_size/2
        trans_verts = util.batch_orth_proj(verts, codedict['cam']); trans_verts[:,:,1:] = -trans_verts[:,:,1:]; trans_verts = trans_verts*self.image_size/2 + self.image_size/2
        landmarks3d = util.batch_orth_proj(landmarks3d, codedict['cam']); landmarks3d[:,:,1:] = -landmarks3d[:,:,1:]; landmarks3d = landmarks3d*self.image_size/2 + self.image_size/2
        return landmarks2d, landmarks3d, trans_verts

    def visualize(self, visdict, size=None):
        """Tile every entry of `visdict` side by side into one BGR uint8 image."""
        grids = {}
        if size is None:
            size = self.image_size
        for key in visdict:
            grids[key] = torchvision.utils.make_grid(F.interpolate(visdict[key], [size, size])).detach().cpu()
        grid = torch.cat(list(grids.values()), 2)
        # CHW float [0,1] -> HWC uint8, RGB -> BGR (for cv2-style consumers).
        grid_image = (grid.numpy().transpose(1,2,0).copy()*255)[:,:,[2,1,0]]
        grid_image = np.minimum(np.maximum(grid_image, 0), 255).astype(np.uint8)
        return grid_image

    def save_obj(self, filename, opdict):
        '''
        vertices: [nv, 3], tensor
        texture: [3, h, w], tensor
        '''
        i = 0
        vertices = opdict['vertices'][i].cpu().numpy()
        faces = self.render.faces[0].cpu().numpy()
        texture = util.tensor2image(opdict['uv_texture_gt'][i])
        uvcoords = self.render.raw_uvcoords[0].cpu().numpy()
        uvfaces = self.render.uvfaces[0].cpu().numpy()
        # save coarse mesh, with texture and normal map
        normal_map = util.tensor2image(opdict['uv_detail_normals'][i]*0.5 + 0.5)
        util.write_obj(filename, vertices, faces,
                        texture=texture,
                        uvcoords=uvcoords,
                        uvfaces=uvfaces,
                        normal_map=normal_map)
        # upsample mesh, save detailed mesh
        texture = texture[:,:,[2,1,0]]
        normals = opdict['normals'][i].cpu().numpy()
        displacement_map = opdict['displacement_map'][i].cpu().numpy().squeeze()
        dense_vertices, dense_colors, dense_faces = util.upsample_mesh(vertices, normals, faces, displacement_map, texture, self.dense_template)
        util.write_obj(filename.replace('.obj', '_detail.obj'),
                        dense_vertices,
                        dense_faces,
                        colors = dense_colors,
                        inverse_face_order=True)

    def save_ply(self, filename, opdict):
        '''
        vertices: [nv, 3], tensor
        texture: [3, h, w], tensor
        '''
        # Do not indent the template: its content is written verbatim into the
        # PLY header.
        header_temp = """ply
format ascii 1.0
element vertex {}
property float x
property float y
property float z
element face {}
property list uchar int vertex_indices
end_header
"""
        i = 0
        vertices = opdict['vertices'].squeeze(0).cpu().numpy()
        print(vertices.shape)
        faces = self.render.faces[0].cpu().numpy()
        print(faces.shape)
        n_vertex = vertices.shape[0]
        n_face = faces.shape[0]
        header = header_temp.format(n_vertex, n_face)
        print(header)
        with open(filename, 'w') as f:
            f.write(header + '\n')
            for i in range(n_vertex):
                x, y, z = vertices[i, :]
                f.write(f'{x:.2f} {y:.2f} {z:.2f}\n')
            for i in range(n_face):
                idx1, idx2, idx3 = faces[i] # m x 3
                f.write(f'3 {idx1} {idx2} {idx3}\n')
        # fix: original printed a garbled constant f-string ('Dump tp (unknown)')
        # with no placeholder; report the actual output path.
        print(f'Dump to {filename}')
StyleMask | StyleMask-master/libs/DECA/decalib/models/resnet.py | """
Author: Soubhik Sanyal
Copyright (c) 2019, Soubhik Sanyal
All rights reserved.
Loads different resnet models
"""
'''
file: Resnet.py
date: 2018_05_02
author: zhangxiong(1025679612@qq.com)
mark: copied from pytorch source code
'''
import torch.nn as nn
import torch.nn.functional as F
import torch
from torch.nn.parameter import Parameter
import torch.optim as optim
import numpy as np
import math
import torchvision
class ResNet(nn.Module):
    """Feature-extraction variant of torchvision's ResNet.

    The classifier head is omitted: forward() returns the globally pooled
    feature vector of 512 * block.expansion channels instead of class logits.
    """
    def __init__(self, block, layers, num_classes=1000):
        # `num_classes` is unused (no fc head is built); it is kept for
        # signature compatibility with the reference implementation.
        self.inplanes = 64
        super(ResNet, self).__init__()
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
        # Fixed 7x7 pooling window — assumes 224x224 inputs; TODO confirm callers.
        self.avgpool = nn.AvgPool2d(7, stride=1)
        # self.fc = nn.Linear(512 * block.expansion, num_classes)
        # He-style init for convolutions; BatchNorm starts as identity.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
    def _make_layer(self, block, planes, blocks, stride=1):
        # Build one stage: the first block may downsample spatially and/or
        # widen the residual path via a 1x1 conv shortcut.
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion),
            )
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes))
        return nn.Sequential(*layers)
    def forward(self, x):
        # Returns the pooled, flattened feature vector (x2); the pre-pool
        # feature map (x1) is computed but not returned.
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x1 = self.layer4(x)
        x2 = self.avgpool(x1)
        x2 = x2.view(x2.size(0), -1)
        # x = self.fc(x)
        ## x2: [bz, 2048] for shape
        ## x1: [bz, 2048, 7, 7] for texture
        return x2
class Bottleneck(nn.Module):
    """ResNet bottleneck residual block: 1x1 reduce -> 3x3 -> 1x1 expand (x4)."""
    expansion = 4
    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(Bottleneck, self).__init__()
        # 1x1 projection down to `planes` channels.
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        # 3x3 spatial convolution; carries the block's stride.
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        # 1x1 expansion back up to planes * 4 channels.
        self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(planes * 4)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride
    def forward(self, x):
        shortcut = x if self.downsample is None else self.downsample(x)
        y = self.relu(self.bn1(self.conv1(x)))
        y = self.relu(self.bn2(self.conv2(y)))
        y = self.bn3(self.conv3(y))
        y += shortcut
        return self.relu(y)
def conv3x3(in_planes, out_planes, stride=1):
    """Return a padded, bias-free 3x3 convolution (BN is expected to follow)."""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=1,
        bias=False,
    )
class BasicBlock(nn.Module):
    """Standard two-layer ResNet residual block (3x3 conv -> BN -> ReLU, x2)."""
    expansion = 1
    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(BasicBlock, self).__init__()
        # First 3x3 conv carries the stride; both convs are bias-free
        # because BatchNorm immediately follows each of them.
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1,
                               padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.downsample = downsample
        self.stride = stride
    def forward(self, x):
        shortcut = x if self.downsample is None else self.downsample(x)
        y = self.relu(self.bn1(self.conv1(x)))
        y = self.bn2(self.conv2(y))
        y += shortcut
        return self.relu(y)
def copy_parameter_from_resnet(model, resnet_dict):
    """Copy matching entries from a (resnet) state dict into `model` in place.

    Entries whose names are absent from `model`'s state dict are skipped
    silently; entries that fail to copy (e.g. shape mismatch) are skipped
    with a message.
    """
    cur_state_dict = model.state_dict()
    # fix: dropped the pointless `list(...)[0:None]` slice from the iteration.
    for name, param in resnet_dict.items():
        if name not in cur_state_dict:
            continue
        if isinstance(param, Parameter):
            param = param.data
        try:
            cur_state_dict[name].copy_(param)
        # fix: was a bare `except:`, which also swallowed KeyboardInterrupt
        # and SystemExit.
        except Exception:
            print(name, ' is inconsistent!')
            continue
def load_ResNet50Model():
    """Build a headless ResNet-50 and copy in torchvision's pretrained weights."""
    model = ResNet(Bottleneck, [3, 4, 6, 3])
    pretrained = torchvision.models.resnet50(pretrained=True).state_dict()
    copy_parameter_from_resnet(model, pretrained)
    return model
def load_ResNet101Model():
    """Build a headless ResNet-101 and copy in torchvision's pretrained weights."""
    model = ResNet(Bottleneck, [3, 4, 23, 3])
    pretrained = torchvision.models.resnet101(pretrained=True).state_dict()
    copy_parameter_from_resnet(model, pretrained)
    return model
def load_ResNet152Model():
    """Build a headless ResNet-152 and copy in torchvision's pretrained weights."""
    model = ResNet(Bottleneck, [3, 8, 36, 3])
    pretrained = torchvision.models.resnet152(pretrained=True).state_dict()
    copy_parameter_from_resnet(model, pretrained)
    return model
# model.load_state_dict(checkpoint['model_state_dict'])
######## Unet
class DoubleConv(nn.Module):
    """(convolution => [BN] => ReLU) * 2, preserving spatial size."""
    def __init__(self, in_channels, out_channels):
        super().__init__()
        steps = [
            nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(inplace=True),
            nn.Conv2d(out_channels, out_channels, kernel_size=3, padding=1),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(inplace=True),
        ]
        self.double_conv = nn.Sequential(*steps)
    def forward(self, x):
        return self.double_conv(x)
class Down(nn.Module):
    """Downscaling with maxpool then double conv (halves spatial size)."""
    def __init__(self, in_channels, out_channels):
        super().__init__()
        pool = nn.MaxPool2d(2)
        conv = DoubleConv(in_channels, out_channels)
        self.maxpool_conv = nn.Sequential(pool, conv)
    def forward(self, x):
        return self.maxpool_conv(x)
class Up(nn.Module):
    """Upscaling then double conv, fusing a skip connection."""
    def __init__(self, in_channels, out_channels, bilinear=True):
        super().__init__()
        # Bilinear upsampling keeps channel count; otherwise a transposed
        # conv (which halves the channels) does the upscaling.
        if bilinear:
            self.up = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
        else:
            self.up = nn.ConvTranspose2d(in_channels // 2, in_channels // 2, kernel_size=2, stride=2)
        self.conv = DoubleConv(in_channels, out_channels)
    def forward(self, x1, x2):
        x1 = self.up(x1)
        # Pad the upsampled map so its spatial size matches the skip tensor
        # (input is CHW; sizes may differ by one pixel after pooling).
        dy = x2.size()[2] - x1.size()[2]
        dx = x2.size()[3] - x1.size()[3]
        x1 = F.pad(x1, [dx // 2, dx - dx // 2,
                        dy // 2, dy - dy // 2])
        return self.conv(torch.cat([x2, x1], dim=1))
class OutConv(nn.Module):
    """Final 1x1 projection to the requested number of output channels."""
    def __init__(self, in_channels, out_channels):
        super(OutConv, self).__init__()
        self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=1)
    def forward(self, x):
        return self.conv(x)
class UNet(nn.Module):
    """Four-level U-Net returning per-position L2-normalized feature maps."""
    def __init__(self, n_channels, n_classes, bilinear=True):
        super(UNet, self).__init__()
        self.n_channels = n_channels
        self.n_classes = n_classes
        self.bilinear = bilinear
        self.inc = DoubleConv(n_channels, 64)
        self.down1 = Down(64, 128)
        self.down2 = Down(128, 256)
        self.down3 = Down(256, 512)
        self.down4 = Down(512, 512)
        self.up1 = Up(1024, 256, bilinear)
        self.up2 = Up(512, 128, bilinear)
        self.up3 = Up(256, 64, bilinear)
        self.up4 = Up(128, 64, bilinear)
        self.outc = OutConv(64, n_classes)
    def forward(self, x):
        # Contracting path — keep every scale for the skip connections.
        skips = [self.inc(x)]
        for stage in (self.down1, self.down2, self.down3, self.down4):
            skips.append(stage(skips[-1]))
        x1, x2, x3, x4, x5 = skips
        # Expanding path, fusing each skip at the matching resolution.
        y = self.up1(x5, x4)
        y = self.up2(y, x3)
        y = self.up3(y, x2)
        y = self.up4(y, x1)
        # NOTE(review): self.outc is constructed but never applied here — the
        # forward pass returns 64-channel normalized features; confirm intent.
        return F.normalize(y)
StyleMask | StyleMask-master/libs/DECA/decalib/models/lbs.py | # -*- coding: utf-8 -*-
# Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is
# holder of all proprietary rights on this computer program.
# You can only use this computer program if you have closed
# a license agreement with MPG or you get the right to use the computer
# program from someone who is authorized to grant you that right.
# Any use of the computer program without a valid license is prohibited and
# liable to prosecution.
#
# Copyright©2019 Max-Planck-Gesellschaft zur Förderung
# der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute
# for Intelligent Systems. All rights reserved.
#
# Contact: ps-license@tuebingen.mpg.de
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import numpy as np
import torch
import torch.nn.functional as F
def rot_mat_to_euler(rot_mats):
    """Extract the rotation angle about the y-axis (radians) from a batch of
    3x3 rotation matrices. Degenerate Euler configurations such as
    [0.0, pi, 0.0] are not handled specially."""
    r00 = rot_mats[:, 0, 0]
    r10 = rot_mats[:, 1, 0]
    r20 = rot_mats[:, 2, 0]
    sy = torch.sqrt(r00 * r00 + r10 * r10)
    return torch.atan2(-r20, sy)
def find_dynamic_lmk_idx_and_bcoords(vertices, pose, dynamic_lmk_faces_idx,
                                     dynamic_lmk_b_coords,
                                     neck_kin_chain, dtype=torch.float32):
    ''' Compute the faces, barycentric coordinates for the dynamic landmarks
    To do so, we first compute the rotation of the neck around the y-axis
    and then use a pre-computed look-up table to find the faces and the
    barycentric coordinates that will be used.
    Special thanks to Soubhik Sanyal (soubhik.sanyal@tuebingen.mpg.de)
    for providing the original TensorFlow implementation and for the LUT.
    Parameters
    ----------
    vertices: torch.tensor BxVx3, dtype = torch.float32
        The tensor of input vertices
    pose: torch.tensor Bx(Jx3), dtype = torch.float32
        The current pose of the body model
    dynamic_lmk_faces_idx: torch.tensor L, dtype = torch.long
        The look-up table from neck rotation to faces
    dynamic_lmk_b_coords: torch.tensor Lx3, dtype = torch.float32
        The look-up table from neck rotation to barycentric coordinates
    neck_kin_chain: list
        A python list that contains the indices of the joints that form the
        kinematic chain of the neck.
    dtype: torch.dtype, optional
    Returns
    -------
    dyn_lmk_faces_idx: torch.tensor, dtype = torch.long
        A tensor of size BxL that contains the indices of the faces that
        will be used to compute the current dynamic landmarks.
    dyn_lmk_b_coords: torch.tensor, dtype = torch.float32
        A tensor of size BxL that contains the indices of the faces that
        will be used to compute the current dynamic landmarks.
    '''
    batch_size = vertices.shape[0]
    # Gather the axis-angle poses of the neck chain joints only.
    aa_pose = torch.index_select(pose.view(batch_size, -1, 3), 1,
                                 neck_kin_chain)
    rot_mats = batch_rodrigues(
        aa_pose.view(-1, 3), dtype=dtype).view(batch_size, -1, 3, 3)
    rel_rot_mat = torch.eye(3, device=vertices.device,
                            dtype=dtype).unsqueeze_(dim=0)
    # Accumulate the rotations along the kinematic chain.
    for idx in range(len(neck_kin_chain)):
        rel_rot_mat = torch.bmm(rot_mats[:, idx], rel_rot_mat)
    # Convert the y-rotation to degrees and clamp to the LUT's upper bound.
    y_rot_angle = torch.round(
        torch.clamp(-rot_mat_to_euler(rel_rot_mat) * 180.0 / np.pi,
                    max=39)).to(dtype=torch.long)
    # Negative angles index the second half of the 79-entry LUT; angles below
    # -39 saturate at entry 78.
    neg_mask = y_rot_angle.lt(0).to(dtype=torch.long)
    mask = y_rot_angle.lt(-39).to(dtype=torch.long)
    neg_vals = mask * 78 + (1 - mask) * (39 - y_rot_angle)
    y_rot_angle = (neg_mask * neg_vals +
                   (1 - neg_mask) * y_rot_angle)
    dyn_lmk_faces_idx = torch.index_select(dynamic_lmk_faces_idx,
                                           0, y_rot_angle)
    dyn_lmk_b_coords = torch.index_select(dynamic_lmk_b_coords,
                                          0, y_rot_angle)
    return dyn_lmk_faces_idx, dyn_lmk_b_coords
def vertices2landmarks(vertices, faces, lmk_faces_idx, lmk_bary_coords):
    ''' Calculates landmarks by barycentric interpolation
    Parameters
    ----------
    vertices: torch.tensor BxVx3, dtype = torch.float32
        The tensor of input vertices
    faces: torch.tensor Fx3, dtype = torch.long
        The faces of the mesh
    lmk_faces_idx: torch.tensor L, dtype = torch.long
        The tensor with the indices of the faces used to calculate the
        landmarks.
    lmk_bary_coords: torch.tensor Lx3, dtype = torch.float32
        The tensor of barycentric coordinates that are used to interpolate
        the landmarks
    Returns
    -------
    landmarks: torch.tensor BxLx3, dtype = torch.float32
        The coordinates of the landmarks for each mesh in the batch
    '''
    batch_size, num_verts = vertices.shape[:2]
    device = vertices.device
    # Vertex indices of each landmark's triangle, per batch element: BxLx3.
    lmk_faces = torch.index_select(faces, 0, lmk_faces_idx.view(-1)).view(
        batch_size, -1, 3)
    # Offset indices so that a flattened (B*V, 3) vertex buffer can be used.
    offsets = torch.arange(
        batch_size, dtype=torch.long, device=device) * num_verts
    lmk_faces = lmk_faces + offsets.view(-1, 1, 1)
    lmk_vertices = vertices.reshape(-1, 3)[lmk_faces].view(
        batch_size, -1, 3, 3)
    # Barycentric combination of the three corner vertices.
    return torch.einsum('blfi,blf->bli', [lmk_vertices, lmk_bary_coords])
def lbs(betas, pose, v_template, shapedirs, posedirs, J_regressor, parents,
        lbs_weights, pose2rot=True, dtype=torch.float32):
    ''' Performs Linear Blend Skinning with the given shape and pose parameters
    Parameters
    ----------
    betas : torch.tensor BxNB
        The tensor of shape parameters
    pose : torch.tensor Bx(J + 1) * 3
        The pose parameters in axis-angle format
    v_template torch.tensor BxVx3
        The template mesh that will be deformed
    shapedirs : torch.tensor 1xNB
        The tensor of PCA shape displacements
    posedirs : torch.tensor Px(V * 3)
        The pose PCA coefficients
    J_regressor : torch.tensor JxV
        The regressor array that is used to calculate the joints from
        the position of the vertices
    parents: torch.tensor J
        The array that describes the kinematic tree for the model
    lbs_weights: torch.tensor N x V x (J + 1)
        The linear blend skinning weights that represent how much the
        rotation matrix of each part affects each vertex
    pose2rot: bool, optional
        Flag on whether to convert the input pose tensor to rotation
        matrices. The default value is True. If False, then the pose tensor
        should already contain rotation matrices and have a size of
        Bx(J + 1)x9
    dtype: torch.dtype, optional
    Returns
    -------
    verts: torch.tensor BxVx3
        The vertices of the mesh after applying the shape and pose
        displacements.
    joints: torch.tensor BxJx3
        The joints of the model
    '''
    batch_size = max(betas.shape[0], pose.shape[0])
    device = betas.device
    # Add shape contribution
    v_shaped = v_template + blend_shapes(betas, shapedirs)
    # Get the joints
    # NxJx3 array
    J = vertices2joints(J_regressor, v_shaped)
    # 3. Add pose blend shapes
    # N x J x 3 x 3
    ident = torch.eye(3, dtype=dtype, device=device)
    if pose2rot:
        rot_mats = batch_rodrigues(
            pose.view(-1, 3), dtype=dtype).view([batch_size, -1, 3, 3])
        # Pose features are the non-root rotations relative to identity.
        pose_feature = (rot_mats[:, 1:, :, :] - ident).view([batch_size, -1])
        # (N x P) x (P, V * 3) -> N x V x 3
        pose_offsets = torch.matmul(pose_feature, posedirs) \
            .view(batch_size, -1, 3)
    else:
        pose_feature = pose[:, 1:].view(batch_size, -1, 3, 3) - ident
        rot_mats = pose.view(batch_size, -1, 3, 3)
        pose_offsets = torch.matmul(pose_feature.view(batch_size, -1),
                                    posedirs).view(batch_size, -1, 3)
    v_posed = pose_offsets + v_shaped
    # 4. Get the global joint location
    J_transformed, A = batch_rigid_transform(rot_mats, J, parents, dtype=dtype)
    # 5. Do skinning:
    # W is N x V x (J + 1)
    W = lbs_weights.unsqueeze(dim=0).expand([batch_size, -1, -1])
    # (N x V x (J + 1)) x (N x (J + 1) x 16)
    num_joints = J_regressor.shape[0]
    # Per-vertex 4x4 transform = skinning-weighted blend of joint transforms.
    T = torch.matmul(W, A.view(batch_size, num_joints, 16)) \
        .view(batch_size, -1, 4, 4)
    homogen_coord = torch.ones([batch_size, v_posed.shape[1], 1],
                               dtype=dtype, device=device)
    v_posed_homo = torch.cat([v_posed, homogen_coord], dim=2)
    v_homo = torch.matmul(T, torch.unsqueeze(v_posed_homo, dim=-1))
    verts = v_homo[:, :, :3, 0]
    return verts, J_transformed
def vertices2joints(J_regressor, vertices):
    ''' Calculates the 3D joint locations from the vertices
    Parameters
    ----------
    J_regressor : torch.tensor JxV
        The regressor array that is used to calculate the joints from the
        position of the vertices
    vertices : torch.tensor BxVx3
        The tensor of mesh vertices
    Returns
    -------
    torch.tensor BxJx3
        The location of the joints
    '''
    # (J, V) @ (B, V, 3) broadcasts to (B, J, 3); this is the same
    # contraction as einsum('bik,ji->bjk', vertices, J_regressor).
    return torch.matmul(J_regressor, vertices)
def blend_shapes(betas, shape_disps):
    ''' Calculates the per vertex displacement due to the blend shapes
    Parameters
    ----------
    betas : torch.tensor Bx(num_betas)
        Blend shape coefficients
    shape_disps: torch.tensor Vx3x(num_betas)
        Blend shapes
    Returns
    -------
    torch.tensor BxVx3
        The per-vertex displacement due to shape deformation
    '''
    # Contract the beta axis:
    # Displacement[b, m, k] = sum_{l} betas[b, l] * shape_disps[m, k, l]
    return torch.einsum('mkl,bl->bmk', [shape_disps, betas])
def batch_rodrigues(rot_vecs, epsilon=1e-8, dtype=torch.float32):
    ''' Calculates the rotation matrices for a batch of rotation vectors
    Parameters
    ----------
    rot_vecs: torch.tensor Nx3
        array of N axis-angle vectors
    epsilon: float, optional
        Small offset added before taking the norm so (near-)zero rotation
        vectors do not cause a division by zero.
    dtype: torch.dtype, optional
        dtype used for the intermediate tensors
    Returns
    -------
    R: torch.tensor Nx3x3
        The rotation matrices for the given axis-angle parameters
    '''
    batch_size = rot_vecs.shape[0]
    device = rot_vecs.device
    # fix: honor the `epsilon` argument (previously a hard-coded 1e-8,
    # making the parameter dead).
    angle = torch.norm(rot_vecs + epsilon, dim=1, keepdim=True)
    rot_dir = rot_vecs / angle
    cos = torch.unsqueeze(torch.cos(angle), dim=1)
    sin = torch.unsqueeze(torch.sin(angle), dim=1)
    # Build the skew-symmetric cross-product matrix K for each axis.
    # (The original also pre-allocated K with torch.zeros and immediately
    # overwrote it; that dead assignment is removed.)
    rx, ry, rz = torch.split(rot_dir, 1, dim=1)
    zeros = torch.zeros((batch_size, 1), dtype=dtype, device=device)
    K = torch.cat([zeros, -rz, ry, rz, zeros, -rx, -ry, rx, zeros], dim=1) \
        .view((batch_size, 3, 3))
    ident = torch.eye(3, dtype=dtype, device=device).unsqueeze(dim=0)
    # Rodrigues' formula: R = I + sin(a) K + (1 - cos(a)) K^2
    rot_mat = ident + sin * K + (1 - cos) * torch.bmm(K, K)
    return rot_mat
def transform_mat(R, t):
    ''' Creates a batch of transformation matrices
    Args:
        - R: Bx3x3 array of a batch of rotation matrices
        - t: Bx3x1 array of a batch of translation vectors
    Returns:
        - T: Bx4x4 Transformation matrix
    '''
    # Append a zero row under R and a [.., 1] row under t, then place the
    # translation as the last column of the homogeneous matrix.
    rot_part = F.pad(R, [0, 0, 0, 1])
    trans_part = F.pad(t, [0, 0, 0, 1], value=1)
    return torch.cat((rot_part, trans_part), dim=2)
def batch_rigid_transform(rot_mats, joints, parents, dtype=torch.float32):
    """
    Applies a batch of rigid transformations to the joints

    Parameters
    ----------
    rot_mats : torch.tensor BxNx3x3
        Tensor of rotation matrices
    joints : torch.tensor BxNx3
        Locations of joints
    parents : torch.tensor BxN
        The kinematic tree of each object
    dtype : torch.dtype, optional:
        The data type of the created tensors, the default is torch.float32

    Returns
    -------
    posed_joints : torch.tensor BxNx3
        The locations of the joints after applying the pose rotations
    rel_transforms : torch.tensor BxNx4x4
        The relative (with respect to the root joint) rigid transformations
        for all the joints
    """
    joints = torch.unsqueeze(joints, dim=-1)

    # Express every joint relative to its parent; the root (index 0) stays.
    rel_joints = joints.clone()
    rel_joints[:, 1:] -= joints[:, parents[1:]]

    # One 4x4 local transform per joint: rotation + parent-relative offset.
    transforms_mat = transform_mat(
        rot_mats.view(-1, 3, 3),
        rel_joints.reshape(-1, 3, 1)).reshape(-1, joints.shape[1], 4, 4)

    # Walk the kinematic tree, accumulating each joint's world transform
    # from its parent's (parents must be topologically ordered).
    transform_chain = [transforms_mat[:, 0]]
    for i in range(1, parents.shape[0]):
        curr_res = torch.matmul(transform_chain[parents[i]],
                                transforms_mat[:, i])
        transform_chain.append(curr_res)

    transforms = torch.stack(transform_chain, dim=1)

    # The translation column of each world transform is the posed joint.
    # (FIX: this was previously computed twice on consecutive lines.)
    posed_joints = transforms[:, :, :3, 3]

    # Make the transforms relative to the rest pose: subtract the rotated
    # rest-pose joint locations. The homogeneous coordinate is padded with
    # 0 (not 1) so the translation column itself is untouched.
    joints_homogen = F.pad(joints, [0, 0, 0, 1])
    rel_transforms = transforms - F.pad(
        torch.matmul(transforms, joints_homogen), [3, 0, 0, 0, 0, 0, 0, 0])

    return posed_joints, rel_transforms
StyleMask | StyleMask-master/libs/DECA/decalib/models/FLAME.py | # -*- coding: utf-8 -*-
#
# Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is
# holder of all proprietary rights on this computer program.
# Using this computer program means that you agree to the terms
# in the LICENSE file included with this software distribution.
# Any use not explicitly granted by the LICENSE is prohibited.
#
# Copyright©2019 Max-Planck-Gesellschaft zur Förderung
# der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute
# for Intelligent Systems. All rights reserved.
#
# For comments or questions, please email us at deca@tue.mpg.de
# For commercial licensing contact, please contact ps-license@tuebingen.mpg.de
import torch
import torch.nn as nn
import numpy as np
import pickle
import torch.nn.functional as F
from .lbs import lbs, batch_rodrigues, vertices2landmarks, rot_mat_to_euler
def to_tensor(array, dtype=torch.float32):
    """Convert *array* to a torch tensor of the given dtype.

    Tensors are passed through (cast to *dtype* if needed); anything else
    (numpy arrays, lists, ...) is converted with ``torch.tensor``.

    FIX: the original string-based check (``'torch.tensor'`` vs the actual
    ``'torch.Tensor'``) never matched, and had it matched the function
    would have fallen through and returned None.
    """
    if isinstance(array, torch.Tensor):
        return array.to(dtype)
    return torch.tensor(array, dtype=dtype)
def to_np(array, dtype=np.float32):
    """Return *array* as a dense numpy array of the requested dtype.

    scipy sparse matrices are densified first; everything else is handed
    straight to ``np.array``.
    """
    dense = array.todense() if 'scipy.sparse' in str(type(array)) else array
    return np.array(dense, dtype=dtype)
class Struct(object):
    """Lightweight attribute bag: ``Struct(a=1).a == 1``.

    Used to expose the keys of the pickled FLAME model dict as attributes.
    """

    def __init__(self, **kwargs):
        self.__dict__.update(kwargs)
class FLAME(nn.Module):
    """
    borrowed from https://github.com/soubhiksanyal/FLAME_PyTorch/blob/master/FLAME.py
    Given flame parameters this class generates a differentiable FLAME function
    which outputs a mesh and 2D/3D facial landmarks
    """
    def __init__(self, config):
        super(FLAME, self).__init__()
        print("creating the FLAME Decoder")
        # SECURITY NOTE: pickle.load executes arbitrary code; only load
        # trusted FLAME model files.
        with open(config.flame_model_path, 'rb') as f:
            ss = pickle.load(f, encoding='latin1')
            flame_model = Struct(**ss)

        self.dtype = torch.float32
        self.register_buffer('faces_tensor', to_tensor(to_np(flame_model.f, dtype=np.int64), dtype=torch.long))
        # The vertices of the template model
        self.register_buffer('v_template', to_tensor(to_np(flame_model.v_template), dtype=self.dtype))
        # The shape components and expression: columns [0, n_shape) carry
        # identity shape, columns [300, 300 + n_exp) carry expression.
        shapedirs = to_tensor(to_np(flame_model.shapedirs), dtype=self.dtype)
        shapedirs = torch.cat([shapedirs[:, :, :config.n_shape], shapedirs[:, :, 300:300 + config.n_exp]], 2)
        self.register_buffer('shapedirs', shapedirs)
        # The pose-dependent corrective blend shapes, flattened per basis.
        num_pose_basis = flame_model.posedirs.shape[-1]
        posedirs = np.reshape(flame_model.posedirs, [-1, num_pose_basis]).T
        self.register_buffer('posedirs', to_tensor(to_np(posedirs), dtype=self.dtype))
        # Joint regressor and the kinematic tree (root's parent is -1).
        self.register_buffer('J_regressor', to_tensor(to_np(flame_model.J_regressor), dtype=self.dtype))
        parents = to_tensor(to_np(flame_model.kintree_table[0])).long()
        parents[0] = -1
        self.register_buffer('parents', parents)
        self.register_buffer('lbs_weights', to_tensor(to_np(flame_model.weights), dtype=self.dtype))

        # Fixed (non-trainable) eyeball and neck rotations, used as
        # defaults when the caller does not supply them.
        default_eyball_pose = torch.zeros([1, 6], dtype=self.dtype, requires_grad=False)
        self.register_parameter('eye_pose', nn.Parameter(default_eyball_pose,
                                                         requires_grad=False))
        default_neck_pose = torch.zeros([1, 3], dtype=self.dtype, requires_grad=False)
        self.register_parameter('neck_pose', nn.Parameter(default_neck_pose,
                                                          requires_grad=False))

        # Static and Dynamic Landmark embeddings for FLAME
        lmk_embeddings = np.load(config.flame_lmk_embedding_path, allow_pickle=True, encoding='latin1')
        lmk_embeddings = lmk_embeddings[()]
        self.register_buffer('lmk_faces_idx', torch.from_numpy(lmk_embeddings['static_lmk_faces_idx']).long())
        self.register_buffer('lmk_bary_coords', torch.from_numpy(lmk_embeddings['static_lmk_bary_coords']).to(self.dtype))
        self.register_buffer('dynamic_lmk_faces_idx', lmk_embeddings['dynamic_lmk_faces_idx'].long())
        self.register_buffer('dynamic_lmk_bary_coords', lmk_embeddings['dynamic_lmk_bary_coords'].to(self.dtype))
        self.register_buffer('full_lmk_faces_idx', torch.from_numpy(lmk_embeddings['full_lmk_faces_idx']).long())
        self.register_buffer('full_lmk_bary_coords', torch.from_numpy(lmk_embeddings['full_lmk_bary_coords']).to(self.dtype))

        # Chain of joints from the neck up to the root, used to compute the
        # head rotation that selects the dynamic contour landmarks.
        neck_kin_chain = []
        NECK_IDX = 1
        curr_idx = torch.tensor(NECK_IDX, dtype=torch.long)
        while curr_idx != -1:
            neck_kin_chain.append(curr_idx)
            curr_idx = self.parents[curr_idx]
        self.register_buffer('neck_kin_chain', torch.stack(neck_kin_chain))

    def _find_dynamic_lmk_idx_and_bcoords(self, pose, dynamic_lmk_faces_idx,
                                          dynamic_lmk_b_coords,
                                          neck_kin_chain, dtype=torch.float32):
        """
        Selects the face contour depending on the relative position of the head
        Input:
            vertices: N X num_of_vertices X 3
            pose: N X full pose
            dynamic_lmk_faces_idx: The list of contour face indexes
            dynamic_lmk_b_coords: The list of contour barycentric weights
            neck_kin_chain: The tree to consider for the relative rotation
            dtype: Data type
        return:
            The contour face indexes and the corresponding barycentric weights
        """
        batch_size = pose.shape[0]

        # Compose the rotations along the neck chain to obtain the head
        # rotation relative to the root.
        aa_pose = torch.index_select(pose.view(batch_size, -1, 3), 1,
                                     neck_kin_chain)
        rot_mats = batch_rodrigues(
            aa_pose.view(-1, 3), dtype=dtype).view(batch_size, -1, 3, 3)

        rel_rot_mat = torch.eye(3, device=pose.device,
                                dtype=dtype).unsqueeze_(dim=0).expand(batch_size, -1, -1)
        for idx in range(len(neck_kin_chain)):
            rel_rot_mat = torch.bmm(rot_mats[:, idx], rel_rot_mat)

        # Quantise the yaw angle to a lookup index in [-39, 39] degrees
        # (negative angles are mapped to the upper half of the table).
        y_rot_angle = torch.round(
            torch.clamp(rot_mat_to_euler(rel_rot_mat) * 180.0 / np.pi,
                        max=39)).to(dtype=torch.long)

        neg_mask = y_rot_angle.lt(0).to(dtype=torch.long)
        mask = y_rot_angle.lt(-39).to(dtype=torch.long)
        neg_vals = mask * 78 + (1 - mask) * (39 - y_rot_angle)
        y_rot_angle = (neg_mask * neg_vals +
                       (1 - neg_mask) * y_rot_angle)

        dyn_lmk_faces_idx = torch.index_select(dynamic_lmk_faces_idx,
                                               0, y_rot_angle)
        dyn_lmk_b_coords = torch.index_select(dynamic_lmk_b_coords,
                                              0, y_rot_angle)
        return dyn_lmk_faces_idx, dyn_lmk_b_coords

    def _vertices2landmarks(self, vertices, faces, lmk_faces_idx, lmk_bary_coords):
        """
        Calculates landmarks by barycentric interpolation
        Input:
            vertices: torch.tensor NxVx3, dtype = torch.float32
                The tensor of input vertices
            faces: torch.tensor (N*F)x3, dtype = torch.long
                The faces of the mesh
            lmk_faces_idx: torch.tensor N X L, dtype = torch.long
                The tensor with the indices of the faces used to calculate the
                landmarks.
            lmk_bary_coords: torch.tensor N X L X 3, dtype = torch.float32
                The tensor of barycentric coordinates that are used to interpolate
                the landmarks
        Returns:
            landmarks: torch.tensor NxLx3, dtype = torch.float32
                The coordinates of the landmarks for each mesh in the batch
        """
        # Extract the indices of the vertices for each face
        # NxLx3
        # FIX: was `vertices.shape[:dd2]` with `dd2` undefined (NameError).
        batch_size, num_verts = vertices.shape[:2]
        lmk_faces = torch.index_select(faces, 0, lmk_faces_idx.view(-1)).view(
            1, -1, 3).view(batch_size, lmk_faces_idx.shape[1], -1)

        # Offset each batch item's indices into the flattened vertex array.
        lmk_faces += torch.arange(batch_size, dtype=torch.long).view(-1, 1, 1).to(
            device=vertices.device) * num_verts

        lmk_vertices = vertices.view(-1, 3)[lmk_faces]
        landmarks = torch.einsum('blfi,blf->bli', [lmk_vertices, lmk_bary_coords])
        return landmarks

    def seletec_3d68(self, vertices):
        # NOTE: the misspelled name ("seletec") is kept for API compatibility
        # with existing callers.
        landmarks3d = vertices2landmarks(vertices, self.faces_tensor,
                                         self.full_lmk_faces_idx.repeat(vertices.shape[0], 1),
                                         self.full_lmk_bary_coords.repeat(vertices.shape[0], 1, 1))
        return landmarks3d

    def forward(self, shape_params=None, expression_params=None, pose_params=None, eye_pose_params=None):
        """
        Input:
            shape_params: N X number of shape parameters
            expression_params: N X number of expression parameters
            pose_params: N X number of pose parameters (6)
        return:
            vertices: N X V X 3
            landmarks: N X number of landmarks X 3
        """
        batch_size = shape_params.shape[0]
        if eye_pose_params is None:
            eye_pose_params = self.eye_pose.expand(batch_size, -1)

        # Full pose = global rotation + fixed neck + jaw + eyes.
        betas = torch.cat([shape_params, expression_params], dim=1)
        full_pose = torch.cat([pose_params[:, :3], self.neck_pose.expand(batch_size, -1), pose_params[:, 3:], eye_pose_params], dim=1)
        template_vertices = self.v_template.unsqueeze(0).expand(batch_size, -1, -1)

        vertices, _ = lbs(betas, full_pose, template_vertices,
                          self.shapedirs, self.posedirs,
                          self.J_regressor, self.parents,
                          self.lbs_weights, dtype=self.dtype)

        # 2D landmarks: static embedding plus the pose-dependent contour.
        lmk_faces_idx = self.lmk_faces_idx.unsqueeze(dim=0).expand(batch_size, -1)
        lmk_bary_coords = self.lmk_bary_coords.unsqueeze(dim=0).expand(batch_size, -1, -1)

        dyn_lmk_faces_idx, dyn_lmk_bary_coords = self._find_dynamic_lmk_idx_and_bcoords(
            full_pose, self.dynamic_lmk_faces_idx,
            self.dynamic_lmk_bary_coords,
            self.neck_kin_chain, dtype=self.dtype)
        lmk_faces_idx = torch.cat([dyn_lmk_faces_idx, lmk_faces_idx], 1)
        lmk_bary_coords = torch.cat([dyn_lmk_bary_coords, lmk_bary_coords], 1)

        landmarks2d = vertices2landmarks(vertices, self.faces_tensor,
                                         lmk_faces_idx,
                                         lmk_bary_coords)
        bz = vertices.shape[0]
        landmarks3d = vertices2landmarks(vertices, self.faces_tensor,
                                         self.full_lmk_faces_idx.repeat(bz, 1),
                                         self.full_lmk_bary_coords.repeat(bz, 1, 1))
        return vertices, landmarks2d, landmarks3d
class FLAMETex(nn.Module):
    """
    FLAME texture:
    https://github.com/TimoBolkart/TF_FLAME/blob/ade0ab152300ec5f0e8555d6765411555c5ed43d/sample_texture.py#L64
    FLAME texture converted from BFM:
    https://github.com/TimoBolkart/BFM_to_FLAME
    """
    def __init__(self, config):
        super(FLAMETex, self).__init__()
        if config.tex_type == 'BFM':
            mu_key = 'MU'
            pc_key = 'PC'
            n_pc = 199
            tex_path = config.tex_path
            tex_space = np.load(tex_path)
            texture_mean = tex_space[mu_key].reshape(1, -1)
            texture_basis = tex_space[pc_key].reshape(-1, n_pc)

        elif config.tex_type == 'FLAME':
            mu_key = 'mean'
            pc_key = 'tex_dir'
            n_pc = 200
            tex_path = config.flame_tex_path
            tex_space = np.load(tex_path)
            # FLAME texture data is stored in [0, 255]; normalise to [0, 1].
            texture_mean = tex_space[mu_key].reshape(1, -1) / 255.
            texture_basis = tex_space[pc_key].reshape(-1, n_pc) / 255.

        else:
            # FIX: previously printed a message and raised a bare
            # NotImplementedError; the message now travels with the exception.
            raise NotImplementedError(
                'texture type {} does not exist!'.format(config.tex_type))

        # Keep only the first n_tex principal components.
        # (An unused `num_components` variable has been removed.)
        n_tex = config.n_tex
        texture_mean = torch.from_numpy(texture_mean).float()[None, ...]
        texture_basis = torch.from_numpy(texture_basis[:, :n_tex]).float()[None, ...]
        self.register_buffer('texture_mean', texture_mean)
        self.register_buffer('texture_basis', texture_basis)

    def forward(self, texcode):
        '''
        texcode: [batchsize, n_tex]
        texture: [bz, 3, 256, 256], range: 0-1
        '''
        # mean + basis @ code, then unflatten to a 512x512x3 texture map.
        texture = self.texture_mean + (self.texture_basis * texcode[:, None, :]).sum(-1)
        texture = texture.reshape(texcode.shape[0], 512, 512, 3).permute(0, 3, 1, 2)
        texture = F.interpolate(texture, [256, 256])
        texture = texture[:, [2, 1, 0], :, :]  # swap channel order (BGR <-> RGB)
        return texture
StyleMask | StyleMask-master/libs/DECA/decalib/models/decoders.py | # -*- coding: utf-8 -*-
#
# Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is
# holder of all proprietary rights on this computer program.
# Using this computer program means that you agree to the terms
# in the LICENSE file included with this software distribution.
# Any use not explicitly granted by the LICENSE is prohibited.
#
# Copyright©2019 Max-Planck-Gesellschaft zur Förderung
# der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute
# for Intelligent Systems. All rights reserved.
#
# For comments or questions, please email us at deca@tue.mpg.de
# For commercial licensing contact, please contact ps-license@tuebingen.mpg.de
import torch
import torch.nn as nn
class Generator(nn.Module):
    """DCGAN-style generator: projects a latent vector to an 8x8x128
    feature map, then upsamples through five Upsample+Conv stages
    (8 -> 16 -> 32 -> 64 -> 128 -> 256) to a ``out_channels``-channel image.

    The output passes through Tanh and is multiplied by ``out_scale``
    (default 0.01), so values lie in [-out_scale, out_scale].
    """
    def __init__(self, latent_dim=100, out_channels=1, out_scale=0.01, sample_mode = 'bilinear'):
        super(Generator, self).__init__()
        self.out_scale = out_scale

        self.init_size = 32 // 4  # Initial size before upsampling
        self.l1 = nn.Sequential(nn.Linear(latent_dim, 128 * self.init_size ** 2))
        # NOTE(review): the second positional argument to BatchNorm2d (0.8)
        # lands on the `eps` parameter; likely intended as `momentum` —
        # confirm before changing, as it affects trained checkpoints.
        self.conv_blocks = nn.Sequential(
            nn.BatchNorm2d(128),
            nn.Upsample(scale_factor=2, mode=sample_mode), #16
            nn.Conv2d(128, 128, 3, stride=1, padding=1),
            nn.BatchNorm2d(128, 0.8),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Upsample(scale_factor=2, mode=sample_mode), #32
            nn.Conv2d(128, 64, 3, stride=1, padding=1),
            nn.BatchNorm2d(64, 0.8),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Upsample(scale_factor=2, mode=sample_mode), #64
            nn.Conv2d(64, 64, 3, stride=1, padding=1),
            nn.BatchNorm2d(64, 0.8),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Upsample(scale_factor=2, mode=sample_mode), #128
            nn.Conv2d(64, 32, 3, stride=1, padding=1),
            nn.BatchNorm2d(32, 0.8),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Upsample(scale_factor=2, mode=sample_mode), #256
            nn.Conv2d(32, 16, 3, stride=1, padding=1),
            nn.BatchNorm2d(16, 0.8),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Conv2d(16, out_channels, 3, stride=1, padding=1),
            nn.Tanh(),
        )

    def forward(self, noise):
        # Linear projection -> (B, 128, 8, 8) -> upsampling conv stack.
        out = self.l1(noise)
        out = out.view(out.shape[0], 128, self.init_size, self.init_size)
        img = self.conv_blocks(out)
        return img*self.out_scale
StyleMask | StyleMask-master/libs/DECA/decalib/models/encoders.py | # -*- coding: utf-8 -*-
#
# Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is
# holder of all proprietary rights on this computer program.
# Using this computer program means that you agree to the terms
# in the LICENSE file included with this software distribution.
# Any use not explicitly granted by the LICENSE is prohibited.
#
# Copyright©2019 Max-Planck-Gesellschaft zur Förderung
# der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute
# for Intelligent Systems. All rights reserved.
#
# For comments or questions, please email us at deca@tue.mpg.de
# For commercial licensing contact, please contact ps-license@tuebingen.mpg.de
import numpy as np
import torch.nn as nn
import torch
import torch.nn.functional as F
from . import resnet
class ResnetEncoder(nn.Module):
    """ResNet-50 image encoder followed by a two-layer MLP regressor.

    Maps an input image batch to ``outsize`` parameters. If ``last_op``
    (a callable, e.g. an activation module) is given, it is applied to
    the regressed output.
    """
    def __init__(self, outsize, last_op=None):
        super(ResnetEncoder, self).__init__()
        feature_size = 2048
        self.encoder = resnet.load_ResNet50Model() #out: 2048
        ### regressor
        self.layers = nn.Sequential(
            nn.Linear(feature_size, 1024),
            nn.ReLU(),
            nn.Linear(1024, outsize)
        )
        self.last_op = last_op

    def forward(self, inputs):
        # image batch -> 2048-d features -> regressed parameters
        features = self.encoder(inputs)
        parameters = self.layers(features)
        if self.last_op:
            parameters = self.last_op(parameters)
        return parameters
| 1,424 | 33.756098 | 78 | py |
StyleMask | StyleMask-master/libs/DECA/decalib/datasets/detectors.py | # -*- coding: utf-8 -*-
#
# Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is
# holder of all proprietary rights on this computer program.
# Using this computer program means that you agree to the terms
# in the LICENSE file included with this software distribution.
# Any use not explicitly granted by the LICENSE is prohibited.
#
# Copyright©2019 Max-Planck-Gesellschaft zur Förderung
# der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute
# for Intelligent Systems. All rights reserved.
#
# For comments or questions, please email us at deca@tue.mpg.de
# For commercial licensing contact, please contact ps-license@tuebingen.mpg.de
import numpy as np
import torch
# from libs.pose_estimation.fan_model.models import FAN, ResNetDepth
# from libs.pose_estimation.fan_model.utils import *
from enum import Enum
# from libs.pose_estimation.sfd.sfd_detector import SFDDetector as FaceDetector
class FAN(object):
    """Face "detector" backed by the face_alignment FAN landmark model.

    The bounding box is derived from the extremes of the detected 68
    2D landmarks rather than from a dedicated box detector.
    """

    def __init__(self):
        import face_alignment
        self.model = face_alignment.FaceAlignment(
            face_alignment.LandmarksType._2D, flip_input=False)

    def run(self, image):
        '''
        image: 0-255, uint8, rgb, [h, w, 3]
        return: detected box list
        '''
        landmarks = self.model.get_landmarks(image)
        if landmarks is None:
            return [0], 'error'
        kpt = landmarks[0].squeeze()
        xs, ys = kpt[:, 0], kpt[:, 1]
        # [left, top, right, bottom] from the landmark extremes.
        bbox = [np.min(xs), np.min(ys), np.max(xs), np.max(ys)]
        return bbox, 'kpt68'
class MTCNN(object):
    """Thin wrapper around facenet-pytorch's MTCNN face detector."""

    def __init__(self, device = 'cpu'):
        '''
        https://github.com/timesler/facenet-pytorch/blob/master/examples/infer.ipynb
        '''
        from facenet_pytorch import MTCNN as mtcnn
        self.device = device
        self.model = mtcnn(keep_all=True)

    def run(self, input):
        '''
        image: 0-255, uint8, rgb, [h, w, 3]
        return: detected box
        '''
        boxes = self.model.detect(input[None, ...])[0][0]
        if boxes is None:
            # NOTE: the failure path returns a bare list, unlike the
            # (bbox, type) tuple of the success path.
            return [0]
        return boxes.squeeze(), 'bbox'
| 1,983 | 28.61194 | 95 | py |
StyleMask | StyleMask-master/libs/DECA/decalib/datasets/datasets.py | # -*- coding: utf-8 -*-
#
# Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is
# holder of all proprietary rights on this computer program.
# Using this computer program means that you agree to the terms
# in the LICENSE file included with this software distribution.
# Any use not explicitly granted by the LICENSE is prohibited.
#
# Copyright©2019 Max-Planck-Gesellschaft zur Förderung
# der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute
# for Intelligent Systems. All rights reserved.
#
# For comments or questions, please email us at deca@tue.mpg.de
# For commercial licensing contact, please contact ps-license@tuebingen.mpg.de
import os, sys
import torch
from torch.utils.data import Dataset, DataLoader
import torchvision.transforms as transforms
import numpy as np
import cv2
import scipy
from skimage.io import imread, imsave
from skimage.transform import estimate_transform, warp, resize, rescale
from glob import glob
import scipy.io
import torch
import kornia
from . import detectors
class TestData(Dataset):
    """Helper that detects a face in an image tensor and returns a crop
    warped to the DECA input resolution (``crop_size`` x ``crop_size``).
    """
    def __init__(self, iscrop=True, crop_size=224, scale=1.25):
        '''
        testpath: folder, imagepath_list, image path, video path
        '''
        self.crop_size = crop_size
        self.scale = scale
        self.iscrop = iscrop
        self.resolution_inp = crop_size
        self.face_detector = detectors.FAN()  # CHANGE

    def bbox2point(self, left, right, top, bottom, type='bbox'):
        ''' bbox from detector and landmarks are different

        Returns (old_size, center): the reference box size and its centre.
        The 'bbox' variant shifts the centre down by 12% of the size.
        '''
        if type == 'kpt68':
            old_size = (right - left + bottom - top) / 2 * 1.1
            center = np.array([right - (right - left) / 2.0, bottom - (bottom - top) / 2.0])
        elif type == 'bbox':
            old_size = (right - left + bottom - top) / 2
            center = np.array([right - (right - left) / 2.0, bottom - (bottom - top) / 2.0 + old_size * 0.12])
        else:
            raise NotImplementedError
        return old_size, center

    def get_image_tensor(self, image):
        """image: tensor 3xHxW, values in 0-255.

        Returns (crop, error_flag): on success a resolution_inp-sized face
        crop scaled to [0, 1] and False; on detector failure the untouched
        input and True.
        """
        img_tmp = image.clone()
        img_tmp = img_tmp.permute(1, 2, 0)  # HxWx3 for the detector
        bbox, bbox_type = self.face_detector.run(img_tmp)
        if bbox_type == 'error':
            return image, True

        if len(bbox) < 4:
            # Fall back to the whole frame. (FIX: `h` and `w` were
            # previously undefined here, raising a NameError.)
            print('no face detected! run original image')
            h, w = img_tmp.shape[0], img_tmp.shape[1]
            left = 0; right = w - 1; top = 0; bottom = h - 1
        else:
            left = bbox[0]; right = bbox[2]
            top = bbox[1]; bottom = bbox[3]
        old_size, center = self.bbox2point(left, right, top, bottom, type=bbox_type)
        size = int(old_size * self.scale)

        # Similarity transform mapping the face square to the network input.
        src_pts = np.array([[center[0] - size / 2, center[1] - size / 2],
                            [center[0] - size / 2, center[1] + size / 2],
                            [center[0] + size / 2, center[1] - size / 2]])
        DST_PTS = np.array([[0, 0], [0, self.resolution_inp - 1], [self.resolution_inp - 1, 0]])
        tform = estimate_transform('similarity', src_pts, DST_PTS)

        theta = torch.tensor(tform.params, dtype=torch.float32).unsqueeze(0).cuda()
        image_tensor = image.clone().unsqueeze(0)
        # NOTE: `kornia.warp_affine` is the pre-0.2 kornia API; newer
        # versions expose it as kornia.geometry.transform.warp_affine.
        # (Generalized: the hard-coded 224x224 now follows resolution_inp.)
        dst_image = kornia.warp_affine(image_tensor, theta[:, :2, :],
                                       dsize=(self.resolution_inp, self.resolution_inp))
        dst_image = dst_image.div(255.)
        return dst_image.squeeze(0), False
StyleMask | StyleMask-master/libs/DECA/decalib/datasets/detectors_2.py | """
Calculate euler angles yaw pitch roll using deep network HopeNet
https://github.com/natanielruiz/deep-head-pose
The face detector used is SFD (taken from face-alignment FAN) https://github.com/1adrianb/face-alignment
"""
import os
import numpy as np
import sys
from matplotlib import pyplot as plt
import cv2
from enum import Enum
import torch
import torch.nn as nn
from torch.autograd import Variable
from torch.utils.data import DataLoader
from torch.utils.model_zoo import load_url
from torchvision import transforms
import torch.backends.cudnn as cudnn
import torchvision
import torch.nn.functional as F
from PIL import Image
# from .image_utils import imshow, imshow_nparray, image_resize
# from .visualization import print_values , draw_detected_face
from libs.pose_estimation.sfd.sfd_detector import SFDDetector as FaceDetector
from libs.pose_estimation.fan_model.models import FAN, ResNetDepth
from libs.pose_estimation.fan_model.utils import *
class LandmarksType(Enum):
    """Enum class defining the type of landmarks to detect.

    ``_2D`` - the detected points ``(x,y)`` are detected in a 2D space and follow the visible contour of the face
    ``_2halfD`` - these points represent the projection of the 3D points into 3D
    ``_3D`` - detect the points ``(x,y,z)`` in a 3D space

    """
    _2D = 1
    _2halfD = 2
    _3D = 3
class NetworkSize(Enum):
    """Number of stacked hourglass modules in the FAN network; only the
    4-stack variant is kept (matching the pretrained '2DFAN-4' weights).
    """
    # TINY = 1
    # SMALL = 2
    # MEDIUM = 3
    LARGE = 4

    def __new__(cls, value):
        member = object.__new__(cls)
        member._value_ = value
        return member

    def __int__(self):
        # Allows int(NetworkSize.LARGE) == 4 for building model names.
        return self.value
# Download URLs of the pretrained face-alignment weights, keyed by network
# name ('2DFAN-4' / '3DFAN-4') plus the auxiliary depth-prediction network.
models_urls = {
    '2DFAN-4': 'https://www.adrianbulat.com/downloads/python-fan/2DFAN4-11f355bf06.pth.tar',
    '3DFAN-4': 'https://www.adrianbulat.com/downloads/python-fan/3DFAN4-7835d9f11d.pth.tar',
    'depth': 'https://www.adrianbulat.com/downloads/python-fan/depth-2a464da4ea.pth.tar',
}
def get_preds_fromhm(hm, center=None, scale=None):
    """Obtain (x,y) coordinates given a set of N heatmaps. If the center
    and the scale is provided the function will return the points also in
    the original coordinate frame.

    Arguments:
        hm {torch.tensor} -- the predicted heatmaps, of shape [B, N, W, H]

    Keyword Arguments:
        center {torch.tensor} -- the center of the bounding box (default: {None})
        scale {float} -- face scale (default: {None})
    """
    # Peak of each heatmap as a 1-based flat index.
    max, idx = torch.max(
        hm.view(hm.size(0), hm.size(1), hm.size(2) * hm.size(3)), 2)
    idx = idx + 1
    preds = idx.view(idx.size(0), idx.size(1), 1).repeat(1, 1, 2).float()
    # Convert the flat index into 1-based (x, y) heatmap coordinates.
    preds[..., 0].apply_(lambda x: (x - 1) % hm.size(3) + 1)
    preds[..., 1].add_(-1).div_(hm.size(2)).floor_().add_(1)

    # Quarter-pixel refinement: nudge each peak towards its stronger
    # neighbour. NOTE(review): the 0..63 bounds hard-code 64x64 heatmaps —
    # confirm before using other heatmap sizes.
    for i in range(preds.size(0)):
        for j in range(preds.size(1)):
            hm_ = hm[i, j, :]
            pX, pY = int(preds[i, j, 0]) - 1, int(preds[i, j, 1]) - 1
            if pX > 0 and pX < 63 and pY > 0 and pY < 63:
                diff = torch.FloatTensor(
                    [hm_[pY, pX + 1] - hm_[pY, pX - 1],
                     hm_[pY + 1, pX] - hm_[pY - 1, pX]])
                preds[i, j].add_(diff.sign_().mul_(.25))

    preds.add_(-.5)

    preds_orig = torch.zeros(preds.size())
    if center is not None and scale is not None:
        # Map heatmap coordinates back to the original image frame.
        # NOTE(review): `transform` presumably comes from the star import
        # of fan_model.utils — confirm.
        for i in range(hm.size(0)):
            for j in range(hm.size(1)):
                preds_orig[i, j] = transform(
                    preds[i, j], center, scale, hm.size(2), True)
    return preds, preds_orig
def draw_detected_face(img, face):
    """Draw the bounding box of a detected face on *img* in place.

    Args:
        img: HxWx3 image (numpy array).
        face: sequence whose first four entries are [x_min, y_min, x_max, y_max].

    Returns:
        The same image with a blue (255, 0, 0) 2-px rectangle drawn on it.

    (A large amount of dead commented-out crop/margin code was removed.)
    """
    x_min, y_min, x_max, y_max = (int(v) for v in face[:4])
    cv2.rectangle(img, (x_min, y_min), (x_max, y_max), (255, 0, 0), 2)
    return img
from os.path import abspath, dirname
current_file_directory = dirname(abspath(__file__))
class LandmarksEstimation():
    def __init__(self):
        """Load the SFD face detector and the 2D-FAN landmark network
        (pretrained weights fetched from adrianbulat.com via load_url).
        """
        self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
        # Load all needed models - Face detector and Pose detector
        network_size = NetworkSize.LARGE
        network_size = int(network_size)
        self.landmarks_type = LandmarksType._2D
        self.flip_input = False

        # SFD face detection
        path_to_detector = './libs/pose_estimation/sfd/model/s3fd-619a316812.pth'
        if not os.path.exists(path_to_detector):
            # NOTE(review): the bare string below is a no-op statement,
            # presumably intended as a comment.
            'Search on scratch'
            path_to_detector = '../../../scratch/k2033759/Finding_directions/pretrained_models/s3fd-619a316812.pth'

        face_detector = 'sfd'
        self.face_detector = FaceDetector(device='cuda', verbose=False,path_to_detector = path_to_detector)

        # 224x224 ImageNet-normalised preprocessing; the *_image variant
        # additionally converts PIL images to tensors.
        self.transformations_image = transforms.Compose([transforms.Resize(224),
                                            transforms.CenterCrop(224), transforms.ToTensor(),
                                            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])
        self.transformations = transforms.Compose([transforms.Resize(224),
                                            transforms.CenterCrop(224),
                                            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])

        # Initialise the face alignemnt networks
        self.face_alignment_net = FAN(network_size)
        network_name = '2DFAN-' + str(network_size)
        fan_weights = load_url(models_urls[network_name], map_location=lambda storage, loc: storage)
        self.face_alignment_net.load_state_dict(fan_weights)
        self.face_alignment_net.to(self.device)
        self.face_alignment_net.eval()
    def detect_landmarks_torch(self, images):
        """
        images: torch Tensor B x C x W x H
        detected_faces: B X 1 x 5  (x1, y1, x2, y2, conf per face)

        Returns a list of landmark-derived bounding boxes
        [left, top, right, bottom], one per image, plus the tag 'kpt68'.
        """
        detected_faces, error, error_index = self.face_detector.detect_from_batch(images)
        faces = []
        # Keep only the highest-confidence detection per image.
        # NOTE(review): an image with zero detections would raise an
        # IndexError at box[0] — confirm upstream guarantees >= 1 face.
        for i in range(images.shape[0]):
            box = detected_faces[i]
            if len(box) > 1:
                max_conf = -1
                max_ind = -1
                for j in range(len(box)):
                    conf = box[j][4]
                    if conf > max_conf:
                        max_conf = conf
                        max_ind = j
                box_new = box[max_ind]
                box = box_new
                faces.append(box)
            else:
                faces.append(box[0])
        faces = np.asarray(faces)
        bboxes = []
        # Derive each bounding box from the extremes of the 68 landmarks.
        for i in range(faces.shape[0]):
            kpt = self.find_landmarks_torch(faces[i], images[i])
            kpt = kpt[0].detach().cpu().numpy()
            left = np.min(kpt[:,0])
            right = np.max(kpt[:,0])
            top = np.min(kpt[:,1])
            bottom = np.max(kpt[:,1])
            bbox = [left, top, right, bottom]
            bboxes.append(bbox)
        return bboxes, 'kpt68'
    def find_landmarks_torch(self, face, image):
        """Run the FAN network on the crop described by `face`
        (x1, y1, x2, y2, conf) and return the 68 2D landmarks in the
        coordinate frame of `image` as a 1 x 68 x 2 tensor.
        """
        # Box centre, shifted upwards by 12% of the box height as the FAN
        # reference crop expects.
        center = torch.FloatTensor(
            [(face[2] + face[0]) / 2.0,
             (face[3] + face[1]) / 2.0])
        center[1] = center[1] - (face[3] - face[1]) * 0.12
        scale = (face[2] - face[0] + face[3] - face[1]) / self.face_detector.reference_scale
        # NOTE(review): `crop_torch` presumably comes from the star import
        # of fan_model.utils — confirm.
        inp = crop_torch(image.unsqueeze(0), center, scale).float().cuda()
        # print(inp.shape)
        # imshow(inp.squeeze(0))
        inp = inp.div(255.0)
        out = self.face_alignment_net(inp)[-1]
        if self.flip_input:
            out = out + flip(self.face_alignment_net(flip(inp))
                                [-1], is_label=True) # patched inp_batch undefined variable error
        out = out.cpu()
        # Heatmap peaks -> (x, y) coordinates in the original image frame.
        pts, pts_img = get_preds_fromhm(out, center, scale)
        pts, pts_img = pts.view(-1, 68, 2) * 4, pts_img.view(-1, 68, 2)
        return pts_img
    def find_landmarks(self, face, image):
        """Like `find_landmarks_torch`, but also returns the raw heatmaps,
        and — when `landmarks_type` is _3D — appends a predicted depth
        coordinate to each landmark.

        Returns (pts_img, out): landmarks plus the network heatmaps (on GPU).
        """
        # face = face[0]
        center = torch.FloatTensor(
            [(face[2] + face[0]) / 2.0,
             (face[3] + face[1]) / 2.0])
        center[1] = center[1] - (face[3] - face[1]) * 0.12
        scale = (face[2] - face[0] + face[3] - face[1]) / self.face_detector.reference_scale
        inp = crop_torch(image.unsqueeze(0), center, scale).float().cuda()
        # print(inp.shape)
        # imshow(inp.squeeze(0))
        inp = inp.div(255.0)
        out = self.face_alignment_net(inp)[-1]
        if self.flip_input:
            out = out + flip(self.face_alignment_net(flip(inp))
                                [-1], is_label=True) # patched inp_batch undefined variable error
        out = out.cpu()
        pts, pts_img = get_preds_fromhm(out, center, scale)
        out = out.cuda()
        # Added 3D landmark support
        # NOTE(review): `self.depth_prediciton_net` is not set in __init__;
        # this branch would fail unless it is assigned elsewhere. With the
        # default landmarks_type (_2D) the branch is never taken.
        if self.landmarks_type == LandmarksType._3D:
            pts, pts_img = pts.view(68, 2) * 4, pts_img.view(68, 2)
            heatmaps = torch.zeros((68,256,256), dtype=torch.float32)
            for i in range(68):
                if pts[i, 0] > 0:
                    heatmaps[i] = draw_gaussian(
                        heatmaps[i], pts[i], 2)
            heatmaps = heatmaps.unsqueeze(0)
            heatmaps = heatmaps.to(self.device)
            if inp.shape[2] != heatmaps.shape[2] or inp.shape[3] != heatmaps.shape[3]:
                print(inp.shape)
                print(heatmaps.shape)
            depth_pred = self.depth_prediciton_net(
                torch.cat((inp, heatmaps), 1)).view(68, 1) #.data.cpu().view(68, 1)
            # print(depth_pred.view(68, 1).shape)
            pts_img = pts_img.cuda()
            pts_img = torch.cat(
                (pts_img, depth_pred * (1.0 / (256.0 / (200.0 * scale)))), 1)
        else:
            pts, pts_img = pts.view(-1, 68, 2) * 4, pts_img.view(-1, 68, 2)
        # if pts_img.requires_grad:
        #     pts_img.register_hook(lambda grad: print('pts_img',grad))
        # print(pts_img.requires_grad)
        return pts_img, out
def face_detection(self, image, save_path, image_path):
image_tensor = torch.tensor(np.transpose(image,(2,0,1))).float().cuda()
if len(image_tensor.shape) == 3:
image_tensor = image_tensor.unsqueeze(0).cuda()
detected_faces,error,error_index = self.face_detector.detect_from_batch(image_tensor)
else:
detected_faces,error,error_index = self.face_detector.detect_from_batch(image_tensor)
faces_num = 0
if len(detected_faces[0]) == 0:
return image
for face in detected_faces[0]:
conf = face[4]
# print('Conf {:.2f}'.format(conf))
if conf > 0.9:
x1 = face[0]
y1 = face[1]
x2 = face[2]
y2 = face[3]
w = x2-x1
h = y2-y1
cx = int(x1+w/2)
cy = int(y1+h/2)
if h>w:
w = h
x1_hat = cx - int(w/2)
if x1_hat < 0:
x1_hat = 0
x2_hat = x1_hat + w
else:
h = w
y1_hat = cy - int(h/2)
if y1_hat < 0:
y1_hat = 0
y2_hat = y1_hat + h
# print(int(w), int(h))
# quit()
# w = 100
# h = 100
w_hat = int(w*1.6)
h_hat = int(h*1.6)
x1_hat = cx - int(w_hat/2)
if x1_hat < 0:
x1_hat = 0
y1_hat = cy - int(h_hat/2)
if y1_hat < 0:
y1_hat = 0
x2_hat = x1_hat + w_hat
y2_hat = y1_hat + h_hat
crop = image.copy()
# print(y1_hat, y2_hat, x1_hat, x2_hat)
crop = crop[ y1_hat:y2_hat, x1_hat:x2_hat]
# print(w_hat, h_hat)
crop, scale = image_resize(crop, 256, 256)
# x2 = x1 + w
# y2 = y1 + h
# cx = int(x1+w/2)
# cy = int(y1+h/2)
# w_hat = int(w*1.6)
# h_hat = int(h*1.6)
# x1_hat = cx - int(w_hat/2)
# if x1_hat < 0:
# x1_hat = 0
# y1_hat = cy - int(h_hat/2)
# if y1_hat < 0:
# y1_hat = 0
# x2_hat = x1_hat + w_hat
# y2_hat = y1_hat + h_hat
# crop = image[ y1_hat:y2_hat, x1_hat:x2_hat]
# # cv2.imwrite('./test.png', cv2.cvtColor(crop.copy(), cv2.COLOR_RGB2BGR))
# crop, scale = image_resize(crop , resize_, resize_)
# print(scale)
# img = draw_detected_face(image, face)
# image_name = image_path.split('/')[-1]
# filename = os.path.join(save_path, 'cropped_' +image_name)
# cv2.imwrite(filename, cv2.cvtColor(crop.copy(), cv2.COLOR_RGB2BGR))
# filename_2 = os.path.join(save_path, 'face_' + image_name)
# # img, scale = image_resize(image, 256)
# cv2.imwrite(filename_2, cv2.cvtColor(img.copy(), cv2.COLOR_RGB2BGR))
return crop
    @torch.no_grad()
    def detect_landmarks(self, image, detected_faces = None, draw_face = False):
        """Detect the largest face in `image` and regress its 68 landmarks.

        Args:
            image: image tensor, either [3, h, w] (a batch dim is added and the
                tensor moved to CUDA) or an already-batched [b, 3, h, w] tensor.
            detected_faces: optional precomputed detections — per-image lists of
                [x1, y1, x2, y2, conf] boxes; when None the internal
                face detector is run on the batch.
            draw_face: unused in the current implementation (kept for API
                compatibility).

        Returns (see the return statement that follows this block):
            landmarks: [1, 68, 3] CUDA tensor for 3D models, else [1, 68, 2].
            twoface: True when more than one high-confidence face was processed.
            detected_faces: the raw detector output.
        """
        twoface = False
        if detected_faces is None:
            if len(image.shape) == 3:
                # Single image: add the batch dimension the detector expects.
                image = image.unsqueeze(0).cuda()
                detected_faces,error,error_index = self.face_detector.detect_from_batch(image)
            else:
                detected_faces,error,error_index = self.face_detector.detect_from_batch(image)
        twoface = False
        batch = 0
        num_faces = 0
        em_max = -1
        index_face = 0
        # First pass: pick the detection with the largest box area; only that
        # face is given landmarks below.
        for face in detected_faces[0]:
            conf = face[4]
            w = face[2] - face[0]
            h = face[3] - face[1]
            em = w*h
            if em>em_max:
                em_max = em
                index_face = num_faces
            num_faces += 1
        size = len(detected_faces[0])
        # 3D landmark models predict (x, y, z); 2D models predict (x, y).
        if self.landmarks_type == LandmarksType._3D:
            landmarks = torch.empty((1, 68, 3), requires_grad=True).cuda()
        else:
            landmarks = torch.empty((1, 68, 2), requires_grad=True).cuda()
        counter = 0
        # Second pass: run the landmark regressor on the selected face only,
        # and only when the detector is very confident (> 0.99) about it.
        for face in detected_faces[0]:
            conf = face[4]
            if conf > 0.99 and counter == index_face:
                pts_img, heatmaps = self.find_landmarks(face, image[0])
                landmarks[batch] = pts_img.cuda()
                batch += 1
            counter += 1
        if batch > 1:
            twoface = True
return landmarks, twoface, detected_faces | 14,022 | 27.444219 | 110 | py |
StyleMask | StyleMask-master/libs/DECA/decalib/utils/renderer.py | # -*- coding: utf-8 -*-
#
# Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is
# holder of all proprietary rights on this computer program.
# Using this computer program means that you agree to the terms
# in the LICENSE file included with this software distribution.
# Any use not explicitly granted by the LICENSE is prohibited.
#
# Copyright©2019 Max-Planck-Gesellschaft zur Förderung
# der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute
# for Intelligent Systems. All rights reserved.
#
# For comments or questions, please email us at deca@tue.mpg.de
# For commercial licensing contact, please contact ps-license@tuebingen.mpg.de
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from skimage.io import imread
import imageio
from pytorch3d.structures import Meshes
from pytorch3d.io import load_obj
from pytorch3d.renderer.mesh import rasterize_meshes
from . import util
# from .rasterizer.standard_rasterize_cuda import standard_rasterize
class Pytorch3dRasterizer(nn.Module):
    """ Borrowed from https://github.com/facebookresearch/pytorch3d
    Notice:
        x,y,z are in image space, normalized
        can only render squared image now

    Rasterizes a mesh and barycentrically interpolates arbitrary per-face
    vertex attributes into image space; a visibility-mask channel is appended
    as the last output channel.
    """
    def __init__(self, image_size=224):
        """
        use fixed raster_settings for rendering faces
        """
        super().__init__()
        # Fixed settings: hard rasterization (no blur), one face per pixel,
        # orthographic-style sampling (no perspective correction).
        raster_settings = {
            'image_size': image_size,
            'blur_radius': 0.0,
            'faces_per_pixel': 1,
            'bin_size': None,
            'max_faces_per_bin': None,
            'perspective_correct': False,
        }
        raster_settings = util.dict2obj(raster_settings)
        self.raster_settings = raster_settings
    def forward(self, vertices, faces, attributes=None):
        # pytorch3d's screen convention is flipped in x/y relative to this
        # codebase, so negate both axes on a copy.
        fixed_vertices = vertices.clone()
        fixed_vertices[...,:2] = -fixed_vertices[...,:2]
        meshes_screen = Meshes(verts=fixed_vertices.float(), faces=faces.long())
        raster_settings = self.raster_settings
        pix_to_face, zbuf, bary_coords, dists = rasterize_meshes(
            meshes_screen,
            image_size=raster_settings.image_size,
            blur_radius=raster_settings.blur_radius,
            faces_per_pixel=raster_settings.faces_per_pixel,
            bin_size=raster_settings.bin_size,
            max_faces_per_bin=raster_settings.max_faces_per_bin,
            perspective_correct=raster_settings.perspective_correct,
        )
        # pix_to_face == -1 marks background pixels (no face covers them).
        vismask = (pix_to_face > -1).float()
        D = attributes.shape[-1]
        attributes = attributes.clone(); attributes = attributes.view(attributes.shape[0]*attributes.shape[1], 3, attributes.shape[-1])
        N, H, W, K, _ = bary_coords.shape
        mask = pix_to_face == -1
        # Temporarily point background pixels at face 0 so gather() has a
        # valid index; their values are zeroed out again below.
        pix_to_face = pix_to_face.clone()
        pix_to_face[mask] = 0
        idx = pix_to_face.view(N * H * W * K, 1, 1).expand(N * H * W * K, 3, D)
        pixel_face_vals = attributes.gather(0, idx).view(N, H, W, K, 3, D)
        # Barycentric interpolation of the 3 vertex attribute vectors.
        pixel_vals = (bary_coords[..., None] * pixel_face_vals).sum(dim=-2)
        pixel_vals[mask] = 0  # Replace masked values in output.
        pixel_vals = pixel_vals[:,:,:,0].permute(0,3,1,2)
        # Append the visibility mask as an extra channel: [N, D+1, H, W].
        pixel_vals = torch.cat([pixel_vals, vismask[:,:,:,0][:,None,:,:]], dim=1)
        return pixel_vals
class SRenderY(nn.Module):
    """Differentiable face-mesh renderer built on Pytorch3dRasterizer.

    Provides textured rendering with spherical-harmonic, point or directional
    lighting, plus shape-overlay, depth, normal and world-to-UV rendering.
    Mesh topology and UV layout are loaded once from `obj_filename` and stored
    as buffers.
    """
    def __init__(self, image_size, obj_filename, uv_size=256, rasterizer_type='pytorch3d'):
        super(SRenderY, self).__init__()
        self.image_size = image_size
        self.uv_size = uv_size
        verts, faces, aux = load_obj(obj_filename)
        uvcoords = aux.verts_uvs[None, ...]  # (N, V, 2)
        uvfaces = faces.textures_idx[None, ...] # (N, F, 3)
        faces = faces.verts_idx[None,...]
        if rasterizer_type == 'pytorch3d':
            self.rasterizer = Pytorch3dRasterizer(image_size)
            self.uv_rasterizer = Pytorch3dRasterizer(uv_size)
        # faces
        dense_triangles = util.generate_triangles(uv_size, uv_size)
        self.register_buffer('dense_faces', torch.from_numpy(dense_triangles).long()[None,:,:])
        self.register_buffer('faces', faces)
        self.register_buffer('raw_uvcoords', uvcoords)
        # uv coords: pad a constant 1 as a third coordinate, then map from
        # [0, 1] to the rasterizer's [-1, 1] range with y flipped.
        uvcoords = torch.cat([uvcoords, uvcoords[:,:,0:1]*0.+1.], -1) #[bz, ntv, 3]
        uvcoords = uvcoords*2 - 1; uvcoords[...,1] = -uvcoords[...,1]
        face_uvcoords = util.face_vertices(uvcoords, uvfaces)
        self.register_buffer('uvcoords', uvcoords)
        self.register_buffer('uvfaces', uvfaces)
        self.register_buffer('face_uvcoords', face_uvcoords)
        # shape colors, for rendering shape overlay (uniform light gray)
        colors = torch.tensor([180, 180, 180])[None, None, :].repeat(1, faces.max()+1, 1).float()/255.
        face_colors = util.face_vertices(colors, faces)
        self.register_buffer('face_colors', face_colors)
        ## SH factors for lighting (9-band spherical harmonics constants)
        pi = np.pi
        constant_factor = torch.tensor([1/np.sqrt(4*pi), ((2*pi)/3)*(np.sqrt(3/(4*pi))), ((2*pi)/3)*(np.sqrt(3/(4*pi))),\
                           ((2*pi)/3)*(np.sqrt(3/(4*pi))), (pi/4)*(3)*(np.sqrt(5/(12*pi))), (pi/4)*(3)*(np.sqrt(5/(12*pi))),\
                           (pi/4)*(3)*(np.sqrt(5/(12*pi))), (pi/4)*(3/2)*(np.sqrt(5/(12*pi))), (pi/4)*(1/2)*(np.sqrt(5/(4*pi)))]).float()
        self.register_buffer('constant_factor', constant_factor)
    def forward(self, vertices, transformed_vertices, albedos, lights=None, light_type='point'):
        '''
        -- Texture Rendering
        vertices: [batch_size, V, 3], vertices in world space, for calculating normals, then shading
        transformed_vertices: [batch_size, V, 3], range:normalized to [-1,1], projected vertices in image space (that is aligned to the iamge pixel), for rasterization
        albedos: [batch_size, 3, h, w], uv map
        lights:
            spherical homarnic: [N, 9(shcoeff), 3(rgb)]
            points/directional lighting: [N, n_lights, 6(xyzrgb)]
        light_type:
            point or directional
        '''
        batch_size = vertices.shape[0]
        ## rasterizer near 0 far 100. move mesh so minz larger than 0
        transformed_vertices[:,:,2] = transformed_vertices[:,:,2] + 10
        # attributes
        face_vertices = util.face_vertices(vertices, self.faces.expand(batch_size, -1, -1))
        normals = util.vertex_normals(vertices, self.faces.expand(batch_size, -1, -1)); face_normals = util.face_vertices(normals, self.faces.expand(batch_size, -1, -1))
        transformed_normals = util.vertex_normals(transformed_vertices, self.faces.expand(batch_size, -1, -1)); transformed_face_normals = util.face_vertices(transformed_normals, self.faces.expand(batch_size, -1, -1))
        # Attribute channel layout after rasterization:
        #   0:3 uv coords, 3:6 transformed normals, 6:9 world vertices,
        #   9:12 world normals, last channel = visibility mask.
        attributes = torch.cat([self.face_uvcoords.expand(batch_size, -1, -1, -1),
                        transformed_face_normals.detach(),
                        face_vertices.detach(),
                        face_normals],
                        -1)
        # rasterize
        rendering = self.rasterizer(transformed_vertices, self.faces.expand(batch_size, -1, -1), attributes)
        ####
        # vis mask
        alpha_images = rendering[:, -1, :, :][:, None, :, :].detach()
        # albedo: sample the UV texture at the rasterized uv coordinates
        uvcoords_images = rendering[:, :3, :, :]; grid = (uvcoords_images).permute(0, 2, 3, 1)[:, :, :, :2]
        albedo_images = F.grid_sample(albedos, grid, align_corners=False)
        # visible mask for pixels with positive normal direction
        transformed_normal_map = rendering[:, 3:6, :, :].detach()
        pos_mask = (transformed_normal_map[:, 2:, :, :] < -0.05).float()
        # shading
        normal_images = rendering[:, 9:12, :, :]
        if lights is not None:
            if lights.shape[1] == 9:
                # 9 coefficients per channel -> spherical-harmonic lighting
                shading_images = self.add_SHlight(normal_images, lights)
            else:
                if light_type=='point':
                    vertice_images = rendering[:, 6:9, :, :].detach()
                    shading = self.add_pointlight(vertice_images.permute(0,2,3,1).reshape([batch_size, -1, 3]), normal_images.permute(0,2,3,1).reshape([batch_size, -1, 3]), lights)
                    shading_images = shading.reshape([batch_size, albedo_images.shape[2], albedo_images.shape[3], 3]).permute(0,3,1,2)
                else:
                    shading = self.add_directionlight(normal_images.permute(0,2,3,1).reshape([batch_size, -1, 3]), lights)
                    shading_images = shading.reshape([batch_size, albedo_images.shape[2], albedo_images.shape[3], 3]).permute(0,3,1,2)
            images = albedo_images*shading_images
        else:
            images = albedo_images
            shading_images = images.detach()*0.
        outputs = {
            'images': images*alpha_images,
            'albedo_images': albedo_images*alpha_images,
            'alpha_images': alpha_images,
            'pos_mask': pos_mask,
            'shading_images': shading_images,
            'grid': grid,
            'normals': normals,
            'normal_images': normal_images*alpha_images,
            'transformed_normals': transformed_normals,
        }
        return outputs
    def add_SHlight(self, normal_images, sh_coeff):
        '''
        sh_coeff: [bz, 9, 3]
        Evaluate 9-band spherical harmonics on per-pixel normals and contract
        with the SH coefficients to get RGB shading.
        '''
        N = normal_images
        sh = torch.stack([
                N[:,0]*0.+1., N[:,0], N[:,1], \
                N[:,2], N[:,0]*N[:,1], N[:,0]*N[:,2],
                N[:,1]*N[:,2], N[:,0]**2 - N[:,1]**2, 3*(N[:,2]**2) - 1
                ],
                1) # [bz, 9, h, w]
        sh = sh*self.constant_factor[None,:,None,None]
        shading = torch.sum(sh_coeff[:,:,:,None,None]*sh[:,:,None,:,:], 1) # [bz, 9, 3, h, w]
        return shading
    def add_pointlight(self, vertices, normals, lights):
        '''
        vertices: [bz, nv, 3]
        lights: [bz, nlight, 6]
        returns:
            shading: [bz, nv, 3]
        '''
        light_positions = lights[:,:,:3]; light_intensities = lights[:,:,3:]
        directions_to_lights = F.normalize(light_positions[:,:,None,:] - vertices[:,None,:,:], dim=3)
        # normals_dot_lights = torch.clamp((normals[:,None,:,:]*directions_to_lights).sum(dim=3), 0., 1.)
        # NOTE: unlike add_directionlight, the dot product is NOT clamped here,
        # so back-facing points can receive negative shading.
        normals_dot_lights = (normals[:,None,:,:]*directions_to_lights).sum(dim=3)
        shading = normals_dot_lights[:,:,:,None]*light_intensities[:,:,None,:]
        return shading.mean(1)
    def add_directionlight(self, normals, lights):
        '''
        normals: [bz, nv, 3]
        lights: [bz, nlight, 6]
        returns:
            shading: [bz, nv, 3]
        '''
        light_direction = lights[:,:,:3]; light_intensities = lights[:,:,3:]
        directions_to_lights = F.normalize(light_direction[:,:,None,:].expand(-1,-1,normals.shape[1],-1), dim=3)
        # normals_dot_lights = torch.clamp((normals[:,None,:,:]*directions_to_lights).sum(dim=3), 0., 1.)
        # normals_dot_lights = (normals[:,None,:,:]*directions_to_lights).sum(dim=3)
        normals_dot_lights = torch.clamp((normals[:,None,:,:]*directions_to_lights).sum(dim=3), 0., 1.)
        shading = normals_dot_lights[:,:,:,None]*light_intensities[:,:,None,:]
        return shading.mean(1)
    def render_shape(self, vertices, transformed_vertices, images=None, detail_normal_images=None, lights=None):
        '''
        -- rendering shape with detail normal map
        Renders a gray-shaded geometry overlay; when `images` is given the
        result is composited over it using the rendered alpha.
        '''
        batch_size = vertices.shape[0]
        # set lighting: default is five fixed directional lights
        if lights is None:
            light_positions = torch.tensor(
                [
                [-1,1,1],
                [1,1,1],
                [-1,-1,1],
                [1,-1,1],
                [0,0,1]
                ]
            )[None,:,:].expand(batch_size, -1, -1).float()
            light_intensities = torch.ones_like(light_positions).float()*1.7
            lights = torch.cat((light_positions, light_intensities), 2).to(vertices.device)
        transformed_vertices[:,:,2] = transformed_vertices[:,:,2] + 10
        # Attributes (same channel layout as forward, but gray colors first)
        face_vertices = util.face_vertices(vertices, self.faces.expand(batch_size, -1, -1))
        normals = util.vertex_normals(vertices, self.faces.expand(batch_size, -1, -1)); face_normals = util.face_vertices(normals, self.faces.expand(batch_size, -1, -1))
        transformed_normals = util.vertex_normals(transformed_vertices, self.faces.expand(batch_size, -1, -1)); transformed_face_normals = util.face_vertices(transformed_normals, self.faces.expand(batch_size, -1, -1))
        attributes = torch.cat([self.face_colors.expand(batch_size, -1, -1, -1),
                        transformed_face_normals.detach(),
                        face_vertices.detach(),
                        face_normals],
                        -1)
        # rasterize
        rendering = self.rasterizer(transformed_vertices, self.faces.expand(batch_size, -1, -1), attributes)
        ####
        alpha_images = rendering[:, -1, :, :][:, None, :, :].detach()
        # albedo
        albedo_images = rendering[:, :3, :, :]
        # mask
        transformed_normal_map = rendering[:, 3:6, :, :].detach()
        pos_mask = (transformed_normal_map[:, 2:, :, :] < 0.15).float()
        # shading
        normal_images = rendering[:, 9:12, :, :].detach()
        vertice_images = rendering[:, 6:9, :, :].detach()
        if detail_normal_images is not None:
            normal_images = detail_normal_images
        shading = self.add_directionlight(normal_images.permute(0,2,3,1).reshape([batch_size, -1, 3]), lights)
        shading_images = shading.reshape([batch_size, albedo_images.shape[2], albedo_images.shape[3], 3]).permute(0,3,1,2).contiguous()
        shaded_images = albedo_images*shading_images
        alpha_images = alpha_images*pos_mask
        if images is None:
            shape_images = shaded_images*alpha_images + torch.zeros_like(shaded_images).to(vertices.device)*(1-alpha_images)
        else:
            shape_images = shaded_images*alpha_images + images*(1-alpha_images)
        return shape_images
    def render_depth(self, transformed_vertices):
        '''
        -- rendering depth
        Returns a [bz, 1, h, w] depth image normalized to [0, 1].
        '''
        batch_size = transformed_vertices.shape[0]
        transformed_vertices[:,:,2] = transformed_vertices[:,:,2] - transformed_vertices[:,:,2].min()
        z = -transformed_vertices[:,:,2:].repeat(1,1,3).clone()
        z = z-z.min()
        z = z/z.max()
        # Attributes
        attributes = util.face_vertices(z, self.faces.expand(batch_size, -1, -1))
        # rasterize
        transformed_vertices[:,:,2] = transformed_vertices[:,:,2] + 10
        rendering = self.rasterizer(transformed_vertices, self.faces.expand(batch_size, -1, -1), attributes)
        ####
        alpha_images = rendering[:, -1, :, :][:, None, :, :].detach()
        depth_images = rendering[:, :1, :, :]
        return depth_images
    def render_normal(self, transformed_vertices, normals):
        '''
        -- rendering normal
        Rasterizes the given per-vertex normals into a [bz, 3, h, w] image.
        '''
        batch_size = normals.shape[0]
        # Attributes
        attributes = util.face_vertices(normals, self.faces.expand(batch_size, -1, -1))
        # rasterize
        rendering = self.rasterizer(transformed_vertices, self.faces.expand(batch_size, -1, -1), attributes)
        ####
        alpha_images = rendering[:, -1, :, :][:, None, :, :].detach()
        normal_images = rendering[:, :3, :, :]
        return normal_images
    def world2uv(self, vertices):
        '''
        warp vertices from world space to uv space
        vertices: [bz, V, 3]
        uv_vertices: [bz, 3, h, w]
        '''
        batch_size = vertices.shape[0]
        face_vertices = util.face_vertices(vertices, self.faces.expand(batch_size, -1, -1))
        uv_vertices = self.uv_rasterizer(self.uvcoords.expand(batch_size, -1, -1), self.uvfaces.expand(batch_size, -1, -1), face_vertices)[:, :3]
return uv_vertices | 15,927 | 45.847059 | 217 | py |
StyleMask | StyleMask-master/libs/DECA/decalib/utils/util.py | # -*- coding: utf-8 -*-
#
# Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is
# holder of all proprietary rights on this computer program.
# Using this computer program means that you agree to the terms
# in the LICENSE file included with this software distribution.
# Any use not explicitly granted by the LICENSE is prohibited.
#
# Copyright©2019 Max-Planck-Gesellschaft zur Förderung
# der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute
# for Intelligent Systems. All rights reserved.
#
# For comments or questions, please email us at deca@tue.mpg.de
# For commercial licensing contact, please contact ps-license@tuebingen.mpg.de
import numpy as np
import torch
import torch.nn.functional as F
import math
from collections import OrderedDict
import os
from scipy.ndimage import morphology
from skimage.io import imsave
import cv2
def upsample_mesh(vertices, normals, faces, displacement_map, texture_map, dense_template):
    ''' upsampling coarse mesh (with displacment map)
        vertices: vertices of coarse mesh, [nv, 3]
        normals: vertex normals, [nv, 3]
        faces: faces of coarse mesh, [nf, 3]
        texture_map: texture map, [256, 256, 3]
        displacement_map: displacment map, [256, 256]
        dense_template: precomputed mapping from UV pixels to coarse-mesh
            faces and barycentric coordinates (keys used below)
    Returns:
        dense_vertices: upsampled vertices with details, [number of dense vertices, 3]
        dense_colors: vertex color, [number of dense vertices, 3]
        dense_faces: [number of dense faces, 3]
    '''
    img_size = dense_template['img_size']
    dense_faces = dense_template['f']
    x_coords = dense_template['x_coords']
    y_coords = dense_template['y_coords']
    valid_pixel_ids = dense_template['valid_pixel_ids']
    valid_pixel_3d_faces = dense_template['valid_pixel_3d_faces']
    valid_pixel_b_coords = dense_template['valid_pixel_b_coords']
    # Barycentric interpolation of the coarse-mesh positions at every valid
    # UV pixel: weighted sum of the pixel's triangle's three vertices.
    pixel_3d_points = vertices[valid_pixel_3d_faces[:, 0], :] * valid_pixel_b_coords[:, 0][:, np.newaxis] + \
                    vertices[valid_pixel_3d_faces[:, 1], :] * valid_pixel_b_coords[:, 1][:, np.newaxis] + \
                    vertices[valid_pixel_3d_faces[:, 2], :] * valid_pixel_b_coords[:, 2][:, np.newaxis]
    vertex_normals = normals
    # Same barycentric interpolation for the normals, then re-normalize.
    pixel_3d_normals = vertex_normals[valid_pixel_3d_faces[:, 0], :] * valid_pixel_b_coords[:, 0][:, np.newaxis] + \
                    vertex_normals[valid_pixel_3d_faces[:, 1], :] * valid_pixel_b_coords[:, 1][:, np.newaxis] + \
                    vertex_normals[valid_pixel_3d_faces[:, 2], :] * valid_pixel_b_coords[:, 2][:, np.newaxis]
    pixel_3d_normals = pixel_3d_normals / np.linalg.norm(pixel_3d_normals, axis=-1)[:, np.newaxis]
    displacements = displacement_map[y_coords[valid_pixel_ids].astype(int), x_coords[valid_pixel_ids].astype(int)]
    dense_colors = texture_map[y_coords[valid_pixel_ids].astype(int), x_coords[valid_pixel_ids].astype(int)]
    # Offset each interpolated point along its normal by the displacement.
    offsets = np.einsum('i,ij->ij', displacements, pixel_3d_normals)
    dense_vertices = pixel_3d_points + offsets
    return dense_vertices, dense_colors, dense_faces
# borrowed from https://github.com/YadiraF/PRNet/blob/master/utils/write.py
def write_obj(obj_name,
              vertices,
              faces,
              colors=None,
              texture=None,
              uvcoords=None,
              uvfaces=None,
              inverse_face_order=False,
              normal_map=None,
              ):
    ''' Save 3D face model with texture.
    Ref: https://github.com/patrikhuber/eos/blob/bd00155ebae4b1a13b08bf5a991694d682abbada/include/eos/core/Mesh.hpp
    Args:
        obj_name: str, output path; '.obj' is appended if missing. The .mtl
            and .png companion files are derived from this path.
        vertices: shape = (nver, 3)
        colors: shape = (nver, 3)
        faces: shape = (ntri, 3)
        texture: shape = (uv_size, uv_size, 3)
        uvcoords: shape = (nver, 2) max value<=1
        uvfaces: per-face UV indices (required when texture is given)
        inverse_face_order: reverse triangle winding before writing
        normal_map: optional displacement/normal image written next to the obj
    '''
    if obj_name.split('.')[-1] != 'obj':
        obj_name = obj_name + '.obj'
    mtl_name = obj_name.replace('.obj', '.mtl')
    texture_name = obj_name.replace('.obj', '.png')
    material_name = 'FaceTexture'
    faces = faces.copy()
    # mesh lab start with 1, python/c++ start from 0
    faces += 1
    if inverse_face_order:
        faces = faces[:, [2, 1, 0]]
        if uvfaces is not None:
            uvfaces = uvfaces[:, [2, 1, 0]]
    # write obj
    with open(obj_name, 'w') as f:
        # first line: write mtlib(material library)
        if texture is not None:
            f.write('mtllib %s\n\n' % os.path.basename(mtl_name))
        # write vertices (with optional per-vertex colors)
        if colors is None:
            for i in range(vertices.shape[0]):
                f.write('v {} {} {}\n'.format(vertices[i, 0], vertices[i, 1], vertices[i, 2]))
        else:
            for i in range(vertices.shape[0]):
                f.write('v {} {} {} {} {} {}\n'.format(vertices[i, 0], vertices[i, 1], vertices[i, 2], colors[i, 0], colors[i, 1], colors[i, 2]))
        # write uv coords
        if texture is None:
            # note: faces are written in reversed column order here
            for i in range(faces.shape[0]):
                f.write('f {} {} {}\n'.format(faces[i, 2], faces[i, 1], faces[i, 0]))
        else:
            for i in range(uvcoords.shape[0]):
                f.write('vt {} {}\n'.format(uvcoords[i,0], uvcoords[i,1]))
            f.write('usemtl %s\n' % material_name)
            # write f: ver ind/ uv ind
            uvfaces = uvfaces + 1
            for i in range(faces.shape[0]):
                f.write('f {}/{} {}/{} {}/{}\n'.format(
                    faces[i, 0], uvfaces[i, 0],
                    faces[i, 1], uvfaces[i, 1],
                    faces[i, 2], uvfaces[i, 2]
                )
                )
        # write mtl
        # NOTE(review): this inner `with ... as f` shadows the outer obj file
        # handle, and the mtl/texture are written even when texture is None
        # (cv2.imwrite would then receive None) — verify intended usage.
        with open(mtl_name, 'w') as f:
            f.write('newmtl %s\n' % material_name)
            s = 'map_Kd {}\n'.format(os.path.basename(texture_name)) # map to image
            f.write(s)
            if normal_map is not None:
                name, _ = os.path.splitext(obj_name)
                normal_name = f'{name}_normals.png'
                f.write(f'disp {normal_name}')
                cv2.imwrite(
                    normal_name,
                    normal_map
                )
        cv2.imwrite(texture_name, texture)
# ---------------------------- process/generate vertices, normals, faces
def generate_triangles(h, w, margin_x=2, margin_y=5, mask = None):
# quad layout:
# 0 1 ... w-1
# w w+1
#.
# w*h
triangles = []
for x in range(margin_x, w-1-margin_x):
for y in range(margin_y, h-1-margin_y):
triangle0 = [y*w + x, y*w + x + 1, (y+1)*w + x]
triangle1 = [y*w + x + 1, (y+1)*w + x + 1, (y+1)*w + x]
triangles.append(triangle0)
triangles.append(triangle1)
triangles = np.array(triangles)
triangles = triangles[:,[0,2,1]]
return triangles
# borrowed from https://github.com/daniilidis-group/neural_renderer/blob/master/neural_renderer/vertices_to_faces.py
def face_vertices(vertices, faces):
"""
:param vertices: [batch size, number of vertices, 3]
:param faces: [batch size, number of faces, 3]
:return: [batch size, number of faces, 3, 3]
"""
assert (vertices.ndimension() == 3)
assert (faces.ndimension() == 3)
assert (vertices.shape[0] == faces.shape[0])
assert (vertices.shape[2] == 3)
assert (faces.shape[2] == 3)
bs, nv = vertices.shape[:2]
bs, nf = faces.shape[:2]
device = vertices.device
faces = faces + (torch.arange(bs, dtype=torch.int32).to(device) * nv)[:, None, None]
vertices = vertices.reshape((bs * nv, 3))
# pytorch only supports long and byte tensors for indexing
return vertices[faces.long()]
def vertex_normals(vertices, faces):
    """
    Compute area-weighted, normalized per-vertex normals.

    :param vertices: [batch size, number of vertices, 3]
    :param faces: [batch size, number of faces, 3]
    :return: [batch size, number of vertices, 3]
    """
    assert (vertices.ndimension() == 3)
    assert (faces.ndimension() == 3)
    assert (vertices.shape[0] == faces.shape[0])
    assert (vertices.shape[2] == 3)
    assert (faces.shape[2] == 3)
    bs, nv = vertices.shape[:2]
    bs, nf = faces.shape[:2]
    device = vertices.device
    normals = torch.zeros(bs * nv, 3).to(device)
    faces = faces + (torch.arange(bs, dtype=torch.int32).to(device) * nv)[:, None, None] # expanded faces
    vertices_faces = vertices.reshape((bs * nv, 3))[faces.long()]
    faces = faces.reshape(-1, 3)
    vertices_faces = vertices_faces.reshape(-1, 3, 3)
    # Accumulate each face's (unnormalized) face normal onto its three
    # vertices; the cross product's magnitude weights by triangle area.
    normals.index_add_(0, faces[:, 1].long(),
                       torch.cross(vertices_faces[:, 2] - vertices_faces[:, 1], vertices_faces[:, 0] - vertices_faces[:, 1]))
    normals.index_add_(0, faces[:, 2].long(),
                       torch.cross(vertices_faces[:, 0] - vertices_faces[:, 2], vertices_faces[:, 1] - vertices_faces[:, 2]))
    normals.index_add_(0, faces[:, 0].long(),
                       torch.cross(vertices_faces[:, 1] - vertices_faces[:, 0], vertices_faces[:, 2] - vertices_faces[:, 0]))
    # eps avoids division by zero for isolated vertices (zero accumulated normal)
    normals = F.normalize(normals, eps=1e-6, dim=1)
    normals = normals.reshape((bs, nv, 3))
    # pytorch only supports long and byte tensors for indexing
    return normals
def batch_orth_proj(X, camera):
    ''' orthgraphic projection
        X: 3d vertices, [bz, n_point, 3]
        camera: scale and translation, [bz, 3], [scale, tx, ty]

        Translates x/y by (tx, ty), keeps z, then scales all three
        coordinates uniformly by `scale`.
    '''
    cam = camera.clone().view(-1, 1, 3)
    translated_xy = X[:, :, :2] + cam[:, :, 1:]
    translated = torch.cat([translated_xy, X[:, :, 2:]], 2)
    return cam[:, :, 0:1] * translated
# -------------------------------------- image processing
# borrowed from: https://torchgeometry.readthedocs.io/en/latest/_modules/kornia/filters
def gaussian(window_size, sigma):
    """1-D Gaussian window of length `window_size`, normalized to sum to 1."""
    center = window_size // 2
    two_sigma_sq = float(2 * sigma ** 2)
    weights = torch.stack([
        torch.exp(torch.tensor(-(idx - center) ** 2 / two_sigma_sq))
        for idx in range(window_size)
    ])
    return weights / weights.sum()
def get_gaussian_kernel(kernel_size: int, sigma: float):
    r"""Return 1-D Gaussian filter coefficients.

    Args:
        kernel_size (int): filter size; must be odd and positive.
        sigma (float): standard deviation of the Gaussian.

    Returns:
        Tensor: 1-D tensor of `kernel_size` coefficients summing to 1.

    Raises:
        TypeError: if `kernel_size` is not an odd positive integer.

    Examples::
        >>> get_gaussian_kernel(3, 2.5)
        tensor([0.3243, 0.3513, 0.3243])
    """
    size_is_valid = (isinstance(kernel_size, int)
                     and kernel_size > 0
                     and kernel_size % 2 != 0)
    if not size_is_valid:
        raise TypeError("kernel_size must be an odd positive integer. "
                        "Got {}".format(kernel_size))
    return gaussian(kernel_size, sigma)
def get_gaussian_kernel2d(kernel_size, sigma):
    r"""Return a 2-D Gaussian filter matrix (outer product of two 1-D kernels).

    Args:
        kernel_size (Tuple[int, int]): (x, y) filter sizes; odd and positive.
        sigma (Tuple[float, float]): (x, y) standard deviations.

    Returns:
        Tensor: 2-D tensor of shape `kernel_size` whose entries sum to 1.

    Raises:
        TypeError: if either argument is not a tuple of length two.
    """
    if not (isinstance(kernel_size, tuple) and len(kernel_size) == 2):
        raise TypeError("kernel_size must be a tuple of length two. Got {}"
                        .format(kernel_size))
    if not (isinstance(sigma, tuple) and len(sigma) == 2):
        raise TypeError("sigma must be a tuple of length two. Got {}"
                        .format(sigma))
    (ksize_x, ksize_y), (sigma_x, sigma_y) = kernel_size, sigma
    kernel_x = get_gaussian_kernel(ksize_x, sigma_x)
    kernel_y = get_gaussian_kernel(ksize_y, sigma_y)
    # Outer product: column vector times row vector.
    return torch.matmul(kernel_x.unsqueeze(-1), kernel_y.unsqueeze(-1).t())
def gaussian_blur(x, kernel_size=(3,3), sigma=(0.8,0.8)):
    """Depthwise Gaussian blur of a [b, c, h, w] tensor; shape is preserved."""
    channels = x.shape[1]
    weight = get_gaussian_kernel2d(kernel_size, sigma).to(x.device).to(x.dtype)
    weight = weight.repeat(channels, 1, 1, 1)
    pad = [(k - 1) // 2 for k in kernel_size]
    # groups=channels -> each channel is filtered independently.
    return F.conv2d(x, weight, padding=pad, stride=1, groups=channels)
def _compute_binary_kernel(window_size):
    r"""Build an (H*W, 1, H, W) one-hot kernel; convolving with it copies each
    pixel of every HxW window into its own output channel."""
    window_range = window_size[0] * window_size[1]
    # Identity matrix == one 1.0 per row, exactly the one-hot layout needed.
    kernel = torch.eye(window_range)
    return kernel.view(window_range, 1, window_size[0], window_size[1])
def median_blur(x, kernel_size=(3,3)):
    """Median filter of a [b, c, h, w] tensor (zero padding at the border)."""
    b, c, h, w = x.shape
    kernel = _compute_binary_kernel(kernel_size).to(x.device).to(x.dtype)
    kernel = kernel.repeat(c, 1, 1, 1)
    pad = [(k - 1) // 2 for k in kernel_size]
    # Unfold each window into its own channel, then take the median over it.
    windows = F.conv2d(x, kernel, padding=pad, stride=1, groups=c)
    windows = windows.view(b, c, -1, h, w)
    return torch.median(windows, dim=2)[0]
def get_laplacian_kernel2d(kernel_size: int):
    r"""Return the (kernel_size x kernel_size) Laplacian filter matrix.

    All entries are 1 except the center, which is ``1 - kernel_size**2`` so
    the kernel sums to zero.

    Raises:
        TypeError: if `kernel_size` is not an odd positive integer.

    Examples::
        >>> get_laplacian_kernel2d(3)
        tensor([[ 1.,  1.,  1.],
                [ 1., -8.,  1.],
                [ 1.,  1.,  1.]])
    """
    is_odd_positive = (isinstance(kernel_size, int)
                       and kernel_size > 0
                       and kernel_size % 2 == 1)
    if not is_odd_positive:
        raise TypeError("ksize must be an odd positive integer. Got {}"
                        .format(kernel_size))
    kernel = torch.ones((kernel_size, kernel_size))
    center = kernel_size // 2
    kernel[center, center] = 1 - kernel_size ** 2
    return kernel
def laplacian(x):
    """Depthwise 3x3 Laplacian of a [b, c, h, w] tensor; shape is preserved."""
    # https://torchgeometry.readthedocs.io/en/latest/_modules/kornia/filters/laplacian.html
    channels = x.shape[1]
    ksize = 3
    weight = get_laplacian_kernel2d(ksize).to(x.device).to(x.dtype)
    weight = weight.repeat(channels, 1, 1, 1)
    pad = (ksize - 1) // 2
    return F.conv2d(x, weight, padding=pad, stride=1, groups=channels)
def angle2matrix(angles):
    ''' get rotation matrix from three rotation angles(degree). right-handed.
    Args:
        angles: [batch_size, 3] tensor containing X, Y, and Z angles.
            x: pitch. positive for looking down.
            y: yaw. positive for looking left.
            z: roll. positive for tilting head right.
    Returns:
        R: [batch_size, 3, 3]. rotation matrices.
    '''
    angles = angles*(np.pi)/180.
    s = torch.sin(angles)
    c = torch.cos(angles)
    cx, cy, cz = (c[:, 0], c[:, 1], c[:, 2])
    sx, sy, sz = (s[:, 0], s[:, 1], s[:, 2])
    # Rz.dot(Ry.dot(Rx))
    # Stack along dim=1 so each batch element keeps its own 9 entries
    # together: dim=0 produced a (9, batch) tensor whose reshape interleaved
    # different batch elements into one matrix for batch_size > 1.
    R_flattened = torch.stack(
    [
      cz * cy, cz * sy * sx - sz * cx, cz * sy * cx + sz * sx,
      sz * cy, sz * sy * sx + cz * cx, sz * sy * cx - cz * sx,
          -sy,                cy * sx,                cy * cx,
    ],
    dim=1) #[batch_size, 9]
    R = torch.reshape(R_flattened, (-1, 3, 3)) #[batch_size, 3, 3]
    return R
def binary_erosion(tensor, kernel_size=5):
    """Binary-erode each mask in a batch with a square structuring element.

    Args:
        tensor: [bz, 1, h, w] binary mask tensor (any device).
        kernel_size (int): side length of the square structuring element.

    Returns:
        Eroded masks as a float32 tensor on the original device.
    """
    # `scipy.ndimage.morphology` is a deprecated namespace (removed in recent
    # SciPy releases); import the public function directly instead.
    from scipy.ndimage import binary_erosion as _nd_binary_erosion
    device = tensor.device
    mask = tensor.cpu().numpy()
    structure = np.ones((kernel_size, kernel_size))
    new_mask = mask.copy()
    for i in range(mask.shape[0]):
        new_mask[i, 0] = _nd_binary_erosion(mask[i, 0], structure)
    return torch.from_numpy(new_mask.astype(np.float32)).to(device)
def flip_image(src_image, kps):
    '''
    Horizontally mirror an image and, when given, its 2d keypoints.

    Keypoint x-coordinates are reflected about the vertical axis and
    left/right keypoint rows are swapped (via `kp_map`) so that semantic
    labels stay on the correct side after mirroring.
    '''
    width = src_image.shape[1]
    flipped = cv2.flip(src_image, 1)
    if kps is not None:
        kps[:, 0] = width - 1 - kps[:, 0]
        # Swap left/right keypoint identities after the mirror.
        kp_map = [5, 4, 3, 2, 1, 0, 11, 10, 9, 8, 7, 6, 12, 13]
        kps[:, :] = kps[kp_map]
    return flipped, kps
# -------------------------------------- io
def copy_state_dict(cur_state_dict, pre_state_dict, prefix='', load_name=None):
    """Copy matching parameters from `pre_state_dict` into `cur_state_dict`.

    Parameters are matched by name (with `prefix` prepended when looking up
    the source dict). Missing or incompatible entries are skipped silently so
    that partially-matching checkpoints can still be loaded best-effort.

    Args:
        cur_state_dict: destination state dict; its tensors are updated in place.
        pre_state_dict: source state dict (e.g. a loaded checkpoint).
        prefix (str): prefix prepended to keys when reading the source dict.
        load_name (str): if given, only keys containing this substring are copied.
    """
    def _get_params(key):
        # Look up the (possibly prefixed) key in the source dict.
        key = prefix + key
        if key in pre_state_dict:
            return pre_state_dict[key]
        return None
    for k in cur_state_dict.keys():
        if load_name is not None and load_name not in k:
            continue
        v = _get_params(k)
        if v is None:
            continue  # parameter not present in the checkpoint
        try:
            cur_state_dict[k].copy_(v)
        except Exception:
            # Shape/dtype mismatch: keep the current value (best-effort load).
            # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
            # are no longer swallowed.
            continue
def check_mkdir(path):
    """Create `path` (including parents) if it does not already exist.

    Uses makedirs(exist_ok=True) so a concurrent creation between the
    existence check and the creation cannot raise (TOCTOU-safe).
    """
    if not os.path.exists(path):
        print('creating %s' % path)
    os.makedirs(path, exist_ok=True)
def check_mkdirlist(pathlist):
    """Create every path in `pathlist` (including parents) if missing.

    Uses makedirs(exist_ok=True) per path so concurrent creation cannot raise.
    """
    for path in pathlist:
        if not os.path.exists(path):
            print('creating %s' % path)
        os.makedirs(path, exist_ok=True)
def tensor2image(tensor):
    """Convert a [3, h, w] float tensor scaled in [0, 1] to a uint8 HxWx3
    image, reversing the channel order (RGB <-> BGR for OpenCV output)."""
    arr = tensor.detach().cpu().numpy() * 255.
    arr = np.clip(arr, 0, 255)
    # CHW -> HWC, then reverse the channel axis.
    arr = arr.transpose(1, 2, 0)[:, :, [2, 1, 0]]
    return arr.astype(np.uint8).copy()
def dict2obj(d):
    """Recursively convert a dict into an object with attribute access.

    Non-dict values are returned unchanged; nested dicts become nested
    objects, so dict2obj({'a': {'b': 1}}).a.b == 1.
    """
    if not isinstance(d, dict):
        return d
    class C(object):
        pass
    obj = C()
    for key, value in d.items():
        setattr(obj, key, dict2obj(value))
    return obj
class Struct(object):
    """Simple attribute container: Struct(x=1).x == 1."""
    def __init__(self, **kwargs):
        for name, value in kwargs.items():
            setattr(self, name, value)
# original saved file with DataParallel
def remove_module(state_dict):
    """Strip the 'module.' prefix that nn.DataParallel adds to every key.

    Returns a new OrderedDict with the same values and original key order.
    """
    stripped = OrderedDict()
    for key, value in state_dict.items():
        stripped[key[7:]] = value  # drop the leading 'module.' (7 chars)
    return stripped
def dict_tensor2npy(tensor_dict):
    """Take the first batch element of every tensor and return a numpy dict."""
    return {key: value[0].cpu().numpy() for key, value in tensor_dict.items()}
# ---------------------------------- visualization
# Indices (0-based) of the last point of each 68-landmark contour segment;
# no connecting line is drawn after these points.
end_list = np.array([17, 22, 27, 42, 48, 31, 36, 68], dtype = np.int32) - 1
def plot_kpts(image, kpts, color = 'r'):
    ''' Draw 68 key points connected into facial contour segments.
    Args:
        image: the input image
        kpts: (68, 3) or (68, 4) array. When a 4th column is present it is
            treated as per-point confidence and overrides *color*
            (> 0.5 draws (0, 255, 0), otherwise (0, 0, 255)).
        color: 'r', 'g' or 'b' selecting the circle colour tuple.
    Returns:
        a copy of *image* with the keypoints and connecting lines drawn.
    '''
    if color == 'r':
        c = (255, 0, 0)
    elif color == 'g':
        c = (0, 255, 0)
    elif color == 'b':
        # BUG FIX: was (255, 0, 0), which silently drew 'b' the same as 'r'
        c = (0, 0, 255)
    image = image.copy()
    kpts = kpts.copy()
    for i in range(kpts.shape[0]):
        st = kpts[i, :2]
        if kpts.shape[1] == 4:
            # confidence column present: colour per point
            if kpts[i, 3] > 0.5:
                c = (0, 255, 0)
            else:
                c = (0, 0, 255)
        image = cv2.circle(image, (st[0], st[1]), 1, c, 2)
        if i in end_list:
            # end of a contour segment: do not connect to the next landmark
            continue
        ed = kpts[i + 1, :2]
        image = cv2.line(image, (st[0], st[1]), (ed[0], ed[1]), (255, 255, 255), 1)
    return image
def plot_verts(image, kpts, color = 'r'):
    """Draw every keypoint as a small filled circle on a copy of *image*.

    Args:
        image: the input image
        kpts: (N, >=2) array; only the first two columns (x, y) are used.
        color: one of 'r', 'g', 'b', 'y' selecting the circle colour tuple.
    Returns:
        a copy of *image* with the points drawn.
    """
    if color == 'r':
        c = (255, 0, 0)
    elif color == 'g':
        c = (0, 255, 0)
    elif color == 'b':
        c = (0, 0, 255)
    elif color == 'y':
        c = (0, 255, 255)
    canvas = image.copy()
    for point in kpts:
        canvas = cv2.circle(canvas, (point[0], point[1]), 1, c, 2)
    return canvas
def tensor_vis_landmarks(images, landmarks, gt_landmarks=None, color = 'g', isScale=True):
    """Overlay predicted (and optionally ground-truth) landmarks on a batch of images.

    Args:
        images: (B, 3, H, W) tensor; converted to numpy HWC BGR for drawing.
        landmarks: (B, N, >=2) predicted landmarks. When N == 68 they are
            drawn as connected contours (plot_kpts), otherwise as plain dots
            (plot_verts).
        gt_landmarks: optional ground-truth landmarks, always drawn as red dots.
        color: colour code forwarded to the drawing helpers.
        isScale: when True, predicted landmarks are assumed to be in
            normalized [-1, 1] coordinates and are mapped to pixel space
            via x * H/2 + H/2 (square images presumed -- TODO confirm).
    Returns:
        (B, 3, H, W) float tensor in [0, 1], channels back in RGB order.
    """
    vis_landmarks = []
    images = images.cpu().numpy()
    predicted_landmarks = landmarks.detach().cpu().numpy()
    if gt_landmarks is not None:
        gt_landmarks_np = gt_landmarks.detach().cpu().numpy()
    for i in range(images.shape[0]):
        image = images[i]
        # CHW -> HWC, RGB -> BGR, and scale to 0..255 for the cv2 drawing helpers
        image = image.transpose(1,2,0)[:,:,[2,1,0]].copy(); image = (image*255)
        if isScale:
            # normalized [-1, 1] -> pixel coordinates (uses height for both axes)
            predicted_landmark = predicted_landmarks[i]*image.shape[0]/2 + image.shape[0]/2
        else:
            predicted_landmark = predicted_landmarks[i]
        if predicted_landmark.shape[0] == 68:
            # full 68-landmark layout: draw connected facial contours
            image_landmarks = plot_kpts(image, predicted_landmark, color)
            if gt_landmarks is not None:
                image_landmarks = plot_verts(image_landmarks, gt_landmarks_np[i]*image.shape[0]/2 + image.shape[0]/2, 'r')
        else:
            # arbitrary point set: draw unconnected dots
            image_landmarks = plot_verts(image, predicted_landmark, color)
            if gt_landmarks is not None:
                image_landmarks = plot_verts(image_landmarks, gt_landmarks_np[i]*image.shape[0]/2 + image.shape[0]/2, 'r')
        vis_landmarks.append(image_landmarks)
    vis_landmarks = np.stack(vis_landmarks)
    # back to BCHW float tensor in [0, 1], channels restored to RGB
    vis_landmarks = torch.from_numpy(vis_landmarks[:,:,:,[2,1,0]].transpose(0,3,1,2))/255.#, dtype=torch.float32)
    return vis_landmarks
# -*- coding: utf-8 -*-
#
# Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is
# holder of all proprietary rights on this computer program.
# Using this computer program means that you agree to the terms
# in the LICENSE file included with this software distribution.
# Any use not explicitly granted by the LICENSE is prohibited.
#
# Copyright©2019 Max-Planck-Gesellschaft zur Förderung
# der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute
# for Intelligent Systems. All rights reserved.
#
# For comments or questions, please email us at deca@tue.mpg.de
# For commercial licensing contact, please contact ps-license@tuebingen.mpg.de
import torch
''' Rotation Converter
Representations:
euler angle(3), angle axis(3), rotation matrix(3x3), quaternion(4), continous repre
Ref:
https://kornia.readthedocs.io/en/v0.1.2/_modules/torchgeometry/core/conversions.html#
smplx/lbs
'''
# pi as a 1-element tensor so it can be moved to any device/dtype on demand
pi = torch.Tensor([3.14159265358979323846])
def rad2deg(tensor):
    """Convert angles from radians to degrees.

    Args:
        tensor (Tensor): angles in radians, any shape.
    Returns:
        Tensor: angles in degrees, same shape and dtype as the input.
    Raises:
        TypeError: if *tensor* is not a torch.Tensor.
    """
    if not torch.is_tensor(tensor):
        raise TypeError("Input type is not a torch.Tensor. Got {}".format(type(tensor)))
    pi_like = pi.to(tensor.device).type(tensor.dtype)
    return 180. * tensor / pi_like
def deg2rad(tensor):
    """Convert angles from degrees to radians.

    Args:
        tensor (Tensor): angles in degrees, any shape.
    Returns:
        Tensor: angles in radians, same shape and dtype as the input.
    Raises:
        TypeError: if *tensor* is not a torch.Tensor.
    """
    if not torch.is_tensor(tensor):
        raise TypeError("Input type is not a torch.Tensor. Got {}".format(type(tensor)))
    pi_like = pi.to(tensor.device).type(tensor.dtype)
    return tensor * pi_like / 180.
######### to quaternion
def euler_to_quaternion(r):
    """Convert Euler angles to quaternions ordered (w, x, y, z).

    Args:
        r (Tensor): (..., 3) Euler angles in radians, components (x, y, z).
    Returns:
        Tensor: (..., 4) unit quaternions with the same leading batch shape.
    """
    # half angles
    x = r[..., 0] / 2.0
    y = r[..., 1] / 2.0
    z = r[..., 2] / 2.0
    cz = torch.cos(z)
    sz = torch.sin(z)
    cy = torch.cos(y)
    sy = torch.sin(y)
    cx = torch.cos(x)
    sx = torch.sin(x)
    # FIX: the original allocated the output via zeros_like(r.repeat(1, 2)),
    # which only works for 2-D (N, 3) input; stacking on the last axis
    # supports any leading batch shape and is backward compatible.
    quaternion = torch.stack([
        cx * cy * cz - sx * sy * sz,   # w
        cx * sy * sz + cy * cz * sx,   # x
        cx * cz * sy - sx * cy * sz,   # y
        cx * cy * sz + sx * cz * sy,   # z
    ], dim=-1)
    return quaternion
def rotation_matrix_to_quaternion(rotation_matrix, eps=1e-6):
    """Convert a batch of 3x3 (stored as Nx3x4-compatible) rotation matrices
    to 4d quaternion vectors ordered (w, x, y, z).
    This algorithm is based on algorithm described in
    https://github.com/KieranWynn/pyquaternion/blob/master/pyquaternion/quaternion.py#L201
    Four candidate quaternions (q0..q3) are built from different diagonal
    combinations; per-sample masks select the numerically stable one.
    Args:
        rotation_matrix (Tensor): the rotation matrix to convert.
        eps (float): threshold used when testing the (2, 2) diagonal entry.
    Return:
        Tensor: the rotation in quaternion
    Shape:
        - Input: :math:`(N, 3, 4)`
        - Output: :math:`(N, 4)`
    Example:
        >>> input = torch.rand(4, 3, 4)  # Nx3x4
        >>> output = tgm.rotation_matrix_to_quaternion(input)  # Nx4
    """
    if not torch.is_tensor(rotation_matrix):
        raise TypeError("Input type is not a torch.Tensor. Got {}".format(
            type(rotation_matrix)))
    if len(rotation_matrix.shape) > 3:
        raise ValueError(
            "Input size must be a three dimensional tensor. Got {}".format(
                rotation_matrix.shape))
    # if not rotation_matrix.shape[-2:] == (3, 4):
    #     raise ValueError(
    #         "Input size must be a N x 3 x 4  tensor. Got {}".format(
    #             rotation_matrix.shape))
    # work on the transpose so indexing below matches the reference algorithm
    rmat_t = torch.transpose(rotation_matrix, 1, 2)
    # branch predicates: which diagonal combination is numerically safe
    mask_d2 = rmat_t[:, 2, 2] < eps
    mask_d0_d1 = rmat_t[:, 0, 0] > rmat_t[:, 1, 1]
    mask_d0_nd1 = rmat_t[:, 0, 0] < -rmat_t[:, 1, 1]
    # candidate 0: dominant x component
    t0 = 1 + rmat_t[:, 0, 0] - rmat_t[:, 1, 1] - rmat_t[:, 2, 2]
    q0 = torch.stack([rmat_t[:, 1, 2] - rmat_t[:, 2, 1],
                      t0, rmat_t[:, 0, 1] + rmat_t[:, 1, 0],
                      rmat_t[:, 2, 0] + rmat_t[:, 0, 2]], -1)
    t0_rep = t0.repeat(4, 1).t()
    # candidate 1: dominant y component
    t1 = 1 - rmat_t[:, 0, 0] + rmat_t[:, 1, 1] - rmat_t[:, 2, 2]
    q1 = torch.stack([rmat_t[:, 2, 0] - rmat_t[:, 0, 2],
                      rmat_t[:, 0, 1] + rmat_t[:, 1, 0],
                      t1, rmat_t[:, 1, 2] + rmat_t[:, 2, 1]], -1)
    t1_rep = t1.repeat(4, 1).t()
    # candidate 2: dominant z component
    t2 = 1 - rmat_t[:, 0, 0] - rmat_t[:, 1, 1] + rmat_t[:, 2, 2]
    q2 = torch.stack([rmat_t[:, 0, 1] - rmat_t[:, 1, 0],
                      rmat_t[:, 2, 0] + rmat_t[:, 0, 2],
                      rmat_t[:, 1, 2] + rmat_t[:, 2, 1], t2], -1)
    t2_rep = t2.repeat(4, 1).t()
    # candidate 3: dominant w component (trace-based, the common case)
    t3 = 1 + rmat_t[:, 0, 0] + rmat_t[:, 1, 1] + rmat_t[:, 2, 2]
    q3 = torch.stack([t3, rmat_t[:, 1, 2] - rmat_t[:, 2, 1],
                      rmat_t[:, 2, 0] - rmat_t[:, 0, 2],
                      rmat_t[:, 0, 1] - rmat_t[:, 1, 0]], -1)
    t3_rep = t3.repeat(4, 1).t()
    # exactly one of the four masks is 1 per sample
    mask_c0 = mask_d2 * mask_d0_d1.float()
    mask_c1 = mask_d2 * (1 - mask_d0_d1.float())
    mask_c2 = (1 - mask_d2.float()) * mask_d0_nd1
    mask_c3 = (1 - mask_d2.float()) * (1 - mask_d0_nd1.float())
    mask_c0 = mask_c0.view(-1, 1).type_as(q0)
    mask_c1 = mask_c1.view(-1, 1).type_as(q1)
    mask_c2 = mask_c2.view(-1, 1).type_as(q2)
    mask_c3 = mask_c3.view(-1, 1).type_as(q3)
    # blend candidates, then normalize by the selected t and scale by 1/2
    q = q0 * mask_c0 + q1 * mask_c1 + q2 * mask_c2 + q3 * mask_c3
    q /= torch.sqrt(t0_rep * mask_c0 + t1_rep * mask_c1 +  # noqa
                    t2_rep * mask_c2 + t3_rep * mask_c3)  # noqa
    q *= 0.5
    return q
# def angle_axis_to_quaternion(theta):
# batch_size = theta.shape[0]
# l1norm = torch.norm(theta + 1e-8, p=2, dim=1)
# angle = torch.unsqueeze(l1norm, -1)
# normalized = torch.div(theta, angle)
# angle = angle * 0.5
# v_cos = torch.cos(angle)
# v_sin = torch.sin(angle)
# quat = torch.cat([v_cos, v_sin * normalized], dim=1)
# return quat
def angle_axis_to_quaternion(angle_axis: torch.Tensor) -> torch.Tensor:
    """Convert an angle axis to a quaternion.
    Adapted from ceres C++ library: ceres-solver/include/ceres/rotation.h
    Args:
        angle_axis (torch.Tensor): tensor with angle axis.
    Return:
        torch.Tensor: tensor with quaternion, ordered (w, x, y, z).
    Shape:
        - Input: :math:`(*, 3)` where `*` means, any number of dimensions
        - Output: :math:`(*, 4)`
    Example:
        >>> angle_axis = torch.rand(2, 4)  # Nx4
        >>> quaternion = tgm.angle_axis_to_quaternion(angle_axis)  # Nx3
    """
    if not torch.is_tensor(angle_axis):
        raise TypeError("Input type is not a torch.Tensor. Got {}".format(
            type(angle_axis)))
    if not angle_axis.shape[-1] == 3:
        raise ValueError("Input must be a tensor of shape Nx3 or 3. Got {}"
                         .format(angle_axis.shape))
    # unpack input and compute conversion
    a0: torch.Tensor = angle_axis[..., 0:1]
    a1: torch.Tensor = angle_axis[..., 1:2]
    a2: torch.Tensor = angle_axis[..., 2:3]
    # theta is the rotation angle (magnitude of the axis-angle vector)
    theta_squared: torch.Tensor = a0 * a0 + a1 * a1 + a2 * a2
    theta: torch.Tensor = torch.sqrt(theta_squared)
    half_theta: torch.Tensor = theta * 0.5
    mask: torch.Tensor = theta_squared > 0.0
    ones: torch.Tensor = torch.ones_like(half_theta)
    # small-angle limit: sin(theta/2)/theta -> 1/2 as theta -> 0; k_pos is
    # NaN at theta == 0 but torch.where selects k_neg there instead
    k_neg: torch.Tensor = 0.5 * ones
    k_pos: torch.Tensor = torch.sin(half_theta) / theta
    k: torch.Tensor = torch.where(mask, k_pos, k_neg)
    # w = cos(theta/2); at theta == 0 this is exactly 1
    w: torch.Tensor = torch.where(mask, torch.cos(half_theta), ones)
    quaternion: torch.Tensor = torch.zeros_like(angle_axis)
    quaternion[..., 0:1] += a0 * k
    quaternion[..., 1:2] += a1 * k
    quaternion[..., 2:3] += a2 * k
    # print(quaternion)
    return torch.cat([w, quaternion], dim=-1)
#### quaternion to
def quaternion_to_rotation_matrix(quat):
    """Convert quaternion coefficients to rotation matrices.

    Args:
        quat: (B, 4) quaternions ordered (w, x, y, z); need not be
            unit-length (they are normalized first).
    Returns:
        Tensor: (B, 3, 3) rotation matrices.
    """
    # normalize so non-unit quaternions still yield proper rotations
    norm_quat = quat / quat.norm(p=2, dim=1, keepdim=True)
    w, x, y, z = norm_quat[:, 0], norm_quat[:, 1], norm_quat[:, 2], norm_quat[:, 3]
    batch = quat.size(0)
    w2, x2, y2, z2 = w.pow(2), x.pow(2), y.pow(2), z.pow(2)
    wx, wy, wz = w * x, w * y, w * z
    xy, xz, yz = x * y, x * z, y * z
    # standard quaternion -> matrix expansion, row-major
    entries = [
        w2 + x2 - y2 - z2, 2 * xy - 2 * wz, 2 * wy + 2 * xz,
        2 * wz + 2 * xy, w2 - x2 + y2 - z2, 2 * yz - 2 * wx,
        2 * xz - 2 * wy, 2 * wx + 2 * yz, w2 - x2 - y2 + z2,
    ]
    return torch.stack(entries, dim=1).view(batch, 3, 3)
def quaternion_to_angle_axis(quaternion: torch.Tensor):
    """Convert quaternion vector to angle axis of rotation. TODO: CORRECT
    Adapted from ceres C++ library: ceres-solver/include/ceres/rotation.h
    Args:
        quaternion (torch.Tensor): tensor with quaternions, ordered (w, x, y, z).
    Return:
        torch.Tensor: tensor with angle axis of rotation.
    Shape:
        - Input: :math:`(*, 4)` where `*` means, any number of dimensions
        - Output: :math:`(*, 3)`
    Example:
        >>> quaternion = torch.rand(2, 4)  # Nx4
        >>> angle_axis = tgm.quaternion_to_angle_axis(quaternion)  # Nx3
    """
    if not torch.is_tensor(quaternion):
        raise TypeError("Input type is not a torch.Tensor. Got {}".format(
            type(quaternion)))
    if not quaternion.shape[-1] == 4:
        raise ValueError("Input must be a tensor of shape Nx4 or 4. Got {}"
                         .format(quaternion.shape))
    # unpack input and compute conversion
    q1: torch.Tensor = quaternion[..., 1]
    q2: torch.Tensor = quaternion[..., 2]
    q3: torch.Tensor = quaternion[..., 3]
    # |(x, y, z)| = sin(theta/2) for a unit quaternion
    sin_squared_theta: torch.Tensor = q1 * q1 + q2 * q2 + q3 * q3
    sin_theta: torch.Tensor = torch.sqrt(sin_squared_theta)
    cos_theta: torch.Tensor = quaternion[..., 0]
    # atan2 on the negated pair keeps the recovered angle in a consistent
    # range when w < 0 (q and -q encode the same rotation)
    two_theta: torch.Tensor = 2.0 * torch.where(
        cos_theta < 0.0,
        torch.atan2(-sin_theta, -cos_theta),
        torch.atan2(sin_theta, cos_theta))
    # k scales (x, y, z) to the axis-angle vector; k_pos is NaN at
    # sin_theta == 0 but torch.where selects the small-angle limit k_neg = 2
    k_pos: torch.Tensor = two_theta / sin_theta
    k_neg: torch.Tensor = 2.0 * torch.ones_like(sin_theta).to(quaternion.device)
    k: torch.Tensor = torch.where(sin_squared_theta > 0.0, k_pos, k_neg)
    angle_axis: torch.Tensor = torch.zeros_like(quaternion).to(quaternion.device)[..., :3]
    angle_axis[..., 0] += q1 * k
    angle_axis[..., 1] += q2 * k
    angle_axis[..., 2] += q3 * k
    return angle_axis
#### batch converter
def batch_euler2axis(r):
    # Euler angles -> axis-angle, composed via the quaternion representation
    return quaternion_to_angle_axis(euler_to_quaternion(r))
def batch_euler2matrix(r):
    # Euler angles -> rotation matrices, composed via the quaternion representation
    return quaternion_to_rotation_matrix(euler_to_quaternion(r))
def batch_matrix2euler(rot_mats):
    """Convert a batch of rotation matrices to Euler angles.

    Args:
        rot_mats (Tensor): (N, 3, 3) rotation matrices.
    Returns:
        Tensor: (N, 3) angles, columns ordered (x, y, z) as recovered below.
            NOTE(review): the original code labelled these yaw/pitch/roll,
            but the mapping to aeronautical names is unverified.

    Careful for extreme cases of euler angles like [0.0, pi, 0.0]
    (gimbal lock is handled via the |R[2,0]| > 0.998 branches).
    """
    import math  # for pi in the gimbal-lock branches (numpy is not imported here)

    yaw = torch.zeros(rot_mats.shape[0], 1)
    pitch = torch.zeros(rot_mats.shape[0], 1)
    roll = torch.zeros(rot_mats.shape[0], 1)
    for batch_index, R in enumerate(rot_mats):
        if R[2, 0] > 0.998:
            # gimbal lock (singularity): z is unconstrained, fix it to 0
            z = 0
            x = math.pi / 2
            # FIX: was a bare `atan2(...)` (NameError) and `np.pi` with
            # numpy never imported in this module
            y = z + torch.atan2(-R[0, 1], -R[0, 2])
        elif R[2, 0] < -0.998:
            z = 0
            x = -math.pi / 2
            y = -z + torch.atan2(R[0, 1], R[0, 2])
        else:
            x = torch.asin(R[2, 0])
            y = torch.atan2(R[2, 1] / torch.cos(x), R[2, 2] / torch.cos(x))
            z = torch.atan2(R[1, 0] / torch.cos(x), R[0, 0] / torch.cos(x))
        yaw[batch_index] = x
        pitch[batch_index] = y
        roll[batch_index] = z
    # FIX: the original returned a (1, 3) tensor holding only the LAST
    # sample's (x, y, z); return the whole batch (identical for N == 1).
    return torch.cat([yaw, pitch, roll], dim=1)
def batch_matrix2axis(rot_mats):
    # rotation matrices -> axis-angle, composed via the quaternion representation
    return quaternion_to_angle_axis(rotation_matrix_to_quaternion(rot_mats))
def batch_axis2matrix(theta):
    # angle axis to rotation matrix
    # theta N x 3
    # return quat2mat(quat)
    # batch_rodrigues
    return quaternion_to_rotation_matrix(angle_axis_to_quaternion(theta))
def batch_axis2euler(theta):
    # axis-angle -> Euler angles, via the rotation-matrix conversion above
    return batch_matrix2euler(batch_axis2matrix(theta))
def batch_orth_proj(X, camera):
    """Weak-perspective (scaled orthographic) projection.

    Args:
        X: (N, num_points, 3) points.
        camera: per-batch parameters reshaped to (N, 1, 3); column 0 is a
            scale, columns 1:3 are an (x, y) translation.
    Returns:
        (N, num_points, 3): xy translated by the camera offset, then all
        three coordinates multiplied by the scale.
    """
    cam = camera.clone().view(-1, 1, 3)
    shifted_xy = X[:, :, :2] + cam[:, :, 1:]
    # z passes through untranslated; scale applies to all coordinates
    shifted = torch.cat([shifted_xy, X[:, :, 2:]], 2)
    return cam[:, :, 0:1] * shifted
def batch_rodrigues(rot_vecs, epsilon=1e-8, dtype=torch.float32):
    ''' Calculates the rotation matrices for a batch of rotation vectors
        (Rodrigues' formula); inverse mapping of batch_matrix2axis.
        Parameters
        ----------
        rot_vecs: torch.tensor Nx3
            array of N axis-angle vectors
        epsilon: float
            small offset added before taking the norm so that (near-)zero
            rotations do not divide by zero
        dtype: torch.dtype
            dtype of the intermediate tensors
        Returns
        -------
        R: torch.tensor Nx3x3
            The rotation matrices for the given axis-angle parameters
    '''
    batch_size = rot_vecs.shape[0]
    device = rot_vecs.device
    # FIX: use the epsilon parameter (it was accepted but a hard-coded 1e-8
    # was used instead, silently ignoring the argument)
    angle = torch.norm(rot_vecs + epsilon, dim=1, keepdim=True)
    rot_dir = rot_vecs / angle
    cos = torch.unsqueeze(torch.cos(angle), dim=1)
    sin = torch.unsqueeze(torch.sin(angle), dim=1)
    # Bx1 arrays
    rx, ry, rz = torch.split(rot_dir, 1, dim=1)
    zeros = torch.zeros((batch_size, 1), dtype=dtype, device=device)
    # skew-symmetric cross-product matrix K of the rotation axis
    # (a dead pre-allocation of K via torch.zeros was removed: it was
    # immediately overwritten by this torch.cat)
    K = torch.cat([zeros, -rz, ry, rz, zeros, -rx, -ry, rx, zeros], dim=1) \
        .view((batch_size, 3, 3))
    ident = torch.eye(3, dtype=dtype, device=device).unsqueeze(dim=0)
    # Rodrigues: R = I + sin(theta) K + (1 - cos(theta)) K^2
    rot_mat = ident + sin * K + (1 - cos) * torch.bmm(K, K)
    return rot_mat
| 12,670 | 30.132678 | 87 | py |
# -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
import shutil
import matplotlib
matplotlib.use('agg')
import sphinx_rtd_theme
sys.path.insert(0, os.path.abspath('../..'))
from pyUSID import __version__ as pyusid_version
# - Copy over examples folder to docs/source
# This makes it so that nbsphinx properly loads the notebook images
# Mirror the repository-level notebooks/ tree into docs/source/notebooks so
# that nbsphinx can resolve the notebooks (and their images) locally.
_conf_dir = os.path.dirname(__file__)
examples_source = os.path.abspath(os.path.join(_conf_dir, "..", "..", "notebooks"))
examples_dest = os.path.abspath(os.path.join(_conf_dir, "notebooks"))
# start from a clean destination on every build
if os.path.exists(examples_dest):
    shutil.rmtree(examples_dest)
os.mkdir(examples_dest)
_copied_suffixes = (".ipynb", ".md", ".rst")
for root, dirs, files in os.walk(examples_source):
    dest_root = root.replace(examples_source, examples_dest)
    # recreate the directory layout first ...
    for dr in dirs:
        os.mkdir(os.path.join(dest_root, dr))
    # ... then copy only the documentation source files
    for fil in files:
        if os.path.splitext(fil)[1] in _copied_suffixes:
            shutil.copyfile(os.path.join(root, fil), os.path.join(dest_root, fil))
# -- Project information -----------------------------------------------------
project = 'pyUSID'
copyright = '2018, Suhas Somnath and Chris R. Smith'
author = 'Suhas Somnath and Chris R. Smith'
# The short X.Y version
version = pyusid_version
# The full version, including alpha/beta/rc tags.
release = pyusid_version
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.autosummary',
'sphinx.ext.mathjax',
'nbsphinx',
'sphinx.ext.viewcode',
'sphinx.ext.autosummary',
'sphinx.ext.autosectionlabel',
'sphinx.ext.napoleon', # Use either napoleon or numpydoc not both.
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
source_suffix = ['.rst', '.md']
#source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# Ignore errors during notebook execution (for the time being...)
nbsphinx_allow_errors = True
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path .
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store', 'supporting_docs']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# Napoleon settings
# https://www.sphinx-doc.org/en/master/usage/extensions/napoleon.html
napoleon_google_docstring = True
napoleon_numpy_docstring = True
napoleon_include_init_with_doc = False
napoleon_include_private_with_doc = False
napoleon_include_special_with_doc = True
napoleon_use_admonition_for_examples = False
napoleon_use_admonition_for_notes = False
napoleon_use_admonition_for_references = False
napoleon_use_ivar = False
napoleon_use_param = True
napoleon_use_rtype = True
napoleon_type_aliases = None
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# Generate autosummary even if no references
autosummary_generate = True
autoclass_content = 'both'
autodoc_default_flags = ['members',
'inherited-members',
# 'private-members',
# 'show-inheritance'
]
autodoc_inherit_docstrings = True # If no class summary, inherit base class summary
# -- Options for HTML output -------------------------------------------------
# on_rtd is whether on readthedocs.org, this line of code grabbed from docs.readthedocs.org...
on_rtd = os.environ.get("READTHEDOCS", None) == "True"
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#html_title = u'pyUSID v0.0a4'
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = 'logo_v01.png'
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'
html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'pyUSIDdoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'pyUSID.tex', u'pyUSID Documentation',
u'Suhas Somnath and Chris R. Smith', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'pyUSID', u'pyUSID Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'pyUSID', u'pyUSID Documentation',
author, 'pyUSID', 'Framework for storing, visualizing, and processing Universal Spectroscopic and Imaging Data (USID)',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# -- Options for Epub output -------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
epub_author = author
epub_publisher = author
epub_copyright = copyright
# The basename for the epub file. It defaults to the project name.
#epub_basename = project
# The HTML theme for the epub output. Since the default themes are not
# optimized for small screen space, using the same theme for HTML and epub
# output is usually not wise. This defaults to 'epub', a theme designed to save
# visual space.
#epub_theme = 'epub'
# The language of the text. It defaults to the language option
# or 'en' if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# A sequence of (type, uri, title) tuples for the guide element of content.opf.
#epub_guide = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
# Choose between 'default' and 'includehidden'.
#epub_tocscope = 'default'
# Fix unsupported image types using the Pillow.
#epub_fix_images = False
# Scale large images.
#epub_max_image_width = 0
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#epub_show_urls = 'inline'
# If false, no index is generated.
#epub_use_index = True
# -- Extension configuration -------------------------------------------------
# -- Options for intersphinx extension ---------------------------------------
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'python': ('https://docs.python.org/{.major}'.format(sys.version_info), None),
'numpy': ('https://numpy.org/doc/stable/', None),
'scipy': ('https://docs.scipy.org/doc/scipy/reference', None),
'matplotlib': ('https://matplotlib.org/', None),
'h5py': ('https://docs.h5py.org/en/latest/', None),
'sphinx': ('https://www.sphinx-doc.org/en/master/', None),
'dask': ('https://docs.dask.org/en/latest/', None),
}
# -------------------------------------------------
| 14,888 | 32.160356 | 124 | py |
import torch
import torch.nn as nn
import torch.optim as optim
import configs
from data.qmul_loader import get_batch, train_people, test_people
from io_utils import parse_args_regression, get_resume_file
from methods.DKT_regression import DKT
from methods.feature_transfer_regression import FeatureTransfer
import backbone
import numpy as np
# ---- configuration & reproducibility -------------------------------------
params = parse_args_regression('test_regression')
np.random.seed(params.seed)
torch.manual_seed(params.seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
params.checkpoint_dir = '%scheckpoints/%s/%s_%s' % (configs.save_dir, params.dataset, params.model, params.method)
bb = backbone.Conv3().cuda()
# ---- model selection ------------------------------------------------------
if params.method=='DKT':
    model = DKT(bb).cuda()
    optimizer = None
elif params.method=='transfer':
    model = FeatureTransfer(bb).cuda()
    optimizer = optim.Adam([{'params':model.parameters(),'lr':0.001}])
else:
    # BUG FIX: the exception was constructed but never raised, so an unknown
    # method fell through to a confusing NameError on `model` below.
    raise ValueError('Unrecognised method')
model.load_checkpoint(params.checkpoint_dir)
# ---- evaluation -----------------------------------------------------------
mse_list = []
for epoch in range(params.n_test_epochs):
    mse = float(model.test_loop(params.n_support, optimizer).cpu().detach().numpy())
    mse_list.append(mse)
print("-------------------")
print("Average MSE: " + str(np.mean(mse_list)) + " +- " + str(np.std(mse_list)))
print("-------------------")
| 1,301 | 31.55 | 114 | py |
from pathlib import Path
import torch
import numpy as np
import random
import torch.optim
import torch.utils.data.sampler
import os
import time
from typing import Type
import configs
import backbone
import data.feature_loader as feat_loader
from data.datamgr import SetDataManager
from methods.baselinefinetune import BaselineFinetune
from methods.hypernets.hypernet_poc import HyperNetPOC
from methods.hypernets import hypernet_types
from methods.protonet import ProtoNet
from methods.DKT import DKT
from methods.matchingnet import MatchingNet
from methods.relationnet import RelationNet
from methods.maml import MAML
from methods.hypernets.bayeshmaml import BayesHMAML
from methods.hypernets.hypermaml import HyperMAML
from io_utils import model_dict, parse_args, get_best_file , get_assigned_file
def _set_seed(seed, verbose=True):
if(seed!=0):
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
if(verbose): print("[INFO] Setting SEED: " + str(seed))
else:
if(verbose): print("[INFO] Setting SEED: None")
def feature_evaluation(cl_data_file, model, n_way = 5, n_support = 5, n_query = 15, adaptation = False):
    """Run one few-shot episode on pre-extracted features and return accuracy (%).

    Args:
        cl_data_file: dict mapping class label -> list of feature vectors.
        model: few-shot model exposing ``set_forward`` / ``set_forward_adaptation``.
        n_way / n_support / n_query: episode configuration.
        adaptation: use the adaptation forward pass instead of the plain one.

    Returns:
        Episode accuracy as a float percentage.
    """
    # BUG FIX: random.sample requires a sequence; passing dict keys directly
    # raises TypeError on Python 3.11+ (set/view support was removed).
    class_list = list(cl_data_file.keys())
    select_class = random.sample(class_list, n_way)
    z_all = []
    for cl in select_class:
        img_feat = cl_data_file[cl]
        perm_ids = np.random.permutation(len(img_feat)).tolist()
        # stack n_support + n_query randomly chosen features for this class
        z_all.append( [ np.squeeze( img_feat[perm_ids[i]]) for i in range(n_support+n_query) ] )
    z_all = torch.from_numpy(np.array(z_all) )
    model.n_query = n_query
    if adaptation:
        scores = model.set_forward_adaptation(z_all, is_feature = True)
    else:
        scores, _ = model.set_forward(z_all, is_feature = True)
    pred = scores.data.cpu().numpy().argmax(axis = 1)
    y = np.repeat(range( n_way ), n_query )
    acc = np.mean(pred == y)*100
    return acc
def single_test(params):
    """Evaluate one trained few-shot model on the novel split.

    Builds the model named by ``params.method``, loads its checkpoint, runs
    600 evaluation episodes (from the image loader or from pre-extracted
    features, depending on the method) and appends the result to
    ./record/results.txt.

    Returns:
        (mean accuracy over episodes, evaluation wall time — 0 for
        feature-based methods).
    """
    acc_all = []
    iter_num = 600
    n_query = max(1, int(16 * params.test_n_way / params.train_n_way)) # if test_n_way is smaller than train_n_way, reduce n_query to keep batch size small
    print("n_query", n_query)
    few_shot_params = dict(n_way = params.test_n_way , n_support = params.n_shot, n_query=n_query)
    if params.dataset in ['omniglot', 'cross_char']:
        assert params.model == 'Conv4' and not params.train_aug ,f'model = {params.model}, train_aug= {params.train_aug} ' \
                                                                 f'omniglot only support Conv4 without augmentation'
        # params.model = 'Conv4S'
    # ----- build the requested few-shot model -----
    if params.method == 'baseline':
        model = BaselineFinetune( model_dict[params.model], **few_shot_params )
    elif params.method == 'baseline++':
        model = BaselineFinetune( model_dict[params.model], loss_type = 'dist', **few_shot_params )
    elif params.method == 'protonet':
        model = ProtoNet( model_dict[params.model], **few_shot_params )
    elif params.method == 'DKT':
        model = DKT(model_dict[params.model], **few_shot_params)
    elif params.method == 'matchingnet':
        model = MatchingNet( model_dict[params.model], **few_shot_params )
    elif params.method in ['relationnet', 'relationnet_softmax']:
        # RelationNet needs an un-flattened feature map backbone.
        if params.model == 'Conv4':
            feature_model = backbone.Conv4NP
        elif params.model == 'Conv6':
            feature_model = backbone.Conv6NP
        elif params.model == 'Conv4S':
            feature_model = backbone.Conv4SNP
        else:
            feature_model = lambda: model_dict[params.model]( flatten = False )
        loss_type = 'mse' if params.method == 'relationnet' else 'softmax'
        model = RelationNet( feature_model, loss_type = loss_type , **few_shot_params )
    elif params.method in ['maml' , 'maml_approx']:
        # Switch the backbone layers to their fast-weight variants for MAML.
        backbone.ConvBlock.maml = True
        backbone.SimpleBlock.maml = True
        backbone.BottleneckBlock.maml = True
        backbone.ResNet.maml = True
        model = MAML( model_dict[params.model], params=params, approx = (params.method == 'maml_approx') , **few_shot_params )
        if params.dataset in ['omniglot', 'cross_char']: #maml use different parameter in omniglot
            model.n_task = 32
            model.task_update_num = 1
            model.train_lr = 0.1
    elif params.method in list(hypernet_types.keys()):
        few_shot_params['n_query'] = 15
        hn_type: Type[HyperNetPOC] = hypernet_types[params.method]
        model = hn_type(model_dict[params.model], params=params, **few_shot_params)
        # model = HyperNetPOC(model_dict[params.model], **few_shot_params)
    elif params.method == 'hyper_maml' or params.method == 'bayes_hmaml':
        if params.method == 'bayes_hmaml':
            model = BayesHMAML(model_dict[params.model], params=params, approx=(params.method == 'maml_approx'), **few_shot_params)
        else:
            model = HyperMAML(model_dict[params.model], params=params, approx=(params.method == 'maml_approx'),
                              **few_shot_params)
        if params.dataset in ['omniglot', 'cross_char']: # maml use different parameter in omniglot
            model.n_task = 32
            model.train_lr = 0.1
    else:
        raise ValueError('Unknown method')
    few_shot_params["n_query"] = 15
    model = model.cuda()
    # ----- resolve the checkpoint directory and load the trained weights -----
    checkpoint_dir = '%s/checkpoints/%s/%s_%s' %(
        configs.save_dir,
        params.dataset,
        params.model,
        params.method
    )
    if params.train_aug:
        checkpoint_dir += '_aug'
    if not params.method in ['baseline', 'baseline++'] :
        checkpoint_dir += '_%dway_%dshot' %( params.train_n_way, params.n_shot)
    if params.checkpoint_suffix != "":
        checkpoint_dir = checkpoint_dir + "_" + params.checkpoint_suffix
    if params.dataset == "cross":
        # "cross" evaluation reuses the miniImagenet checkpoint when needed.
        if not Path(checkpoint_dir).exists():
            checkpoint_dir = checkpoint_dir.replace("cross", "miniImagenet")
    assert Path(checkpoint_dir).exists(), checkpoint_dir
    #modelfile = get_resume_file(checkpoint_dir)
    if not params.method in ['baseline', 'baseline++'] :
        if params.save_iter != -1:
            modelfile = get_assigned_file(checkpoint_dir,params.save_iter)
        else:
            modelfile = get_best_file(checkpoint_dir)
        print("Using model file", modelfile)
        if modelfile is not None:
            tmp = torch.load(modelfile)
            model.load_state_dict(tmp['state'])
        else:
            print("[WARNING] Cannot find 'best_file.tar' in: " + str(checkpoint_dir))
    split = params.split
    if params.save_iter != -1:
        split_str = split + "_" +str(params.save_iter)
    else:
        split_str = split
    # ----- run the evaluation episodes -----
    eval_time = 0
    if params.method in ['maml', 'maml_approx', 'hyper_maml','bayes_hmaml', 'DKT'] + list(hypernet_types.keys()): #maml do not support testing with feature
        if 'Conv' in params.model:
            if params.dataset in ['omniglot', 'cross_char']:
                image_size = 28
            else:
                image_size = 84
        else:
            image_size = 224
        datamgr = SetDataManager(image_size, n_eposide = iter_num, **few_shot_params)
        if params.dataset == 'cross':
            if split == 'base':
                loadfile = configs.data_dir['miniImagenet'] + 'all.json'
            else:
                loadfile = configs.data_dir['CUB'] + split +'.json'
        elif params.dataset == 'cross_char':
            if split == 'base':
                loadfile = configs.data_dir['omniglot'] + 'noLatin.json'
            else:
                loadfile = configs.data_dir['emnist'] + split +'.json'
        else:
            loadfile = configs.data_dir[params.dataset] + split + '.json'
        novel_loader = datamgr.get_data_loader( loadfile, aug = False)
        if params.adaptation:
            model.task_update_num = 100 if params.hn_val_epochs == -1 else params.hn_val_epochs
            #We perform adaptation on MAML simply by updating more times.
        model.eval()
        model.single_test = True
        if isinstance(model, (MAML, BayesHMAML, HyperMAML)):
            acc_mean, acc_std, eval_time, *_ = model.test_loop( novel_loader, return_std = True, return_time=True)
        else:
            acc_mean, acc_std, *_ = model.test_loop( novel_loader, return_std = True)
    else:
        # Feature-based methods evaluate on pre-extracted .hdf5 features.
        novel_file = os.path.join( checkpoint_dir.replace("checkpoints","features"), split_str +".hdf5") #defaut split = novel, but you can also test base or val classes
        cl_data_file = feat_loader.init_loader(novel_file)
        for i in range(iter_num):
            acc = feature_evaluation(cl_data_file, model, adaptation = params.adaptation, **few_shot_params)
            acc_all.append(acc)
        acc_all = np.asarray(acc_all)
        acc_mean = np.mean(acc_all)
        acc_std = np.std(acc_all)
        print('%d Test Acc = %4.2f%% +- %4.2f%%' %(iter_num, acc_mean, 1.96* acc_std/np.sqrt(iter_num)))
    # ----- append the result line to the shared results log -----
    with open('./record/results.txt' , 'a') as f:
        timestamp = time.strftime("%Y%m%d-%H%M%S", time.localtime())
        aug_str = '-aug' if params.train_aug else ''
        aug_str += '-adapted' if params.adaptation else ''
        if params.method in ['baseline', 'baseline++'] :
            exp_setting = '%s-%s-%s-%s%s %sshot %sway_test' %(params.dataset, split_str, params.model, params.method, aug_str, params.n_shot, params.test_n_way )
        else:
            exp_setting = '%s-%s-%s-%s%s %sshot %sway_train %sway_test' %(params.dataset, split_str, params.model, params.method, aug_str , params.n_shot , params.train_n_way, params.test_n_way )
        acc_str = '%d Test Acc = %4.2f%% +- %4.2f%%' %(iter_num, acc_mean, 1.96* acc_std/np.sqrt(iter_num))
        f.write( 'Time: %s, Setting: %s, Acc: %s \n' %(timestamp,exp_setting,acc_str) )
    print("Test loop time:", eval_time)
    return acc_mean, eval_time
def perform_test(params):
    """Run ``single_test`` once per seed in [seed, seed + repeat) and aggregate.

    Returns:
        dict with mean/std of accuracy and evaluation time plus the seed count.
    """
    base_seed, repeat = params.seed, params.repeat
    accuracies = []
    times = []
    for current in range(base_seed, base_seed + repeat):
        # seed 0 is the "do not seed" sentinel; otherwise reseed per iteration
        _set_seed(current if base_seed != 0 else 0)
        accuracy, elapsed = single_test(params)
        accuracies.append(accuracy)
        times.append(elapsed)
    mean_acc, std_acc = np.mean(accuracies), np.std(accuracies)
    mean_time, std_time = np.mean(times), np.std(times)
    print("-----------------------------")
    print(
        f'Seeds = {repeat} | Overall Test Acc = {mean_acc:.2f} +- {std_acc:.2f}. Eval time: {mean_time:.2f} +- {std_time:.2f}' )
    print("-----------------------------")
    return {
        "accuracy_mean": mean_acc,
        "accuracy_std": std_acc,
        "time_mean": mean_time,
        "time_std": std_time,
        "n_seeds": repeat
    }
def main():
    """CLI entry point: parse test-time arguments and launch the evaluation."""
    perform_test(parse_args('test'))
if __name__ == '__main__':
main()
| 11,323 | 40.028986 | 195 | py |
few-shot-hypernets-public | few-shot-hypernets-public-master/test_uncertainty.py | import torch
import numpy as np
import random
from torch.autograd import Variable
import torch.nn as nn
import torch.optim
import json
import torch.utils.data.sampler
import os
import glob
import time
import configs
import backbone
import data.feature_loader as feat_loader
from data.datamgr import SetDataManager
from methods.baselinetrain import BaselineTrain
from methods.baselinefinetune import BaselineFinetune
from methods.protonet import ProtoNet
from methods.DKT import DKT
from methods.matchingnet import MatchingNet
from methods.relationnet import RelationNet
from methods.maml import MAML
from io_utils import model_dict, get_resume_file, parse_args, get_best_file , get_assigned_file
def _set_seed(seed, verbose=True):
if(seed!=0):
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
if(verbose): print("[INFO] Setting SEED: " + str(seed))
else:
if(verbose): print("[INFO] Setting SEED: None")
class ECELoss(nn.Module):
    """Expected Calibration Error (ECE) of a classifier.

    The input is the model's raw logits, NOT softmax scores. Confidences are
    split into equally sized interval bins; within each bin we measure the
    gap |avg_confidence_in_bin - accuracy_in_bin| and return the average of
    the gaps weighted by bin occupancy.

    Adapted from: https://github.com/gpleiss/temperature_scaling
    See: Naeini, Pakdaman, Cooper, Hauskrecht. "Obtaining Well Calibrated
    Probabilities Using Bayesian Binning." AAAI 2015.
    """
    def __init__(self, n_bins=15):
        """n_bins (int): number of confidence interval bins."""
        super(ECELoss, self).__init__()
        edges = torch.linspace(0, 1, n_bins + 1)
        self.bin_lowers = edges[:-1]
        self.bin_uppers = edges[1:]
    def calibrate(self, logits, labels, iterations=50, lr=0.01):
        """Fit a scalar temperature minimising NLL with LBFGS (requires CUDA)."""
        temperature_raw = torch.ones(1, requires_grad=True, device="cuda")
        nll_criterion = nn.CrossEntropyLoss().cuda()
        optimizer = torch.optim.LBFGS([temperature_raw], lr=lr, max_iter=iterations)
        softplus = nn.Softplus()  # temperature must be > zero, Softplus could be used
        def closure():
            if torch.is_grad_enabled():
                optimizer.zero_grad()
            loss = nll_criterion(logits / temperature_raw.expand_as(logits), labels)
            if loss.requires_grad:
                loss.backward()
            return loss
        optimizer.step(closure)
        return temperature_raw
    def forward(self, logits, labels, temperature=1.0, onevsrest=False):
        scaled = logits / temperature
        if onevsrest:
            # normalize per-class sigmoids into a probability vector
            sig = torch.sigmoid(scaled)
            probs = sig / torch.sum(sig, dim=1, keepdim=True)
        else:
            probs = torch.softmax(scaled, dim=1)
        confidences, predictions = torch.max(probs, 1)
        correct = predictions.eq(labels)
        ece = torch.zeros(1, device=logits.device)
        for lo, hi in zip(self.bin_lowers, self.bin_uppers):
            # |confidence - accuracy| gap inside the bin (lo, hi]
            in_bin = confidences.gt(lo.item()) * confidences.le(hi.item())
            weight = in_bin.float().mean()
            if weight.item() > 0:
                gap = torch.abs(confidences[in_bin].mean() - correct[in_bin].float().mean())
                ece += gap * weight
        return ece
def get_logits_targets(params):
    """Collect raw logits and matching targets from a trained few-shot model.

    Builds the model named by ``params.method``, loads its checkpoint and runs
    600 evaluation episodes, either on images (MAML/DKT variants) or on
    pre-extracted features.

    Returns:
        (logits, targets): concatenated over all episodes; both CUDA tensors.
    """
    iter_num = 600
    few_shot_params = dict(n_way = params.test_n_way , n_support = params.n_shot)
    if params.dataset in ['omniglot', 'cross_char']:
        assert params.model == 'Conv4' and not params.train_aug ,'omniglot only support Conv4 without augmentation'
        params.model = 'Conv4S'
    # ----- build the requested few-shot model -----
    if params.method == 'baseline':
        model = BaselineFinetune( model_dict[params.model], **few_shot_params )
    elif params.method == 'baseline++':
        model = BaselineFinetune( model_dict[params.model], loss_type = 'dist', **few_shot_params )
    elif params.method == 'protonet':
        model = ProtoNet( model_dict[params.model], **few_shot_params )
    elif params.method == 'DKT':
        model = DKT(model_dict[params.model], **few_shot_params)
    elif params.method == 'matchingnet':
        model = MatchingNet( model_dict[params.model], **few_shot_params )
    elif params.method in ['relationnet', 'relationnet_softmax']:
        # RelationNet needs an un-flattened feature map backbone.
        if params.model == 'Conv4':
            feature_model = backbone.Conv4NP
        elif params.model == 'Conv6':
            feature_model = backbone.Conv6NP
        elif params.model == 'Conv4S':
            feature_model = backbone.Conv4SNP
        else:
            feature_model = lambda: model_dict[params.model]( flatten = False )
        loss_type = 'mse' if params.method == 'relationnet' else 'softmax'
        model = RelationNet( feature_model, loss_type = loss_type , **few_shot_params )
    elif params.method in ['maml' , 'maml_approx']:
        # Switch the backbone layers to their fast-weight variants for MAML.
        backbone.ConvBlock.maml = True
        backbone.SimpleBlock.maml = True
        backbone.BottleneckBlock.maml = True
        backbone.ResNet.maml = True
        model = MAML( model_dict[params.model], approx = (params.method == 'maml_approx') , **few_shot_params )
        if params.dataset in ['omniglot', 'cross_char']: #maml use different parameter in omniglot
            model.n_task = 32
            model.task_update_num = 1
            model.train_lr = 0.1
    else:
        raise ValueError('Unknown method')
    model = model.cuda()
    # ----- resolve the checkpoint directory and load the trained weights -----
    checkpoint_dir = '%s/checkpoints/%s/%s_%s' %(configs.save_dir, params.dataset, params.model, params.method)
    if params.train_aug:
        checkpoint_dir += '_aug'
    if not params.method in ['baseline', 'baseline++'] :
        checkpoint_dir += '_%dway_%dshot' %( params.train_n_way, params.n_shot)
    #modelfile = get_resume_file(checkpoint_dir)
    if not params.method in ['baseline', 'baseline++'] :
        if params.save_iter != -1:
            modelfile = get_assigned_file(checkpoint_dir,params.save_iter)
        else:
            modelfile = get_best_file(checkpoint_dir)
        if modelfile is not None:
            tmp = torch.load(modelfile)
            model.load_state_dict(tmp['state'])
        else:
            print("[WARNING] Cannot find 'best_file.tar' in: " + str(checkpoint_dir))
    split = params.split
    if params.save_iter != -1:
        split_str = split + "_" +str(params.save_iter)
    else:
        split_str = split
    # ----- collect logits/targets, from images or pre-extracted features -----
    if params.method in ['maml', 'maml_approx', 'DKT']: #maml do not support testing with feature
        if 'Conv' in params.model:
            if params.dataset in ['omniglot', 'cross_char']:
                image_size = 28
            else:
                image_size = 84
        else:
            image_size = 224
        datamgr = SetDataManager(image_size, n_eposide = iter_num, n_query = 15 , **few_shot_params)
        if params.dataset == 'cross':
            if split == 'base':
                loadfile = configs.data_dir['miniImagenet'] + 'all.json'
            else:
                loadfile = configs.data_dir['CUB'] + split +'.json'
        elif params.dataset == 'cross_char':
            if split == 'base':
                loadfile = configs.data_dir['omniglot'] + 'noLatin.json'
            else:
                loadfile = configs.data_dir['emnist'] + split +'.json'
        else:
            loadfile = configs.data_dir[params.dataset] + split + '.json'
        novel_loader = datamgr.get_data_loader( loadfile, aug = False)
        if params.adaptation:
            model.task_update_num = 100 #We perform adaptation on MAML simply by updating more times.
        model.eval()
        logits_list = list()
        targets_list = list()
        for i, (x,_) in enumerate(novel_loader):
            logits = model.get_logits(x).detach()
            targets = torch.tensor(np.repeat(range(params.test_n_way), model.n_query)).cuda()
            logits_list.append(logits) #.cpu().detach().numpy())
            targets_list.append(targets) #.cpu().detach().numpy())
    else:
        novel_file = os.path.join( checkpoint_dir.replace("checkpoints","features"), split_str +".hdf5")
        cl_data_file = feat_loader.init_loader(novel_file)
        logits_list = list()
        targets_list = list()
        n_query = 15
        n_way = few_shot_params['n_way']
        n_support = few_shot_params['n_support']
        # BUG FIX: random.sample requires a sequence; passing dict keys directly
        # raises TypeError on Python 3.11+ (set/view support was removed).
        class_list = list(cl_data_file.keys())
        for i in range(iter_num):
            #----------------------
            select_class = random.sample(class_list,n_way)
            z_all  = []
            for cl in select_class:
                img_feat = cl_data_file[cl]
                perm_ids = np.random.permutation(len(img_feat)).tolist()
                z_all.append( [ np.squeeze( img_feat[perm_ids[i]]) for i in range(n_support+n_query) ] )     # stack each batch
            z_all = torch.from_numpy(np.array(z_all))
            model.n_query = n_query
            logits = model.set_forward(z_all, is_feature = True).detach()
            targets = torch.tensor(np.repeat(range(n_way), n_query)).cuda()
            logits_list.append(logits)
            targets_list.append(targets)
            #----------------------
    return torch.cat(logits_list, 0), torch.cat(targets_list, 0)
def main():
    """Two-stage ECE evaluation: (1) fit a temperature, (2) measure calibrated ECE."""
    params = parse_args('test')
    seed, repeat = params.seed, params.repeat
    # Stage 1: estimate the temperature hyperparameter via calibration runs.
    print("Calibration: finding temperature hyperparameter...")
    ece_module = ECELoss()
    temperature_list = []
    for _ in range(repeat):
        _set_seed(0)  # random seed
        logits, targets = get_logits_targets(parse_args('test'))
        temperature = ece_module.calibrate(logits, targets, iterations=300, lr=0.01).item()
        if temperature > 0:  # filter invalid (non-positive) temperatures
            temperature_list.append(temperature)
        print("Calibration: temperature", temperature, "; mean temperature", np.mean(temperature_list))
    temperature = np.mean(temperature_list) if len(temperature_list) > 0 else 1.0
    # Stage 2: record the ECE once per seed in [seed, seed + repeat).
    ece_list = []
    for i in range(seed, seed + repeat):
        _set_seed(i if seed != 0 else 0)
        logits, targets = get_logits_targets(parse_args('test'))
        ece_list.append(ece_module.forward(logits, targets, temperature, onevsrest=False).item())
        print("ECE:", np.mean(ece_list), "+-", np.std(ece_list))
    # Stage 3: report the ECE averaged over all seeds.
    print("-----------------------------")
    print('Seeds = %d | Overall ECE = %4.4f +- %4.4f' %(repeat, np.mean(ece_list), np.std(ece_list)))
    print("-----------------------------")
| 11,741 | 43.309434 | 127 | py |
few-shot-hypernets-public | few-shot-hypernets-public-master/utils.py | import torch
import numpy as np
def one_hot(y, num_class):
    """Convert integer class labels ``y`` (1-D LongTensor) into a float one-hot
    matrix of shape (len(y), num_class)."""
    canvas = torch.zeros((len(y), num_class))
    return canvas.scatter_(1, y.unsqueeze(1), 1)
def DBindex(cl_data_file):
    """Davies-Bouldin index of the per-class feature clusters.

    Lower values mean tighter, better-separated clusters. ``cl_data_file``
    maps class label -> array-like of feature vectors.
    """
    labels = list(cl_data_file.keys())
    n_classes = len(labels)
    means = []
    spreads = []
    for label in labels:
        feats = cl_data_file[label]
        center = np.mean(feats, axis=0)
        means.append(center)
        # RMS distance of the class features to their centroid
        spreads.append(np.sqrt(np.mean(np.sum(np.square(feats - center), axis=1))))
    centers = np.array(means)
    # pairwise Euclidean distances between class centroids
    diffs = centers[np.newaxis, :, :] - centers[:, np.newaxis, :]
    mdists = np.sqrt(np.sum(np.square(diffs), axis=2))
    per_class = [
        max((spreads[i] + spreads[j]) / mdists[i, j]
            for j in range(n_classes) if j != i)
        for i in range(n_classes)
    ]
    return np.mean(per_class)
def sparsity(cl_data_file):
    """Average count of non-zero entries per feature vector, averaged over classes."""
    per_class = [
        np.mean([np.sum(x != 0) for x in feats])
        for feats in cl_data_file.values()
    ]
    return np.mean(per_class)
| 1,052 | 31.90625 | 102 | py |
few-shot-hypernets-public | few-shot-hypernets-public-master/backbone.py | # This code is modified from https://github.com/facebookresearch/low-shot-shrink-hallucinate
import torch
import torch.nn as nn
import math
import torch.nn.functional as F
from torch.nn.utils.weight_norm import WeightNorm
# Basic ResNet model
def init_layer(L):
    """Initialize a layer in place: fan-in normal for Conv2d, identity for BatchNorm2d."""
    if isinstance(L, nn.Conv2d):
        # fan-in count: kernel area times output channels
        fan = float(L.kernel_size[0] * L.kernel_size[1] * L.out_channels)
        L.weight.data.normal_(0, math.sqrt(2.0 / fan))
    elif isinstance(L, nn.BatchNorm2d):
        L.weight.data.fill_(1)
        L.bias.data.fill_(0)
class distLinear(nn.Module):
    """Cosine-similarity classifier head (baseline++ style).

    Rows of the weight matrix act as class prototypes; the logit for a class
    is the cosine similarity between the L2-normalized input and that row,
    multiplied by a fixed scale factor so softmax receives reasonably large inputs.
    """
    def __init__(self, indim, outdim):
        super(distLinear, self).__init__()
        self.L = nn.Linear(indim, outdim, bias=False)
        # Learn a per-class norm on top of the direction (see issues #4 & #8 upstream).
        self.class_wise_learnable_norm = True
        if self.class_wise_learnable_norm:
            # Reparametrize the weight into direction and norm components.
            WeightNorm.apply(self.L, 'weight', dim=0)
        # Large label spaces (e.g. omniglot, >1000 classes) need a bigger scale.
        self.scale_factor = 2 if outdim <= 200 else 10
    def forward(self, x):
        norms = torch.norm(x, p=2, dim=1).unsqueeze(1).expand_as(x)
        unit_x = x.div(norms + 0.00001)
        if not self.class_wise_learnable_norm:
            # Manually normalize the prototype rows when WeightNorm is disabled.
            w = self.L.weight.data
            w_norm = torch.norm(w, p=2, dim=1).unsqueeze(1).expand_as(w)
            self.L.weight.data = w.div(w_norm + 0.00001)
        cos_dist = self.L(unit_x)
        return self.scale_factor * cos_dist
class Flatten(nn.Module):
    """Flatten every dimension after the batch dimension into one."""
    def __init__(self):
        super(Flatten, self).__init__()
    def forward(self, x):
        batch = x.size(0)
        return x.view(batch, -1)
class Linear_fw(nn.Linear):
    """Linear layer with optional MAML "fast" weights.

    ``weight.fast`` / ``bias.fast`` hold temporarily adapted parameters; when
    both are set, the layer uses them instead of its regular parameters.
    """
    def __init__(self, in_features, out_features):
        super(Linear_fw, self).__init__(in_features, out_features)
        # Lazy hack: hang the fast-weight link directly off the Parameter.
        self.weight.fast = None
        self.bias.fast = None
    def forward(self, x):
        use_fast = self.weight.fast is not None and self.bias.fast is not None
        if use_fast:
            return F.linear(x, self.weight.fast, self.bias.fast)
        return super(Linear_fw, self).forward(x)
class BLinear_fw(Linear_fw):
    """Bayesian variant of ``Linear_fw`` used by Bayesian HyperMAML.

    ``weight.fast`` / ``bias.fast`` hold a *collection* of sampled fast
    parameters; the forward pass averages the predictions of all samples.
    """
    def __init__(self, in_features, out_features):
        super(BLinear_fw, self).__init__(in_features, out_features)
        # Slots for the variational posterior parameters of each weight.
        self.weight.logvar = None
        self.weight.mu = None
        self.bias.logvar = None
        self.bias.mu = None
    def forward(self, x):
        if self.weight.fast is None or self.bias.fast is None:
            return super(BLinear_fw, self).forward(x)
        preds = [F.linear(x, w, b) for w, b in zip(self.weight.fast, self.bias.fast)]
        return sum(preds) / len(preds)
class Conv2d_fw(nn.Conv2d):
    """Conv2d with optional MAML fast weights (see ``Linear_fw``)."""
    def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, bias=True):
        super(Conv2d_fw, self).__init__(in_channels, out_channels, kernel_size,
                                        stride=stride, padding=padding, bias=bias)
        self.weight.fast = None
        if self.bias is not None:
            self.bias.fast = None
    def forward(self, x):
        if self.bias is None:
            # Bias-free convolution: only the weight may have a fast version.
            if self.weight.fast is None:
                return super(Conv2d_fw, self).forward(x)
            return F.conv2d(x, self.weight.fast, None, stride=self.stride, padding=self.padding)
        if self.weight.fast is not None and self.bias.fast is not None:
            return F.conv2d(x, self.weight.fast, self.bias.fast, stride=self.stride, padding=self.padding)
        return super(Conv2d_fw, self).forward(x)
class BatchNorm2d_fw(nn.BatchNorm2d):
    """BatchNorm2d with optional MAML fast weights.

    Always normalizes with fresh zero/one running stats and momentum=1
    (batch_norm momentum hack following Kate Rakelly's pytorch-maml), i.e.
    pure batch statistics on every call.
    """
    def __init__(self, num_features):
        super(BatchNorm2d_fw, self).__init__(num_features)
        self.weight.fast = None
        self.bias.fast = None
    def forward(self, x):
        channels = x.data.size()[1]
        running_mean = torch.zeros(channels).cuda()
        running_var = torch.ones(channels).cuda()
        if self.weight.fast is not None and self.bias.fast is not None:
            weight, bias = self.weight.fast, self.bias.fast
        else:
            weight, bias = self.weight, self.bias
        return F.batch_norm(x, running_mean, running_var, weight, bias, training=True, momentum=1)
# Simple Conv Block
class ConvBlock(nn.Module):
    """Conv3x3 -> BatchNorm -> ReLU (-> MaxPool2) building block.

    When the class attribute ``maml`` is True, fast-weight layer variants are
    used so the block can be adapted in the MAML inner loop.
    """
    maml = False  # default: plain layers without fast weights
    def __init__(self, indim, outdim, pool=True, padding=1):
        super(ConvBlock, self).__init__()
        self.indim = indim
        self.outdim = outdim
        if self.maml:
            self.C = Conv2d_fw(indim, outdim, 3, padding=padding)
            self.BN = BatchNorm2d_fw(outdim)
        else:
            self.C = nn.Conv2d(indim, outdim, 3, padding=padding)
            self.BN = nn.BatchNorm2d(outdim)
        self.relu = nn.ReLU(inplace=True)
        self.parametrized_layers = [self.C, self.BN, self.relu]
        if pool:
            self.pool = nn.MaxPool2d(2)
            self.parametrized_layers.append(self.pool)
        for layer in self.parametrized_layers:
            init_layer(layer)
        self.trunk = nn.Sequential(*self.parametrized_layers)
    def forward(self, x):
        return self.trunk(x)
# Simple ResNet Block
class SimpleBlock(nn.Module):
    """Basic two-conv residual block (ResNet-18/34 style)."""
    maml = False  # default: plain layers without fast weights
    def __init__(self, indim, outdim, half_res):
        super(SimpleBlock, self).__init__()
        self.indim = indim
        self.outdim = outdim
        stride = 2 if half_res else 1
        if self.maml:
            conv_cls, bn_cls = Conv2d_fw, BatchNorm2d_fw
        else:
            conv_cls, bn_cls = nn.Conv2d, nn.BatchNorm2d
        self.C1 = conv_cls(indim, outdim, kernel_size=3, stride=stride, padding=1, bias=False)
        self.BN1 = bn_cls(outdim)
        self.C2 = conv_cls(outdim, outdim, kernel_size=3, padding=1, bias=False)
        self.BN2 = bn_cls(outdim)
        self.relu1 = nn.ReLU(inplace=True)
        self.relu2 = nn.ReLU(inplace=True)
        self.parametrized_layers = [self.C1, self.C2, self.BN1, self.BN2]
        self.half_res = half_res
        if indim != outdim:
            # channel mismatch: project the shortcut with a 1x1 convolution
            self.shortcut = conv_cls(indim, outdim, 1, stride, bias=False)
            self.BNshortcut = bn_cls(outdim)
            self.parametrized_layers.append(self.shortcut)
            self.parametrized_layers.append(self.BNshortcut)
            self.shortcut_type = '1x1'
        else:
            self.shortcut_type = 'identity'
        for layer in self.parametrized_layers:
            init_layer(layer)
    def forward(self, x):
        out = self.relu1(self.BN1(self.C1(x)))
        out = self.BN2(self.C2(out))
        if self.shortcut_type == 'identity':
            residual = x
        else:
            residual = self.BNshortcut(self.shortcut(x))
        return self.relu2(out + residual)
# Bottleneck block
class BottleneckBlock(nn.Module):
    """Three-conv bottleneck residual block (ResNet-50 style): 1x1 -> 3x3 -> 1x1."""
    maml = False  # default: plain layers without fast weights
    def __init__(self, indim, outdim, half_res):
        super(BottleneckBlock, self).__init__()
        bottleneckdim = int(outdim / 4)
        self.indim = indim
        self.outdim = outdim
        stride = 2 if half_res else 1
        if self.maml:
            conv_cls, bn_cls = Conv2d_fw, BatchNorm2d_fw
        else:
            conv_cls, bn_cls = nn.Conv2d, nn.BatchNorm2d
        self.C1 = conv_cls(indim, bottleneckdim, kernel_size=1, bias=False)
        self.BN1 = bn_cls(bottleneckdim)
        self.C2 = conv_cls(bottleneckdim, bottleneckdim, kernel_size=3, stride=stride, padding=1)
        self.BN2 = bn_cls(bottleneckdim)
        self.C3 = conv_cls(bottleneckdim, outdim, kernel_size=1, bias=False)
        self.BN3 = bn_cls(outdim)
        self.relu = nn.ReLU()
        self.parametrized_layers = [self.C1, self.BN1, self.C2, self.BN2, self.C3, self.BN3]
        self.half_res = half_res
        if indim != outdim:
            # channel mismatch: project the shortcut with a 1x1 convolution
            self.shortcut = conv_cls(indim, outdim, 1, stride=stride, bias=False)
            self.parametrized_layers.append(self.shortcut)
            self.shortcut_type = '1x1'
        else:
            self.shortcut_type = 'identity'
        for layer in self.parametrized_layers:
            init_layer(layer)
    def forward(self, x):
        residual = x if self.shortcut_type == 'identity' else self.shortcut(x)
        out = self.relu(self.BN1(self.C1(x)))
        out = self.relu(self.BN2(self.C2(out)))
        out = self.BN3(self.C3(out))
        return self.relu(out + residual)
class ConvNet(nn.Module):
    """Stack of ``depth`` ConvBlocks (64 channels each); pooling only in the first 4."""
    def __init__(self, depth, flatten=True, pool=False):
        super(ConvNet, self).__init__()
        layers = []
        for i in range(depth):
            in_ch = 3 if i == 0 else 64
            # only the first 4 blocks downsample with max-pooling
            layers.append(ConvBlock(in_ch, 64, pool=(i < 4)))
        if pool:
            layers.append(nn.AdaptiveAvgPool2d((1, 1)))
        if flatten:
            layers.append(Flatten())
        self.trunk = nn.Sequential(*layers)
        self.final_feat_dim: int = 64  # outdim if pool else 1600
    def forward(self, x):
        return self.trunk(x)
class ConvNetNopool(nn.Module):
    """4-layer conv net for RelationNet: pooling and zero padding only in the
    first two layers, so a spatial feature map [64, 19, 19] is preserved."""
    def __init__(self, depth):
        super(ConvNetNopool, self).__init__()
        layers = []
        for i in range(depth):
            in_ch = 3 if i == 0 else 64
            early = i in [0, 1]
            layers.append(ConvBlock(in_ch, 64, pool=early, padding=0 if early else 1))
        self.trunk = nn.Sequential(*layers)
        self.final_feat_dim = [64, 19, 19]
    def forward(self, x):
        return self.trunk(x)
class ConvNetS(nn.Module):
    """ConvNet for omniglot: single input channel, 64-dim flattened output."""
    def __init__(self, depth, flatten=True):
        super(ConvNetS, self).__init__()
        layers = []
        for i in range(depth):
            in_ch = 1 if i == 0 else 64
            # only the first 4 blocks downsample with max-pooling
            layers.append(ConvBlock(in_ch, 64, pool=(i < 4)))
        if flatten:
            layers.append(Flatten())
        self.trunk = nn.Sequential(*layers)
        self.final_feat_dim = 64
    def forward(self, x):
        grayscale = x[:, 0:1, :, :]  # only use the first channel
        return self.trunk(grayscale)
class ConvNetSNopool(nn.Module):
    """RelationNet backbone for omniglot: 1 input channel, output map [64, 5, 5].
    Pooling and zero padding only in the first two layers."""
    def __init__(self, depth):
        super(ConvNetSNopool, self).__init__()
        layers = []
        for i in range(depth):
            in_ch = 1 if i == 0 else 64
            early = i in [0, 1]
            layers.append(ConvBlock(in_ch, 64, pool=early, padding=0 if early else 1))
        self.trunk = nn.Sequential(*layers)
        self.final_feat_dim = [64, 5, 5]
    def forward(self, x):
        return self.trunk(x[:, 0:1, :, :])  # only use the first channel
class ResNet(nn.Module):
    """Generic 4-stage ResNet trunk built from a given residual block type.

    ``list_of_num_layers`` gives the block count per stage and
    ``list_of_out_dims`` the output channels per stage.
    """
    maml = False  # default: plain layers without fast weights
    def __init__(self, block, list_of_num_layers, list_of_out_dims, flatten=True):
        super(ResNet, self).__init__()
        assert len(list_of_num_layers) == 4, 'Can have only four stages'
        if self.maml:
            conv1 = Conv2d_fw(3, 64, kernel_size=7, stride=2, padding=3,
                              bias=False)
            bn1 = BatchNorm2d_fw(64)
        else:
            conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
                              bias=False)
            bn1 = nn.BatchNorm2d(64)
        init_layer(conv1)
        init_layer(bn1)
        trunk = [conv1, bn1, nn.ReLU(), nn.MaxPool2d(kernel_size=3, stride=2, padding=1)]
        indim = 64
        for stage in range(4):
            for depth in range(list_of_num_layers[stage]):
                # every stage after the first halves resolution in its first block
                half_res = stage >= 1 and depth == 0
                trunk.append(block(indim, list_of_out_dims[stage], half_res))
                indim = list_of_out_dims[stage]
        if flatten:
            trunk.append(nn.AvgPool2d(7))
            trunk.append(Flatten())
            self.final_feat_dim = indim
        else:
            self.final_feat_dim = [indim, 7, 7]
        self.trunk = nn.Sequential(*trunk)
    def forward(self, x):
        return self.trunk(x)
# Backbone for QMUL regression
class Conv3(nn.Module):
    """Three-layer dilated conv backbone for QMUL regression.

    Also supports snapshotting/restoring its conv weights via
    ``return_clones`` / ``assign_clones``.
    """
    def __init__(self):
        super(Conv3, self).__init__()
        self.layer1 = nn.Conv2d(3, 36, 3, stride=2, dilation=2)
        self.layer2 = nn.Conv2d(36, 36, 3, stride=2, dilation=2)
        self.layer3 = nn.Conv2d(36, 36, 3, stride=2, dilation=2)
    def return_clones(self):
        """Return detached copies of the three conv weight tensors."""
        return [layer.weight.data.clone().detach()
                for layer in (self.layer1, self.layer2, self.layer3)]
    def assign_clones(self, weights_list):
        """Copy previously saved weights (see ``return_clones``) back in place."""
        for layer, saved in zip((self.layer1, self.layer2, self.layer3), weights_list):
            layer.weight.data.copy_(saved)
    def forward(self, x):
        out = F.relu(self.layer1(x))
        out = F.relu(self.layer2(out))
        out = F.relu(self.layer3(out))
        return out.view(out.size(0), -1)
# just to test the kernel hypothesis
# A plain MLP used as a learned "kernel" head on top of a conv backbone.
class BackboneKernel(nn.Module):
def __init__(self, input_dim: int, output_dim: int, num_layers: int, hidden_dim: int, flatten: bool =False, **kwargs):
super().__init__()
self.input_dim = input_dim
self.output_dim = output_dim
self.num_layers = num_layers
self.hidden_dim = hidden_dim
self.flatten = flatten
self.model = self.create_model()
def create_model(self):
# Build: [Flatten?] -> Linear(in, hidden) -> ReLU -> (num_layers-1) x
# [Linear(hidden, hidden) -> ReLU] -> Linear(hidden, out).
assert self.num_layers >= 1, "Number of hidden layers must be at least 1"
modules = [nn.Linear(self.input_dim, self.hidden_dim), nn.ReLU()]
if self.flatten:
modules = [nn.Flatten()] + modules
for i in range(self.num_layers - 1):
modules.append(nn.Linear(self.hidden_dim, self.hidden_dim))
modules.append(nn.ReLU())
modules.append(nn.Linear(self.hidden_dim, self.output_dim))
model = nn.Sequential(*modules)
return model
def forward(self, x, **params):
r"""
Apply the MLP to `x` and return the transformed features.
`params` is accepted only for signature compatibility with kernel-style
callers and is ignored here.
"""
out = self.model(x)
return out
class ConvNet4WithKernel(nn.Module):
    """Conv4 feature extractor followed by an MLP ("NN kernel") head."""

    def __init__(self):
        super(ConvNet4WithKernel, self).__init__()
        # 1600 = flattened Conv4 output size; presumably for 84x84 inputs -- TODO confirm.
        feat_size = 1600
        self.input_dim = feat_size
        self.output_dim = feat_size
        self.num_layers = 4
        self.hidden_dim = 64
        # Keep submodule creation order (Conv4 first) for state-dict / RNG parity.
        self.Conv4 = ConvNet(4)
        self.nn_kernel = BackboneKernel(self.input_dim, self.output_dim,
                                        self.num_layers, self.hidden_dim)
        self.final_feat_dim = self.output_dim

    def forward(self, x):
        return self.nn_kernel(self.Conv4(x))
class ResNet10WithKernel(nn.Module):
def __init__(self):
super(ResNet10WithKernel, self).__init__()
# NOTE(review): these are all None placeholders, so BackboneKernel will
# crash in nn.Linear on instantiation. Also the backbone used below is
# ConvNet(4), not a ResNet10 despite the class name. This class looks
# unfinished -- confirm intended sizes before using it.
conv_out_size = None
hn_kernel_layers_no = None
hn_kernel_hidden_dim = None
self.input_dim = conv_out_size
self.output_dim = conv_out_size
self.num_layers = hn_kernel_layers_no
self.hidden_dim = hn_kernel_hidden_dim
self.Conv4 = ConvNet(4)
self.nn_kernel = BackboneKernel(self.input_dim, self.output_dim,
self.num_layers, self.hidden_dim)
def forward(self, x):
x = self.Conv4(x)
# Flattens across the whole batch into a single row before the kernel head.
x = torch.unsqueeze(torch.flatten(x), 0)
out = self.nn_kernel(x)
return out
# Thin zero-argument factories so model registries can hold plain callables.
# S = "small" (single-channel) variant, NP = "no pooling" variant.
def Conv4():
return ConvNet(4)
def Conv4Pool():
return ConvNet(4, pool=True)
def Conv6():
return ConvNet(6)
def Conv4NP():
return ConvNetNopool(4)
def Conv6NP():
return ConvNetNopool(6)
def Conv4S():
return ConvNetS(4)
def Conv4SNP():
return ConvNetSNopool(4)
# ResNet factories: (block type, blocks per stage, channels per stage).
def ResNet10( flatten = True):
return ResNet(SimpleBlock, [1,1,1,1],[64,128,256,512], flatten)
def ResNet12(flatten=True):
# Wraps learn2learn's ResNet12 backbone behind the same factory interface.
from learn2learn.vision.models import resnet12
class R12(nn.Module):
def __init__(self):
super().__init__()
self.model = resnet12.ResNet12Backbone()
# NOTE(review): avgpool/flat are created but never applied in forward();
# presumably ResNet12Backbone already pools/flattens -- confirm.
self.avgpool = nn.AvgPool2d(14)
self.flat = nn.Flatten()
self.final_feat_dim = 640 # 640
def forward(self, x):
x = self.model(x)
return x
return R12()
def ResNet18( flatten = True):
return ResNet(SimpleBlock, [2,2,2,2],[64,128,256,512], flatten)
def ResNet34( flatten = True):
return ResNet(SimpleBlock, [3,4,6,3],[64,128,256,512], flatten)
def ResNet50( flatten = True):
return ResNet(BottleneckBlock, [3,4,6,3], [256,512,1024,2048], flatten)
def ResNet101( flatten = True):
return ResNet(BottleneckBlock, [3,4,23,3],[256,512,1024,2048], flatten)
def Conv4WithKernel():
return ConvNet4WithKernel()
def ResNetWithKernel():
return ResNet10WithKernel()
| 21,085 | 35.355172 | 206 | py |
few-shot-hypernets-public | few-shot-hypernets-public-master/train_regression.py | import torch
import torch.nn as nn
import torch.optim as optim
import configs
from data.qmul_loader import get_batch, train_people, test_people
from io_utils import parse_args_regression, get_resume_file
from methods.DKT_regression import DKT
from methods.feature_transfer_regression import FeatureTransfer
import backbone
import os
import numpy as np
# Entry script: train a regression model (DKT or feature transfer) on QMUL.
params = parse_args_regression('train_regression')

# Make runs reproducible.
np.random.seed(params.seed)
torch.manual_seed(params.seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False

# Ensure the per-dataset checkpoint directory exists, then point at the
# method-specific checkpoint prefix inside it.
params.checkpoint_dir = '%scheckpoints/%s/' % (configs.save_dir, params.dataset)
if not os.path.isdir(params.checkpoint_dir):
    os.makedirs(params.checkpoint_dir)
params.checkpoint_dir = '%scheckpoints/%s/%s_%s' % (configs.save_dir, params.dataset, params.model, params.method)

bb = backbone.Conv3().cuda()

if params.method == 'DKT':
    model = DKT(bb).cuda()
elif params.method == 'transfer':
    model = FeatureTransfer(bb).cuda()
else:
    # BUG FIX: the original built ValueError('Unrecognised method') but never
    # raised it, so execution fell through and crashed later with an
    # undefined `model`. Raise explicitly instead.
    raise ValueError('Unrecognised method')

# Separate param groups for the GP/transfer head and the feature extractor.
optimizer = torch.optim.Adam([{'params': model.model.parameters(), 'lr': 0.001},
                              {'params': model.feature_extractor.parameters(), 'lr': 0.001}])

for epoch in range(params.stop_epoch):
    model.train_loop(epoch, optimizer)

model.save_checkpoint(params.checkpoint_dir)
| 1,334 | 32.375 | 114 | py |
few-shot-hypernets-public | few-shot-hypernets-public-master/save_features.py | import numpy as np
import torch
from torch.autograd import Variable
import os
import glob
import h5py
import configs
import backbone
from data.datamgr import SimpleDataManager
from methods.baselinetrain import BaselineTrain
from methods.baselinefinetune import BaselineFinetune
from methods.hypernets import hypernet_types
from methods.protonet import ProtoNet
from methods.matchingnet import MatchingNet
from methods.relationnet import RelationNet
from methods.maml import MAML
from io_utils import model_dict, parse_args, get_resume_file, get_best_file, get_assigned_file
def save_features(model, data_loader, outfile ):
    """Run `model` over every batch of `data_loader` and write the extracted
    features and labels into the HDF5 file `outfile` (datasets: 'all_feats',
    'all_labels', 'count')."""
    out = h5py.File(outfile, 'w')
    capacity = len(data_loader) * data_loader.batch_size
    labels_ds = out.create_dataset('all_labels', (capacity,), dtype='i')
    feats_ds = None
    written = 0
    for batch_idx, (x, y) in enumerate(data_loader):
        if batch_idx % 10 == 0:
            print('{:d}/{:d}'.format(batch_idx, len(data_loader)))
        feats = model(Variable(x.cuda()))
        if feats_ds is None:
            # Created lazily: the feature shape is only known after one forward pass.
            feats_ds = out.create_dataset('all_feats', [capacity] + list(feats.size()[1:]), dtype='f')
        batch_n = feats.size(0)
        feats_ds[written:written + batch_n] = feats.data.cpu().numpy()
        labels_ds[written:written + batch_n] = y.cpu().numpy()
        written = written + batch_n
    count_ds = out.create_dataset('count', (1,), dtype='i')
    count_ds[0] = written
    out.close()
def do_save_fts(params):
# Resolve the data split, locate the trained checkpoint for this
# dataset/model/method, load its feature extractor, and dump features for
# the split into an .hdf5 file via save_features().
illegal_models = [
"maml", "maml_approx", "hyper_maml", "bayes_hmaml", "DKT",
] + list(hypernet_types.keys())
assert params.method not in illegal_models, 'maml do not support save_feature and run'
# Input resolution depends on backbone family and dataset.
if 'Conv' in params.model:
if params.dataset in ['omniglot', 'cross_char']:
image_size = 28
else:
image_size = 84
else:
image_size = 224
if params.dataset in ['omniglot', 'cross_char']:
assert params.model == 'Conv4' and not params.train_aug, 'omniglot only support Conv4 without augmentation'
params.model = 'Conv4S'
split = params.split
# Cross-domain setups train on one dataset and evaluate on another.
if params.dataset == 'cross':
if split == 'base':
loadfile = configs.data_dir['miniImagenet'] + 'all.json'
else:
loadfile = configs.data_dir['CUB'] + split + '.json'
elif params.dataset == 'cross_char':
if split == 'base':
loadfile = configs.data_dir['omniglot'] + 'noLatin.json'
else:
loadfile = configs.data_dir['emnist'] + split + '.json'
else:
loadfile = configs.data_dir[params.dataset] + split + '.json'
# Reconstruct the checkpoint directory name used at training time.
checkpoint_dir = '%s/checkpoints/%s/%s_%s' % (configs.save_dir, params.dataset, params.model, params.method)
if params.train_aug:
checkpoint_dir += '_aug'
if not params.method in ['baseline', 'baseline++']:
checkpoint_dir += '_%dway_%dshot' % (params.train_n_way, params.n_shot)
if params.checkpoint_suffix != "":
checkpoint_dir = checkpoint_dir + "_" + params.checkpoint_suffix
# Pick a specific epoch, the latest, or the best checkpoint.
if params.save_iter != -1:
modelfile = get_assigned_file(checkpoint_dir, params.save_iter)
elif params.method in ['baseline', 'baseline++']:
modelfile = get_resume_file(checkpoint_dir)
else:
print("looking for best file in", checkpoint_dir)
modelfile = get_best_file(checkpoint_dir)
print("got", modelfile)
# Features mirror the checkpoint layout under a "features" directory.
if params.save_iter != -1:
outfile = os.path.join(checkpoint_dir.replace("checkpoints", "features"),
split + "_" + str(params.save_iter) + ".hdf5")
else:
outfile = os.path.join(checkpoint_dir.replace("checkpoints", "features"), split + ".hdf5")
datamgr = SimpleDataManager(image_size, batch_size=64)
data_loader = datamgr.get_data_loader(loadfile, aug=False)
# Relation networks use non-pooled backbones; others use the standard ones.
if params.method in ['relationnet', 'relationnet_softmax']:
if params.model == 'Conv4':
model = backbone.Conv4NP()
elif params.model == 'Conv6':
model = backbone.Conv6NP()
elif params.model == 'Conv4S':
model = backbone.Conv4SNP()
else:
model = model_dict[params.model](flatten=False)
elif params.method in ['maml', 'maml_approx']:
raise ValueError('MAML do not support save feature')
else:
model = model_dict[params.model]()
model = model.cuda()
tmp = torch.load(modelfile)
state = tmp['state']
state_keys = list(state.keys())
# Strip the 'feature.' prefix so the saved feature weights load into the
# bare backbone; drop all non-feature (classifier head) weights.
for i, key in enumerate(state_keys):
if "feature." in key:
newkey = key.replace("feature.",
"") # an architecture model has attribute 'feature', load architecture feature to backbone by casting name from 'feature.trunk.xx' to 'trunk.xx'
state[newkey] = state.pop(key)
else:
state.pop(key)
model.load_state_dict(state)
model.eval()
dirname = os.path.dirname(outfile)
if not os.path.isdir(dirname):
os.makedirs(dirname)
save_features(model, data_loader, outfile)
if __name__ == '__main__':
# CLI entry point: parse arguments and dump features for the requested split.
params = parse_args('save_features')
do_save_fts(params)
| 5,138 | 35.707143 | 178 | py |
few-shot-hypernets-public | few-shot-hypernets-public-master/train.py | import json
import sys
from collections import defaultdict
from typing import Type, List, Union, Dict, Optional
from copy import deepcopy
import numpy as np
import torch
import random
from neptune.new import Run
import torch.optim
import torch.optim.lr_scheduler as lr_scheduler
import os
import configs
import backbone
from data.datamgr import SimpleDataManager, SetDataManager
from methods.baselinetrain import BaselineTrain
from methods.DKT import DKT
from methods.hypernets.hypernet_poc import HyperNetPOC
from methods.hypernets import hypernet_types
from methods.protonet import ProtoNet
from methods.matchingnet import MatchingNet
from methods.relationnet import RelationNet
from methods.maml import MAML
from methods.hypernets.bayeshmaml import BayesHMAML
from methods.hypernets.hypermaml import HyperMAML
from io_utils import model_dict, parse_args, get_resume_file, setup_neptune
from neptune.new.types import File
import matplotlib.pyplot as plt
from pathlib import Path
from save_features import do_save_fts
from test import perform_test
def _set_seed(seed, verbose=True):
    """Seed python/numpy/torch RNGs and force deterministic cuDNN.

    A seed of 0 is treated as "no seeding" and leaves everything random.
    """
    if seed != 0:
        # Same seeding order as everywhere else in the project.
        random.seed(seed)
        np.random.seed(seed)
        torch.manual_seed(seed)
        torch.cuda.manual_seed(seed)
        torch.cuda.manual_seed_all(seed)
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False
        if verbose:
            print("[INFO] Setting SEED: " + str(seed))
    elif verbose:
        print("[INFO] Setting SEED: None")
def train(base_loader, val_loader, model, optimization, start_epoch, stop_epoch, params, *,
neptune_run: Optional[Run] = None):
# Main training loop: optimizes `model` on base_loader, periodically
# evaluates on val_loader, checkpoints best/last models, persists metrics to
# metrics.json and (optionally) logs everything to Neptune. Returns `model`.
print("Tot epochs: " + str(stop_epoch))
if optimization == 'adam':
optimizer = torch.optim.Adam(model.parameters(), lr=params.lr)
elif optimization == "sgd":
optimizer = torch.optim.SGD(model.parameters(), lr=params.lr)
else:
raise ValueError(f'Unknown optimization {optimization}, please define by yourself')
# Running "best so far" trackers, used for checkpointing and early stopping.
max_acc = 0
max_train_acc = 0
max_acc_adaptation_dict = {}
if params.hm_set_forward_with_adaptation:
# One max-accuracy slot per adaptation step (support and query accuracy).
max_acc_adaptation_dict = {}
for i in range(params.hn_val_epochs + 1):
if i != 0:
max_acc_adaptation_dict[f"accuracy/val_support_max@-{i}"] = 0
max_acc_adaptation_dict[f"accuracy/val_max@-{i}"] = 0
if not os.path.isdir(params.checkpoint_dir):
os.makedirs(params.checkpoint_dir)
# On resume, reload the metrics history; the inner try handles an older
# metrics key format ("accuracy_val_max" vs "accuracy/val_max").
if (Path(params.checkpoint_dir) / "metrics.json").exists() and params.resume:
with (Path(params.checkpoint_dir) / "metrics.json").open("r") as f:
try:
metrics_per_epoch = defaultdict(list, json.load(f))
try:
max_acc = metrics_per_epoch["accuracy/val_max"][-1]
max_train_acc = metrics_per_epoch["accuracy/train_max"][-1]
if params.hm_set_forward_with_adaptation:
for i in range(params.hn_val_epochs + 1):
if i != 0:
max_acc_adaptation_dict[f"accuracy/val_support_max@-{i}"] = \
metrics_per_epoch[f"accuracy/val_support_max@-{i}"][-1]
max_acc_adaptation_dict[f"accuracy/val_max@-{i}"] = \
metrics_per_epoch[f"accuracy/val_max@-{i}"][-1]
except:
max_acc = metrics_per_epoch["accuracy_val_max"][-1]
max_train_acc = metrics_per_epoch["accuracy_train_max"][-1]
except:
metrics_per_epoch = defaultdict(list)
else:
metrics_per_epoch = defaultdict(list)
scheduler = get_scheduler(params, optimizer, stop_epoch)
print("Starting training")
print("Params accessed until this point:")
print("\n\t".join(sorted(params.history)))
print("Params ignored until this point:")
print("\n\t".join(params.get_ignored_args()))
delta_params_list = []
for epoch in range(start_epoch, stop_epoch):
# Early stopping: abort once past es_epoch if val accuracy is too low.
if epoch >= params.es_epoch:
if max_acc < params.es_threshold:
print("Breaking training at epoch", epoch, "because max accuracy", max_acc, "is lower than threshold",
params.es_threshold)
break
model.epoch = epoch
model.start_epoch = start_epoch
model.stop_epoch = stop_epoch
model.train()
if params.method in ['hyper_maml','bayes_hmaml']:
metrics = model.train_loop(epoch, base_loader, optimizer)
else:
metrics = model.train_loop(epoch, base_loader, optimizer) # model are called by reference, no need to return
scheduler.step()
model.eval()
# hyper-maml variants report per-epoch hypernet weight deltas.
delta_params = metrics.pop('delta_params', None)
if delta_params is not None:
delta_params_list.append(delta_params)
# Validate every eval_freq epochs, plus just before es_epoch and at the end.
if (epoch % params.eval_freq == 0) or epoch in [
params.es_epoch - 1,
stop_epoch - 1
]:
# Some methods return (acc, metrics), older ones only acc.
try:
acc, test_loop_metrics = model.test_loop(val_loader)
except:
acc = model.test_loop(val_loader)
test_loop_metrics = dict()
print(
f"Epoch {epoch}/{stop_epoch} | Max test acc {max_acc:.2f} | Test acc {acc:.2f} | Metrics: {test_loop_metrics}")
metrics = metrics or dict()
metrics["lr"] = scheduler.get_lr()
metrics["accuracy/val"] = acc
metrics["accuracy/val_max"] = max_acc
metrics["accuracy/train_max"] = max_train_acc
metrics = {
**metrics,
**test_loop_metrics,
**max_acc_adaptation_dict
}
if params.hm_set_forward_with_adaptation:
for i in range(params.hn_val_epochs + 1):
if i != 0:
metrics[f"accuracy/val_support_max@-{i}"] = max_acc_adaptation_dict[
f"accuracy/val_support_max@-{i}"]
metrics[f"accuracy/val_max@-{i}"] = max_acc_adaptation_dict[f"accuracy/val_max@-{i}"]
if metrics["accuracy/train"] > max_train_acc:
max_train_acc = metrics["accuracy/train"]
if params.hm_set_forward_with_adaptation:
for i in range(params.hn_val_epochs + 1):
if i != 0 and metrics[f"accuracy/val_support_acc@-{i}"] > max_acc_adaptation_dict[
f"accuracy/val_support_max@-{i}"]:
max_acc_adaptation_dict[f"accuracy/val_support_max@-{i}"] = metrics[
f"accuracy/val_support_acc@-{i}"]
if metrics[f"accuracy/val@-{i}"] > max_acc_adaptation_dict[f"accuracy/val_max@-{i}"]:
max_acc_adaptation_dict[f"accuracy/val_max@-{i}"] = metrics[f"accuracy/val@-{i}"]
if acc > max_acc: # for baseline and baseline++, we don't use validation here so we let acc = -1
print("--> Best model! save...")
max_acc = acc
outfile = os.path.join(params.checkpoint_dir, 'best_model.tar')
torch.save({'epoch': epoch, 'state': model.state_dict()}, outfile)
if params.maml_save_feature_network and params.method in ['maml', 'hyper_maml','bayes_hmaml']:
outfile = os.path.join(params.checkpoint_dir, 'best_feature_net.tar')
torch.save({'epoch': epoch, 'state': model.feature.state_dict()}, outfile)
# Always refresh the "last" checkpoint for resuming.
outfile = os.path.join(params.checkpoint_dir, 'last_model.tar')
torch.save({'epoch': epoch, 'state': model.state_dict()}, outfile)
if params.maml_save_feature_network and params.method in ['maml', 'hyper_maml','bayes_hmaml']:
outfile = os.path.join(params.checkpoint_dir, 'last_feature_net.tar')
torch.save({'epoch': epoch, 'state': model.feature.state_dict()}, outfile)
if (epoch % params.save_freq == 0) or (epoch == stop_epoch - 1):
outfile = os.path.join(params.checkpoint_dir, '{:d}.tar'.format(epoch))
torch.save({'epoch': epoch, 'state': model.state_dict()}, outfile)
# Append this epoch's metrics to the persisted history and to Neptune.
if metrics is not None:
for k, v in metrics.items():
metrics_per_epoch[k].append(v)
with (Path(params.checkpoint_dir) / "metrics.json").open("w") as f:
json.dump(metrics_per_epoch, f, indent=2)
if neptune_run is not None:
for m, v in metrics.items():
neptune_run[m].log(v, step=epoch)
if neptune_run is not None:
neptune_run["best_model"].track_files(os.path.join(params.checkpoint_dir, 'best_model.tar'))
neptune_run["last_model"].track_files(os.path.join(params.checkpoint_dir, 'last_model.tar'))
if params.maml_save_feature_network:
neptune_run["best_feature_net"].track_files(os.path.join(params.checkpoint_dir, 'best_feature_net.tar'))
neptune_run["last_feature_net"].track_files(os.path.join(params.checkpoint_dir, 'last_feature_net.tar'))
if len(delta_params_list) > 0 and params.hm_save_delta_params:
with (Path(params.checkpoint_dir) / f"delta_params_list_{len(delta_params_list)}.json").open("w") as f:
json.dump(delta_params_list, f, indent=2)
return model
def plot_metrics(metrics_per_epoch: Dict[str, Union[List[float], float]], epoch: int, fig_dir: Path):
    """Write one errorbar plot per metric into `fig_dir` as <metric>.png.

    List-valued entries are plotted as mean +/- std; scalars get zero errorbars.
    """
    for name, series in metrics_per_epoch.items():
        plt.figure()
        if "accuracy" in name:
            plt.ylim((0, 100))
        means = [np.mean(v) if isinstance(v, list) else v for v in series]
        spreads = [np.std(v) if isinstance(v, list) else 0 for v in series]
        plt.errorbar(
            list(range(len(series))),
            means,
            spreads,
            ecolor="black",
            fmt="o",
        )
        plt.grid()
        plt.title(f"{epoch}- {name}")
        plt.savefig(fig_dir / f"{name}.png")
        plt.close()
def get_scheduler(params, optimizer, stop_epoch=None) -> lr_scheduler._LRScheduler:
    """Build the LR scheduler named by `params.lr_scheduler`.

    'multisteplr' decays by 0.3 at `params.milestones` (or at quarter-points of
    params.stop_epoch); 'none' is a constant LR (MultiStepLR with gamma=1);
    'cosine' is warm restarts with T_0 = stop_epoch (or params.stop_epoch // 4).
    Raises TypeError for unknown names.
    """
    choice = params.lr_scheduler
    if choice == "multisteplr":
        steps = params.milestones if params.milestones is not None \
            else list(range(0, params.stop_epoch, params.stop_epoch // 4))[1:]
        return lr_scheduler.MultiStepLR(optimizer, milestones=steps, gamma=0.3)
    if choice == "none":
        # Constant LR expressed as a MultiStepLR whose gamma changes nothing.
        quarter_points = list(range(0, params.stop_epoch, params.stop_epoch // 4))[1:]
        return lr_scheduler.MultiStepLR(optimizer, milestones=quarter_points, gamma=1)
    if choice == "cosine":
        restart_period = stop_epoch if stop_epoch is not None else params.stop_epoch // 4
        return lr_scheduler.CosineAnnealingWarmRestarts(
            optimizer,
            T_0=restart_period
        )
    raise TypeError(params.lr_scheduler)
if __name__ == '__main__':
# --- setup: parse args, seed RNGs, resolve data files and input size ---
params = parse_args('train')
_set_seed(params.seed)
if params.dataset == 'cross':
base_file = configs.data_dir['miniImagenet'] + 'all.json'
val_file = configs.data_dir['CUB'] + 'val.json'
elif params.dataset == 'cross_char':
base_file = configs.data_dir['omniglot'] + 'noLatin.json'
val_file = configs.data_dir['emnist'] + 'val.json'
else:
base_file = configs.data_dir[params.dataset] + 'base.json'
val_file = configs.data_dir[params.dataset] + 'val.json'
# Conv backbones use small inputs; ResNet-style backbones use 224x224.
if 'Conv' in params.model:
if params.dataset in ['omniglot', 'cross_char']:
image_size = 28
else:
image_size = 84
else:
image_size = 224
if params.dataset in ['omniglot', 'cross_char']:
assert params.model == 'Conv4' and not params.train_aug, 'omniglot only support Conv4 without augmentation'
# params.model = 'Conv4S'
# no need for this, since omniglot is loaded as RGB
# optimization = 'Adam'
optimization = params.optim
# stop_epoch == -1 means "use the per-method/per-dataset defaults below".
if params.stop_epoch == -1:
if params.method in ['baseline', 'baseline++']:
if params.dataset in ['omniglot', 'cross_char']:
params.stop_epoch = 5
elif params.dataset in ['CUB']:
params.stop_epoch = 200 # This is different as stated in the open-review paper. However, using 400 epoch in baseline actually lead to over-fitting
elif params.dataset in ['miniImagenet', 'cross']:
params.stop_epoch = 400
else:
params.stop_epoch = 400 # default
else: # meta-learning methods
if params.n_shot == 1:
params.stop_epoch = 600
elif params.n_shot == 5:
params.stop_epoch = 400
else:
params.stop_epoch = 600 # default
# --- data loaders and model construction, per training method ---
if params.method in ['baseline', 'baseline++']:
base_datamgr = SimpleDataManager(image_size, batch_size=16)
base_loader = base_datamgr.get_data_loader(base_file, aug=params.train_aug)
val_datamgr = SimpleDataManager(image_size, batch_size=64)
val_loader = val_datamgr.get_data_loader(val_file, aug=False)
if params.dataset == 'omniglot':
assert params.num_classes >= 4112, 'class number need to be larger than max label id in base class'
if params.dataset == 'cross_char':
assert params.num_classes >= 1597, 'class number need to be larger than max label id in base class'
if params.method == 'baseline':
model = BaselineTrain(model_dict[params.model], params.num_classes)
elif params.method == 'baseline++':
model = BaselineTrain(model_dict[params.model], params.num_classes, loss_type='dist')
elif params.method in ['DKT', 'protonet', 'matchingnet', 'relationnet', 'relationnet_softmax', 'maml',
'maml_approx', 'hyper_maml','bayes_hmaml'] + list(hypernet_types.keys()):
n_query = max(1, int(
16 * params.test_n_way / params.train_n_way)) # if test_n_way is smaller than train_n_way, reduce n_query to keep batch size small
print("n_query", n_query)
train_few_shot_params = dict(n_way=params.train_n_way, n_support=params.n_shot, n_query=n_query)
base_datamgr = SetDataManager(image_size, **train_few_shot_params) # n_eposide=100
base_loader = base_datamgr.get_data_loader(base_file, aug=params.train_aug)
test_few_shot_params = dict(n_way=params.test_n_way, n_support=params.n_shot, n_query=n_query)
val_datamgr = SetDataManager(image_size, **test_few_shot_params)
val_loader = val_datamgr.get_data_loader(val_file, aug=False)
# a batch for SetDataManager: a [n_way, n_support + n_query, dim, w, h] tensor
if (params.method == 'DKT'):
dkt_train_few_shot_params = dict(n_way=params.train_n_way, n_support=params.n_shot)
model = DKT(model_dict[params.model], **dkt_train_few_shot_params)
model.init_summary()
elif params.method == 'protonet':
model = ProtoNet(model_dict[params.model], **train_few_shot_params)
elif params.method == 'matchingnet':
model = MatchingNet(model_dict[params.model], **train_few_shot_params)
elif params.method in ['relationnet', 'relationnet_softmax']:
# Relation nets need un-flattened, un-pooled feature maps.
if params.model == 'Conv4':
feature_model = backbone.Conv4NP
elif params.model == 'Conv6':
feature_model = backbone.Conv6NP
elif params.model == 'Conv4S':
feature_model = backbone.Conv4SNP
else:
feature_model = lambda: model_dict[params.model](flatten=False)
loss_type = 'mse' if params.method == 'relationnet' else 'softmax'
model = RelationNet(feature_model, loss_type=loss_type, **train_few_shot_params)
elif params.method in ['maml', 'maml_approx']:
# Enable fast-weight (MAML) variants of the backbone layers globally.
backbone.ConvBlock.maml = True
backbone.SimpleBlock.maml = True
backbone.BottleneckBlock.maml = True
backbone.ResNet.maml = True
model = MAML(model_dict[params.model], params=params, approx=(params.method == 'maml_approx'),
**train_few_shot_params)
if params.dataset in ['omniglot', 'cross_char']: # maml use different parameter in omniglot
model.n_task = 32
model.task_update_num = 1
model.train_lr = 0.1
elif params.method in hypernet_types.keys():
hn_type: Type[HyperNetPOC] = hypernet_types[params.method]
model = hn_type(model_dict[params.model], params=params, **train_few_shot_params)
elif params.method == "hyper_maml" or params.method == 'bayes_hmaml':
backbone.ConvBlock.maml = True
backbone.SimpleBlock.maml = True
backbone.BottleneckBlock.maml = True
backbone.ResNet.maml = True
if params.method == 'bayes_hmaml':
model = BayesHMAML(model_dict[params.model], params=params, approx=(params.method == 'maml_approx'),
**train_few_shot_params)
else:
model = HyperMAML(model_dict[params.model], params=params, approx=(params.method == 'maml_approx'),
**train_few_shot_params)
if params.dataset in ['omniglot', 'cross_char']: # maml use different parameter in omniglot
model.n_task = 32
model.task_update_num = 1
model.train_lr = 0.1
else:
raise ValueError('Unknown method')
model = model.cuda()
# --- checkpoint directory naming, resume / warm-start handling ---
params.checkpoint_dir = '%s/checkpoints/%s/%s_%s' % (configs.save_dir, params.dataset, params.model, params.method)
if params.train_aug:
params.checkpoint_dir += '_aug'
if not params.method in ['baseline', 'baseline++']:
params.checkpoint_dir += '_%dway_%dshot' % (params.train_n_way, params.n_shot)
if params.checkpoint_suffix != "":
params.checkpoint_dir = params.checkpoint_dir + "_" + params.checkpoint_suffix
if not os.path.isdir(params.checkpoint_dir):
os.makedirs(params.checkpoint_dir)
print(params.checkpoint_dir)
start_epoch = params.start_epoch
stop_epoch = params.stop_epoch
if params.method in ['maml', 'maml_approx', 'hyper_maml','bayes_hmaml']:
stop_epoch = params.stop_epoch * model.n_task # maml use multiple tasks in one update
if params.resume:
# Resume full model state from the latest checkpoint in the directory.
resume_file = get_resume_file(params.checkpoint_dir)
print(resume_file)
if resume_file is not None:
tmp = torch.load(resume_file)
start_epoch = tmp['epoch'] + 1
model.load_state_dict(tmp['state'])
print("Resuming training from", resume_file, "epoch", start_epoch)
elif params.warmup: # We also support warmup from pretrained baseline feature, but we never used in our paper
baseline_checkpoint_dir = '%s/checkpoints/%s/%s_%s' % (
configs.save_dir, params.dataset, params.model, 'baseline')
if params.train_aug:
baseline_checkpoint_dir += '_aug'
warmup_resume_file = get_resume_file(baseline_checkpoint_dir)
tmp = torch.load(warmup_resume_file)
if tmp is not None:
state = tmp['state']
state_keys = list(state.keys())
# Keep only the feature-extractor weights; strip the 'feature.' prefix.
for i, key in enumerate(state_keys):
if "feature." in key:
newkey = key.replace("feature.",
"") # an architecture model has attribute 'feature', load architecture feature to backbone by casting name from 'feature.trunk.xx' to 'trunk.xx'
state[newkey] = state.pop(key)
else:
state.pop(key)
model.feature.load_state_dict(state)
else:
raise ValueError('No warm_up file')
# --- persist run configuration, train, then run the final test sweep ---
args_dict = vars(params.params)
with (Path(params.checkpoint_dir) / "args.json").open("w") as f:
json.dump(
{
k: v if isinstance(v, (int, str, bool, float)) else str(v)
for (k, v) in args_dict.items()
},
f,
indent=2,
)
# Save the exact command line so the run can be reproduced.
with (Path(params.checkpoint_dir) / "rerun.sh").open("w") as f:
print("python", " ".join(sys.argv), file=f)
neptune_run = setup_neptune(params)
if neptune_run is not None:
neptune_run["model"] = str(model)
if not params.evaluate_model:
model = train(base_loader, val_loader, model, optimization, start_epoch, stop_epoch, params,
neptune_run=neptune_run)
params.split = "novel"
params.save_iter = -1
# Feature saving is unsupported for some methods (see do_save_fts asserts).
try:
do_save_fts(params)
except Exception as e:
print("Cannot save features bc of", e)
val_datasets = [params.dataset]
if params.dataset in ["cross", "miniImagenet"]:
val_datasets = ["cross", "miniImagenet"]
for d in val_datasets:
print("Evaluating on", d)
params.dataset = d
# num of epochs for finetuning on testing.
for hn_val_epochs in [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 25, 50, 100, 200]:
params.hn_val_epochs = hn_val_epochs
params.hm_set_forward_with_adaptation = True
# add default test params
params.adaptation = True
params.repeat = 5
print(f"Testing with {hn_val_epochs=}")
test_results = perform_test(params)
if neptune_run is not None:
neptune_run[f"full_test/{d}/metrics @ {hn_val_epochs}"] = test_results
few-shot-hypernets-public | few-shot-hypernets-public-master/methods/relationnet.py | # This code is modified from https://github.com/floodsung/LearningToCompare_FSL
import backbone
import torch
import torch.nn as nn
from torch.autograd import Variable
import numpy as np
import torch.nn.functional as F
from methods.meta_template import MetaTemplate
import utils
# Relation Network (Sung et al., CVPR 2018): classifies a query by scoring the
# concatenation of its feature map with each class prototype's feature map.
class RelationNet(MetaTemplate):
def __init__(self, model_func, n_way, n_support, loss_type = 'mse', n_query=None):
super(RelationNet, self).__init__(model_func, n_way, n_support)
self.loss_type = loss_type #'softmax'# 'mse'
self.relation_module = RelationModule( self.feat_dim , 8, self.loss_type ) #relation net features are not pooled, so self.feat_dim is [dim, w, h]
if self.loss_type == 'mse':
self.loss_fn = nn.MSELoss()
else:
self.loss_fn = nn.CrossEntropyLoss()
def set_forward(self,x,is_feature = False):
# Score every (prototype, query) pair; returns [n_way*n_query, n_way].
z_support, z_query = self.parse_feature(x,is_feature)
z_support = z_support.contiguous()
# Class prototypes = mean of support feature maps per class.
z_proto = z_support.view( self.n_way, self.n_support, *self.feat_dim ).mean(1)
z_query = z_query.contiguous().view( self.n_way* self.n_query, *self.feat_dim )
# Tile prototypes and queries so every pair lines up for concatenation.
z_proto_ext = z_proto.unsqueeze(0).repeat(self.n_query* self.n_way,1,1,1,1)
z_query_ext = z_query.unsqueeze(0).repeat( self.n_way,1,1,1,1)
z_query_ext = torch.transpose(z_query_ext,0,1)
extend_final_feat_dim = self.feat_dim.copy()
extend_final_feat_dim[0] *= 2
relation_pairs = torch.cat((z_proto_ext,z_query_ext),2).view(-1, *extend_final_feat_dim)
relations = self.relation_module(relation_pairs).view(-1, self.n_way)
return relations
def set_forward_adaptation(self,x,is_feature = True): #overwrite parent function
# Fine-tunes a copy of the relation module on the support set (with a
# temporary 3-support/2-query split), scores the real query set, then
# restores the original relation-module weights.
assert is_feature == True, 'Finetune only support fixed feature'
full_n_support = self.n_support
full_n_query = self.n_query
relation_module_clone = RelationModule( self.feat_dim , 8, self.loss_type )
relation_module_clone.load_state_dict(self.relation_module.state_dict())
z_support, z_query = self.parse_feature(x,is_feature)
z_support = z_support.contiguous()
set_optimizer = torch.optim.SGD(self.relation_module.parameters(), lr = 0.01, momentum=0.9, dampening=0.9, weight_decay=0.001)
# Temporarily treat part of the support set as pseudo-queries.
self.n_support = 3
self.n_query = 2
z_support_cpu = z_support.data.cpu().numpy()
for epoch in range(100):
# Re-shuffle the support samples each epoch for the pseudo-episodes.
perm_id = np.random.permutation(full_n_support).tolist()
sub_x = np.array([z_support_cpu[i,perm_id,:,:,:] for i in range(z_support.size(0))])
sub_x = torch.Tensor(sub_x).cuda()
if self.change_way:
self.n_way = sub_x.size(0)
set_optimizer.zero_grad()
y = torch.from_numpy(np.repeat(range( self.n_way ), self.n_query ))
scores = self.set_forward(sub_x, is_feature = True)
if self.loss_type == 'mse':
y_oh = utils.one_hot(y, self.n_way)
y_oh = Variable(y_oh.cuda())
loss = self.loss_fn(scores, y_oh )
else:
y = Variable(y.cuda())
loss = self.loss_fn(scores, y )
loss.backward()
set_optimizer.step()
# Restore the real episode shape before scoring the true queries.
self.n_support = full_n_support
self.n_query = full_n_query
z_proto = z_support.view( self.n_way, self.n_support, *self.feat_dim ).mean(1)
z_query = z_query.contiguous().view( self.n_way* self.n_query, *self.feat_dim )
z_proto_ext = z_proto.unsqueeze(0).repeat(self.n_query* self.n_way,1,1,1,1)
z_query_ext = z_query.unsqueeze(0).repeat( self.n_way,1,1,1,1)
z_query_ext = torch.transpose(z_query_ext,0,1)
extend_final_feat_dim = self.feat_dim.copy()
extend_final_feat_dim[0] *= 2
relation_pairs = torch.cat((z_proto_ext,z_query_ext),2).view(-1, *extend_final_feat_dim)
relations = self.relation_module(relation_pairs).view(-1, self.n_way)
# Undo the fine-tuning so the next episode starts from the same weights.
self.relation_module.load_state_dict(relation_module_clone.state_dict())
return relations
def set_forward_loss(self, x):
# MSE loss needs one-hot targets; cross-entropy takes class indices.
y = torch.from_numpy(np.repeat(range( self.n_way ), self.n_query ))
scores = self.set_forward(x)
if self.loss_type == 'mse':
y_oh = utils.one_hot(y, self.n_way)
y_oh = Variable(y_oh.cuda())
return self.loss_fn(scores, y_oh )
else:
y = Variable(y.cuda())
return self.loss_fn(scores, y )
class RelationConvBlock(nn.Module):
    """Conv(3x3) -> BatchNorm -> ReLU -> 2x2 max-pool block for the relation module."""

    def __init__(self, indim, outdim, padding = 0):
        super(RelationConvBlock, self).__init__()
        self.indim = indim
        self.outdim = outdim
        conv = nn.Conv2d(indim, outdim, 3, padding=padding)
        norm = nn.BatchNorm2d(outdim, momentum=1, affine=True)
        act = nn.ReLU()
        pool = nn.MaxPool2d(2)
        # Attribute names (C, BN, ...) are part of the state-dict layout; keep them.
        self.C, self.BN, self.relu, self.pool = conv, norm, act, pool
        self.parametrized_layers = [conv, norm, act, pool]
        for module in self.parametrized_layers:
            backbone.init_layer(module)
        self.trunk = nn.Sequential(*self.parametrized_layers)

    def forward(self, x):
        return self.trunk(x)
class RelationModule(nn.Module):
    """Relation head: scores a concatenated (prototype, query) feature-map pair.

    Two conv blocks shrink the paired map, then two FC layers produce a single
    relation score per pair. With loss_type='mse' the score is squashed through
    a sigmoid; with 'softmax' the raw logit is returned.
    """
    def __init__(self,input_size,hidden_size, loss_type = 'mse'):
        super(RelationModule, self).__init__()
        self.loss_type = loss_type
        # when using Resnet, conv map without avgpooling is 7x7, need padding in block to do pooling
        padding = 1 if ( input_size[1] <10 ) and ( input_size[2] <10 ) else 0
        self.layer1 = RelationConvBlock(input_size[0]*2, input_size[0], padding = padding )
        self.layer2 = RelationConvBlock(input_size[0], input_size[0], padding = padding )
        # Spatial size after one conv(3x3, pad) + 2x2 max-pool, applied twice.
        shrink_s = lambda s: int((int((s- 2 + 2*padding)/2)-2 + 2*padding)/2)
        self.fc1 = nn.Linear( input_size[0]* shrink_s(input_size[1]) * shrink_s(input_size[2]), hidden_size )
        self.fc2 = nn.Linear( hidden_size,1)

    def forward(self,x):
        out = self.layer1(x)
        out = self.layer2(out)
        out = out.view(out.size(0),-1)
        out = F.relu(self.fc1(out))
        if self.loss_type == 'mse':
            # FIX: F.sigmoid has been deprecated since PyTorch 0.4.1 (emits a
            # UserWarning); torch.sigmoid is the numerically identical drop-in.
            out = torch.sigmoid(self.fc2(out))
        elif self.loss_type == 'softmax':
            out = self.fc2(out)
        return out
| 6,459 | 40.677419 | 170 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.