# ===== filter-pruning-geometric-median-master/original_train.py =====
# https://github.com/pytorch/vision/blob/master/torchvision/models/__init__.py
import argparse
import os, sys
import shutil
import time
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim
import torch.utils.data
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import torchvision.models
from utils import convert_secs2time, time_string, time_file_str
#from models import print_log
import models
import random
import numpy as np
model_names = sorted(name for name in models.__dict__
if name.islower() and not name.startswith("__")
and callable(models.__dict__[name]))
parser = argparse.ArgumentParser(description='PyTorch ImageNet Training')
parser.add_argument('data', metavar='DIR',
help='path to dataset')
parser.add_argument('--save_dir', type=str, default='./', help='Folder to save checkpoints and log.')
parser.add_argument('--arch', '-a', metavar='ARCH', default='resnet18',
choices=model_names,
help='model architecture: ' +
' | '.join(model_names) +
' (default: resnet18)')
parser.add_argument('-j', '--workers', default=4, type=int, metavar='N', help='number of data loading workers (default: 4)')
parser.add_argument('--epochs', default=100, type=int, metavar='N', help='number of total epochs to run')
parser.add_argument('--start-epoch', default=0, type=int, metavar='N', help='manual epoch number (useful on restarts)')
parser.add_argument('-b', '--batch-size', default=256, type=int, metavar='N', help='mini-batch size (default: 256)')
parser.add_argument('--lr', '--learning-rate', default=0.1, type=float, metavar='LR', help='initial learning rate')
parser.add_argument('--momentum', default=0.9, type=float, metavar='M', help='momentum')
parser.add_argument('--weight-decay', '--wd', default=1e-4, type=float, metavar='W', help='weight decay (default: 1e-4)')
parser.add_argument('--print-freq', '-p', default=200, type=int, metavar='N', help='print frequency (default: 200)')
parser.add_argument('--resume', default='', type=str, metavar='PATH', help='path to latest checkpoint (default: none)')
parser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true', help='evaluate model on validation set')
args = parser.parse_args()
args.use_cuda = torch.cuda.is_available()
args.prefix = time_file_str()
def main():
best_prec1 = 0
if not os.path.isdir(args.save_dir):
os.makedirs(args.save_dir)
log = open(os.path.join(args.save_dir, '{}.{}.log'.format(args.arch,args.prefix)), 'w')
# version information
print_log("PyThon version : {}".format(sys.version.replace('\n', ' ')), log)
print_log("PyTorch version : {}".format(torch.__version__), log)
print_log("cuDNN version : {}".format(torch.backends.cudnn.version()), log)
print_log("Vision version : {}".format(torchvision.__version__), log)
# create model
print_log("=> creating model '{}'".format(args.arch), log)
model = models.__dict__[args.arch](pretrained=False)
print_log("=> Model : {}".format(model), log)
print_log("=> parameter : {}".format(args), log)
if args.arch.startswith('alexnet') or args.arch.startswith('vgg'):
model.features = torch.nn.DataParallel(model.features)
model.cuda()
else:
model = torch.nn.DataParallel(model).cuda()
# define loss function (criterion) and optimizer
criterion = nn.CrossEntropyLoss().cuda()
optimizer = torch.optim.SGD(model.parameters(), args.lr,
momentum=args.momentum,
weight_decay=args.weight_decay,
nesterov=True)
# optionally resume from a checkpoint
if args.resume:
if os.path.isfile(args.resume):
print_log("=> loading checkpoint '{}'".format(args.resume), log)
checkpoint = torch.load(args.resume)
args.start_epoch = checkpoint['epoch']
best_prec1 = checkpoint['best_prec1']
model.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer'])
print_log("=> loaded checkpoint '{}' (epoch {})".format(args.resume, checkpoint['epoch']), log)
else:
print_log("=> no checkpoint found at '{}'".format(args.resume), log)
cudnn.benchmark = True
# Data loading code
traindir = os.path.join(args.data, 'train')
valdir = os.path.join(args.data, 'val')
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
train_dataset = datasets.ImageFolder(
traindir,
transforms.Compose([
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize,
]))
train_loader = torch.utils.data.DataLoader(
train_dataset, batch_size=args.batch_size, shuffle=True,
num_workers=args.workers, pin_memory=True, sampler=None)
val_loader = torch.utils.data.DataLoader(
datasets.ImageFolder(valdir, transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
normalize,
])),
batch_size=args.batch_size, shuffle=False,
num_workers=args.workers, pin_memory=True)
if args.evaluate:
validate(val_loader, model, criterion)
return
filename = os.path.join(args.save_dir, 'checkpoint.{}.{}.pth.tar'.format(args.arch, args.prefix))
bestname = os.path.join(args.save_dir, 'best.{}.{}.pth.tar'.format(args.arch, args.prefix))
start_time = time.time()
epoch_time = AverageMeter()
for epoch in range(args.start_epoch, args.epochs):
adjust_learning_rate(optimizer, epoch)
need_hour, need_mins, need_secs = convert_secs2time(epoch_time.val * (args.epochs-epoch))
need_time = '[Need: {:02d}:{:02d}:{:02d}]'.format(need_hour, need_mins, need_secs)
print_log(' [{:s}] :: {:3d}/{:3d} ----- [{:s}] {:s}'.format(args.arch, epoch, args.epochs, time_string(), need_time), log)
# train for one epoch
train(train_loader, model, criterion, optimizer, epoch, log)
# evaluate on validation set
val_acc_2 = validate(val_loader, model, criterion, log)
# remember best prec@1 and save checkpoint
is_best = val_acc_2 > best_prec1
best_prec1 = max(val_acc_2, best_prec1)
save_checkpoint({
'epoch': epoch + 1,
'arch': args.arch,
'state_dict': model.state_dict(),
'best_prec1': best_prec1,
'optimizer' : optimizer.state_dict(),
}, is_best, filename, bestname)
# measure elapsed time
epoch_time.update(time.time() - start_time)
start_time = time.time()
log.close()
def train(train_loader, model, criterion, optimizer, epoch, log):
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
# switch to train mode
model.train()
end = time.time()
for i, (input, target) in enumerate(train_loader):
# measure data loading time
data_time.update(time.time() - end)
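        # legacy PyTorch (< 0.4) API below: `async` was later renamed to `non_blocking`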
target = target.cuda(async=True)
input_var = torch.autograd.Variable(input)
target_var = torch.autograd.Variable(target)
# compute output
output = model(input_var)
loss = criterion(output, target_var)
# measure accuracy and record loss
prec1, prec5 = accuracy(output.data, target, topk=(1, 5))
losses.update(loss.data[0], input.size(0))
top1.update(prec1[0], input.size(0))
top5.update(prec5[0], input.size(0))
# compute gradient and do SGD step
optimizer.zero_grad()
loss.backward()
optimizer.step()
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
print_log('Epoch: [{0}][{1}/{2}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
epoch, i, len(train_loader), batch_time=batch_time,
data_time=data_time, loss=losses, top1=top1, top5=top5), log)
def validate(val_loader, model, criterion, log):
batch_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
# switch to evaluate mode
model.eval()
end = time.time()
for i, (input, target) in enumerate(val_loader):
target = target.cuda(async=True)
input_var = torch.autograd.Variable(input, volatile=True)
target_var = torch.autograd.Variable(target, volatile=True)
# compute output
output = model(input_var)
loss = criterion(output, target_var)
# measure accuracy and record loss
prec1, prec5 = accuracy(output.data, target, topk=(1, 5))
losses.update(loss.data[0], input.size(0))
top1.update(prec1[0], input.size(0))
top5.update(prec5[0], input.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
print_log('Test: [{0}/{1}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
i, len(val_loader), batch_time=batch_time, loss=losses,
top1=top1, top5=top5), log)
print_log(' * Prec@1 {top1.avg:.3f} Prec@5 {top5.avg:.3f} Error@1 {error1:.3f}'.format(top1=top1, top5=top5, error1=100-top1.avg), log)
return top1.avg
def save_checkpoint(state, is_best, filename, bestname):
torch.save(state, filename)
if is_best:
shutil.copyfile(filename, bestname)
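# Illustrative sketch (not part of the original script): restoring a checkpoint
# written by save_checkpoint() above, mirroring the --resume branch of main().
# The `path` argument is hypothetical; pass whatever file save_checkpoint() wrote.
def _demo_resume(model, optimizer, path):
    checkpoint = torch.load(path)
    model.load_state_dict(checkpoint['state_dict'])
    optimizer.load_state_dict(checkpoint['optimizer'])
    # resume bookkeeping exactly as main() does
    return checkpoint['epoch'], checkpoint['best_prec1']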
def print_log(print_string, log):
print("{}".format(print_string))
log.write('{}\n'.format(print_string))
log.flush()
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
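# Quick sanity check (illustrative, not part of the original script): AverageMeter
# keeps a running weighted average, where `n` is the batch size of each update.
def _demo_average_meter():
    meter = AverageMeter()
    meter.update(0.5, n=2)   # sum = 1.0, count = 2
    meter.update(1.0, n=2)   # sum = 3.0, count = 4
    assert meter.val == 1.0 and meter.avg == 0.75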
def adjust_learning_rate(optimizer, epoch):
"""Sets the learning rate to the initial LR decayed by 10 every 30 epochs"""
lr = args.lr * (0.1 ** (epoch // 30))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
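# Illustrative check of the step schedule above: with args.lr = 0.1 the learning
# rate is 0.1 for epochs 0-29, 0.01 for epochs 30-59, and 0.001 for epochs 60-89.
def _demo_lr_schedule(base_lr=0.1):
    # -> [0.1, 0.1, 0.01, 0.01, 0.001] (up to float rounding)
    return [base_lr * (0.1 ** (epoch // 30)) for epoch in (0, 29, 30, 59, 60)]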
def accuracy(output, target, topk=(1,)):
"""Computes the precision@k for the specified values of k"""
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / batch_size))
return res
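# Illustrative sketch (not part of the original script): accuracy() on a toy
# batch of 2 samples and 3 classes. Sample 0 is predicted correctly at top-1,
# sample 1 only within the top-2, so prec@1 = 50% and prec@2 = 100%.
def _demo_accuracy():
    output = torch.FloatTensor([[0.7, 0.2, 0.1],
                                [0.1, 0.6, 0.3]])
    target = torch.LongTensor([0, 2])
    prec1, prec2 = accuracy(output, target, topk=(1, 2))
    return prec1, prec2  # -> 50.0 and 100.0 (as 1-element tensors)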
if __name__ == '__main__':
main()
# ===== filter-pruning-geometric-median-master/functions/infer_pruned.py =====
# https://github.com/pytorch/vision/blob/master/torchvision/models/__init__.py
import argparse
import os
import shutil
import pdb, time
from collections import OrderedDict
import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim
import torch.utils.data
import torchvision.transforms as transforms
import torchvision.datasets as datasets
from utils import convert_secs2time, time_string, time_file_str
import models
import numpy as np
model_names = sorted(name for name in models.__dict__
if name.islower() and not name.startswith("__")
and callable(models.__dict__[name]))
parser = argparse.ArgumentParser(description='PyTorch ImageNet Training')
parser.add_argument('data', metavar='DIR', help='path to dataset')
parser.add_argument('--save_dir', type=str, default='./', help='Folder to save checkpoints and log.')
parser.add_argument('--arch', '-a', metavar='ARCH', default='resnet18', choices=model_names,
help='model architecture: ' + ' | '.join(model_names) + ' (default: resnet18)')
parser.add_argument('-j', '--workers', default=4, type=int, metavar='N',
help='number of data loading workers (default: 4)')
parser.add_argument('-b', '--batch-size', default=256, type=int, metavar='N', help='mini-batch size (default: 256)')
parser.add_argument('--lr', '--learning-rate', default=0.1, type=float, metavar='LR', help='initial learning rate')
parser.add_argument('--print-freq', '-p', default=5, type=int, metavar='N', help='print frequency (default: 5)')
parser.add_argument('--resume', default='', type=str, metavar='PATH', help='path to latest checkpoint (default: none)')
# compress rate
parser.add_argument('--rate', type=float, default=0.9, help='compress rate of model')
parser.add_argument('--epoch_prune', type=int, default=1, help='prune the model every this many epochs')
parser.add_argument('--skip_downsample', type=int, default=1, help='whether to skip pruning the downsample layers')
parser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true', help='evaluate model on validation set')
parser.add_argument('--eval_small', dest='eval_small', action='store_true', help='evaluate the small (pruned) model instead of the big one')
parser.add_argument('--small_model', default='', type=str, metavar='PATH', help='path to the small (pruned) model checkpoint (default: none)')
args = parser.parse_args()
args.use_cuda = torch.cuda.is_available()
args.prefix = time_file_str()
def main():
best_prec1 = 0
if not os.path.isdir(args.save_dir):
os.makedirs(args.save_dir)
log = open(os.path.join(args.save_dir, 'gpu-time.{}.{}.log'.format(args.arch, args.prefix)), 'w')
# create model
print_log("=> creating model '{}'".format(args.arch), log)
model = models.__dict__[args.arch](pretrained=False)
print_log("=> Model : {}".format(model), log)
print_log("=> parameter : {}".format(args), log)
print_log("Compress Rate: {}".format(args.rate), log)
print_log("Epoch prune: {}".format(args.epoch_prune), log)
print_log("Skip downsample : {}".format(args.skip_downsample), log)
# optionally resume from a checkpoint
if args.resume:
if os.path.isfile(args.resume):
print_log("=> loading checkpoint '{}'".format(args.resume), log)
checkpoint = torch.load(args.resume)
args.start_epoch = checkpoint['epoch']
best_prec1 = checkpoint['best_prec1']
state_dict = checkpoint['state_dict']
state_dict = remove_module_dict(state_dict)
model.load_state_dict(state_dict)
print_log("=> loaded checkpoint '{}' (epoch {})".format(args.resume, checkpoint['epoch']), log)
else:
print_log("=> no checkpoint found at '{}'".format(args.resume), log)
cudnn.benchmark = True
# Data loading code
valdir = os.path.join(args.data, 'val')
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
val_loader = torch.utils.data.DataLoader(
datasets.ImageFolder(valdir, transforms.Compose([
# transforms.Scale(256),
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
normalize,
])),
batch_size=args.batch_size, shuffle=False,
num_workers=args.workers, pin_memory=True)
criterion = nn.CrossEntropyLoss().cuda()
if args.evaluate:
print_log("eval true", log)
if not args.eval_small:
big_model = model.cuda()
print_log('Evaluate: big model', log)
print_log('big model accu: {}'.format(validate(val_loader, big_model, criterion, log)), log)
else:
print_log('Evaluate: small model', log)
if args.small_model:
if os.path.isfile(args.small_model):
print_log("=> loading small model '{}'".format(args.small_model), log)
small_model = torch.load(args.small_model)
for x, y in zip(small_model.named_parameters(), model.named_parameters()):
print_log("name of layer: {}\n\t *** small model {}\n\t *** big model {}".format(x[0], x[1].size(),
y[1].size()), log)
if args.use_cuda:
small_model = small_model.cuda()
print_log('small model accu: {}'.format(validate(val_loader, small_model, criterion, log)), log)
else:
print_log("=> no small model found at '{}'".format(args.small_model), log)
return
def validate(val_loader, model, criterion, log):
batch_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
# switch to evaluate mode
model.eval()
end = time.time()
for i, (input, target) in enumerate(val_loader):
# target = target.cuda(async=True)
if args.use_cuda:
input, target = input.cuda(), target.cuda(async=True)
input_var = torch.autograd.Variable(input, volatile=True)
target_var = torch.autograd.Variable(target, volatile=True)
# compute output
output = model(input_var)
loss = criterion(output, target_var)
# measure accuracy and record loss
prec1, prec5 = accuracy(output.data, target, topk=(1, 5))
losses.update(loss.data[0], input.size(0))
top1.update(prec1[0], input.size(0))
top5.update(prec5[0], input.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
print_log('Test: [{0}/{1}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
i, len(val_loader), batch_time=batch_time, loss=losses,
top1=top1, top5=top5), log)
print_log(' * Prec@1 {top1.avg:.3f} Prec@5 {top5.avg:.3f} Error@1 {error1:.3f}'.format(top1=top1, top5=top5,
error1=100 - top1.avg), log)
return top1.avg
def print_log(print_string, log):
print("{}".format(print_string))
log.write('{}\n'.format(print_string))
log.flush()
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def accuracy(output, target, topk=(1,)):
"""Computes the precision@k for the specified values of k"""
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / batch_size))
return res
def remove_module_dict(state_dict):
    new_state_dict = OrderedDict()
    for k, v in state_dict.items():
        # strip the `module.` prefix that nn.DataParallel adds to parameter names
        name = k[7:] if k.startswith("module.") else k
        new_state_dict[name] = v
    return new_state_dict
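# Illustrative sketch (not part of the original script): keys saved from a
# DataParallel-wrapped model carry a "module." prefix that the plain model
# does not expect; remove_module_dict() strips it while preserving order.
def _demo_remove_module_dict():
    wrapped = OrderedDict([('module.conv1.weight', 1), ('fc.bias', 2)])
    plain = remove_module_dict(wrapped)
    assert list(plain.keys()) == ['conv1.weight', 'fc.bias']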
if __name__ == '__main__':
main()
# ===== filter-pruning-geometric-median-master/models/resnet_small.py =====
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import init
#from .res_utils import DownsampleA, DownsampleC, DownsampleD
import math,time
class DownsampleA(nn.Module):
def __init__(self, nIn, nOut, stride):
super(DownsampleA, self).__init__()
self.avg = nn.AvgPool2d(kernel_size=1, stride=stride)
def forward(self, x):
x = self.avg(x)
return torch.cat((x, x.mul(0)), 1)
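# Illustrative sketch (not part of the original module): DownsampleA halves the
# spatial size with a strided 1x1 average pool and doubles the channel count by
# concatenating a zeroed copy, so a (N, 16, 32, 32) input becomes (N, 32, 16, 16).
def _demo_downsample_a():
    shortcut = DownsampleA(nIn=16, nOut=32, stride=2)
    x = torch.autograd.Variable(torch.rand(1, 16, 32, 32))
    return shortcut(x).size()  # -> (1, 32, 16, 16)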
class ResNetBasicblock(nn.Module):
expansion = 1
"""
    ResNet basicblock (https://github.com/facebook/fb.resnet.torch/blob/master/models/resnet.lua)
"""
def __init__(self, inplanes, planes, supply, index, stride=1, downsample=None):
super(ResNetBasicblock, self).__init__()
self.conv_a = nn.Conv2d(inplanes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
self.bn_a = nn.BatchNorm2d(planes)
self.conv_b = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
self.bn_b = nn.BatchNorm2d(planes)
self.downsample = downsample
self.inplanes = inplanes
self.index = index
self.supply = supply
self.size = 0
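        # pre-allocated output returned by the timing-only forward() below; note
        # that the batch size (128) and spatial size are hard-coded for profiling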
self.out = torch.autograd.Variable(torch.rand(128, self.supply, 16*32//self.supply, 16*32//self.supply))
self.i = 0
self.time = []
self.sum = []
def forward(self, x):
residual = x
basicblock = self.conv_a(x)
basicblock = self.bn_a(basicblock)
basicblock = F.relu(basicblock, inplace=True)
basicblock = self.conv_b(basicblock)
basicblock = self.bn_b(basicblock)
if self.downsample is not None:
residual = self.downsample(x)
        # NOTE: this "small" model forward is a timing skeleton rather than a
        # numerically meaningful block: instead of adding `basicblock` to the
        # residual, it returns the pre-allocated random tensor `self.out`, so
        # the convolution and allocation cost can be profiled in isolation.
        # The original index-matching variant scattered the two branches back
        # into a full-width output, e.g.:
        #   out.index_add_(0, self.index[0], residual.data)
        #   out.index_add_(0, self.index[1], basicblock.data)
        # other commented experiments re-allocated `self.out` per batch and
        # accumulated the allocation time in self.time / self.sum.
        out = self.out
        return F.relu(out, inplace=True)
class CifarResNet(nn.Module):
"""
ResNet optimized for the Cifar dataset, as specified in
https://arxiv.org/abs/1512.03385.pdf
"""
def __init__(self, block, depth, num_classes, index, rate=[16, 16, 32, 64, 16, 32, 64]):
""" Constructor
Args:
depth: number of layers.
num_classes: number of classes
base_width: base width
"""
super(CifarResNet, self).__init__()
#Model type specifies number of layers for CIFAR-10 and CIFAR-100 model
assert (depth - 2) % 6 == 0, 'depth should be one of 20, 32, 44, 56, 110'
layer_blocks = (depth - 2) // 6
self.stage_num = (depth - 2) // 3
print ('CifarResNet : Depth : {} , Layers for each block : {}'.format(depth, layer_blocks))
self.num_classes = num_classes
self.rate = rate
print(layer_blocks)
self.conv_1_3x3 = nn.Conv2d(3, rate[0], kernel_size=3, stride=1, padding=1, bias=False)
self.bn_1 = nn.BatchNorm2d(rate[0])
self.inplanes = rate[0]
self.stage_1 = self._make_layer(block, rate[4], rate[1], rate[4], index, layer_blocks, 1)
self.stage_2 = self._make_layer(block, rate[5], rate[2], rate[5], index, layer_blocks, 2)
self.stage_3 = self._make_layer(block, rate[6], rate[3], rate[6], index, layer_blocks, 2)
self.avgpool = nn.AvgPool2d(8)
self.classifier = nn.Linear(64*block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
#m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
init.kaiming_normal(m.weight)
m.bias.data.zero_()
def _make_layer(self, block, inplanes, planes, supply, index, blocks, stride=1):
downsample = None
if stride != 1 :
downsample = DownsampleA(self.inplanes, planes * block.expansion, stride)
layers = []
# print('first inplane, out plane, supply', self.inplanes,planes,supply)
layers.append(block(self.inplanes, planes, supply, index, stride, downsample))
# self.inplanes = planes * block.expansion
self.inplanes = inplanes
        # print('second inplanes, out planes, supply', self.inplanes, planes, supply)
for i in range(1, blocks):
layers.append(block(self.inplanes, planes, supply, index))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv_1_3x3(x)
x = F.relu(self.bn_1(x), inplace=True)
# print(x.size())
# x = torch.autograd.Variable(torch.rand(x.size()[0], 16, x.size()[2], x.size()[3])).cuda()
x = self.stage_1(x)
x = self.stage_2(x)
x = self.stage_3(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
return self.classifier(x)
def resnet20_small(index, rate,num_classes=10):
"""Constructs a ResNet-20 model for CIFAR-10 (by default)
Args:
num_classes (uint): number of classes
"""
model = CifarResNet(ResNetBasicblock, 20, num_classes, index, rate)
return model
def resnet32_small(index, rate,num_classes=10):
"""Constructs a ResNet-32 model for CIFAR-10 (by default)
Args:
num_classes (uint): number of classes
"""
model = CifarResNet(ResNetBasicblock, 32, num_classes,index, rate)
return model
def resnet44_small(index, rate,num_classes=10):
"""Constructs a ResNet-44 model for CIFAR-10 (by default)
Args:
num_classes (uint): number of classes
"""
model = CifarResNet(ResNetBasicblock, 44, num_classes,index, rate)
return model
def resnet56_small(index, rate,num_classes=10):
"""Constructs a ResNet-56 model for CIFAR-10 (by default)
Args:
num_classes (uint): number of classes
"""
model = CifarResNet(ResNetBasicblock, 56, num_classes,index, rate)
return model
def resnet110_small(index, rate,num_classes=10):
"""Constructs a ResNet-110 model for CIFAR-10 (by default)
Args:
num_classes (uint): number of classes
"""
model = CifarResNet(ResNetBasicblock, 110, num_classes,index, rate)
return model
# ===== filter-pruning-geometric-median-master/models/preresnet.py =====
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import init
from .res_utils import DownsampleA, DownsampleC
import math
class ResNetBasicblock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride, downsample, Type):
super(ResNetBasicblock, self).__init__()
self.Type = Type
self.bn_a = nn.BatchNorm2d(inplanes)
self.conv_a = nn.Conv2d(inplanes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
self.bn_b = nn.BatchNorm2d(planes)
self.conv_b = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
def forward(self, x):
residual = x
basicblock = self.bn_a(x)
basicblock = self.relu(basicblock)
if self.Type == 'both_preact':
residual = basicblock
elif self.Type != 'normal':
            assert False, 'Unknown type: {}'.format(self.Type)
basicblock = self.conv_a(basicblock)
basicblock = self.bn_b(basicblock)
basicblock = self.relu(basicblock)
basicblock = self.conv_b(basicblock)
if self.downsample is not None:
residual = self.downsample(residual)
return residual + basicblock
class CifarPreResNet(nn.Module):
"""
ResNet optimized for the Cifar dataset, as specified in
https://arxiv.org/abs/1512.03385.pdf
"""
def __init__(self, block, depth, num_classes):
""" Constructor
Args:
depth: number of layers.
num_classes: number of classes
base_width: base width
"""
super(CifarPreResNet, self).__init__()
#Model type specifies number of layers for CIFAR-10 and CIFAR-100 model
assert (depth - 2) % 6 == 0, 'depth should be one of 20, 32, 44, 56, 110'
layer_blocks = (depth - 2) // 6
print ('CifarPreResNet : Depth : {} , Layers for each block : {}'.format(depth, layer_blocks))
self.num_classes = num_classes
self.conv_3x3 = nn.Conv2d(3, 16, kernel_size=3, stride=1, padding=1, bias=False)
self.inplanes = 16
self.stage_1 = self._make_layer(block, 16, layer_blocks, 1)
self.stage_2 = self._make_layer(block, 32, layer_blocks, 2)
self.stage_3 = self._make_layer(block, 64, layer_blocks, 2)
self.lastact = nn.Sequential(nn.BatchNorm2d(64*block.expansion), nn.ReLU(inplace=True))
self.avgpool = nn.AvgPool2d(8)
self.classifier = nn.Linear(64*block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
#m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
init.kaiming_normal(m.weight)
m.bias.data.zero_()
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = DownsampleA(self.inplanes, planes * block.expansion, stride)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample, 'both_preact'))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes, 1, None, 'normal'))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv_3x3(x)
x = self.stage_1(x)
x = self.stage_2(x)
x = self.stage_3(x)
x = self.lastact(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
return self.classifier(x)
def preresnet20(num_classes=10):
"""Constructs a ResNet-20 model for CIFAR-10 (by default)
Args:
num_classes (uint): number of classes
"""
model = CifarPreResNet(ResNetBasicblock, 20, num_classes)
return model
def preresnet32(num_classes=10):
"""Constructs a ResNet-32 model for CIFAR-10 (by default)
Args:
num_classes (uint): number of classes
"""
model = CifarPreResNet(ResNetBasicblock, 32, num_classes)
return model
def preresnet44(num_classes=10):
"""Constructs a ResNet-44 model for CIFAR-10 (by default)
Args:
num_classes (uint): number of classes
"""
model = CifarPreResNet(ResNetBasicblock, 44, num_classes)
return model
def preresnet56(num_classes=10):
"""Constructs a ResNet-56 model for CIFAR-10 (by default)
Args:
num_classes (uint): number of classes
"""
model = CifarPreResNet(ResNetBasicblock, 56, num_classes)
return model
def preresnet110(num_classes=10):
"""Constructs a ResNet-110 model for CIFAR-10 (by default)
Args:
num_classes (uint): number of classes
"""
model = CifarPreResNet(ResNetBasicblock, 110, num_classes)
return model
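# Illustrative usage sketch (not part of the original module): the pre-activation
# ResNet-56 maps a CIFAR-sized batch to class logits.
def _demo_preresnet():
    model = preresnet56(num_classes=10)
    x = torch.autograd.Variable(torch.rand(2, 3, 32, 32))
    return model(x).size()  # -> (2, 10)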
# ===== filter-pruning-geometric-median-master/models/imagenet_resnet.py =====
import torch.nn as nn
import math
import torch.utils.model_zoo as model_zoo
__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101',
'resnet152']
model_urls = {
'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
}
def conv3x3(in_planes, out_planes, stride=1):
"3x3 convolution with padding"
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = nn.BatchNorm2d(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(planes * 4)
self.relu = nn.ReLU(inplace=True)
# self.relu = nn.ReLU(inplace=False)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, block, layers, num_classes=1000):
self.inplanes = 64
super(ResNet, self).__init__()
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
self.avgpool = nn.AvgPool2d(7)
self.fc = nn.Linear(512 * block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
return x
def resnet18(pretrained=False, **kwargs):
"""Constructs a ResNet-18 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))
        print('ResNet-18: using pretrained model for initialization')
return model
def resnet34(pretrained=False, **kwargs):
"""Constructs a ResNet-34 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet34']))
        print('ResNet-34: using pretrained model for initialization')
return model
def resnet50(pretrained=False, **kwargs):
"""Constructs a ResNet-50 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet50']))
        print('ResNet-50: using pretrained model for initialization')
return model
def resnet101(pretrained=False, **kwargs):
"""Constructs a ResNet-101 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet101']))
        print('ResNet-101: using pretrained model for initialization')
return model
def resnet152(pretrained=False, **kwargs):
"""Constructs a ResNet-152 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(Bottleneck, [3, 8, 36, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet152']))
        print('ResNet-152: using pretrained model for initialization')
return model
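# Illustrative usage sketch (not part of the original module): these ImageNet
# ResNets expect 224x224 inputs because of the fixed 7x7 average pool.
def _demo_resnet18():
    import torch
    model = resnet18(pretrained=False, num_classes=1000)
    x = torch.autograd.Variable(torch.rand(1, 3, 224, 224))
    return model(x).size()  # -> (1, 1000)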
# ===== filter-pruning-geometric-median-master/models/imagenet_resnet_small.py =====
import torch.nn as nn
import math
import torch.utils.model_zoo as model_zoo
from torch.autograd import Variable
import torch
import time
__all__ = ['ResNet_small', 'resnet18_small', 'resnet34_small', 'resnet50_small', 'resnet101_small', 'resnet152_small']
model_urls = {
'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
}
def conv3x3(in_planes, out_planes, stride=1):
"3x3 convolution with padding"
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes_after_prune, planes_expand, planes_before_prune, index, bn_value, stride=1,
downsample=None):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes_after_prune, stride)
self.bn1 = nn.BatchNorm2d(planes_after_prune)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes_after_prune, planes_after_prune)
self.bn2 = nn.BatchNorm2d(planes_after_prune)
self.downsample = downsample
self.stride = stride
# for residual index match
self.index = Variable(index)
# for bn add
self.bn_value = bn_value
# self.out = torch.autograd.Variable(
# torch.rand(batch, self.planes_before_prune, 64 * 56 // self.planes_before_prune,
# 64 * 56 // self.planes_before_prune), volatile=True).cuda()
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
# setting: without index match
# out += residual
# out = self.relu(out)
# setting: with index match
residual += self.bn_value.cuda()
residual.index_add_(1, self.index.cuda(), out)
residual = self.relu(residual)
return residual
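# Illustrative sketch (not part of the original module) of the index-match trick
# used above: the pruned branch produces fewer channels than the residual, so its
# output is scattered back into the full-width residual at the channel positions
# (`index`) of the filters that survived pruning; pruned positions only receive
# the stored BN offset (`bn_value`), omitted here for brevity.
def _demo_index_add():
    residual = torch.zeros(1, 4, 2, 2)   # full width: 4 channels
    out = torch.ones(1, 2, 2, 2)         # pruned branch: 2 channels kept
    index = torch.LongTensor([0, 3])     # positions of the kept filters
    residual.index_add_(1, index, out)   # channels 1 and 2 stay zero
    return residual[0, :, 0, 0]          # -> tensor([1., 0., 0., 1.])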
class Bottleneck(nn.Module):
    # expansion does not exactly equal 4 after pruning
expansion = 4
def __init__(self, inplanes, planes_after_prune, planes_expand, planes_before_prune, index, bn_value, stride=1,
downsample=None):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes_after_prune, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes_after_prune)
self.conv2 = nn.Conv2d(planes_after_prune, planes_after_prune, kernel_size=3, stride=stride,
padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes_after_prune)
# setting: for accuracy expansion
self.conv3 = nn.Conv2d(planes_after_prune, planes_expand, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(planes_expand)
# setting: original resnet, expansion = 4
# self.conv3 = nn.Conv2d(planes, planes_before_prune * 4, kernel_size=1, bias=False)
# self.bn3 = nn.BatchNorm2d(planes_before_prune * 4)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
# for residual index match
self.index = Variable(index)
# for bn add
self.bn_value = bn_value
# self.extend = torch.autograd.Variable(
# torch.rand(self.planes_before_prune * 4, 64 * 56 // self.planes_before_prune,
# 64 * 56 // self.planes_before_prune), volatile=True).cuda()
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
# setting: without index match
# print("residual size{},out size{} ".format(residual.size(), out.size()))
# out += residual
# out = self.relu(out)
# setting: with index match
residual += self.bn_value.cuda()
residual.index_add_(1, self.index.cuda(), out)
residual = self.relu(residual)
return residual
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
class ResNet_small(nn.Module):
def __init__(self, block, layers, index, bn_value,
num_for_construct=[64, 64, 64 * 4, 128, 128 * 4, 256, 256 * 4, 512, 512 * 4],
num_classes=1000):
super(ResNet_small, self).__init__()
self.inplanes = num_for_construct[0]
self.conv1 = nn.Conv2d(3, num_for_construct[0], kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = nn.BatchNorm2d(num_for_construct[0])
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
# setting: expansion = 4
# self.layer1 = self._make_layer(block, num_for_construct[1], num_for_construct[1] * 4, 64, index, layers[0])
# self.layer2 = self._make_layer(block, num_for_construct[2], num_for_construct[2] * 4, 128, index, layers[1], stride=2)
# self.layer3 = self._make_layer(block, num_for_construct[3], num_for_construct[3] * 4, 256, index, layers[2], stride=2)
# self.layer4 = self._make_layer(block, num_for_construct[4], num_for_construct[4] * 4, 512, index, layers[3], stride=2)
        # setting: expansion may not exactly equal 4
self.index_layer1 = {key: index[key] for key in index.keys() if 'layer1' in key}
self.index_layer2 = {key: index[key] for key in index.keys() if 'layer2' in key}
self.index_layer3 = {key: index[key] for key in index.keys() if 'layer3' in key}
self.index_layer4 = {key: index[key] for key in index.keys() if 'layer4' in key}
self.bn_layer1 = {key: bn_value[key] for key in bn_value.keys() if 'layer1' in key}
self.bn_layer2 = {key: bn_value[key] for key in bn_value.keys() if 'layer2' in key}
self.bn_layer3 = {key: bn_value[key] for key in bn_value.keys() if 'layer3' in key}
self.bn_layer4 = {key: bn_value[key] for key in bn_value.keys() if 'layer4' in key}
# print("bn_layer1", bn_layer1.keys(), bn_layer2.keys(), bn_layer3.keys(), bn_layer4.keys())
self.layer1 = self._make_layer(block, num_for_construct[1], num_for_construct[2], 64, self.index_layer1, self.bn_layer1,
layers[0])
self.layer2 = self._make_layer(block, num_for_construct[3], num_for_construct[4], 128, self.index_layer2, self.bn_layer2,
layers[1], stride=2)
self.layer3 = self._make_layer(block, num_for_construct[5], num_for_construct[6], 256, self.index_layer3, self.bn_layer3,
layers[2], stride=2)
self.layer4 = self._make_layer(block, num_for_construct[7], num_for_construct[8], 512, self.index_layer4, self.bn_layer4,
layers[3], stride=2)
self.avgpool = nn.AvgPool2d(7, stride=1)
self.fc = nn.Linear(512 * block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def _make_layer(self, block, planes_after_prune, planes_expand, planes_before_prune, index, bn_layer, blocks,
stride=1):
downsample = None
if stride != 1 or self.inplanes != planes_before_prune * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes_before_prune * block.expansion,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes_before_prune * block.expansion),
)
print("before pruning is {}, after pruning is {}:".format(planes_before_prune,planes_after_prune))
# setting: accu number for_construct expansion
index_block_0_dict = {key: index[key] for key in index.keys() if '0.conv3' in key}
index_block_0_value = list(index_block_0_dict.values())[0]
bn_layer_0_value = list(bn_layer.values())[0]
layers = []
layers.append(
block(self.inplanes, planes_after_prune, planes_expand, planes_before_prune, index_block_0_value,
bn_layer_0_value,
stride, downsample))
# self.inplanes = planes * block.expansion
self.inplanes = planes_before_prune * block.expansion
for i in range(1, blocks):
index_block_i_dict = {key: index[key] for key in index.keys() if (str(i) + '.conv3') in key}
index_block_i_value = list(index_block_i_dict.values())[0]
bn_layer_i = {key: bn_layer[key] for key in bn_layer.keys() if (str(i) + '.bn3') in key}
bn_layer_i_value = list(bn_layer_i.values())[0]
layers.append(
block(self.inplanes, planes_after_prune, planes_expand, planes_before_prune, index_block_i_value,
bn_layer_i_value,
))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
return x
def resnet18_small(pretrained=False, **kwargs):
"""Constructs a ResNet_small-18 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet_small(BasicBlock, [2, 2, 2, 2], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))
return model
def resnet34_small(pretrained=False, **kwargs):
"""Constructs a ResNet_small-34 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet_small(BasicBlock, [3, 4, 6, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet34']))
return model
def resnet50_small(pretrained=False, **kwargs):
"""Constructs a ResNet_small-50 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet_small(Bottleneck, [3, 4, 6, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet50']))
return model
def resnet101_small(pretrained=False, **kwargs):
"""Constructs a ResNet_small-101 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet_small(Bottleneck, [3, 4, 23, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet101']))
return model
def resnet152_small(pretrained=False, **kwargs):
"""Constructs a ResNet_small-152 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet_small(Bottleneck, [3, 8, 36, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet152']))
return model
# ===== filter-pruning-geometric-median-master/models/resnet.py =====
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import init
from .res_utils import DownsampleA, DownsampleC, DownsampleD
import math
class ResNetBasicblock(nn.Module):
expansion = 1
"""
    ResNet basicblock (https://github.com/facebook/fb.resnet.torch/blob/master/models/resnet.lua)
"""
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(ResNetBasicblock, self).__init__()
self.conv_a = nn.Conv2d(inplanes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
self.bn_a = nn.BatchNorm2d(planes)
self.conv_b = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
self.bn_b = nn.BatchNorm2d(planes)
self.downsample = downsample
def forward(self, x):
residual = x
basicblock = self.conv_a(x)
basicblock = self.bn_a(basicblock)
basicblock = F.relu(basicblock, inplace=True)
basicblock = self.conv_b(basicblock)
basicblock = self.bn_b(basicblock)
if self.downsample is not None:
residual = self.downsample(x)
return F.relu(residual + basicblock, inplace=True)
class CifarResNet(nn.Module):
"""
ResNet optimized for the Cifar dataset, as specified in
https://arxiv.org/abs/1512.03385.pdf
"""
def __init__(self, block, depth, num_classes):
""" Constructor
Args:
depth: number of layers.
num_classes: number of classes
base_width: base width
"""
super(CifarResNet, self).__init__()
#Model type specifies number of layers for CIFAR-10 and CIFAR-100 model
assert (depth - 2) % 6 == 0, 'depth should be one of 20, 32, 44, 56, 110'
layer_blocks = (depth - 2) // 6
print ('CifarResNet : Depth : {} , Layers for each block : {}'.format(depth, layer_blocks))
self.num_classes = num_classes
self.conv_1_3x3 = nn.Conv2d(3, 16, kernel_size=3, stride=1, padding=1, bias=False)
self.bn_1 = nn.BatchNorm2d(16)
self.inplanes = 16
self.stage_1 = self._make_layer(block, 16, layer_blocks, 1)
self.stage_2 = self._make_layer(block, 32, layer_blocks, 2)
self.stage_3 = self._make_layer(block, 64, layer_blocks, 2)
self.avgpool = nn.AvgPool2d(8)
self.classifier = nn.Linear(64*block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
#m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
init.kaiming_normal(m.weight)
m.bias.data.zero_()
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = DownsampleA(self.inplanes, planes * block.expansion, stride)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv_1_3x3(x)
x = F.relu(self.bn_1(x), inplace=True)
x = self.stage_1(x)
x = self.stage_2(x)
x = self.stage_3(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
return self.classifier(x)
def resnet20(num_classes=10):
"""Constructs a ResNet-20 model for CIFAR-10 (by default)
Args:
num_classes (uint): number of classes
"""
model = CifarResNet(ResNetBasicblock, 20, num_classes)
return model
def resnet32(num_classes=10):
"""Constructs a ResNet-32 model for CIFAR-10 (by default)
Args:
num_classes (uint): number of classes
"""
model = CifarResNet(ResNetBasicblock, 32, num_classes)
return model
def resnet44(num_classes=10):
"""Constructs a ResNet-44 model for CIFAR-10 (by default)
Args:
num_classes (uint): number of classes
"""
model = CifarResNet(ResNetBasicblock, 44, num_classes)
return model
def resnet56(num_classes=10):
"""Constructs a ResNet-56 model for CIFAR-10 (by default)
Args:
num_classes (uint): number of classes
"""
model = CifarResNet(ResNetBasicblock, 56, num_classes)
return model
def resnet110(num_classes=10):
"""Constructs a ResNet-110 model for CIFAR-10 (by default)
Args:
num_classes (uint): number of classes
"""
model = CifarResNet(ResNetBasicblock, 110, num_classes)
return model
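# Illustrative usage sketch (not part of the original module): depth must satisfy
# depth = 6n + 2, giving n basic blocks per stage (ResNet-110 -> 18 per stage).
def _demo_resnet110():
    model = resnet110(num_classes=10)
    return sum(p.numel() for p in model.parameters())  # roughly 1.7M parameters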
# ===== filter-pruning-geometric-median-master/models/vgg.py =====
import torch.nn as nn
import torch.utils.model_zoo as model_zoo
import math
__all__ = [
'VGG', 'vgg11', 'vgg11_bn', 'vgg13', 'vgg13_bn', 'vgg16', 'vgg16_bn',
'vgg19_bn', 'vgg19',
]
model_urls = {
'vgg11': 'https://download.pytorch.org/models/vgg11-bbd30ac9.pth',
'vgg13': 'https://download.pytorch.org/models/vgg13-c768596a.pth',
'vgg16': 'https://download.pytorch.org/models/vgg16-397923af.pth',
'vgg19': 'https://download.pytorch.org/models/vgg19-dcbb9e9d.pth',
'vgg11_bn': 'https://download.pytorch.org/models/vgg11_bn-6002323d.pth',
'vgg13_bn': 'https://download.pytorch.org/models/vgg13_bn-abd245e5.pth',
'vgg16_bn': 'https://download.pytorch.org/models/vgg16_bn-6c64b313.pth',
'vgg19_bn': 'https://download.pytorch.org/models/vgg19_bn-c79401a0.pth',
}
class VGG(nn.Module):
def __init__(self, features, num_classes=1000):
super(VGG, self).__init__()
self.features = features
self.classifier = nn.Sequential(
nn.Linear(512 * 7 * 7, 4096),
nn.ReLU(True),
nn.Dropout(),
nn.Linear(4096, 4096),
nn.ReLU(True),
nn.Dropout(),
nn.Linear(4096, num_classes),
)
self._initialize_weights()
def forward(self, x):
x = self.features(x)
x = x.view(x.size(0), -1)
x = self.classifier(x)
return x
def _initialize_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
if m.bias is not None:
m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
m.weight.data.normal_(0, 0.01)
m.bias.data.zero_()
def make_layers(cfg, batch_norm=False):
layers = []
in_channels = 3
for v in cfg:
if v == 'M':
layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
else:
conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1)
if batch_norm:
layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)]
else:
layers += [conv2d, nn.ReLU(inplace=True)]
in_channels = v
return nn.Sequential(*layers)
cfg = {
'A': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
'B': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
'D': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],
'E': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'],
}
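# Illustrative sketch (not part of the original module): make_layers() turns a
# cfg list into the feature extractor, with 'M' entries becoming max-pool layers.
def _demo_make_layers():
    features = make_layers(cfg['A'], batch_norm=True)
    # 8 convs, each followed by BN + ReLU, plus 5 'M' max-pools = 29 modules
    return len(features)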
def vgg11(pretrained=False, **kwargs):
"""VGG 11-layer model (configuration "A")
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = VGG(make_layers(cfg['A']), **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['vgg11']))
return model
def vgg11_bn(pretrained=False, **kwargs):
"""VGG 11-layer model (configuration "A") with batch normalization
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = VGG(make_layers(cfg['A'], batch_norm=True), **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['vgg11_bn']))
return model
def vgg13(pretrained=False, **kwargs):
"""VGG 13-layer model (configuration "B")
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = VGG(make_layers(cfg['B']), **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['vgg13']))
return model
def vgg13_bn(pretrained=False, **kwargs):
"""VGG 13-layer model (configuration "B") with batch normalization
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = VGG(make_layers(cfg['B'], batch_norm=True), **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['vgg13_bn']))
return model
def vgg16(pretrained=False, **kwargs):
"""VGG 16-layer model (configuration "D")
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = VGG(make_layers(cfg['D']), **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['vgg16']))
return model
def vgg16_bn(pretrained=False, **kwargs):
"""VGG 16-layer model (configuration "D") with batch normalization
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = VGG(make_layers(cfg['D'], batch_norm=True), **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['vgg16_bn']))
return model
def vgg19(pretrained=False, **kwargs):
"""VGG 19-layer model (configuration "E")
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = VGG(make_layers(cfg['E']), **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['vgg19']))
return model
def vgg19_bn(pretrained=False, **kwargs):
"""VGG 19-layer model (configuration 'E') with batch normalization
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = VGG(make_layers(cfg['E'], batch_norm=True), **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['vgg19_bn']))
return model
# ===== filter-pruning-geometric-median-master/models/densenet.py =====
import math, torch
import torch.nn as nn
import torch.nn.functional as F
class Bottleneck(nn.Module):
def __init__(self, nChannels, growthRate):
super(Bottleneck, self).__init__()
interChannels = 4*growthRate
self.bn1 = nn.BatchNorm2d(nChannels)
self.conv1 = nn.Conv2d(nChannels, interChannels, kernel_size=1, bias=False)
self.bn2 = nn.BatchNorm2d(interChannels)
self.conv2 = nn.Conv2d(interChannels, growthRate, kernel_size=3, padding=1, bias=False)
def forward(self, x):
out = self.conv1(F.relu(self.bn1(x)))
out = self.conv2(F.relu(self.bn2(out)))
out = torch.cat((x, out), 1)
return out
class SingleLayer(nn.Module):
def __init__(self, nChannels, growthRate):
super(SingleLayer, self).__init__()
self.bn1 = nn.BatchNorm2d(nChannels)
self.conv1 = nn.Conv2d(nChannels, growthRate, kernel_size=3, padding=1, bias=False)
def forward(self, x):
out = self.conv1(F.relu(self.bn1(x)))
out = torch.cat((x, out), 1)
return out
class Transition(nn.Module):
def __init__(self, nChannels, nOutChannels):
super(Transition, self).__init__()
self.bn1 = nn.BatchNorm2d(nChannels)
self.conv1 = nn.Conv2d(nChannels, nOutChannels, kernel_size=1, bias=False)
def forward(self, x):
out = self.conv1(F.relu(self.bn1(x)))
out = F.avg_pool2d(out, 2)
return out
class DenseNet(nn.Module):
def __init__(self, growthRate, depth, reduction, nClasses, bottleneck):
super(DenseNet, self).__init__()
if bottleneck: nDenseBlocks = int( (depth-4) / 6 )
else : nDenseBlocks = int( (depth-4) / 3 )
nChannels = 2*growthRate
self.conv1 = nn.Conv2d(3, nChannels, kernel_size=3, padding=1, bias=False)
self.dense1 = self._make_dense(nChannels, growthRate, nDenseBlocks, bottleneck)
nChannels += nDenseBlocks*growthRate
nOutChannels = int(math.floor(nChannels*reduction))
self.trans1 = Transition(nChannels, nOutChannels)
nChannels = nOutChannels
self.dense2 = self._make_dense(nChannels, growthRate, nDenseBlocks, bottleneck)
nChannels += nDenseBlocks*growthRate
nOutChannels = int(math.floor(nChannels*reduction))
self.trans2 = Transition(nChannels, nOutChannels)
nChannels = nOutChannels
self.dense3 = self._make_dense(nChannels, growthRate, nDenseBlocks, bottleneck)
nChannels += nDenseBlocks*growthRate
self.bn1 = nn.BatchNorm2d(nChannels)
self.fc = nn.Linear(nChannels, nClasses)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
m.bias.data.zero_()
def _make_dense(self, nChannels, growthRate, nDenseBlocks, bottleneck):
layers = []
for i in range(int(nDenseBlocks)):
if bottleneck:
layers.append(Bottleneck(nChannels, growthRate))
else:
layers.append(SingleLayer(nChannels, growthRate))
nChannels += growthRate
return nn.Sequential(*layers)
def forward(self, x):
out = self.conv1(x)
out = self.trans1(self.dense1(out))
out = self.trans2(self.dense2(out))
out = self.dense3(out)
out = torch.squeeze(F.avg_pool2d(F.relu(self.bn1(out)), 8))
        out = F.log_softmax(self.fc(out), dim=1)
return out
def densenet100_12(num_classes=10):
model = DenseNet(12, 100, 0.5, num_classes, False)
return model
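# Editor's sketch (not part of the original file): a quick smoke test of the
# 100-layer, growth-rate-12 DenseNet above on a CIFAR-sized input.
if __name__ == '__main__':
    from torch.autograd import Variable
    net = densenet100_12(num_classes=10)
    y = net(Variable(torch.zeros(2, 3, 32, 32)))
    print(y.size())  # expected: (2, 10) log-probabilities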
| 3,518 | 33.5 | 91 | py |
filter-pruning-geometric-median | filter-pruning-geometric-median-master/models/vgg_cifar.py | import math
import torch
import torch.nn as nn
from torch.autograd import Variable
__all__ = ['vgg']
defaultcfg = {
11 : [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512],
13 : [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512],
16 : [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512],
19 : [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512],
}
class vgg(nn.Module):
def __init__(self, dataset='cifar10', depth=19, init_weights=True, cfg=None):
super(vgg, self).__init__()
if cfg is None:
cfg = defaultcfg[depth]
self.cfg = cfg
self.feature = self.make_layers(cfg, True)
if dataset == 'cifar10':
num_classes = 10
        elif dataset == 'cifar100':
            num_classes = 100
        else:
            raise ValueError('unsupported dataset: {}'.format(dataset))
self.classifier = nn.Sequential(
nn.Linear(cfg[-1], 512),
nn.BatchNorm1d(512),
nn.ReLU(inplace=True),
nn.Linear(512, num_classes)
)
if init_weights:
self._initialize_weights()
def make_layers(self, cfg, batch_norm=False):
layers = []
in_channels = 3
for v in cfg:
if v == 'M':
layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
else:
conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1, bias=False)
if batch_norm:
layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)]
else:
layers += [conv2d, nn.ReLU(inplace=True)]
in_channels = v
return nn.Sequential(*layers)
def forward(self, x):
x = self.feature(x)
x = nn.AvgPool2d(2)(x)
x = x.view(x.size(0), -1)
y = self.classifier(x)
return y
def _initialize_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
if m.bias is not None:
m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(0.5)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
m.weight.data.normal_(0, 0.01)
m.bias.data.zero_()
if __name__ == '__main__':
net = vgg()
x = Variable(torch.FloatTensor(16, 3, 40, 40))
y = net(x)
print(y.data.shape) | 2,607 | 31.6 | 108 | py |
filter-pruning-geometric-median | filter-pruning-geometric-median-master/models/resnext.py | import torch.nn as nn
import torch.nn.functional as F
from torch.nn import init
import math
class ResNeXtBottleneck(nn.Module):
expansion = 4
"""
  ResNeXt bottleneck type C (https://github.com/facebookresearch/ResNeXt/blob/master/models/resnext.lua)
"""
def __init__(self, inplanes, planes, cardinality, base_width, stride=1, downsample=None):
super(ResNeXtBottleneck, self).__init__()
D = int(math.floor(planes * (base_width/64.0)))
C = cardinality
self.conv_reduce = nn.Conv2d(inplanes, D*C, kernel_size=1, stride=1, padding=0, bias=False)
self.bn_reduce = nn.BatchNorm2d(D*C)
self.conv_conv = nn.Conv2d(D*C, D*C, kernel_size=3, stride=stride, padding=1, groups=cardinality, bias=False)
self.bn = nn.BatchNorm2d(D*C)
self.conv_expand = nn.Conv2d(D*C, planes*4, kernel_size=1, stride=1, padding=0, bias=False)
self.bn_expand = nn.BatchNorm2d(planes*4)
self.downsample = downsample
def forward(self, x):
residual = x
bottleneck = self.conv_reduce(x)
bottleneck = F.relu(self.bn_reduce(bottleneck), inplace=True)
bottleneck = self.conv_conv(bottleneck)
bottleneck = F.relu(self.bn(bottleneck), inplace=True)
bottleneck = self.conv_expand(bottleneck)
bottleneck = self.bn_expand(bottleneck)
if self.downsample is not None:
residual = self.downsample(x)
return F.relu(residual + bottleneck, inplace=True)
class CifarResNeXt(nn.Module):
"""
  ResNeXt optimized for the Cifar dataset, as specified in
https://arxiv.org/pdf/1611.05431.pdf
"""
def __init__(self, block, depth, cardinality, base_width, num_classes):
super(CifarResNeXt, self).__init__()
#Model type specifies number of layers for CIFAR-10 and CIFAR-100 model
assert (depth - 2) % 9 == 0, 'depth should be one of 29, 38, 47, 56, 101'
layer_blocks = (depth - 2) // 9
self.cardinality = cardinality
self.base_width = base_width
self.num_classes = num_classes
self.conv_1_3x3 = nn.Conv2d(3, 64, 3, 1, 1, bias=False)
self.bn_1 = nn.BatchNorm2d(64)
self.inplanes = 64
self.stage_1 = self._make_layer(block, 64 , layer_blocks, 1)
self.stage_2 = self._make_layer(block, 128, layer_blocks, 2)
self.stage_3 = self._make_layer(block, 256, layer_blocks, 2)
self.avgpool = nn.AvgPool2d(8)
self.classifier = nn.Linear(256*block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
init.kaiming_normal(m.weight)
m.bias.data.zero_()
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, self.cardinality, self.base_width, stride, downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes, self.cardinality, self.base_width))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv_1_3x3(x)
x = F.relu(self.bn_1(x), inplace=True)
x = self.stage_1(x)
x = self.stage_2(x)
x = self.stage_3(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
return self.classifier(x)
def resnext29_16_64(num_classes=10):
"""Constructs a ResNeXt-29, 16*64d model for CIFAR-10 (by default)
Args:
num_classes (uint): number of classes
"""
model = CifarResNeXt(ResNeXtBottleneck, 29, 16, 64, num_classes)
return model
def resnext29_8_64(num_classes=10):
"""Constructs a ResNeXt-29, 8*64d model for CIFAR-10 (by default)
Args:
num_classes (uint): number of classes
"""
model = CifarResNeXt(ResNeXtBottleneck, 29, 8, 64, num_classes)
return model
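# Editor's sketch (not part of the original file): comparing the two exported
# CIFAR ResNeXt variants; both map a 32x32 batch to num_classes logits, the
# 16*64d model simply carrying more parameters than the 8*64d one.
if __name__ == '__main__':
  from torch.autograd import Variable
  for builder in (resnext29_8_64, resnext29_16_64):
    net = builder(num_classes=10)
    n_params = sum(p.nelement() for p in net.parameters())
    y = net(Variable(torch.zeros(2, 3, 32, 32)))
    print(builder.__name__, n_params, y.size())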
| 4,180 | 31.92126 | 113 | py |
filter-pruning-geometric-median | filter-pruning-geometric-median-master/models/resnet_feature.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import init
from .res_utils import DownsampleA, DownsampleC, DownsampleD
import math
class ResNetBasicblock(nn.Module):
expansion = 1
"""
  ResNet basicblock (https://github.com/facebook/fb.resnet.torch/blob/master/models/resnet.lua)
"""
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(ResNetBasicblock, self).__init__()
self.conv_a = nn.Conv2d(inplanes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
self.bn_a = nn.BatchNorm2d(planes)
self.conv_b = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
self.bn_b = nn.BatchNorm2d(planes)
self.downsample = downsample
def forward(self, x):
residual = x
basicblock = self.conv_a(x)
basicblock = self.bn_a(basicblock)
basicblock = F.relu(basicblock, inplace=True)
basicblock = self.conv_b(basicblock)
basicblock = self.bn_b(basicblock)
if self.downsample is not None:
residual = self.downsample(x)
return F.relu(residual + basicblock, inplace=True)
class CifarResNet(nn.Module):
"""
ResNet optimized for the Cifar dataset, as specified in
https://arxiv.org/abs/1512.03385.pdf
"""
def __init__(self, block, depth, num_classes):
""" Constructor
Args:
depth: number of layers.
num_classes: number of classes
base_width: base width
"""
super(CifarResNet, self).__init__()
#Model type specifies number of layers for CIFAR-10 and CIFAR-100 model
assert (depth - 2) % 6 == 0, 'depth should be one of 20, 32, 44, 56, 110'
layer_blocks = (depth - 2) // 6
print ('CifarResNet : Depth : {} , Layers for each block : {}'.format(depth, layer_blocks))
self.num_classes = num_classes
self.conv_1_3x3 = nn.Conv2d(3, 16, kernel_size=3, stride=1, padding=1, bias=False)
self.bn_1 = nn.BatchNorm2d(16)
self.inplanes = 16
self.stage_1 = self._make_layer(block, 16, layer_blocks, 1)
self.stage_2 = self._make_layer(block, 32, layer_blocks, 2)
self.stage_3 = self._make_layer(block, 64, layer_blocks, 2)
self.avgpool = nn.AvgPool2d(8)
self.classifier = nn.Linear(64*block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
#m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
init.kaiming_normal(m.weight)
m.bias.data.zero_()
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = DownsampleA(self.inplanes, planes * block.expansion, stride)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv_1_3x3(x)
x = F.relu(self.bn_1(x), inplace=True)
x = self.stage_1(x)
x = self.stage_2(x)
x = self.stage_3(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
return self.classifier(x)
def resnet20(num_classes=10):
"""Constructs a ResNet-20 model for CIFAR-10 (by default)
Args:
num_classes (uint): number of classes
"""
model = CifarResNet(ResNetBasicblock, 20, num_classes)
return model
def resnet32(num_classes=10):
"""Constructs a ResNet-32 model for CIFAR-10 (by default)
Args:
num_classes (uint): number of classes
"""
model = CifarResNet(ResNetBasicblock, 32, num_classes)
return model
def resnet44(num_classes=10):
"""Constructs a ResNet-44 model for CIFAR-10 (by default)
Args:
num_classes (uint): number of classes
"""
model = CifarResNet(ResNetBasicblock, 44, num_classes)
return model
def resnet56(num_classes=10):
"""Constructs a ResNet-56 model for CIFAR-10 (by default)
Args:
num_classes (uint): number of classes
"""
model = CifarResNet(ResNetBasicblock, 56, num_classes)
return model
def resnet110(num_classes=10):
"""Constructs a ResNet-110 model for CIFAR-10 (by default)
Args:
num_classes (uint): number of classes
"""
model = CifarResNet(ResNetBasicblock, 110, num_classes)
return model
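# Editor's sketch (not part of the original file): each constructor above
# follows the (depth - 2) // 6 rule, so e.g. resnet56 stacks 9 basic blocks
# per stage; a CIFAR-sized forward pass confirms the wiring.
if __name__ == '__main__':
  from torch.autograd import Variable
  net = resnet56(num_classes=10)
  y = net(Variable(torch.zeros(2, 3, 32, 32)))
  print(y.size())  # expected: (2, 10)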
| 4,484 | 29.931034 | 98 | py |
filter-pruning-geometric-median | filter-pruning-geometric-median-master/models/caffe_cifar.py | from __future__ import division
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import init
import math
## http://torch.ch/blog/2015/07/30/cifar.html
class CifarCaffeNet(nn.Module):
def __init__(self, num_classes):
super(CifarCaffeNet, self).__init__()
self.num_classes = num_classes
self.block_1 = nn.Sequential(
nn.Conv2d(3, 32, kernel_size=3, stride=1, padding=1),
nn.MaxPool2d(kernel_size=3, stride=2),
nn.ReLU(),
nn.BatchNorm2d(32))
self.block_2 = nn.Sequential(
nn.Conv2d(32, 32, kernel_size=3, stride=1, padding=1),
nn.Conv2d(32, 64, kernel_size=3, stride=1, padding=1),
nn.ReLU(),
nn.AvgPool2d(kernel_size=3, stride=2),
nn.BatchNorm2d(64))
self.block_3 = nn.Sequential(
nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1),
nn.Conv2d(64,128, kernel_size=3, stride=1, padding=1),
nn.ReLU(),
nn.AvgPool2d(kernel_size=3, stride=2),
nn.BatchNorm2d(128))
self.classifier = nn.Linear(128*9, self.num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
init.kaiming_normal(m.weight)
m.bias.data.zero_()
def forward(self, x):
x = self.block_1.forward(x)
x = self.block_2.forward(x)
x = self.block_3.forward(x)
x = x.view(x.size(0), -1)
#print ('{}'.format(x.size()))
return self.classifier(x)
def caffe_cifar(num_classes=10):
model = CifarCaffeNet(num_classes)
return model
| 1,750 | 28.183333 | 64 | py |
filter-pruning-geometric-median | filter-pruning-geometric-median-master/models/resnet_small_V3.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import init
#from .res_utils import DownsampleA, DownsampleC, DownsampleD
import math
class DownsampleA(nn.Module):
def __init__(self, nIn, nOut, stride):
super(DownsampleA, self).__init__()
self.avg = nn.AvgPool2d(kernel_size=1, stride=stride)
def forward(self, x):
x = self.avg(x)
return torch.cat((x, x.mul(0)), 1)
class ResNetBasicblock(nn.Module):
expansion = 1
"""
  ResNet basicblock (https://github.com/facebook/fb.resnet.torch/blob/master/models/resnet.lua)
"""
def __init__(self, inplanes, planes, index, stride=1, downsample=None):
super(ResNetBasicblock, self).__init__()
self.conv_a = nn.Conv2d(inplanes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
self.bn_a = nn.BatchNorm2d(planes)
self.conv_b = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
self.bn_b = nn.BatchNorm2d(planes)
self.downsample = downsample
self.inplanes = inplanes
self.index = index
def forward(self, x):
residual = x
basicblock = self.conv_a(x)
basicblock = self.bn_a(basicblock)
basicblock = F.relu(basicblock, inplace=True)
basicblock = self.conv_b(basicblock)
basicblock = self.bn_b(basicblock)
if self.downsample is not None:
residual = self.downsample(x)
# out = self.out.cuda()
# out.zero_()
# out = torch.FloatTensor(self.inplanes, basicblock.size()[1], basicblock.size()[2]).zero_()
# out.index_add_(0, self.index[0], residual.data)
# out.index_add_(0, self.index[1], basicblock.data)
        # NOTE (editor): the index_add_-based channel recombination sketched in
        # the comments above is disabled in the original source; this line
        # returns random activations, so this V3 variant is a non-functional
        # placeholder as written.
        out = torch.rand(self.inplanes, basicblock.size()[1], basicblock.size()[2])
return F.relu(out, inplace=True)
class CifarResNet(nn.Module):
"""
ResNet optimized for the Cifar dataset, as specified in
https://arxiv.org/abs/1512.03385.pdf
"""
def __init__(self, block, depth, num_classes, index, rate=[16, 16, 32, 64, 16, 32, 64]):
""" Constructor
Args:
depth: number of layers.
num_classes: number of classes
base_width: base width
"""
super(CifarResNet, self).__init__()
#Model type specifies number of layers for CIFAR-10 and CIFAR-100 model
assert (depth - 2) % 6 == 0, 'depth should be one of 20, 32, 44, 56, 110'
layer_blocks = (depth - 2) // 6
self.stage_num = (depth - 2) // 3
print ('CifarResNet : Depth : {} , Layers for each block : {}'.format(depth, layer_blocks))
print(len(index))
self.num_classes = num_classes
self.rate = rate
self.index = index
self.conv_1_3x3 = nn.Conv2d(3, rate[0], kernel_size=3, stride=1, padding=1, bias=False)
self.bn_1 = nn.BatchNorm2d(rate[0])
print(len(index[1 : self.stage_num + 1]))
self.inplanes = rate[0]
self.stage_1 = self._make_layer(block, rate[4], rate[1], index[1 : self.stage_num + 1], layer_blocks, 1)
self.stage_2 = self._make_layer(block, rate[5], rate[2], index[self.stage_num + 1 : self.stage_num * 2 + 1], layer_blocks, 2)
self.stage_3 = self._make_layer(block, rate[6], rate[3], index[self.stage_num * 2 + 1 : self.stage_num * 3 + 1], layer_blocks, 2)
self.avgpool = nn.AvgPool2d(8)
self.classifier = nn.Linear(64*block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
#m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
init.kaiming_normal(m.weight)
m.bias.data.zero_()
def _make_layer(self, block, inplanes, planes, index, blocks, stride=1):
downsample = None
if stride != 1 :
downsample = DownsampleA(self.inplanes, planes * block.expansion, stride)
# print(self.inplanes)
layers = []
        i = 0
        j = 2
layers.append(block(self.inplanes, planes, index[i:j], stride, downsample))
# self.inplanes = planes * block.expansion
i += 2
j += 2
self.inplanes = inplanes
print(inplanes)
        # use a throwaway loop variable so the manually stepped i/j index
        # windows (two index lists per block) are not clobbered by the loop
        for _ in range(1, blocks):
            layers.append(block(self.inplanes, planes, index[i:j]))
            i += 2
            j += 2
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv_1_3x3(x)
x = F.relu(self.bn_1(x), inplace=True)
x = self.stage_1(x)
x = self.stage_2(x)
x = self.stage_3(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
return self.classifier(x)
def resnet20_small(index, rate, num_classes=10):
"""Constructs a ResNet-20 model for CIFAR-10 (by default)
Args:
num_classes (uint): number of classes
"""
model = CifarResNet(ResNetBasicblock, 20, num_classes, index, rate)
return model
def resnet32_small(index, rate, num_classes=10):
"""Constructs a ResNet-32 model for CIFAR-10 (by default)
Args:
num_classes (uint): number of classes
"""
  model = CifarResNet(ResNetBasicblock, 32, num_classes, index, rate)
return model
def resnet44_small(index, rate, num_classes=10):
"""Constructs a ResNet-44 model for CIFAR-10 (by default)
Args:
num_classes (uint): number of classes
"""
  model = CifarResNet(ResNetBasicblock, 44, num_classes, index, rate)
return model
def resnet56_small(index, rate, num_classes=10):
"""Constructs a ResNet-56 model for CIFAR-10 (by default)
Args:
num_classes (uint): number of classes
"""
  model = CifarResNet(ResNetBasicblock, 56, num_classes, index, rate)
return model
def resnet110_small(index, rate, num_classes=10):
"""Constructs a ResNet-110 model for CIFAR-10 (by default)
Args:
num_classes (uint): number of classes
"""
  model = CifarResNet(ResNetBasicblock, 110, num_classes, index, rate)
return model
| 5,825 | 31.915254 | 133 | py |
filter-pruning-geometric-median | filter-pruning-geometric-median-master/models/resnet_mod.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import init
from .res_utils import DownsampleA, DownsampleC, DownsampleD
import math
class ResNetBasicblock(nn.Module):
expansion = 1
"""
  ResNet basicblock (https://github.com/facebook/fb.resnet.torch/blob/master/models/resnet.lua)
"""
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(ResNetBasicblock, self).__init__()
self.conv_a = nn.Conv2d(inplanes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
self.bn_a = nn.BatchNorm2d(planes)
self.conv_b = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
self.bn_b = nn.BatchNorm2d(planes)
self.downsample = downsample
def forward(self, x):
if isinstance(x, list):
x, is_list, features = x[0], True, x[1:]
else:
is_list, features = False, None
residual = x
conv_a = self.conv_a(x)
bn_a = self.bn_a(conv_a)
relu_a = F.relu(bn_a, inplace=True)
conv_b = self.conv_b(relu_a)
bn_b = self.bn_b(conv_b)
if self.downsample is not None:
residual = self.downsample(x)
output = F.relu(residual + bn_b, inplace=True)
if is_list:
return [output] + features + [bn_a, bn_b]
else:
return output
class CifarResNet(nn.Module):
"""
ResNet optimized for the Cifar dataset, as specified in
https://arxiv.org/abs/1512.03385.pdf
"""
def __init__(self, block, depth, num_classes):
""" Constructor
Args:
depth: number of layers.
num_classes: number of classes
base_width: base width
"""
super(CifarResNet, self).__init__()
#Model type specifies number of layers for CIFAR-10 and CIFAR-100 model
assert (depth - 2) % 6 == 0, 'depth should be one of 20, 32, 44, 56, 110'
layer_blocks = (depth - 2) // 6
print ('CifarResNet : Depth : {} , Layers for each block : {}'.format(depth, layer_blocks))
self.num_classes = num_classes
self.conv_1_3x3 = nn.Conv2d(3, 16, kernel_size=3, stride=1, padding=1, bias=False)
self.bn_1 = nn.BatchNorm2d(16)
self.inplanes = 16
self.stage_1 = self._make_layer(block, 16, layer_blocks, 1)
self.stage_2 = self._make_layer(block, 32, layer_blocks, 2)
self.stage_3 = self._make_layer(block, 64, layer_blocks, 2)
self.avgpool = nn.AvgPool2d(8)
self.classifier = nn.Linear(64*block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
#m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
init.kaiming_normal(m.weight)
m.bias.data.zero_()
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = DownsampleA(self.inplanes, planes * block.expansion, stride)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def forward(self, x):
if isinstance(x, list):
assert len(x) == 1, 'The length of inputs must be one vs {}'.format(len(x))
x, is_list = x[0], True
else:
x, is_list = x, False
x = self.conv_1_3x3(x)
x = F.relu(self.bn_1(x), inplace=True)
if is_list: x = [x]
x = self.stage_1(x)
x = self.stage_2(x)
x = self.stage_3(x)
if is_list:
x, features = x[0], x[1:]
else:
features = None
x = self.avgpool(x)
x = x.view(x.size(0), -1)
cls = self.classifier(x)
if is_list: return cls, features
else: return cls
def resnet_mod20(num_classes=10):
"""Constructs a ResNet-20 model for CIFAR-10 (by default)
Args:
num_classes (uint): number of classes
"""
model = CifarResNet(ResNetBasicblock, 20, num_classes)
return model
def resnet_mod32(num_classes=10):
"""Constructs a ResNet-32 model for CIFAR-10 (by default)
Args:
num_classes (uint): number of classes
"""
model = CifarResNet(ResNetBasicblock, 32, num_classes)
return model
def resnet_mod44(num_classes=10):
"""Constructs a ResNet-44 model for CIFAR-10 (by default)
Args:
num_classes (uint): number of classes
"""
model = CifarResNet(ResNetBasicblock, 44, num_classes)
return model
def resnet_mod56(num_classes=10):
"""Constructs a ResNet-56 model for CIFAR-10 (by default)
Args:
num_classes (uint): number of classes
"""
model = CifarResNet(ResNetBasicblock, 56, num_classes)
return model
def resnet_mod110(num_classes=10):
"""Constructs a ResNet-110 model for CIFAR-10 (by default)
Args:
num_classes (uint): number of classes
"""
model = CifarResNet(ResNetBasicblock, 110, num_classes)
return model
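# Editor's sketch (not part of the original file) of the list-input protocol
# implemented above: wrapping the input in a one-element list makes every
# basic block append its bn_a/bn_b activations, so the network returns
# (logits, features) instead of logits alone.
if __name__ == '__main__':
  from torch.autograd import Variable
  net = resnet_mod20(num_classes=10)
  cls, features = net([Variable(torch.zeros(2, 3, 32, 32))])
  # depth 20 -> 3 stages * 3 blocks * 2 BN outputs = 18 feature maps
  print(cls.size(), len(features))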
| 5,027 | 28.928571 | 98 | py |
filter-pruning-geometric-median | filter-pruning-geometric-median-master/models/__init__.py | """The models subpackage contains definitions for the following model
architectures:
- `ResNeXt` for CIFAR10 and CIFAR100
You can construct a model with random weights by calling its constructor:
.. code:: python
import models
resnext29_16_64 = models.ResNeXt29_16_64(num_classes)
resnext29_8_64 = models.ResNeXt29_8_64(num_classes)
resnet20 = models.ResNet20(num_classes)
resnet32 = models.ResNet32(num_classes)
.. ResNext: https://arxiv.org/abs/1611.05431
"""
from .resnext import resnext29_8_64, resnext29_16_64
from .resnet import resnet20, resnet32, resnet44, resnet56, resnet110
from .resnet_mod import resnet_mod20, resnet_mod32, resnet_mod44, resnet_mod56, resnet_mod110
from .preresnet import preresnet20, preresnet32, preresnet44, preresnet56, preresnet110
from .caffe_cifar import caffe_cifar
from .densenet import densenet100_12
# imagenet based resnet
from .imagenet_resnet import resnet18, resnet34, resnet50, resnet101, resnet152
# cifar based resnet
from .resnet import CifarResNet, ResNetBasicblock
# cifar based resnet pruned
from .resnet_small import resnet20_small, resnet32_small, resnet44_small, resnet56_small, resnet110_small
# imagenet based resnet pruned
# from .imagenet_resnet_small import resnet18_small, resnet34_small, resnet50_small, resnet101_small, resnet152_small
from .imagenet_resnet_small import resnet18_small, resnet34_small, resnet50_small, resnet101_small, resnet152_small
from .vgg_cifar10 import *
from .vgg import *
| 1,486 | 38.131579 | 117 | py |
filter-pruning-geometric-median | filter-pruning-geometric-median-master/models/res_utils.py | import torch
import torch.nn as nn
class DownsampleA(nn.Module):
def __init__(self, nIn, nOut, stride):
super(DownsampleA, self).__init__()
self.avg = nn.AvgPool2d(kernel_size=1, stride=stride)
def forward(self, x):
x = self.avg(x)
return torch.cat((x, x.mul(0)), 1)
class DownsampleC(nn.Module):
def __init__(self, nIn, nOut, stride):
super(DownsampleC, self).__init__()
assert stride != 1 or nIn != nOut
self.conv = nn.Conv2d(nIn, nOut, kernel_size=1, stride=stride, padding=0, bias=False)
def forward(self, x):
x = self.conv(x)
return x
class DownsampleD(nn.Module):
def __init__(self, nIn, nOut, stride):
super(DownsampleD, self).__init__()
assert stride == 2
self.conv = nn.Conv2d(nIn, nOut, kernel_size=2, stride=stride, padding=0, bias=False)
self.bn = nn.BatchNorm2d(nOut)
def forward(self, x):
x = self.conv(x)
x = self.bn(x)
return x
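# Editor's sketch (not part of the original file): shape checks for the
# shortcut variants above. DownsampleA is the parameter-free "option A" of
# the ResNet paper: a strided 1x1 average pool halves the spatial size and
# concatenating x with x.mul(0) zero-pads the channels.
if __name__ == '__main__':
  from torch.autograd import Variable
  x = Variable(torch.zeros(2, 16, 32, 32))
  print(DownsampleA(16, 32, 2)(x).size())  # (2, 32, 16, 16)
  print(DownsampleC(16, 32, 2)(x).size())  # (2, 32, 16, 16)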
#
#class SelectiveSequential(nn.Module):
# def __init__(self, to_select, modules_dict):
# super(SelectiveSequential, self).__init__()
# for key, module in modules_dict.items():
# self.add_module(key, module)
# self._to_select = to_select
#
# def forward(self,x):
# list = []
# for name, module in self._modules.items():
# x = module(x)
# if name in self._to_select:
# list.append(x)
# return list
#
#class FeatureExtractor(nn.Module):
# def __init__(self, submodule, extracted_layers):
# super(FeatureExtractor, self).__init__()
# self.submodule = submodule
#
# def forward(self, x):
# outputs = []
# for name, module in self.submodule._modules.items():
# x = module(x)
# if name in self.extracted_layers:
# outputs += [x]
# return outputs + [x]
#
#original_model = torchvision.models.alexnet(pretrained=True)
#
#class AlexNetConv4(nn.Module):
# def __init__(self):
# super(AlexNetConv4, self).__init__()
# self.features = nn.Sequential(
# # stop at conv4
# *list(original_model.features.children())[:-3]
# )
# def forward(self, x):
# x = self.features(x)
# return x
#
#model = AlexNetConv4()
#
#
#extract_feature = {}
#count = 0
#def save_hook(module, input, output):
## global hook_key
# global count
# temp = torch.zeros(output.size())
# temp.copy_(output.data)
# extract_feature[count] = temp
# count += 1
# print(extract_feature)
#
#class Myextract(nn.Module):
#
# def __init__(self, model):
# super(Myextract, self).__init__()
# self.model = model
# self.extract_feature = {}
# self.hook_key = {}
# self.count= 0
#
# def add_hook(self):
# for key, module in model._modules.items():
# if 'stage' in key:
# i = 1
# for block in module.children():
## self.get_key( key + '_block_' + str(i))
# self.add_hook_block(key + '_block_' + str(i), module)
# i = i+1
# print(i)
# else:
# self.get_key (key)
# module.register_forward_hook (save_hook)
# print('add hook done')
#
# def add_hook_block(self,key,module):
## module.bn_a.register_forward_hook (self.save(key+'_bn_a'))
## module.bn_b.register_forward_hook (self.save(key+'_bn_b'))
# self.get_key(key+'_bn_a')
# module.bn_a.register_forward_hook (save_hook)
# self.get_key(key+'_bn_b')
# module.bn_b.register_forward_hook (save_hook)
# print('add hook block done')
#
#
# def get_key(self, key):
# self.count += 1
# self.hook_key[self.count] = key
#
# def run(self):
# self.add_hook()
#
#
#model.layer2.conv1.register_forward_hook (hook) | 3,941 | 28.41791 | 89 | py |
filter-pruning-geometric-median | filter-pruning-geometric-median-master/models/vgg_cifar10.py | import math
import torch
import torch.nn as nn
from torch.autograd import Variable
__all__ = ['vgg']
defaultcfg = {
11: [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512],
13: [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512],
16: [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512],
19: [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512],
}
class vgg(nn.Module):
def __init__(self, dataset='cifar10', depth=19, init_weights=True, cfg=None):
super(vgg, self).__init__()
if cfg is None:
cfg = defaultcfg[depth]
self.cfg = cfg
self.feature = self.make_layers(cfg, True)
if dataset == 'cifar10':
num_classes = 10
        elif dataset == 'cifar100':
            num_classes = 100
        else:
            raise ValueError('unsupported dataset: {}'.format(dataset))
self.classifier = nn.Sequential(
nn.Linear(cfg[-1], 512),
nn.BatchNorm1d(512),
nn.ReLU(inplace=True),
nn.Linear(512, num_classes)
)
if init_weights:
self._initialize_weights()
def make_layers(self, cfg, batch_norm=False):
layers = []
in_channels = 3
for v in cfg:
if v == 'M':
layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
else:
conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1, bias=False)
if batch_norm:
layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)]
else:
layers += [conv2d, nn.ReLU(inplace=True)]
in_channels = v
return nn.Sequential(*layers)
def forward(self, x):
x = self.feature(x)
x = nn.AvgPool2d(2)(x)
x = x.view(x.size(0), -1)
y = self.classifier(x)
return y
def _initialize_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
if m.bias is not None:
m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(0.5)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
m.weight.data.normal_(0, 0.01)
m.bias.data.zero_()
if __name__ == '__main__':
net = vgg(depth=16)
x = Variable(torch.FloatTensor(16, 3, 40, 40))
y = net(x)
print(y.data.shape)
a = []
for x, y in enumerate(net.named_parameters()):
print(x, y[0], y[1].size())
#
# for index, m in enumerate(net.modules()):
# print(index,m)
# if isinstance(m, nn.Conv2d):
# print("conv",index, m)
# import numpy as np
# cfg = [32, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 256, 256, 256, 'M', 256, 256, 256]
#
# cfg_mask = []
# layer_id = 0
# for m in net.modules():
# if isinstance(m, nn.Conv2d):
# out_channels = m.weight.data.shape[0]
# if out_channels == cfg[layer_id]:
# cfg_mask.append(torch.ones(out_channels))
# layer_id += 1
# continue
# weight_copy = m.weight.data.abs().clone()
# weight_copy = weight_copy.cpu().numpy()
# L1_norm = np.sum(weight_copy, axis=(1, 2, 3))
# arg_max = np.argsort(L1_norm)
# arg_max_rev = arg_max[::-1][:cfg[layer_id]]
# assert arg_max_rev.size == cfg[layer_id], "size of arg_max_rev not correct"
# mask = torch.zeros(out_channels)
# mask[arg_max_rev.tolist()] = 1
# cfg_mask.append(mask)
# layer_id += 1
# elif isinstance(m, nn.MaxPool2d):
# layer_id += 1
#
# newmodel = vgg(dataset='cifar10', cfg=cfg)
# newmodel.cuda()
#
# start_mask = torch.ones(3)
# layer_id_in_cfg = 0
# end_mask = cfg_mask[layer_id_in_cfg]
# for [m0, m1] in zip(net.modules(), newmodel.modules()):
# if isinstance(m0, nn.BatchNorm2d):
# idx1 = np.squeeze(np.argwhere(np.asarray(end_mask.cpu().numpy())))
# if idx1.size == 1:
# idx1 = np.resize(idx1, (1,))
# m1.weight.data = m0.weight.data[idx1.tolist()].clone()
# m1.bias.data = m0.bias.data[idx1.tolist()].clone()
# m1.running_mean = m0.running_mean[idx1.tolist()].clone()
# m1.running_var = m0.running_var[idx1.tolist()].clone()
# layer_id_in_cfg += 1
# start_mask = end_mask
# if layer_id_in_cfg < len(cfg_mask): # do not change in Final FC
# end_mask = cfg_mask[layer_id_in_cfg]
# elif isinstance(m0, nn.Conv2d):
# idx0 = np.squeeze(np.argwhere(np.asarray(start_mask.cpu().numpy())))
# idx1 = np.squeeze(np.argwhere(np.asarray(end_mask.cpu().numpy())))
# print('In shape: {:d}, Out shape {:d}.'.format(idx0.size, idx1.size))
# if idx0.size == 1:
# idx0 = np.resize(idx0, (1,))
# if idx1.size == 1:
# idx1 = np.resize(idx1, (1,))
# w1 = m0.weight.data[:, idx0.tolist(), :, :].clone()
# w1 = w1[idx1.tolist(), :, :, :].clone()
# m1.weight.data = w1.clone()
# elif isinstance(m0, nn.Linear):
# if layer_id_in_cfg == len(cfg_mask):
# idx0 = np.squeeze(np.argwhere(np.asarray(cfg_mask[-1].cpu().numpy())))
# if idx0.size == 1:
# idx0 = np.resize(idx0, (1,))
# m1.weight.data = m0.weight.data[:, idx0].clone()
# m1.bias.data = m0.bias.data.clone()
# layer_id_in_cfg += 1
# continue
# m1.weight.data = m0.weight.data.clone()
# m1.bias.data = m0.bias.data.clone()
# elif isinstance(m0, nn.BatchNorm1d):
# m1.weight.data = m0.weight.data.clone()
# m1.bias.data = m0.bias.data.clone()
# m1.running_mean = m0.running_mean.clone()
# m1.running_var = m0.running_var.clone()
# for m in net.modules():
# if isinstance(m, nn.Conv2d):
# a.append(m)
# print(m)
print(1)
| 6,395 | 37.53012 | 107 | py |
filter-pruning-geometric-median | filter-pruning-geometric-median-master/VGG_cifar/main_cifar_vgg.py | from __future__ import print_function
import argparse
import numpy as np
import os
import sys
import shutil
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
from torch.autograd import Variable
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import models
# Training settings
parser = argparse.ArgumentParser(description='PyTorch Slimming CIFAR training')
parser.add_argument('data_path', type=str, help='Path to dataset')
parser.add_argument('--dataset', type=str, default='cifar100',
help='training dataset (default: cifar100)')
parser.add_argument('--batch-size', type=int, default=64, metavar='N',
help='input batch size for training (default: 64)')
parser.add_argument('--test-batch-size', type=int, default=256, metavar='N',
help='input batch size for testing (default: 256)')
parser.add_argument('--epochs', type=int, default=160, metavar='N',
help='number of epochs to train (default: 160)')
parser.add_argument('--start-epoch', default=0, type=int, metavar='N',
help='manual epoch number (useful on restarts)')
parser.add_argument('--lr', type=float, default=0.1, metavar='LR',
help='learning rate (default: 0.1)')
parser.add_argument('--momentum', type=float, default=0.9, metavar='M',
help='SGD momentum (default: 0.9)')
parser.add_argument('--weight-decay', '--wd', default=1e-4, type=float,
metavar='W', help='weight decay (default: 1e-4)')
parser.add_argument('--resume', default='', type=str, metavar='PATH',
help='path to latest checkpoint (default: none)')
parser.add_argument('--no-cuda', action='store_true', default=False,
help='disables CUDA training')
parser.add_argument('--seed', type=int, default=1, metavar='S',
help='random seed (default: 1)')
parser.add_argument('--log-interval', type=int, default=100, metavar='N',
help='how many batches to wait before logging training status')
parser.add_argument('--save', default='./logs', type=str, metavar='PATH',
help='path to save prune model (default: current directory)')
parser.add_argument('--arch', default='vgg', type=str,
help='architecture to use')
parser.add_argument('--depth', default=16, type=int,
help='depth of the neural network')
args = parser.parse_args()
args.cuda = not args.no_cuda and torch.cuda.is_available()
torch.manual_seed(args.seed)
if args.cuda:
torch.cuda.manual_seed(args.seed)
if not os.path.exists(args.save):
os.makedirs(args.save)
kwargs = {'num_workers': 1, 'pin_memory': True} if args.cuda else {}
if args.dataset == 'cifar10':
train_loader = torch.utils.data.DataLoader(
datasets.CIFAR10(args.data_path, train=True, download=True,
transform=transforms.Compose([
transforms.Pad(4),
transforms.RandomCrop(32),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
])),
batch_size=args.batch_size, shuffle=True, **kwargs)
test_loader = torch.utils.data.DataLoader(
datasets.CIFAR10(args.data_path, train=False, transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
])),
batch_size=args.test_batch_size, shuffle=True, **kwargs)
else:
train_loader = torch.utils.data.DataLoader(
datasets.CIFAR100(args.data_path, train=True, download=True,
transform=transforms.Compose([
transforms.Pad(4),
transforms.RandomCrop(32),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
])),
batch_size=args.batch_size, shuffle=True, **kwargs)
test_loader = torch.utils.data.DataLoader(
datasets.CIFAR100(args.data_path, train=False, transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
])),
batch_size=args.test_batch_size, shuffle=True, **kwargs)
model = models.__dict__[args.arch](dataset=args.dataset, depth=args.depth)
if args.cuda:
model.cuda()
optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)
if args.resume:
if os.path.isfile(args.resume):
print("=> loading checkpoint '{}'".format(args.resume))
checkpoint = torch.load(args.resume)
args.start_epoch = checkpoint['epoch']
best_prec1 = checkpoint['best_prec1']
model.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer'])
print("=> loaded checkpoint '{}' (epoch {}) Prec1: {:f}"
.format(args.resume, checkpoint['epoch'], best_prec1))
else:
print("=> no checkpoint found at '{}'".format(args.resume))
def train(epoch):
model.train()
avg_loss = 0.
train_acc = 0.
for batch_idx, (data, target) in enumerate(train_loader):
if args.cuda:
data, target = data.cuda(), target.cuda()
data, target = Variable(data), Variable(target)
optimizer.zero_grad()
output = model(data)
loss = F.cross_entropy(output, target)
avg_loss += loss.data[0]
pred = output.data.max(1, keepdim=True)[1]
train_acc += pred.eq(target.data.view_as(pred)).cpu().sum()
loss.backward()
optimizer.step()
if batch_idx % args.log_interval == 0:
print('Train Epoch: {} [{}/{} ({:.1f}%)]\tLoss: {:.6f}'.format(
epoch, batch_idx * len(data), len(train_loader.dataset),
100. * batch_idx / len(train_loader), loss.data[0]))
def test():
model.eval()
test_loss = 0
correct = 0
for data, target in test_loader:
if args.cuda:
data, target = data.cuda(), target.cuda()
data, target = Variable(data, volatile=True), Variable(target)
output = model(data)
test_loss += F.cross_entropy(output, target, size_average=False).data[0] # sum up batch loss
pred = output.data.max(1, keepdim=True)[1] # get the index of the max log-probability
correct += pred.eq(target.data.view_as(pred)).cpu().sum()
test_loss /= len(test_loader.dataset)
print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.1f}%)\n'.format(
test_loss, correct, len(test_loader.dataset),
100. * correct / len(test_loader.dataset)))
return correct / float(len(test_loader.dataset))
def save_checkpoint(state, is_best, filepath):
torch.save(state, os.path.join(filepath, 'checkpoint.pth.tar'))
if is_best:
shutil.copyfile(os.path.join(filepath, 'checkpoint.pth.tar'), os.path.join(filepath, 'model_best.pth.tar'))
best_prec1 = 0.
for epoch in range(args.start_epoch, args.epochs):
if epoch in [args.epochs*0.5, args.epochs*0.75]:
for param_group in optimizer.param_groups:
param_group['lr'] *= 0.1
train(epoch)
prec1 = test()
is_best = prec1 > best_prec1
best_prec1 = max(prec1, best_prec1)
save_checkpoint({
'epoch': epoch + 1,
'state_dict': model.state_dict(),
'best_prec1': best_prec1,
'optimizer': optimizer.state_dict(),
'cfg': model.cfg
}, is_best, filepath=args.save)
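# Editor's note: a typical invocation of this script (paths are hypothetical):
#   python main_cifar_vgg.py ./data --dataset cifar10 --arch vgg --depth 16 \
#       --epochs 160 --save ./logs/vgg16_cifar10
# The learning rate is divided by 10 at 50% and 75% of the epoch budget by the
# schedule at the top of the training loop above.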
| 8,019 | 43.804469 | 115 | py |
filter-pruning-geometric-median | filter-pruning-geometric-median-master/VGG_cifar/pruning_cifar_vgg.py | from __future__ import print_function
import argparse
import numpy as np
import os
import shutil
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
from torch.autograd import Variable
import os, sys, shutil, time, random
from scipy.spatial import distance
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import models
# Training settings
parser = argparse.ArgumentParser(description='PyTorch Slimming CIFAR training')
parser.add_argument('data_path', type=str, help='Path to dataset')
parser.add_argument('--dataset', type=str, default='cifar100', help='training dataset (default: cifar100)')
parser.add_argument('--batch-size', type=int, default=64, metavar='N',
help='input batch size for training (default: 64)')
parser.add_argument('--test-batch-size', type=int, default=256, metavar='N',
help='input batch size for testing (default: 256)')
parser.add_argument('--epochs', type=int, default=160, metavar='N', help='number of epochs to train (default: 160)')
parser.add_argument('--start-epoch', default=0, type=int, metavar='N', help='manual epoch number (useful on restarts)')
parser.add_argument('--lr', type=float, default=0.1, metavar='LR', help='learning rate (default: 0.1)')
parser.add_argument('--momentum', type=float, default=0.9, metavar='M', help='SGD momentum (default: 0.9)')
parser.add_argument('--weight-decay', '--wd', default=1e-4, type=float, metavar='W',
help='weight decay (default: 1e-4)')
parser.add_argument('--resume', default='', type=str, metavar='PATH', help='path to latest checkpoint (default: none)')
parser.add_argument('--no-cuda', action='store_true', default=False, help='disables CUDA training')
parser.add_argument('--seed', type=int, default=1, metavar='S', help='random seed (default: 1)')
parser.add_argument('--log-interval', type=int, default=100, metavar='N',
help='how many batches to wait before logging training status')
parser.add_argument('--save_path', default='./logs', type=str, metavar='PATH',
help='path to save prune model (default: current directory)')
parser.add_argument('--arch', default='vgg', type=str, help='architecture to use')
parser.add_argument('--depth', default=16, type=int, help='depth of the neural network')
# compress rate
parser.add_argument('--rate_norm', type=float, default=0.9, help='the remaining ratio of pruning based on Norm')
parser.add_argument('--rate_dist', type=float, default=0.1, help='the reducing ratio of pruning based on Distance')
# compress parameter
parser.add_argument('--layer_begin', type=int, default=1, help='index of the first layer to compress')
parser.add_argument('--layer_end', type=int, default=1, help='index of the last layer to compress')
parser.add_argument('--layer_inter', type=int, default=1, help='index stride between compressed layers')
parser.add_argument('--epoch_prune', type=int, default=1, help='number of epochs between pruning steps')
parser.add_argument('--dist_type', default='l2', type=str, choices=['l2', 'l1', 'cos'], help='distance type of GM')
# pretrain model
parser.add_argument('--use_state_dict', dest='use_state_dict', action='store_true', help='use state dcit or not')
parser.add_argument('--use_pretrain', dest='use_pretrain', action='store_true', help='use pre-trained model or not')
parser.add_argument('--pretrain_path', default='', type=str, help='path of pre-trained model')
parser.add_argument('--use_precfg', dest='use_precfg', action='store_true', help='use precfg or not')
parser.add_argument('--evaluate', dest='evaluate', action='store_true', help='evaluate model on validation set')
args = parser.parse_args()
args.cuda = not args.no_cuda and torch.cuda.is_available()
torch.manual_seed(args.seed)
if args.cuda:
torch.cuda.manual_seed(args.seed)
if not os.path.exists(args.save_path):
os.makedirs(args.save_path)
kwargs = {'num_workers': 1, 'pin_memory': True} if args.cuda else {}
def main():
# Init logger
if not os.path.isdir(args.save_path):
os.makedirs(args.save_path)
log = open(os.path.join(args.save_path, 'log_seed_{}.txt'.format(args.seed)), 'w')
print_log('save path : {}'.format(args.save_path), log)
state = {k: v for k, v in args._get_kwargs()}
print_log(state, log)
print_log("Random Seed: {}".format(args.seed), log)
print_log("python version : {}".format(sys.version.replace('\n', ' ')), log)
print_log("torch version : {}".format(torch.__version__), log)
print_log("cudnn version : {}".format(torch.backends.cudnn.version()), log)
print_log("Norm Pruning Rate: {}".format(args.rate_norm), log)
print_log("Distance Pruning Rate: {}".format(args.rate_dist), log)
print_log("Layer Begin: {}".format(args.layer_begin), log)
print_log("Layer End: {}".format(args.layer_end), log)
print_log("Layer Inter: {}".format(args.layer_inter), log)
print_log("Epoch prune: {}".format(args.epoch_prune), log)
print_log("use pretrain: {}".format(args.use_pretrain), log)
print_log("Pretrain path: {}".format(args.pretrain_path), log)
print_log("Dist type: {}".format(args.dist_type), log)
print_log("Pre cfg: {}".format(args.use_precfg), log)
if args.dataset == 'cifar10':
train_loader = torch.utils.data.DataLoader(
datasets.CIFAR10(args.data_path, train=True, download=True,
transform=transforms.Compose([
transforms.Pad(4),
transforms.RandomCrop(32),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
])),
batch_size=args.batch_size, shuffle=True, **kwargs)
test_loader = torch.utils.data.DataLoader(
datasets.CIFAR10(args.data_path, train=False, transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
])),
batch_size=args.test_batch_size, shuffle=False, **kwargs)
else:
train_loader = torch.utils.data.DataLoader(
datasets.CIFAR100(args.data_path, train=True, download=True,
transform=transforms.Compose([
transforms.Pad(4),
transforms.RandomCrop(32),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
])),
batch_size=args.batch_size, shuffle=True, **kwargs)
test_loader = torch.utils.data.DataLoader(
datasets.CIFAR100(args.data_path, train=False, transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
])),
batch_size=args.test_batch_size, shuffle=True, **kwargs)
print_log("=> creating model '{}'".format(args.arch), log)
model = models.__dict__[args.arch](dataset=args.dataset, depth=args.depth)
print_log("=> network :\n {}".format(model), log)
if args.cuda:
model.cuda()
if args.use_pretrain:
if os.path.isfile(args.pretrain_path):
print_log("=> loading pretrain model '{}'".format(args.pretrain_path), log)
else:
dir = '/home/yahe/compress/filter_similarity/logs/main_2'
args.pretrain_path = dir + '/checkpoint.pth.tar'
print_log("Pretrain path: {}".format(args.pretrain_path), log)
pretrain = torch.load(args.pretrain_path)
if args.use_state_dict:
model.load_state_dict(pretrain['state_dict'])
else:
model = pretrain['state_dict']
optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)
if args.resume:
if os.path.isfile(args.resume):
print("=> loading checkpoint '{}'".format(args.resume))
checkpoint = torch.load(args.resume)
args.start_epoch = checkpoint['epoch']
best_prec1 = checkpoint['best_prec1']
model = models.vgg(dataset='cifar10', depth=16, cfg=checkpoint['cfg'])
model.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer'])
print("=> loaded checkpoint '{}' (epoch {}) Prec1: {:f}"
.format(args.resume, checkpoint['epoch'], best_prec1))
if args.cuda:
model = model.cuda()
else:
print("=> no checkpoint found at '{}'".format(args.resume))
if args.evaluate:
time1 = time.time()
test(test_loader, model, log)
time2 = time.time()
print('function took %0.3f ms' % ((time2 - time1) * 1000.0))
return
m = Mask(model)
m.init_length()
print("-" * 10 + "one epoch begin" + "-" * 10)
print("remaining ratio of pruning : Norm is %f" % args.rate_norm)
print("reducing ratio of pruning : Distance is %f" % args.rate_dist)
print("total remaining ratio is %f" % (args.rate_norm - args.rate_dist))
val_acc_1 = test(test_loader, model, log)
print(" accu before is: %.3f %%" % val_acc_1)
m.model = model
m.init_mask(args.rate_norm, args.rate_dist, args.dist_type)
# m.if_zero()
m.do_mask()
m.do_similar_mask()
model = m.model
# m.if_zero()
if args.cuda:
model = model.cuda()
val_acc_2 = test(test_loader, model, log)
print(" accu after is: %s %%" % val_acc_2)
best_prec1 = 0.
for epoch in range(args.start_epoch, args.epochs):
if epoch in [args.epochs * 0.5, args.epochs * 0.75]:
for param_group in optimizer.param_groups:
param_group['lr'] *= 0.1
train(train_loader, model, optimizer, epoch, log)
prec1 = test(test_loader, model, log)
if epoch % args.epoch_prune == 0 or epoch == args.epochs - 1:
m.model = model
m.if_zero()
m.init_mask(args.rate_norm, args.rate_dist, args.dist_type)
m.do_mask()
m.do_similar_mask()
# small_filter_index.append(m.filter_small_index)
# large_filter_index.append(m.filter_large_index)
# save_obj(small_filter_index, 'small_filter_index_2')
# save_obj(large_filter_index, 'large_filter_index_2')
m.if_zero()
model = m.model
if args.cuda:
model = model.cuda()
val_acc_2 = test(test_loader, model, log)
is_best = val_acc_2 > best_prec1
        best_prec1 = max(val_acc_2, best_prec1)
save_checkpoint({
'epoch': epoch + 1,
'state_dict': model.state_dict(),
'best_prec1': best_prec1,
'optimizer': optimizer.state_dict(),
'cfg': model.cfg
}, is_best, filepath=args.save_path)
def train(train_loader, model, optimizer, epoch, log, m=0):
model.train()
avg_loss = 0.
train_acc = 0.
for batch_idx, (data, target) in enumerate(train_loader):
if args.cuda:
data, target = data.cuda(), target.cuda()
data, target = Variable(data), Variable(target)
optimizer.zero_grad()
output = model(data)
loss = F.cross_entropy(output, target)
avg_loss += loss.data[0]
pred = output.data.max(1, keepdim=True)[1]
train_acc += pred.eq(target.data.view_as(pred)).cpu().sum()
loss.backward()
optimizer.step()
if batch_idx % args.log_interval == 0:
print_log('Train Epoch: {} [{}/{} ({:.1f}%)]\tLoss: {:.6f}'.format(
epoch, batch_idx * len(data), len(train_loader.dataset),
100. * batch_idx / len(train_loader), loss.data[0]), log)
def test(test_loader, model, log):
model.eval()
test_loss = 0
correct = 0
for data, target in test_loader:
if args.cuda:
data, target = data.cuda(), target.cuda()
data, target = Variable(data, volatile=True), Variable(target)
output = model(data)
test_loss += F.cross_entropy(output, target, size_average=False).data[0] # sum up batch loss
pred = output.data.max(1, keepdim=True)[1] # get the index of the max log-probability
correct += pred.eq(target.data.view_as(pred)).cpu().sum()
test_loss /= len(test_loader.dataset)
print_log('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.1f}%)\n'.format(
test_loss, correct, len(test_loader.dataset),
100. * correct / len(test_loader.dataset)), log)
return correct / float(len(test_loader.dataset))
def save_checkpoint(state, is_best, filepath):
torch.save(state, os.path.join(filepath, 'checkpoint.pth.tar'))
if is_best:
shutil.copyfile(os.path.join(filepath, 'checkpoint.pth.tar'), os.path.join(filepath, 'model_best.pth.tar'))
def print_log(print_string, log):
print("{}".format(print_string))
log.write('{}\n'.format(print_string))
log.flush()
class Mask:
def __init__(self, model):
self.model_size = {}
self.model_length = {}
self.compress_rate = {}
self.distance_rate = {}
self.mat = {}
self.model = model
self.mask_index = []
self.filter_small_index = {}
self.filter_large_index = {}
self.similar_matrix = {}
self.norm_matrix = {}
self.cfg = [32, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 256, 256, 256, 'M', 256, 256, 256]
def get_codebook(self, weight_torch, compress_rate, length):
weight_vec = weight_torch.view(length)
weight_np = weight_vec.cpu().numpy()
weight_abs = np.abs(weight_np)
weight_sort = np.sort(weight_abs)
threshold = weight_sort[int(length * (1 - compress_rate))]
weight_np[weight_np <= -threshold] = 1
weight_np[weight_np >= threshold] = 1
weight_np[weight_np != 1] = 0
print("codebook done")
return weight_np
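    # Editor's note (worked example, assumed toy values): for weights
    # [0.1, -0.9, 0.5, -0.2] and compress_rate 0.5, the sorted magnitudes are
    # [0.1, 0.2, 0.5, 0.9] and the threshold is the entry at index
    # int(4 * (1 - 0.5)) = 2, i.e. 0.5, so the codebook keeps -0.9 and 0.5
    # (set to 1) and zeroes 0.1 and -0.2 -- the top-magnitude half survives.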
def get_filter_codebook(self, weight_torch, compress_rate, length):
codebook = np.ones(length)
if len(weight_torch.size()) == 4:
filter_pruned_num = int(weight_torch.size()[0] * (1 - compress_rate))
weight_vec = weight_torch.view(weight_torch.size()[0], -1)
norm2 = torch.norm(weight_vec, 2, 1)
norm2_np = norm2.cpu().numpy()
filter_index = norm2_np.argsort()[:filter_pruned_num]
# norm1_sort = np.sort(norm1_np)
# threshold = norm1_sort[int (weight_torch.size()[0] * (1-compress_rate) )]
kernel_length = weight_torch.size()[1] * weight_torch.size()[2] * weight_torch.size()[3]
for x in range(0, len(filter_index)):
codebook[filter_index[x] * kernel_length: (filter_index[x] + 1) * kernel_length] = 0
print("filter codebook done")
else:
pass
return codebook
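    # Editor's note: in the flattened view of a (out, in, k, k) convolution
    # weight, filter i occupies the contiguous slice
    # [i * kernel_length, (i + 1) * kernel_length) with
    # kernel_length = in * k * k, which is why the codebook above zeroes whole
    # slices to mask the lowest-norm filters.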
def get_filter_index(self, weight_torch, compress_rate, length):
if len(weight_torch.size()) == 4:
filter_pruned_num = int(weight_torch.size()[0] * (1 - compress_rate))
weight_vec = weight_torch.view(weight_torch.size()[0], -1)
# norm1 = torch.norm(weight_vec, 1, 1)
# norm1_np = norm1.cpu().numpy()
norm2 = torch.norm(weight_vec, 2, 1)
norm2_np = norm2.cpu().numpy()
filter_small_index = []
filter_large_index = []
filter_large_index = norm2_np.argsort()[filter_pruned_num:]
filter_small_index = norm2_np.argsort()[:filter_pruned_num]
# norm1_sort = np.sort(norm1_np)
# threshold = norm1_sort[int (weight_torch.size()[0] * (1-compress_rate) )]
kernel_length = weight_torch.size()[1] * weight_torch.size()[2] * weight_torch.size()[3]
# print("filter index done")
else:
pass
return filter_small_index, filter_large_index
def get_filter_similar_old(self, weight_torch, compress_rate, distance_rate, length):
codebook = np.ones(length)
if len(weight_torch.size()) == 4:
filter_pruned_num = int(weight_torch.size()[0] * (1 - compress_rate))
similar_pruned_num = int(weight_torch.size()[0] * distance_rate)
weight_vec = weight_torch.view(weight_torch.size()[0], -1)
# norm1 = torch.norm(weight_vec, 1, 1)
# norm1_np = norm1.cpu().numpy()
norm2 = torch.norm(weight_vec, 2, 1)
norm2_np = norm2.cpu().numpy()
filter_small_index = []
filter_large_index = []
filter_large_index = norm2_np.argsort()[filter_pruned_num:]
filter_small_index = norm2_np.argsort()[:filter_pruned_num]
print('weight_vec.size', weight_vec.size())
# distance using pytorch function
similar_matrix = torch.zeros((len(filter_large_index), len(filter_large_index)))
for x1, x2 in enumerate(filter_large_index):
for y1, y2 in enumerate(filter_large_index):
# cos = torch.nn.CosineSimilarity(dim=1, eps=1e-6)
# similar_matrix[x1, y1] = cos(weight_vec[x2].view(1, -1), weight_vec[y2].view(1, -1))[0]
pdist = torch.nn.PairwiseDistance(p=2)
# print('weight_vec[x2].size', weight_vec[x2].size())
similar_matrix[x1, y1] = pdist(weight_vec[x2].view(1, -1), weight_vec[y2].view(1, -1))[0][0]
# print('weight_vec[x2].size after', weight_vec[x2].size())
# more similar with other filter indicates large in the sum of row
similar_sum = torch.sum(torch.abs(similar_matrix), 0).numpy()
# for cos similar: get the filter index with largest similarity
# similar_pruned_num = len(similar_sum) - similar_pruned_num
# similar_large_index = similar_sum.argsort()[similar_pruned_num:]
# similar_small_index = similar_sum.argsort()[: similar_pruned_num]
# similar_index_for_filter = [filter_large_index[i] for i in similar_large_index]
# for distance similar: get the filter index with largest similarity == small distance
similar_large_index = similar_sum.argsort()[similar_pruned_num:]
similar_small_index = similar_sum.argsort()[: similar_pruned_num]
similar_index_for_filter = [filter_large_index[i] for i in similar_small_index]
print('filter_large_index', filter_large_index)
print('filter_small_index', filter_small_index)
print('similar_sum', similar_sum)
print('similar_large_index', similar_large_index)
print('similar_small_index', similar_small_index)
print('similar_index_for_filter', similar_index_for_filter)
kernel_length = weight_torch.size()[1] * weight_torch.size()[2] * weight_torch.size()[3]
for x in range(0, len(similar_index_for_filter)):
codebook[
similar_index_for_filter[x] * kernel_length: (similar_index_for_filter[x] + 1) * kernel_length] = 0
print("similar index done")
else:
pass
return codebook
    # optimized for fast calculation
def get_filter_similar(self, weight_torch, compress_rate, distance_rate, length, dist_type="l2"):
codebook = np.ones(length)
if len(weight_torch.size()) == 4:
filter_pruned_num = int(weight_torch.size()[0] * (1 - compress_rate))
similar_pruned_num = int(weight_torch.size()[0] * distance_rate)
weight_vec = weight_torch.view(weight_torch.size()[0], -1)
if dist_type == "l2" or "cos":
norm = torch.norm(weight_vec, 2, 1)
norm_np = norm.cpu().numpy()
elif dist_type == "l1":
norm = torch.norm(weight_vec, 1, 1)
norm_np = norm.cpu().numpy()
filter_small_index = []
filter_large_index = []
filter_large_index = norm_np.argsort()[filter_pruned_num:]
filter_small_index = norm_np.argsort()[:filter_pruned_num]
# # distance using pytorch function
# similar_matrix = torch.zeros((len(filter_large_index), len(filter_large_index)))
# for x1, x2 in enumerate(filter_large_index):
# for y1, y2 in enumerate(filter_large_index):
# # cos = torch.nn.CosineSimilarity(dim=1, eps=1e-6)
# # similar_matrix[x1, y1] = cos(weight_vec[x2].view(1, -1), weight_vec[y2].view(1, -1))[0]
# pdist = torch.nn.PairwiseDistance(p=2)
# similar_matrix[x1, y1] = pdist(weight_vec[x2].view(1, -1), weight_vec[y2].view(1, -1))[0][0]
# # more similar with other filter indicates large in the sum of row
# similar_sum = torch.sum(torch.abs(similar_matrix), 0).numpy()
# distance using numpy function
indices = torch.LongTensor(filter_large_index).cuda()
weight_vec_after_norm = torch.index_select(weight_vec, 0, indices).cpu().numpy()
# for euclidean distance
            if dist_type == "l2" or dist_type == "l1":
similar_matrix = distance.cdist(weight_vec_after_norm, weight_vec_after_norm, 'euclidean')
elif dist_type == "cos": # for cos similarity
similar_matrix = 1 - distance.cdist(weight_vec_after_norm, weight_vec_after_norm, 'cosine')
similar_sum = np.sum(np.abs(similar_matrix), axis=0)
# for cos similar: get the filter index with largest similarity
# similar_pruned_num = len(similar_sum) - similar_pruned_num
# similar_large_index = similar_sum.argsort()[similar_pruned_num:]
# similar_small_index = similar_sum.argsort()[: similar_pruned_num]
# similar_index_for_filter = [filter_large_index[i] for i in similar_large_index]
# for distance similar: get the filter index with largest similarity == small distance
similar_large_index = similar_sum.argsort()[similar_pruned_num:]
similar_small_index = similar_sum.argsort()[: similar_pruned_num]
similar_index_for_filter = [filter_large_index[i] for i in similar_small_index]
print('filter_large_index', filter_large_index)
print('filter_small_index', filter_small_index)
print('similar_sum', similar_sum)
print('similar_large_index', similar_large_index)
print('similar_small_index', similar_small_index)
print('similar_index_for_filter', similar_index_for_filter)
kernel_length = weight_torch.size()[1] * weight_torch.size()[2] * weight_torch.size()[3]
for x in range(0, len(similar_index_for_filter)):
codebook[
similar_index_for_filter[x] * kernel_length: (similar_index_for_filter[x] + 1) * kernel_length] = 0
print("similar index done")
else:
pass
return codebook
def convert2tensor(self, x):
x = torch.FloatTensor(x)
return x
def init_length(self):
for index, item in enumerate(self.model.parameters()):
self.model_size[index] = item.size()
for index1 in self.model_size:
for index2 in range(0, len(self.model_size[index1])):
if index2 == 0:
self.model_length[index1] = self.model_size[index1][0]
else:
self.model_length[index1] *= self.model_size[index1][index2]
def init_rate(self, rate_norm_per_layer, rate_dist_per_layer, pre_cfg=True):
if args.arch == 'vgg':
cfg = [32, 64, 128, 128, 256, 256, 256, 256, 256, 256, 256, 256, 256]
cfg_index = 0
for index, item in enumerate(self.model.named_parameters()):
self.compress_rate[index] = 1
self.distance_rate[index] = 1
if len(item[1].size()) == 4:
print(item[1].size())
if not pre_cfg:
self.compress_rate[index] = rate_norm_per_layer
self.distance_rate[index] = rate_dist_per_layer
self.mask_index.append(index)
print(item[0], "self.mask_index", self.mask_index)
else:
self.compress_rate[index] = rate_norm_per_layer
self.distance_rate[index] = 1 - cfg[cfg_index] / item[1].size()[0]
self.mask_index.append(index)
print(item[0], "self.mask_index", self.mask_index, cfg_index, cfg[cfg_index], item[1].size()[0],
self.distance_rate[index], )
print("self.distance_rate", self.distance_rate)
cfg_index += 1
# for key in range(args.layer_begin, args.layer_end + 1, args.layer_inter):
# self.compress_rate[key] = rate_norm_per_layer
# self.distance_rate[key] = rate_dist_per_layer
# different setting for different architecture
# if args.arch == 'resnet20':
# last_index = 57
# elif args.arch == 'resnet32':
# last_index = 93
# elif args.arch == 'resnet56':
# last_index = 165
# elif args.arch == 'resnet110':
# last_index = 327
# # to jump the last fc layer
# self.mask_index = [x for x in range(0, last_index, 3)]
def init_mask(self, rate_norm_per_layer, rate_dist_per_layer, dist_type):
self.init_rate(rate_norm_per_layer, rate_dist_per_layer, pre_cfg=args.use_precfg)
for index, item in enumerate(self.model.parameters()):
if index in self.mask_index:
# mask for norm criterion
self.mat[index] = self.get_filter_codebook(item.data, self.compress_rate[index],
self.model_length[index])
self.mat[index] = self.convert2tensor(self.mat[index])
if args.cuda:
self.mat[index] = self.mat[index].cuda()
# # get result about filter index
# self.filter_small_index[index], self.filter_large_index[index] = \
# self.get_filter_index(item.data, self.compress_rate[index], self.model_length[index])
# mask for distance criterion
self.similar_matrix[index] = self.get_filter_similar(item.data, self.compress_rate[index],
self.distance_rate[index],
self.model_length[index], dist_type=dist_type)
self.similar_matrix[index] = self.convert2tensor(self.similar_matrix[index])
if args.cuda:
self.similar_matrix[index] = self.similar_matrix[index].cuda()
print("mask Ready")
def do_mask(self):
for index, item in enumerate(self.model.parameters()):
if index in self.mask_index:
a = item.data.view(self.model_length[index])
b = a * self.mat[index]
item.data = b.view(self.model_size[index])
print("mask Done")
def do_similar_mask(self):
for index, item in enumerate(self.model.parameters()):
if index in self.mask_index:
a = item.data.view(self.model_length[index])
b = a * self.similar_matrix[index]
item.data = b.view(self.model_size[index])
print("mask similar Done")
def do_grad_mask(self):
for index, item in enumerate(self.model.parameters()):
if index in self.mask_index:
a = item.grad.data.view(self.model_length[index])
# reverse the mask of model
# b = a * (1 - self.mat[index])
b = a * self.mat[index]
b = b * self.similar_matrix[index]
item.grad.data = b.view(self.model_size[index])
# print("grad zero Done")
def if_zero(self):
for index, item in enumerate(self.model.parameters()):
if (index in self.mask_index):
# if index == 0:
a = item.data.view(self.model_length[index])
b = a.cpu().numpy()
print(
"number of nonzero weight is %d, zero is %d" % (np.count_nonzero(b), len(b) - np.count_nonzero(b)))
if __name__ == '__main__':
main()
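# --- Illustration (added sketch, not part of the original script) ---
# Typical call order for the mask-pruning methods above (`m` is a hypothetical
# instance of the class they belong to):
#   m.init_length()                               # record per-layer sizes
#   m.init_mask(rate_norm, rate_dist, dist_type)  # build norm + distance masks
#   m.do_mask(); m.do_similar_mask()              # zero the selected filters
#   # in the training loop, after loss.backward() and before optimizer.step():
#   m.do_grad_mask()                              # keep pruned filters from updating
#
# Below, a minimal self-contained demo of the distance criterion implemented in
# get_filter_similar: flatten the filters, keep the larger-norm ones, then mark
# for pruning the filters whose summed distance to all kept filters is smallest
# (the ones closest to the "center" of the layer). All shapes and counts are
# made up for the demo.
def _fpgm_demo():
    import numpy as np
    from scipy.spatial import distance
    rng = np.random.RandomState(0)
    weight_vec = rng.randn(8, 27)                    # 8 filters of size 3*3*3
    norm_keep = np.linalg.norm(weight_vec, axis=1).argsort()[2:]  # drop 2 by norm
    kept = weight_vec[norm_keep]
    similar_matrix = distance.cdist(kept, kept, 'euclidean')
    similar_sum = np.abs(similar_matrix).sum(axis=0)
    prune_local = similar_sum.argsort()[:2]          # 2 filters nearest the rest
    return [norm_keep[i] for i in prune_local]       # original filter indices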
| 29,830 | 48.470978 | 120 | py |
filter-pruning-geometric-median | filter-pruning-geometric-median-master/VGG_cifar/main_cifar_vgg_log.py | from __future__ import print_function
import argparse
import numpy as np
import os
import shutil
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
from torch.autograd import Variable
import os, sys, shutil, time, random
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import models
# Training settings
parser = argparse.ArgumentParser(description='PyTorch Slimming CIFAR training')
parser.add_argument('data_path', type=str, help='Path to dataset')
parser.add_argument('--dataset', type=str, default='cifar100',
help='training dataset (default: cifar100)')
parser.add_argument('--batch-size', type=int, default=64, metavar='N',
help='input batch size for training (default: 64)')
parser.add_argument('--test-batch-size', type=int, default=256, metavar='N',
help='input batch size for testing (default: 256)')
parser.add_argument('--epochs', type=int, default=160, metavar='N',
help='number of epochs to train (default: 160)')
parser.add_argument('--start-epoch', default=0, type=int, metavar='N',
help='manual epoch number (useful on restarts)')
parser.add_argument('--lr', type=float, default=0.1, metavar='LR',
help='learning rate (default: 0.1)')
parser.add_argument('--momentum', type=float, default=0.9, metavar='M',
help='SGD momentum (default: 0.9)')
parser.add_argument('--weight-decay', '--wd', default=1e-4, type=float,
metavar='W', help='weight decay (default: 1e-4)')
parser.add_argument('--resume', default='', type=str, metavar='PATH',
help='path to latest checkpoint (default: none)')
parser.add_argument('--no-cuda', action='store_true', default=False,
help='disables CUDA training')
parser.add_argument('--seed', type=int, default=1, metavar='S',
help='random seed (default: 1)')
parser.add_argument('--log-interval', type=int, default=100, metavar='N',
help='how many batches to wait before logging training status')
parser.add_argument('--save_path', default='./logs', type=str, metavar='PATH',
help='path to save prune model (default: current directory)')
parser.add_argument('--arch', default='vgg', type=str,
help='architecture to use')
parser.add_argument('--depth', default=16, type=int,
help='depth of the neural network')
parser.add_argument('--use_scratch', dest='use_scratch', action='store_true', help='save scratch model or not')
parser.add_argument('--train_scratch', default='', type=str, metavar='PATH', help='train the small scratch model')
args = parser.parse_args()
args.cuda = not args.no_cuda and torch.cuda.is_available()
torch.manual_seed(args.seed)
if args.cuda:
torch.cuda.manual_seed(args.seed)
if not os.path.exists(args.save_path):
os.makedirs(args.save_path)
kwargs = {'num_workers': 1, 'pin_memory': True} if args.cuda else {}
def main():
# Init logger
if not os.path.isdir(args.save_path):
os.makedirs(args.save_path)
log = open(os.path.join(args.save_path, 'log_seed_{}.txt'.format(args.seed)), 'w')
print_log('save path : {}'.format(args.save_path), log)
state = {k: v for k, v in args._get_kwargs()}
print_log(state, log)
print_log("Random Seed: {}".format(args.seed), log)
print_log("python version : {}".format(sys.version.replace('\n', ' ')), log)
print_log("torch version : {}".format(torch.__version__), log)
print_log("cudnn version : {}".format(torch.backends.cudnn.version()), log)
if args.dataset == 'cifar10':
train_loader = torch.utils.data.DataLoader(
datasets.CIFAR10(args.data_path, train=True, download=True,
transform=transforms.Compose([
transforms.Pad(4),
transforms.RandomCrop(32),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
])),
batch_size=args.batch_size, shuffle=True, **kwargs)
test_loader = torch.utils.data.DataLoader(
datasets.CIFAR10(args.data_path, train=False, transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
])),
batch_size=args.test_batch_size, shuffle=True, **kwargs)
else:
train_loader = torch.utils.data.DataLoader(
datasets.CIFAR100(args.data_path, train=True, download=True,
transform=transforms.Compose([
transforms.Pad(4),
transforms.RandomCrop(32),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
])),
batch_size=args.batch_size, shuffle=True, **kwargs)
test_loader = torch.utils.data.DataLoader(
datasets.CIFAR100(args.data_path, train=False, transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
])),
batch_size=args.test_batch_size, shuffle=False, **kwargs)
print_log("=> creating model '{}'".format(args.arch), log)
model = models.__dict__[args.arch](dataset=args.dataset, depth=args.depth)
print_log("=> network :\n {}".format(model), log)
if args.cuda:
model.cuda()
optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)
if args.resume:
if os.path.isfile(args.resume):
print("=> loading checkpoint '{}'".format(args.resume))
checkpoint = torch.load(args.resume)
args.start_epoch = checkpoint['epoch']
best_prec1 = checkpoint['best_prec1']
model.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer'])
print("=> loaded checkpoint '{}' (epoch {}) Prec1: {:f}"
.format(args.resume, checkpoint['epoch'], best_prec1))
else:
print("=> no checkpoint found at '{}'".format(args.resume))
# for training from scratch
if args.use_scratch and not args.train_scratch:
model = models.__dict__[args.arch](dataset=args.dataset, depth=args.depth)
save_checkpoint({
'epoch': 0,
'state_dict': model.state_dict(),
'best_prec1': 0,
'optimizer': optimizer.state_dict(),
'cfg': model.cfg
}, is_best=0, filepath=args.save_path)
return
elif args.train_scratch:
model = models.__dict__[args.arch](dataset=args.dataset, depth=args.depth,
cfg=[32, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 256, 256, 256, 'M', 256,
256, 256])
if os.path.isfile(args.train_scratch):
print("=> loading pruned scratch model '{}'".format(args.train_scratch))
checkpoint = torch.load(args.train_scratch)
model.load_state_dict(checkpoint['state_dict'])
optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)
else:
print("=> no pruned scratch model found at '{}'".format(args.train_scratch))
else:
pass
if args.cuda:
model.cuda()
best_prec1 = 0.
for epoch in range(args.start_epoch, args.epochs):
if epoch in [args.epochs * 0.5, args.epochs * 0.75]:
for param_group in optimizer.param_groups:
param_group['lr'] *= 0.1
train(train_loader, model, optimizer, epoch, log)
prec1 = test(test_loader, model, log)
is_best = prec1 > best_prec1
best_prec1 = max(prec1, best_prec1)
save_checkpoint({
'epoch': epoch + 1,
'state_dict': model.state_dict(),
'best_prec1': best_prec1,
'optimizer': optimizer.state_dict(),
'cfg': model.cfg
}, is_best, filepath=args.save_path)
def train(train_loader, model, optimizer, epoch, log, m=0):
model.train()
avg_loss = 0.
train_acc = 0.
for batch_idx, (data, target) in enumerate(train_loader):
if args.cuda:
data, target = data.cuda(), target.cuda()
data, target = Variable(data), Variable(target)
optimizer.zero_grad()
output = model(data)
loss = F.cross_entropy(output, target)
avg_loss += loss.data[0]
pred = output.data.max(1, keepdim=True)[1]
train_acc += pred.eq(target.data.view_as(pred)).cpu().sum()
loss.backward()
optimizer.step()
if batch_idx % args.log_interval == 0:
print_log('Train Epoch: {} [{}/{} ({:.1f}%)]\tLoss: {:.6f}'.format(
epoch, batch_idx * len(data), len(train_loader.dataset),
100. * batch_idx / len(train_loader), loss.data[0]), log)
def test(test_loader, model, log):
model.eval()
test_loss = 0
correct = 0
for data, target in test_loader:
if args.cuda:
data, target = data.cuda(), target.cuda()
data, target = Variable(data, volatile=True), Variable(target)
output = model(data)
test_loss += F.cross_entropy(output, target, size_average=False).data[0] # sum up batch loss
pred = output.data.max(1, keepdim=True)[1] # get the index of the max log-probability
correct += pred.eq(target.data.view_as(pred)).cpu().sum()
test_loss /= len(test_loader.dataset)
print_log('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.1f}%)\n'.format(
test_loss, correct, len(test_loader.dataset),
100. * correct / len(test_loader.dataset)), log)
return correct / float(len(test_loader.dataset))
def save_checkpoint(state, is_best, filepath):
torch.save(state, os.path.join(filepath, 'checkpoint.pth.tar'))
if is_best:
shutil.copyfile(os.path.join(filepath, 'checkpoint.pth.tar'), os.path.join(filepath, 'model_best.pth.tar'))
def print_log(print_string, log):
print("{}".format(print_string))
log.write('{}\n'.format(print_string))
log.flush()
if __name__ == '__main__':
main()
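# --- Illustration (added sketch, not part of the original script) ---
# The manual decay in main() (lr multiplied by 0.1 at 50% and 75% of training)
# can equivalently be written with torch.optim.lr_scheduler.MultiStepLR; the
# optimizer built here is a stand-in for the one created in main().
def _multistep_lr_sketch(model, epochs=160, lr=0.1):
    sgd = optim.SGD(model.parameters(), lr=lr, momentum=0.9)
    scheduler = optim.lr_scheduler.MultiStepLR(
        sgd, milestones=[int(epochs * 0.5), int(epochs * 0.75)], gamma=0.1)
    return sgd, scheduler  # call scheduler.step() once per epoch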
| 10,842 | 44.179167 | 121 | py |
filter-pruning-geometric-median | filter-pruning-geometric-median-master/VGG_cifar/PFEC_vggprune.py | import argparse
import numpy as np
import os
import sys
import torch
import torch.nn as nn
from torch.autograd import Variable
from torchvision import datasets, transforms
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from models import *
# Prune settings
parser = argparse.ArgumentParser(description='PyTorch Slimming CIFAR prune')
parser.add_argument('--dataset', type=str, default='cifar10',
help='training dataset (default: cifar10)')
parser.add_argument('--test-batch-size', type=int, default=256, metavar='N',
help='input batch size for testing (default: 256)')
parser.add_argument('--no-cuda', action='store_true', default=False,
help='disables CUDA training')
parser.add_argument('--depth', type=int, default=16,
help='depth of the vgg')
parser.add_argument('--model', default='', type=str, metavar='PATH',
help='path to the model (default: none)')
parser.add_argument('--save', default='.', type=str, metavar='PATH',
help='path to save pruned model (default: none)')
args = parser.parse_args()
args.cuda = not args.no_cuda and torch.cuda.is_available()
if not os.path.exists(args.save):
os.makedirs(args.save)
model = vgg(dataset=args.dataset, depth=args.depth)
if args.cuda:
model.cuda()
if args.model:
if os.path.isfile(args.model):
print("=> loading checkpoint '{}'".format(args.model))
checkpoint = torch.load(args.model)
args.start_epoch = checkpoint['epoch']
best_prec1 = checkpoint['best_prec1']
model.load_state_dict(checkpoint['state_dict'])
print("=> loaded checkpoint '{}' (epoch {}) Prec1: {:f}"
.format(args.model, checkpoint['epoch'], best_prec1))
else:
        print("=> no checkpoint found at '{}'".format(args.model))
print('Pre-processing Successful!')
# simple test model after Pre-processing prune (simple set BN scales to zeros)
def test(model):
kwargs = {'num_workers': 1, 'pin_memory': True} if args.cuda else {}
if args.dataset == 'cifar10':
test_loader = torch.utils.data.DataLoader(
datasets.CIFAR10('./data/cifar.python', train=False, transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))])),
batch_size=args.test_batch_size, shuffle=True, **kwargs)
elif args.dataset == 'cifar100':
test_loader = torch.utils.data.DataLoader(
datasets.CIFAR100('./data/cifar.python', train=False, transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))])),
batch_size=args.test_batch_size, shuffle=True, **kwargs)
else:
raise ValueError("No valid dataset is given.")
model.eval()
correct = 0
for data, target in test_loader:
if args.cuda:
data, target = data.cuda(), target.cuda()
data, target = Variable(data, volatile=True), Variable(target)
output = model(data)
pred = output.data.max(1, keepdim=True)[1] # get the index of the max log-probability
correct += pred.eq(target.data.view_as(pred)).cpu().sum()
print('\nTest set: Accuracy: {}/{} ({:.1f}%)\n'.format(
correct, len(test_loader.dataset), 100. * correct / len(test_loader.dataset)))
return correct / float(len(test_loader.dataset))
acc = test(model)
cfg = [32, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 256, 256, 256, 'M', 256, 256, 256]
cfg_mask = []
layer_id = 0
for m in model.modules():
if isinstance(m, nn.Conv2d):
out_channels = m.weight.data.shape[0]
if out_channels == cfg[layer_id]:
cfg_mask.append(torch.ones(out_channels))
layer_id += 1
continue
weight_copy = m.weight.data.abs().clone()
weight_copy = weight_copy.cpu().numpy()
L1_norm = np.sum(weight_copy, axis=(1, 2, 3))
arg_max = np.argsort(L1_norm)
arg_max_rev = arg_max[::-1][:cfg[layer_id]]
assert arg_max_rev.size == cfg[layer_id], "size of arg_max_rev not correct"
mask = torch.zeros(out_channels)
mask[arg_max_rev.tolist()] = 1
cfg_mask.append(mask)
layer_id += 1
elif isinstance(m, nn.MaxPool2d):
layer_id += 1
newmodel = vgg(dataset=args.dataset, cfg=cfg)
if args.cuda:
newmodel.cuda()
start_mask = torch.ones(3)
layer_id_in_cfg = 0
end_mask = cfg_mask[layer_id_in_cfg]
for [m0, m1] in zip(model.modules(), newmodel.modules()):
if isinstance(m0, nn.BatchNorm2d):
idx1 = np.squeeze(np.argwhere(np.asarray(end_mask.cpu().numpy())))
if idx1.size == 1:
idx1 = np.resize(idx1,(1,))
m1.weight.data = m0.weight.data[idx1.tolist()].clone()
m1.bias.data = m0.bias.data[idx1.tolist()].clone()
m1.running_mean = m0.running_mean[idx1.tolist()].clone()
m1.running_var = m0.running_var[idx1.tolist()].clone()
layer_id_in_cfg += 1
start_mask = end_mask
if layer_id_in_cfg < len(cfg_mask): # do not change in Final FC
end_mask = cfg_mask[layer_id_in_cfg]
elif isinstance(m0, nn.Conv2d):
idx0 = np.squeeze(np.argwhere(np.asarray(start_mask.cpu().numpy())))
idx1 = np.squeeze(np.argwhere(np.asarray(end_mask.cpu().numpy())))
print('In shape: {:d}, Out shape {:d}.'.format(idx0.size, idx1.size))
if idx0.size == 1:
idx0 = np.resize(idx0, (1,))
if idx1.size == 1:
idx1 = np.resize(idx1, (1,))
w1 = m0.weight.data[:, idx0.tolist(), :, :].clone()
w1 = w1[idx1.tolist(), :, :, :].clone()
m1.weight.data = w1.clone()
elif isinstance(m0, nn.Linear):
if layer_id_in_cfg == len(cfg_mask):
idx0 = np.squeeze(np.argwhere(np.asarray(cfg_mask[-1].cpu().numpy())))
if idx0.size == 1:
idx0 = np.resize(idx0, (1,))
m1.weight.data = m0.weight.data[:, idx0].clone()
m1.bias.data = m0.bias.data.clone()
layer_id_in_cfg += 1
continue
m1.weight.data = m0.weight.data.clone()
m1.bias.data = m0.bias.data.clone()
elif isinstance(m0, nn.BatchNorm1d):
m1.weight.data = m0.weight.data.clone()
m1.bias.data = m0.bias.data.clone()
m1.running_mean = m0.running_mean.clone()
m1.running_var = m0.running_var.clone()
torch.save({'cfg': cfg, 'state_dict': newmodel.state_dict()}, os.path.join(args.save, 'pruned.pth.tar'))
print(newmodel)
model = newmodel
acc = test(model)
num_parameters = sum([param.nelement() for param in newmodel.parameters()])
with open(os.path.join(args.save, "prune.txt"), "w") as fp:
fp.write("Number of parameters: \n"+str(num_parameters)+"\n")
fp.write("Test accuracy: \n"+str(acc)+"\n")
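# --- Illustration (added sketch, not part of the original script) ---
# Toy version of the mask construction above: keep the cfg[layer_id] filters
# with the largest L1 norm and zero out the rest. The sizes here (8 filters,
# keep 5) are made up for the demo.
def _l1_mask_demo():
    weight = torch.randn(8, 3, 3, 3)
    L1_norm = weight.abs().sum(dim=(1, 2, 3)).numpy()
    keep = np.argsort(L1_norm)[::-1][:5]   # indices of the 5 largest norms
    mask = torch.zeros(8)
    mask[keep.tolist()] = 1
    return mask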
| 6,938 | 40.550898 | 104 | py |
filter-pruning-geometric-median | filter-pruning-geometric-median-master/VGG_cifar/PFEC_finetune.py | from __future__ import print_function
import argparse
import numpy as np
import os
import sys
import shutil
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
from torch.autograd import Variable
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import models
# Training settings
parser = argparse.ArgumentParser(description='PyTorch Slimming CIFAR training')
parser.add_argument('--dataset', type=str, default='cifar100',
help='training dataset (default: cifar100)')
parser.add_argument('--refine', default='', type=str, metavar='PATH',
help='path to the pruned model to be fine tuned')
parser.add_argument('--batch-size', type=int, default=64, metavar='N',
help='input batch size for training (default: 64)')
parser.add_argument('--test-batch-size', type=int, default=256, metavar='N',
help='input batch size for testing (default: 256)')
parser.add_argument('--epochs', type=int, default=40, metavar='N',
help='number of epochs to train (default: 160)')
parser.add_argument('--start-epoch', default=0, type=int, metavar='N',
help='manual epoch number (useful on restarts)')
parser.add_argument('--lr', type=float, default=0.001, metavar='LR',
help='learning rate (default: 0.1)')
parser.add_argument('--momentum', type=float, default=0.9, metavar='M',
help='SGD momentum (default: 0.9)')
parser.add_argument('--weight-decay', '--wd', default=1e-4, type=float,
metavar='W', help='weight decay (default: 1e-4)')
parser.add_argument('--resume', default='', type=str, metavar='PATH',
help='path to latest checkpoint (default: none)')
parser.add_argument('--no-cuda', action='store_true', default=False,
help='disables CUDA training')
parser.add_argument('--seed', type=int, default=1, metavar='S',
help='random seed (default: 1)')
parser.add_argument('--log-interval', type=int, default=100, metavar='N',
help='how many batches to wait before logging training status')
parser.add_argument('--save', default='./logs', type=str, metavar='PATH',
help='path to save prune model (default: current directory)')
parser.add_argument('--arch', default='vgg', type=str,
help='architecture to use')
parser.add_argument('--depth', default=16, type=int,
help='depth of the neural network')
args = parser.parse_args()
args.cuda = not args.no_cuda and torch.cuda.is_available()
torch.manual_seed(args.seed)
if args.cuda:
torch.cuda.manual_seed(args.seed)
if not os.path.exists(args.save):
os.makedirs(args.save)
kwargs = {'num_workers': 1, 'pin_memory': True} if args.cuda else {}
if args.dataset == 'cifar10':
train_loader = torch.utils.data.DataLoader(
datasets.CIFAR10('./data/cifar.python', train=True, download=True,
transform=transforms.Compose([
transforms.Pad(4),
transforms.RandomCrop(32),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
])),
batch_size=args.batch_size, shuffle=True, **kwargs)
test_loader = torch.utils.data.DataLoader(
datasets.CIFAR10('./data/cifar.python', train=False, transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
])),
batch_size=args.test_batch_size, shuffle=True, **kwargs)
else:
train_loader = torch.utils.data.DataLoader(
datasets.CIFAR100('./data/cifar.python', train=True, download=True,
transform=transforms.Compose([
transforms.Pad(4),
transforms.RandomCrop(32),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
])),
batch_size=args.batch_size, shuffle=True, **kwargs)
test_loader = torch.utils.data.DataLoader(
datasets.CIFAR100('./data/cifar.python', train=False, transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
])),
batch_size=args.test_batch_size, shuffle=True, **kwargs)
model = models.__dict__[args.arch](dataset=args.dataset, depth=args.depth)
if args.refine:
checkpoint = torch.load(args.refine)
model = models.__dict__[args.arch](dataset=args.dataset, depth=args.depth, cfg=checkpoint['cfg'])
model.load_state_dict(checkpoint['state_dict'])
if args.cuda:
model.cuda()
optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)
if args.resume:
if os.path.isfile(args.resume):
print("=> loading checkpoint '{}'".format(args.resume))
checkpoint = torch.load(args.resume)
args.start_epoch = checkpoint['epoch']
best_prec1 = checkpoint['best_prec1']
model.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer'])
print("=> loaded checkpoint '{}' (epoch {}) Prec1: {:f}"
.format(args.resume, checkpoint['epoch'], best_prec1))
else:
print("=> no checkpoint found at '{}'".format(args.resume))
def train(epoch):
model.train()
avg_loss = 0.
train_acc = 0.
for batch_idx, (data, target) in enumerate(train_loader):
if args.cuda:
data, target = data.cuda(), target.cuda()
data, target = Variable(data), Variable(target)
optimizer.zero_grad()
output = model(data)
loss = F.cross_entropy(output, target)
avg_loss += loss.data[0]
pred = output.data.max(1, keepdim=True)[1]
train_acc += pred.eq(target.data.view_as(pred)).cpu().sum()
loss.backward()
optimizer.step()
if batch_idx % args.log_interval == 0:
print('Train Epoch: {} [{}/{} ({:.1f}%)]\tLoss: {:.6f}'.format(
epoch, batch_idx * len(data), len(train_loader.dataset),
100. * batch_idx / len(train_loader), loss.data[0]))
def test():
model.eval()
test_loss = 0
correct = 0
for data, target in test_loader:
if args.cuda:
data, target = data.cuda(), target.cuda()
data, target = Variable(data, volatile=True), Variable(target)
output = model(data)
test_loss += F.cross_entropy(output, target, size_average=False).data[0] # sum up batch loss
pred = output.data.max(1, keepdim=True)[1] # get the index of the max log-probability
correct += pred.eq(target.data.view_as(pred)).cpu().sum()
test_loss /= len(test_loader.dataset)
print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.1f}%)\n'.format(
test_loss, correct, len(test_loader.dataset),
100. * correct / len(test_loader.dataset)))
return correct / float(len(test_loader.dataset))
def save_checkpoint(state, is_best, filepath):
torch.save(state, os.path.join(filepath, 'checkpoint.pth.tar'))
if is_best:
shutil.copyfile(os.path.join(filepath, 'checkpoint.pth.tar'), os.path.join(filepath, 'model_best.pth.tar'))
best_prec1 = 0.
for epoch in range(args.start_epoch, args.epochs):
train(epoch)
prec1 = test()
is_best = prec1 > best_prec1
best_prec1 = max(prec1, best_prec1)
save_checkpoint({
'epoch': epoch + 1,
'state_dict': model.state_dict(),
'best_prec1': best_prec1,
'optimizer': optimizer.state_dict(),
'cfg': model.cfg
}, is_best, filepath=args.save)
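# --- Illustration (added sketch, not part of the original script) ---
# The --refine checkpoint is the {'cfg', 'state_dict'} dict that
# PFEC_vggprune.py writes to pruned.pth.tar; rebuilding the model with the
# stored cfg (as done above) is what lets the pruned state_dict load cleanly:
#   checkpoint = torch.load('pruned.pth.tar')
#   model = models.__dict__['vgg'](dataset='cifar10', depth=16, cfg=checkpoint['cfg'])
#   model.load_state_dict(checkpoint['state_dict'])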
| 8,193 | 44.021978 | 115 | py |
Im2Hands | Im2Hands-main/init_occ_train.py | import warnings
warnings.filterwarnings('ignore',category=FutureWarning)
import os
import sys
import time
import argparse
import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np
import matplotlib; matplotlib.use('Agg')
from torch.utils.tensorboard import SummaryWriter
from artihand import config, data
from artihand.checkpoints import CheckpointIO
def init_weights(m):
if isinstance(m, nn.Linear) or isinstance(m, nn.Conv1d) or isinstance(m, nn.Conv2d):
        torch.nn.init.xavier_normal_(m.weight)
if m.bias is not None:
m.bias.data.fill_(0.01)
# Arguments
parser = argparse.ArgumentParser(
description='Train a deep structured implicit function model for hand reconstruction.'
)
parser.add_argument('--config', type=str, help='Path to config file.', default='configs/init_occ/init_occ.yaml')
parser.add_argument('--no-cuda', action='store_true', help='Do not use cuda.')
parser.add_argument('--exit-after', type=int, default=-1,
help='Checkpoint and exit after specified number of seconds'
'with exit code 2.')
args = parser.parse_args()
cfg = config.load_config(args.config, 'configs/init_occ/default.yaml')
is_cuda = (torch.cuda.is_available() and not args.no_cuda)
device = torch.device("cuda" if is_cuda else "cpu")
# Set t0
t0 = time.time()
ts = t0
# Shorthands
out_dir = cfg['training']['out_dir']
batch_size = cfg['training']['batch_size']
backup_every = cfg['training']['backup_every']
exit_after = args.exit_after
model_selection_metric = cfg['training']['model_selection_metric']
if cfg['training']['model_selection_mode'] == 'maximize':
model_selection_sign = 1
elif cfg['training']['model_selection_mode'] == 'minimize':
model_selection_sign = -1
else:
raise ValueError('model_selection_mode must be '
'either maximize or minimize.')
# Output directory
if not os.path.exists(out_dir):
os.makedirs(out_dir)
# Dataset
train_dataset = config.get_dataset('train', cfg)
val_dataset = config.get_dataset('val', cfg, splits=5000)
train_loader = torch.utils.data.DataLoader(
train_dataset, batch_size=batch_size, num_workers=4, shuffle=True,
collate_fn=data.collate_remove_none,
worker_init_fn=data.worker_init_fn)
val_loader = torch.utils.data.DataLoader(
val_dataset, batch_size=1, num_workers=4, shuffle=False,
collate_fn=data.collate_remove_none,
worker_init_fn=data.worker_init_fn)
# Model
model = config.get_model(cfg, device=device, dataset=train_dataset)
model = model.to('cuda')
model.apply(init_weights)
# Intialize training
npoints = 1000
optimizer = optim.Adam(model.parameters(), lr=1e-4 * 0.1)
trainer = config.get_trainer(model, optimizer, cfg, device=device)
# Load pre-trained model if existing
kwargs = {
'model': model,
'optimizer': optimizer,
}
checkpoint_io = CheckpointIO(
out_dir, initialize_from=cfg['model']['initialize_from'],
initialization_file_name=cfg['model']['initialization_file_name'],
**kwargs)
checkpoint_io = CheckpointIO(out_dir, model=model, optimizer=optimizer)
# initialize from the pretrained HALO and IntagHand baseline checkpoints
checkpoint_io.load('halo_baseline.pt')
checkpoint_io.load('intaghand_baseline.pth')
load_dict = dict()
epoch_it = load_dict.get('epoch_it', -1)
it = load_dict.get('it', -1)
metric_val_best = load_dict.get(
'loss_val_best', -model_selection_sign * np.inf)
if metric_val_best == np.inf or metric_val_best == -np.inf:
metric_val_best = -model_selection_sign * np.inf
print('Current best validation metric (%s): %.8f'
% (model_selection_metric, metric_val_best))
logger = SummaryWriter(os.path.join(out_dir, 'logs'))
# Shorthands
print_every = cfg['training']['print_every']
checkpoint_every = cfg['training']['checkpoint_every']
validate_every = cfg['training']['validate_every']
visualize_every = cfg['training']['visualize_every']
# Print model
nparameters = sum(p.numel() for p in model.parameters())
print(model)
print('Total number of parameters: %d' % nparameters)
while True:
epoch_it += 1
for batch in train_loader:
it += 1
loss_dict = trainer.train_step(batch)
loss = loss_dict['total']
for k, v in loss_dict.items():
logger.add_scalar('train/loss/%s' % k, v, it)
# Print output
if print_every > 0 and (it % print_every) == 0:
print('[Epoch %02d] it=%03d, loss=%.4f'
% (epoch_it, it, loss))
print('time per batch: %.2f, total time: %.2f'
% (time.time() - ts, time.time() - t0))
ts = time.time()
# Save checkpoint
if (checkpoint_every > 0 and (it % checkpoint_every) == 0):
print('Saving checkpoint')
checkpoint_io.save('model.pt', epoch_it=epoch_it, it=it,
loss_val_best=metric_val_best)
# Backup if necessary
if (backup_every > 0 and (it % backup_every) == 0):
print('Backup checkpoint')
checkpoint_io.save('model_%d.pt' % it, epoch_it=epoch_it, it=it,
loss_val_best=metric_val_best)
# Run validation
if validate_every > 0 and (it % validate_every) == 0:
eval_dict = trainer.evaluate(val_loader)
metric_val = eval_dict[model_selection_metric]
print('Validation metric (%s): %.4f'
% (model_selection_metric, metric_val))
for k, v in eval_dict.items():
logger.add_scalar('val/%s' % k, v, it)
if model_selection_sign * (metric_val - metric_val_best) > 0:
metric_val_best = metric_val
print('New best model (loss %.4f)' % metric_val_best)
checkpoint_io.save('model_best.pt', epoch_it=epoch_it, it=it,
loss_val_best=metric_val_best)
# Exit if necessary
if exit_after > 0 and (time.time() - t0) >= exit_after:
print('Time limit reached. Exiting.')
checkpoint_io.save('model.pt', epoch_it=epoch_it, it=it,
loss_val_best=metric_val_best)
exit(3)
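# --- Illustration (added sketch, not part of the original script) ---
# model_selection_sign folds 'maximize' and 'minimize' into one comparison:
# with sign = +1 a higher metric is an improvement, with sign = -1 a lower one.
def _is_better(metric_val, metric_val_best, model_selection_sign):
    return model_selection_sign * (metric_val - metric_val_best) > 0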
| 6,151 | 32.98895 | 112 | py |
Im2Hands | Im2Hands-main/ref_occ_train.py | import warnings
warnings.filterwarnings('ignore',category=FutureWarning)
import os
import sys
import time
import argparse
import torch
import torch.optim as optim
import numpy as np
import matplotlib; matplotlib.use('Agg')
from torch.utils.tensorboard import SummaryWriter
from artihand import config, data
from artihand.checkpoints import CheckpointIO
# Arguments
parser = argparse.ArgumentParser(
description='Train a deep structured implicit function model for hand reconstruction.'
)
parser.add_argument('--config', type=str, help='Path to config file.', default='configs/ref_occ/ref_occ.yaml')
parser.add_argument('--no-cuda', action='store_true', help='Do not use cuda.')
parser.add_argument('--exit-after', type=int, default=-1,
help='Checkpoint and exit after specified number of seconds'
'with exit code 2.')
args = parser.parse_args()
cfg = config.load_config(args.config, 'configs/ref_occ/default.yaml')
is_cuda = (torch.cuda.is_available() and not args.no_cuda)
device = torch.device("cuda" if is_cuda else "cpu")
# Set t0
t0 = time.time()
ts = t0
# Shorthands
out_dir = cfg['training']['out_dir']
batch_size = cfg['training']['batch_size']
backup_every = cfg['training']['backup_every']
exit_after = args.exit_after
model_selection_metric = cfg['training']['model_selection_metric']
if cfg['training']['model_selection_mode'] == 'maximize':
model_selection_sign = 1
elif cfg['training']['model_selection_mode'] == 'minimize':
model_selection_sign = -1
else:
raise ValueError('model_selection_mode must be '
'either maximize or minimize.')
# Output directory
if not os.path.exists(out_dir):
os.makedirs(out_dir)
# Dataset
train_dataset = config.get_dataset('train', cfg)
val_dataset = config.get_dataset('val', cfg, splits=2000)
train_loader = torch.utils.data.DataLoader(
train_dataset, batch_size=batch_size, num_workers=4, shuffle=True,
collate_fn=data.collate_remove_none,
worker_init_fn=data.worker_init_fn)
val_loader = torch.utils.data.DataLoader(
val_dataset, batch_size=batch_size, num_workers=4, shuffle=False,
collate_fn=data.collate_remove_none,
worker_init_fn=data.worker_init_fn)
# Model
model = config.get_model(cfg, device=device, dataset=train_dataset)
model = model.to('cuda')
# Intialize training
npoints = 1000
optimizer = optim.Adam(model.parameters(), lr=0.0001, betas=(0.9,0.999), eps=1e-08, amsgrad=False, weight_decay=1e-5)
trainer = config.get_trainer(model, optimizer, cfg, device=device)
kwargs = {
'model': model,
'optimizer': optimizer,
}
checkpoint_io = CheckpointIO(
out_dir, initialize_from=cfg['model']['initialize_from'],
initialization_file_name=cfg['model']['initialization_file_name'],
**kwargs)
checkpoint_io.load('init_occ.pt', strict=True)
checkpoint_io.load('model_best.pt', strict=True)  # WARNING: this load overrides the init_occ.pt weights loaded above
load_dict = {}
epoch_it = load_dict.get('epoch_it', -1)
it = load_dict.get('it', -1) - 1
metric_val_best = load_dict.get(
'loss_val_best', -model_selection_sign * np.inf)
print('Current best validation metric (%s): %.8f'
% (model_selection_metric, metric_val_best))
metric_val_best = -1
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=5000,
gamma=0.2, last_epoch=epoch_it)
logger = SummaryWriter(os.path.join(out_dir, 'logs_pen'))
# Shorthands
print_every = cfg['training']['print_every']
checkpoint_every = cfg['training']['checkpoint_every']
validate_every = cfg['training']['validate_every']
visualize_every = cfg['training']['visualize_every']
# Print model
nparameters = sum(p.numel() for p in model.parameters())
print(model)
print('Total number of parameters: %d' % nparameters)
while True:
epoch_it += 1
for batch in train_loader:
scheduler.step()
it += 1
loss_dict = trainer.train_step(batch)
loss = loss_dict['total']
for k, v in loss_dict.items():
logger.add_scalar('train/loss/%s' % k, v, it)
# Print output
if print_every > 0 and (it % print_every) == 0:
print('[Epoch %02d] it=%03d, loss=%.4f'
% (epoch_it, it, loss))
print('time per batch: %.2f, total time: %.2f'
% (time.time() - ts, time.time() - t0))
ts = time.time()
# Save checkpoint
if (checkpoint_every > 0 and (it % checkpoint_every) == 0):
print('Saving checkpoint')
checkpoint_io.save('model.pt', epoch_it=epoch_it, it=it,
loss_val_best=metric_val_best)
# Backup if necessary
if it % backup_every == 0:
print('Backup checkpoint')
checkpoint_io.save('model_%d.pt' % it, epoch_it=epoch_it, it=it,
loss_val_best=metric_val_best)
# Run validation
if validate_every > 0 and (it % validate_every) == 0:
eval_dict = trainer.evaluate(val_loader)
metric_val = eval_dict[model_selection_metric]
print('Validation metric (%s): %.4f'
% (model_selection_metric, metric_val))
for k, v in eval_dict.items():
logger.add_scalar('val/%s' % k, v, it)
if model_selection_sign * (metric_val - metric_val_best) > 0:
metric_val_best = metric_val
print('New best model (loss %.4f)' % metric_val_best)
checkpoint_io.save('model_best.pt', epoch_it=epoch_it, it=it,
loss_val_best=metric_val_best)
# Exit if necessary
if exit_after > 0 and (time.time() - t0) >= exit_after:
print('Time limit reached. Exiting.')
checkpoint_io.save('model.pt', epoch_it=epoch_it, it=it,
loss_val_best=metric_val_best)
exit(3)
| 5,904 | 33.735294 | 117 | py |
Im2Hands | Im2Hands-main/kpts_ref_generate.py | import os
import sys
import time
import torch
import shutil
import trimesh
import argparse
import pandas as pd
import numpy as np
import open3d as o3d
from tqdm import tqdm
from collections import defaultdict
from artihand import config, data
from artihand.checkpoints import CheckpointIO
from artihand.nasa.kpts_ref_training import preprocess_joints
from dependencies.halo.halo_adapter.transform_utils import xyz_to_xyz1
parser = argparse.ArgumentParser(
description='Extract meshes from occupancy process.'
)
parser.add_argument('--config', type=str, help='Path to config file.', default='configs/kpts_ref/kpts_ref.yaml')
parser.add_argument('--no-cuda', action='store_true', help='Do not use cuda.')
parser.add_argument('--latest', action='store_true', help='Use latest model instead of best.')
parser.add_argument('--subset', type=str, default='test', choices=['train', 'val', 'test'], help='Dataset subset')
parser.add_argument('--out_dir', type=str, default='/data/hand_data/kpts_ref', help='Path to output directory to store intermediate results.')
parser.add_argument('--split_idx', type=int, default=0, help='Dataset split index. (-1: no split)')
parser.add_argument('--splits', type=int, default=1000, help='Dataset split index. (-1: no split)')
if __name__ == '__main__':
args = parser.parse_args()
cfg = config.load_config(args.config, 'configs/kpts_ref/default.yaml')
is_cuda = (torch.cuda.is_available() and not args.no_cuda)
device = torch.device("cuda" if is_cuda else "cpu")
out_dir = cfg['training']['out_dir']
generation_dir = os.path.join(out_dir, cfg['generation']['generation_dir'])
if args.latest:
generation_dir = generation_dir + '_latest'
out_time_file = os.path.join(generation_dir, 'time_generation_full.pkl')
out_time_file_class = os.path.join(generation_dir, 'time_generation.pkl')
batch_size = cfg['generation']['batch_size']
input_type = cfg['data']['input_type']
dataset = config.get_dataset(args.subset, cfg, splits=args.splits, split_idx=args.split_idx)
# Model
model = config.get_model(cfg, device=device, dataset=dataset)
checkpoint_io = CheckpointIO(out_dir, model=model)
if args.latest:
checkpoint_io.load('best_model.pt')
else:
checkpoint_io.load(cfg['test']['model_file'])
# Generator
generator = config.get_generator(model, cfg, device=device)
# Loader
test_loader = torch.utils.data.DataLoader(
dataset, batch_size=1, num_workers=1, shuffle=False)
# Statistics
time_dicts = []
# Generate
model.eval()
args.out_dir = os.path.join(args.out_dir, args.subset)
if not os.path.exists(args.out_dir):
os.makedirs(args.out_dir)
eval_list = defaultdict(list)
for i, data in enumerate(tqdm(test_loader)):
eval_dict = {}
img, camera_params, mano_data, idx = data
joints_gt = {'left': mano_data['left'].get('joints').to(device),
'right': mano_data['right'].get('joints').to(device)}
joints = {'left': mano_data['left'].get('pred_joints').to(device),
'right': mano_data['right'].get('pred_joints').to(device)}
root_rot_mat = {'left': mano_data['left'].get('root_rot_mat').to(device),
                        'right': mano_data['right'].get('root_rot_mat').to(device)}
kwargs = {}
# joint space conversion & normalization
left_joints, right_joints, left_norm = preprocess_joints(joints['left'], joints['right'], camera_params, root_rot_mat, return_mid=True)
left_joints_gt, right_joints_gt = preprocess_joints(joints_gt['left'], joints_gt['right'], camera_params, root_rot_mat)
in_joints = {'left': left_joints, 'right': right_joints}
with torch.no_grad():
left_joints_pred, right_joints_pred = model(img, camera_params, in_joints, **kwargs)
left_joints = left_joints_pred/1000 + left_norm
right_joints = right_joints_pred/1000 + left_norm
left_joints = torch.bmm(left_joints, camera_params['R'].double().cuda())
left_joints = left_joints + camera_params['left_root_xyz'].cuda().unsqueeze(1) * torch.Tensor([-1., 1., 1.]).cuda()
right_joints = torch.bmm(right_joints, camera_params['R'].double().cuda())
right_joints = right_joints + camera_params['right_root_xyz'].cuda().unsqueeze(1)
left_joints_out_path = os.path.join(args.out_dir, '%07d_left.ply' % idx)
right_joints_out_path = os.path.join(args.out_dir, '%07d_right.ply' % idx)
pcd = o3d.geometry.PointCloud()
pcd.points = o3d.utility.Vector3dVector(left_joints.detach().cpu().numpy()[0])
o3d.io.write_point_cloud(left_joints_out_path, pcd)
pcd = o3d.geometry.PointCloud()
pcd.points = o3d.utility.Vector3dVector(right_joints.detach().cpu().numpy()[0])
o3d.io.write_point_cloud(right_joints_out_path, pcd)
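# --- Illustration (added sketch, not part of the original script) ---
# The loop above undoes the joint preprocessing: predictions come out in a
# normalized, millimeter-scaled frame, so they are scaled back to meters,
# shifted by the normalization offset, rotated by the camera rotation R, and
# translated by the per-hand root (with the x-axis mirrored for the left
# hand). A per-sample numpy sketch of the same steps, with all argument names
# assumed for the demo:
def _denormalize_joints_sketch(joints_mm, norm_offset, R, root_xyz, mirror_x=False):
    joints = joints_mm / 1000.0 + norm_offset   # back to metric space
    joints = joints @ R                         # row-vector convention, as in bmm above
    if mirror_x:
        root_xyz = root_xyz * np.array([-1.0, 1.0, 1.0])
    return joints + root_xyz                    # add the root translation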
| 4,947 | 37.96063 | 143 | py |
Im2Hands | Im2Hands-main/kpts_ref_train.py | import warnings
warnings.filterwarnings('ignore',category=FutureWarning)
import os
import sys
import time
import argparse
import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np
import matplotlib; matplotlib.use('Agg')
from torch.utils.tensorboard import SummaryWriter
from artihand import config, data
from artihand.checkpoints import CheckpointIO
def init_weights(m):
if isinstance(m, nn.Linear) or isinstance(m, nn.Conv1d) or isinstance(m, nn.Conv2d):
        torch.nn.init.xavier_normal_(m.weight)
if m.bias is not None:
m.bias.data.fill_(0.01)
# Arguments
parser = argparse.ArgumentParser(
description='Train a deep structured implicit function model for hand reconstruction.'
)
parser.add_argument('--config', type=str, help='Path to config file.', default='configs/kpts_ref/kpts_ref.yaml')
parser.add_argument('--no-cuda', action='store_true', help='Do not use cuda.')
parser.add_argument('--exit-after', type=int, default=-1,
help='Checkpoint and exit after specified number of seconds'
'with exit code 2.')
args = parser.parse_args()
cfg = config.load_config(args.config, 'configs/kpts_ref/default.yaml')
is_cuda = (torch.cuda.is_available() and not args.no_cuda)
device = torch.device("cuda" if is_cuda else "cpu")
# Set t0
t0 = time.time()
ts = t0
# Shorthands
out_dir = cfg['training']['out_dir']
batch_size = cfg['training']['batch_size']
backup_every = cfg['training']['backup_every']
exit_after = args.exit_after
model_selection_metric = cfg['training']['model_selection_metric']
if cfg['training']['model_selection_mode'] == 'maximize':
model_selection_sign = 1
elif cfg['training']['model_selection_mode'] == 'minimize':
model_selection_sign = -1
else:
raise ValueError('model_selection_mode must be '
'either maximize or minimize.')
# Output directory
if not os.path.exists(out_dir):
os.makedirs(out_dir)
# Dataset
train_dataset = config.get_dataset('train', cfg)
val_dataset = config.get_dataset('val', cfg, splits=1000)
train_loader = torch.utils.data.DataLoader(
train_dataset, batch_size=batch_size, num_workers=4, shuffle=True,
collate_fn=data.collate_remove_none,
worker_init_fn=data.worker_init_fn)
val_loader = torch.utils.data.DataLoader(
val_dataset, batch_size=10, num_workers=4, shuffle=False,
collate_fn=data.collate_remove_none,
worker_init_fn=data.worker_init_fn)
# Model
model = config.get_model(cfg, device=device, dataset=train_dataset)
model = model.to('cuda')
model.apply(init_weights)
# Intialize training
npoints = 1000
optimizer = optim.Adam(model.parameters(), lr=1e-4)
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=5000, gamma=0.3)
trainer = config.get_trainer(model, optimizer, cfg, device=device)
# Load pre-trained model if existing
kwargs = {
'model': model,
'optimizer': optimizer,
}
checkpoint_io = CheckpointIO(
out_dir, initialize_from=cfg['model']['initialize_from'],
initialization_file_name=cfg['model']['initialization_file_name'],
**kwargs)
checkpoint_io = CheckpointIO(out_dir, model=model, optimizer=optimizer)
checkpoint_io.load('intaghand_baseline.pth')
load_dict = dict()
epoch_it = load_dict.get('epoch_it', -1)
it = load_dict.get('it', -1)
metric_val_best = load_dict.get(
'loss_val_best', -model_selection_sign * np.inf)
if metric_val_best == np.inf or metric_val_best == -np.inf:
metric_val_best = -model_selection_sign * np.inf
print('Current best validation metric (%s): %.8f'
% (model_selection_metric, metric_val_best))
logger = SummaryWriter(os.path.join(out_dir, 'logs_3'))
# Shorthands
print_every = cfg['training']['print_every']
checkpoint_every = cfg['training']['checkpoint_every']
validate_every = cfg['training']['validate_every']
visualize_every = cfg['training']['visualize_every']
# Print model
nparameters = sum(p.numel() for p in model.parameters())
print(model)
print('Total number of parameters: %d' % nparameters)
while True:
epoch_it += 1
for batch in train_loader:
it += 1
loss_dict = trainer.train_step(batch)
loss = loss_dict['total']
for k, v in loss_dict.items():
logger.add_scalar('train/loss/%s' % k, v, it)
# Print output
if print_every > 0 and (it % print_every) == 0:
print('[Epoch %02d] it=%03d, loss=%.4f'
% (epoch_it, it, loss))
print('time per batch: %.2f, total time: %.2f'
% (time.time() - ts, time.time() - t0))
ts = time.time()
# Save checkpoint
if (checkpoint_every > 0 and (it % checkpoint_every) == 0):
print('Saving checkpoint')
checkpoint_io.save('model.pt', epoch_it=epoch_it, it=it,
loss_val_best=metric_val_best)
# Backup if necessary
if (backup_every > 0 and (it % backup_every) == 0):
print('Backup checkpoint')
checkpoint_io.save('model_%d.pt' % it, epoch_it=epoch_it, it=it,
loss_val_best=metric_val_best)
# Run validation
if validate_every > 0 and (it % validate_every) == 0:
eval_dict = trainer.evaluate(val_loader)
metric_val = eval_dict[model_selection_metric]
print('Validation metric (%s): %.4f'
% (model_selection_metric, metric_val))
print(eval_dict)
for k, v in eval_dict.items():
logger.add_scalar('val/%s' % k, v, it)
if model_selection_sign * (metric_val - metric_val_best) > 0:
metric_val_best = metric_val
print('New best model (loss %.4f)' % metric_val_best)
checkpoint_io.save('model_best.pt', epoch_it=epoch_it, it=it,
loss_val_best=metric_val_best)
# Exit if necessary
if exit_after > 0 and (time.time() - t0) >= exit_after:
print('Time limit reached. Exiting.')
checkpoint_io.save('model.pt', epoch_it=epoch_it, it=it,
loss_val_best=metric_val_best)
exit(3)
| 6,214 | 33.148352 | 112 | py |
Im2Hands | Im2Hands-main/init_occ_generate.py | import os
import sys
import time
import torch
import shutil
import trimesh
import argparse
import pandas as pd
import numpy as np
import open3d as o3d
from tqdm import tqdm
from collections import defaultdict
from artihand import config, data
from artihand.checkpoints import CheckpointIO
from dependencies.halo.halo_adapter.transform_utils import xyz_to_xyz1
parser = argparse.ArgumentParser(
description='Extract meshes from occupancy process.'
)
parser.add_argument('--config', type=str, help='Path to config file.', default='configs/init_occ/init_occ.yaml')
parser.add_argument('--no-cuda', action='store_true', help='Do not use cuda.')
parser.add_argument('--latest', action='store_true', help='Use latest model instead of best.')
parser.add_argument('--subset', type=str, default='train', choices=['train', 'val', 'test'], help='Dataset subset')
parser.add_argument('--out_dir', type=str, default='/data/hand_data/initial_mesh_prediction_shape_att_check2', help='Path to output directory to store intermediate results.')
parser.add_argument('--split_idx', type=int, default=0, help='Dataset split index. (-1: no split)')
parser.add_argument('--splits', type=int, default=4, help='Dataset split index. (-1: no split)')
if __name__ == '__main__':
args = parser.parse_args()
cfg = config.load_config(args.config, 'configs/init_occ/default.yaml')
is_cuda = (torch.cuda.is_available() and not args.no_cuda)
device = torch.device("cuda" if is_cuda else "cpu")
out_dir = cfg['training']['out_dir']
generation_dir = os.path.join(out_dir, cfg['generation']['generation_dir'])
if args.latest:
generation_dir = generation_dir + '_latest'
out_time_file = os.path.join(generation_dir, 'time_generation_full.pkl')
out_time_file_class = os.path.join(generation_dir, 'time_generation.pkl')
batch_size = cfg['generation']['batch_size']
input_type = cfg['data']['input_type']
dataset = config.get_dataset(args.subset, cfg, splits=args.splits, split_idx=args.split_idx)
# Model
model = config.get_model(cfg, device=device, dataset=dataset)
checkpoint_io = CheckpointIO(out_dir, model=model)
if args.latest:
checkpoint_io.load('best_model.pt')
else:
checkpoint_io.load(cfg['test']['model_file'])
# Generator
generator = config.get_generator(model, cfg, device=device)
# Determine what to generate
generate_mesh = cfg['generation']['generate_mesh']
# Loader
test_loader = torch.utils.data.DataLoader(
dataset, batch_size=1, num_workers=1, shuffle=False)
# Statistics
time_dicts = []
# Generate
model.eval()
mesh_dir = os.path.join(generation_dir, 'meshes')
generation_vis_dir = os.path.join(generation_dir, 'vis', )
args.out_dir = os.path.join(args.out_dir, args.subset)
if not os.path.exists(args.out_dir):
os.makedirs(args.out_dir)
for i, data in enumerate(tqdm(test_loader)):
img, camera_params, mano_data, idx = data
# Create directories if necessary
if generate_mesh and not os.path.exists(mesh_dir):
os.makedirs(mesh_dir)
# Generate outputs
out_file_dict = {}
if generate_mesh:
out = generator.init_occ_generate_mesh(data)
# Get statistics
left_mesh, right_mesh = out
# Write output
left_mesh_out_path = os.path.join(args.out_dir, '%07d_left.obj' % idx)
right_mesh_out_path = os.path.join(args.out_dir, '%07d_right.obj' % idx)
print(f"Generating {left_mesh_out_path}...")
print(f"Generating {right_mesh_out_path}...")
os.makedirs(os.path.dirname(left_mesh_out_path), exist_ok=True)
# Save left hand mesh
left_mesh.vertices = left_mesh.vertices * 0.4
left_mesh.vertices = torch.matmul(mano_data['left']['root_rot_mat'].squeeze().cpu().T, xyz_to_xyz1(torch.Tensor(left_mesh.vertices)).unsqueeze(-1))[:, :3, 0]
left_mesh.vertices = left_mesh.vertices * 1000
left_mesh.vertices = left_mesh.vertices * [-1, 1, 1]
trimesh.repair.fix_inversion(left_mesh)
left_mesh.export(left_mesh_out_path)
# Save right hand mesh
right_mesh.vertices = right_mesh.vertices * 0.4
right_mesh.vertices = torch.matmul(mano_data['right']['root_rot_mat'].squeeze().cpu().T, xyz_to_xyz1(torch.Tensor(right_mesh.vertices)).unsqueeze(-1))[:, :3, 0]
right_mesh.vertices = right_mesh.vertices * 1000
right_mesh.export(right_mesh_out_path)
| 4,640 | 35.543307 | 174 | py |
Im2Hands | Im2Hands-main/ref_occ_generate.py | import os
import sys
import time
import torch
import shutil
import trimesh
import argparse
import pandas as pd
import numpy as np
import cv2
from tqdm import tqdm
from collections import defaultdict
from artihand import config, data
from artihand.checkpoints import CheckpointIO
from dependencies.halo.halo_adapter.transform_utils import xyz_to_xyz1
parser = argparse.ArgumentParser(
description='Extract meshes from occupancy process.'
)
parser.add_argument('--config', type=str, help='Path to config file.', default='configs/ref_occ/ref_occ.yaml')
parser.add_argument('--no-cuda', action='store_true', help='Do not use cuda.')
parser.add_argument('--latest', action='store_true', help='Use latest model instead of best.')
parser.add_argument('--subset', type=str, default='test', choices=['train', 'val', 'test'], help='Dataset subset')
parser.add_argument('--out_dir', type=str, default='/data/hand_data/ref_occ_vis', help='Path to output directory to store intermediate results.')
parser.add_argument('--split_idx', type=int, default=0, help='Dataset split index. (-1: no split)')
parser.add_argument('--splits', type=int, default=1000, help='Dataset split index. (-1: no split)')
if __name__ == '__main__':
args = parser.parse_args()
cfg = config.load_config(args.config, 'configs/ref_occ/default.yaml')
is_cuda = (torch.cuda.is_available() and not args.no_cuda)
device = torch.device("cuda" if is_cuda else "cpu")
out_dir = cfg['training']['out_dir']
generation_dir = os.path.join(out_dir, cfg['generation']['generation_dir'])
if args.latest:
generation_dir = generation_dir + '_latest'
out_time_file = os.path.join(generation_dir, 'time_generation_full.pkl')
out_time_file_class = os.path.join(generation_dir, 'time_generation.pkl')
batch_size = cfg['generation']['batch_size']
input_type = cfg['data']['input_type']
dataset = config.get_dataset(args.subset, cfg, splits=args.splits, split_idx=args.split_idx)
# Model
model = config.get_model(cfg, device=device, dataset=dataset)
checkpoint_io = CheckpointIO(out_dir, model=model)
if args.latest:
checkpoint_io.load('best_model.pt')
else:
checkpoint_io.load(cfg['test']['model_file'])
# Generator
generator = config.get_generator(model, cfg, device=device)
# Determine what to generate
generate_mesh = cfg['generation']['generate_mesh']
# Loader
test_loader = torch.utils.data.DataLoader(
dataset, batch_size=1, num_workers=1, shuffle=False)
# Statistics
time_dicts = []
# Generate
model.eval()
mesh_dir = os.path.join(generation_dir, 'meshes')
generation_vis_dir = os.path.join(generation_dir, 'vis', )
args.out_dir = os.path.join(args.out_dir, args.subset)
if not os.path.exists(args.out_dir):
os.makedirs(args.out_dir)
for i, data in enumerate(tqdm(test_loader)):
img, camera_params, mano_data, idx = data
# Create directories if necessary
if generate_mesh and not os.path.exists(mesh_dir):
os.makedirs(mesh_dir)
# Generate outputs
out_file_dict = {}
if generate_mesh:
out = generator.ref_occ_generate_mesh(data)
# Get statistics
left_mesh, right_mesh = out
# Write output
left_mesh_out_path = os.path.join(args.out_dir, '%07d_left.obj' % idx)
right_mesh_out_path = os.path.join(args.out_dir, '%07d_right.obj' % idx)
print(f"Generating {left_mesh_out_path}...")
print(f"Generating {right_mesh_out_path}...")
os.makedirs(os.path.dirname(left_mesh_out_path), exist_ok=True)
        left_mid_joint = torch.matmul(
            mano_data['left']['root_rot_mat'].squeeze().cpu(),
            xyz_to_xyz1(mano_data['left']['mid_joint'] * torch.Tensor([-1., 1., 1.])).unsqueeze(-1).float())[:, :3, 0]
        right_mid_joint = torch.matmul(
            mano_data['right']['root_rot_mat'].squeeze().cpu(),
            xyz_to_xyz1(mano_data['right']['mid_joint']).unsqueeze(-1).float())[:, :3, 0]
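        # De-normalization sketch for the meshes saved below: each hand leaves
        # the generator in a root-normalized frame, so it is (1) scaled back
        # (the 0.4 factor is assumed to undo the dataset's normalization),
        # (2) rotated by the transpose (inverse) of the root rotation,
        # (3) translated to the camera-space root joint, and, for the left
        # hand only, (4) mirrored across the x-axis to undo the left/right flip.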
# Save left hand mesh
left_mesh.vertices = left_mesh.vertices * 0.4
#left_mesh.vertices = left_mesh.vertices - left_mid_joint.cpu().numpy()
left_mesh.vertices = torch.matmul(mano_data['left']['root_rot_mat'].squeeze().cpu().T, xyz_to_xyz1(torch.Tensor(left_mesh.vertices)).unsqueeze(-1))[:, :3, 0]
left_mesh.vertices = left_mesh.vertices + camera_params['left_root_xyz'].cpu().numpy()
left_mesh.vertices = left_mesh.vertices * [-1, 1, 1]
        trimesh.repair.fix_inversion(left_mesh)  # the x-mirror above flips the face winding; repair it
left_mesh.export(left_mesh_out_path)
# Save right hand mesh
right_mesh.vertices = right_mesh.vertices * 0.4
#right_mesh.vertices = right_mesh.vertices - right_mid_joint.cpu().numpy()
right_mesh.vertices = torch.matmul(mano_data['right']['root_rot_mat'].squeeze().cpu().T, xyz_to_xyz1(torch.Tensor(right_mesh.vertices)).unsqueeze(-1))[:, :3, 0]
right_mesh.vertices = right_mesh.vertices + camera_params['right_root_xyz'].cpu().numpy()
right_mesh.export(right_mesh_out_path)
        img = cv2.imread(camera_params['img_path'][0])
        cv2.imwrite(os.path.join(args.out_dir, '%07d_img.png' % idx), img)
| 5,374 | 39.11194 | 199 | py |
Im2Hands | Im2Hands-main/dependencies/intaghand/dataset/interhand.py | import json
import os.path as osp
from tqdm import tqdm
import cv2 as cv
import numpy as np
import torch
import pickle
from glob import glob
import os
import sys
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
from models.manolayer import ManoLayer, rodrigues_batch
from dataset.dataset_utils import IMG_SIZE, HAND_BBOX_RATIO, HEATMAP_SIGMA, HEATMAP_SIZE, cut_img
from dataset.heatmap import HeatmapGenerator
from utils.vis_utils import mano_two_hands_renderer
from utils.utils import get_mano_path
def fix_shape(mano_layer):
if torch.sum(torch.abs(mano_layer['left'].shapedirs[:, 0, :] - mano_layer['right'].shapedirs[:, 0, :])) < 1:
print('Fix shapedirs bug of MANO')
mano_layer['left'].shapedirs[:, 0, :] *= -1
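# Background: in the released MANO pickles the x components of the left-hand
# shape blendshapes carry a flipped sign relative to the right hand; the check
# above detects the mismatch and mirrors them back so that left/right shape
# parameters behave symmetrically.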
class InterHandLoader():
def __init__(self, data_path, split='train', mano_path=None):
assert split in ['train', 'test', 'val']
self.root_path = data_path
self.img_root_path = os.path.join(self.root_path, 'images')
self.annot_root_path = os.path.join(self.root_path, 'annotations')
self.mano_layer = {'right': ManoLayer(mano_path['right'], center_idx=None),
'left': ManoLayer(mano_path['left'], center_idx=None)}
fix_shape(self.mano_layer)
self.split = split
with open(osp.join(self.annot_root_path, self.split,
'InterHand2.6M_' + self.split + '_data.json')) as f:
self.data_info = json.load(f)
with open(osp.join(self.annot_root_path, self.split,
'InterHand2.6M_' + self.split + '_camera.json')) as f:
self.cam_params = json.load(f)
with open(osp.join(self.annot_root_path, self.split,
'InterHand2.6M_' + self.split + '_joint_3d.json')) as f:
self.joints = json.load(f)
with open(osp.join(self.annot_root_path, self.split,
'InterHand2.6M_' + self.split + '_MANO_NeuralAnnot.json')) as f:
self.mano_params = json.load(f)
self.data_size = len(self.data_info['images'])
def __len__(self):
return self.data_size
def show_data(self, idx):
for k in self.data_info['images'][idx].keys():
print(k, self.data_info['images'][idx][k])
for k in self.data_info['annotations'][idx].keys():
print(k, self.data_info['annotations'][idx][k])
def load_camera(self, idx):
img_info = self.data_info['images'][idx]
capture_idx = img_info['capture']
cam_idx = img_info['camera']
capture_idx = str(capture_idx)
cam_idx = str(cam_idx)
cam_param = self.cam_params[str(capture_idx)]
cam_t = np.array(cam_param['campos'][cam_idx], dtype=np.float32).reshape(3)
cam_R = np.array(cam_param['camrot'][cam_idx], dtype=np.float32).reshape(3, 3)
cam_t = -np.dot(cam_R, cam_t.reshape(3, 1)).reshape(3) / 1000 # -Rt -> t
# add camera intrinsics
focal = np.array(cam_param['focal'][cam_idx], dtype=np.float32).reshape(2)
princpt = np.array(cam_param['princpt'][cam_idx], dtype=np.float32).reshape(2)
cameraIn = np.array([[focal[0], 0, princpt[0]],
[0, focal[1], princpt[1]],
[0, 0, 1]])
return cam_R, cam_t, cameraIn
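    # Usage sketch (with a hypothetical world-space point p, in meters):
    #   cam_R, cam_t, cameraIn = loader.load_camera(idx)
    #   p_cam = cam_R @ p + cam_t        # world -> camera coordinates
    #   uv = cameraIn @ p_cam            # camera -> image plane
    #   uv = uv[:2] / uv[2]              # perspective divide -> pixel coords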
def load_mano(self, idx):
img_info = self.data_info['images'][idx]
capture_idx = img_info['capture']
frame_idx = img_info['frame_idx']
capture_idx = str(capture_idx)
frame_idx = str(frame_idx)
mano_dict = {}
coord_dict = {}
for hand_type in ['left', 'right']:
try:
mano_param = self.mano_params[capture_idx][frame_idx][hand_type]
mano_pose = torch.FloatTensor(mano_param['pose']).view(-1, 3)
root_pose = mano_pose[0].view(1, 3)
hand_pose = mano_pose[1:, :].view(1, -1)
# hand_pose = hand_pose.view(1, -1, 3)
mano = self.mano_layer[hand_type]
mean_pose = mano.hands_mean
hand_pose = mano.axis2pca(hand_pose + mean_pose)
shape = torch.FloatTensor(mano_param['shape']).view(1, -1)
trans = torch.FloatTensor(mano_param['trans']).view(1, 3)
root_pose = rodrigues_batch(root_pose)
handV, handJ = self.mano_layer[hand_type](root_pose, hand_pose, shape, trans=trans)
mano_dict[hand_type] = {'R': root_pose.numpy(), 'pose': hand_pose.numpy(), 'shape': shape.numpy(), 'trans': trans.numpy()}
coord_dict[hand_type] = {'verts': handV, 'joints': handJ}
            except Exception:
                # this hand has no (valid) MANO annotation for the frame
                mano_dict[hand_type] = None
                coord_dict[hand_type] = None
return mano_dict, coord_dict
def load_img(self, idx):
img_info = self.data_info['images'][idx]
img = cv.imread(osp.join(self.img_root_path, self.split, img_info['file_name']))
return img
def cut_inter_img(loader, save_path, split):
os.makedirs(osp.join(save_path, split, 'img'), exist_ok=True)
os.makedirs(osp.join(save_path, split, 'anno'), exist_ok=True)
idx = 0
for i in tqdm(range(len(loader))):
annotation = loader.data_info['annotations'][i]
images_info = loader.data_info['images'][i]
hand_type = annotation['hand_type']
hand_type_valid = annotation['hand_type_valid']
if hand_type == 'interacting' and hand_type_valid:
mano_dict, coord_dict = loader.load_mano(i)
if coord_dict['left'] is not None and coord_dict['right'] is not None:
left = coord_dict['left']['verts'][0].detach().numpy()
right = coord_dict['right']['verts'][0].detach().numpy()
dist = np.linalg.norm(left - right, ord=2, axis=-1).min()
                if dist < 9999999:  # placeholder threshold: keeps every interacting sample
                    img = loader.load_img(i)
                    if img.mean() < 10:  # skip nearly-black (invalid) captures
                        continue
cam_R, cam_t, cameraIn = loader.load_camera(i)
left = left @ cam_R.T + cam_t
left2d = left @ cameraIn.T
left2d = left2d[:, :2] / left2d[:, 2:]
right = right @ cam_R.T + cam_t
right2d = right @ cameraIn.T
right2d = right2d[:, :2] / right2d[:, 2:]
[img], _, cameraIn = \
cut_img([img], [left2d, right2d], camera=cameraIn, radio=HAND_BBOX_RATIO, img_size=IMG_SIZE)
cv.imwrite(osp.join(save_path, split, 'img', '{}.jpg'.format(idx)), img)
data_info = {}
data_info['inter_idx'] = idx
data_info['image'] = images_info
data_info['annotation'] = annotation
data_info['mano_params'] = mano_dict
data_info['camera'] = {'R': cam_R, 't': cam_t, 'camera': cameraIn}
with open(osp.join(save_path, split, 'anno', '{}.pkl'.format(idx)), 'wb') as file:
pickle.dump(data_info, file)
idx = idx + 1
def select_data(DATA_PATH, save_path, split):
mano_path = {'right': '/workspace/AFOF/leap/body_models/mano/models/MANO_RIGHT.pkl',
'left': '/workspace/AFOF/leap/body_models/mano/models/MANO_LEFT.pkl'}
#loader = InterHandLoader(DATA_PATH, split=split, mano_path=get_mano_path())
loader = InterHandLoader(DATA_PATH, split=split, mano_path=mano_path)
cut_inter_img(loader, save_path, split)
def render_data(save_path, split):
mano_path = {'right': '/workspace/AFOF/leap/body_models/mano/models/MANO_RIGHT.pkl',
'left': '/workspace/AFOF/leap/body_models/mano/models/MANO_LEFT.pkl'}
os.makedirs(osp.join(save_path, split, 'mask'), exist_ok=True)
os.makedirs(osp.join(save_path, split, 'dense'), exist_ok=True)
os.makedirs(osp.join(save_path, split, 'hms'), exist_ok=True)
size = len(glob(osp.join(save_path, split, 'anno', '*.pkl')))
mano_layer = {'right': ManoLayer(mano_path['right'], center_idx=None),
'left': ManoLayer(mano_path['left'], center_idx=None)}
fix_shape(mano_layer)
renderer = mano_two_hands_renderer(img_size=IMG_SIZE, device='cuda')
hmg = HeatmapGenerator(HEATMAP_SIZE, HEATMAP_SIGMA)
for idx in tqdm(range(size)):
with open(osp.join(save_path, split, 'anno', '{}.pkl'.format(idx)), 'rb') as file:
data = pickle.load(file)
R = data['camera']['R']
T = data['camera']['t']
camera = data['camera']['camera']
verts = []
for hand_type in ['left', 'right']:
params = data['mano_params'][hand_type]
handV, handJ = mano_layer[hand_type](torch.from_numpy(params['R']).float(),
torch.from_numpy(params['pose']).float(),
torch.from_numpy(params['shape']).float(),
trans=torch.from_numpy(params['trans']).float())
handV = handV[0].numpy()
handJ = handJ[0].numpy()
handV = handV @ R.T + T
handJ = handJ @ R.T + T
handV2d = handV @ camera.T
handV2d = handV2d[:, :2] / handV2d[:, 2:]
handJ2d = handJ @ camera.T
handJ2d = handJ2d[:, :2] / handJ2d[:, 2:]
verts.append(torch.from_numpy(handV).float().cuda().unsqueeze(0))
            hms = np.split(hmg(handJ2d * HEATMAP_SIZE / IMG_SIZE)[0], 7)  # (21, h, w) -> 7 three-channel chunks, saved below as RGB jpgs
for hIdx in range(len(hms)):
cv.imwrite(os.path.join(save_path, split, 'hms', '{}_{}_{}.jpg'.format(idx, hIdx, hand_type)),
hms[hIdx].transpose(1, 2, 0) * 255)
img_mask = renderer.render_mask(cameras=torch.from_numpy(camera).float().cuda().unsqueeze(0),
v3d_left=verts[0], v3d_right=verts[1])
img_mask = img_mask.detach().cpu().numpy()[0] * 255
cv.imwrite(osp.join(save_path, split, 'mask', '{}.jpg'.format(idx)), img_mask)
img_dense, _ = renderer.render_densepose(cameras=torch.from_numpy(camera).float().cuda().unsqueeze(0),
v3d_left=verts[0], v3d_right=verts[1])
img_dense = img_dense.detach().cpu().numpy()[0] * 255
cv.imwrite(osp.join(save_path, split, 'dense', '{}.jpg'.format(idx)), img_dense)
class InterHand_dataset():
def __init__(self, data_path, split):
assert split in ['train', 'test', 'val']
self.split = split
mano_path = get_mano_path()
self.mano_layer = {'right': ManoLayer(mano_path['right'], center_idx=None),
'left': ManoLayer(mano_path['left'], center_idx=None)}
fix_shape(self.mano_layer)
self.data_path = data_path
self.size = len(glob(osp.join(data_path, split, 'anno', '*.pkl')))
def __len__(self):
return self.size
def __getitem__(self, idx):
img = cv.imread(osp.join(self.data_path, self.split, 'img', '{}.jpg'.format(idx)))
mask = cv.imread(osp.join(self.data_path, self.split, 'mask', '{}.jpg'.format(idx)))
dense = cv.imread(osp.join(self.data_path, self.split, 'dense', '{}.jpg'.format(idx)))
with open(os.path.join(self.data_path, self.split, 'anno', '{}.pkl'.format(idx)), 'rb') as file:
data = pickle.load(file)
R = data['camera']['R']
T = data['camera']['t']
camera = data['camera']['camera']
hand_dict = {}
for hand_type in ['left', 'right']:
hms = []
for hIdx in range(7):
hm = cv.imread(os.path.join(self.data_path, self.split, 'hms', '{}_{}_{}.jpg'.format(idx, hIdx, hand_type)))
hm = cv.resize(hm, (img.shape[1], img.shape[0]))
hms.append(hm)
params = data['mano_params'][hand_type]
handV, handJ = self.mano_layer[hand_type](torch.from_numpy(params['R']).float(),
torch.from_numpy(params['pose']).float(),
torch.from_numpy(params['shape']).float(),
trans=torch.from_numpy(params['trans']).float())
handV = handV[0].numpy()
handJ = handJ[0].numpy()
handV = handV @ R.T + T
handJ = handJ @ R.T + T
handV2d = handV @ camera.T
handV2d = handV2d[:, :2] / handV2d[:, 2:]
handJ2d = handJ @ camera.T
handJ2d = handJ2d[:, :2] / handJ2d[:, 2:]
hand_dict[hand_type] = {'hms': hms,
'verts3d': handV, 'joints3d': handJ,
'verts2d': handV2d, 'joints2d': handJ2d,
'R': R @ params['R'][0],
'pose': params['pose'][0],
'shape': params['shape'][0],
'camera': camera,
'org_R': R,
'org_T': T,
}
return img, mask, dense, hand_dict
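# Usage sketch (the data path is an assumption; see the defaults below):
#   dataset = InterHand_dataset('/data/hand_data/AFOF/InterHand_processed_intag_seq', 'val')
#   loader = torch.utils.data.DataLoader(dataset, batch_size=8, shuffle=True)
#   img, mask, dense, hand_dict = next(iter(loader))
#   # hand_dict['left']['verts3d'] -> (8, 778, 3) tensor; the default
#   # collate_fn stacks the per-sample numpy arrays into batched tensors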
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--data_path", type=str, default='/data/hand_data/AFOF/InterHand_HFPS/InterHand2.6M_30fps_batch1')
parser.add_argument("--save_path", type=str, default='/data/hand_data/AFOF/InterHand_processed_intag_seq')
opt = parser.parse_args()
for split in ['val']:
select_data(opt.data_path, opt.save_path, split=split)
for split in ['val']:
render_data(opt.save_path, split)
| 13,958 | 43.597444 | 138 | py |
Im2Hands | Im2Hands-main/dependencies/intaghand/models/model.py | import sys
import os
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
import torch
import torch.nn as nn
import torch.nn.functional as F
import pickle
import numpy as np
from dataset.dataset_utils import IMG_SIZE
from models.encoder import load_encoder
from models.decoder import load_decoder
from utils.config import load_cfg
class HandNET_GCN(nn.Module):
def __init__(self, encoder, mid_model, decoder):
super(HandNET_GCN, self).__init__()
self.encoder = encoder
self.mid_model = mid_model
self.decoder = decoder
def forward(self, img):
hms, mask, dp, img_fmaps, hms_fmaps, dp_fmaps = self.encoder(img)
global_feature, fmaps = self.mid_model(img_fmaps, hms_fmaps, dp_fmaps)
result, paramsDict, handDictList, otherInfo = self.decoder(global_feature, fmaps)
if hms is not None:
otherInfo['hms'] = hms
if mask is not None:
otherInfo['mask'] = mask
if dp is not None:
otherInfo['dense'] = dp
return result, paramsDict, handDictList, otherInfo
def load_model(cfg):
if isinstance(cfg, str):
cfg = load_cfg(cfg)
encoder, mid_model = load_encoder(cfg)
decoder = load_decoder(cfg, mid_model.get_info())
model = HandNET_GCN(encoder, mid_model, decoder)
abspath = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
path = os.path.join(abspath, str(cfg.MODEL_PARAM.MODEL_PRETRAIN_PATH))
if os.path.exists(path):
state = torch.load(path, map_location='cpu')
print('load model params from {}'.format(path))
        try:
            model.load_state_dict(state)
        except RuntimeError:
            # checkpoints saved from nn.DataParallel prefix every key with
            # 'module.'; strip the 7-character prefix and retry
            state2 = {}
            for k, v in state.items():
                state2[k[7:]] = v
            model.load_state_dict(state2)
return model
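# Usage sketch (the config path is an assumption):
#   model = load_model('configs/default.yaml')
#   result, paramsDict, handDictList, otherInfo = model(torch.randn(1, 3, IMG_SIZE, IMG_SIZE))
#   # result['verts3d']['left'] / ['right'] -> (1, 778, 3) MANO-topology vertices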
| 1,863 | 29.557377 | 89 | py |
Im2Hands | Im2Hands-main/dependencies/intaghand/models/encoder.py | import os
import sys
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
import torch
import torch.nn as nn
import torch.nn.functional as F
import pickle
import numpy as np
from dataset.dataset_utils import IMG_SIZE
from utils.utils import projection_batch
from models.manolayer import ManoLayer
from models.model_zoo import get_hrnet, conv1x1, conv3x3, deconv3x3, weights_init, GCN_vert_convert, build_fc_layer, Bottleneck
from utils.config import load_cfg
from torchvision.models import resnet18, resnet34, resnet50, resnet101, resnet152
class ResNetSimple_decoder(nn.Module):
def __init__(self, expansion=4,
fDim=[256, 256, 256, 256], direction=['flat', 'up', 'up', 'up'],
out_dim=3):
super(ResNetSimple_decoder, self).__init__()
self.models = nn.ModuleList()
fDim = [512 * expansion] + fDim
for i in range(len(direction)):
kernel_size = 1 if direction[i] == 'flat' else 3
self.models.append(self.make_layer(fDim[i], fDim[i + 1], direction[i], kernel_size=kernel_size))
self.final_layer = nn.Conv2d(
in_channels=fDim[-1],
out_channels=out_dim,
kernel_size=1,
stride=1,
padding=0
)
def make_layer(self, in_dim, out_dim,
direction, kernel_size=3, relu=True, bn=True):
assert direction in ['flat', 'up']
assert kernel_size in [1, 3]
if kernel_size == 3:
padding = 1
elif kernel_size == 1:
padding = 0
layers = []
if direction == 'up':
layers.append(nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True))
        layers.append(nn.Conv2d(in_dim, out_dim, kernel_size=kernel_size, stride=1, padding=padding, bias=False))
        if relu:
            layers.append(nn.ReLU(inplace=True))
        if bn:
            layers.append(nn.BatchNorm2d(out_dim))
        return nn.Sequential(*layers)
def forward(self, x):
fmaps = []
for i in range(len(self.models)):
x = self.models[i](x)
fmaps.append(x)
x = self.final_layer(x)
return x, fmaps
class ResNetSimple(nn.Module):
def __init__(self, model_type='resnet50',
pretrained=False,
fmapDim=[256, 256, 256, 256],
handNum=2,
heatmapDim=21):
super(ResNetSimple, self).__init__()
assert model_type in ['resnet18', 'resnet34', 'resnet50', 'resnet101', 'resnet152']
if model_type == 'resnet18':
self.resnet = resnet18(pretrained=pretrained)
self.expansion = 1
elif model_type == 'resnet34':
self.resnet = resnet34(pretrained=pretrained)
self.expansion = 1
elif model_type == 'resnet50':
self.resnet = resnet50(pretrained=pretrained)
self.expansion = 4
elif model_type == 'resnet101':
self.resnet = resnet101(pretrained=pretrained)
self.expansion = 4
elif model_type == 'resnet152':
self.resnet = resnet152(pretrained=pretrained)
self.expansion = 4
self.hms_decoder = ResNetSimple_decoder(expansion=self.expansion,
fDim=fmapDim,
direction=['flat', 'up', 'up', 'up'],
out_dim=heatmapDim * handNum)
for m in self.hms_decoder.modules():
weights_init(m)
self.dp_decoder = ResNetSimple_decoder(expansion=self.expansion,
fDim=fmapDim,
direction=['flat', 'up', 'up', 'up'],
out_dim=handNum + 3 * handNum)
self.handNum = handNum
for m in self.dp_decoder.modules():
weights_init(m)
def forward(self, x):
x = self.resnet.conv1(x)
x = self.resnet.bn1(x)
x = self.resnet.relu(x)
x = self.resnet.maxpool(x)
x4 = self.resnet.layer1(x)
x3 = self.resnet.layer2(x4)
x2 = self.resnet.layer3(x3)
x1 = self.resnet.layer4(x2)
img_fmaps = [x1, x2, x3, x4]
hms, hms_fmaps = self.hms_decoder(x1)
out, dp_fmaps = self.dp_decoder(x1)
mask = out[:, :self.handNum]
dp = out[:, self.handNum:]
return hms, mask, dp, \
img_fmaps, hms_fmaps, dp_fmaps
class resnet_mid(nn.Module):
def __init__(self,
model_type='resnet50',
in_fmapDim=[256, 256, 256, 256],
out_fmapDim=[256, 256, 256, 256]):
super(resnet_mid, self).__init__()
assert model_type in ['resnet18', 'resnet34', 'resnet50', 'resnet101', 'resnet152']
if model_type == 'resnet18' or model_type == 'resnet34':
self.expansion = 1
elif model_type == 'resnet50' or model_type == 'resnet101' or model_type == 'resnet152':
self.expansion = 4
self.img_fmaps_dim = [512 * self.expansion, 256 * self.expansion,
128 * self.expansion, 64 * self.expansion]
self.dp_fmaps_dim = in_fmapDim
self.hms_fmaps_dim = in_fmapDim
self.convs = nn.ModuleList()
for i in range(len(out_fmapDim)):
inDim = self.dp_fmaps_dim[i] + self.hms_fmaps_dim[i]
if i > 0:
inDim = inDim + self.img_fmaps_dim[i]
self.convs.append(conv1x1(inDim, out_fmapDim[i]))
self.output_layer = nn.Sequential(
nn.AdaptiveAvgPool2d(1),
nn.Flatten(start_dim=1),
)
self.global_feature_dim = 512 * self.expansion
self.fmaps_dim = out_fmapDim
def get_info(self):
return {'global_feature_dim': self.global_feature_dim,
'fmaps_dim': self.fmaps_dim}
def forward(self, img_fmaps, hms_fmaps, dp_fmaps):
global_feature = self.output_layer(img_fmaps[0])
fmaps = []
for i in range(len(self.convs)):
x = torch.cat((hms_fmaps[i], dp_fmaps[i]), dim=1)
if i > 0:
x = torch.cat((x, img_fmaps[i]), dim=1)
fmaps.append(self.convs[i](x))
return global_feature, fmaps
class HRnet_encoder(nn.Module):
def __init__(self, model_type, pretrained='', handNum=2, heatmapDim=21):
super(HRnet_encoder, self).__init__()
name = 'w' + model_type[model_type.find('hrnet') + 5:]
assert name in ['w18', 'w18_small_v1', 'w18_small_v2', 'w30', 'w32', 'w40', 'w44', 'w48', 'w64']
self.hrnet = get_hrnet(name=name,
in_channels=3,
head_type='none',
pretrained='')
if os.path.isfile(pretrained):
print('load pretrained params: {}'.format(pretrained))
pretrained_dict = torch.load(pretrained)
model_dict = self.hrnet.state_dict()
pretrained_dict = {k: v for k, v in pretrained_dict.items()
if k in model_dict.keys() and k.find('classifier') == -1}
model_dict.update(pretrained_dict)
self.hrnet.load_state_dict(model_dict)
self.fmaps_dim = list(self.hrnet.stage4_cfg['NUM_CHANNELS'])
self.fmaps_dim.reverse()
self.hms_decoder = self.mask_decoder(outDim=heatmapDim * handNum)
for m in self.hms_decoder.modules():
weights_init(m)
self.dp_decoder = self.mask_decoder(outDim=1 + 3 * handNum)
for m in self.dp_decoder.modules():
weights_init(m)
def mask_decoder(self, outDim=3):
last_inp_channels = 0
for temp in self.fmaps_dim:
last_inp_channels += temp
return nn.Sequential(
nn.Conv2d(
in_channels=last_inp_channels, out_channels=last_inp_channels,
kernel_size=1, stride=1, padding=0),
nn.BatchNorm2d(last_inp_channels),
nn.ReLU(inplace=True),
nn.Conv2d(
in_channels=last_inp_channels, out_channels=outDim,
kernel_size=1, stride=1, padding=0)
)
def forward(self, img):
ylist = self.hrnet(img)
# Upsampling
x0_h, x0_w = ylist[0].size(2), ylist[0].size(3)
x1 = F.interpolate(ylist[1], size=(x0_h, x0_w), mode='bilinear', align_corners=True)
x2 = F.interpolate(ylist[2], size=(x0_h, x0_w), mode='bilinear', align_corners=True)
x3 = F.interpolate(ylist[3], size=(x0_h, x0_w), mode='bilinear', align_corners=True)
x = torch.cat([ylist[0], x1, x2, x3], 1)
hms = self.hms_decoder(x)
out = self.dp_decoder(x)
mask = out[:, 0]
dp = out[:, 1:]
ylist.reverse()
return hms, mask, dp, \
ylist, None, None
class hrnet_mid(nn.Module):
def __init__(self,
model_type,
in_fmapDim=[256, 256, 256, 256],
out_fmapDim=[256, 256, 256, 256]):
super(hrnet_mid, self).__init__()
name = 'w' + model_type[model_type.find('hrnet') + 5:]
assert name in ['w18', 'w18_small_v1', 'w18_small_v2', 'w30', 'w32', 'w40', 'w44', 'w48', 'w64']
self.convs = nn.ModuleList()
for i in range(len(out_fmapDim)):
self.convs.append(conv1x1(in_fmapDim[i], out_fmapDim[i]))
self.global_feature_dim = 2048
self.fmaps_dim = out_fmapDim
in_fmapDim.reverse()
self.incre_modules, self.downsamp_modules, \
self.final_layer = self._make_head(in_fmapDim)
def get_info(self):
return {'global_feature_dim': self.global_feature_dim,
'fmaps_dim': self.fmaps_dim}
def _make_layer(self, block, inplanes, planes, blocks, stride=1):
downsample = None
if stride != 1 or inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion, momentum=0.1),
)
layers = []
layers.append(block(inplanes, planes, stride, downsample))
inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(inplanes, planes))
return nn.Sequential(*layers)
def _make_head(self, pre_stage_channels):
head_block = Bottleneck
head_channels = [32, 64, 128, 256]
# Increasing the #channels on each resolution
# from C, 2C, 4C, 8C to 128, 256, 512, 1024
incre_modules = []
for i, channels in enumerate(pre_stage_channels):
incre_module = self._make_layer(head_block,
channels,
head_channels[i],
1,
stride=1)
incre_modules.append(incre_module)
incre_modules = nn.ModuleList(incre_modules)
# downsampling modules
downsamp_modules = []
for i in range(len(pre_stage_channels) - 1):
in_channels = head_channels[i] * head_block.expansion
out_channels = head_channels[i + 1] * head_block.expansion
downsamp_module = nn.Sequential(
nn.Conv2d(in_channels=in_channels,
out_channels=out_channels,
kernel_size=3,
stride=2,
padding=1),
nn.BatchNorm2d(out_channels, momentum=0.1),
nn.ReLU(inplace=True)
)
downsamp_modules.append(downsamp_module)
downsamp_modules = nn.ModuleList(downsamp_modules)
final_layer = nn.Sequential(
nn.Conv2d(
in_channels=head_channels[3] * head_block.expansion,
out_channels=2048,
kernel_size=1,
stride=1,
padding=0
),
nn.BatchNorm2d(2048, momentum=0.1),
nn.ReLU(inplace=True)
)
return incre_modules, downsamp_modules, final_layer
def forward(self, img_fmaps, hms_fmaps=None, dp_fmaps=None):
fmaps = []
for i in range(len(self.convs)):
fmaps.append(self.convs[i](img_fmaps[i]))
img_fmaps.reverse()
y = self.incre_modules[0](img_fmaps[0])
for i in range(len(self.downsamp_modules)):
y = self.incre_modules[i + 1](img_fmaps[i + 1]) + \
self.downsamp_modules[i](y)
y = self.final_layer(y)
if torch._C._get_tracing_state():
y = y.flatten(start_dim=2).mean(dim=2)
else:
y = F.avg_pool2d(y, kernel_size=y.size()
[2:]).view(y.size(0), -1)
return y, fmaps
def load_encoder(cfg):
if cfg.MODEL.ENCODER_TYPE.find('resnet') != -1:
encoder = ResNetSimple(model_type=cfg.MODEL.ENCODER_TYPE,
pretrained=True,
fmapDim=[128, 128, 128, 128],
handNum=2,
heatmapDim=21)
mid_model = resnet_mid(model_type=cfg.MODEL.ENCODER_TYPE,
in_fmapDim=[128, 128, 128, 128],
out_fmapDim=cfg.MODEL.DECONV_DIMS)
    elif cfg.MODEL.ENCODER_TYPE.find('hrnet') != -1:
encoder = HRnet_encoder(model_type=cfg.MODEL.ENCODER_TYPE,
pretrained=cfg.MODEL.ENCODER_PRETRAIN_PATH,
handNum=2,
heatmapDim=21)
mid_model = hrnet_mid(model_type=cfg.MODEL.ENCODER_TYPE,
in_fmapDim=encoder.fmaps_dim,
out_fmapDim=cfg.MODEL.DECONV_DIMS)
    else:
        raise ValueError('unsupported encoder type: {}'.format(cfg.MODEL.ENCODER_TYPE))
    return encoder, mid_model
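if __name__ == '__main__':
    # Smoke-test sketch (assumptions: a 256x256 input and a resnet50 backbone;
    # pretrained=False so no torchvision weights need to be downloaded).
    enc = ResNetSimple(model_type='resnet50', pretrained=False,
                       fmapDim=[128, 128, 128, 128], handNum=2, heatmapDim=21)
    mid = resnet_mid(model_type='resnet50',
                     in_fmapDim=[128, 128, 128, 128],
                     out_fmapDim=[256, 256, 256, 256])
    hms, mask, dp, img_fmaps, hms_fmaps, dp_fmaps = enc(torch.randn(2, 3, 256, 256))
    print(hms.shape, mask.shape, dp.shape)  # (2, 42, 64, 64) (2, 2, 64, 64) (2, 6, 64, 64)
    gf, fmaps = mid(img_fmaps, hms_fmaps, dp_fmaps)
    print(gf.shape)  # (2, 2048) global image feature
    print([tuple(f.shape[-2:]) for f in fmaps])  # [(8, 8), (16, 16), (32, 32), (64, 64)]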
| 14,103 | 36.610667 | 127 | py |
Im2Hands | Im2Hands-main/dependencies/intaghand/models/decoder.py | import os
import sys
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
import torch
import torch.nn as nn
import torch.nn.functional as F
import pickle
import numpy as np
from dataset.dataset_utils import IMG_SIZE, BONE_LENGTH
from utils.utils import projection_batch, get_dense_color_path, get_graph_dict_path, get_upsample_path
from models.model_zoo import GCN_vert_convert, graph_upsample, graph_avg_pool
from models.model_attn import DualGraph
def weights_init(layer):
classname = layer.__class__.__name__
# print(classname)
if classname.find('Conv2d') != -1:
nn.init.xavier_uniform_(layer.weight.data)
elif classname.find('Linear') != -1:
nn.init.xavier_uniform_(layer.weight.data)
if layer.bias is not None:
nn.init.constant_(layer.bias.data, 0.0)
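# Usage sketch: the initializer above dispatches on the module's own class
# name, so apply it recursively to reach layers nested inside containers, e.g.
#   net = nn.Sequential(nn.Linear(64, 64), nn.ReLU(), nn.Linear(64, 3))
#   net.apply(weights_init)   # xavier-initializes both Linear layers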
class decoder(nn.Module):
def __init__(self,
global_feature_dim=2048,
f_in_Dim=[256, 256, 256, 256],
f_out_Dim=[128, 64, 32],
gcn_in_dim=[256, 128, 128],
gcn_out_dim=[128, 128, 64],
graph_k=2,
graph_layer_num=4,
left_graph_dict={},
right_graph_dict={},
vertex_num=778,
dense_coor=None,
num_attn_heads=4,
upsample_weight=None,
dropout=0.05):
super(decoder, self).__init__()
assert len(f_in_Dim) == 4
f_in_Dim = f_in_Dim[:-1]
assert len(gcn_in_dim) == 3
for i in range(len(gcn_out_dim) - 1):
assert gcn_out_dim[i] == gcn_in_dim[i + 1]
graph_dict = {'left': left_graph_dict, 'right': right_graph_dict}
graph_dict['left']['coarsen_graphs_L'].reverse()
graph_dict['right']['coarsen_graphs_L'].reverse()
graph_L = {}
for hand_type in ['left', 'right']:
graph_L[hand_type] = graph_dict[hand_type]['coarsen_graphs_L']
self.vNum_in = graph_L['left'][0].shape[0]
self.vNum_out = graph_L['left'][2].shape[0]
self.vNum_all = graph_L['left'][-1].shape[0]
self.vNum_mano = vertex_num
self.gf_dim = global_feature_dim
self.gcn_in_dim = gcn_in_dim
self.gcn_out_dim = gcn_out_dim
if dense_coor is not None:
dense_coor = torch.from_numpy(dense_coor).float()
self.register_buffer('dense_coor', dense_coor)
self.converter = {}
for hand_type in ['left', 'right']:
self.converter[hand_type] = GCN_vert_convert(vertex_num=self.vNum_mano,
graph_perm_reverse=graph_dict[hand_type]['graph_perm_reverse'],
graph_perm=graph_dict[hand_type]['graph_perm'])
self.dual_gcn = DualGraph(verts_in_dim=self.gcn_in_dim,
verts_out_dim=self.gcn_out_dim,
graph_L_Left=graph_L['left'][:3],
graph_L_Right=graph_L['right'][:3],
graph_k=[graph_k, graph_k, graph_k],
graph_layer_num=[graph_layer_num, graph_layer_num, graph_layer_num],
img_size=[8, 16, 32],
img_f_dim=f_in_Dim,
grid_size=[8, 8, 8],
grid_f_dim=f_out_Dim,
n_heads=num_attn_heads,
dropout=dropout)
self.gf_layer_left = nn.Sequential(*(nn.Linear(self.gf_dim, self.gcn_in_dim[0] - 3),
nn.LayerNorm(self.gcn_in_dim[0] - 3, eps=1e-6)))
self.gf_layer_right = nn.Sequential(*(nn.Linear(self.gf_dim, self.gcn_in_dim[0] - 3),
nn.LayerNorm(self.gcn_in_dim[0] - 3, eps=1e-6)))
self.unsample_layer = nn.Linear(self.vNum_out, self.vNum_mano, bias=False)
self.coord_head = nn.Linear(self.gcn_out_dim[-1], 3)
self.avg_head = nn.Linear(self.vNum_out, 1)
self.params_head = nn.Linear(self.gcn_out_dim[-1], 3)
        # .apply() recurses into the Sequential containers; a direct call on an
        # nn.Sequential matches no class name and would initialize nothing
        self.gf_layer_left.apply(weights_init)
        self.gf_layer_right.apply(weights_init)
        weights_init(self.coord_head)
        weights_init(self.avg_head)
        weights_init(self.params_head)
if upsample_weight is not None:
state = {'weight': upsample_weight.to(self.unsample_layer.weight.data.device)}
self.unsample_layer.load_state_dict(state)
else:
weights_init(self.unsample_layer)
def get_upsample_weight(self):
return self.unsample_layer.weight.data
def get_converter(self):
return self.converter
def get_hand_pe(self, bs, num=None):
if num is None:
num = self.vNum_in
dense_coor = self.dense_coor.repeat(bs, 1, 1) * 2 - 1
pel = self.converter['left'].vert_to_GCN(dense_coor)
pel = graph_avg_pool(pel, p=pel.shape[1] // num)
per = self.converter['right'].vert_to_GCN(dense_coor)
per = graph_avg_pool(per, p=per.shape[1] // num)
return pel, per
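    # The positional encoding above maps the precomputed per-vertex dense
    # correspondence colors to [-1, 1], converts them to the GCN vertex order,
    # and average-pools them down to the coarse graph resolution so they can
    # be concatenated with the global image feature.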
def forward(self, x, fmaps):
assert x.shape[1] == self.gf_dim
fmaps = fmaps[:-1]
bs = x.shape[0]
pel, per = self.get_hand_pe(bs, num=self.vNum_in)
Lf = torch.cat([self.gf_layer_left(x).unsqueeze(1).repeat(1, self.vNum_in, 1), pel], dim=-1)
Rf = torch.cat([self.gf_layer_right(x).unsqueeze(1).repeat(1, self.vNum_in, 1), per], dim=-1)
Lf, Rf = self.dual_gcn(Lf, Rf, fmaps)
scale = {}
trans2d = {}
temp = self.avg_head(Lf.transpose(-1, -2))[..., 0]
temp = self.params_head(temp)
scale['left'] = temp[:, 0]
trans2d['left'] = temp[:, 1:]
temp = self.avg_head(Rf.transpose(-1, -2))[..., 0]
temp = self.params_head(temp)
scale['right'] = temp[:, 0]
trans2d['right'] = temp[:, 1:]
handDictList = []
paramsDict = {'scale': scale, 'trans2d': trans2d}
verts3d = {'left': self.coord_head(Lf), 'right': self.coord_head(Rf)}
verts2d = {}
result = {'verts3d': {}, 'verts2d': {}}
for hand_type in ['left', 'right']:
verts2d[hand_type] = projection_batch(scale[hand_type], trans2d[hand_type], verts3d[hand_type], img_size=IMG_SIZE)
result['verts3d'][hand_type] = self.unsample_layer(verts3d[hand_type].transpose(1, 2)).transpose(1, 2)
result['verts2d'][hand_type] = projection_batch(scale[hand_type], trans2d[hand_type], result['verts3d'][hand_type], img_size=IMG_SIZE)
handDictList.append({'verts3d': verts3d, 'verts2d': verts2d})
otherInfo = {}
otherInfo['verts3d_MANO_list'] = {'left': [], 'right': []}
otherInfo['verts2d_MANO_list'] = {'left': [], 'right': []}
for i in range(len(handDictList)):
for hand_type in ['left', 'right']:
v = handDictList[i]['verts3d'][hand_type]
v = graph_upsample(v, p=self.vNum_all // v.shape[1])
otherInfo['verts3d_MANO_list'][hand_type].append(self.converter[hand_type].GCN_to_vert(v))
v = handDictList[i]['verts2d'][hand_type]
v = graph_upsample(v, p=self.vNum_all // v.shape[1])
otherInfo['verts2d_MANO_list'][hand_type].append(self.converter[hand_type].GCN_to_vert(v))
return result, paramsDict, handDictList, otherInfo
def load_decoder(cfg, encoder_info):
graph_path = get_graph_dict_path()
with open(graph_path['left'], 'rb') as file:
left_graph_dict = pickle.load(file)
with open(graph_path['right'], 'rb') as file:
right_graph_dict = pickle.load(file)
dense_path = get_dense_color_path()
with open(dense_path, 'rb') as file:
dense_coor = pickle.load(file)
upsample_path = get_upsample_path()
with open(upsample_path, 'rb') as file:
upsample_weight = pickle.load(file)
upsample_weight = torch.from_numpy(upsample_weight).float()
model = decoder(
global_feature_dim=encoder_info['global_feature_dim'],
f_in_Dim=encoder_info['fmaps_dim'],
f_out_Dim=cfg.MODEL.IMG_DIMS,
gcn_in_dim=cfg.MODEL.GCN_IN_DIM,
gcn_out_dim=cfg.MODEL.GCN_OUT_DIM,
graph_k=cfg.MODEL.graph_k,
graph_layer_num=cfg.MODEL.graph_layer_num,
vertex_num=778,
dense_coor=dense_coor,
left_graph_dict=left_graph_dict,
right_graph_dict=right_graph_dict,
num_attn_heads=4,
upsample_weight=upsample_weight,
dropout=cfg.TRAIN.dropout
)
return model
| 8,780 | 40.814286 | 146 | py |
Im2Hands | Im2Hands-main/dependencies/intaghand/models/manolayer.py | import pickle
import numpy as np
import torch
from torch.nn import Module
def convert_mano_pkl(loadPath, savePath):
# in original MANO pkl file, 'shapedirs' component is a chumpy object, convert it to a numpy array
manoData = pickle.load(open(loadPath, 'rb'), encoding='latin1')
output = {}
manoData['shapedirs'].r
for (k, v) in manoData.items():
if k == 'shapedirs':
output['shapedirs'] = v.r
else:
output[k] = v
pickle.dump(output, open(savePath, 'wb'))
def vec2mat(vec):
# vec: bs * 6
# output: bs * 3 * 3
x = vec[:, 0:3]
y = vec[:, 3:6]
x = x / (torch.norm(x, p=2, dim=1, keepdim=True) + 1e-8)
y = y - torch.sum(x * y, dim=1, keepdim=True) * x
y = y / (torch.norm(y, p=2, dim=1, keepdim=True) + 1e-8)
z = torch.cross(x, y)
return torch.stack([x, y, z], dim=2)
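# Property sketch: vec2mat orthonormalizes the two 3-vectors (Gram-Schmidt)
# and stacks x, y and z = x cross y as matrix columns, so the result is a rotation:
#   R = vec2mat(torch.randn(4, 6))
#   assert torch.allclose(torch.matmul(R.transpose(1, 2), R),
#                         torch.eye(3).expand(4, 3, 3), atol=1e-5)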
def rodrigues_batch(axis):
# axis : bs * 3
# return: bs * 3 * 3
bs = axis.shape[0]
Imat = torch.eye(3, dtype=axis.dtype, device=axis.device).repeat(bs, 1, 1) # bs * 3 * 3
angle = torch.norm(axis, p=2, dim=1, keepdim=True) + 1e-8 # bs * 1
axes = axis / angle # bs * 3
sin = torch.sin(angle).unsqueeze(2) # bs * 1 * 1
cos = torch.cos(angle).unsqueeze(2) # bs * 1 * 1
L = torch.zeros((bs, 3, 3), dtype=axis.dtype, device=axis.device)
L[:, 2, 1] = axes[:, 0]
L[:, 1, 2] = -axes[:, 0]
L[:, 0, 2] = axes[:, 1]
L[:, 2, 0] = -axes[:, 1]
L[:, 1, 0] = axes[:, 2]
L[:, 0, 1] = -axes[:, 2]
return Imat + sin * L + (1 - cos) * L.bmm(L)
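# Sanity sketch: the axis direction is the rotation axis and its norm is the
# angle, e.g. a rotation of pi/2 about +z maps the x-axis onto the y-axis:
#   R = rodrigues_batch(torch.tensor([[0., 0., np.pi / 2]]))
#   # torch.matmul(R, torch.tensor([[1.], [0.], [0.]])) ~= [[0.], [1.], [0.]]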
def get_trans(old_z, new_z):
# z: bs x 3
x = torch.cross(old_z, new_z)
x = x / torch.norm(x, dim=1, keepdim=True)
old_y = torch.cross(old_z, x)
new_y = torch.cross(new_z, x)
old_frame = torch.stack((x, old_y, old_z), axis=2)
new_frame = torch.stack((x, new_y, new_z), axis=2)
trans = torch.matmul(new_frame, old_frame.permute(0, 2, 1))
return trans
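# Property sketch (unit z vectors assumed): get_trans(old_z, new_z) is the
# rotation R with R @ old_z = new_z, built so that both frames share the
# common axis x = old_z cross new_z:
#   R = get_trans(old_z, new_z)
#   assert torch.allclose(torch.matmul(R, old_z.unsqueeze(-1)).squeeze(-1),
#                         new_z, atol=1e-5)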
def build_mano_frame(skelBatch):
# skelBatch: bs x 21 x 3
bs = skelBatch.shape[0]
mano_son = [2, 3, 17, 5, 6, 18, 8, 9, 20, 11, 12, 19, 14, 15, 16] # 15
mano_parent = [-1, 0, 1, 2, 0, 4, 5, 0, 7, 8, 0, 10, 11, 0, 13, 14] # 16
palm_idx = [13, 1, 4, 10, 7]
mano_order = [0, 5, 6, 7, 9, 10, 11, 17, 18, 19, 13, 14, 15, 1, 2, 3, 4, 8, 12, 16, 20] # 21
skel = skelBatch[:, mano_order]
z = skel[:, mano_son] - skel[:, 1:16] # bs x 15 x 3
z = z / torch.norm(z, dim=2, keepdim=True)
z = torch.cat((torch.zeros_like(z[:, 0:1]), z), axis=1) # bs x 16 x 3
x = torch.zeros_like(z) # bs x 16 x 3
x[:, :, 1] = 1.0
y = torch.zeros_like(z) # bs x 16 x 3
palm = skel[:, palm_idx] - skel[:, 0:1] # bs x 5 x 3
n = torch.cross(palm[:, :-1], palm[:, 1:]) # bs x 4 x 3
n = n / torch.norm(n, dim=2, keepdim=True)
palm_x = torch.zeros((bs, 5, 3), dtype=n.dtype, device=n.device)
palm_x[:, :-1] = palm_x[:, :-1] + n
palm_x[:, 1:] = palm_x[:, 1:] + n
palm_x = palm_x / torch.norm(palm_x, dim=2, keepdim=True)
x[:, palm_idx] = palm_x
y[:, palm_idx] = torch.cross(z[:, palm_idx], x[:, palm_idx])
y[:, palm_idx] = y[:, palm_idx] / torch.norm(y[:, palm_idx], dim=2, keepdim=True)
x[:, palm_idx] = torch.cross(y[:, palm_idx], z[:, palm_idx])
frame = torch.stack((x, y, z), axis=3) # bs x 15 x 3 x 3
for i in range(1, 16):
if i in palm_idx:
continue
trans = get_trans(z[:, mano_parent[i]], z[:, i])
frame[:, i] = torch.matmul(trans, frame[:, mano_parent[i]])
return frame[:, 1:]
class ManoLayer(Module):
def __init__(self, manoPath, center_idx=9, use_pca=True, new_skel=False):
super(ManoLayer, self).__init__()
self.center_idx = center_idx
self.use_pca = use_pca
self.new_skel = new_skel
manoData = pickle.load(open(manoPath, 'rb'), encoding='latin1')
self.new_order = [0,
13, 14, 15, 16,
1, 2, 3, 17,
4, 5, 6, 18,
10, 11, 12, 19,
7, 8, 9, 20]
# 45 * 45: PCA mat
self.register_buffer('hands_components', torch.from_numpy(manoData['hands_components'].astype(np.float32)))
hands_components_inv = torch.inverse(self.hands_components)
self.register_buffer('hands_components_inv', hands_components_inv)
# 16 * 778, J_regressor is a scipy csc matrix
J_regressor = manoData['J_regressor'].tocoo(copy=False)
location = []
data = []
for i in range(J_regressor.data.shape[0]):
location.append([J_regressor.row[i], J_regressor.col[i]])
data.append(J_regressor.data[i])
i = torch.LongTensor(location)
v = torch.FloatTensor(data)
self.register_buffer('J_regressor', torch.sparse.FloatTensor(i.t(), v, torch.Size([16, 778])).to_dense(),
persistent=False)
# 16 * 3
self.register_buffer('J_zero', torch.from_numpy(manoData['J'].astype(np.float32)), persistent=False)
# 778 * 16
self.register_buffer('weights', torch.from_numpy(manoData['weights'].astype(np.float32)), persistent=False)
# (778, 3, 135)
self.register_buffer('posedirs', torch.from_numpy(manoData['posedirs'].astype(np.float32)), persistent=False)
# (778, 3)
self.register_buffer('v_template', torch.from_numpy(manoData['v_template'].astype(np.float32)), persistent=False)
# (778, 3, 10) shapedirs is <class 'chumpy.reordering.Select'>
if isinstance(manoData['shapedirs'], np.ndarray):
self.register_buffer('shapedirs', torch.Tensor(manoData['shapedirs']).float(), persistent=False)
else:
self.register_buffer('shapedirs', torch.Tensor(manoData['shapedirs'].r.copy()).float(), persistent=False)
# 45
self.register_buffer('hands_mean', torch.from_numpy(manoData['hands_mean'].astype(np.float32)), persistent=False)
self.faces = manoData['f'] # 1538 * 3: faces
self.parent = [-1, ]
for i in range(1, 16):
self.parent.append(manoData['kintree_table'][0, i])
def get_faces(self):
return self.faces
def train(self, mode=True):
self.is_train = mode
def eval(self):
self.train(False)
def pca2axis(self, pca):
rotation_axis = pca.mm(self.hands_components[:pca.shape[1]]) # bs * 45
rotation_axis = rotation_axis + self.hands_mean
return rotation_axis # bs * 45
def pca2Rmat(self, pca):
return self.axis2Rmat(self.pca2axis(pca))
def axis2Rmat(self, axis):
# axis: bs x 45
rotation_mat = rodrigues_batch(axis.view(-1, 3))
rotation_mat = rotation_mat.view(-1, 15, 3, 3)
return rotation_mat
def axis2pca(self, axis):
# axis: bs x 45
pca = axis - self.hands_mean
pca = pca.mm(self.hands_components_inv)
return pca
def Rmat2pca(self, R):
# R: bs x 15 x 3 x 3
return self.axis2pca(self.Rmat2axis(R))
def Rmat2axis(self, R):
# R: bs x 3 x 3
R = R.view(-1, 3, 3)
temp = (R - R.permute(0, 2, 1)) / 2
L = temp[:, [2, 0, 1], [1, 2, 0]] # bs x 3
sin = torch.norm(L, dim=1, keepdim=False) # bs
L = L / (sin.unsqueeze(-1) + 1e-8)
temp = (R + R.permute(0, 2, 1)) / 2
temp = temp - torch.eye((3), dtype=R.dtype, device=R.device)
temp2 = torch.matmul(L.unsqueeze(-1), L.unsqueeze(1))
temp2 = temp2 - torch.eye((3), dtype=R.dtype, device=R.device)
temp = temp[:, 0, 0] + temp[:, 1, 1] + temp[:, 2, 2]
temp2 = temp2[:, 0, 0] + temp2[:, 1, 1] + temp2[:, 2, 2]
cos = 1 - temp / (temp2 + 1e-8) # bs
sin = torch.clamp(sin, min=-1 + 1e-7, max=1 - 1e-7)
theta = torch.asin(sin)
# prevent in-place operation
theta2 = torch.zeros_like(theta)
theta2[:] = theta
idx1 = (cos < 0) & (sin > 0)
idx2 = (cos < 0) & (sin < 0)
theta2[idx1] = 3.14159 - theta[idx1]
theta2[idx2] = -3.14159 - theta[idx2]
axis = theta2.unsqueeze(-1) * L
return axis.view(-1, 45)
def get_local_frame(self, shape):
# output: frame[..., [0,1,2]] = [splay, bend, twist]
# get local joint frame at zero pose
with torch.no_grad():
shapeBlendShape = torch.matmul(self.shapedirs, shape.permute(1, 0)).permute(2, 0, 1)
v_shaped = self.v_template + shapeBlendShape # bs * 778 * 3
j_tpose = torch.matmul(self.J_regressor, v_shaped) # bs * 16 * 3
j_tpose_21 = torch.cat((j_tpose, v_shaped[:, [744, 320, 444, 555, 672]]), axis=1)
j_tpose_21 = j_tpose_21[:, self.new_order]
frame = build_mano_frame(j_tpose_21)
return frame # bs x 15 x 3 x 3
@staticmethod
def buildSE3_batch(R, t):
# R: bs * 3 * 3
# t: bs * 3 * 1
# return: bs * 4 * 4
bs = R.shape[0]
pad = torch.zeros((bs, 1, 4), dtype=R.dtype, device=R.device)
pad[:, 0, 3] = 1.0
temp = torch.cat([R, t], 2) # bs * 3 * 4
return torch.cat([temp, pad], 1)
@staticmethod
def SE3_apply(SE3, v):
# SE3: bs * 4 * 4
# v: bs * 3
# return: bs * 3
bs = v.shape[0]
pad = torch.ones((bs, 1), dtype=v.dtype, device=v.device)
temp = torch.cat([v, pad], 1).unsqueeze(2) # bs * 4 * 1
return SE3.bmm(temp)[:, :3, 0]
def forward(self, root_rotation, pose, shape, trans=None, scale=None):
# input
# root_rotation : bs * 3 * 3
# pose : bs * ncomps or bs * 15 * 3 * 3
# shape : bs * 10
# trans : bs * 3 or None
# scale : bs or None
bs = root_rotation.shape[0]
if self.use_pca:
rotation_mat = self.pca2Rmat(pose)
else:
rotation_mat = pose
shapeBlendShape = torch.matmul(self.shapedirs, shape.permute(1, 0)).permute(2, 0, 1)
v_shaped = self.v_template + shapeBlendShape # bs * 778 * 3
j_tpose = torch.matmul(self.J_regressor, v_shaped) # bs * 16 * 3
Imat = torch.eye(3, dtype=rotation_mat.dtype, device=rotation_mat.device).repeat(bs, 15, 1, 1)
pose_shape = rotation_mat.view(bs, -1) - Imat.view(bs, -1) # bs * 135
poseBlendShape = torch.matmul(self.posedirs, pose_shape.permute(1, 0)).permute(2, 0, 1)
v_tpose = v_shaped + poseBlendShape # bs * 778 * 3
SE3_j = []
R = root_rotation
t = (torch.eye(3, dtype=pose.dtype, device=pose.device).repeat(bs, 1, 1) - R).bmm(j_tpose[:, 0].unsqueeze(2))
SE3_j.append(self.buildSE3_batch(R, t))
for i in range(1, 16):
R = rotation_mat[:, i - 1]
t = (torch.eye(3, dtype=pose.dtype, device=pose.device).repeat(bs, 1, 1) - R).bmm(j_tpose[:, i].unsqueeze(2))
SE3_local = self.buildSE3_batch(R, t)
SE3_j.append(torch.matmul(SE3_j[self.parent[i]], SE3_local))
SE3_j = torch.stack(SE3_j, dim=1) # bs * 16 * 4 * 4
j_withoutTips = []
j_withoutTips.append(j_tpose[:, 0])
for i in range(1, 16):
j_withoutTips.append(self.SE3_apply(SE3_j[:, self.parent[i]], j_tpose[:, i]))
        # there is no broadcast matmul for sparse matrices for now (pytorch 1.6.0)
SE3_v = torch.matmul(self.weights, SE3_j.view(bs, 16, 16)).view(bs, -1, 4, 4) # bs * 778 * 4 * 4
v_output = SE3_v[:, :, :3, :3].matmul(v_tpose.unsqueeze(3)) + SE3_v[:, :, :3, 3:4]
v_output = v_output[:, :, :, 0] # bs * 778 * 3
jList = j_withoutTips + [v_output[:, 745], v_output[:, 317], v_output[:, 444], v_output[:, 556], v_output[:, 673]]
j_output = torch.stack(jList, dim=1)
j_output = j_output[:, self.new_order]
if self.center_idx is not None:
center = j_output[:, self.center_idx:(self.center_idx + 1)]
v_output = v_output - center
j_output = j_output - center
if scale is not None:
scale = scale.unsqueeze(1).unsqueeze(2) # bs * 1 * 1
v_output = v_output * scale
j_output = j_output * scale
if trans is not None:
trans = trans.unsqueeze(1) # bs * 1 * 3
v_output = v_output + trans
j_output = j_output + trans
if self.new_skel:
j_output[:, 5] = (v_output[:, 63] + v_output[:, 144]) / 2
j_output[:, 9] = (v_output[:, 271] + v_output[:, 220]) / 2
j_output[:, 13] = (v_output[:, 148] + v_output[:, 290]) / 2
j_output[:, 17] = (v_output[:, 770] + v_output[:, 83]) / 2
return v_output, j_output
if __name__ == '__main__':
convert_mano_pkl('models/MANO_RIGHT.pkl', 'MANO_RIGHT.pkl')
convert_mano_pkl('models/MANO_LEFT.pkl', 'MANO_LEFT.pkl')
mano = ManoLayer(manoPath='models/MANO_RIGHT.pkl', center_idx=9, use_pca=True)
pose = torch.rand((10, 30))
shape = torch.rand((10, 10))
rotation = torch.rand((10, 3))
root_rotation = rodrigues_batch(rotation)
trans = torch.rand((10, 3))
scale = torch.rand((10))
v, j = mano(root_rotation=root_rotation,
pose=pose,
shape=shape,
trans=trans,
scale=scale)
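    # expected shapes: v -> (10, 778, 3) mesh vertices, j -> (10, 21, 3) joints
    print(v.shape, j.shape)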
| 13,352 | 38.158358 | 122 | py |
Im2Hands | Im2Hands-main/dependencies/intaghand/models/model_zoo/fc.py | import torch.nn as nn
class noop(nn.Module):
def forward(self, x):
return x
def build_activate_layer(actType):
if actType == 'relu':
return nn.ReLU(inplace=True)
elif actType == 'lrelu':
return nn.LeakyReLU(0.1, inplace=True)
elif actType == 'elu':
return nn.ELU(inplace=True)
elif actType == 'sigmoid':
return nn.Sigmoid()
elif actType == 'tanh':
return nn.Tanh()
elif actType == 'noop':
return noop()
else:
raise RuntimeError('no such activate layer!')
def build_fc_layer(inDim, outDim, actFun='relu', dropout_prob=-1, weight_norm=False):
net = []
if dropout_prob > 0:
net.append(nn.Dropout(p=dropout_prob))
if weight_norm:
net.append(nn.utils.weight_norm(nn.Linear(inDim, outDim)))
else:
net.append(nn.Linear(inDim, outDim))
net.append(build_activate_layer(actFun))
return nn.Sequential(*net)
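if __name__ == '__main__':
    import torch
    # Minimal usage sketch: stack two fc blocks into a small prediction head.
    head = nn.Sequential(
        build_fc_layer(256, 128, actFun='relu', dropout_prob=0.1),
        build_fc_layer(128, 1, actFun='sigmoid'),
    )
    print(head(torch.randn(4, 256)).shape)  # torch.Size([4, 1])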
| 946 | 25.305556 | 85 | py |
Im2Hands | Im2Hands-main/dependencies/intaghand/models/model_zoo/hrnet.py | # ------------------------------------------------------------------------------
# Copyright (c) Microsoft
# Licensed under the MIT License.
# Written by Bin Xiao (Bin.Xiao@microsoft.com)
# Modified by Ke Sun (sunk@mail.ustc.edu.cn)
# ------------------------------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import torch
import torch.nn as nn
import torch._utils
import torch.nn.functional as F
BN_MOMENTUM = 0.1
ALIGN_CORNERS = True
def conv3x3(in_planes, out_planes, stride=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)
self.conv3 = nn.Conv2d(planes, planes * self.expansion, kernel_size=1,
bias=False)
self.bn3 = nn.BatchNorm2d(planes * self.expansion,
momentum=BN_MOMENTUM)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class HighResolutionModule(nn.Module):
def __init__(self, num_branches, blocks, num_blocks, num_inchannels,
num_channels, fuse_method, multi_scale_output=True):
super(HighResolutionModule, self).__init__()
self._check_branches(
num_branches, blocks, num_blocks, num_inchannels, num_channels)
self.num_inchannels = num_inchannels
self.fuse_method = fuse_method
self.num_branches = num_branches
self.multi_scale_output = multi_scale_output
self.branches = self._make_branches(
num_branches, blocks, num_blocks, num_channels)
self.fuse_layers = self._make_fuse_layers()
self.relu = nn.ReLU(False)
def _check_branches(self, num_branches, blocks, num_blocks,
num_inchannels, num_channels):
if num_branches != len(num_blocks):
error_msg = 'NUM_BRANCHES({}) <> NUM_BLOCKS({})'.format(
num_branches, len(num_blocks))
raise ValueError(error_msg)
if num_branches != len(num_channels):
error_msg = 'NUM_BRANCHES({}) <> NUM_CHANNELS({})'.format(
num_branches, len(num_channels))
raise ValueError(error_msg)
if num_branches != len(num_inchannels):
error_msg = 'NUM_BRANCHES({}) <> NUM_INCHANNELS({})'.format(
num_branches, len(num_inchannels))
raise ValueError(error_msg)
def _make_one_branch(self, branch_index, block, num_blocks, num_channels,
stride=1):
downsample = None
if stride != 1 or \
self.num_inchannels[branch_index] != num_channels[branch_index] * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.num_inchannels[branch_index],
num_channels[branch_index] * block.expansion,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(num_channels[branch_index] * block.expansion,
momentum=BN_MOMENTUM),
)
layers = []
layers.append(block(self.num_inchannels[branch_index],
num_channels[branch_index], stride, downsample))
self.num_inchannels[branch_index] = \
num_channels[branch_index] * block.expansion
for i in range(1, num_blocks[branch_index]):
layers.append(block(self.num_inchannels[branch_index],
num_channels[branch_index]))
return nn.Sequential(*layers)
def _make_branches(self, num_branches, block, num_blocks, num_channels):
branches = []
for i in range(num_branches):
branches.append(
self._make_one_branch(i, block, num_blocks, num_channels))
return nn.ModuleList(branches)
def _make_fuse_layers(self):
if self.num_branches == 1:
return None
num_branches = self.num_branches
num_inchannels = self.num_inchannels
fuse_layers = []
for i in range(num_branches if self.multi_scale_output else 1):
fuse_layer = []
for j in range(num_branches):
if j > i:
fuse_layer.append(nn.Sequential(
nn.Conv2d(num_inchannels[j],
num_inchannels[i],
1,
1,
0,
bias=False),
nn.BatchNorm2d(num_inchannels[i],
momentum=BN_MOMENTUM),
nn.Upsample(scale_factor=2**(j - i), mode='nearest')))
elif j == i:
fuse_layer.append(None)
else:
conv3x3s = []
for k in range(i - j):
if k == i - j - 1:
num_outchannels_conv3x3 = num_inchannels[i]
conv3x3s.append(nn.Sequential(
nn.Conv2d(num_inchannels[j],
num_outchannels_conv3x3,
3, 2, 1, bias=False),
nn.BatchNorm2d(num_outchannels_conv3x3,
momentum=BN_MOMENTUM)))
else:
num_outchannels_conv3x3 = num_inchannels[j]
conv3x3s.append(nn.Sequential(
nn.Conv2d(num_inchannels[j],
num_outchannels_conv3x3,
3, 2, 1, bias=False),
nn.BatchNorm2d(num_outchannels_conv3x3,
momentum=BN_MOMENTUM),
nn.ReLU(False)))
fuse_layer.append(nn.Sequential(*conv3x3s))
fuse_layers.append(nn.ModuleList(fuse_layer))
return nn.ModuleList(fuse_layers)
def get_num_inchannels(self):
return self.num_inchannels
def forward(self, x):
if self.num_branches == 1:
return [self.branches[0](x[0])]
for i in range(self.num_branches):
x[i] = self.branches[i](x[i])
x_fuse = []
for i in range(len(self.fuse_layers)):
y = x[0] if i == 0 else self.fuse_layers[i][0](x[0])
for j in range(1, self.num_branches):
if i == j:
y = y + x[j]
else:
y = y + self.fuse_layers[i][j](x[j])
x_fuse.append(self.relu(y))
return x_fuse
blocks_dict = {
'BASIC': BasicBlock,
'BOTTLENECK': Bottleneck
}
class HighResolutionNet(nn.Module):
def __init__(self, cfg,
in_channels=3,
head_type="none",
class_num=1000,
out_channels=21,
return_fmapList=False):
super(HighResolutionNet, self).__init__()
self.return_fmapList = return_fmapList
self.conv1 = nn.Conv2d(in_channels, 64, kernel_size=3, stride=2, padding=1,
bias=False)
self.bn1 = nn.BatchNorm2d(64, momentum=BN_MOMENTUM)
self.conv2 = nn.Conv2d(64, 64, kernel_size=3, stride=2, padding=1,
bias=False)
self.bn2 = nn.BatchNorm2d(64, momentum=BN_MOMENTUM)
self.relu = nn.ReLU(inplace=True)
self.stage1_cfg = cfg['MODEL']['EXTRA']['STAGE1']
num_channels = self.stage1_cfg['NUM_CHANNELS'][0]
block = blocks_dict[self.stage1_cfg['BLOCK']]
num_blocks = self.stage1_cfg['NUM_BLOCKS'][0]
self.layer1 = self._make_layer(block, 64, num_channels, num_blocks)
stage1_out_channel = block.expansion * num_channels
self.stage2_cfg = cfg['MODEL']['EXTRA']['STAGE2']
num_channels = self.stage2_cfg['NUM_CHANNELS']
block = blocks_dict[self.stage2_cfg['BLOCK']]
num_channels = [
num_channels[i] * block.expansion for i in range(len(num_channels))]
self.transition1 = self._make_transition_layer(
[stage1_out_channel], num_channels)
self.stage2, pre_stage_channels = self._make_stage(
self.stage2_cfg, num_channels)
self.stage3_cfg = cfg['MODEL']['EXTRA']['STAGE3']
num_channels = self.stage3_cfg['NUM_CHANNELS']
block = blocks_dict[self.stage3_cfg['BLOCK']]
num_channels = [
num_channels[i] * block.expansion for i in range(len(num_channels))]
self.transition2 = self._make_transition_layer(
pre_stage_channels, num_channels)
self.stage3, pre_stage_channels = self._make_stage(
self.stage3_cfg, num_channels)
self.stage4_cfg = cfg['MODEL']['EXTRA']['STAGE4']
num_channels = self.stage4_cfg['NUM_CHANNELS']
block = blocks_dict[self.stage4_cfg['BLOCK']]
num_channels = [
num_channels[i] * block.expansion for i in range(len(num_channels))]
self.transition3 = self._make_transition_layer(
pre_stage_channels, num_channels)
self.stage4, pre_stage_channels = self._make_stage(
self.stage4_cfg, num_channels, multi_scale_output=True)
self.head_type = head_type
if self.head_type == 'vector':
self.class_num = class_num
# Classification Head
self.incre_modules, self.downsamp_modules, \
self.final_layer = self._make_head(pre_stage_channels)
self.classifier = nn.Linear(2048, class_num)
elif self.head_type == 'feature_map':
self.out_channels = out_channels
last_inp_channels = 0
for temp in pre_stage_channels:
last_inp_channels += temp
self.last_layer = nn.Sequential(
nn.Conv2d(
in_channels=last_inp_channels,
out_channels=last_inp_channels,
kernel_size=1,
stride=1,
padding=0),
nn.BatchNorm2d(last_inp_channels),
nn.ReLU(inplace=True),
nn.Conv2d(
in_channels=last_inp_channels,
out_channels=out_channels,
kernel_size=1,
stride=1,
padding=0)
)
elif self.head_type == 'vector+feature_map':
self.class_num = class_num
# Classification Head
self.incre_modules, self.downsamp_modules, \
self.final_layer = self._make_head(pre_stage_channels)
self.classifier = nn.Linear(2048, class_num)
self.out_channels = out_channels
last_inp_channels = 0
for temp in pre_stage_channels:
last_inp_channels += temp
self.last_layer = nn.Sequential(
nn.Conv2d(
in_channels=last_inp_channels,
out_channels=last_inp_channels,
kernel_size=1,
stride=1,
padding=0),
nn.BatchNorm2d(last_inp_channels),
nn.ReLU(inplace=True),
nn.Conv2d(
in_channels=last_inp_channels,
out_channels=out_channels,
kernel_size=1,
stride=1,
padding=0)
)
else:
self.head_type = 'none'
def _make_head(self, pre_stage_channels):
head_block = Bottleneck
head_channels = [32, 64, 128, 256]
# Increasing the #channels on each resolution
# from C, 2C, 4C, 8C to 128, 256, 512, 1024
incre_modules = []
for i, channels in enumerate(pre_stage_channels):
incre_module = self._make_layer(head_block,
channels,
head_channels[i],
1,
stride=1)
incre_modules.append(incre_module)
incre_modules = nn.ModuleList(incre_modules)
# downsampling modules
downsamp_modules = []
for i in range(len(pre_stage_channels) - 1):
in_channels = head_channels[i] * head_block.expansion
out_channels = head_channels[i + 1] * head_block.expansion
downsamp_module = nn.Sequential(
nn.Conv2d(in_channels=in_channels,
out_channels=out_channels,
kernel_size=3,
stride=2,
padding=1),
nn.BatchNorm2d(out_channels, momentum=BN_MOMENTUM),
nn.ReLU(inplace=True)
)
downsamp_modules.append(downsamp_module)
downsamp_modules = nn.ModuleList(downsamp_modules)
final_layer = nn.Sequential(
nn.Conv2d(
in_channels=head_channels[3] * head_block.expansion,
out_channels=2048,
kernel_size=1,
stride=1,
padding=0
),
nn.BatchNorm2d(2048, momentum=BN_MOMENTUM),
nn.ReLU(inplace=True)
)
return incre_modules, downsamp_modules, final_layer
def _make_transition_layer(
self, num_channels_pre_layer, num_channels_cur_layer):
num_branches_cur = len(num_channels_cur_layer)
num_branches_pre = len(num_channels_pre_layer)
transition_layers = []
for i in range(num_branches_cur):
if i < num_branches_pre:
if num_channels_cur_layer[i] != num_channels_pre_layer[i]:
transition_layers.append(nn.Sequential(
nn.Conv2d(num_channels_pre_layer[i],
num_channels_cur_layer[i],
3,
1,
1,
bias=False),
nn.BatchNorm2d(
num_channels_cur_layer[i], momentum=BN_MOMENTUM),
nn.ReLU(inplace=True)))
else:
transition_layers.append(None)
else:
conv3x3s = []
for j in range(i + 1 - num_branches_pre):
inchannels = num_channels_pre_layer[-1]
outchannels = num_channels_cur_layer[i] \
if j == i - num_branches_pre else inchannels
conv3x3s.append(nn.Sequential(
nn.Conv2d(
inchannels, outchannels, 3, 2, 1, bias=False),
nn.BatchNorm2d(outchannels, momentum=BN_MOMENTUM),
nn.ReLU(inplace=True)))
transition_layers.append(nn.Sequential(*conv3x3s))
return nn.ModuleList(transition_layers)
def _make_layer(self, block, inplanes, planes, blocks, stride=1):
downsample = None
if stride != 1 or inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion, momentum=BN_MOMENTUM),
)
layers = []
layers.append(block(inplanes, planes, stride, downsample))
inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(inplanes, planes))
return nn.Sequential(*layers)
def _make_stage(self, layer_config, num_inchannels,
multi_scale_output=True):
num_modules = layer_config['NUM_MODULES']
num_branches = layer_config['NUM_BRANCHES']
num_blocks = layer_config['NUM_BLOCKS']
num_channels = layer_config['NUM_CHANNELS']
block = blocks_dict[layer_config['BLOCK']]
fuse_method = layer_config['FUSE_METHOD']
modules = []
for i in range(num_modules):
            # multi_scale_output only takes effect in the last module
if not multi_scale_output and i == num_modules - 1:
reset_multi_scale_output = False
else:
reset_multi_scale_output = True
modules.append(
HighResolutionModule(num_branches,
block,
num_blocks,
num_inchannels,
num_channels,
fuse_method,
reset_multi_scale_output)
)
num_inchannels = modules[-1].get_num_inchannels()
return nn.Sequential(*modules), num_inchannels
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.conv2(x)
x = self.bn2(x)
x = self.relu(x)
x = self.layer1(x)
x_list = []
for i in range(self.stage2_cfg['NUM_BRANCHES']):
if self.transition1[i] is not None:
x_list.append(self.transition1[i](x))
else:
x_list.append(x)
y_list = self.stage2(x_list)
x_list = []
for i in range(self.stage3_cfg['NUM_BRANCHES']):
if self.transition2[i] is not None:
x_list.append(self.transition2[i](y_list[-1]))
else:
x_list.append(y_list[i])
y_list = self.stage3(x_list)
x_list = []
for i in range(self.stage4_cfg['NUM_BRANCHES']):
if self.transition3[i] is not None:
x_list.append(self.transition3[i](y_list[-1]))
else:
x_list.append(y_list[i])
y_list = self.stage4(x_list)
if self.head_type == 'none':
return y_list
elif self.head_type == 'vector':
# Classification Head
y = self.incre_modules[0](y_list[0])
for i in range(len(self.downsamp_modules)):
y = self.incre_modules[i + 1](y_list[i + 1]) + \
self.downsamp_modules[i](y)
y = self.final_layer(y)
if torch._C._get_tracing_state():
y = y.flatten(start_dim=2).mean(dim=2)
else:
y = F.avg_pool2d(y, kernel_size=y.size()
[2:]).view(y.size(0), -1)
y = self.classifier(y)
if self.return_fmapList:
return y, y_list
else:
return y
elif self.head_type == 'feature_map':
x = y_list
# Upsampling
x0_h, x0_w = x[0].size(2), x[0].size(3)
x1 = F.interpolate(x[1], size=(x0_h, x0_w), mode='bilinear', align_corners=ALIGN_CORNERS)
x2 = F.interpolate(x[2], size=(x0_h, x0_w), mode='bilinear', align_corners=ALIGN_CORNERS)
x3 = F.interpolate(x[3], size=(x0_h, x0_w), mode='bilinear', align_corners=ALIGN_CORNERS)
x = torch.cat([x[0], x1, x2, x3], 1)
x = self.last_layer(x)
if self.return_fmapList:
return x, y_list
else:
return x
elif self.head_type == 'vector+feature_map':
# Classification Head
f = self.incre_modules[0](y_list[0])
for i in range(len(self.downsamp_modules)):
f = self.incre_modules[i + 1](y_list[i + 1]) + \
self.downsamp_modules[i](f)
f = self.final_layer(f)
if torch._C._get_tracing_state():
f = f.flatten(start_dim=2).mean(dim=2)
else:
f = F.avg_pool2d(f, kernel_size=f.size()
[2:]).view(f.size(0), -1)
f = self.classifier(f)
# Upsampling
x0_h, x0_w = y_list[0].size(2), y_list[0].size(3)
x1 = F.interpolate(y_list[1], size=(x0_h, x0_w), mode='bilinear', align_corners=ALIGN_CORNERS)
x2 = F.interpolate(y_list[2], size=(x0_h, x0_w), mode='bilinear', align_corners=ALIGN_CORNERS)
x3 = F.interpolate(y_list[3], size=(x0_h, x0_w), mode='bilinear', align_corners=ALIGN_CORNERS)
x = torch.cat([y_list[0], x1, x2, x3], 1)
x = self.last_layer(x)
if self.return_fmapList:
return f, x, y_list
else:
return f, x
def init_weights(self, pretrained='',):
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(
m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
if os.path.isfile(pretrained):
pretrained_dict = torch.load(pretrained)
model_dict = self.state_dict()
pretrained_dict = {k: v for k, v in pretrained_dict.items()
if k in model_dict.keys()}
model_dict.update(pretrained_dict)
self.load_state_dict(model_dict)
def stage_config(NUM_MODULES=1, NUM_BRANCHES=1, BLOCK='BOTTLENECK', NUM_BLOCKS=[4], NUM_CHANNELS=[64], FUSE_METHOD='SUM'):
    cfg = {}
    cfg['NUM_MODULES'] = NUM_MODULES
    cfg['NUM_BRANCHES'] = NUM_BRANCHES
    cfg['BLOCK'] = BLOCK
    cfg['NUM_BLOCKS'] = NUM_BLOCKS
    cfg['NUM_CHANNELS'] = NUM_CHANNELS
    cfg['FUSE_METHOD'] = FUSE_METHOD
    return cfg
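# Example (illustrative addition, not part of the original file): the stage
# description consumed by _make_stage, e.g. stage 2 of HRNet-W32:
#   stage_config(1, 2, 'BASIC', [4, 4], [32, 64], 'SUM')
#   -> {'NUM_MODULES': 1, 'NUM_BRANCHES': 2, 'BLOCK': 'BASIC',
#       'NUM_BLOCKS': [4, 4], 'NUM_CHANNELS': [32, 64], 'FUSE_METHOD': 'SUM'}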
def get_config(name='none'):
cfg = {}
cfg['MODEL'] = {}
cfg['MODEL']['EXTRA'] = {}
if name == 'w18':
cfg['MODEL']['EXTRA']['STAGE1'] = stage_config(1, 1, 'BOTTLENECK', [4], [64], 'SUM')
cfg['MODEL']['EXTRA']['STAGE2'] = stage_config(1, 2, 'BASIC', [4, 4], [18, 36], 'SUM')
cfg['MODEL']['EXTRA']['STAGE3'] = stage_config(4, 3, 'BASIC', [4, 4, 4], [18, 36, 72], 'SUM')
cfg['MODEL']['EXTRA']['STAGE4'] = stage_config(3, 4, 'BASIC', [4, 4, 4, 4], [18, 36, 72, 144], 'SUM')
elif name == 'w18_small_v1':
cfg['MODEL']['EXTRA']['STAGE1'] = stage_config(1, 1, 'BOTTLENECK', [1], [32], 'SUM')
cfg['MODEL']['EXTRA']['STAGE2'] = stage_config(1, 2, 'BASIC', [2, 2], [16, 32], 'SUM')
cfg['MODEL']['EXTRA']['STAGE3'] = stage_config(1, 3, 'BASIC', [2, 2, 2], [16, 32, 64], 'SUM')
cfg['MODEL']['EXTRA']['STAGE4'] = stage_config(1, 4, 'BASIC', [2, 2, 2, 2], [16, 32, 64, 128], 'SUM')
elif name == 'w18_small_v2':
cfg['MODEL']['EXTRA']['STAGE1'] = stage_config(1, 1, 'BOTTLENECK', [2], [64], 'SUM')
cfg['MODEL']['EXTRA']['STAGE2'] = stage_config(1, 2, 'BASIC', [2, 2], [18, 36], 'SUM')
cfg['MODEL']['EXTRA']['STAGE3'] = stage_config(3, 3, 'BASIC', [2, 2, 2], [18, 36, 72], 'SUM')
cfg['MODEL']['EXTRA']['STAGE4'] = stage_config(2, 4, 'BASIC', [2, 2, 2, 2], [18, 36, 72, 144], 'SUM')
elif name == 'w30':
cfg['MODEL']['EXTRA']['STAGE1'] = stage_config(1, 1, 'BOTTLENECK', [4], [64], 'SUM')
cfg['MODEL']['EXTRA']['STAGE2'] = stage_config(1, 2, 'BASIC', [4, 4], [30, 60], 'SUM')
cfg['MODEL']['EXTRA']['STAGE3'] = stage_config(4, 3, 'BASIC', [4, 4, 4], [30, 60, 120], 'SUM')
cfg['MODEL']['EXTRA']['STAGE4'] = stage_config(3, 4, 'BASIC', [4, 4, 4, 4], [30, 60, 120, 240], 'SUM')
elif name == 'w32':
cfg['MODEL']['EXTRA']['STAGE1'] = stage_config(1, 1, 'BOTTLENECK', [4], [64], 'SUM')
cfg['MODEL']['EXTRA']['STAGE2'] = stage_config(1, 2, 'BASIC', [4, 4], [32, 64], 'SUM')
cfg['MODEL']['EXTRA']['STAGE3'] = stage_config(4, 3, 'BASIC', [4, 4, 4], [32, 64, 128], 'SUM')
cfg['MODEL']['EXTRA']['STAGE4'] = stage_config(3, 4, 'BASIC', [4, 4, 4, 4], [32, 64, 128, 256], 'SUM')
elif name == 'w40':
cfg['MODEL']['EXTRA']['STAGE1'] = stage_config(1, 1, 'BOTTLENECK', [4], [64], 'SUM')
cfg['MODEL']['EXTRA']['STAGE2'] = stage_config(1, 2, 'BASIC', [4, 4], [40, 80], 'SUM')
cfg['MODEL']['EXTRA']['STAGE3'] = stage_config(4, 3, 'BASIC', [4, 4, 4], [40, 80, 160], 'SUM')
cfg['MODEL']['EXTRA']['STAGE4'] = stage_config(3, 4, 'BASIC', [4, 4, 4, 4], [40, 80, 160, 320], 'SUM')
elif name == 'w44':
cfg['MODEL']['EXTRA']['STAGE1'] = stage_config(1, 1, 'BOTTLENECK', [4], [64], 'SUM')
cfg['MODEL']['EXTRA']['STAGE2'] = stage_config(1, 2, 'BASIC', [4, 4], [44, 88], 'SUM')
cfg['MODEL']['EXTRA']['STAGE3'] = stage_config(4, 3, 'BASIC', [4, 4, 4], [44, 88, 176], 'SUM')
cfg['MODEL']['EXTRA']['STAGE4'] = stage_config(3, 4, 'BASIC', [4, 4, 4, 4], [44, 88, 176, 352], 'SUM')
elif name == 'w48':
cfg['MODEL']['EXTRA']['STAGE1'] = stage_config(1, 1, 'BOTTLENECK', [4], [64], 'SUM')
cfg['MODEL']['EXTRA']['STAGE2'] = stage_config(1, 2, 'BASIC', [4, 4], [48, 96], 'SUM')
cfg['MODEL']['EXTRA']['STAGE3'] = stage_config(4, 3, 'BASIC', [4, 4, 4], [48, 96, 192], 'SUM')
cfg['MODEL']['EXTRA']['STAGE4'] = stage_config(3, 4, 'BASIC', [4, 4, 4, 4], [48, 96, 192, 384], 'SUM')
elif name == 'w64':
cfg['MODEL']['EXTRA']['STAGE1'] = stage_config(1, 1, 'BOTTLENECK', [4], [64], 'SUM')
cfg['MODEL']['EXTRA']['STAGE2'] = stage_config(1, 2, 'BASIC', [4, 4], [64, 128], 'SUM')
cfg['MODEL']['EXTRA']['STAGE3'] = stage_config(4, 3, 'BASIC', [4, 4, 4], [64, 128, 256], 'SUM')
cfg['MODEL']['EXTRA']['STAGE4'] = stage_config(3, 4, 'BASIC', [4, 4, 4, 4], [64, 128, 256, 512], 'SUM')
else:
        raise ValueError('no such HRNet!')
return cfg
def get_hrnet(name='w18',
in_channels=3,
head_type='vector',
class_num=1000,
out_channels=21,
pretrained='',
return_fmapList=False):
model = HighResolutionNet(cfg=get_config(name=name),
in_channels=in_channels,
head_type=head_type,
class_num=class_num,
out_channels=out_channels,
return_fmapList=return_fmapList)
model.init_weights(pretrained)
return model
if __name__ == '__main__':
print('-------------------')
model = get_hrnet(name='w32', head_type='none')
model.cuda()
img = torch.rand((7, 3, 256, 256), dtype=torch.float32).cuda()
out = model(img)
print(out[0].shape)
print(out[1].shape)
print(out[2].shape)
print(out[3].shape)
print('-------------------')
model = get_hrnet(name='w32', head_type='vector', class_num=100)
model.cuda()
img = torch.rand((7, 3, 256, 256), dtype=torch.float32).cuda()
out = model(img)
print(out.shape)
print('-------------------')
model = get_hrnet(name='w32', head_type='feature_map', out_channels=21)
model.cuda()
img = torch.rand((7, 3, 256, 256), dtype=torch.float32).cuda()
out = model(img)
print(out.shape)
print('-------------------')
for name in ['w18', 'w30', 'w32', 'w40', 'w44', 'w48', 'w64']:
model = get_hrnet(name=name, head_type='none')
total = sum([param.nelement() for param in model.parameters()])
print("{} vector: {}M".format(name, total / 1e6))
model.cuda()
img = torch.rand((4, 3, 256, 256), dtype=torch.float32).cuda()
out = model(img)
print(out[0].shape)
print(out[1].shape)
print(out[2].shape)
print(out[3].shape)
# w18 vector: 19.454904M
# w30 vector: 35.86812M
# w32 vector: 39.38858M
# w40 vector: 55.71306M
# w44 vector: 65.220884M
# w48 vector: 75.625764M
# w64 vector: 126.215844M
| 30,039 | 39.430686 | 121 | py |
Im2Hands | Im2Hands-main/dependencies/intaghand/models/model_zoo/graph_utils.py | import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
# forked from https://github.com/3d-hand-shape/hand-graph-cnn
def sparse_python_to_torch(sp_python):
L = sp_python.tocoo()
indices = np.column_stack((L.row, L.col)).T
indices = indices.astype(np.int64)
indices = torch.from_numpy(indices)
indices = indices.type(torch.LongTensor)
L_data = L.data.astype(np.float32)
L_data = torch.from_numpy(L_data)
L_data = L_data.type(torch.FloatTensor)
L = torch.sparse.FloatTensor(indices, L_data, torch.Size(L.shape))
# if torch.cuda.is_available():
# L = L.cuda()
return L
def graph_max_pool(x, p):
if p > 1:
x = x.permute(0, 2, 1).contiguous() # x = B x F x V
x = nn.MaxPool1d(p)(x) # B x F x V/p
x = x.permute(0, 2, 1).contiguous() # x = B x V/p x F
return x
else:
return x
def graph_avg_pool(x, p):
if p > 1:
x = x.permute(0, 2, 1).contiguous() # x = B x F x V
x = nn.AvgPool1d(p)(x) # B x F x V/p
x = x.permute(0, 2, 1).contiguous() # x = B x V/p x F
return x
else:
return x
# Upsampling of size p.
def graph_upsample(x, p):
if p > 1:
x = x.permute(0, 2, 1).contiguous() # x = B x F x V
x = nn.Upsample(scale_factor=p)(x) # B x F x (V*p)
x = x.permute(0, 2, 1).contiguous() # x = B x (V*p) x F
return x
else:
return x
def graph_conv_cheby(x, cl, L, K=3):
# parameters
# B = batch size
# V = nb vertices
# Fin = nb input features
# Fout = nb output features
# K = Chebyshev order & support size
B, V, Fin = x.size()
B, V, Fin = int(B), int(V), int(Fin)
# transform to Chebyshev basis
x0 = x.permute(1, 2, 0).contiguous() # V x Fin x B
x0 = x0.view([V, Fin * B]) # V x Fin*B
x = x0.unsqueeze(0) # 1 x V x Fin*B
def concat(x, x_):
x_ = x_.unsqueeze(0) # 1 x V x Fin*B
return torch.cat((x, x_), 0) # K x V x Fin*B
if K > 1:
x1 = torch.mm(L, x0) # V x Fin*B
x = torch.cat((x, x1.unsqueeze(0)), 0) # 2 x V x Fin*B
for k in range(2, K):
x2 = 2 * torch.mm(L, x1) - x0
x = torch.cat((x, x2.unsqueeze(0)), 0) # M x Fin*B
x0, x1 = x1, x2
x = x.view([K, V, Fin, B]) # K x V x Fin x B
x = x.permute(3, 1, 2, 0).contiguous() # B x V x Fin x K
x = x.view([B * V, Fin * K]) # B*V x Fin*K
# Compose linearly Fin features to get Fout features
x = cl(x) # B*V x Fout
x = x.view([B, V, -1]) # B x V x Fout
return x
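# Usage sketch (illustrative addition; `_demo_graph_conv_cheby` is not part of
# the original API): run a K=3 Chebyshev graph convolution on a toy graph.
# The identity matrix stands in for a rescaled graph Laplacian.
def _demo_graph_conv_cheby():
    B, V, Fin, Fout, K = 2, 4, 8, 16, 3
    x = torch.randn(B, V, Fin)
    L = torch.eye(V)               # stand-in Laplacian; dense works with torch.mm
    cl = nn.Linear(Fin * K, Fout)  # linearly mixes the K Chebyshev bases
    y = graph_conv_cheby(x, cl, L, K=K)
    assert y.shape == (B, V, Fout)
    return y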
class Graph_CNN_Feat_Mesh(nn.Module):
def __init__(self, num_input_chan, num_mesh_output_chan, graph_L):
print('Graph ConvNet: feature to mesh')
super(Graph_CNN_Feat_Mesh, self).__init__()
self.num_input_chan = num_input_chan
self.num_mesh_output_chan = num_mesh_output_chan
self.graph_L = graph_L
# parameters
self.CL_F = [64, 32, num_mesh_output_chan]
self.CL_K = [3, 3]
self.layers_per_block = [2, 2]
self.FC_F = [num_input_chan, 512, self.CL_F[0] * self.graph_L[-1].shape[0]]
self.fc = nn.Sequential()
for fc_id in range(len(self.FC_F) - 1):
if fc_id == 0:
use_activation = True
else:
use_activation = False
self.fc.add_module('fc_%d' % (fc_id + 1), FCLayer(self.FC_F[fc_id],
self.FC_F[fc_id + 1], use_dropout=False,
use_activation=use_activation))
_cl = []
_bn = []
for block_i in range(len(self.CL_F) - 1):
for layer_i in range(self.layers_per_block[block_i]):
Fin = self.CL_K[block_i] * self.CL_F[block_i]
                if layer_i != self.layers_per_block[block_i] - 1:
Fout = self.CL_F[block_i]
else:
Fout = self.CL_F[block_i + 1]
_cl.append(nn.Linear(Fin, Fout))
scale = np.sqrt(2.0 / (Fin + Fout))
_cl[-1].weight.data.uniform_(-scale, scale)
_cl[-1].bias.data.fill_(0.0)
if block_i == len(self.CL_F) - 2 and layer_i == self.layers_per_block[block_i] - 1:
_bn.append(None)
else:
_bn.append(nn.BatchNorm1d(Fout))
self.cl = nn.ModuleList(_cl)
self.bn = nn.ModuleList(_bn)
# convert scipy sparse matric L to pytorch
for graph_i in range(len(graph_L)):
self.graph_L[graph_i] = sparse_python_to_torch(self.graph_L[graph_i])
def init_weights(self, W, Fin, Fout):
scale = np.sqrt(2.0 / (Fin + Fout))
W.uniform_(-scale, scale)
return W
def graph_conv_cheby(self, x, cl, bn, L, Fout, K):
# parameters
# B = batch size
# V = nb vertices
# Fin = nb input features
# Fout = nb output features
# K = Chebyshev order & support size
B, V, Fin = x.size()
B, V, Fin = int(B), int(V), int(Fin)
# transform to Chebyshev basis
x0 = x.permute(1, 2, 0).contiguous() # V x Fin x B
x0 = x0.view([V, Fin * B]) # V x Fin*B
x = x0.unsqueeze(0) # 1 x V x Fin*B
def concat(x, x_):
x_ = x_.unsqueeze(0) # 1 x V x Fin*B
return torch.cat((x, x_), 0) # K x V x Fin*B
if K > 1:
x1 = my_sparse_mm()(L, x0) # V x Fin*B
x = torch.cat((x, x1.unsqueeze(0)), 0) # 2 x V x Fin*B
for k in range(2, K):
x2 = 2 * my_sparse_mm()(L, x1) - x0
x = torch.cat((x, x2.unsqueeze(0)), 0) # M x Fin*B
x0, x1 = x1, x2
x = x.view([K, V, Fin, B]) # K x V x Fin x B
x = x.permute(3, 1, 2, 0).contiguous() # B x V x Fin x K
x = x.view([B * V, Fin * K]) # B*V x Fin*K
# Compose linearly Fin features to get Fout features
x = cl(x) # B*V x Fout
if bn is not None:
x = bn(x) # B*V x Fout
x = x.view([B, V, Fout]) # B x V x Fout
return x
# Upsampling of size p.
def graph_upsample(self, x, p):
if p > 1:
x = x.permute(0, 2, 1).contiguous() # x = B x F x V
x = nn.Upsample(scale_factor=p)(x) # B x F x (V*p)
x = x.permute(0, 2, 1).contiguous() # x = B x (V*p) x F
return x
else:
return x
def forward(self, x):
# x: B x num_input_chan
x = self.fc(x)
# x: B x (self.CL_F[0] * self.graph_L[-1].shape[0])
x = x.view(-1, self.graph_L[-1].shape[0], self.CL_F[0])
# x: B x 80 x 64
cl_i = 0
for block_i in range(len(self.CL_F) - 1):
x = self.graph_upsample(x, 2)
x = self.graph_upsample(x, 2)
for layer_i in range(self.layers_per_block[block_i]):
                if layer_i != self.layers_per_block[block_i] - 1:
Fout = self.CL_F[block_i]
else:
Fout = self.CL_F[block_i + 1]
x = self.graph_conv_cheby(x, self.cl[cl_i], self.bn[cl_i], self.graph_L[-(block_i * 2 + 3)],
# 2 - block_i*2],
Fout, self.CL_K[block_i])
                if block_i != len(self.CL_F) - 2 or layer_i != self.layers_per_block[block_i] - 1:
x = F.relu(x)
cl_i = cl_i + 1
return x # x: B x 1280 x 3
| 7,647 | 31.824034 | 108 | py |
Im2Hands | Im2Hands-main/dependencies/intaghand/models/model_zoo/__init__.py | import torch.nn as nn
from .fc import build_fc_layer
from .hrnet import get_hrnet, Bottleneck
from .coarsening import build_graph
from .graph_utils import graph_upsample, graph_avg_pool
__all__ = ['build_fc_layer', 'get_hrnet', 'Bottleneck',
'build_graph', 'GCN_vert_convert', 'graph_upsample', 'graph_avg_pool',
'weights_init', 'conv1x1', 'conv3x3', 'deconv3x3']
class noop(nn.Module):
def forward(self, x):
return x
def build_activate_layer(actType):
if actType == 'relu':
return nn.ReLU(inplace=True)
elif actType == 'lrelu':
return nn.LeakyReLU(0.1, inplace=True)
elif actType == 'elu':
return nn.ELU(inplace=True)
elif actType == 'sigmoid':
return nn.Sigmoid()
elif actType == 'tanh':
return nn.Tanh()
elif actType == 'noop':
return noop()
else:
raise RuntimeError('no such activate layer!')
def weights_init(layer):
classname = layer.__class__.__name__
# print(classname)
if classname.find('Conv2d') != -1:
nn.init.kaiming_normal_(layer.weight.data)
elif classname.find('Linear') != -1:
nn.init.kaiming_normal_(layer.weight.data)
if layer.bias is not None:
nn.init.constant_(layer.bias.data, 0.0)
class Flatten(nn.Module):
def forward(self, x):
return x.view(x.size(0), -1)
class unFlatten(nn.Module):
def forward(self, x):
return x.view(x.size(0), -1, 1, 1)
def conv1x1(in_channels, out_channels, stride=1, bn_init_zero=False, actFun='relu'):
bn = nn.BatchNorm2d(out_channels)
nn.init.constant_(bn.weight, 0. if bn_init_zero else 1.)
layers = [nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, padding=0, bias=False),
build_activate_layer(actFun),
bn]
return nn.Sequential(*layers)
def conv3x3(in_channels, out_channels, stride=1, bn_init_zero=False, actFun='relu'):
bn = nn.BatchNorm2d(out_channels)
nn.init.constant_(bn.weight, 0. if bn_init_zero else 1.)
layers = [nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=stride, padding=1, bias=False),
build_activate_layer(actFun),
bn]
return nn.Sequential(*layers)
def deconv3x3(in_channels, out_channels, stride=1, bn_init_zero=False, actFun='relu'):
bn = nn.BatchNorm2d(out_channels)
nn.init.constant_(bn.weight, 0. if bn_init_zero else 1.)
return nn.Sequential(
nn.Upsample(scale_factor=stride, mode='bilinear', align_corners=True),
nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=1, padding=1, bias=False),
build_activate_layer(actFun),
bn
)
class GCN_vert_convert():
def __init__(self, vertex_num=1, graph_perm_reverse=[0], graph_perm=[0]):
self.graph_perm_reverse = graph_perm_reverse[:vertex_num]
self.graph_perm = graph_perm
def vert_to_GCN(self, x):
# x: B x v x f
return x[:, self.graph_perm]
def GCN_to_vert(self, x):
# x: B x v x f
return x[:, self.graph_perm_reverse]
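# Usage sketch (illustrative addition; `_demo_model_zoo_helpers` is hypothetical):
# the conv helpers bundle conv -> activation -> batch norm, and GCN_vert_convert
# reorders vertices between mesh order and the coarsened-graph order.
def _demo_model_zoo_helpers():
    import torch
    block = conv3x3(16, 32, stride=2, actFun='lrelu')
    y = block(torch.randn(1, 16, 64, 64))          # -> (1, 32, 32, 32)
    converter = GCN_vert_convert(vertex_num=4,
                                 graph_perm_reverse=[0, 2, 1, 3, 4, 5],
                                 graph_perm=[0, 2, 1, 3])
    v = torch.randn(1, 4, 3)
    v_gcn = converter.vert_to_GCN(v)               # mesh order -> graph order
    return y.shape, converter.GCN_to_vert(v_gcn).shape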
| 3,086 | 30.824742 | 104 | py |
Im2Hands | Im2Hands-main/dependencies/intaghand/models/model_zoo/coarsening.py | import numpy as np
import scipy.sparse
from scipy.sparse.linalg import eigsh
import torch
# forked from https://github.com/3d-hand-shape/hand-graph-cnn
def laplacian(W, normalized=True):
"""Return graph Laplacian"""
# Degree matrix.
d = W.sum(axis=0)
# Laplacian matrix.
if not normalized:
D = scipy.sparse.diags(d.A.squeeze(), 0)
L = D - W
else:
d += np.spacing(np.array(0, W.dtype))
d = 1 / np.sqrt(d)
D = scipy.sparse.diags(d.A.squeeze(), 0)
Imat = scipy.sparse.identity(d.size, dtype=W.dtype)
L = Imat - D * W * D
assert np.abs(L - L.T).mean() < 1e-9
assert type(L) is scipy.sparse.csr.csr_matrix
return L
def rescale_L(L, lmax=2):
"""Rescale Laplacian eigenvalues to [-1,1]"""
M, M = L.shape
Imat = scipy.sparse.identity(M, format='csr', dtype=L.dtype)
    L /= lmax / 2  # L = 2.0 * L / lmax
L -= Imat
return L
def lmax_L(L):
"""Compute largest Laplacian eigenvalue"""
return eigsh(L, k=1, which='LM', return_eigenvectors=False)[0]
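# Usage sketch (illustrative addition; `_demo_rescaled_laplacian` is hypothetical):
# normalize and rescale the Laplacian of a tiny 3-node path graph so that its
# spectrum fits the Chebyshev domain [-1, 1].
def _demo_rescaled_laplacian():
    W = scipy.sparse.csr_matrix(np.array([[0, 1, 0],
                                          [1, 0, 1],
                                          [0, 1, 0]], dtype=np.float32))
    L = laplacian(W, normalized=True)
    return rescale_L(L, lmax_L(L))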
# graph coarsening with Heavy Edge Matching
# forked from https://github.com/xbresson/spectral_graph_convnets
def coarsen(A, levels):
graphs, parents = HEM(A, levels)
perms = compute_perm(parents)
adjacencies = []
laplacians = []
for i, A in enumerate(graphs):
M, M = A.shape
if i < levels:
A = perm_adjacency(A, perms[i])
A = A.tocsr()
A.eliminate_zeros()
adjacencies.append(A)
# Mnew, Mnew = A.shape
# print('Layer {0}: M_{0} = |V| = {1} nodes ({2} added), |E| = {3} edges'.format(i, Mnew, Mnew - M, A.nnz // 2))
L = laplacian(A, normalized=True)
laplacians.append(L)
return adjacencies, laplacians, perms[0] if len(perms) > 0 else None
def HEM(W, levels, rid=None):
"""
Coarsen a graph multiple times using the Heavy Edge Matching (HEM).
Input
W: symmetric sparse weight (adjacency) matrix
levels: the number of coarsened graphs
Output
graph[0]: original graph of size N_1
graph[2]: coarser graph of size N_2 < N_1
graph[levels]: coarsest graph of Size N_levels < ... < N_2 < N_1
parents[i] is a vector of size N_i with entries ranging from 1 to N_{i+1}
which indicate the parents in the coarser graph[i+1]
nd_sz{i} is a vector of size N_i that contains the size of the supernode in the graph{i}
Note
if "graph" is a list of length k, then "parents" will be a list of length k-1
"""
N, N = W.shape
    if rid is None:
        rid = np.random.permutation(range(N))
    # NOTE: the degree-based ordering below always overrides `rid`, so the
    # vertices are visited in ascending total-weight order.
    ss = np.array(W.sum(axis=0)).squeeze()
    rid = np.argsort(ss)
parents = []
degree = W.sum(axis=0) - W.diagonal()
graphs = []
graphs.append(W)
# print('Heavy Edge Matching coarsening with Xavier version')
for _ in range(levels):
# CHOOSE THE WEIGHTS FOR THE PAIRING
# weights = ones(N,1) # metis weights
weights = degree # graclus weights
# weights = supernode_size # other possibility
weights = np.array(weights).squeeze()
# PAIR THE VERTICES AND CONSTRUCT THE ROOT VECTOR
idx_row, idx_col, val = scipy.sparse.find(W)
cc = idx_row
rr = idx_col
vv = val
        # TODO: speed this up
        if not (list(cc) == list(np.sort(cc))):
            cc, rr = rr, cc
cluster_id = HEM_one_level(cc, rr, vv, rid, weights) # cc is ordered
parents.append(cluster_id)
# COMPUTE THE EDGES WEIGHTS FOR THE NEW GRAPH
nrr = cluster_id[rr]
ncc = cluster_id[cc]
nvv = vv
Nnew = cluster_id.max() + 1
# CSR is more appropriate: row,val pairs appear multiple times
W = scipy.sparse.csr_matrix((nvv, (nrr, ncc)), shape=(Nnew, Nnew))
W.eliminate_zeros()
# Add new graph to the list of all coarsened graphs
graphs.append(W)
N, N = W.shape
# COMPUTE THE DEGREE (OMIT OR NOT SELF LOOPS)
degree = W.sum(axis=0)
# degree = W.sum(axis=0) - W.diagonal()
# CHOOSE THE ORDER IN WHICH VERTICES WILL BE VISTED AT THE NEXT PASS
# [~, rid]=sort(ss); # arthur strategy
# [~, rid]=sort(supernode_size); # thomas strategy
# rid=randperm(N); # metis/graclus strategy
ss = np.array(W.sum(axis=0)).squeeze()
rid = np.argsort(ss)
return graphs, parents
# Coarsen a graph given by rr,cc,vv. rr is assumed to be ordered
def HEM_one_level(rr, cc, vv, rid, weights):
nnz = rr.shape[0]
N = rr[nnz - 1] + 1
marked = np.zeros(N, bool)
rowstart = np.zeros(N, np.int32)
rowlength = np.zeros(N, np.int32)
cluster_id = np.zeros(N, np.int32)
oldval = rr[0]
count = 0
clustercount = 0
for ii in range(nnz):
rowlength[count] = rowlength[count] + 1
if rr[ii] > oldval:
oldval = rr[ii]
rowstart[count + 1] = ii
count = count + 1
for ii in range(N):
tid = rid[ii]
if not marked[tid]:
wmax = 0.0
rs = rowstart[tid]
marked[tid] = True
bestneighbor = -1
for jj in range(rowlength[tid]):
nid = cc[rs + jj]
if marked[nid]:
tval = 0.0
else:
                    # First approach (disabled): normalized edge weight.
                    # tval = vv[rs + jj] * (1.0 / weights[tid] + 1.0 / weights[nid])
                    # Second approach: normalized-cut style matching score.
                    Wij = vv[rs + jj]
                    Wii = vv[rowstart[tid]]
                    Wjj = vv[rowstart[nid]]
                    di = weights[tid]
                    dj = weights[nid]
                    tval = (2. * Wij + Wii + Wjj) * 1. / (di + dj + 1e-9)
if tval > wmax:
wmax = tval
bestneighbor = nid
cluster_id[tid] = clustercount
if bestneighbor > -1:
cluster_id[bestneighbor] = clustercount
marked[bestneighbor] = True
clustercount += 1
return cluster_id
def compute_perm(parents):
"""
Return a list of indices to reorder the adjacency and data matrices so
that the union of two neighbors from layer to layer forms a binary tree.
"""
# Order of last layer is random (chosen by the clustering algorithm).
indices = []
if len(parents) > 0:
M_last = max(parents[-1]) + 1
indices.append(list(range(M_last)))
for parent in parents[::-1]:
        # Fake nodes go after real ones.
        pool_singletons = len(parent)
        indices_layer = []
        for i in indices[-1]:
            indices_node = list(np.where(parent == i)[0])
            assert 0 <= len(indices_node) <= 2
            # Add a node to go with a singleton.
            if len(indices_node) == 1:
                indices_node.append(pool_singletons)
                pool_singletons += 1
            # Add two nodes as children of a singleton in the parent.
            elif len(indices_node) == 0:
                indices_node.append(pool_singletons + 0)
                indices_node.append(pool_singletons + 1)
                pool_singletons += 2
            indices_layer.extend(indices_node)
        indices.append(indices_layer)
# Sanity checks.
for i, indices_layer in enumerate(indices):
M = M_last * 2 ** i
# Reduction by 2 at each layer (binary tree).
        assert len(indices_layer) == M
        # The new ordering does not omit an index.
        assert sorted(indices_layer) == list(range(M))
return indices[::-1]
assert (compute_perm([np.array([4, 1, 1, 2, 2, 3, 0, 0, 3]), np.array([2, 1, 0, 1, 0])])
== [[3, 4, 0, 9, 1, 2, 5, 8, 6, 7, 10, 11], [2, 4, 1, 3, 0, 5], [0, 1, 2]])
def perm_adjacency(A, indices):
"""
Permute adjacency matrix, i.e. exchange node ids,
so that binary unions form the clustering tree.
"""
if indices is None:
return A
M, M = A.shape
Mnew = len(indices)
A = A.tocoo()
# Add Mnew - M isolated vertices.
rows = scipy.sparse.coo_matrix((Mnew - M, M), dtype=np.float32)
cols = scipy.sparse.coo_matrix((Mnew, Mnew - M), dtype=np.float32)
A = scipy.sparse.vstack([A, rows])
A = scipy.sparse.hstack([A, cols])
# Permute the rows and the columns.
perm = np.argsort(indices)
A.row = np.array(perm)[A.row]
A.col = np.array(perm)[A.col]
assert np.abs(A - A.T).mean() < 1e-8 # 1e-9
assert type(A) is scipy.sparse.coo.coo_matrix
return A
"""need to be modified to adapted to F features"""
def perm_data(x, indices):
"""
Permute data matrix, i.e. exchange node ids,
so that binary unions form the clustering tree.
"""
if indices is None:
return x
M, F = x.shape
Mnew = len(indices)
assert Mnew >= M
xnew = np.empty((Mnew, F)) # """need to be modified to adapted to F features"""
for i, j in enumerate(indices):
# Existing vertex, i.e. real data.
if j < M:
xnew[i, :] = x[j, :]
        # Fake vertex because of singletons.
        # They will stay 0 so that max pooling chooses the singleton.
        # Or -infty ?
else:
"""need to be modified to adapted to F features and negative values"""
"""np.full((2, 2), -np.inf)"""
xnew[i, :] = np.zeros((F)) # np.full((F), -np.inf) #
return xnew
def perm_index_reverse(indices):
indices_reverse = np.copy(indices)
for i, j in enumerate(indices):
indices_reverse[j] = i
return indices_reverse
def perm_tri(tri, indices):
"""
tri: T x 3
"""
indices_reverse = perm_index_reverse(indices)
tri_new = np.copy(tri)
for i in range(len(tri)):
tri_new[i, 0] = indices_reverse[tri[i, 0]]
tri_new[i, 1] = indices_reverse[tri[i, 1]]
tri_new[i, 2] = indices_reverse[tri[i, 2]]
return tri_new
def build_adj_mat(faces, num_vertex=None):
"""
:param faces: T x 3
:return: adj: sparse matrix, V x V (torch.sparse.FloatTensor)
"""
if num_vertex is None:
num_vertex = np.max(faces) + 1
num_tri = faces.shape[0]
    edges = np.empty((num_tri * 3, 2), dtype=np.int64)  # integer vertex indices
for i_tri in range(num_tri):
edges[i_tri * 3] = faces[i_tri, :2]
edges[i_tri * 3 + 1] = faces[i_tri, 1:]
edges[i_tri * 3 + 2] = faces[i_tri, [0, 2]]
adj = scipy.sparse.coo_matrix((np.ones(edges.shape[0]), (edges[:, 0], edges[:, 1])),
shape=(num_vertex, num_vertex), dtype=np.float32)
adj = adj - (adj > 1) * 1.0
# build symmetric adjacency matrix
adj = adj + adj.T.multiply(adj.T > adj) - adj.multiply(adj.T > adj)
# adj = normalize_sparse_mx(adj + sp.eye(adj.shape[0]))
# adj = sparse_mx_to_torch_sparse_tensor(adj)
return adj
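# Usage sketch (illustrative addition; `_demo_build_adj_mat` is hypothetical):
# build the symmetric vertex adjacency of a toy two-triangle mesh; real
# callers pass the MANO face array.
def _demo_build_adj_mat():
    faces = np.array([[0, 1, 2], [1, 2, 3]])
    adj = build_adj_mat(faces)
    return adj.toarray()  # 4 x 4 {0, 1} matrix with one entry per mesh edge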
def cut_perm(perm_list, level, N):
perm = torch.tensor(perm_list)
perm[perm > (N - 1)] = -1
for ll in range(level):
perm = perm.view(-1, 2**(ll + 1))
start = 0
mid = perm.shape[1] // 2
end = perm.shape[1]
for i in range(perm.shape[0]):
if perm[i, start] == -1:
perm[i, start:mid] = perm[i, mid:end]
if perm[i, mid] == -1:
perm[i, mid:end] = perm[i, start:mid]
perm = perm.view(-1)
perm = perm.tolist()
return perm
def build_graph(faces, coarsening_levels=4):
"""
Build graph for Hand Mesh
"""
joints_num = faces.max() + 1
# Build adj mat
hand_mesh_adj = build_adj_mat(faces, joints_num)
# Compute coarsened graphs
graph_Adj, graph_L, graph_perm = coarsen(hand_mesh_adj, coarsening_levels)
    graph_mask = torch.from_numpy((np.array(graph_perm) < faces.max() + 1).astype(float)).float()  # V
# Compute max eigenvalue of graph Laplacians, rescale Laplacian
graph_lmax = []
for i in range(coarsening_levels):
graph_lmax.append(lmax_L(graph_L[i]))
graph_L[i] = rescale_L(graph_L[i], graph_lmax[i])
graph_perm_reverse = perm_index_reverse(graph_perm)
graph_perm = cut_perm(graph_perm, coarsening_levels, joints_num)
graph_dict = {'mesh_faces': faces,
'mesh_adj': hand_mesh_adj,
'graph_mask': graph_mask,
'coarsen_graphs_adj': graph_Adj,
'coarsen_graphs_L': graph_L,
'graph_perm': graph_perm,
'graph_perm_reverse': graph_perm_reverse}
return graph_dict
| 12,729 | 28.67366 | 120 | py |
Im2Hands | Im2Hands-main/dependencies/intaghand/models/model_attn/DualGraph.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from .gcn import GraphLayer
from .img_attn import img_ex
from .inter_attn import inter_attn
def graph_upsample(x, p):
if p > 1:
x = x.permute(0, 2, 1).contiguous() # x = B x F x V
x = nn.Upsample(scale_factor=p)(x) # B x F x (V*p)
x = x.permute(0, 2, 1).contiguous() # x = B x (V*p) x F
return x
else:
return x
class DualGraphLayer(nn.Module):
def __init__(self,
verts_in_dim=256,
verts_out_dim=256,
graph_L_Left=None,
graph_L_Right=None,
graph_k=2,
graph_layer_num=4,
img_size=64,
img_f_dim=256,
grid_size=8,
grid_f_dim=128,
n_heads=4,
dropout=0.01):
super().__init__()
self.verts_num = graph_L_Left.shape[0]
self.verts_in_dim = verts_in_dim
self.img_size = img_size
self.img_f_dim = img_f_dim
self.position_embeddings = nn.Embedding(self.verts_num, self.verts_in_dim)
self.graph_left = GraphLayer(verts_in_dim, verts_out_dim,
graph_L_Left, graph_k, graph_layer_num,
dropout)
self.graph_right = GraphLayer(verts_in_dim, verts_out_dim,
graph_L_Right, graph_k, graph_layer_num,
dropout)
self.img_ex_left = img_ex(img_size, img_f_dim,
grid_size, grid_f_dim,
verts_out_dim,
n_heads=n_heads,
dropout=dropout)
self.img_ex_right = img_ex(img_size, img_f_dim,
grid_size, grid_f_dim,
verts_out_dim,
n_heads=n_heads,
dropout=dropout)
self.attn = inter_attn(verts_out_dim, n_heads=n_heads, dropout=dropout)
def forward(self, Lf, Rf, img_f):
BS1, V, f = Lf.shape
assert V == self.verts_num
assert f == self.verts_in_dim
BS2, V, f = Rf.shape
assert V == self.verts_num
assert f == self.verts_in_dim
BS3, C, H, W = img_f.shape
assert C == self.img_f_dim
assert H == self.img_size
assert W == self.img_size
assert BS1 == BS2
assert BS2 == BS3
BS = BS1
position_ids = torch.arange(self.verts_num, dtype=torch.long, device=Lf.device)
position_ids = position_ids.unsqueeze(0).repeat(BS, 1)
position_embeddings = self.position_embeddings(position_ids)
Lf = Lf + position_embeddings
Rf = Rf + position_embeddings
Lf = self.graph_left(Lf)
Rf = self.graph_right(Rf)
Lf = self.img_ex_left(img_f, Lf)
Rf = self.img_ex_right(img_f, Rf)
Lf, Rf = self.attn(Lf, Rf)
return Lf, Rf
class DualGraph(nn.Module):
def __init__(self,
verts_in_dim=[512, 256, 128],
verts_out_dim=[256, 128, 64],
graph_L_Left=None,
graph_L_Right=None,
graph_k=[2, 2, 2],
graph_layer_num=[4, 4, 4],
img_size=[16, 32, 64],
img_f_dim=[256, 256, 256],
grid_size=[8, 8, 16],
grid_f_dim=[256, 128, 64],
n_heads=4,
dropout=0.01):
super().__init__()
for i in range(len(verts_in_dim) - 1):
assert verts_out_dim[i] == verts_in_dim[i + 1]
for i in range(len(verts_in_dim) - 1):
assert graph_L_Left[i + 1].shape[0] == 2 * graph_L_Left[i].shape[0]
assert graph_L_Right[i + 1].shape[0] == 2 * graph_L_Right[i].shape[0]
self.layers = nn.ModuleList()
for i in range(len(verts_in_dim)):
self.layers.append(DualGraphLayer(verts_in_dim=verts_in_dim[i],
verts_out_dim=verts_out_dim[i],
graph_L_Left=graph_L_Left[i],
graph_L_Right=graph_L_Right[i],
graph_k=graph_k[i],
graph_layer_num=graph_layer_num[i],
img_size=img_size[i],
img_f_dim=img_f_dim[i],
grid_size=grid_size[i],
grid_f_dim=grid_f_dim[i],
n_heads=n_heads,
dropout=dropout))
def forward(self, Lf, Rf, img_f_list):
assert len(img_f_list) == len(self.layers)
for i in range(len(self.layers)):
Lf, Rf = self.layers[i](Lf, Rf, img_f_list[i])
if i != len(self.layers) - 1:
Lf = graph_upsample(Lf, 2)
Rf = graph_upsample(Rf, 2)
return Lf, Rf
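# Usage sketch (illustrative addition; `_demo_dual_graph_layer` is hypothetical):
# push toy per-hand vertex features and an image feature map through one
# DualGraphLayer. Identity matrices stand in for the rescaled mesh Laplacians.
def _demo_dual_graph_layer():
    import numpy as np
    V = 16
    L = np.eye(V, dtype=np.float32)
    layer = DualGraphLayer(verts_in_dim=32, verts_out_dim=32,
                           graph_L_Left=L, graph_L_Right=L,
                           graph_k=2, graph_layer_num=2,
                           img_size=16, img_f_dim=8,
                           grid_size=4, grid_f_dim=16,
                           n_heads=4, dropout=0.0)
    Lf = torch.randn(2, V, 32)
    Rf = torch.randn(2, V, 32)
    img_f = torch.randn(2, 8, 16, 16)
    Lf, Rf = layer(Lf, Rf, img_f)  # both remain (2, V, 32)
    return Lf.shape, Rf.shape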
| 5,266 | 36.621429 | 87 | py |
Im2Hands | Im2Hands-main/dependencies/intaghand/models/model_attn/inter_attn.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from .self_attn import SelfAttn
def weights_init(layer):
classname = layer.__class__.__name__
# print(classname)
if classname.find('Conv2d') != -1:
nn.init.xavier_uniform_(layer.weight.data)
elif classname.find('Linear') != -1:
nn.init.xavier_uniform_(layer.weight.data)
if layer.bias is not None:
nn.init.constant_(layer.bias.data, 0.0)
class MLP_res_block(nn.Module):
def __init__(self, in_dim, hid_dim, dropout=0.1):
super().__init__()
self.layer_norm = nn.LayerNorm(in_dim, eps=1e-6)
self.fc1 = nn.Linear(in_dim, hid_dim)
self.fc2 = nn.Linear(hid_dim, in_dim)
self.dropout1 = nn.Dropout(dropout)
self.dropout2 = nn.Dropout(dropout)
def _ff_block(self, x):
x = self.fc2(self.dropout1(F.relu(self.fc1(x))))
return self.dropout2(x)
def forward(self, x):
x = x + self._ff_block(self.layer_norm(x))
return x
class inter_attn(nn.Module):
def __init__(self, f_dim, n_heads=4, d_q=None, d_v=None, dropout=0.1):
super().__init__()
self.L_self_attn_layer = SelfAttn(f_dim, n_heads=n_heads, hid_dim=f_dim, dropout=dropout)
self.R_self_attn_layer = SelfAttn(f_dim, n_heads=n_heads, hid_dim=f_dim, dropout=dropout)
self.build_inter_attn(f_dim, n_heads, d_q, d_v, dropout)
for m in self.modules():
weights_init(m)
def build_inter_attn(self, f_dim, n_heads=4, d_q=None, d_v=None, dropout=0.1):
if d_q is None:
d_q = f_dim // n_heads
if d_v is None:
d_v = f_dim // n_heads
self.n_heads = n_heads
self.d_q = d_q
self.d_v = d_v
self.norm = d_q ** 0.5
self.f_dim = f_dim
self.dropout1 = nn.Dropout(dropout)
self.dropout2 = nn.Dropout(dropout)
self.w_qs = nn.Linear(f_dim, n_heads * d_q)
self.w_ks = nn.Linear(f_dim, n_heads * d_q)
self.w_vs = nn.Linear(f_dim, n_heads * d_v)
self.fc = nn.Linear(n_heads * d_v, f_dim)
self.layer_norm1 = nn.LayerNorm(f_dim, eps=1e-6)
self.layer_norm2 = nn.LayerNorm(f_dim, eps=1e-6)
self.ffL = MLP_res_block(f_dim, f_dim, dropout)
self.ffR = MLP_res_block(f_dim, f_dim, dropout)
def inter_attn(self, Lf, Rf, mask_L2R=None, mask_R2L=None):
BS, V, fdim = Lf.shape
assert fdim == self.f_dim
BS, V, fdim = Rf.shape
assert fdim == self.f_dim
Lf2 = self.layer_norm1(Lf)
Rf2 = self.layer_norm2(Rf)
Lq = self.w_qs(Lf2).view(BS, V, self.n_heads, self.d_q).transpose(1, 2) # BS x h x V x q
Lk = self.w_ks(Lf2).view(BS, V, self.n_heads, self.d_q).transpose(1, 2) # BS x h x V x q
Lv = self.w_vs(Lf2).view(BS, V, self.n_heads, self.d_v).transpose(1, 2) # BS x h x V x v
Rq = self.w_qs(Rf2).view(BS, V, self.n_heads, self.d_q).transpose(1, 2) # BS x h x V x q
Rk = self.w_ks(Rf2).view(BS, V, self.n_heads, self.d_q).transpose(1, 2) # BS x h x V x q
Rv = self.w_vs(Rf2).view(BS, V, self.n_heads, self.d_v).transpose(1, 2) # BS x h x V x v
attn_R2L = torch.matmul(Lq, Rk.transpose(-1, -2)) / self.norm # bs, h, V, V
attn_L2R = torch.matmul(Rq, Lk.transpose(-1, -2)) / self.norm # bs, h, V, V
if mask_L2R is not None:
attn_L2R = attn_L2R.masked_fill(mask_L2R == 0, -1e9)
if mask_R2L is not None:
attn_R2L = attn_R2L.masked_fill(mask_R2L == 0, -1e9)
attn_R2L = F.softmax(attn_R2L, dim=-1) # bs, h, V, V
attn_L2R = F.softmax(attn_L2R, dim=-1) # bs, h, V, V
attn_R2L = self.dropout1(attn_R2L)
attn_L2R = self.dropout1(attn_L2R)
feat_L2R = torch.matmul(attn_L2R, Lv).transpose(1, 2).contiguous().view(BS, V, -1)
feat_R2L = torch.matmul(attn_R2L, Rv).transpose(1, 2).contiguous().view(BS, V, -1)
feat_L2R = self.dropout2(self.fc(feat_L2R))
feat_R2L = self.dropout2(self.fc(feat_R2L))
Lf = self.ffL(Lf + feat_R2L)
Rf = self.ffR(Rf + feat_L2R)
return Lf, Rf
def forward(self, Lf, Rf, mask_L2R=None, mask_R2L=None):
BS, V, fdim = Lf.shape
assert fdim == self.f_dim
BS, V, fdim = Rf.shape
assert fdim == self.f_dim
Lf = self.L_self_attn_layer(Lf)
Rf = self.R_self_attn_layer(Rf)
Lf, Rf = self.inter_attn(Lf, Rf, mask_L2R, mask_R2L)
return Lf, Rf
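# Usage sketch (illustrative addition; `_demo_inter_attn` is hypothetical):
# cross-hand attention over toy features; each hand first self-attends, then
# queries the other hand's features.
def _demo_inter_attn():
    attn = inter_attn(f_dim=32, n_heads=4, dropout=0.0)
    Lf = torch.randn(2, 10, 32)
    Rf = torch.randn(2, 10, 32)
    Lf, Rf = attn(Lf, Rf)  # shapes preserved: (2, 10, 32) each
    return Lf.shape, Rf.shape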
| 4,522 | 34.896825 | 97 | py |
Im2Hands | Im2Hands-main/dependencies/intaghand/models/model_attn/gcn.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
def weights_init(layer):
classname = layer.__class__.__name__
# print(classname)
if classname.find('Conv2d') != -1:
nn.init.xavier_uniform_(layer.weight.data)
elif classname.find('Linear') != -1:
nn.init.xavier_uniform_(layer.weight.data)
if layer.bias is not None:
nn.init.constant_(layer.bias.data, 0.0)
def sparse_python_to_torch(sp_python):
L = sp_python.tocoo()
indices = np.column_stack((L.row, L.col)).T
indices = indices.astype(np.int64)
indices = torch.from_numpy(indices)
indices = indices.type(torch.LongTensor)
L_data = L.data.astype(np.float32)
L_data = torch.from_numpy(L_data)
L_data = L_data.type(torch.FloatTensor)
L = torch.sparse.FloatTensor(indices, L_data, torch.Size(L.shape))
# if torch.cuda.is_available():
# L = L.cuda()
return L
def graph_conv_cheby(x, cl, L, K=3):
# parameters
# B = batch size
# V = nb vertices
# Fin = nb input features
# Fout = nb output features
# K = Chebyshev order & support size
B, V, Fin = x.size()
B, V, Fin = int(B), int(V), int(Fin)
# transform to Chebyshev basis
x0 = x.permute(1, 2, 0).contiguous() # V x Fin x B
x0 = x0.view([V, Fin * B]) # V x Fin*B
x = x0.unsqueeze(0) # 1 x V x Fin*B
def concat(x, x_):
x_ = x_.unsqueeze(0) # 1 x V x Fin*B
return torch.cat((x, x_), 0) # K x V x Fin*B
if K > 1:
x1 = torch.mm(L, x0) # V x Fin*B
x = torch.cat((x, x1.unsqueeze(0)), 0) # 2 x V x Fin*B
for k in range(2, K):
x2 = 2 * torch.mm(L, x1) - x0
x = torch.cat((x, x2.unsqueeze(0)), 0) # M x Fin*B
x0, x1 = x1, x2
x = x.view([K, V, Fin, B]) # K x V x Fin x B
x = x.permute(3, 1, 2, 0).contiguous() # B x V x Fin x K
x = x.view([B * V, Fin * K]) # B*V x Fin*K
# Compose linearly Fin features to get Fout features
x = cl(x) # B*V x Fout
x = x.view([B, V, -1]) # B x V x Fout
return x
class GCN_ResBlock(nn.Module):
    # x______________conv + norm (optional)_____________ x ____activate
    # \____conv____activate____norm____conv____norm____/
def __init__(self, in_dim, out_dim, mid_dim,
graph_L, graph_k,
drop_out=0.01):
super(GCN_ResBlock, self).__init__()
if isinstance(graph_L, np.ndarray):
self.register_buffer('graph_L',
torch.from_numpy(graph_L).float(),
persistent=False)
else:
self.register_buffer('graph_L',
sparse_python_to_torch(graph_L).to_dense(),
persistent=False)
self.graph_k = graph_k
self.in_dim = in_dim
self.norm1 = nn.LayerNorm(in_dim, eps=1e-6)
self.fc1 = nn.Linear(in_dim * graph_k, mid_dim)
self.norm2 = nn.LayerNorm(out_dim, eps=1e-6)
self.fc2 = nn.Linear(mid_dim * graph_k, out_dim)
self.dropout = nn.Dropout(drop_out)
self.shortcut = nn.Linear(in_dim, out_dim)
self.norm3 = nn.LayerNorm(out_dim, eps=1e-6)
def forward(self, x):
# x : B x V x f
assert x.shape[-1] == self.in_dim
        x1 = F.relu(self.norm1(x))
        # NOTE: the first graph conv consumes the raw input `x`, not the
        # normalized `x1`; kept as-is to match the released weights.
        x1 = graph_conv_cheby(x, self.fc1, self.graph_L, K=self.graph_k)
x1 = F.relu(self.norm2(x1))
x1 = graph_conv_cheby(x1, self.fc2, self.graph_L, K=self.graph_k)
x1 = self.dropout(x1)
x2 = self.shortcut(x)
return self.norm3(x1 + x2)
class GraphLayer(nn.Module):
def __init__(self,
in_dim=256,
out_dim=256,
graph_L=None,
graph_k=2,
graph_layer_num=3,
drop_out=0.01):
super().__init__()
assert graph_k > 1
self.GCN_blocks = nn.ModuleList()
self.GCN_blocks.append(GCN_ResBlock(in_dim, out_dim, out_dim, graph_L, graph_k, drop_out))
for i in range(graph_layer_num - 1):
self.GCN_blocks.append(GCN_ResBlock(out_dim, out_dim, out_dim, graph_L, graph_k, drop_out))
for m in self.modules():
weights_init(m)
def forward(self, verts_f):
for i in range(len(self.GCN_blocks)):
verts_f = self.GCN_blocks[i](verts_f)
if i != (len(self.GCN_blocks) - 1):
verts_f = F.relu(verts_f)
return verts_f
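# Usage sketch (illustrative addition; `_demo_graph_layer` is hypothetical):
# a stack of Chebyshev GCN residual blocks over a toy graph; the identity
# stands in for a rescaled mesh Laplacian.
def _demo_graph_layer():
    V = 8
    L = np.eye(V, dtype=np.float32)
    layer = GraphLayer(in_dim=16, out_dim=32, graph_L=L,
                       graph_k=2, graph_layer_num=2, drop_out=0.0)
    out = layer(torch.randn(4, V, 16))
    assert out.shape == (4, V, 32)
    return out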
| 4,537 | 31.647482 | 103 | py |
Im2Hands | Im2Hands-main/dependencies/intaghand/models/model_attn/img_attn.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from .self_attn import SelfAttn
def weights_init(layer):
classname = layer.__class__.__name__
# print(classname)
if classname.find('Conv2d') != -1:
nn.init.xavier_uniform_(layer.weight.data)
elif classname.find('Linear') != -1:
nn.init.xavier_uniform_(layer.weight.data)
if layer.bias is not None:
nn.init.constant_(layer.bias.data, 0.0)
class MLP_res_block(nn.Module):
def __init__(self, in_dim, hid_dim, dropout=0.1):
super().__init__()
self.layer_norm = nn.LayerNorm(in_dim, eps=1e-6)
self.fc1 = nn.Linear(in_dim, hid_dim)
self.fc2 = nn.Linear(hid_dim, in_dim)
self.dropout1 = nn.Dropout(dropout)
self.dropout2 = nn.Dropout(dropout)
def _ff_block(self, x):
x = self.fc2(self.dropout1(F.relu(self.fc1(x))))
return self.dropout2(x)
def forward(self, x):
x = x + self._ff_block(self.layer_norm(x))
return x
class img_feat_to_grid(nn.Module):
def __init__(self, img_size, img_f_dim, grid_size, grid_f_dim, n_heads=4, dropout=0.01):
super().__init__()
self.img_f_dim = img_f_dim
self.img_size = img_size
self.grid_f_dim = grid_f_dim
self.grid_size = grid_size
self.position_embeddings = nn.Embedding(grid_size * grid_size, grid_f_dim)
patch_size = img_size // grid_size
self.proj = nn.Conv2d(img_f_dim, grid_f_dim, kernel_size=patch_size, stride=patch_size)
self.self_attn = SelfAttn(grid_f_dim, n_heads=n_heads, hid_dim=grid_f_dim, dropout=dropout)
def forward(self, img):
bs = img.shape[0]
assert img.shape[1] == self.img_f_dim
assert img.shape[2] == self.img_size
assert img.shape[3] == self.img_size
position_ids = torch.arange(self.grid_size * self.grid_size, dtype=torch.long, device=img.device)
position_ids = position_ids.unsqueeze(0).repeat(bs, 1)
position_embeddings = self.position_embeddings(position_ids)
grid_feat = F.relu(self.proj(img))
grid_feat = grid_feat.view(bs, self.grid_f_dim, -1).transpose(-1, -2)
grid_feat = grid_feat + position_embeddings
grid_feat = self.self_attn(grid_feat)
return grid_feat
class img_attn(nn.Module):
def __init__(self, verts_f_dim, img_f_dim, n_heads=4, d_q=None, d_v=None, dropout=0.1):
super().__init__()
self.img_f_dim = img_f_dim
self.verts_f_dim = verts_f_dim
self.fc = nn.Linear(img_f_dim, verts_f_dim)
self.Attn = SelfAttn(verts_f_dim, n_heads=n_heads, hid_dim=verts_f_dim, dropout=dropout)
def forward(self, verts_f, img_f):
assert verts_f.shape[2] == self.verts_f_dim
assert img_f.shape[2] == self.img_f_dim
assert verts_f.shape[0] == img_f.shape[0]
org_verts_f_shape = verts_f.shape
V = verts_f.shape[1]
img_f = self.fc(img_f)
verts_f = verts_f.reshape(-1, 1, verts_f.shape[2])
img_f = img_f.reshape(img_f.shape[0], 1, img_f.shape[1], img_f.shape[2])
img_f = torch.repeat_interleave(img_f, V, dim=1)
img_f = img_f.reshape(-1, img_f.shape[2], img_f.shape[3])
#verts_f = verts_f.transpose(0,1)
#img_f = torch.repeat_interleave(img_f, V, dim=0)
x = torch.cat([verts_f, img_f], dim=1)
x = self.Attn(x)
verts_f = x[:, :1]
#verts_f = verts_f.transpose(0,1)
verts_f = verts_f.reshape(org_verts_f_shape)
return verts_f
class img_ex(nn.Module):
def __init__(self, img_size, img_f_dim,
grid_size, grid_f_dim,
verts_f_dim,
n_heads=4,
dropout=0.01):
super().__init__()
self.verts_f_dim = verts_f_dim
self.encoder = img_feat_to_grid(img_size, img_f_dim, grid_size, grid_f_dim, n_heads, dropout)
self.attn = img_attn(verts_f_dim, grid_f_dim, n_heads=n_heads, dropout=dropout)
for m in self.modules():
weights_init(m)
def forward(self, img, verts_f):
assert verts_f.shape[2] == self.verts_f_dim
grid_feat = self.encoder(img)
verts_f = self.attn(verts_f, grid_feat)
return verts_f
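# Usage sketch (illustrative addition; `_demo_img_ex` is hypothetical): inject
# image evidence into vertex features. The image is tokenized into a grid,
# self-attended, then every vertex attends over the grid tokens.
def _demo_img_ex():
    module = img_ex(img_size=16, img_f_dim=8,
                    grid_size=4, grid_f_dim=16,
                    verts_f_dim=32, n_heads=4, dropout=0.0)
    img = torch.randn(2, 8, 16, 16)
    verts_f = torch.randn(2, 10, 32)
    return module(img, verts_f).shape  # shape preserved: (2, 10, 32)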
| 4,292 | 33.071429 | 105 | py |
Im2Hands | Im2Hands-main/dependencies/intaghand/models/model_attn/self_attn.py | import torch
import torch.nn as nn
import torch.nn.functional as F
def weights_init(layer):
classname = layer.__class__.__name__
# print(classname)
if classname.find('Conv2d') != -1:
nn.init.xavier_uniform_(layer.weight.data)
elif classname.find('Linear') != -1:
nn.init.xavier_uniform_(layer.weight.data)
if layer.bias is not None:
nn.init.constant_(layer.bias.data, 0.0)
class MLP_res_block(nn.Module):
def __init__(self, in_dim, hid_dim, dropout=0.1):
super().__init__()
self.layer_norm = nn.LayerNorm(in_dim, eps=1e-6)
self.fc1 = nn.Linear(in_dim, hid_dim)
self.fc2 = nn.Linear(hid_dim, in_dim)
self.dropout1 = nn.Dropout(dropout)
self.dropout2 = nn.Dropout(dropout)
def _ff_block(self, x):
x = self.fc2(self.dropout1(F.relu(self.fc1(x))))
return self.dropout2(x)
def forward(self, x):
x = x + self._ff_block(self.layer_norm(x))
return x
class SelfAttn(nn.Module):
def __init__(self, f_dim, hid_dim=None, n_heads=4, d_q=None, d_v=None, dropout=0.1):
super().__init__()
if d_q is None:
d_q = f_dim // n_heads
if d_v is None:
d_v = f_dim // n_heads
if hid_dim is None:
hid_dim = f_dim
self.n_heads = n_heads
self.d_q = d_q
self.d_v = d_v
self.norm = d_q ** 0.5
self.f_dim = f_dim
self.dropout1 = nn.Dropout(dropout)
self.dropout2 = nn.Dropout(dropout)
self.w_qs = nn.Linear(f_dim, n_heads * d_q)
self.w_ks = nn.Linear(f_dim, n_heads * d_q)
self.w_vs = nn.Linear(f_dim, n_heads * d_v)
self.layer_norm = nn.LayerNorm(f_dim, eps=1e-6)
self.fc = nn.Linear(n_heads * d_v, f_dim)
self.ff = MLP_res_block(f_dim, hid_dim, dropout)
def self_attn(self, x):
BS, V, f = x.shape
q = self.w_qs(x).view(BS, -1, self.n_heads, self.d_q).transpose(1, 2) # BS x h x V x q
k = self.w_ks(x).view(BS, -1, self.n_heads, self.d_q).transpose(1, 2) # BS x h x V x q
v = self.w_vs(x).view(BS, -1, self.n_heads, self.d_v).transpose(1, 2) # BS x h x V x v
attn = torch.matmul(q, k.transpose(-1, -2)) / self.norm # bs, h, V, V
attn = F.softmax(attn, dim=-1) # bs, h, V, V
attn = self.dropout1(attn)
out = torch.matmul(attn, v).transpose(1, 2).contiguous().view(BS, V, -1)
out = self.dropout2(self.fc(out))
return out
def forward(self, x):
BS, V, f = x.shape
assert f == self.f_dim
x = x + self.self_attn(self.layer_norm(x))
x = self.ff(x)
return x
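# Usage sketch (illustrative addition; `_demo_self_attn` is hypothetical):
# pre-norm multi-head self-attention with a residual MLP on a toy sequence.
def _demo_self_attn():
    attn = SelfAttn(f_dim=32, n_heads=4, dropout=0.0)
    x = torch.randn(2, 10, 32)
    return attn(x).shape  # (2, 10, 32)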
| 3,346 | 31.813725 | 95 | py |
Im2Hands | Im2Hands-main/dependencies/intaghand/utils/utils.py | import numpy as np
import random
import math
import cv2 as cv
import pickle
import torch
import os
import sys
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
from utils.config import get_cfg_defaults
from models.model_zoo import build_graph
def projection(scale, trans2d, label3d, img_size=256):
    """Orthographic projection of a single set of 3D points to pixels."""
    scale = scale * img_size
    trans2d = trans2d * img_size / 2 + img_size / 2
    label2d = scale * label3d[:, :2] + trans2d
    return label2d
def projection_batch(scale, trans2d, label3d, img_size=256):
"""orthodox projection
Input:
scale: (B)
trans2d: (B, 2)
label3d: (B x N x 3)
Returns:
(B, N, 2)
"""
scale = scale * img_size # bs
if scale.dim() == 1:
scale = scale.unsqueeze(-1).unsqueeze(-1)
if scale.dim() == 2:
scale = scale.unsqueeze(-1)
trans2d = trans2d * img_size / 2 + img_size / 2 # bs x 2
trans2d = trans2d.unsqueeze(1)
label2d = scale * label3d[..., :2] + trans2d
return label2d
def projection_batch_np(scale, trans2d, label3d, img_size=256):
    """Orthographic projection (numpy version)
    Input:
        scale: (B)
        trans2d: (B, 2)
        label3d: (B x N x 3)
    Returns:
        (B, N, 2)
    """
    scale = scale * img_size  # bs
    if scale.ndim == 1:  # numpy arrays have no .dim(); use .ndim
        scale = scale[..., np.newaxis, np.newaxis]
    if scale.ndim == 2:
        scale = scale[..., np.newaxis]
    trans2d = trans2d * img_size / 2 + img_size / 2  # bs x 2
    trans2d = trans2d[:, np.newaxis, :]
    label2d = scale * label3d[..., :2] + trans2d
    return label2d
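# Usage sketch (illustrative addition; `_demo_projection_batch` is hypothetical):
# project toy 3D joints to 2D pixel coordinates with the batched orthographic
# projection above.
def _demo_projection_batch():
    scale = torch.ones(2)
    trans2d = torch.zeros(2, 2)
    label3d = torch.randn(2, 21, 3)
    label2d = projection_batch(scale, trans2d, label3d, img_size=256)
    assert label2d.shape == (2, 21, 2)
    return label2d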
def get_mano_path():
cfg = get_cfg_defaults()
abspath = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
path = os.path.join(abspath, cfg.MISC.MANO_PATH)
    mano_path = {'left': os.path.join(path, 'MANO_LEFT.pkl'),
                 'right': os.path.join(path, 'MANO_RIGHT.pkl')}
    # NOTE: the config-derived paths above are immediately overridden by
    # these hard-coded workspace paths.
    mano_path = {'right': '/workspace/AFOF/leap/body_models/mano/models/MANO_RIGHT.pkl',
                 'left': '/workspace/AFOF/leap/body_models/mano/models/MANO_LEFT.pkl'}
return mano_path
def get_graph_dict_path():
cfg = get_cfg_defaults()
abspath = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
graph_path = {'left': os.path.join(abspath, cfg.MISC.GRAPH_LEFT_DICT_PATH),
'right': os.path.join(abspath, cfg.MISC.GRAPH_RIGHT_DICT_PATH)}
return graph_path
def get_dense_color_path():
cfg = get_cfg_defaults()
abspath = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
dense_path = os.path.join(abspath, cfg.MISC.DENSE_COLOR)
return dense_path
def get_mano_seg_path():
cfg = get_cfg_defaults()
abspath = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
seg_path = os.path.join(abspath, cfg.MISC.MANO_SEG_PATH)
return seg_path
def get_upsample_path():
cfg = get_cfg_defaults()
abspath = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
upsample_path = os.path.join(abspath, cfg.MISC.UPSAMPLE_PATH)
return upsample_path
def build_mano_graph():
graph_path = get_graph_dict_path()
mano_path = get_mano_path()
for hand_type in ['left', 'right']:
if not os.path.exists(graph_path[hand_type]):
manoData = pickle.load(open(mano_path[hand_type], 'rb'), encoding='latin1')
faces = manoData['f']
graph_dict = build_graph(faces, coarsening_levels=4)
with open(graph_path[hand_type], 'wb') as file:
pickle.dump(graph_dict, file)
class imgUtils():
@ staticmethod
def pad2squre(img, color=None):
if img.shape[0] > img.shape[1]:
W = img.shape[0] - img.shape[1]
else:
W = img.shape[1] - img.shape[0]
W1 = int(W / 2)
W2 = W - W1
if color is None:
if img.shape[2] == 3:
color = (0, 0, 0)
else:
color = 0
if img.shape[0] > img.shape[1]:
return cv.copyMakeBorder(img, 0, 0, W1, W2, cv.BORDER_CONSTANT, value=color)
else:
return cv.copyMakeBorder(img, W1, W2, 0, 0, cv.BORDER_CONSTANT, value=color)
@ staticmethod
def cut2squre(img):
if img.shape[0] > img.shape[1]:
s = int((img.shape[0] - img.shape[1]) / 2)
return img[s:(s + img.shape[1])]
else:
s = int((img.shape[1] - img.shape[0]) / 2)
return img[:, s:(s + img.shape[0])]
@ staticmethod
def get_scale_mat(center, scale=1.0):
scaleMat = np.zeros((3, 3), dtype='float32')
scaleMat[0, 0] = scale
scaleMat[1, 1] = scale
scaleMat[2, 2] = 1.0
t = np.matmul((np.identity(3, dtype='float32') - scaleMat), center)
scaleMat[0, 2] = t[0]
scaleMat[1, 2] = t[1]
return scaleMat
@ staticmethod
def get_rotation_mat(center, theta=0):
t = theta * (3.14159 / 180)
rotationMat = np.zeros((3, 3), dtype='float32')
rotationMat[0, 0] = math.cos(t)
rotationMat[0, 1] = -math.sin(t)
rotationMat[1, 0] = math.sin(t)
rotationMat[1, 1] = math.cos(t)
rotationMat[2, 2] = 1.0
t = np.matmul((np.identity(3, dtype='float32') - rotationMat), center)
rotationMat[0, 2] = t[0]
rotationMat[1, 2] = t[1]
return rotationMat
@ staticmethod
def get_rotation_mat3d(theta=0):
t = theta * (3.14159 / 180)
rotationMat = np.zeros((3, 3), dtype='float32')
rotationMat[0, 0] = math.cos(t)
rotationMat[0, 1] = -math.sin(t)
rotationMat[1, 0] = math.sin(t)
rotationMat[1, 1] = math.cos(t)
rotationMat[2, 2] = 1.0
return rotationMat
@ staticmethod
def get_affine_mat(theta=0, scale=1.0,
u=0, v=0,
height=480, width=640):
center = np.array([width / 2, height / 2, 1], dtype='float32')
rotationMat = imgUtils.get_rotation_mat(center, theta)
scaleMat = imgUtils.get_scale_mat(center, scale)
trans = np.identity(3, dtype='float32')
trans[0, 2] = u
trans[1, 2] = v
affineMat = np.matmul(scaleMat, rotationMat)
affineMat = np.matmul(trans, affineMat)
return affineMat
@staticmethod
def img_trans(theta, scale, u, v, img):
size = img.shape[0]
u = int(u * size / 2)
v = int(v * size / 2)
        affineMat = imgUtils.get_affine_mat(theta=theta, scale=scale,
                                            u=u, v=v,
                                            height=size, width=size)
        return cv.warpAffine(src=img,
                             M=affineMat[0:2, :],
                             dsize=(size, size),
dst=img,
flags=cv.INTER_LINEAR,
borderMode=cv.BORDER_REPLICATE,
borderValue=(0, 0, 0)
)
@staticmethod
def data_augmentation(theta, scale, u, v,
img_list=None, label2d_list=None, label3d_list=None,
R=None,
img_size=224):
affineMat = imgUtils.get_affine_mat(theta=theta, scale=scale,
u=u, v=v,
height=img_size, width=img_size)
if img_list is not None:
img_list_out = []
for img in img_list:
img_list_out.append(cv.warpAffine(src=img,
M=affineMat[0:2, :],
dsize=(img_size, img_size)))
else:
img_list_out = None
if label2d_list is not None:
label2d_list_out = []
for label2d in label2d_list:
label2d_list_out.append(np.matmul(label2d, affineMat[0:2, 0:2].T) + affineMat[0:2, 2:3].T)
else:
label2d_list_out = None
if label3d_list is not None:
label3d_list_out = []
R_delta = imgUtils.get_rotation_mat3d(theta)
for label3d in label3d_list:
label3d_list_out.append(np.matmul(label3d, R_delta.T))
else:
label3d_list_out = None
if R is not None:
R_delta = imgUtils.get_rotation_mat3d(theta)
R = np.matmul(R_delta, R)
else:
R = None
return img_list_out, label2d_list_out, label3d_list_out, R
@ staticmethod
def add_noise(img, noise=0.00, scale=255.0, alpha=0.3, beta=0.05):
# add brightness noise & add random gaussian noise
a = np.random.uniform(1 - alpha, 1 + alpha, 3)
b = scale * beta * (2 * random.random() - 1)
img = a * img + b + scale * np.random.normal(loc=0.0, scale=noise, size=img.shape)
img = np.clip(img, 0, scale)
return img
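# Usage sketch (illustrative addition; `_demo_augmentation` is hypothetical):
# jointly augment an image and its 2D keypoints with a similarity transform,
# then add photometric noise.
def _demo_augmentation():
    img = np.zeros((224, 224, 3), dtype=np.float32)
    kps = np.array([[112.0, 112.0]])  # one keypoint, shape (1, 2)
    imgs, kps_out, _, _ = imgUtils.data_augmentation(
        theta=10, scale=1.1, u=5, v=-3,
        img_list=[img], label2d_list=[kps], img_size=224)
    noisy = imgUtils.add_noise(imgs[0], noise=0.01)
    return noisy.shape, kps_out[0]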
| 8,999 | 33.090909 | 106 | py |
Im2Hands | Im2Hands-main/dependencies/intaghand/utils/vis_utils.py | import pickle
import numpy as np
import torch
import sys
import os
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
from models.manolayer import ManoLayer
from utils.config import get_cfg_defaults
from utils.utils import projection_batch, get_mano_path, get_dense_color_path
# Data structures and functions for rendering
from pytorch3d.structures import Meshes
from pytorch3d.vis.plotly_vis import AxisArgs, plot_batch_individually, plot_scene
from pytorch3d.vis.texture_vis import texturesuv_image_matplotlib
from pytorch3d.renderer import (
look_at_view_transform,
PerspectiveCameras,
OrthographicCameras,
PointLights,
DirectionalLights,
Materials,
RasterizationSettings,
MeshRenderer,
MeshRasterizer,
SoftPhongShader,
HardPhongShader,
TexturesUV,
TexturesVertex,
HardFlatShader,
HardGouraudShader,
AmbientLights,
SoftSilhouetteShader
)
class Renderer():
def __init__(self, img_size, device='cpu'):
self.img_size = img_size
self.raster_settings = RasterizationSettings(
image_size=img_size,
blur_radius=0.0,
faces_per_pixel=1
)
self.amblights = AmbientLights(device=device)
self.point_lights = PointLights(location=[[0, 0, -1.0]], device=device)
self.renderer_rgb = MeshRenderer(
rasterizer=MeshRasterizer(raster_settings=self.raster_settings),
shader=HardPhongShader(device=device)
)
self.device = device
def build_camera(self, cameras=None,
scale=None, trans2d=None):
if scale is not None and trans2d is not None:
bs = scale.shape[0]
R = torch.tensor([[-1, 0, 0], [0, -1, 0], [0, 0, 1]]).repeat(bs, 1, 1).to(scale.dtype)
T = torch.tensor([0, 0, 10]).repeat(bs, 1).to(scale.dtype)
return OrthographicCameras(focal_length=2 * scale.to(self.device),
principal_point=-trans2d.to(self.device),
R=R.to(self.device),
T=T.to(self.device),
in_ndc=True,
device=self.device)
if cameras is not None:
# cameras: bs x 3 x 3
fs = -torch.stack((cameras[:, 0, 0], cameras[:, 1, 1]), dim=-1) * 2 / self.img_size
pps = -cameras[:, :2, -1] * 2 / self.img_size + 1
return PerspectiveCameras(focal_length=fs.to(self.device),
principal_point=pps.to(self.device),
in_ndc=True,
device=self.device
)
def build_texture(self, uv_verts=None, uv_faces=None, texture=None,
v_color=None):
if uv_verts is not None and uv_faces is not None and texture is not None:
return TexturesUV(texture.to(self.device), uv_faces.to(self.device), uv_verts.to(self.device))
if v_color is not None:
return TexturesVertex(verts_features=v_color.to(self.device))
def render(self, verts, faces, cameras, textures, amblights=False,
lights=None):
if lights is None:
if amblights:
lights = self.amblights
else:
lights = self.point_lights
mesh = Meshes(verts=verts.to(self.device), faces=faces.to(self.device), textures=textures)
output = self.renderer_rgb(mesh, cameras=cameras, lights=lights)
alpha = output[..., 3]
img = output[..., :3] / 255
return img, alpha
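# Usage sketch (illustrative addition; `_demo_renderer` is hypothetical and
# assumes pytorch3d is installed): build an orthographic camera from predicted
# scale/translation and render an untextured mesh batch.
def _demo_renderer(verts, faces):
    renderer = Renderer(img_size=256, device='cpu')
    cams = renderer.build_camera(scale=torch.ones(1), trans2d=torch.zeros(1, 2))
    tex = renderer.build_texture(v_color=255 * torch.ones(1, verts.shape[1], 3))
    return renderer.render(verts, faces, cams, tex, amblights=True)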
class mano_renderer(Renderer):
def __init__(self, mano_path=None, dense_path=None, img_size=224, device='cpu'):
super(mano_renderer, self).__init__(img_size, device)
if mano_path is None:
mano_path = get_mano_path()
if dense_path is None:
dense_path = get_dense_color_path()
self.mano = ManoLayer(mano_path, center_idx=9, use_pca=True)
self.mano.to(self.device)
self.faces_np = self.mano.get_faces().astype(np.int64)
self.faces = torch.from_numpy(self.faces_np).to(self.device).unsqueeze(0)
with open(dense_path, 'rb') as file:
dense_coor = pickle.load(file)
self.dense_coor = torch.from_numpy(dense_coor) * 255
def render_rgb(self, cameras=None, scale=None, trans2d=None,
R=None, pose=None, shape=None, trans=None,
v3d=None,
uv_verts=None, uv_faces=None, texture=None, v_color=(255, 255, 255),
amblights=False):
if v3d is None:
v3d, _ = self.mano(R, pose, shape, trans=trans)
bs = v3d.shape[0]
vNum = v3d.shape[1]
if not isinstance(v_color, torch.Tensor):
v_color = torch.tensor(v_color)
v_color = v_color.expand(bs, vNum, 3).to(v3d)
return self.render(v3d, self.faces.repeat(bs, 1, 1),
self.build_camera(cameras, scale, trans2d),
self.build_texture(uv_verts, uv_faces, texture, v_color),
amblights)
def render_densepose(self, cameras=None, scale=None, trans2d=None,
R=None, pose=None, shape=None, trans=None,
v3d=None):
if v3d is None:
v3d, _ = self.mano(R, pose, shape, trans=trans)
bs = v3d.shape[0]
vNum = v3d.shape[1]
return self.render(v3d, self.faces.repeat(bs, 1, 1),
self.build_camera(cameras, scale, trans2d),
self.build_texture(v_color=self.dense_coor.expand(bs, vNum, 3).to(v3d)),
True)
class mano_two_hands_renderer(Renderer):
def __init__(self, mano_path=None, dense_path=None, img_size=224, device='cpu'):
super(mano_two_hands_renderer, self).__init__(img_size, device)
if mano_path is None:
mano_path = get_mano_path()
if dense_path is None:
dense_path = get_dense_color_path()
self.mano = {'right': ManoLayer(mano_path['right'], center_idx=None),
'left': ManoLayer(mano_path['left'], center_idx=None)}
self.mano['left'].to(self.device)
self.mano['right'].to(self.device)
        right_faces = torch.from_numpy(self.mano['right'].get_faces().astype(np.int64)).to(self.device).unsqueeze(0)
        # Derive the left-hand faces from the right-hand topology with flipped
        # winding order, so triangle normals point outward for the mirrored hand.
        left_faces = right_faces[..., [1, 0, 2]]
self.faces = torch.cat((left_faces, right_faces + 778), dim=1)
with open(dense_path, 'rb') as file:
dense_coor = pickle.load(file)
self.dense_coor = torch.from_numpy(dense_coor) * 255
def render_rgb(self, cameras=None, scale=None, trans2d=None,
v3d_left=None, v3d_right=None,
uv_verts=None, uv_faces=None, texture=None, v_color=None,
amblights=False,
lights=None):
bs = v3d_left.shape[0]
vNum = v3d_left.shape[1]
if v_color is None:
v_color = torch.zeros((778 * 2, 3))
v_color[:778, 0] = 204
v_color[:778, 1] = 153
v_color[:778, 2] = 0
v_color[778:, 0] = 102
v_color[778:, 1] = 102
v_color[778:, 2] = 255
if not isinstance(v_color, torch.Tensor):
v_color = torch.tensor(v_color)
v_color = v_color.expand(bs, 2 * vNum, 3).float().to(self.device)
v3d = torch.cat((v3d_left, v3d_right), dim=1)
return self.render(v3d,
self.faces.repeat(bs, 1, 1),
self.build_camera(cameras, scale, trans2d),
self.build_texture(uv_verts, uv_faces, texture, v_color),
amblights,
lights)
def render_rgb_orth(self, scale_left=None, trans2d_left=None,
scale_right=None, trans2d_right=None,
v3d_left=None, v3d_right=None,
uv_verts=None, uv_faces=None, texture=None, v_color=None,
amblights=False):
scale = scale_left
trans2d = trans2d_left
s = scale_right / scale_left
d = -(trans2d_left - trans2d_right) / 2 / scale_left.unsqueeze(-1)
s = s.unsqueeze(-1).unsqueeze(-1)
d = d.unsqueeze(1)
v3d_right = s * v3d_right
v3d_right[..., :2] = v3d_right[..., :2] + d
# scale = (scale_left + scale_right) / 2
# trans2d = (trans2d_left + trans2d_right) / 2
        return self.render_rgb(scale=scale, trans2d=trans2d,
                               v3d_left=v3d_left, v3d_right=v3d_right,
                               uv_verts=uv_verts, uv_faces=uv_faces, texture=texture, v_color=v_color,
                               amblights=amblights)
def render_mask(self, cameras=None, scale=None, trans2d=None,
v3d_left=None, v3d_right=None):
v_color = torch.zeros((778 * 2, 3))
v_color[:778, 2] = 255
v_color[778:, 1] = 255
rgb, mask = self.render_rgb(cameras, scale, trans2d,
v3d_left, v3d_right,
v_color=v_color,
amblights=True)
return rgb
def render_densepose(self, cameras=None, scale=None, trans2d=None,
v3d_left=None, v3d_right=None,):
bs = v3d_left.shape[0]
vNum = v3d_left.shape[1]
v3d = torch.cat((v3d_left, v3d_right), dim=1)
v_color = torch.cat((self.dense_coor, self.dense_coor), dim=0)
return self.render(v3d,
self.faces.repeat(bs, 1, 1),
self.build_camera(cameras, scale, trans2d),
self.build_texture(v_color=v_color.expand(bs, 2 * vNum, 3).to(v3d_left)),
True)
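# Illustrative call for the two-hand renderer (requires the MANO assets to be
# reachable via get_mano_path / get_dense_color_path; the tensors are assumed
# to be (bs, 778, 3) vertices plus matching orthographic camera parameters).
def _demo_two_hands_mask(v3d_left, v3d_right, scale, trans2d):
    renderer = mano_two_hands_renderer(img_size=224, device='cpu')
    mask = renderer.render_mask(scale=scale, trans2d=trans2d,
                                v3d_left=v3d_left, v3d_right=v3d_right)
    return mask  # left hand encoded in the blue channel, right hand in green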
| 10,221 | 39.563492 | 116 | py |
Im2Hands | Im2Hands-main/dependencies/halo/checkpoints.py | import os
import urllib
import torch
from torch.utils import model_zoo
class CheckpointIO(object):
    ''' CheckpointIO class.
    It handles saving and loading checkpoints.
    Args:
        checkpoint_dir (str): path where checkpoints are saved
        initialize_from (str): optional directory holding a model used for weight initialization
        initialization_file_name (str): file name of the initialization checkpoint
    '''
def __init__(self, checkpoint_dir='./chkpts', initialize_from=None,
initialization_file_name='model_best.pt', **kwargs):
self.module_dict = kwargs
self.checkpoint_dir = checkpoint_dir
self.initialize_from = initialize_from
self.initialization_file_name = initialization_file_name
if not os.path.exists(checkpoint_dir):
os.makedirs(checkpoint_dir)
def register_modules(self, **kwargs):
''' Registers modules in current module dictionary.
'''
self.module_dict.update(kwargs)
def save(self, filename, **kwargs):
''' Saves the current module dictionary.
Args:
filename (str): name of output file
'''
if not os.path.isabs(filename):
filename = os.path.join(self.checkpoint_dir, filename)
outdict = kwargs
for k, v in self.module_dict.items():
# Do not save HALO
new_dict = {}
for x, y in v.state_dict().items():
if 'halo' not in x:
new_dict[x] = y
outdict[k] = new_dict
# outdict[k] = v.state_dict()
torch.save(outdict, filename)
def load(self, filename):
'''Loads a module dictionary from local file or url.
Args:
filename (str): name of saved module dictionary
'''
if is_url(filename):
return self.load_url(filename)
else:
return self.load_file(filename)
def load_file(self, filename):
'''Loads a module dictionary from file.
Args:
filename (str): name of saved module dictionary
'''
if not os.path.isabs(filename):
filename = os.path.join(self.checkpoint_dir, filename)
if os.path.exists(filename):
print(filename)
print('=> Loading checkpoint from local file...')
state_dict = torch.load(filename)
scalars = self.parse_state_dict(state_dict)
return scalars
else:
if self.initialize_from is not None:
self.initialize_weights()
            raise FileNotFoundError(filename)
def load_url(self, url):
'''Load a module dictionary from url.
Args:
url (str): url to saved model
'''
print(url)
print('=> Loading checkpoint from url...')
state_dict = model_zoo.load_url(url, progress=True)
scalars = self.parse_state_dict(state_dict)
return scalars
def parse_state_dict(self, state_dict):
'''Parse state_dict of model and return scalars.
Args:
state_dict (dict): State dict of model
'''
for k, v in self.module_dict.items():
# import pdb; pdb.set_trace()
if k in state_dict:
                # Default strict=True can fail here for nn.Module entries, since
                # save() strips the HALO weights out of the stored state dict;
                # use load_state_dict(..., strict=False) for those if needed.
                v.load_state_dict(state_dict[k])
else:
print('Warning: Could not find %s in checkpoint!' % k)
scalars = {k: v for k, v in state_dict.items()
if k not in self.module_dict}
return scalars
def initialize_weights(self):
''' Initializes the model weights from another model file.
'''
print('Intializing weights from model %s' % self.initialize_from)
filename_in = os.path.join(
self.initialize_from, self.initialization_file_name)
model_state_dict = self.module_dict.get('model').state_dict()
model_dict = self.module_dict.get('model').state_dict()
model_keys = set([k for (k, v) in model_dict.items()])
init_model_dict = torch.load(filename_in)['model']
init_model_k = set([k for (k, v) in init_model_dict.items()])
for k in model_keys:
if ((k in init_model_k) and (model_state_dict[k].shape ==
init_model_dict[k].shape)):
model_state_dict[k] = init_model_dict[k]
self.module_dict.get('model').load_state_dict(model_state_dict)
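# Minimal usage sketch (illustrative names and paths): register torch modules,
# save them together with scalar training state, and restore everything later.
def _demo_checkpoint_io():
    import torch.nn as nn
    import torch.optim as optim
    model = nn.Linear(3, 3)
    optimizer = optim.Adam(model.parameters())
    ckpt = CheckpointIO('./chkpts_demo', model=model, optimizer=optimizer)
    ckpt.save('model.pt', epoch_it=0, loss_val_best=float('inf'))
    scalars = ckpt.load('model.pt')  # non-module entries, e.g. {'epoch_it': 0, ...}
    return scalars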
def is_url(url):
''' Checks if input is url.'''
scheme = urllib.parse.urlparse(url).scheme
return scheme in ('http', 'https') | 4,467 | 33.90625 | 93 | py |
Im2Hands | Im2Hands-main/dependencies/halo/training.py | # from im2mesh import icp
import numpy as np
from collections import defaultdict
from tqdm import tqdm
class BaseTrainer(object):
''' Base trainer class.
'''
def evaluate(self, val_loader):
''' Performs an evaluation.
Args:
val_loader (dataloader): pytorch dataloader
'''
eval_list = defaultdict(list)
for data in tqdm(val_loader):
eval_step_dict = self.eval_step(data)
for k, v in eval_step_dict.items():
eval_list[k].append(v)
eval_dict = {k: np.mean(v) for k, v in eval_list.items()}
return eval_dict
def train_step(self, *args, **kwargs):
''' Performs a training step.
'''
raise NotImplementedError
def eval_step(self, *args, **kwargs):
''' Performs an evaluation step.
'''
raise NotImplementedError
def visualize(self, *args, **kwargs):
''' Performs visualization.
'''
raise NotImplementedError
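# Minimal subclass sketch (illustrative): only eval_step has to be implemented
# for BaseTrainer.evaluate() to aggregate per-batch metrics into means.
class _DemoTrainer(BaseTrainer):
    def train_step(self, data):
        return 0.0
    def eval_step(self, data):
        return {'loss': 0.0, 'iou': 1.0}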
| 1,014 | 23.756098 | 65 | py |
Im2Hands | Im2Hands-main/dependencies/halo/config.py | from models.data.input_helpers import random_rotate
import yaml
from torchvision import transforms
from models import naive
from models import data
method_dict = {
'naive': naive
}
# General config
def load_config(path, default_path=None):
''' Loads config file.
Args:
path (str): path to config file
        default_path (str): optional path to a default config, used when the file has no 'inherit_from' entry
'''
# Load configuration from file itself
with open(path, 'r') as f:
        cfg_special = yaml.load(f, Loader=yaml.FullLoader)
# Check if we should inherit from a config
inherit_from = cfg_special.get('inherit_from')
# If yes, load this config first as default
# If no, use the default_path
if inherit_from is not None:
cfg = load_config(inherit_from, default_path)
elif default_path is not None:
with open(default_path, 'r') as f:
cfg = yaml.load(f, Loader=yaml.FullLoader)
else:
cfg = dict()
# Include main configuration
update_recursive(cfg, cfg_special)
return cfg
def update_recursive(dict1, dict2):
''' Update two config dictionaries recursively.
Args:
dict1 (dict): first dictionary to be updated
dict2 (dict): second dictionary which entries should be used
'''
for k, v in dict2.items():
if k not in dict1:
dict1[k] = dict()
if isinstance(v, dict):
update_recursive(dict1[k], v)
else:
dict1[k] = v
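# Minimal sketch of the merge semantics used by load_config: nested dicts are
# merged key-by-key and values from the second dict win (keys are illustrative).
def _demo_update_recursive():
    base = {'model': {'z_dim': 128, 'decoder': 'simple'}}
    override = {'model': {'decoder': 'piece_deform'}}
    update_recursive(base, override)
    assert base['model'] == {'z_dim': 128, 'decoder': 'piece_deform'}
    return base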
# Models
def get_model(cfg, device=None, dataset=None):
''' Returns the model instance.
Args:
cfg (dict): config dictionary
device (device): pytorch device
dataset (dataset): dataset
'''
method = cfg['method']
model = method_dict[method].config.get_model(
cfg, device=device, dataset=dataset)
return model
# Trainer
def get_trainer(model, optimizer, cfg, device):
''' Returns a trainer instance.
Args:
model (nn.Module): the model which is used
optimizer (optimizer): pytorch optimizer
cfg (dict): config dictionary
device (device): pytorch device
'''
method = cfg['method']
trainer = method_dict[method].config.get_trainer(
model, optimizer, cfg, device)
return trainer
# Generator for final mesh extraction
def get_generator(model, cfg, device):
''' Returns a generator instance.
Args:
model (nn.Module): the model which is used
cfg (dict): config dictionary
device (device): pytorch device
'''
method = cfg['method']
generator = method_dict[method].config.get_generator(model, cfg, device)
return generator
# Datasets
def get_dataset(mode, cfg, return_idx=False, return_category=False):
    ''' Returns the dataset.
    Args:
        mode (str): dataset split mode ('train', 'val' or 'test')
        cfg (dict): config dictionary
        return_idx (bool): whether to include an ID field
        return_category (bool): whether to include category information
    '''
method = cfg['method']
dataset_type = cfg['data']['dataset']
dataset_folder = cfg['data']['path']
use_bps = cfg['model']['use_bps']
# categories = cfg['data']['classes']
rand_rotate = cfg['data']['random_rotate']
# Get split
splits = {
'train': cfg['data']['train_split'],
'val': cfg['data']['val_split'],
'test': cfg['data']['test_split'],
}
split = splits[mode]
# Create dataset
if dataset_type == 'obman':
dataset = data.ObmanDataset(
dataset_folder,
split=split,
no_except=False,
use_bps=use_bps,
return_idx=return_idx
)
elif dataset_type == 'inference':
dataset = data.InferenceDataset(
dataset_folder,
split=split,
no_except=False,
use_bps=use_bps,
random_rotate=rand_rotate,
return_idx=return_idx
)
else:
raise ValueError('Invalid dataset "%s"' % cfg['data']['dataset'])
return dataset
def get_inputs_helper(mode, cfg):
    ''' Returns the inputs helper.
Args:
mode (str): the mode which is used
cfg (dict): config dictionary
'''
input_type = cfg['data']['input_type']
with_transforms = cfg['data']['with_transforms']
if input_type is None:
        inputs_helper = None
elif input_type == 'trans_matrix':
inputs_helper = data.TransMatInputHelper(
cfg['data']['transmat_file'],
use_bone_length=cfg['model']['use_bone_length'],
unpackbits=cfg['data']['points_unpackbits']
)
else:
raise ValueError(
'Invalid input type (%s)' % input_type)
return inputs_helper
def get_preprocessor(cfg, dataset=None, device=None):
''' Returns preprocessor instance.
Args:
cfg (dict): config dictionary
dataset (dataset): dataset
device (device): pytorch device
'''
p_type = cfg['preprocessor']['type']
cfg_path = cfg['preprocessor']['config']
model_file = cfg['preprocessor']['model_file']
    if p_type == 'psgn':
        # 'preprocess' is not imported at the top of this file; the PSGN
        # preprocessor is assumed to live in the im2mesh codebase that this
        # repo uses elsewhere (see the imports in naive/generation.py).
        from im2mesh import preprocess
        preprocessor = preprocess.PSGNPreprocessor(
cfg_path=cfg_path,
pointcloud_n=cfg['data']['pointcloud_n'],
dataset=dataset,
device=device,
model_file=model_file,
)
elif p_type is None:
preprocessor = None
else:
raise ValueError('Invalid Preprocessor %s' % p_type)
return preprocessor | 5,471 | 25.955665 | 76 | py |
Im2Hands | Im2Hands-main/dependencies/halo/naive/training.py | import os
from tqdm import trange
import torch
from torch.nn import functional as F
from torch import distributions as dist
# from im2mesh.common import (
# compute_iou, make_3d_grid
# )
from models.utils import visualize as vis
from models.training import BaseTrainer
from models.naive.loss.loss import (BoneLengthLoss, RootBoneAngleLoss, AllBoneAngleLoss,
SurfaceDistanceLoss, InterpenetrationLoss, ManoVertLoss)
# mano loss
import sys
sys.path.insert(0, "/home/korrawe/halo_vae/scripts")
from manopth.manolayer import ManoLayer
from manopth import demo
# temp
import trimesh
from trimesh.base import Trimesh
import numpy as np
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from mpl_toolkits.mplot3d.art3d import Poly3DCollection
class Trainer(BaseTrainer):
    ''' Trainer object for the HALO VAE keypoint model.
    Args:
        model (nn.Module): HaloVAE model
        optimizer (optimizer): pytorch optimizer object
        kl_weight (float): weight of the KL-divergence term
        device (device): pytorch device
        input_type (str): input type
        vis_dir (str): visualization directory
        threshold (float): threshold value
        eval_sample (bool): whether to evaluate samples
        use_inter_loss (bool): whether to use the interpenetration loss
        use_refine_net (bool): whether to train the keypoint refinement network
        use_mano_loss (bool): whether to supervise MANO parameters/vertices instead of joints
    '''
def __init__(self, model, optimizer, kl_weight=0.1, device=None,
input_type='img', vis_dir=None, threshold=0.5, eval_sample=False,
use_inter_loss=False, use_refine_net=False, use_mano_loss=False):
self.model = model
self.optimizer = optimizer
self.kl_weight = kl_weight
self.device = device
self.input_type = input_type
self.vis_dir = vis_dir
self.threshold = threshold
self.eval_sample = eval_sample
self.mse_loss = torch.nn.MSELoss()
self.l1_loss = torch.nn.L1Loss()
self.bone_length_loss = BoneLengthLoss(device=device)
self.root_bone_angle_loss = RootBoneAngleLoss(device=device)
self.all_bone_angle_loss = AllBoneAngleLoss(device=device)
self.surface_dist_loss = SurfaceDistanceLoss(device=device)
self.inter_loss = InterpenetrationLoss(device=device)
self.use_surface_loss = False # True
self.use_inter_loss = use_inter_loss
        self.use_refine_net = use_refine_net
        # NOTE: the line below force-disables the refinement network,
        # overriding the use_refine_net constructor argument above.
        self.use_refine_net = False
self.use_mano_loss = use_mano_loss
if use_mano_loss:
self.mano_loss = ManoVertLoss(device=device)
self.mano_layer = ManoLayer(
mano_root='/home/korrawe/halo_vae/scripts/mano/models', center_idx=0, use_pca=True, ncomps=45, flat_hand_mean=False)
self.mano_layer = self.mano_layer.to(device)
# mano_joint_parent
self.joint_parent = np.array([0, 0, 1, 2, 3, 0, 5, 6, 7, 0, 9, 10, 11, 0, 13, 14, 15, 0, 17, 18, 19])
# self.root_bone_idx = np.array([1, 5, 9, 13, 17])
if vis_dir is not None and not os.path.exists(vis_dir):
os.makedirs(vis_dir)
def train_step(self, data, epoch_it):
        ''' Performs a training step.
        Args:
            data (dict): data dictionary
            epoch_it (int): current epoch number
        '''
self.model.train()
self.optimizer.zero_grad()
loss, loss_dict = self.compute_loss(data, epoch_it)
loss.backward()
self.optimizer.step()
return loss_dict # loss.item()
def eval_step(self, data):
''' Performs an evaluation step.
Args:
data (dict): data dictionary
'''
self.model.eval()
device = self.device
threshold = self.threshold
eval_dict = {}
# Compute elbo
# points = data.get('points').to(device)
# occ = data.get('occ').to(device)
object_points = data.get('object_points').float().to(device)
hand_joints = data.get('hand_joints').float().to(device)
# If use BPS
if self.model.use_bps:
object_points = data.get('object_bps').float().to(device)
# inputs = data.get('inputs', torch.empty(points.size(0), 0)).to(device)
# voxels_occ = data.get('voxels')
# points_iou = data.get('points_iou.points').to(device)
# occ_iou = data.get('points_iou.occ').to(device)
# if self.model.use_bone_length:
# bone_lengths = data.get('bone_lengths').to(device)
# else:
# bone_lengths = None
kwargs = {}
with torch.no_grad():
kl, pred, obj_c = self.model.compute_kl_divergence(
object_points, hand_joints, reture_obj_latent=True
)
# elbo, rec_error, kl = self.model.compute_elbo()
if self.use_mano_loss:
hand_verts = data.get('hand_verts').float().to(device)
rot, pose, shape, trans = pred[:, :3], pred[:, 3:48], pred[:, 48:58], pred[:, 58:61]
eval_dict['vert'] = self.mano_loss(rot, pose, shape, trans, hand_verts).item()
eval_dict['kl'] = kl.mean().item()
eval_dict['loss'] = self.kl_weight * eval_dict['kl'] + eval_dict['vert']
return eval_dict
eval_dict['joints_recon'] = self.mse_loss(pred, hand_joints).item()
eval_dict['bone_length'] = self.bone_length_loss(pred, hand_joints).item()
root_bone_angle, root_plane_angle = self.root_bone_angle_loss(pred, hand_joints)
eval_dict['root_bone_angle'] = root_bone_angle.item()
eval_dict['root_plane_angle'] = root_plane_angle.item()
eval_dict['all_bone_angle'] = self.all_bone_angle_loss(pred, hand_joints).item()
# import pdb; pdb.set_trace()
eval_dict['kl'] = kl.mean().item()
eval_dict['loss'] = (
2.0 * eval_dict['joints_recon']
+ 2.0 * eval_dict['bone_length']
+ 1.5 * eval_dict['root_bone_angle']
+ 1.5 * eval_dict['root_plane_angle']
+ 1.0 * eval_dict['all_bone_angle']
+ self.kl_weight * eval_dict['kl']
)
# Distance to object surface loss
if self.use_surface_loss:
gt_surface_dist = data.get('closest_point_dist').float().to(device)
loss_surface_dist = self.surface_dist_loss(pred, object_points, gt_surface_dist)
eval_dict['surface_dist'] = loss_surface_dist.item()
# eval_dict['loss'] += 0.5 * eval_dict['surface_dist']
# RefineNet loss
if self.use_refine_net:
tip_dists = data.get('tip_dists').float().to(device)
noisy_joints = data.get('noisy_joints').float().to(device)
refined_joints = self.model.refine_net(noisy_joints, obj_c, tip_dists)
loss_refinement = self.mse_loss(refined_joints, hand_joints)
eval_dict['refine_recon'] = loss_refinement.item()
eval_dict['loss'] += 2.0 * eval_dict['refine_recon']
loss_bone_length_re = self.bone_length_loss(refined_joints, hand_joints)
loss_root_bone_angle_re, loss_root_plane_angle_re = self.root_bone_angle_loss(refined_joints, hand_joints)
loss_all_bone_angle_re = self.all_bone_angle_loss(refined_joints, hand_joints)
eval_dict['refine_other'] = (
2.0 * loss_bone_length_re.item()
+ 1.5 * loss_root_bone_angle_re.item()
+ 1.5 * loss_root_plane_angle_re.item()
+ 1.0 * loss_all_bone_angle_re.item()
)
eval_dict['loss'] += eval_dict['refine_other']
# Interpenetration loss
if self.use_inter_loss:
inside_points = data.get('inside_points').float().to(device)
loss_inter, _ = self.inter_loss(pred, inside_points, self.model.halo_adapter)
eval_dict['inter'] = loss_inter.item()
eval_dict['loss'] += 4.0 * eval_dict['inter'] # 5.0
# eval_dict['loss'] = -elbo.mean().item()
# eval_dict['rec_error'] = rec_error.mean().item()
# # Compute iou
# batch_size = points.size(0)
# with torch.no_grad():
# p_out = self.model(points_iou, inputs, bone_lengths=bone_lengths,
# sample=self.eval_sample, **kwargs)
# occ_iou_np = (occ_iou >= 0.5).cpu().numpy()
# occ_iou_hat_np = (p_out >= threshold).cpu().numpy()
# iou = compute_iou(occ_iou_np, occ_iou_hat_np).mean()
# eval_dict['iou'] = iou
# Estimate voxel iou
# if voxels_occ is not None:
# voxels_occ = voxels_occ.to(device)
# points_voxels = make_3d_grid(
# (-0.5 + 1/64,) * 3, (0.5 - 1/64,) * 3, (32,) * 3)
# points_voxels = points_voxels.expand(
# batch_size, *points_voxels.size())
# points_voxels = points_voxels.to(device)
# with torch.no_grad():
# p_out = self.model(points_voxels, inputs,
# sample=self.eval_sample, **kwargs)
# voxels_occ_np = (voxels_occ >= 0.5).cpu().numpy()
# occ_hat_np = (p_out.probs >= threshold).cpu().numpy()
# iou_voxels = compute_iou(voxels_occ_np, occ_hat_np).mean()
# eval_dict['iou_voxels'] = iou_voxels
return eval_dict
def visualize(self, data, epoch):
''' Performs a visualization step for the data.
Args:
data (dict): data dictionary
epoch (int): epoch number
'''
device = self.device
object_points = data.get('object_points').float().to(device)
hand_joints_gt = data.get('hand_joints').float().to(device)
object_inputs = object_points
# If use BPS
if self.model.use_bps:
object_inputs = data.get('object_bps').float().to(device)
vis_idx = np.random.randint(64)
object_inputs = object_inputs[vis_idx].unsqueeze(0)
object_points = object_points[vis_idx].unsqueeze(0)
hand_joints_gt = hand_joints_gt[vis_idx].unsqueeze(0)
# import pdb; pdb.set_trace()
if self.use_mano_loss:
hand_verts_gt = data.get('hand_verts').float().to(device)
num_sample = 8
for n in range(num_sample):
# output_joints = self.model(object_points, hand_joints=hand_joints_gt, sample=False) # sample=True
            if n == 0:
                # posterior mean, conditioned on the ground-truth joints
                output_joints = self.model(object_inputs, hand_joints=hand_joints_gt, sample=False)
            elif n == 1:
                # prior mean (no hand conditioning)
                output_joints = self.model(object_inputs, sample=False)
            else:
                # random samples from the prior
                output_joints = self.model(object_inputs, sample=True)
# print("------------------")
# print(output_joints)
if self.use_mano_loss:
rot, pose, shape, trans = output_joints[:, :3], output_joints[:, 3:48], output_joints[:, 48:58], output_joints[:, 58:61]
_, output_joints = self.mano_layer(torch.cat((rot, pose), 1), shape, trans)
output_joints = output_joints / 10.0
print("val error: ", self.mse_loss(output_joints, hand_joints_gt))
object_points_vis = object_points.detach().cpu().numpy()[0] # [vis_idx]
output_joints_vis = output_joints.detach().cpu().numpy()[0] # [vis_idx]
gt_joints = hand_joints_gt.detach().cpu().numpy()[0] # [vis_idx]
output_path = os.path.join(self.vis_dir, 'ep%03d_%03d_%03d.png' % (epoch, vis_idx, n))
col = 'b' if n == 0 else 'g'
vis.visualise_skeleton(output_joints_vis, object_points_vis, joint_order='mano', color=col, out_file=output_path, show=False)
# HALO
if self.model.halo_adapter is not None:
output_mesh = self.model.halo_adapter(output_joints, joint_order='mano', return_kps=False, original_position=True)
meshout_path = os.path.join(self.vis_dir, 'ep%03d_%03d_%03d.obj' % (epoch, vis_idx, n))
if output_mesh is not None:
output_mesh.export(meshout_path)
# Ground truth object
gt_object_points = Trimesh(vertices=object_points_vis)
objout_path = os.path.join(self.vis_dir, 'ep%03d_%03d_obj.obj' % (epoch, vis_idx))
gt_object_points.export(objout_path)
# import pdb; pdb.set_trace()
# output_joints_new = self.model.halo_adapter(output_joints)
# output_joints_new_vis = output_joints_new.detach().cpu().numpy()[0] # [vis_idx]
# vis.visualise_skeleton(output_joints_new_vis, object_points_vis, color=col, out_file=output_path, show=True)
# mano_joint_parent = np.array([0, 0, 1, 2, 3, 0, 5, 6, 7, 0, 9, 10, 11, 0, 13, 14, 15, 0, 17, 18, 19])
# batch_size = data['points'].size(0)
# inputs = data.get('inputs', torch.empty(batch_size, 0)).to(device)
# if self.model.use_bone_length:
# bone_lengths = data.get('bone_lengths').to(device)
# else:
# bone_lengths = None
# shape = (32, 32, 32)
# # shape = (64, 64, 64)
# p = make_3d_grid([-0.5] * 3, [0.5] * 3, shape).to(device)
# p = p.expand(batch_size, *p.size())
# kwargs = {}
# with torch.no_grad():
# p_r = self.model(p, inputs, bone_lengths=bone_lengths, sample=self.eval_sample, **kwargs)
# occ_hat = p_r.view(batch_size, *shape)
# voxels_out = (occ_hat >= self.threshold).cpu().numpy()
# for i in trange(batch_size):
# input_img_path = os.path.join(self.vis_dir, '%03d_in.png' % i)
# # vis.visualize_data(
# # inputs[i].cpu(), self.input_type, input_img_path)
# vis.visualize_voxels(
# voxels_out[i], os.path.join(self.vis_dir, '%03d.png' % i))
def compute_loss(self, data, epoch_it):
''' Computes the loss.
Args:
data (dict): data dictionary
'''
device = self.device
# print("device!!!!!", device)
# p = data.get('points').to(device)
# occ = data.get('occ').to(device)
object_points = data.get('object_points').float().to(device)
hand_joints = data.get('hand_joints').float().to(device)
# import pdb; pdb.set_trace()
# Use BPS
if self.model.use_bps:
object_points = data.get('object_bps').float().to(device)
# import pdb; pdb.set_trace()
kwargs = {}
loss_dict = {}
obj_c = self.model.encode_objects(object_points)
# import pdb; pdb.set_trace()
q_z = self.model.infer_z(hand_joints, obj_c, **kwargs)
z = q_z.rsample()
# Ignore object
# c = c * 0.0
# z = z * 0.0
# print('z training', z)
# print("device!!!!!", q_z)
# print("device!!!!!", self.model.p0_z)
# KL-divergence
# if epoch_it > 0:
kl = dist.kl_divergence(q_z, self.model.p0_z).sum(dim=-1)
loss_kl = kl.mean()
loss_dict['kl'] = loss_kl.item()
# else:
# loss_kl = 0.0
# loss_dict['kl'] = 0.0
# import pdb; pdb.set_trace()
# joints
pred = self.model.decode(z, obj_c, **kwargs)
# print("pred", pred)
if self.use_mano_loss:
hand_verts = data.get('hand_verts').float().to(device)
rot, pose, shape, trans = pred[:, :3], pred[:, 3:48], pred[:, 48:58], pred[:, 58:61]
loss_hand_verts = self.mano_loss(rot, pose, shape, trans, hand_verts)
loss_dict['vert'] = loss_hand_verts.item()
loss = self.kl_weight * loss_kl + 1.0 * loss_hand_verts
loss_dict['total'] = loss.item()
return loss, loss_dict
loss = self.mse_loss(pred, hand_joints)
loss_dict['joints_recon'] = loss.item()
# vis.visualise_skeleton(pred[0].detach().cpu().numpy(), object_points[0].detach().cpu().numpy(), show=True)
# vis.visualise_skeleton(hand_joints[0].detach().cpu().numpy(), object_points[0].detach().cpu().numpy(), joint_order='mano', show=True)
# Bone length loss
loss_bone_length = self.bone_length_loss(pred, hand_joints) # self.compute_bone_length_loss(pred, hand_joints)
loss_dict['bone_length'] = loss_bone_length.item()
# Root bone angle loss
loss_root_bone_angle, loss_root_plane_angle = self.root_bone_angle_loss(pred, hand_joints)
loss_dict['root_bone_angle'] = loss_root_bone_angle.item()
loss_dict['root_plane_angle'] = loss_root_plane_angle.item()
# All bone angle loss
loss_all_bone_angle = self.all_bone_angle_loss(pred, hand_joints)
loss_dict['all_bone_angle'] = loss_all_bone_angle.item()
# Distance to object surface loss
if self.use_surface_loss:
gt_surface_dist = data.get('closest_point_dist').float().to(device)
loss_surface_dist = self.surface_dist_loss(pred, object_points, gt_surface_dist)
if self.model.use_bps:
recon_weight = 100.0
bone_weight = 1.0
else:
recon_weight = 2.0
bone_weight = 2.0
loss = (recon_weight * loss
+ bone_weight * loss_bone_length
+ 1.5 * loss_root_bone_angle # 1.5
+ 1.5 * loss_root_plane_angle # 1.5
+ 1.0 * loss_all_bone_angle # 1.5
+ self.kl_weight * loss_kl
)
if self.use_surface_loss:
# loss += 0.5 * loss_surface_dist
loss_dict['surface_dist'] = loss_surface_dist.item()
if self.use_refine_net:
# import pdb; pdb.set_trace()
tip_dists = data.get('tip_dists').float().to(device)
noisy_joints = data.get('noisy_joints').float().to(device)
refined_joints = self.model.refine_net(noisy_joints, obj_c, tip_dists)
loss_refinement_l2 = self.mse_loss(refined_joints, hand_joints)
loss_dict['refine_l2'] = loss_refinement_l2.item()
loss_bone_length_re = self.bone_length_loss(refined_joints, hand_joints)
loss_root_bone_angle_re, loss_root_plane_angle_re = self.root_bone_angle_loss(refined_joints, hand_joints)
loss_all_bone_angle_re = self.all_bone_angle_loss(refined_joints, hand_joints)
loss += (
recon_weight * loss_refinement_l2
+ bone_weight * loss_bone_length_re
+ 1.5 * loss_root_bone_angle_re
+ 1.5 * loss_root_plane_angle_re # 1.5
+ 1.0 * loss_all_bone_angle_re
)
loss_dict['refine_all'] = (
loss_refinement_l2.item() + loss_bone_length_re.item() + loss_root_bone_angle_re.item()
+ loss_root_plane_angle_re.item() + loss_all_bone_angle_re.item()
)
if self.use_inter_loss:
inside_points = data.get('inside_points').float().to(device)
# from trimesh.base import Trimesh
# tmp_joints = pred[None, 0]
# output_mesh = self.model.halo_adapter(tmp_joints, joint_order='mano', original_position=True)
# meshout_path = '/home/korrawe/halo_vae/exp/grab_refine_inter/test/hand.obj'
# output_mesh.export(meshout_path)
# # test query box
# # inside_points = torch.rand(16, 4000, 3).cuda() - 0.5
# # inside_points = inside_points * 25.
# obj_points_tmp = inside_points[0].detach().cpu().numpy()
# gt_object_points = Trimesh(vertices=obj_points_tmp)
# obj_path = '/home/korrawe/halo_vae/exp/grab_refine_inter/test/obj.obj'
# gt_object_points.export(obj_path)
loss_inter, occ_p = self.inter_loss(pred, inside_points, self.model.halo_adapter)
# one_idx = occ_p[0] > 0.5
# obj_points_tmp = inside_points[0, one_idx].detach().cpu().numpy()
# gt_object_points = Trimesh(vertices=obj_points_tmp)
# obj_path = '/home/korrawe/halo_vae/exp/grab_refine_inter/test/intersect.obj'
# gt_object_points.export(obj_path)
# import pdb; pdb.set_trace()
loss += 4.0 * loss_inter # 5.0
loss_dict['inter'] = loss_inter.item()
# import pdb; pdb.set_trace()
# grad_outputs = torch.ones_like(loss_inter)
# grad = torch.autograd.grad(loss_inter, [pred], grad_outputs=grad_outputs, create_graph=True)[0]
loss_dict['total'] = loss.item()
return loss, loss_dict
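# Illustrative outer loop; building the model, optimizer and data loader is
# assumed to follow models.config.get_model / get_dataset and is not shown here.
def _demo_train_loop(model, optimizer, train_loader, device, n_epochs=2):
    trainer = Trainer(model, optimizer, kl_weight=0.1, device=device)
    loss_dict = {}
    for epoch_it in range(n_epochs):
        for batch in train_loader:
            loss_dict = trainer.train_step(batch, epoch_it)
    return loss_dict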
| 20,600 | 41.476289 | 143 | py |
Im2Hands | Im2Hands-main/dependencies/halo/naive/config.py | import torch
import torch.distributions as dist
from torch import nn
import os
# from im2mesh.encoder import encoder_dict
# from im2mesh.onet import models, training, generation
# from im2mesh import data
# from im2mesh import config
from models import data
from models import config
from models.naive import models, training, generation
def get_model(cfg, device=None, dataset=None, **kwargs):
    ''' Return the HALO VAE (HaloVAE) keypoint model.
Args:
cfg (dict): imported yaml config
device (device): pytorch device
dataset (dataset): dataset
'''
decoder = cfg['model']['decoder']
encoder = cfg['model']['encoder']
encoder_latent = cfg['model']['encoder_latent']
# dim = cfg['data']['dim']
z_dim = cfg['model']['z_dim'] # hand dim
# c_dim = cfg['model']['c_dim']
decoder_dim = cfg['model']['decoder_dim']
use_bps = cfg['model']['use_bps']
use_refine_net = cfg['model']['use_refine_net']
# decoder_kwargs = cfg['model']['decoder_kwargs']
# encoder_kwargs = cfg['model']['encoder_kwargs']
# encoder_latent_kwargs = cfg['model']['encoder_latent_kwargs']
# decoder_part_output = (decoder == 'piece_rigid' or decoder == 'piece_deform')
# use_bone_length=cfg['model']['use_bone_length']
# decoder = models.decoder_dict[decoder](
# dim=dim, z_dim=z_dim, c_dim=c_dim,
# **decoder_kwargs
# )
# decoder = models.decoder_dict[decoder](
# decoder_c_dim,
# **decoder_kwargs
# )
# encoder = models.encoder_dict[encoder](
# encoder_c_dim,
# **encoder_kwargs
# )
# if encoder is not None:
# encoder = encoder_dict[encoder](
# c_dim=c_dim,
# **encoder_kwargs
# )
# else:
# encoder = None
object_dim = cfg['model']['object_dim']
object_hidden_dim = cfg['model']['object_hidden_dim']
if use_bps:
obj_encoder = None
else:
obj_encoder = models.encoder_dict['pointnet'](
c_dim=object_dim,
hidden_dim=object_hidden_dim
)
# hand_encoder = models.encoder_dict['simple'](
# D_in=21*3, H=128, D_out=128
# )
encoder_dim = cfg['model']['encoder_dim']
hand_encoder_latent = models.encoder_latent_dict['simple_latent'](
c_dim=object_dim,
z_dim=z_dim,
dim=encoder_dim
)
mano_params_out = False # True
if mano_params_out:
D_out = 3 + 45 + 10 + 3
# rot, pose, shape, trans
else:
D_out = 63
decoder = models.decoder_dict['simple'](
D_in=object_dim + z_dim, H=decoder_dim, D_out=D_out,
mano_params_out=mano_params_out
)
refine_model = None
if use_refine_net:
refine_model = models.RefineNet(
in_dim=21, c_dim=object_dim, dim=128
)
p0_z = get_prior_z(cfg, device)
model = models.HaloVAE(
obj_encoder=obj_encoder,
encoder_latent=hand_encoder_latent,
decoder=decoder,
p0_z=p0_z,
use_bps=use_bps,
refine_net=refine_model,
device=device
)
# if z_dim != 0:
# encoder_latent = models.encoder_latent_dict[encoder_latent](
# dim=dim, z_dim=z_dim, c_dim=c_dim,
# **encoder_latent_kwargs
# )
# else:
# encoder_latent = None
# if encoder == 'idx':
# encoder = nn.Embedding(len(dataset), c_dim)
# elif encoder is not None:
# encoder = encoder_dict[encoder](
# c_dim=c_dim,
# **encoder_kwargs
# )
# else:
# encoder = None
# p0_z = get_prior_z(cfg, device)
# model = models.OccupancyNetwork(
# decoder, encoder, encoder_latent, p0_z, device=device
# )
return model
def get_trainer(model, optimizer, cfg, device, **kwargs):
''' Returns the trainer object.
Args:
        model (nn.Module): the HaloVAE model
optimizer (optimizer): pytorch optimizer object
cfg (dict): imported yaml config
device (device): pytorch device
'''
threshold = cfg['test']['threshold']
out_dir = cfg['training']['out_dir']
vis_dir = os.path.join(out_dir, 'vis')
input_type = cfg['data']['input_type']
use_refine_net = cfg['model']['use_refine_net']
use_inter_loss = cfg['model']['use_inter_loss']
use_mano_loss = cfg['model']['use_mano_loss']
# skinning_loss_weight = cfg['model']['skinning_weight']
kl_weight = cfg['model']['kl_weight']
trainer = training.Trainer(
model, optimizer, kl_weight=kl_weight,
device=device, input_type=input_type,
vis_dir=vis_dir, threshold=threshold,
eval_sample=cfg['training']['eval_sample'],
use_refine_net=use_refine_net,
use_inter_loss=use_inter_loss,
use_mano_loss=use_mano_loss
)
return trainer
def get_generator(model, cfg, device, **kwargs):
''' Returns the generator object.
Args:
        model (nn.Module): HaloVAE model
cfg (dict): imported yaml config
device (device): pytorch device
'''
preprocessor = config.get_preprocessor(cfg, device=device)
generator = generation.Generator3D(
model,
device=device,
threshold=cfg['test']['threshold'],
resolution0=cfg['generation']['resolution_0'],
upsampling_steps=cfg['generation']['upsampling_steps'],
sample=cfg['generation']['use_sampling'],
refinement_step=cfg['generation']['refinement_step'],
with_color_labels=cfg['generation']['vert_labels'],
convert_to_canonical=cfg['generation']['convert_to_canonical'],
simplify_nfaces=cfg['generation']['simplify_nfaces'],
preprocessor=preprocessor,
)
return generator
def get_prior_z(cfg, device, **kwargs):
''' Returns prior distribution for latent code z.
Args:
cfg (dict): imported yaml config
device (device): pytorch device
'''
z_dim = cfg['model']['z_dim']
p0_z = dist.Normal(
torch.zeros(z_dim, device=device),
torch.ones(z_dim, device=device)
)
return p0_z
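# Minimal sketch of how the prior returned by get_prior_z is used: sampling a
# batch from the factorized Gaussian yields latent codes of shape (B, z_dim)
# (the z_dim value below is illustrative, not taken from a real config).
def _demo_prior_sampling():
    z_dim = 64
    p0_z = dist.Normal(torch.zeros(z_dim), torch.ones(z_dim))
    z = p0_z.sample((4,))  # shape (4, z_dim)
    return z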
def get_data_fields(mode, cfg):
''' Returns the data fields.
Args:
mode (str): the mode which is used
cfg (dict): imported yaml config
'''
points_transform = data.SubsamplePoints(cfg['data']['points_subsample'])
with_transforms = cfg['model']['use_camera']
fields = {}
fields['points'] = data.PointsField(
cfg['data']['points_file'], points_transform,
with_transforms=with_transforms,
unpackbits=cfg['data']['points_unpackbits'],
)
if mode in ('val', 'test'):
points_iou_file = cfg['data']['points_iou_file']
voxels_file = cfg['data']['voxels_file']
if points_iou_file is not None:
fields['points_iou'] = data.PointsField(
points_iou_file,
with_transforms=with_transforms,
unpackbits=cfg['data']['points_unpackbits'],
)
if voxels_file is not None:
fields['voxels'] = data.VoxelsField(voxels_file)
return fields
def get_data_helpers(mode, cfg):
''' Returns the data fields.
Args:
mode (str): the mode which is used
cfg (dict): imported yaml config
'''
with_transforms = cfg['model']['use_camera']
helpers = {}
if mode in ('val', 'test'):
points_iou_file = cfg['data']['points_iou_file']
voxels_file = cfg['data']['voxels_file']
if points_iou_file is not None:
helpers['points_iou'] = data.PointsHelper(
points_iou_file,
with_transforms=with_transforms,
unpackbits=cfg['data']['points_unpackbits'],
)
# if voxels_file is not None:
# fields['voxels'] = data.VoxelsField(voxels_file)
return helpers
def get_data_transforms(mode, cfg):
''' Returns the data transform dict of callable.
Args:
mode (str): the mode which is used
cfg (dict): imported yaml config
'''
transform_dict = {}
transform_dict['points'] = data.SubsamplePoints(cfg['data']['points_subsample'])
if (cfg['model']['decoder'] == 'piece_rigid' or
cfg['model']['decoder'] == 'piece_deform'):
transform_dict['mesh_points'] = data.SubsampleMeshVerts(cfg['data']['mesh_verts_subsample'])
# transform_dict['reshape_occ'] = data.ReshapeOcc(cfg['data']['mesh_verts_subsample'])
return transform_dict
| 8,530 | 29.90942 | 100 | py |
Im2Hands | Im2Hands-main/dependencies/halo/naive/generation.py | import torch
import torch.optim as optim
from torch import autograd
import numpy as np
import os
from tqdm import trange
import trimesh
from trimesh.base import Trimesh
from im2mesh.utils import libmcubes
from im2mesh.common import make_3d_grid
from im2mesh.utils.libsimplify import simplify_mesh
from im2mesh.utils.libmise import MISE
import time
from models.utils import visualize as vis
from models.data.input_helpers import rot_mat_by_angle
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
# mano loss
import sys
sys.path.insert(0, "/home/korrawe/halo_vae/scripts")
from manopth.manolayer import ManoLayer
from manopth import demo
class Generator3D(object):
    ''' Generator class for occupancy-based mesh extraction.
    It provides functions to generate the final mesh as well as refinement options.
Args:
model (nn.Module): trained Occupancy Network model
points_batch_size (int): batch size for points evaluation
threshold (float): threshold value
refinement_step (int): number of refinement steps
device (device): pytorch device
resolution0 (int): start resolution for MISE
upsampling steps (int): number of upsampling steps
with_normals (bool): whether normals should be estimated
padding (float): how much padding should be used for MISE
sample (bool): whether z should be sampled
with_color_labels (bool): whether to assign part-color to the output mesh vertices
convert_to_canonical (bool): whether to reconstruct mesh in canonical pose (for debugging)
simplify_nfaces (int): number of faces the mesh should be simplified to
preprocessor (nn.Module): preprocessor for inputs
'''
def __init__(self, model, points_batch_size=100000,
threshold=0.5, refinement_step=0, device=None,
resolution0=16, upsampling_steps=3,
with_normals=False, padding=0.1, sample=False,
with_color_labels=False,
convert_to_canonical=False,
simplify_nfaces=None,
preprocessor=None):
self.model = model.to(device)
self.points_batch_size = points_batch_size
self.refinement_step = refinement_step
self.threshold = threshold
self.device = device
self.resolution0 = resolution0
self.upsampling_steps = upsampling_steps
self.with_normals = with_normals
self.padding = padding
self.sample = sample
self.with_color_labels = with_color_labels
self.convert_to_canonical = convert_to_canonical
self.simplify_nfaces = simplify_nfaces
self.preprocessor = preprocessor
self.bone_colors = np.array([
(119, 41, 191, 255), (75, 170, 46, 255), (116, 61, 134, 255), (44, 121, 216, 255), (250, 191, 216, 255), (129, 64, 130, 255),
(71, 242, 184, 255), (145, 60, 43, 255), (51, 68, 187, 255), (208, 250, 72, 255), (104, 155, 87, 255), (189, 8, 224, 255),
(193, 172, 145, 255), (72, 93, 70, 255), (28, 203, 124, 255), (131, 207, 80, 255)
], dtype=np.uint8
)
# MANO
self.mano_layer = ManoLayer(
mano_root='/home/korrawe/halo_vae/scripts/mano/models', center_idx=0, use_pca=True, ncomps=45, flat_hand_mean=False)
self.mano_layer = self.mano_layer.to(device)
def sample_keypoint(self, data, obj_idx, N=1, gen_mesh=False, mesh_dir=None,
random_rotate=False, return_stats=True, use_refine_net=False):
        ''' Samples N hands for the given data.
        Args:
            data (dict): data dictionary
            obj_idx (int): index of the object sample
            N (int): number of hands to sample
            gen_mesh (bool): whether to also generate hand meshes with HALO
            mesh_dir (str): output mesh location
            return_stats (bool): whether stats should be returned
            use_refine_net (bool): whether to refine the sampled keypoints
        '''
sample_per_obj = 10 # 5 for obman # 20 for grab
self.model.eval()
device = self.device
stats_dict = {}
mesh_out = []
kps_out = []
object_points = data.get('object_points').float().to(device)
hand_joints_gt = data.get('hand_joints').float().to(device)
rot_mat = data['rot_mat'][0].float().numpy()
# Use BPS
if self.model.use_bps:
object_inputs = data.get('object_bps').float().to(device)
else:
object_inputs = object_points
vis_idx = 0 # np.random.randint(64)
object_points = object_points[vis_idx].unsqueeze(0)
hand_joints_gt = hand_joints_gt[vis_idx].unsqueeze(0)
obj_mesh_path = data['mesh_path'][0]
obj_name = os.path.splitext(os.path.basename(obj_mesh_path))[0]
# Ground truth object
gt_object_points = Trimesh(vertices=object_points.detach().cpu().numpy()[0])
if self.model.use_bps:
gt_object_points.vertices *= 100.0
# Rotate to canonical
gt_object_points.vertices = gt_object_points.vertices + data['obj_center'][0].numpy()
gt_obj_verts = np.expand_dims(gt_object_points.vertices, -1)
gt_object_points.vertices = np.matmul(rot_mat.T, gt_obj_verts).squeeze(-1)
# gt_object_points.export(os.path.join(mesh_dir, str(obj_name) + '_' + str(obj_idx % sample_per_obj) + '_gt_obj_points.obj'))
# Copy input mesh if available
# import pdb; pdb.set_trace()
if len(obj_mesh_path) > 0:
gt_object_mesh = trimesh.load(obj_mesh_path, process=False)
gt_object_mesh.vertices = gt_object_mesh.vertices * data['scale'][0].item() # + data['obj_center'][0].numpy()
# gt_object_mesh.export(os.path.join(mesh_dir, 'obj' + str(obj_name) + '_gt_obj_mesh.obj')) # obj_idx
gt_object_mesh.export(os.path.join(mesh_dir, str(obj_name) + '_gt_obj_mesh.obj')) # obj_idx
for n in range(N):
# Rotate object input
# if random_rotate:
# x_angle = np.random.rand() * np.pi * 2.0
# y_angle = np.random.rand() * np.pi * 2.0
# z_angle = np.random.rand() * np.pi * 2.0
# rot_mat = rot_mat_by_angle(x_angle, y_angle, z_angle)
# else:
# rot_mat = np.eye(3)
# rot_mat_t = torch.from_numpy(rot_mat).float().to(device)
# object_inputs = torch.matmul(rot_mat_t, object_inputs.unsqueeze(-1)).squeeze(-1)
output_joints, object_latent = self.model(object_inputs, sample=True, reture_obj_latent=True)
# MANO
use_mano_loss = False # True
if use_mano_loss:
rot, pose, shape, trans = output_joints[:, :3], output_joints[:, 3:48], output_joints[:, 48:58], output_joints[:, 58:61]
output_verts, output_joints = self.mano_layer(torch.cat((rot, pose), 1), shape, trans)
output_joints = output_joints / 10.0
output_verts = output_verts / 10.0
object_points_vis = object_points.detach().cpu().numpy()[0] # [vis_idx]
output_joints_vis = output_joints.detach().cpu().numpy()[0] # [vis_idx]
gt_joints = hand_joints_gt.detach().cpu().numpy()[0] # [vis_idx]
if use_refine_net:
# Do refinement
# import pdb; pdb.set_trace()
output_joints_refine = self.model.refine(output_joints, object_latent, object_points, step=3) # 3
refine_output_joints_vis = output_joints_refine.detach().cpu().numpy()[0] # [vis_idx]
fig = plt.figure()
ax = fig.gca(projection=Axes3D.name)
vis.plot_skeleton_single_view(output_joints_vis, joint_order='mano', object_points=object_points_vis, ax=ax, color='r', show=False)
vis.plot_skeleton_single_view(refine_output_joints_vis, joint_order='mano', ax=ax, color='b', show=False)
# fig.show()
output_path = os.path.join(mesh_dir, '..', 'vis_compare', '%s_%03d.png' % (obj_name, obj_idx % sample_per_obj))
plt.savefig(output_path)
# fig.close()
kps_out.append(output_joints_vis)
# output_path = os.path.join(mesh_dir, '..', 'vis', '%03d_%03d.png' % (obj_idx, n))
output_path = os.path.join(mesh_dir, '..', 'vis', '%s_%03d.png' % (obj_name, obj_idx % sample_per_obj))
# col = 'b' if n == 0 else 'g'
col = 'g'
vis.visualise_skeleton(output_joints_vis, object_points_vis, joint_order='mano', color=col, out_file=output_path, show=False)
# print('vis out', output_path)
# vis.visualise_skeleton(output_joints_vis, object_points_vis, joint_order='mano', color=col, show=True)
# vis.visualise_skeleton(gt_joints, object_points_vis, joint_order='mano', color=col, show=True)
# vis.visualise_skeleton(hand_joints_gt_test.detach().cpu().numpy()[0], object_points_vis, joint_order='biomech', color=col, show=True)
# Save keypoints numpy
kps_path = os.path.join(mesh_dir, '..', 'kps', '%s_%03d.npy' % (obj_name, obj_idx % sample_per_obj))
with open(kps_path, 'wb') as kps_file:
np.save(kps_file, output_joints_vis)
# output_joints_vis = np.load(kps_path)
# output_joints = torch.from_numpy(output_joints_vis).float().to(device).unsqueeze(0)
# MANO
if use_mano_loss:
sample_name = '%s_h%03d' % (str(obj_name), obj_idx % sample_per_obj)
mano_faces = self.mano_layer.th_faces.detach().cpu()
output_mesh = trimesh.Trimesh(vertices=output_verts.detach().cpu().numpy()[0], faces=mano_faces)
output_mesh.vertices = output_mesh.vertices + data['obj_center'][0].numpy()
# Rotate output back to original object orientation
verts = np.expand_dims(output_mesh.vertices, -1)
output_mesh.vertices = np.matmul(rot_mat.T, verts).squeeze(-1)
# import pdb; pdb.set_trace()
# Debug rotation
# gt_object_points = Trimesh(vertices=object_inputs.detach().cpu().numpy()[0])
# gt_object_points.export(os.path.join(mesh_dir, sample_name + '_gt_obj_points.obj'))
meshout_path = os.path.join(mesh_dir, sample_name)
output_mesh.export(meshout_path + '.obj')
continue
# HALO
if self.model.halo_adapter is not None and gen_mesh:
# sample_name = 'obj%03d_h%03d' % (obj_idx, n)
sample_name = '%s_h%03d' % (str(obj_name), obj_idx % sample_per_obj)
# For BPS
if self.model.use_bps:
output_joints *= 100.0
output_mesh, normalized_kps = self.model.halo_adapter(output_joints, joint_order='mano', return_kps=True, original_position=True)
# # Move to object canonical
# print(data['obj_center'][0].numpy())
output_mesh.vertices = output_mesh.vertices + data['obj_center'][0].numpy()
# Rotate output back to original object orientation
verts = np.expand_dims(output_mesh.vertices, -1)
output_mesh.vertices = np.matmul(rot_mat.T, verts).squeeze(-1)
# import pdb; pdb.set_trace()
# Debug rotation
# gt_object_points = Trimesh(vertices=object_inputs.detach().cpu().numpy()[0])
# gt_object_points.export(os.path.join(mesh_dir, sample_name + '_gt_obj_points.obj'))
meshout_path = os.path.join(mesh_dir, sample_name)
output_mesh.export(meshout_path + '.obj')
# Optimize translation
optimize_trans = False # True
if optimize_trans:
# import pdb; pdb.set_trace()
inside_points = data.get('inside_points').float().to(device)
translation = self.model.halo_adapter.optimize_trans(inside_points, output_joints, gt_object_mesh, joint_order='mano')
# import pdb; pdb.set_trace()
translation = translation.detach().cpu().numpy()
# Add translation
new_verts = verts.squeeze(-1) + translation
new_verts = np.expand_dims(new_verts, -1)
# Rotate back
# import pdb; pdb.set_trace()
final_joints = output_joints_vis + translation
final_verts = np.matmul(rot_mat.T, new_verts).squeeze(-1)
output_mesh.vertices = final_verts
meshout_path = os.path.join(mesh_dir, sample_name)
output_mesh.export(meshout_path + '_refine.obj')
# Save keypoints numpy
kps_path = os.path.join(mesh_dir, '..', 'kps', '%s_%03d_refine.npy' % (obj_name, obj_idx % sample_per_obj))
with open(kps_path, 'wb') as kps_file:
np.save(kps_file, final_joints)
# For debugging ground truth
# output_mesh, normalized_kps = self.model.halo_adapter(hand_joints_gt, joint_order='mano', return_kps=True, original_position=True)
# mesh_out.append(output_mesh)
# vis.visualise_skeleton(normalized_kps.detach().cpu().numpy()[0], object_points_vis, color=col, title=sample_name, show=True)
# gt_kps_mesh = Trimesh(vertices=hand_joints_gt.detach().cpu().numpy()[0])
# gt_kps_mesh.export(os.path.join(mesh_dir, sample_name + '_gt_kps.obj'))
if N == 1:
return kps_out[0] # mesh_out[0]
return kps_out # mesh_out
def generate_mesh(self, data, return_stats=True):
''' Generates the output mesh.
Args:
data (tensor): data tensor
return_stats (bool): whether stats should be returned
'''
self.model.eval()
device = self.device
stats_dict = {}
inputs = data.get('inputs', torch.empty(1, 0)).to(device)
bone_lengths = data.get('bone_lengths')
if bone_lengths is not None:
bone_lengths = bone_lengths.to(device)
kwargs = {}
# Preprocess if requires
if self.preprocessor is not None:
t0 = time.time()
with torch.no_grad():
inputs = self.preprocessor(inputs)
stats_dict['time (preprocess)'] = time.time() - t0
# Encode inputs
t0 = time.time()
with torch.no_grad():
c = self.model.encode_inputs(inputs)
stats_dict['time (encode inputs)'] = time.time() - t0
# print(c.size())
# z = self.model.get_z_from_prior((1,), sample=self.sample).to(device)
mesh = self.generate_from_latent(c, bone_lengths=bone_lengths, stats_dict=stats_dict, **kwargs)
if return_stats:
return mesh, stats_dict
else:
return mesh
def generate_from_latent(self, c=None, bone_lengths=None, stats_dict={}, **kwargs):
''' Generates mesh from latent.
Args:
# z (tensor): latent code z
c (tensor): latent conditioned code c
stats_dict (dict): stats dictionary
'''
# threshold = np.log(self.threshold) - np.log(1. - self.threshold)
threshold = self.threshold
t0 = time.time()
# Compute bounding box size
box_size = 1 + self.padding
# Shortcut
if self.upsampling_steps == 0:
nx = self.resolution0
pointsf = box_size * make_3d_grid(
(-0.5,)*3, (0.5,)*3, (nx,)*3
)
# values = self.eval_points(pointsf, z, c, **kwargs).cpu().numpy()
values = self.eval_points(pointsf, c, bone_lengths=bone_lengths, **kwargs).cpu().numpy()
value_grid = values.reshape(nx, nx, nx)
else:
mesh_extractor = MISE(
self.resolution0, self.upsampling_steps, threshold)
points = mesh_extractor.query()
while points.shape[0] != 0:
# Query points
pointsf = torch.FloatTensor(points).to(self.device)
# Normalize to bounding box
pointsf = pointsf / mesh_extractor.resolution
pointsf = box_size * (pointsf - 0.5)
# Evaluate model and update
values = self.eval_points(
pointsf, c, bone_lengths=bone_lengths, **kwargs).cpu().numpy()
# values = self.eval_points(
# pointsf, z, c, **kwargs).cpu().numpy()
values = values.astype(np.float64)
mesh_extractor.update(points, values)
points = mesh_extractor.query()
value_grid = mesh_extractor.to_dense()
# Extract mesh
stats_dict['time (eval points)'] = time.time() - t0
# mesh = self.extract_mesh(value_grid, z, c, stats_dict=stats_dict)
mesh = self.extract_mesh(value_grid, c, bone_lengths=bone_lengths, stats_dict=stats_dict)
return mesh
def eval_points(self, p, c=None, bone_lengths=None, **kwargs):
''' Evaluates the occupancy values for the points.
Args:
p (tensor): points
# z (tensor): latent code z
c (tensor): latent conditioned code c
'''
p_split = torch.split(p, self.points_batch_size)
occ_hats = []
for pi in p_split:
pi = pi.unsqueeze(0).to(self.device)
with torch.no_grad():
# occ_hat = self.model.decode(pi, z, c, **kwargs).logits
                occ_hat = self.model.decode(pi, c, bone_lengths=bone_lengths, **kwargs)
occ_hats.append(occ_hat.squeeze(0).detach().cpu())
occ_hat = torch.cat(occ_hats, dim=0)
return occ_hat
def eval_point_colors(self, p, c=None, bone_lengths=None):
''' Re-evaluates the outputted points from marching cubes for vertex colors.
Args:
p (tensor): points
# z (tensor): latent code z
c (tensor): latent conditioned code c
'''
pointsf = torch.FloatTensor(p).to(self.device)
p_split = torch.split(pointsf, self.points_batch_size)
point_labels = []
for pi in p_split:
pi = pi.unsqueeze(0).to(self.device)
with torch.no_grad():
# occ_hat = self.model.decode(pi, z, c, **kwargs).logits
_, label = self.model.decode(pi, c, bone_lengths=bone_lengths, return_model_indices=True)
point_labels.append(label.squeeze(0).detach().cpu())
# print("label", label[:40])
label = torch.cat(point_labels, dim=0)
label = label.detach().cpu().numpy()
return label
def extract_mesh(self, occ_hat, c=None, bone_lengths=None, stats_dict=dict()):
        ''' Extracts the mesh from the predicted occupancy grid.
        Args:
            occ_hat (tensor): predicted occupancy grid
            c (tensor): latent conditioned code c
            bone_lengths (tensor): bone lengths
            stats_dict (dict): stats dictionary
        '''
# Some short hands
n_x, n_y, n_z = occ_hat.shape
box_size = 1 + self.padding
# threshold = np.log(self.threshold) - np.log(1. - self.threshold)
threshold = self.threshold
# Make sure that mesh is watertight
t0 = time.time()
occ_hat_padded = np.pad(
occ_hat, 1, 'constant', constant_values=-1e6)
vertices, triangles = libmcubes.marching_cubes(
occ_hat_padded, threshold)
stats_dict['time (marching cubes)'] = time.time() - t0
# Strange behaviour in libmcubes: vertices are shifted by 0.5
vertices -= 0.5
# Undo padding
vertices -= 1
# Normalize to bounding box
vertices /= np.array([n_x-1, n_y-1, n_z-1])
vertices = box_size * (vertices - 0.5)
# Get point colors
if self.with_color_labels:
vert_labels = self.eval_point_colors(vertices, c, bone_lengths=bone_lengths)
vertex_colors = self.bone_colors[vert_labels]
# Convert the mesh vertice back to canonical pose using the trans matrix of the label
# self.convert_to_canonical = False # True
# convert_to_canonical = True
if self.convert_to_canonical:
vertices = self.convert_mesh_to_canonical(vertices, c, vert_labels)
                # vertices = vertices * 2.5 * 2.5
else:
vertex_colors = None
# mesh_pymesh = pymesh.form_mesh(vertices, triangles)
# mesh_pymesh = fix_pymesh(mesh_pymesh)
# Estimate normals if needed
if self.with_normals and not vertices.shape[0] == 0:
t0 = time.time()
# normals = self.estimate_normals(vertices, z, c)
normals = self.estimate_normals(vertices, c)
stats_dict['time (normals)'] = time.time() - t0
else:
normals = None
# Create mesh
mesh = trimesh.Trimesh(vertices, triangles,
vertex_normals=normals,
vertex_colors=vertex_colors, ##### add vertex colors
# face_colors=face_colors, ##### try face color
process=False)
# Directly return if mesh is empty
if vertices.shape[0] == 0:
return mesh
# TODO: normals are lost here
if self.simplify_nfaces is not None:
t0 = time.time()
mesh = simplify_mesh(mesh, self.simplify_nfaces, 5.)
stats_dict['time (simplify)'] = time.time() - t0
# Refine mesh
if self.refinement_step > 0:
t0 = time.time()
# self.refine_mesh(mesh, occ_hat, z, c)
self.refine_mesh(mesh, occ_hat, c)
stats_dict['time (refine)'] = time.time() - t0
return mesh
def convert_mesh_to_canonical(self, vertices, trans_mat, vert_labels):
        ''' Converts the mesh vertices back to canonical pose using the input
        transformation matrices and the labels.
        Args:
            vertices (np.ndarray): vertices of the mesh
            trans_mat (tensor): transformation matrices (without projection)
            vert_labels (tensor): labels indicating which sub-model each vertex belongs to
        '''
# print(trans_mat.shape)
# print(vertices.shape)
# print(type(vertices))
# print(vert_labels.shape)
pointsf = torch.FloatTensor(vertices).to(self.device)
# print("pointssf before", pointsf.shape)
# [V, 3] -> [V, 4, 1]
pointsf = torch.cat([pointsf, pointsf.new_ones(pointsf.shape[0], 1)], dim=1)
pointsf = pointsf.unsqueeze(2)
# print("pointsf", pointsf.shape)
vert_trans_mat = trans_mat[0, vert_labels]
# print(vert_trans_mat.shape)
new_vertices = torch.matmul(vert_trans_mat, pointsf)
vertices = new_vertices[:,:3].squeeze(2).detach().cpu().numpy()
# print("return", vertices.shape)
return vertices # new_vertices
def estimate_normals(self, vertices, c=None):
''' Estimates the normals by computing the gradient of the objective.
Args:
vertices (numpy array): vertices of the mesh
# z (tensor): latent code z
c (tensor): latent conditioned code c
'''
device = self.device
vertices = torch.FloatTensor(vertices)
vertices_split = torch.split(vertices, self.points_batch_size)
normals = []
# z, c = z.unsqueeze(0), c.unsqueeze(0)
c = c.unsqueeze(0)
for vi in vertices_split:
vi = vi.unsqueeze(0).to(device)
vi.requires_grad_()
# occ_hat = self.model.decode(vi, z, c).logits
occ_hat = self.model.decode(vi, c)
out = occ_hat.sum()
out.backward()
ni = -vi.grad
ni = ni / torch.norm(ni, dim=-1, keepdim=True)
ni = ni.squeeze(0).cpu().numpy()
normals.append(ni)
normals = np.concatenate(normals, axis=0)
return normals
def refine_mesh(self, mesh, occ_hat, c=None):
''' Refines the predicted mesh.
Args:
mesh (trimesh object): predicted mesh
occ_hat (tensor): predicted occupancy grid
# z (tensor): latent code z
c (tensor): latent conditioned code c
'''
self.model.eval()
# Some shorthands
n_x, n_y, n_z = occ_hat.shape
assert(n_x == n_y == n_z)
# threshold = np.log(self.threshold) - np.log(1. - self.threshold)
threshold = self.threshold
# Vertex parameter
v0 = torch.FloatTensor(mesh.vertices).to(self.device)
v = torch.nn.Parameter(v0.clone())
# Faces of mesh
faces = torch.LongTensor(mesh.faces).to(self.device)
# Start optimization
optimizer = optim.RMSprop([v], lr=1e-4)
for it_r in trange(self.refinement_step):
optimizer.zero_grad()
# Loss
face_vertex = v[faces]
eps = np.random.dirichlet((0.5, 0.5, 0.5), size=faces.shape[0])
eps = torch.FloatTensor(eps).to(self.device)
face_point = (face_vertex * eps[:, :, None]).sum(dim=1)
face_v1 = face_vertex[:, 1, :] - face_vertex[:, 0, :]
face_v2 = face_vertex[:, 2, :] - face_vertex[:, 1, :]
face_normal = torch.cross(face_v1, face_v2)
face_normal = face_normal / \
(face_normal.norm(dim=1, keepdim=True) + 1e-10)
face_value = torch.sigmoid(
# self.model.decode(face_point.unsqueeze(0), z, c).logits
self.model.decode(face_point.unsqueeze(0), c)
)
normal_target = -autograd.grad(
[face_value.sum()], [face_point], create_graph=True)[0]
normal_target = \
normal_target / \
(normal_target.norm(dim=1, keepdim=True) + 1e-10)
loss_target = (face_value - threshold).pow(2).mean()
loss_normal = \
(face_normal - normal_target).pow(2).sum(dim=1).mean()
loss = loss_target + 0.01 * loss_normal
# Update
loss.backward()
optimizer.step()
mesh.vertices = v.data.cpu().numpy()
return mesh | 26,592 | 42.24065 | 148 | py |
Im2Hands | Im2Hands-main/dependencies/halo/naive/models/refine.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
class RefineNet(nn.Module):
    ''' RefineNet class.
    Takes noisy joints, an object latent vector, and joint-to-surface
    distances as input and outputs the refined joints.
    Args:
        in_dim (int): number of input joints
        c_dim (int): dimension of object latent code c
        out_dim (int): output dimension (21 joints x 3 coordinates)
        dim (int): hidden dimension of the network
    '''
def __init__(self, in_dim=21, c_dim=128, out_dim=21 * 3, dim=128): # c_dim=128
super().__init__()
self.out_dim = out_dim
self.c_dim = c_dim
        self.dist_dim = 21  # joint-to-surface distances, one per joint (was 5: fingertips only)
# self.fc_0 = nn.Linear(1, 128)
# Size: joints + object + joint-to-surface dist
self.fc_0 = nn.Linear(in_dim * 3 + c_dim + self.dist_dim, dim)
self.fc_1 = nn.Linear(dim, dim)
self.fc_2 = nn.Linear(dim, dim)
self.fc_3 = nn.Linear(dim, dim)
self.fc_out = nn.Linear(dim, out_dim)
self.actvn = nn.LeakyReLU(0.1)
def forward(self, joints, obj_code, dist, **kwargs):
# batch_size, 21, 3
batch_size, joint_len, D = joints.size()
# output size: B x T X F
# net = self.fc_0(x.unsqueeze(-1))
# net = net + self.fc_pos(p)
joints = joints.reshape(batch_size, -1)
net = self.fc_0(torch.cat([joints, obj_code, dist], dim=1))
net = self.fc_1(self.actvn(net)) + net
net = self.fc_2(self.actvn(net)) + net
net = self.fc_3(self.actvn(net)) + net
net = self.fc_out(net)
y_pred = net.reshape(-1, 21, 3)
return y_pred
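# Minimal smoke test, kept as a sketch; tensor shapes follow the defaults above.
if __name__ == '__main__':
    net = RefineNet(in_dim=21, c_dim=128, out_dim=21 * 3, dim=128)
    joints = torch.rand(4, 21, 3)    # noisy joint predictions
    obj_code = torch.rand(4, 128)    # object latent vector
    dist = torch.rand(4, 21)         # per-joint distance to the object surface
    refined = net(joints, obj_code, dist)
    assert refined.shape == (4, 21, 3)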
| 1,660 | 31.568627 | 86 | py |
Im2Hands | Im2Hands-main/dependencies/halo/naive/models/core.py | import torch
import torch.nn as nn
from torch import distributions as dist
from models.halo_adapter.adapter import HaloAdapter
class HaloVAE(nn.Module):
''' HALO VAE Network class.
Args:
decoder (nn.Module): decoder network
encoder (nn.Module): encoder network
encoder_latent (nn.Module): latent encoder network
p0_z (dist): prior distribution for latent code z
device (device): torch device
'''
def __init__(self, obj_encoder=None, hand_encoder=None, decoder=None, encoder_latent=None,
p0_z=None, refine_net=None, use_bps=False, device=None):
super().__init__()
if p0_z is None:
p0_z = dist.Normal(torch.zeros(128, device=device),
torch.ones(128, device=device))
self.halo_adapter = None
self.decoder = decoder.to(device)
self.use_bps = use_bps
if refine_net is not None:
self.refine_net = refine_net.to(device)
else:
self.refine_net = None
if encoder_latent is not None:
self.encoder_latent = encoder_latent.to(device)
else:
self.encoder_latent = None
if obj_encoder is not None:
self.obj_encoder = obj_encoder.to(device)
else:
self.obj_encoder = None
if hand_encoder is not None:
self.hand_encoder = hand_encoder.to(device)
else:
self.hand_encoder = None
self._device = device
self.p0_z = p0_z
def initialize_halo(self, halo_config_file, denoiser_pth=None):
''' Attach HALO model to the keypoint model.
Args:
halo_adapter (HaloAdapter): Adapter that is already initialized
'''
self.halo_adapter = HaloAdapter(halo_config_file, device=self._device, denoiser_pth=denoiser_pth)
print('initialized halo')
def forward(self, obj_points, hand_joints=None, sample=True, reture_obj_latent=False, **kwargs):
''' Performs a forward pass through the network.
Args:
obj_points (tensor): input object
hand_joints (tensor): hand_joints, not used during inference
sample (bool): whether to sample for z, ignored if hand_joints is not None
'''
batch_size = obj_points.size(0)
object_latent = self.encode_objects(obj_points)
# Ignore object
# object_latent = object_latent * 0.0
# print('c', object_latent)
if hand_joints is not None:
q_z = self.infer_z(hand_joints, object_latent, **kwargs)
# print('q_z', q_z)
z = q_z.mean
# print('self.p0_z mean', self.p0_z.mean)
else:
z = self.get_z_from_prior((batch_size,), sample=sample)
# print('z', z)
# p_r = self.decode(p, z, c, **kwargs)
# z = z * 0. # ####
p_r = self.decode(z, object_latent, **kwargs)
if reture_obj_latent:
return p_r, object_latent
return p_r
def compute_kl_divergence(self, obj_points, hand_joints, reture_obj_latent=False, **kwargs):
        ''' Computes the KL divergence to the prior and the joint reconstruction.
        Args:
            obj_points (tensor): object point cloud
            hand_joints (tensor): ground-truth hand joints
            reture_obj_latent (bool): whether to also return the object latent code
        '''
object_latent = self.encode_objects(obj_points)
q_z = self.infer_z(hand_joints, object_latent, **kwargs)
z = q_z.rsample()
pred = self.decode(z, object_latent, **kwargs)
kl = dist.kl_divergence(q_z, self.p0_z).sum(dim=-1)
if reture_obj_latent:
return kl, pred, object_latent
return kl, pred
# def compute_elbo(self, obj_points, hand_joints, **kwargs):
# ''' Computes the expectation lower bound.
# Args:
# hand_joints (tensor): sampled points
# # occ (tensor): occupancy values for p
# inputs (tensor): conditioning input
# '''
# object_latent = self.encode_objects(obj_points)
# q_z = self.infer_z(hand_joints, object_latent, **kwargs)
# z = q_z.rsample()
# p_r = self.decode(z, object_latent, **kwargs)
# # rec_error = -p_r.log_prob(occ).sum(dim=-1)
# rec_error = self.mse_loss(p_r, hand_joints)
# kl = dist.kl_divergence(q_z, self.p0_z).sum(dim=-1)
# elbo = -rec_error - kl
# return elbo, rec_error, kl
def encode_objects(self, inputs):
        ''' Encodes the input object point cloud.
        Args:
            inputs (tensor): object point cloud; returned unchanged if no encoder is set
        '''
if self.obj_encoder is not None:
c = self.obj_encoder(inputs)
else:
# Return inputs
# c = torch.empty(inputs.size(0), 0)
c = inputs
return c
def decode(self, z, c, **kwargs):
        ''' Decodes the hand joints from the latent codes.
        Args:
            z (tensor): latent code z
            c (tensor): latent conditioned code c
        '''
# batch_size, points_size, p_dim = p.size()
# p = p.reshape(batch_size * points_size, p_dim)
p_r = self.decoder(z, c)
return p_r
def infer_z(self, joints, c, **kwargs):
        ''' Infers the posterior distribution over z.
        Args:
            joints (tensor): hand joints tensor
            c (tensor): latent conditioned code c
        '''
if self.encoder_latent is not None:
# print('not None')
mean_z, logstd_z = self.encoder_latent(joints, c, **kwargs)
else:
batch_size = joints.size(0)
mean_z = torch.empty(batch_size, 0).to(self._device)
logstd_z = torch.empty(batch_size, 0).to(self._device)
# print("kkkk", mean_z, torch.exp(logstd_z))
q_z = dist.Normal(mean_z, torch.exp(logstd_z))
# import pdb;pdb.set_trace()
return q_z
def get_z_from_prior(self, size=torch.Size([]), sample=True):
''' Returns z from prior distribution.
Args:
size (Size): size of z
sample (bool): whether to sample
'''
if sample:
z = self.p0_z.sample(size).to(self._device)
# print(" -- sample -- ")
# print(z)
else:
z = self.p0_z.mean.to(self._device)
# print(" -- mean -- ")
z = z.expand(*size, *z.size())
# print(" -- sample --")
# print(z)
# import pdb; pdb.set_trace()
# import matplotlib.pyplot as plt
return z
def to(self, device):
''' Puts the model to the device.
Args:
device (device): pytorch device
'''
model = super().to(device)
model._device = device
return model
def refine(self, joints, obj_code, object_points, step=1):
tips_idx = torch.Tensor([4, 8, 12, 16, 20]).long()
cur_joints = joints
for i in range(step):
# import pdb; pdb.set_trace()
# tips_dist = cal_tips_dist(joints, obj_points)
pred_dist = torch.cdist(cur_joints, object_points)
min_val, min_idx = torch.min(pred_dist, dim=2)
# tips_dist = min_val[:, tips_idx]
tips_dist = min_val
out = self.refine_net(cur_joints, obj_code, tips_dist)
cur_joints = out
return cur_joints
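# Usage sketch (commented out; the encoder/decoder choices below are
# assumptions based on the sibling modules, not a fixed API):
#
#   from models.encoder import ResnetPointnet
#   from models.decoder import SimpleDecoder
#   obj_encoder = ResnetPointnet(c_dim=128)
#   decoder = SimpleDecoder(D_in=128 + 128, H=128, D_out=21 * 3)
#   model = HaloVAE(obj_encoder=obj_encoder, decoder=decoder, device=device)
#   joints = model(obj_points, sample=True)   # (B, 21, 3) sampled grasp
#
# During training, compute_kl_divergence() supplies the KL term and the
# joint reconstruction for the usual VAE objective.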
# class ArticulatedHandNet(nn.Module):
# ''' Occupancy Network class.
# Args:
# decoder (nn.Module): decoder network
# encoder (nn.Module): encoder network
# encoder_latent (nn.Module): latent encoder network
# p0_z (dist): prior distribution for latent code z
# device (device): torch device
# '''
# def __init__(self, decoder, encoder=None, encoder_latent=None, use_bone_length=False, per_part_output=False,
# p0_z=None, device=None):
# super().__init__()
# if p0_z is None:
# p0_z = dist.Normal(torch.tensor([]), torch.tensor([]))
# self.decoder = decoder.to(device)
# self.use_bone_length = use_bone_length
# # if encoder_latent is not None:
# # self.encoder_latent = encoder_latent.to(device)
# # else:
# # self.encoder_latent = None
# # If true, return predicted occupancies for each part-model
# self.per_part_output = per_part_output
# if encoder is not None:
# self.encoder = encoder.to(device)
# else:
# self.encoder = None
# self._device = device
# # self.p0_z = p0_z
# def forward(self, p, inputs, bone_lengths=None, sample=True, **kwargs):
# ''' Performs a forward pass through the network.
# Args:
# p (tensor): sampled points
# inputs (tensor): conditioning input
# sample (bool): whether to sample for z
# '''
# c = self.encode_inputs(inputs)
# # z = self.get_z_from_prior((batch_size,), sample=sample)
# # p_r = self.decode(p, z, c, **kwargs)
# p_r = self.decode(p, c, bone_lengths=bone_lengths, **kwargs)
# return p_r
# # def compute_elbo(self, p, occ, inputs, **kwargs):
# # ''' Computes the expectation lower bound.
# # Args:
# # p (tensor): sampled points
# # occ (tensor): occupancy values for p
# # inputs (tensor): conditioning input
# # '''
# # c = self.encode_inputs(inputs)
# # q_z = self.infer_z(p, occ, c, **kwargs)
# # z = q_z.rsample()
# # p_r = self.decode(p, z, c, **kwargs)
# # rec_error = -p_r.log_prob(occ).sum(dim=-1)
# # kl = dist.kl_divergence(q_z, self.p0_z).sum(dim=-1)
# # elbo = -rec_error - kl
# # return elbo, rec_error, kl
# def encode_inputs(self, inputs):
# ''' Encodes the input.
# Args:
# input (tensor): the input
# '''
# if self.encoder is not None:
# c = self.encoder(inputs)
# else:
# # Return inputs
# # c = torch.empty(inputs.size(0), 0)
# c = inputs
# return c
# def decode(self, p, c, bone_lengths=None, reduce_part=True, return_model_indices=False, **kwargs):
# ''' Returns occupancy probabilities for the sampled points.
# Args:
# p (tensor): points
# z (tensor): latent code z
# c (tensor): latent conditioned code c
# # joints (tensor): joint locations
# reduce_part (bool): whether to reduce the last (sub-model) dimention for
# part-base model (with max() or logSumExp()). Only considered if part-base model is used.
# True when training normal occupancy, and False when training skinning weight.
# return_model_indices (bool): only for geration
# '''
# ############### Expand latent code to match the number of points here
# # reshape to [batch x points, latent size]
# # sdf_data = (samples.cuda()).reshape(
# # num_samp_per_scene * scene_per_subbatch, 5 # 4
# # )
# ##### repeat interleave
# batch_size, points_size, p_dim = p.size()
# p = p.reshape(batch_size * points_size, p_dim)
# # print("c shape", c.size())
# c = c.repeat_interleave(points_size, dim=0)
# if bone_lengths is not None:
# bone_lengths = bone_lengths.repeat_interleave(points_size, dim=0)
# # print("c shape", c.size())
# # True during testing
# if return_model_indices:
# # If part-labels are needed, get [batch x bones] probabilities from the model and find argmax externally
# p_r = self.decoder(p, c, bone_lengths, reduce_part=False)
# p_r = self.decoder.sigmoid(p_r)
# if self.decoder.smooth_max:
# _, sub_model_indices = p_r.max(1, keepdim=True)
# # p_r = p_r.logsumexp(1, keepdim=True)
# weights = nn.functional.softmax(5.0 * p_r, dim=1)
# p_r = torch.sum(weights * p_r, dim=1)
# else:
# p_r, sub_model_indices = p_r.max(1, keepdim=True)
# # p_r = self.decoder.sigmoid(p_r)
# sub_model_indices = sub_model_indices.reshape(batch_size, points_size)
# return p_r, sub_model_indices
# else:
# p_r = self.decoder(p, c, bone_lengths, reduce_part=reduce_part)
# p_r = self.decoder.sigmoid(p_r)
# if reduce_part:
# if self.decoder.smooth_max:
# # p_r = p_r.logsumexp(1, keepdim=True)
# weights = nn.functional.softmax(5.0 * p_r, dim=1)
# p_r = torch.sum(weights * p_r, dim=1)
# else:
# p_r, _ = p_r.max(1, keepdim=True)
# p_r = p_r.reshape(batch_size, points_size)
# # p_r = self.decoder.sigmoid(p_r) #
# # # From NASA original
# # # Soft-Max Blending
# # weights = tf.nn.softmax(hparams.soft_blend * x, axis=1)
# # x = tf.reduce_sum(weights * x, axis=1)
# # # #
# # # print("p_r shape", p_r.size())
# # # print("reduce part", reduce_part)
# # if reduce_part:
# # if self.per_part_output:
# # # print("size in model ", p_r.size())
# # print("before max", p_r.shape)
# # # If smooth max is used, the returned p_r will be pre-sigmoid.
# # # The index is obtained using max, while the prob is obtained using logSumExp
# # if self.decoder.smooth_max:
# # _, sub_model_indices = p_r.max(1, keepdim=True)
# # p_r = p_r.logsumexp(1, keepdim=True)
# # p_r = nn.Sigmoid(p_r)
# # else:
# # p_r, sub_model_indices = p_r.max(1, keepdim=True)
# # print("after max", p_r.shape)
# # # If use logSumExp instead of max()
# # # p_r = p_r.logsumexp(1, keepdim=True)
# # # p_r = p_r.exp().sum(dim=1, keepdim=True) - (p_r.size(-1) - 1.)
# # # p_r = p_r.log()
# # # print(p_r[0])
# # # print(p_r.size())
# # sub_model_indices = sub_model_indices.reshape(batch_size, points_size)
# # # reshape back to [batch, points]
# # p_r = p_r.reshape(batch_size, points_size)
# # if self.per_part_output and return_model_indices:
# # return p_r, sub_model_indices
# # else:
# # p_r = nn.Sigmoid(p_r)
# return p_r
# # def infer_z(self, p, occ, c, **kwargs):
# # ''' Infers z.
# # Args:
# # p (tensor): points tensor
# # occ (tensor): occupancy values for occ
# # c (tensor): latent conditioned code c
# # '''
# # if self.encoder_latent is not None:
# # mean_z, logstd_z = self.encoder_latent(p, occ, c, **kwargs)
# # else:
# # batch_size = p.size(0)
# # mean_z = torch.empty(batch_size, 0).to(self._device)
# # logstd_z = torch.empty(batch_size, 0).to(self._device)
# # q_z = dist.Normal(mean_z, torch.exp(logstd_z))
# # return q_z
# # def get_z_from_prior(self, size=torch.Size([]), sample=True):
# # ''' Returns z from prior distribution.
# # Args:
# # size (Size): size of z
# # sample (bool): whether to sample
# # '''
# # if sample:
# # z = self.p0_z.sample(size).to(self._device)
# # else:
# # z = self.p0_z.mean.to(self._device)
# # z = z.expand(*size, *z.size())
# # return z
# def to(self, device):
# ''' Puts the model to the device.
# Args:
# device (device): pytorch device
# '''
# model = super().to(device)
# model._device = device
# return model | 16,286 | 36.876744 | 118 | py |
Im2Hands | Im2Hands-main/dependencies/halo/naive/models/encoder.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
def maxpool(x, dim=-1, keepdim=False):
out, _ = x.max(dim=dim, keepdim=keepdim)
return out
class SimpleEncoder(torch.nn.Module):
def __init__(self, D_in, H, D_out):
"""
        Create a network with two ReLU hidden layers and a linear output layer.
"""
super(SimpleEncoder, self).__init__()
self.linear1 = torch.nn.Linear(D_in, H)
self.linear2 = torch.nn.Linear(H, H)
self.out_layer = torch.nn.Linear(H, D_out)
def forward(self, x):
h_relu = self.linear1(x).clamp(min=0)
h2_relu = self.linear2(h_relu).clamp(min=0)
y_pred = self.out_layer(h2_relu)
return y_pred
# Resnet Blocks
class ResnetBlockFC(nn.Module):
''' Fully connected ResNet Block class.
Args:
size_in (int): input dimension
size_out (int): output dimension
size_h (int): hidden dimension
'''
def __init__(self, size_in, size_out=None, size_h=None):
super().__init__()
# Attributes
if size_out is None:
size_out = size_in
if size_h is None:
size_h = min(size_in, size_out)
self.size_in = size_in
self.size_h = size_h
self.size_out = size_out
# Submodules
self.fc_0 = nn.Linear(size_in, size_h)
self.fc_1 = nn.Linear(size_h, size_out)
self.actvn = nn.ReLU()
if size_in == size_out:
self.shortcut = None
else:
self.shortcut = nn.Linear(size_in, size_out, bias=False)
# Initialization
nn.init.zeros_(self.fc_1.weight)
def forward(self, x):
net = self.fc_0(self.actvn(x))
dx = self.fc_1(self.actvn(net))
if self.shortcut is not None:
x_s = self.shortcut(x)
else:
x_s = x
return x_s + dx
class ResnetPointnet(nn.Module):
''' PointNet-based encoder network with ResNet blocks.
Args:
c_dim (int): dimension of latent code c
dim (int): input points dimension
hidden_dim (int): hidden dimension of the network
'''
def __init__(self, c_dim=128, dim=3, hidden_dim=128):
super().__init__()
self.c_dim = c_dim
self.fc_pos = nn.Linear(dim, 2*hidden_dim)
self.block_0 = ResnetBlockFC(2*hidden_dim, hidden_dim)
self.block_1 = ResnetBlockFC(2*hidden_dim, hidden_dim)
self.block_2 = ResnetBlockFC(2*hidden_dim, hidden_dim)
self.block_3 = ResnetBlockFC(2*hidden_dim, hidden_dim)
self.block_4 = ResnetBlockFC(2*hidden_dim, hidden_dim)
self.fc_c = nn.Linear(hidden_dim, c_dim)
self.actvn = nn.ReLU()
self.pool = maxpool
def forward(self, p):
batch_size, T, D = p.size()
# output size: B x T X F
net = self.fc_pos(p)
net = self.block_0(net)
pooled = self.pool(net, dim=1, keepdim=True).expand(net.size())
net = torch.cat([net, pooled], dim=2)
net = self.block_1(net)
pooled = self.pool(net, dim=1, keepdim=True).expand(net.size())
net = torch.cat([net, pooled], dim=2)
net = self.block_2(net)
pooled = self.pool(net, dim=1, keepdim=True).expand(net.size())
net = torch.cat([net, pooled], dim=2)
net = self.block_3(net)
pooled = self.pool(net, dim=1, keepdim=True).expand(net.size())
net = torch.cat([net, pooled], dim=2)
net = self.block_4(net)
# Recode to B x F
net = self.pool(net, dim=1)
c = self.fc_c(self.actvn(net))
return c
class LetentEncoder(nn.Module):
''' Latent encoder class.
It encodes the input points and returns mean and standard deviation for the
posterior Gaussian distribution.
Args:
z_dim (int): dimension of output code z
c_dim (int): dimension of latent conditioned code c
        in_dim (int): number of input joints
        dim (int): hidden dimension of the network
leaky (bool): whether to use leaky ReLUs
'''
def __init__(self, z_dim=64, c_dim=128, in_dim=21, dim=128, leaky=True): # leaky=True
super().__init__()
self.z_dim = z_dim
self.c_dim = c_dim
# Submodules
self.fc_pos = nn.Linear(in_dim, dim)
# if c_dim != 0:
# self.fc_c = nn.Linear(c_dim, 128)
# self.fc_0 = nn.Linear(1, 128)
self.fc_0 = nn.Linear(in_dim * 3 + c_dim, dim)
self.fc_1 = nn.Linear(dim, dim)
self.fc_2 = nn.Linear(dim, dim)
self.fc_3 = nn.Linear(dim, dim)
self.fc_mean = nn.Linear(dim, z_dim)
self.fc_logstd = nn.Linear(dim, z_dim)
if not leaky:
self.actvn = F.relu
self.pool = maxpool
else:
self.actvn = lambda x: F.leaky_relu(x, 0.2)
self.pool = torch.mean
def forward(self, joints, c, **kwargs):
# batch_size, 21, 3
batch_size, joint_len, D = joints.size()
# output size: B x T X F
# net = self.fc_0(x.unsqueeze(-1))
# net = net + self.fc_pos(p)
joints = joints.reshape(batch_size, -1)
net = self.fc_0(torch.cat([joints, c], dim=1))
net = self.fc_1(self.actvn(net)) + net
net = self.fc_2(self.actvn(net)) + net
net = self.fc_3(self.actvn(net)) + net
# if self.c_dim != 0:
# net = net + self.fc_c(c).unsqueeze(1)
# net = self.fc_1(self.actvn(net))
# pooled = self.pool(net, dim=1, keepdim=True).expand(net.size())
# net = torch.cat([net, pooled], dim=2)
# net = self.fc_2(self.actvn(net))
# pooled = self.pool(net, dim=1, keepdim=True).expand(net.size())
# net = torch.cat([net, pooled], dim=2)
# net = self.fc_3(self.actvn(net))
# # Reduce
# # to B x F
# net = self.pool(net, dim=1)
mean = self.fc_mean(net)
logstd = self.fc_logstd(net)
return mean, logstd
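# Smoke test, a minimal sketch: encode a point cloud to a latent code, then
# produce posterior parameters and draw a reparameterized sample.
if __name__ == '__main__':
    enc = ResnetPointnet(c_dim=128, dim=3, hidden_dim=128)
    c = enc(torch.rand(2, 1024, 3))
    assert c.shape == (2, 128)
    lat = LetentEncoder(z_dim=64, c_dim=128, in_dim=21, dim=128)
    mean, logstd = lat(torch.rand(2, 21, 3), c)
    z = torch.distributions.Normal(mean, torch.exp(logstd)).rsample()
    assert z.shape == (2, 64)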
| 5,962 | 29.269036 | 90 | py |
Im2Hands | Im2Hands-main/dependencies/halo/naive/models/decoder.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
class SimpleDecoder(torch.nn.Module):
def __init__(self, D_in, H, D_out, mano_params_out=False):
"""
        Create a simple feed-forward network with residual connections and LeakyReLU activations.
"""
super(SimpleDecoder, self).__init__()
# self.linear1 = torch.nn.Linear(D_in, H)
# self.linear2 = torch.nn.Linear(H, H)
# self.out_layer = torch.nn.Linear(H, D_out)
self.mano_params_out = mano_params_out
# print(" ------- mano_params_out", mano_params_out)
self.fc_1 = torch.nn.Linear(D_in, H)
self.fc_2 = torch.nn.Linear(H, H)
self.fc_3 = torch.nn.Linear(H, H)
self.fc_4 = torch.nn.Linear(H, D_out)
self.actvn = nn.LeakyReLU(0.1)
def forward(self, z, c):
x = torch.cat([z, c], 1)
# h_relu = self.linear1(x).clamp(min=0)
# h2_relu = self.linear2(h_relu).clamp(min=0)
# y_pred = self.out_layer(h2_relu)
net = self.fc_1(x)
net = self.fc_2(self.actvn(net)) + net
net = self.fc_3(self.actvn(net)) + net
net = self.fc_4(self.actvn(net))
# mano_params_out = True
if self.mano_params_out:
y_pred = net
else:
y_pred = net.reshape(-1, 21, 3)
# y_pred = net.reshape(-1, 21, 3)
# print("y_pred", y_pred.shape)
# y_pred = y_pred.reshape(-1, 21, 3)
return y_pred
class SimpleDecoderss(nn.Module):
def __init__(
self,
latent_size,
dims,
dropout=None,
dropout_prob=0.0,
norm_layers=(),
latent_in=(),
weight_norm=False,
# xyz_in_all=None,
use_sigmoid=False,
latent_dropout=False,
):
        super(SimpleDecoderss, self).__init__()  # must match the enclosing class
dims = [latent_size + 3] + dims + [1]
self.num_layers = len(dims)
self.norm_layers = norm_layers
self.latent_in = latent_in
self.latent_dropout = latent_dropout
if self.latent_dropout:
self.lat_dp = nn.Dropout(0.2)
# self.xyz_in_all = xyz_in_all
self.weight_norm = weight_norm
for layer in range(0, self.num_layers - 1):
if layer + 1 in latent_in:
out_dim = dims[layer + 1] - dims[0]
else:
out_dim = dims[layer + 1]
# if self.xyz_in_all and layer != self.num_layers - 2:
# out_dim -= 3
if weight_norm and layer in self.norm_layers:
setattr(
self,
"lin" + str(layer),
nn.utils.weight_norm(nn.Linear(dims[layer], out_dim)),
)
else:
setattr(self, "lin" + str(layer), nn.Linear(dims[layer], out_dim))
if (
(not weight_norm)
and self.norm_layers is not None
and layer in self.norm_layers
):
setattr(self, "bn" + str(layer), nn.LayerNorm(out_dim))
# print(dims[layer], out_dim)
self.use_sigmoid = use_sigmoid
if use_sigmoid:
self.sigmoid = nn.Sigmoid()
# self.relu = nn.ReLU()
self.relu = nn.LeakyReLU(0.1)
self.dropout_prob = dropout_prob
self.dropout = dropout
# self.th = nn.Tanh()
# input: N x (L+3)
def forward(self, xyz, latent, reduce_part=False):
batch_size = xyz.size(0)
# print("latent size", latent.size())
# print(latent)
# reshape from [batch_size, 16, 4, 4] to [batch_size, 256]
latent = latent.reshape(batch_size, -1)
# print("latent size", latent.size())
# print(latent)
# print("xyz size", xyz.size())
input = torch.cat([latent, xyz], 1)
x = input
for layer in range(0, self.num_layers - 1):
lin = getattr(self, "lin" + str(layer))
if layer in self.latent_in:
x = torch.cat([x, input], 1)
# elif layer != 0 and self.xyz_in_all:
# x = torch.cat([x, xyz], 1)
x = lin(x)
# last layer Sigmoid
if layer == self.num_layers - 2 and self.use_sigmoid:
x = self.sigmoid(x)
if layer < self.num_layers - 2:
if (
self.norm_layers is not None
and layer in self.norm_layers
and not self.weight_norm
):
bn = getattr(self, "bn" + str(layer))
x = bn(x)
x = self.relu(x)
if self.dropout is not None and layer in self.dropout:
x = F.dropout(x, p=self.dropout_prob, training=self.training)
# if hasattr(self, "th"):
# x = self.th(x)
return x
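# Smoke test, a minimal sketch: decode a prior sample plus object code into
# 21 x 3 keypoints (dimensions mirror the defaults used elsewhere).
if __name__ == '__main__':
    dec = SimpleDecoder(D_in=128 + 128, H=128, D_out=21 * 3)
    z = torch.rand(4, 128)
    c = torch.rand(4, 128)
    joints = dec(z, c)
    assert joints.shape == (4, 21, 3)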
| 4,897 | 31.437086 | 82 | py |
Im2Hands | Im2Hands-main/dependencies/halo/naive/loss/loss.py | import numpy as np
import torch
import torch.nn as nn
from models.halo_adapter.converter import PoseConverter, transform_to_canonical, angle2, signed_angle
from models.halo_adapter.interface import convert_joints
def kp3D_to_bones(kp_3D, joint_parent, normalize_length=False):
"""
    Converts 3D joints to bone vectors (child joint minus parent joint).
"""
eps_mat = torch.tensor(1e-9, device=kp_3D.device)
batch_size = kp_3D.shape[0]
bones = kp_3D[:, 1:] - kp_3D[:, joint_parent[1:]] # .detach()
if normalize_length:
bone_lengths = torch.max(torch.norm(bones, dim=2, keepdim=True), eps_mat)
# print("bone_length", bone_lengths)
bones = bones / bone_lengths
return bones
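# Example (a sketch): with MANO-ordered joints of shape (B, 21, 3) and the
# parent table used throughout this file, this yields 20 bone vectors:
#
#   parents = np.array([0, 0, 1, 2, 3, 0, 5, 6, 7, 0, 9, 10, 11, 0, 13, 14, 15, 0, 17, 18, 19])
#   bones = kp3D_to_bones(joints, parents, normalize_length=True)   # (B, 20, 3)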
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
def vis_bone(hand_joints, joint_parents):
fig = plt.figure()
ax = fig.gca(projection=Axes3D.name)
b_start_loc = hand_joints[:, joint_parents[1:]][0]
b_end_loc = hand_joints[:, 1:][0]
for b in range(20):
# color = 'r' if b in [1, 5, 9, 13, 17] else 'b'
color = 'r' if b in [0, 4, 8, 12, 16] else 'b'
ax.plot([b_start_loc[b, 0], b_end_loc[b, 0]],
[b_start_loc[b, 1], b_end_loc[b, 1]],
[b_start_loc[b, 2], b_end_loc[b, 2]], color=color)
plt.show()
class BoneLengthLoss(nn.Module):
def __init__(self, device=None, joint_parents=None):
super().__init__()
self.device = device
        if joint_parents is None:
            joint_parents = np.array([0, 0, 1, 2, 3, 0, 5, 6, 7, 0, 9, 10, 11, 0, 13, 14, 15, 0, 17, 18, 19])
        self.joint_parents = joint_parents
self.l1_loss = torch.nn.L1Loss()
self.l2_loss = torch.nn.MSELoss()
def forward(self, pred_joints, hand_joints):
pred_bones = kp3D_to_bones(pred_joints, self.joint_parents)
pred_bone_lengths = pred_bones.norm(dim=2)
gt_bones = kp3D_to_bones(hand_joints, self.joint_parents)
gt_bone_lengths = gt_bones.norm(dim=2)
bone_length_loss = self.l2_loss(pred_bone_lengths, gt_bone_lengths)
return bone_length_loss
def angle_diff(pred_angle, gt_angle):
loss = torch.mean(
torch.abs(torch.cos(pred_angle) - torch.cos(gt_angle)) +
torch.abs(torch.sin(pred_angle) - torch.sin(gt_angle)),
)
return loss
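# Comparing cos/sin pairs instead of raw angles keeps the loss periodic:
# predictions differing from the target by a full turn incur no penalty,
# and there is no discontinuity at the +/- pi wrap-around.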
class RootBoneAngleLoss(nn.Module):
def __init__(self, device=None, joint_parents=None):
super().__init__()
self.device = device
        if joint_parents is None:
            joint_parents = np.array([0, 0, 1, 2, 3, 0, 5, 6, 7, 0, 9, 10, 11, 0, 13, 14, 15, 0, 17, 18, 19])
        self.joint_parents = joint_parents
self.plane_angle_w = 1.0
self.bone_angle_w = 1.0
def forward(self, pred_joints, hand_joints):
pred_bones = kp3D_to_bones(pred_joints, self.joint_parents, normalize_length=True)
pred_angle = self._compute_root_bone_angle(pred_bones)
pred_plane_angle = self._compute_root_plane_angle(pred_bones)
gt_bones = kp3D_to_bones(hand_joints, self.joint_parents, normalize_length=True)
gt_angle = self._compute_root_bone_angle(gt_bones)
gt_plane_angle = self._compute_root_plane_angle(gt_bones)
# vis_bone(hand_joints, self.joint_parents)
# import pdb; pdb.set_trace()
root_bone_angle_loss = angle_diff(pred_angle, gt_angle)
root_plane_angle_loss = angle_diff(pred_plane_angle, gt_plane_angle)
# import pdb; pdb.set_trace()
return root_bone_angle_loss, root_plane_angle_loss
def _compute_root_bone_angle(self, bones):
"""
Assume MANO joint parent
"""
# angle between (n0,n1), (n1,n2), (n2,n3)
# thumb and index (plane n0)
n0 = torch.cross(bones[:, 4], bones[:, 0])
# middle and index (plane n1)
n1 = torch.cross(bones[:, 8], bones[:, 4])
# ring and middle (plane n2)
n2 = torch.cross(bones[:, 12], bones[:, 8])
# ring and pinky (plane n3)
n3 = torch.cross(bones[:, 16], bones[:, 12])
root_bone_angles = torch.stack([
signed_angle(bones[:, 4], bones[:, 0], n0),
signed_angle(bones[:, 8], bones[:, 4], n1),
signed_angle(bones[:, 12], bones[:, 8], n2),
signed_angle(bones[:, 16], bones[:, 12], n3)],
dim=1
)
return root_bone_angles
def _compute_root_plane_angle(self, bones):
'''
angles between root bone planes
'''
# angle between (n0,n1), (n1,n2), (n2,n3)
# thumb and index (plane n0)
n0 = torch.cross(bones[:, 4], bones[:, 0])
# middle and index (plane n1)
n1 = torch.cross(bones[:, 8], bones[:, 4])
# ring and middle (plane n2)
n2 = torch.cross(bones[:, 12], bones[:, 8])
# ring and pinky (plane n3)
n3 = torch.cross(bones[:, 16], bones[:, 12])
root_plane_angles = torch.stack([
signed_angle(n0, n1, bones[:, 4]),
signed_angle(n2, n1, bones[:, 8]),
signed_angle(n3, n2, bones[:, 12])],
dim=1
)
return root_plane_angles
class AllBoneAngleLoss(nn.Module):
def __init__(self, device=None, joint_parents=None):
super().__init__()
self.device = device
        if joint_parents is None:
            joint_parents = np.array([0, 0, 1, 2, 3, 0, 5, 6, 7, 0, 9, 10, 11, 0, 13, 14, 15, 0, 17, 18, 19])
        self.joint_parents = joint_parents
# 3D keypoints to transformation matrices converter
self.global_normalizer = transform_to_canonical
self.pose_normalizer = PoseConverter(straight_hand=True)
def forward(self, pred_joints, hand_joints):
is_right_vec = torch.ones(pred_joints.shape[0], device=pred_joints.device)
# vis_bone(hand_joints, self.joint_parents)
hand_joints = convert_joints(hand_joints, source='mano', target='biomech')
hand_joints_normalized, _ = self.global_normalizer(hand_joints, is_right=is_right_vec)
gt_bone_angles = self.pose_normalizer(hand_joints_normalized, is_right_vec, return_rot_only=True)
pred_joints = convert_joints(pred_joints, source='mano', target='biomech')
pred_joints_normalized, _ = self.global_normalizer(pred_joints, is_right=is_right_vec)
pred_bone_angles = self.pose_normalizer(pred_joints_normalized, is_right_vec, return_rot_only=True)
all_bone_angle_loss = angle_diff(pred_bone_angles, gt_bone_angles)
# all_bone_angle_loss = angle_diff(gt_bone_angles, gt_bone_angles)
# import pdb; pdb.set_trace()
return all_bone_angle_loss
class SurfaceDistanceLoss(nn.Module):
def __init__(self, device=None, joint_parents=None):
super().__init__()
self.device = device
# if joint_parents is None:
# self.joint_parents = np.array([0, 0, 1, 2, 3, 0, 5, 6, 7, 0, 9, 10, 11, 0, 13, 14, 15, 0, 17, 18, 19])
self.mse_loss = torch.nn.MSELoss()
def forward(self, pred_joints, object_points, gt_distance):
# torch.cdist(a, b, p=2)
pred_dist = torch.cdist(pred_joints, object_points)
min_val, min_idx = torch.min(pred_dist, dim=2)
surface_dist_loss = self.mse_loss(min_val, gt_distance)
# import pdb; pdb.set_trace()
return surface_dist_loss
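# Usage sketch (shapes are the ones implied by the cdist call above):
#   pred_joints: (B, 21, 3), object_points: (B, N, 3), gt_distance: (B, 21),
# where gt_distance holds the ground-truth closest distance from each joint
# to the object surface.
#
#   loss_fn = SurfaceDistanceLoss()
#   loss = loss_fn(pred_joints, object_points, gt_distance)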
class InterpenetrationLoss(nn.Module):
def __init__(self, device=None, joint_parents=None):
super().__init__()
self.device = device
self.joint_parents = np.array([0, 0, 1, 2, 3, 0, 5, 6, 7, 0, 9, 10, 11, 0, 13, 14, 15, 0, 17, 18, 19])
def forward(self, pred_joints, object_points, halo_model):
# import pdb; pdb.set_trace()
# object_points = object_points - pred_joints[:, None, 0]
occ_p = halo_model.query_points(object_points, pred_joints, joint_order='mano')
occ_p = torch.where(occ_p > 0.5, occ_p, torch.zeros_like(occ_p))
penetration_loss = occ_p.mean()
# import pdb; pdb.set_trace()
# mask = (occ_p > 0.5).detach()
# pred_dist = torch.cdist(pred_joints, object_points)
# closest_dist, closest_joints = torch.min(pred_dist, dim=1)
# # Apply loss up along kinematic chain
# loss_sum = 0
return penetration_loss, occ_p
class ManoVertLoss(nn.Module):
def __init__(self, device=None, joint_parents=None):
super().__init__()
self.device = device
self.joint_parents = np.array([0, 0, 1, 2, 3, 0, 5, 6, 7, 0, 9, 10, 11, 0, 13, 14, 15, 0, 17, 18, 19])
self.mse_loss = torch.nn.MSELoss()
import sys
sys.path.insert(0, "/home/korrawe/halo_vae/scripts")
from manopth.manolayer import ManoLayer
from manopth import demo
# import pdb; pdb.set_trace()
self.mano_layer = ManoLayer(
mano_root='/home/korrawe/halo_vae/scripts/mano/models', center_idx=0, use_pca=True, ncomps=45, flat_hand_mean=False)
self.mano_layer = self.mano_layer.to(device)
# hand_verts, hand_joints = mano_layer(torch.cat((rot, pose), 1), shape, trans)
def forward(self, rot, pose, shape, trans, gt_hand_verts):
# import pdb; pdb.set_trace()
hand_verts, hand_joints = self.mano_layer(torch.cat((rot, pose), 1), shape, trans)
hand_verts = hand_verts / 10.0
hand_joints = hand_joints / 10.0
vert_loss = self.mse_loss(hand_verts, gt_hand_verts)
# occ_p = halo_model.query_points(object_points, pred_joints, joint_order='mano')
# occ_p = torch.where(occ_p > 0.5, occ_p, torch.zeros_like(occ_p))
# penetration_loss = occ_p.mean()
# import pdb; pdb.set_trace()
return vert_loss
| 9,610 | 37.138889 | 128 | py |
Im2Hands | Im2Hands-main/dependencies/halo/mano_converter/mano_converter.py | import torch
import torch.nn as nn
import numpy as np
import sys
sys.path.insert(0, "/home/korrawe/halo_vae")
from models.halo_adapter.converter import transform_to_canonical
from models.halo_adapter.interface import convert_joints, change_axes
def rot_mat_to_axis_angle(R):
"""
Taken from
http://www.euclideanspace.com/maths/geometry/rotations/conversions/matrixToAngle/derivation/index.htm
"""
# val_1 = (R[:,0,0] + R[:,1,1] + R[:,2,2] - 1) / 2
# angles = torch.acos(val_1)
# denom = 2 * torch.sqrt((val_1 ** 2 - 1).abs())
# x = (R[:,2,1] - R[:,1,2]) / denom
# y = (R[:,0,2] - R[:,2,0]) / denom
# z = (R[:,1,0] - R[:,0,1]) / denom
cos = (R[..., 0, 0] + R[..., 1, 1] + R[..., 2, 2] - 1) / 2
cos = torch.clamp(cos, -1, 1)
angles = torch.acos(cos)
# angles = torch.acos((R[..., 0, 0] + R[..., 1, 1] + R[..., 2, 2] - 1) / 2)
denom = torch.sqrt(
(R[..., 2, 1] - R[..., 1, 2]) ** 2 +
(R[..., 0, 2] - R[..., 2, 0]) ** 2 +
(R[..., 1, 0] - R[..., 0, 1]) ** 2
)
x = (R[..., 2, 1] - R[..., 1, 2]) / denom
y = (R[..., 0, 2] - R[..., 2, 0]) / denom
z = (R[..., 1, 0] - R[..., 0, 1]) / denom
x = x.unsqueeze(-1)
y = y.unsqueeze(-1)
z = z.unsqueeze(-1)
axis = torch.cat((x, y, z), dim=-1)
return axis, angles
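# Sanity-check sketch (commented out): a rotation of 0.3 rad about the
# z-axis should map back to axis ~(0, 0, 1) and angle ~0.3.
#
#   t = 0.3
#   R = torch.tensor([[np.cos(t), -np.sin(t), 0.],
#                     [np.sin(t),  np.cos(t), 0.],
#                     [0., 0., 1.]]).unsqueeze(0)
#   axis, angle = rot_mat_to_axis_angle(R)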
def global_rot2mano_axisang(cano2pose_mat):
global_rots = cano2pose_mat[:, :, :3, :3]
R = global_rots
# Convert to local rotation
lvl1_idx = torch.tensor([1, 4, 7, 10, 13])
lvl2_idx = torch.tensor([2, 5, 8, 11, 14])
lvl3_idx = torch.tensor([3, 6, 9, 12, 15])
R_lvl0 = R[:, None, 0]
R_lvl1 = R[:, lvl1_idx]
R_lvl2 = R[:, lvl2_idx]
R_lvl3 = R[:, lvl3_idx]
# Subtract rotation by parent bones
R_lvl1_l = R_lvl0.transpose(-1, -2) @ R_lvl1
R_lvl2_l = R_lvl1.transpose(-1, -2) @ R_lvl2
R_lvl3_l = R_lvl2.transpose(-1, -2) @ R_lvl3
# import pdb; pdb.set_trace()
# R_local_no_order = torch.cat([R[:, None, 0], R_lvl1, R_lvl2_l, R_lvl3_l], dim=1)
R_local_no_order = torch.cat([R[:, None, 0], R_lvl1_l, R_lvl2_l, R_lvl3_l], dim=1)
# HALO order (also MANO internal order)
reorder_idxs = [0, 1, 6, 11, 2, 7, 12, 3, 8, 13, 4, 9, 14, 5, 10, 15]
R_local = R_local_no_order[:, reorder_idxs]
# th_results = torch.cat(all_transforms, 1)[:, reorder_idxs]
axis, angles = rot_mat_to_axis_angle(R_local) # global_rots
# If root rotation is nan, set it to zero
# Use torch.nan_to_num() for pytorch >1.8 https://pytorch.org/docs/1.8.1/generated/torch.nan_to_num.html
# axis = torch.nan_to_num(axis)
# print('axis before:', axis)
axis[torch.isnan(axis)] = 0.
# print('axis_after:', axis)
# import pdb;pdb.set_trace()
# axis[0, 0] = torch.tensor([0., 0., 0.])
axis_angle_mano = axis * angles.unsqueeze(-1)
return axis_angle_mano.reshape(axis_angle_mano.shape[0], -1)
# Shape computation-related
#
#
mano_2_zimm = np.array([0, 13, 14, 15, 16, 1, 2, 3, 17, 4, 5, 6, 18, 10, 11, 12, 19, 7, 8, 9, 20])
zimm_2_ours = np.array([0, 1, 5, 9, 13, 17, 2, 6, 10, 14, 18, 3, 7, 11, 15, 19, 4, 8, 12, 16, 20])
zimm_2_mano = np.argsort(mano_2_zimm)
def get_range(idx, len_range):
n_idx = len(idx)
idx = idx.repeat_interleave(len_range) * 3
idx += torch.arange(len_range).repeat(n_idx)
return idx
def initialize_QP(mano_layer, J):
"""
For the QP problem:
bT Q b + cT b + a = bl2
returns Q, c, a, bl2
"""
# Get T,S,M
n_v = 778
n_j = 16
# Extract template mesh T and reshape from V x 3 to 3V
T = mano_layer.th_v_template
T = T.view(3 * n_v)
# Extract Shape blend shapes and reshape from V x 3 x B to 3V x B
S = mano_layer.th_shapedirs
S = S.view(3 * n_v, 10)
# Extract M and re-order to Zimmermann joint ordering.
M = mano_layer.th_J_regressor
# Add entries for the tips. TODO Add actual vertex positions
M = torch.cat((M, torch.zeros((5, n_v)).to(J.device)), dim=0)
# Convert to our joint ordering
M = M[mano_2_zimm][zimm_2_ours]
# Remove entries for tips. TODO Once using actual tip position, remove this step
M = M[:16]
# Construct the 3J x 3V band matrix
M_band = torch.zeros(3 * n_j, 3 * n_v).to(J.device)
fr = -(n_j - 1)
to = n_v
for i in range(fr, to):
# Extract diagonal from M
d = M.diag(i)
# Expand it
d = d.repeat_interleave(3)
# Add it to the final band matrix
M_band.diagonal(3 * i)[:] = d
# Construct Q, c, a and bl for the quadratic equation: bT Q b + cT b + (a - bl2) = 0
# Joint idx in Zimmermann ordering
# idx_p = 10
# idx_c = 11
# Construct child/parent indices
    idx_p = torch.cat((torch.tensor([0] * 5), torch.arange(1, 11)))
    idx_c = torch.arange(1, 16)
# Compute bl squared
bl2 = torch.norm(J[idx_c] - J[idx_p], dim=-1).pow(2)
idx_p_range = get_range(idx_p, 3)
idx_c_range = get_range(idx_c, 3)
M_c = M_band[idx_c_range]
M_p = M_band[idx_p_range]
# Exploit additional dimension to make it work across all bones
M_c = M_c.view(15, 3, 3*n_v)
M_p = M_p.view(15, 3, 3*n_v)
T = T.view(1,3*n_v, 1)
S = S.view(1,2334,10)
bl2 = bl2.view(15,1,1)
# Construct M_cp
M_cp = (M_c - M_p).transpose(-1,-2) @ (M_c - M_p)
# DEBUG
# bone_idx = 0 # Should be root/thumb_mcp
# M_c = M_c[bone_idx]
# M_p = M_p[bone_idx]
# M_cp = M_cp[bone_idx]
# bl2 = bl2[bone_idx]
# Compute a
a = T.transpose(-1,-2) @ M_cp @ T
# Compute c
c = (
S.transpose(-1,-2) @ (M_cp.transpose(-1,-2) @ T) +
S.transpose(-1,-2) @ (M_cp @ T)
)
# Compute Q
Q = S.transpose(-1,-2) @ M_cp @ S
return Q, c, a, bl2
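# Per bone i, the returned terms define the residual
#     r_i(b) = b^T Q_i b + c_i^T b + a_i - bl2_i,
# i.e. the squared bone length under shape b minus the target squared
# length. eval_QP below evaluates exactly this expression.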
def eval_QP(Q,c,a, bl2, b):
b = b.view(1,10,1)
val = ((b.transpose(-1,-2) @ Q @ b) + (c.transpose(-1,-2) @ b) + a)
r = (val - bl2)
return r
def get_J(Q,c, b):
# Jacobian of QP problem
J = Q @ b + c
return J
def newtons_method(Q, c, a, bl2, beta_init, tol=1e-4):
F = eval_QP(Q, c, a, bl2, beta_init)
beta = beta_init.view(1,10,1)
i = 0
while F.abs().max() > tol:
J = get_J(Q,c, beta)
F = eval_QP(Q,c, a, bl2, beta)
# Reshape matrices
J = J.squeeze(-1)
F = F.squeeze(-1)
J_inv = (J.transpose(-1,-2) @ J).inverse() @ J.transpose(-1,-2)
beta = beta - (J_inv @ F).unsqueeze(0)
print(f'(Gauss-Newton) Max. residual: {F.abs().max()} mm')
        break  # NOTE: stops after a single Gauss-Newton step
i += 1
return beta, i
#
#
# END - Shape computation-related
class ManoConverter(nn.Module):
def __init__(self, mano_layer, bmc_converter, device=None):
super().__init__()
self.mano_layer = mano_layer
self.bmc_converter = bmc_converter
self.device = device
self.axisang_hand_mean = mano_layer.th_hands_mean
def get_mano_shape(self, kps):
        '''Estimates the MANO shape parameters (beta) using Adrian's QP fitting method.
        Currently takes in one hand at a time.
'''
J = convert_joints(kps, source='halo', target='biomech')
J = J.squeeze(0)[:16]
Q, c, a, bl2 = initialize_QP(self.mano_layer, J)
# r = eval_QP(Q,c,a,bl2, b)
# beta_init = torch.zeros_like(b)
beta_init = torch.zeros(10).to(self.device)
b_est, n_iter = newtons_method(Q, c, a, bl2, beta_init)
# Construct MANO hand with estimated betas
shape = b_est.view(1, 10)
# import pdb; pdb.set_trace()
        # Default mean shape
# shape = torch.zeros(1, 10).to(self.device)
return shape
def get_rest_joint(self, shape):
        rot = torch.zeros(1, 3).to(self.device)  # zero global rotation (axis-angle)
pose = torch.zeros(1, 45).to(self.device)
pose_para = torch.cat([rot, pose], 1)
_, _, _, _, rest_pose_joints, _ = self.mano_layer(pose_para, shape)
return rest_pose_joints
def _get_halo_matrices(self, trans_mat):
# Use 16 out of 21 joints for nasa inputs
joints_for_nasa_input = torch.tensor([0, 2, 3, 17, 5, 6, 18, 8, 9, 20, 11, 12, 19, 14, 15, 16])
trans_mat = trans_mat[:, joints_for_nasa_input]
return trans_mat
def trans2bmc_cano_mat(self, joints):
'''Assume 'halo' joint order and correct scale (m)
'''
# if joint_order != 'biomech':
kps = convert_joints(joints, source='halo', target='biomech')
# Assume right hand
is_right_vec = torch.ones(kps.shape[0], device=self.device) * True
# Global normalization - normalize index and middle finger root bone plane
normalized_kps, normalization_mat = transform_to_canonical(kps, is_right=is_right_vec)
normalized_kps, change_axes_mat = change_axes(normalized_kps, target='halo')
normalization_mat = torch.matmul(change_axes_mat, normalization_mat)
# import pdb; pdb.set_trace()
# normalization_mat = torch.eye(4).to(self.device).unsqueeze(0)
unpose_mat, _ = self.bmc_converter(normalized_kps, is_right_vec)
# Change back to HALO joint order
unpose_mat = convert_joints(unpose_mat, source='biomech', target='halo')
unpose_mat = self._get_halo_matrices(unpose_mat)
# unpose_mat_scaled = scale_halo_trans_mat(unpose_mat)
full_trans_mat = torch.matmul(unpose_mat, normalization_mat)
return full_trans_mat # normalization_mat
def get_trans_mat(self, rest_joints, hand_joints):
# C
mano_rest2bmc_cano_mat = self.trans2bmc_cano_mat(rest_joints)
# B^-1
posed_hand2bmc_cano_mat = self.trans2bmc_cano_mat(hand_joints)
# BC
mano_rest2posed_hand_mat = torch.matmul(torch.inverse(posed_hand2bmc_cano_mat), mano_rest2bmc_cano_mat)
# import pdb;pdb.set_trace()
return mano_rest2posed_hand_mat
def remove_mean_pose(self, mano_input):
# Subtract mean rotation from pose param
mano_input = torch.cat([
mano_input[:, :3],
mano_input[:, 3:] - self.axisang_hand_mean
], 1)
return mano_input
def to_mano(self, kps):
'''
        Takes a batch of keypoints in metres (m) as input.
        The keypoints must follow the HALO joint order.
'''
# Get shape param
shape = self.get_mano_shape(kps)
# From shape param, get mean pose and rest post (conditioned on the shape)
rest_joints = self.get_rest_joint(shape)
        # Get transformation matrices
trans_mat = self.get_trans_mat(rest_joints, kps)
# Convert trans_mat to axis-angle input for MANO
mano_axisang = global_rot2mano_axisang(trans_mat)
# Remove mean pose angle
mano_pose = self.remove_mean_pose(mano_axisang)
# shape = 0
# pose = 0
return shape, mano_pose
def forward(self):
pass
| 10,931 | 33.269592 | 111 | py |
Im2Hands | Im2Hands-main/dependencies/halo/halo_adapter/adapter.py | import sys
import torch
import torch.nn as nn
import numpy as np
from torch.nn.modules import loss
from models.halo_adapter.converter import PoseConverter, transform_to_canonical
from models.halo_adapter.interface import (get_halo_model, convert_joints, change_axes,
get_bone_lengths, scale_halo_trans_mat)
from models.halo_adapter.projection import get_projection_layer
from models.halo_adapter.transform_utils import xyz_to_xyz1
# from models.nasa_adapter.interface_helper
from models.utils import visualize as vis
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
class HaloAdapter(nn.Module):
def __init__(self, halo_config_file, store=False, device=None,
straight_hand=True, canonical_pose=None, is_right=True, denoiser_pth=None):
super().__init__()
self.device = device
self.is_right = is_right
# self.halo_model = get_halo_model(halo_config_file)
self.halo_config_file = halo_config_file
self.halo_model, self.halo_generator = get_halo_model(self.halo_config_file)
# Freeze halo
self.freeze_halo()
# 3D keypoints to transformation matrices converter
self.global_normalizer = transform_to_canonical
self.pose_normalizer = PoseConverter(straight_hand=straight_hand) # <- TODO: check this
self.denoising_layer = None
def freeze_halo(self):
for param in self.halo_model.parameters():
param.requires_grad = False
def forward(self, joints, joint_order='biomech', return_kps=False, original_position=False):
'''
Input joints are in (cm)
'''
if joint_order != 'biomech':
joints = convert_joints(joints, source=joint_order, target='biomech')
halo_inputs, normalization_mat, normalized_kps = self.get_halo_inputs(joints)
# print('in adaptor', normalization_mat)
# import pdb; pdb.set_trace()
###
nasa_mesh_out, stat = self.halo_generator.generate_mesh(halo_inputs)
# # nasa_mesh_out.vertices = nasa_mesh_out.vertices * 0.4
# # print(nasa_mesh_out)
# nasa_out_file = os.path.join(args.out_folder, 'nasa_out.obj')
# nasa_mesh_out.export(nasa_out_file)
# nasa_cano_vertices = nasa_mesh_out.vertices.copy()
# Return mesh in the original keypoint locations
if original_position:
# Check scale_halo_trans_mat() in interface.py for global scaling (0.4)
mesh_verts = torch.Tensor(nasa_mesh_out.vertices).float().to(self.device) * 0.4
ori_posi_verts = torch.matmul(torch.inverse(normalization_mat.double()), xyz_to_xyz1(mesh_verts).unsqueeze(-1).double()).squeeze(-1)
# fig = plt.figure()
# ax = fig.gca(projection=Axes3D.name)
# fig.suptitle('red - before, blue - after', fontsize=16)
# vis.plot_skeleton_single_view(joints.detach().cpu().numpy()[0], joint_order='biomech', color='r', ax=ax, show=False)
# vis.plot_skeleton_single_view(normalized_kps.detach().cpu().numpy()[0], joint_order='biomech', color='b', ax=ax, show=False)
# back_proj = torch.matmul(torch.inverse(normalization_mat), xyz_to_xyz1(normalized_kps).unsqueeze(-1)).squeeze(-1)
# mesh_verts_np = mesh_verts.detach().cpu().numpy() * 100.0
# ori_posi_verts_np = ori_posi_verts.detach().cpu().numpy() * 100.0
# # ax.scatter(mesh_verts_np[:, 0], mesh_verts_np[:, 1], mesh_verts_np[:, 2], c='black', alpha=0.1)
# ax.scatter(ori_posi_verts_np[:, 0], ori_posi_verts_np[:, 1], ori_posi_verts_np[:, 2], c='black', alpha=0.1)
# # vis.plot_skeleton_single_view(back_proj.detach().cpu().numpy()[0] + 0.001, joint_order='biomech', color='orange', ax=ax, show=False)
# fig.show()
# import pdb; pdb.set_trace()
# Scale back from m (HALO) to cm.
nasa_mesh_out.vertices = ori_posi_verts[:, :3].detach().cpu().numpy() * 100.0
# import pdb; pdb.set_trace()
if not return_kps:
return nasa_mesh_out
###
# Undo normalization
# import pdb; pdb.set_trace()
undo_norm_kps = torch.matmul(torch.inverse(normalization_mat), xyz_to_xyz1(normalized_kps).unsqueeze(-1)).squeeze(-1)
normalized_kps = undo_norm_kps
###
return nasa_mesh_out, normalized_kps # halo_outputs
def query_points(self, query_points, joints, joint_order='biomech'):
if joint_order != 'biomech':
joints = convert_joints(joints, source=joint_order, target='biomech')
scale = 100.0
#scale = 40
query_points = query_points / scale
# query_points = query_points * 2.5
# query_points, _ = change_axes(query_points, target='halo')
halo_inputs, normalization_mat, normalized_kps = self.get_halo_inputs(joints)
# import pdb; pdb.set_trace()
query_points = torch.matmul(normalization_mat.unsqueeze(1), xyz_to_xyz1(query_points).unsqueeze(-1)).squeeze(-1)
query_points = query_points[:, :, :3]
query_points = query_points * 2.5
occ_p = self.halo_model(query_points, halo_inputs['inputs'], bone_lengths=halo_inputs['bone_lengths'])
return occ_p
def get_halo_inputs(self, kps):
'''
This adapter globally normalize the input hand before computing the transformation matrices.
The normalization matrix is "not" include in the HALO input.
It is only for putting the mesh back to the position of the keypoints.
Args:
joints
Returns:
halo_input_dict:
normalizeation_mat (in cm): Transformation matrices used to normalized the given pose to origin.
Multiply the inverse of these matrices to go back to target pose.
'''
if not self.is_right:
# TODO: Do left-to-right convertion
pass
is_right_vec = torch.ones(kps.shape[0], device=self.device) * self.is_right
# Scale from cm (VAE) to m (HALO)
scale = 100.0
#scale = 1.0
kps = kps / scale
# Global normalization
normalized_kps, normalization_mat = self.global_normalizer(kps, is_right=is_right_vec)
# Denoise if available
# Use HALO adapter to normalize middle root bone
# target_js = convert_joints(target_js, source='mano', target='biomech')
# target_js, unused_mat = transform_to_canonical(target_js, torch.ones(target_js.shape[0], device=device))
# target_js = convert_joints(target_js, source='biomech', target='mano')
# print(joints)
if self.denoising_layer is not None:
# Denoising layer operates in cm
print("use denoiser in the forward pass")
joints_before = normalized_kps.detach().cpu().numpy()[0]
normalized_kps = self.denoising_layer(normalized_kps)
joints_after = normalized_kps.detach().cpu().numpy()[0]
fig = plt.figure()
ax = fig.gca(projection=Axes3D.name)
fig.suptitle('blue - before, orange - after', fontsize=16)
vis.plot_skeleton_single_view(joints_before, joint_order='biomech', color='b', ax=ax, show=False)
vis.plot_skeleton_single_view(joints_after, joint_order='biomech', color='orange', ax=ax, show=False)
fig.show()
normalized_kps, change_axes_mat = change_axes(normalized_kps, target='halo')
normalization_mat = torch.matmul(change_axes_mat.double(), normalization_mat.double())
# fig = plt.figure()
# ax = fig.gca(projection=Axes3D.name)
# fig.suptitle('red - before, blue - after', fontsize=16)
# vis.plot_skeleton_single_view(kps.detach().cpu().numpy()[0], joint_order='biomech', color='r', ax=ax, show=False)
# vis.plot_skeleton_single_view(normalized_kps.detach().cpu().numpy()[0], joint_order='biomech', color='b', ax=ax, show=False)
# back_proj = torch.matmul(torch.inverse(normalization_mat), xyz_to_xyz1(normalized_kps).unsqueeze(-1)).squeeze(-1)
# vis.plot_skeleton_single_view(back_proj.detach().cpu().numpy()[0] + 0.001, joint_order='biomech', color='orange', ax=ax, show=False)
# fig.show()
# import pdb; pdb.set_trace()
# import pdb; pdb.set_trace()
# # Scale from cm (VAE) to m (HALO)
# scale = 100.0
# normalized_kps = normalized_kps / scale
bone_lengths = get_bone_lengths(normalized_kps, source='biomech', target='halo')
# Compute unpose matrices
unpose_mat, _ = self.pose_normalizer(normalized_kps, is_right_vec)
# Test rotation numerical stability
# import pdb; pdb.set_trace()
# unpose_kps = torch.matmul(unpose_mat, xyz_to_xyz1(normalized_kps).unsqueeze(-1)).squeeze(-1)
# vis.plot_skeleton_single_view(unpose_kps.detach().cpu().numpy()[0], joint_order='biomech')
# cal_rotation_angles, _ = self.pose_normalizer(unpose_kps[..., :3], is_right_vec)
# End test rotation
# Change to HALO joint order
unpose_mat = convert_joints(unpose_mat, source='biomech', target='halo')
unpose_mat = self.get_halo_matrices(unpose_mat)
unpose_mat_scaled = scale_halo_trans_mat(unpose_mat)
halo_inputs = self._pack_halo_input(unpose_mat_scaled, bone_lengths)
return halo_inputs, normalization_mat, normalized_kps * scale
def get_halo_matrices(self, trans_mat):
# Use 16 out of 21 joints for nasa inputs
joints_for_nasa_input = torch.tensor([0, 2, 3, 17, 5, 6, 18, 8, 9, 20, 11, 12, 19, 14, 15, 16])
trans_mat = trans_mat[:, joints_for_nasa_input]
return trans_mat
def _pack_halo_input(self, unpose_mat_scaled, bone_lengths):
halo_inputs = {
'inputs': unpose_mat_scaled,
'bone_lengths': bone_lengths
}
return halo_inputs
def optimize_trans(self, object_points, joints, gt_object_mesh, joint_order='mano'):
epoch_coarse = 10
# import pdb; pdb.set_trace()
batch_size = joints.shape[0]
trans = torch.zeros(batch_size, 3).to(self.device)
trans.requires_grad_()
fix_joints = joints.detach().clone()
# # test query box
# inside_points = torch.rand(1, 4000, 3).cuda() - 0.5
# inside_points = inside_points * 12.
# object_points = inside_points
inside_points = object_points
# from trimesh.base import Trimesh
# obj_points_tmp = inside_points[0].detach().cpu().numpy()
# gt_object_points = Trimesh(vertices=obj_points_tmp)
# obj_path = '/home/korrawe/halo_vae/exp/grab_refine_inter_2/test_optim/obj.obj'
# gt_object_points.export(obj_path)
# tmp_joints = fix_joints[None, 0]
# output_mesh = self.forward(tmp_joints, joint_order='mano', original_position=True)
# meshout_path = '/home/korrawe/halo_vae/exp/grab_refine_inter_2/test_optim/hand_0.obj'
# output_mesh.export(meshout_path)
# occ_p = self.query_points(inside_points, tmp_joints, joint_order=joint_order)
# one_idx = occ_p[0] > 0.5
# obj_points_tmp = inside_points[0, one_idx].detach().cpu().numpy()
# gt_object_points = Trimesh(vertices=obj_points_tmp)
# obj_path = '/home/korrawe/halo_vae/exp/grab_refine_inter_2/test_optim/intersect.obj'
# gt_object_points.export(obj_path)
# import pdb; pdb.set_trace()
# Optimize for global translation (no trans)
optimizer = torch.optim.Adam([trans], lr=0.1) # trans
for i in range(0, epoch_coarse):
new_joints = fix_joints + trans
occ_p = self.query_points(object_points, new_joints, joint_order=joint_order)
occ_p = torch.where(occ_p > 0.5, occ_p, torch.zeros_like(occ_p))
penetration_loss = occ_p.mean()
# _, hand_joints = mano_layer(torch.cat((rot, pose), 1), shape, trans)
# loss = criteria_loss(hand_joints, target_js)
# print(loss)
optimizer.zero_grad()
penetration_loss.backward()
optimizer.step()
tmp_joints = fix_joints + trans
# from trimesh.base import Trimesh
# tmp_joints = pred[None, 0]
# output_mesh = self.forward(tmp_joints, joint_order='mano', original_position=True)
# meshout_path = '/home/korrawe/halo_vae/exp/grab_refine_inter_2/test_optim/hand_%d.obj' % (i+1)
# output_mesh.export(meshout_path)
# print(' loss :', penetration_loss.item())
# print(' trans :', trans)
# pdb.set_trace()
# new_joints = fix_joints + trans
# output_mesh = self.forward(new_joints, joint_order='mano', original_position=True)
# meshout_path = '/home/korrawe/halo_vae/exp/grab_refine_inter_2/test_optim/hand_end.obj'
# output_mesh.export(meshout_path)
# obj_out_path = '/home/korrawe/halo_vae/exp/grab_refine_inter_2/test_optim/obj_mesh.obj'
# gt_object_mesh.export(obj_out_path)
# print('After coarse alignment: %6f' % (penetration_loss.item()))
# query_points(self, query_points, joints, joint_order='biomech')
# import pdb; pdb.set_trace()
return trans
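# Typical use, sketched with placeholder names (the config path below is
# illustrative, not a file shipped with the repo):
#
#   adapter = HaloAdapter('configs/halo.yaml', device=device)
#   mesh = adapter(joints_cm, joint_order='mano', original_position=True)
#   occ = adapter.query_points(obj_points_cm, joints_cm, joint_order='mano')
#
# Keypoints and query points are expected in centimetres; the adapter
# rescales to metres internally before calling HALO (see get_halo_inputs).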
| 13,414 | 44.941781 | 148 | py |
Im2Hands | Im2Hands-main/dependencies/halo/halo_adapter/trans_mat_model.py | import torch
import torch.nn as nn
import numpy as np
class TransformationModel(nn.Module):
def __init__(self, D_in=21 * 3, H=256, D_out=15 * 3, device="cpu"):
"""
        Create a residual MLP with separate rotation and translation heads.
"""
super(TransformationModel, self).__init__()
self.device = device
self.linear1 = torch.nn.Linear(D_in, H) # , bias=False)
self.linear2 = torch.nn.Linear(H, H) # , bias=False)
self.linear3 = torch.nn.Linear(H, H) # , bias=False)
self.rot_head = torch.nn.Linear(H, D_out)
self.tran_head = torch.nn.Linear(H, D_out)
self.actvn = nn.LeakyReLU(0.1)
def forward(self, x):
# import pdb;pdb.set_trace()
zero = torch.zeros([x.shape[0], 1, 3], device=x.device)
x = x.reshape(-1, 21 * 3)
x = self.linear1(x)
x = self.linear2(self.actvn(x)) + x
x = self.linear3(self.actvn(x)) + x
# y_pred = x.reshape(-1, 21, 3)
# import pdb; pdb.set_trace()
rot_out = self.rot_head(x)
rot_out = rot_out.reshape(-1, 15, 3)
rot_out = torch.cat([zero, rot_out], 1)
tran_out = self.tran_head(x)
tran_out = tran_out.reshape(-1, 15, 3)
tran_out = torch.cat([zero, tran_out], 1)
return rot_out, tran_out
def get_transformation_layer(model_path):
model = torch.load(model_path)
return model
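# Smoke test, a minimal sketch: 21 joints in, per-bone rotation and
# translation residuals out, with a zero entry prepended for the root.
if __name__ == '__main__':
    model = TransformationModel()
    rot, tran = model(torch.rand(2, 21, 3))
    assert rot.shape == (2, 16, 3) and tran.shape == (2, 16, 3)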
| 1,438 | 29.617021 | 71 | py |
Im2Hands | Im2Hands-main/dependencies/halo/halo_adapter/transform_utils.py | import torch
def xyz_to_xyz1(xyz):
""" Convert xyz vectors from [BS, ..., 3] to [BS, ..., 4] for matrix multiplication
"""
ones = torch.ones([*xyz.shape[:-1], 1], device=xyz.device)
# print("xyz shape", xyz.shape)
# print("one", ones.shape)
return torch.cat([xyz, ones], dim=-1)
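# Example (commented sketch): promote points to homogeneous coordinates so
# that a batch of 4x4 transforms applies with a single matmul.
#
#   pts = torch.rand(8, 21, 3)
#   T = pad34_to_44(torch.rand(8, 3, 4))                      # (8, 4, 4)
#   moved = torch.matmul(T.unsqueeze(1),
#                        xyz_to_xyz1(pts).unsqueeze(-1)).squeeze(-1)[..., :3]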
def pad34_to_44(mat):
last_row = torch.tensor([0., 0., 0., 1.], device=mat.device).reshape(1, 4).repeat(*mat.shape[:-2], 1, 1)
return torch.cat([mat, last_row], dim=-2) | 483 | 31.266667 | 108 | py |
Im2Hands | Im2Hands-main/dependencies/halo/halo_adapter/projection.py | import torch
import torch.nn as nn
import numpy as np
class JointProjectionLayer(nn.Module):
def __init__(self, D_in=21 * 3, H=256, D_out=21 * 3, device="cpu", fix_root=True):
"""
        Create a three-layer network with LeakyReLU activations.
"""
super(JointProjectionLayer, self).__init__()
self.device = device
self.linear1 = torch.nn.Linear(D_in, H) # , bias=False)
self.linear2 = torch.nn.Linear(H, H) # , bias=False)
self.linear3 = torch.nn.Linear(H, D_out) # , bias=False)
self.actvn = nn.LeakyReLU(0.1)
self.fix_root = fix_root
self.endpoints = np.array([0])
self.endpoints_flat = []
for idx in self.endpoints:
for j in range(3):
self.endpoints_flat.append(idx * 3 + j)
self.endpoints_flat = np.array(self.endpoints_flat)
# print("end point flat = ", self.endpoints_flat)
def forward(self, x):
# import pdb;pdb.set_trace()
endpoints_in = x[:, self.endpoints].clone()
x = x.reshape(-1, 21 * 3)
x = self.linear1(x)
x = self.linear2(self.actvn(x))
x = self.linear3(self.actvn(x))
y_pred = x.reshape(-1, 21, 3)
if self.fix_root:
y_pred[:, self.endpoints] = endpoints_in
return y_pred
def get_projection_layer(model_path):
model = torch.load(model_path)
return model
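# Smoke test, a minimal sketch: noisy keypoints in, projected keypoints
# out, with the root joint passed through unchanged when fix_root=True.
if __name__ == '__main__':
    proj = JointProjectionLayer(fix_root=True)
    kp = torch.rand(2, 21, 3)
    out = proj(kp)
    assert out.shape == (2, 21, 3)
    assert torch.allclose(out[:, 0], kp[:, 0])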
| 1,415 | 30.466667 | 86 | py |
Im2Hands | Im2Hands-main/dependencies/halo/halo_adapter/converter_ref.py | # ------------------------------------------------------------------------------
# Copyright (c) 2019 Adrian Spurr
# Licensed under the GPL License.
# Written by Adrian Spurr
# ------------------------------------------------------------------------------
import numpy as np
import torch
import torch.nn as nn
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from common.utils.transforms_torch import xyz_to_xyz1, pad34_to_44
eps = torch.tensor(1e-6)
def batch_dot_product(batch_1, batch_2, keepdim=False):
""" Performs the batch-wise dot product
"""
# n_elem = batch_1.size(1)
# batch_size = batch_1.size(0)
# # Idx of the diagonal
# diag_idx = torch.tensor([[i,i] for i in range(n_elem)]).long()
# # Perform for each element of batch a matmul
# batch_prod = torch.matmul(batch_1, batch_2.transpose(2,1))
# # Extract the diagonal
# batch_dot_prod = batch_prod[:, diag_idx[:,0], diag_idx[:,1]]
# if keepdim:
# batch_dot_prod = batch_dot_prod.reshape(batch_size, -1, 1)
batch_dot_prod = (batch_1 * batch_2).sum(-1, keepdim=keepdim)
return batch_dot_prod
def rotate_axis_angle(v, k, theta):
# Rotate v around k by theta using rodrigues rotation formula
v_rot = v * torch.cos(theta) + \
torch.cross(k,v)*torch.sin(theta) + \
k*batch_dot_product(k,v, True)*(1-torch.cos(theta))
return v_rot
def clip_values(x, min_v, max_v):
clipped = torch.min(torch.max(x, min_v), max_v)
return clipped
def pyt2np(x):
if isinstance(x, torch.Tensor):
x = x.cpu().detach().numpy()
return x
def normalize(bv, eps=1e-8):
"""
Normalizes the last dimension of bv such that it has unit length in
euclidean sense
"""
eps_mat = torch.tensor(eps, device=bv.device)
norm = torch.max(torch.norm(bv, dim=-1, keepdim=True), eps_mat)
bv_n = bv / norm
return bv_n
def angle2(v1, v2):
"""
Numerically stable way of calculating angles.
See: https://scicomp.stackexchange.com/questions/27689/numerically-stable-way-of-computing-angles-between-vectors
"""
eps = 1e-10
eps_mat = torch.tensor([eps], device=v1.device)
n_v1 = v1 / torch.max(torch.norm(v1, dim=-1, keepdim=True), eps_mat)
n_v2 = v2 / torch.max(torch.norm(v2, dim=-1, keepdim=True), eps_mat)
a = 2 * torch.atan2(
torch.norm(n_v1 - n_v2, dim=-1), torch.norm(n_v1 + n_v2, dim=-1)
)
return a
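# Example (comment-only): the half-angle formulation stays numerically stable
# for nearly parallel or anti-parallel vectors, unlike acos of a clipped dot.
# >>> angle2(torch.tensor([[1., 0., 0.]]), torch.tensor([[0., 1., 0.]]))
# tensor([1.5708])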
def get_alignment_mat(v1, v2):
"""
Returns the rotation matrix R, such that R*v1 points in the same direction as v2
"""
axis = cross(v1, v2, do_normalize=True)
ang = angle2(v1, v2)
R = rotation_matrix(ang, axis)
return R
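# Example (comment-only): aligning the x-axis with the y-axis is a 90-degree
# rotation about z; R @ v1 then points along v2.
# >>> v1 = torch.tensor([[1., 0., 0.]])
# >>> v2 = torch.tensor([[0., 1., 0.]])
# >>> R = get_alignment_mat(v1, v2)
# >>> torch.matmul(R, v1.unsqueeze(-1)).squeeze(-1)  # ~ [[0., 1., 0.]]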
def transform_to_canonical(kp3d, is_right):
"""Undo global rotation
"""
global_rot = compute_canonical_transform(kp3d, is_right)
kp3d = xyz_to_xyz1(kp3d)
# import pdb
# pdb.set_trace()
kp3d_canonical = torch.matmul(global_rot.unsqueeze(1), kp3d.unsqueeze(-1))
kp3d_canonical = kp3d_canonical.squeeze(-1)
# Pad T from 3x4 mat to 4x4 mat
global_rot = pad34_to_44(global_rot)
return kp3d_canonical, global_rot
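# Usage sketch (comment-only): canonicalize a batch of 21-joint hands. The
# returned transform is padded to 4x4, so it can be chained with other mats.
# >>> kp3d = torch.rand(2, 21, 3)
# >>> is_right = torch.ones(2)
# >>> kp_canon, T = transform_to_canonical(kp3d, is_right)
# >>> kp_canon.shape, T.shape
# (torch.Size([2, 21, 3]), torch.Size([2, 4, 4]))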
def compute_canonical_transform(kp3d, is_right):
"""
Returns a transformation matrix T which when applied to kp3d performs the following
operations:
1) Center at the root (kp3d[:,0])
2) Rotate such that the middle root bone points towards the y-axis
3) Rotates around the x-axis such that the YZ-projection of the normal of the plane
spanned by middle and index root bone points towards the z-axis
"""
assert len(kp3d.shape) == 3, "kp3d need to be BS x 21 x 3"
assert is_right.shape[0] == kp3d.shape[0]
is_right = is_right.type(torch.bool)
dev = kp3d.device
bs = kp3d.shape[0]
kp3d = kp3d.clone().detach()
# Flip so that we compute the correct transformations below
kp3d[~is_right, :, 1] *= -1
# Align root
tx = kp3d[:, 0, 0]
ty = kp3d[:, 0, 1]
tz = kp3d[:, 0, 2]
# Translation
T_t = torch.zeros((bs, 3, 4), device=dev)
T_t[:, 0, 3] = -tx
T_t[:, 1, 3] = -ty
T_t[:, 2, 3] = -tz
T_t[:, 0, 0] = 1
T_t[:, 1, 1] = 1
T_t[:, 2, 2] = 1
# Align middle root bone with -y-axis
# x_axis = torch.tensor([[1.0, 0.0, 0.0]], device=dev).expand(bs, 3) # FIXME
y_axis = torch.tensor([[0.0, -1.0, 0.0]], device=dev).expand(bs, 3)
v_mrb = normalize(kp3d[:, 3] - kp3d[:, 0])
R_1 = get_alignment_mat(v_mrb, y_axis)
# Align x-y plane along plane spanned by index and middle root bone of the hand
# after R_1 has been applied to it
v_irb = normalize(kp3d[:, 2] - kp3d[:, 0])
normal = cross(v_mrb, v_irb).view(-1, 1, 3)
normal_rot = torch.matmul(normal, R_1.transpose(1, 2)).view(-1, 3)
z_axis = torch.tensor([[0.0, 0.0, 1.0]], device=dev).expand(bs, 3)
R_2 = get_alignment_mat(normal_rot, z_axis)
# Include the flipping into the transformation
T_t[~is_right, 1, 1] = -1
# Compute the canonical transform
T = torch.bmm(R_2, torch.bmm(R_1, T_t))
return T
def set_equal_xyz_scale(ax, X, Y, Z):
max_range = np.array([X.max() - X.min(), Y.max() - Y.min(), Z.max() - Z.min()]).max() / 2.0
mid_x = (X.max() + X.min()) * 0.5
mid_y = (Y.max() + Y.min()) * 0.5
mid_z = (Z.max() + Z.min()) * 0.5
ax.set_xlim(mid_x - max_range, mid_x + max_range)
ax.set_ylim(mid_y - max_range, mid_y + max_range)
ax.set_zlim(mid_z - max_range, mid_z + max_range)
return ax
def set_axes_equal(ax: plt.Axes):
"""Set 3D plot axes to equal scale.
Make axes of 3D plot have equal scale so that spheres appear as
spheres and cubes as cubes. Required since `ax.axis('equal')`
and `ax.set_aspect('equal')` don't work on 3D.
"""
limits = np.array([
ax.get_xlim3d(),
ax.get_ylim3d(),
ax.get_zlim3d(),
])
origin = np.mean(limits, axis=1)
radius = 0.5 * np.max(np.abs(limits[:, 1] - limits[:, 0]))
_set_axes_radius(ax, origin, radius)
def _set_axes_radius(ax, origin, radius):
x, y, z = origin
ax.set_xlim3d([x - radius, x + radius])
ax.set_ylim3d([y - radius, y + radius])
ax.set_zlim3d([z - radius, z + radius])
def plot_local_coord_system(local_cs, bones, bone_lengths, root, ax):
local_cs = pyt2np(local_cs.squeeze())
bones = pyt2np(bones.squeeze())
bone_lengths = pyt2np(bone_lengths.squeeze())
root = pyt2np(root.squeeze())
col = ['r','g','b']
n_fingers = 5
n_b_per_f = 4
print("local_cs", local_cs)
for k in range(n_fingers):
start_point = root.copy()
for i in range(n_b_per_f):
idx = i * n_fingers + k
for j in range(3):
ax.quiver(start_point[0], start_point[1], start_point[2],
local_cs[idx, j, 0], local_cs[idx, j, 1], local_cs[idx, j, 2],
color=col[j], length=10.1)
old_start_point = start_point.copy()
start_point += (bones[idx] * bone_lengths[idx])
ax.plot([old_start_point[0], start_point[0]],
[old_start_point[1], start_point[1]],
[old_start_point[2], start_point[2]],
color="black")
set_axes_equal(ax)
plt.show()
def plot_local_coord(local_coords, bone_lengths, root, ax, show=True):
local_coords = pyt2np(local_coords.squeeze())
# bones = pyt2np(bones.squeeze())
bone_lengths = pyt2np(bone_lengths.squeeze())
root = pyt2np(root.squeeze())
# root = root[0]
print("root", root.shape)
col = ['r', 'g', 'b']
n_fingers = 5
n_b_per_f = 4
# print("local_cs", local_coords)
for k in range(n_fingers):
start_point = root.copy()
for i in range(n_b_per_f):
idx = i * n_fingers + k
# for j in range(3):
# print("local_coords[idx]", local_coords[idx])
local_bone = (local_coords[idx] * bone_lengths[idx])
target_point = start_point + local_bone
# print("start_point", start_point, "to", local_bone)
cc = 'r' if show else 'b'
ax.plot([start_point[0], target_point[0]],
[start_point[1], target_point[1]],
[start_point[2], target_point[2]],
color=cc)
ax.scatter([target_point[0]], [target_point[1]], [target_point[2]], s=10.0)
# start_point += (bones[idx] * bone_lengths[idx])
start_point += (local_bone)
set_axes_equal(ax)
if show:
plt.show()
def rotation_matrix(angles, axis):
"""
Converts Rodrigues rotation formula into a rotation matrix
"""
eps = torch.tensor(1e-6)
# print("norm", torch.abs(torch.sum(axis ** 2, dim=-1) - 1))
try:
assert torch.any(
torch.abs(torch.sum(axis ** 2, dim=-1) - 1) < eps
), "axis must have unit norm"
    except AssertionError:
        # NOTE: torch.any makes this a loose check; it passes as long as at
        # least one axis in the batch has unit norm, and only warns otherwise
        print("Warning: axis does not have unit norm")
# import pdb
# pdb.set_trace()
dev = angles.device
batch_size = angles.shape[0]
sina = torch.sin(angles).view(batch_size, 1, 1)
cosa_1_minus = (1 - torch.cos(angles)).view(batch_size, 1, 1)
a_batch = axis.view(batch_size, 3)
o = torch.zeros((batch_size, 1), device=dev)
a0 = a_batch[:, 0:1]
a1 = a_batch[:, 1:2]
a2 = a_batch[:, 2:3]
cprod = torch.cat((o, -a2, a1, a2, o, -a0, -a1, a0, o), 1).view(batch_size, 3, 3)
I = torch.eye(3, device=dev).view(1, 3, 3)
R1 = cprod * sina
R2 = cprod.bmm(cprod) * cosa_1_minus
R = I + R1 + R2
return R
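# Example (comment-only): a 90-degree rotation about the z-axis maps x to y.
# >>> import math
# >>> R = rotation_matrix(torch.tensor([math.pi / 2]), torch.tensor([[0., 0., 1.]]))
# >>> torch.matmul(R, torch.tensor([[1.], [0.], [0.]]))  # ~ tensor([[[0.], [1.], [0.]]])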
def cross(bv_1, bv_2, do_normalize=False):
"""
Computes the cross product of the last dimension between bv_1 and bv_2.
If normalize is True, it normalizes the vector to unit length.
"""
cross_prod = torch.cross(bv_1, bv_2)
if do_normalize:
cross_prod = normalize(cross_prod)
return cross_prod
def rotate(v, ax, rad):
"""
Uses Rodrigues rotation formula
Rotates the vectors in v around the axis in ax by rad radians. These
operations are applied on the last dim of the arguments. The parameter rad
is given in radian
"""
# print("v", v, v.shape)
# print("ax", ax, ax.shape)
# print("rad", rad)
sin = torch.sin
cos = torch.cos
v_rot = (
v * cos(rad) + cross(ax, v) * sin(rad) + ax * batch_dot_product(ax, v, True) * (1 - cos(rad))
)
return v_rot
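# Example (comment-only): rotating the x-axis a quarter turn around z yields
# the y-axis; the axis must be unit length for Rodrigues' formula to hold.
# >>> import math
# >>> v = torch.tensor([[1., 0., 0.]])
# >>> ax = torch.tensor([[0., 0., 1.]])
# >>> rotate(v, ax, torch.tensor([[math.pi / 2]]))  # ~ [[0., 1., 0.]]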
class PoseConverter(nn.Module):
def __init__(self, store=False, dev=None, straight_hand=True, canonical_pose=None):
super().__init__()
# assert angle_poly.shape[0] == 15
if not dev:
dev = torch.device("cuda:0" if torch.cuda.is_available() else 'cpu')
self.store = store
self.dev = dev
# self.angle_poly = angle_poly.to(dev)
self.idx_1 = torch.arange(1,21).to(dev).long()
self.idx_2 = torch.zeros(20).to(dev).long()
self.idx_2[:5] = 0
self.idx_2[5:] = torch.arange(1,16)
# For preprocess joints
self.shift_factor = 0 # 0.5
# For poly distance
# n_poly = angle_poly.shape[0]
# n_vert = angle_poly.shape[1]
# idx_1 = torch.arange(n_vert)
# idx_2 = idx_1.clone()
# idx_2[:-1] = idx_1[1:]
# idx_2[-1] = idx_1[0]
# # n_poly x n_edges x 2
# v1 = angle_poly[:,idx_1].to(dev)
# v2 = angle_poly[:,idx_2].to(dev)
# # n_poly x n_edges x 2
# edges = (v2 - v1).view(1, n_poly, n_vert, 2)
# Store
# self.v1 = v1
# self.v2 = v2
# self.n_poly = n_poly
# self.n_vert = n_vert
# self.edges = edges
self.dot = lambda x,y: (x*y).sum(-1)
# self.l2 = self.dot(edges,edges)
self.zero = torch.zeros((1), device=dev)
self.one = torch.ones((1), device=dev)
# For angle computation
self.rb_idx = torch.arange(5, device=dev).long()
self.nrb_idx_list = []
for i in range(2, 4):
self.nrb_idx_list += [torch.arange(i*5, (i+1)*5, device=dev).long()]
self.nrb_idx = torch.arange(5,20, device=dev).long()
self.one = torch.ones((1), device=dev)
self.zero = torch.zeros((1), device=dev)
self.eps_mat = torch.tensor(1e-9, device=dev)
self.eps = eps
self.eps_poly = 1e-2
self.y_axis = torch.tensor([[[0,1.,0]]], device=dev)
self.x_axis = torch.tensor([[[1.,0,0]]], device=dev)
self.z_axis = torch.tensor([[[0],[0],[1.]]], device=dev)
self.xz_mat = torch.tensor([[[[1.,0,0],[0,0,0],[0,0,1]]]], device=dev)
self.yz_mat = torch.tensor([[[[0,0,0],[0,1.,0],[0,0,1]]]], device=dev)
self.flipLR = torch.tensor([[[-1.,1.,1.]]], device=dev)
self.bones = None
self.bone_lengths = None
self.local_cs = None
self.local_coords = None
self.rot_angles = None
if canonical_pose is not None:
self.initialize_canonical_pose(canonical_pose)
else:
# initialize canonical pose with some value
self.root_plane_angles = np.array([0.8, 0.2, 0.2])
self.root_bone_angles = np.array([0.4, 0.2, 0.2, 0.2])
if straight_hand:
# For NASA - no additional rotation
self.canonical_rot_angles = torch.tensor([[[0, 0],
[0, 0],
[0, 0],
[0, 0],
[0, 0],
[0, 0],
[0, 0],
[0, 0],
[0, 0],
[0, 0],
[0, 0],
[0, 0],
[0, 0],
[0, 0],
[0, 0],
[0, 0],
[0, 0],
[0, 0],
[0, 0],
[0, 0]]]
)
else:
# # For MANO
self.canonical_rot_angles = torch.tensor([[[-6.8360e-01, 2.8175e-01],
[-1.3016e+00, -1.4236e-03],
[-1.5708e+00, -1.4236e-03],
[-1.8425e+00, 7.4600e-02],
[-2.0746e+00, 1.8700e-01],
[-4.2529e-01, 1.4156e-01],
[-1.5473e-01, 1.8678e-01],
[-7.1449e-02, 1.4358e-01],
[-8.7801e-02, -1.5822e-01],
[-1.4013e-01, 3.4336e-02],
[ 3.3824e-01, 1.6999e-01],
[ 1.8830e-01, 4.8844e-02],
[ 1.1238e-01, -8.7551e-03],
[ 1.1125e-01, 1.6023e-01],
[ 5.3791e-02, -4.2887e-02],
[-1.0314e-01, -1.2587e-02],
[-9.8003e-02, -1.7479e-01],
[-6.6223e-02, 2.9800e-02],
[-2.4101e-01, -5.5725e-02],
[-1.3926e-01, -8.2840e-02]]]
)
def initialize_canonical_pose(self, canonical_joints):
        # canonical_joints must be of size [1, 21, 3]
# Pre-process the joints
joints = self.preprocess_joints(canonical_joints, torch.ones(canonical_joints.shape[0], device=canonical_joints.device))
# Compute the bone vectors
bones, bone_lengths, kp_to_bone_mat = self.kp3D_to_bones(joints)
self.root_plane_angles = self._compute_root_plane_angle(bones) # np.array([0.8, 0.2, 0.2])
self.root_bone_angles = self._compute_root_bone_angle(bones) # np.array([0.4, 0.2, 0.2, 0.2])
# Compute the local coordinate systems for each bone
        # This assumes the root bones are fixed
local_cs = self.compute_local_coordinate_system(bones)
# Compute the local coordinates
local_coords = self.compute_local_coordinates(bones, local_cs)
        # Compute the rotation around the y and rotated-x axes
self.canonical_rot_angles = self.compute_rot_angles(local_coords)
        # canonical_rot_angles has shape [1, 20, 2]: (flexion, abduction) per bone
def _compute_root_plane_angle(self, bones):
root_plane_angle = np.zeros(3)
# canonical_angle defines angles between root bone planes
# angle between (n0,n1), (n1,n2), (n2,n3)
n2 = torch.cross(bones[:, 3], bones[:, 2])
# ring and middle (plane n1)
n1 = torch.cross(bones[:, 2], bones[:, 1])
root_plane_angle[1] = angle2(n1, n2).squeeze(0)
# thumb and index (plane n0)
n0 = torch.cross(bones[:, 1], bones[:, 0])
root_plane_angle[0] = angle2(n0, n1).squeeze(0)
# ring and pinky (plane n4)
n3 = torch.cross(bones[:, 4], bones[:, 3])
root_plane_angle[2] = angle2(n2, n3).squeeze(0)
return root_plane_angle
def _compute_root_bone_angle(self, bones):
root_bone_angles = np.zeros(4)
root_bone_angles[0] = angle2(bones[:, 1], bones[:, 0]).squeeze(0)
root_bone_angles[1] = angle2(bones[:, 2], bones[:, 1]).squeeze(0)
root_bone_angles[2] = angle2(bones[:, 3], bones[:, 2]).squeeze(0)
root_bone_angles[3] = angle2(bones[:, 4], bones[:, 3]).squeeze(0)
return root_bone_angles
def kp3D_to_bones(self, kp_3D):
"""
Converts from joints to bones
"""
dev = self.dev
eps_mat = self.eps_mat
batch_size = kp_3D.shape[0]
bones = kp_3D[:, self.idx_1] - kp_3D[:, self.idx_2] # .detach()
bone_lengths = torch.max(torch.norm(bones, dim=2, keepdim=True), eps_mat)
bones = bones / bone_lengths
translate = torch.eye(4, device=dev).repeat(batch_size, 20, 1, 1)
# print("translate", translate.shape)
# print("kp 3D", kp_3D.shape)
translate[:, :20, :3, 3] = -1. * kp_3D[:, self.idx_2]
# print(translate.detach())
scale = torch.eye(4, device=dev).repeat(batch_size, 20, 1, 1)
# print("bone_lengths", bone_lengths.shape)
scale = scale * 1. / bone_lengths.unsqueeze(-1)
scale[:, :, 3, 3] = 1.0
# print("scale", scale)
kp_to_bone_mat = torch.matmul(scale, translate)
# print(kp_to_bone_mat)
# import pdb;pdb.set_trace()
# assert False
return bones, bone_lengths, kp_to_bone_mat
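    # Shape sketch (comment-only): for kp_3D of shape [B, 21, 3] this returns
    # unit bone vectors [B, 20, 3], bone_lengths [B, 20, 1], and per-bone
    # keypoint-to-bone transforms kp_to_bone_mat of shape [B, 20, 4, 4].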
def compute_bone_to_kp_mat(self, bone_lengths, local_coords_canonical):
bone_to_kp_mat = torch.eye(4, device=bone_lengths.device).repeat(*bone_lengths.shape[:2], 1, 1)
# scale
bone_to_kp_mat = bone_to_kp_mat * bone_lengths.unsqueeze(-1)
bone_to_kp_mat[:, :, 3, 3] = 1.0
# print("bone_to_kp_mat", bone_to_kp_mat, bone_to_kp_mat.shape)
# assert False
# add translation along kinematic chain
# no root
lev_1 = [0, 1, 2, 3, 4]
lev_2 = [5, 6, 7, 8, 9]
lev_3 = [10, 11, 12, 13, 14]
lev_4 = [15, 16, 17, 18, 19]
bones_scaled = local_coords_canonical * bone_lengths
# print("bone length shape", bone_lengths.shape)
lev_1_trans = torch.zeros([bone_lengths.shape[0], 5, 3], device=bone_lengths.device)
lev_2_trans = bones_scaled[:, lev_1]
lev_3_trans = bones_scaled[:, lev_2] + lev_2_trans
lev_4_trans = bones_scaled[:, lev_3] + lev_3_trans
translation = torch.cat([lev_1_trans, lev_2_trans, lev_3_trans, lev_4_trans], dim=1)
# print("translation", translation.shape)
bone_to_kp_mat[:, :, :3, 3] = translation # [:, :, :]
# print(bone_to_kp_mat)
# assert False
return bone_to_kp_mat
def compute_local_coordinate_system(self, bones):
dev = self.dev
dot = batch_dot_product
n_fingers = 5
batch_size = bones.size(0)
n_bones = bones.size(1)
rb_idx = self.rb_idx
# rb_idx_bin = self.rb_idx_bin
nrb_idx_list = self.nrb_idx_list
one = self.one
eps = self.eps
eps_mat = self.eps_mat
xz_mat = self.xz_mat
z_axis = self.z_axis
bs = bones.size(0)
y_axis = self.y_axis.repeat(bs, 5, 1)
x_axis = self.x_axis.repeat(bs, 5, 1)
# Get the root bones
root_bones = bones[:, rb_idx]
# Compute the plane normals for each neighbouring root bone pair
# Compute the plane normals directly
plane_normals = torch.cross(root_bones[:,:-1], root_bones[:,1:], dim=2)
# Compute the plane normals flipped (sometimes gives better grad)
# WARNING: Uncomment flipping below
# plane_normals = torch.cross(root_bones[:,1:], root_bones[:,:-1], dim=2)
# Normalize them
plane_norms = torch.norm(plane_normals, dim=2, keepdim=True)
plane_norms = torch.max(plane_norms, eps_mat)
plane_normals = plane_normals / plane_norms
# Define the normals of the planes on which the fingers reside (model assump.)
finger_plane_norms = torch.zeros((batch_size, n_fingers, 3), device=dev)
finger_plane_norms[:,0] = plane_normals[:,0]
finger_plane_norms[:,1] = plane_normals[:,1]
finger_plane_norms[:,2] = (plane_normals[:,1] + plane_normals[:,2]) / 2
finger_plane_norms[:,3] = (plane_normals[:,2] + plane_normals[:,3]) / 2
finger_plane_norms[:,4] = plane_normals[:,3]
        # Flip the normals s.t. they look towards the palm of the hand
# finger_plane_norms = -finger_plane_norms
# Root bones are in the global coordinate system
coord_systems = torch.zeros((batch_size, n_bones, 3, 3), device=dev)
# Root bone coordinate systems
coord_systems[:, rb_idx] = torch.eye(3, device=dev)
# Root child bone coordinate systems
z = bones[:, rb_idx]
y = torch.cross(bones[:,rb_idx], finger_plane_norms)
x = torch.cross(y,z)
# Normalize to unit length
x_norm = torch.max(torch.norm(x, dim=2, keepdim=True), eps_mat)
x = x / x_norm
y_norm = torch.max(torch.norm(y, dim=2, keepdim=True), eps_mat)
y = y / y_norm
# Parent bone is already normalized
# z = z / torch.norm(z, dim=2, keepdim=True)
# Assign them to the coordinate system
coord_systems[:, rb_idx + 5, 0] = x
coord_systems[:, rb_idx + 5, 1] = y
coord_systems[:, rb_idx + 5, 2] = z
# Construct the remaining bone coordinate systems iteratively
# TODO This can be potentially sped up by rotating the root child bone
# instead of the parent bone
for i in range(2, 4):
idx = nrb_idx_list[i - 2]
bone_vec_grandparent = bones[:, idx - 2 * 5]
bone_vec_parent = bones[:, idx - 1 * 5]
# bone_vec_child = bones[:,idx]
p_coord = coord_systems[:, idx - 1 * 5]
###### IF BONES ARE STRAIGHT LINE
# Transform into local coordinates
lbv_1 = torch.matmul(p_coord, bone_vec_grandparent.unsqueeze(-1))
lbv_2 = torch.matmul(p_coord, bone_vec_parent.unsqueeze(-1))
###### Angle_xz
# Project onto local xz plane
lbv_2_xz = torch.matmul(xz_mat, lbv_2).squeeze(-1)
lbv_2 = lbv_2.squeeze(-1)
# Compute the dot product
dot_prod_xz = torch.matmul(lbv_2_xz, z_axis).squeeze(-1)
# If dot product is close to zero, set it to zero
cond_0 = (torch.abs(dot_prod_xz) < 1e-6).float()
dot_prod_xz = cond_0 * 0 + (1 - cond_0) * dot_prod_xz
# Compute the norm and make sure its non-zero
norm_xz = torch.max(torch.norm(lbv_2_xz, dim=-1), eps_mat)
# Normalize the dot product
dot_prod_xz = dot_prod_xz / norm_xz
# Clip such that we do not get NaNs during GD
dot_prod_xz = clip_values(dot_prod_xz, -one+eps, one-eps)
# Compute the angle from the z-axis
angle_xz = torch.acos(dot_prod_xz)
# If lbv2_xz is on the -x side, we interpret it as -angle
cond_1 = ((lbv_2_xz[:,:,0] + 1e-6) < 0).float()
angle_xz = cond_1 * (-angle_xz) + (1-cond_1) * angle_xz
###### Angle_yz
# Compute the normalized dot product
dot_prod_yz = batch_dot_product(lbv_2_xz, lbv_2).squeeze(-1)
dot_prod_yz = dot_prod_yz / norm_xz
dot_prod_yz = clip_values(dot_prod_yz, -one+eps, one-eps)
# Compute the angle from the projected bone
angle_yz = torch.acos(dot_prod_yz)
# If bone is on -y side, we interpret it as -angle
cond_2 = ((lbv_2[:,:,1] + 1e-6) < 0).float()
angle_yz = cond_2 * (-angle_yz) + (1-cond_2) * angle_yz
###### Compute the local coordinate system
angle_xz = angle_xz.unsqueeze(-1)
angle_yz = angle_yz.unsqueeze(-1)
# Transform rotation axis to global
rot_axis_xz = torch.matmul(p_coord.transpose(2,3),
y_axis.unsqueeze(-1))
rot_axis_y = rotate_axis_angle(x_axis, y_axis, angle_xz)
rot_axis_y = torch.matmul(p_coord.transpose(2,3),
rot_axis_y.unsqueeze(-1))
rot_axis_y = rot_axis_y.squeeze(-1)
rot_axis_xz = rot_axis_xz.squeeze(-1)
cond = (torch.abs(angle_xz) < eps).float()
x = cond*x + (1-cond)*rotate_axis_angle(x, rot_axis_xz, angle_xz)
y = cond*y + (1-cond)*rotate_axis_angle(y, rot_axis_xz, angle_xz)
z = cond*z + (1-cond)*rotate_axis_angle(z, rot_axis_xz, angle_xz)
# Rotate around rotated x/-x
cond = (torch.abs(angle_yz) < eps).float()
x = cond*x + (1-cond)*rotate_axis_angle(x, rot_axis_y, -angle_yz)
y = cond*y + (1-cond)*rotate_axis_angle(y, rot_axis_y, -angle_yz)
z = cond*z + (1-cond)*rotate_axis_angle(z, rot_axis_y, -angle_yz)
coord_systems[:, idx, 0] = x
coord_systems[:, idx, 1] = y
coord_systems[:, idx, 2] = z
return coord_systems.detach()
def compute_local_coordinates(self, bones, coord_systems):
local_coords = torch.matmul(coord_systems, bones.unsqueeze(-1))
return local_coords.squeeze(-1)
def compute_rot_angles(self, local_coords):
n_bones = local_coords.size(1)
z_axis = self.z_axis
xz_mat = self.xz_mat
yz_mat = self.yz_mat
one = self.one
eps = self.eps
eps_mat = self.eps_mat
# Compute the flexion angle
# Project bone onto the xz-plane
proj_xz = torch.matmul(xz_mat, local_coords.unsqueeze(-1)).squeeze(-1)
norm_xz = torch.max(torch.norm(proj_xz, dim=-1), eps_mat)
dot_prod_xz = torch.matmul(proj_xz, z_axis).squeeze(-1)
cond_0 = (torch.abs(dot_prod_xz) < 1e-6).float()
dot_prod_xz = cond_0 * 0 + (1-cond_0) * dot_prod_xz
dot_prod_xz = dot_prod_xz / norm_xz
dot_prod_xz = clip_values(dot_prod_xz, -one+eps, one-eps)
# Compute the angle from the z-axis
angle_xz = torch.acos(dot_prod_xz)
# If proj_xz is on the -x side, we interpret it as -angle
cond_1 = ((proj_xz[:,:,0] + 1e-6) < 0).float()
angle_xz = cond_1 * (-angle_xz) + (1-cond_1) * angle_xz
# Compute the abduction angle
dot_prod_yz = batch_dot_product(proj_xz, local_coords).squeeze(-1)
dot_prod_yz = dot_prod_yz / norm_xz
dot_prod_yz = clip_values(dot_prod_yz, -one+eps, one-eps)
# Compute the angle from the projected bone
angle_yz = torch.acos(dot_prod_yz)
# If bone is on y side, we interpret it as -angle
cond_2 = ((local_coords[:,:,1] + 1e-6) > 0).float()
angle_yz = cond_2 * (-angle_yz) + (1-cond_2) * angle_yz
# Concatenate both matrices
rot_angles = torch.cat((angle_xz.unsqueeze(-1), angle_yz.unsqueeze(-1)),
dim=-1)
return rot_angles
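    # Shape sketch (comment-only): local_coords [B, 20, 3] -> rot_angles
    # [B, 20, 2], where [..., 0] is the flexion angle (rotation from the local
    # z-axis within the xz-plane) and [..., 1] is the abduction angle out of it.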
def preprocess_joints(self, joints, is_right):
"""
This function does the following:
- Move palm-centered root to wrist-centered root
- Root-center (for easier flipping)
- Flip left hands to right
"""
# Had to formulate it this way such that backprop works
joints_pp = 0 + joints
# Vector from palm to wrist (simplified expression on paper)
vec = joints[:,0] - joints[:,3]
vec = vec / torch.norm(vec, dim=1, keepdim=True)
        # NOTE: flagged as a bug in the original code; with shift_factor = 0
        # the shift below is a no-op
        # Shift the palm keypoint towards the wrist by factor shift_factor
joints_pp[:,0] = joints[:,0] + self.shift_factor * vec
# joints_pp[:,0] = 2*joints[:,0] - joints[:,3]
# joints_pp = joints_pp - joints_pp[:,0]
# if not kp3d_is_right:
# joints_pp = joints_pp * torch.tensor([[-1.,1.,1.]]).view(-1,1,3)
# Flip left handed joints
is_right = is_right.view(-1,1,1)
joints_pp = joints_pp * is_right + (1-is_right) * joints_pp * self.flipLR
# DEBUG
# joints = joints.clone()
# palm = joints[:,0]
# middle_mcp = joints[:,3]
# wrist = 2*palm - middle_mcp
# joints[:,0] = wrist
# # Root-center (for easier flipping)
# joints = joints - joints[:,0]
# # Flip if left hand
# if not kp3d_is_right:
# joints[:,:,0] = joints[:,:,0] * -1
# import pdb;pdb.set_trace()
return joints_pp
# def polygon_distance(self, angles):
# """
# Computes the distance of p_b[:,i] to polys[i]
# """
# # Slack variable due to numerical imprecision
# eps = self.eps_poly
# # Batch-dot prod
# dot = self.dot
# polys = self.angle_poly
# n_poly = self.n_poly
# n_vert = self.n_vert
# v1 = self.v1
# v2 = self.v2
# edges = self.edges
# zero = self.zero
# one = self.one
# l2 = self.l2
# # Check if the polygon contains the point
# # batch_size x n_poly x n_edges x 2
# # Distance of P[:,i] to all of poly[i] edges
# line = angles.view(-1,n_poly,1,2) - v1.view(1,n_poly,n_vert,2)
# # n_points x n_vertices x 1
# cross_prod = edges[:,:,:,0]*line[:,:,:,1] - edges[:,:,:,1]*line[:,:,:,0]
# # Reduce along the n_vertices dim
# contains = (cross_prod >= -eps)
# contains = contains.sum(dim=-1)
# contains = (contains==n_vert).float()
# t = torch.max(zero, torch.min(one, dot(edges,line) / l2)).unsqueeze(-1)
# proj = v1 + t * edges
# angles = angles.view(-1, n_poly,1,2)
# # Compute distance over all vertices
# d = torch.sum(
# torch.abs(torch.cos(angles) - torch.cos(proj)) +
# torch.abs(torch.sin(angles) - torch.sin(proj)),
# dim=-1)
# # Get the min
# D, _ = torch.min(d, dim=-1)
# # Assign 0 for points that are contained in the polygon
# d = (contains * 0 + (1-contains) * D) ** 2
# return d
def compute_rotation_matrix(self, rot_angles, bone_local):
''' rot_angles [BS, bone, 2 (flexion angle, abduction angle)]
'''
batch_size, bone, xy_size = rot_angles.shape
rot_angles_flat = rot_angles.reshape(batch_size * bone, 2)
bone_local_flat = bone_local.reshape(batch_size * bone, 3)
# mano canonical pose
canonical_rot_flat = self.canonical_rot_angles.repeat(batch_size, 1, 1).to(rot_angles_flat.device)
canonical_rot_flat = canonical_rot_flat.reshape(batch_size * bone, 2)
x = torch.zeros([batch_size * bone, 3], device=rot_angles_flat.device)
y = torch.zeros([batch_size * bone, 3], device=rot_angles_flat.device)
z = torch.zeros([batch_size * bone, 3], device=rot_angles_flat.device)
x[:, 0] = 1.
y[:, 1] = 1.
z[:, 2] = 1.
rotated_x = rotate(x, y, rot_angles_flat[:, 0].unsqueeze(1))
# print("rotated x", rotated_x, rotated_x.shape)
# print("bone local", bone_local_flat, bone_local_flat.shape)
# reverse transform starts here
# abduction
b_local_1 = rotate(bone_local_flat, rotated_x, -rot_angles_flat[:, 1].unsqueeze(1))
# flexion
b_local_2 = rotate(b_local_1, y, -rot_angles_flat[:, 0].unsqueeze(1))
# print("sanity check", (b_local_2 - z).abs().max())
# assert (b_local_2 - z).abs().max() < torch.tensor(1e-5)
abduction_angle = (-rot_angles_flat[:, 1] + canonical_rot_flat[:, 1]).unsqueeze(1)
# abduction_angle = (-rot_angles_flat[:, 1]).unsqueeze(1)
r_1 = rotation_matrix(abduction_angle, rotated_x)
flexion_angle = (-rot_angles_flat[:, 0] + canonical_rot_flat[:, 0]).unsqueeze(1)
# flexion_angle = (-rot_angles_flat[:, 0]).unsqueeze(1)
r_2 = rotation_matrix(flexion_angle, y)
# print("abduction angle", abduction_angle.shape)
# assert False
# print("r_1", r_1.shape)
# print("r_2", r_2.shape)
r = 0
r = torch.bmm(r_2, r_1)
r = r.reshape(batch_size, bone, 3, 3)
# mask root bones rotation
r[:, :5] = torch.eye(3, device=r.device)
# print("final r", r)
# x_angle = rot_angles[:, :, 0]
# y_angle = rot_angles[:, :, 1]
# print("rot_angles", rot_angles.shape)
# print("x_angle", x_angle.shape)
# print("y_angle", y_angle.shape)
# rot_x_mat = get_rot_mat_x(x_angle)
# rot_y_mat = get_rot_mat_y(y_angle)
# rot_x_y = torch.matmul(rot_y_mat, rot_x_mat)
return r # rot_x_y
def get_scale_mat_from_bone_lengths(self, bone_lengths):
scale_mat = torch.eye(3, device=bone_lengths.device).repeat(*bone_lengths.shape[:2], 1, 1)
# print("eye", scale_mat, scale_mat.shape)
scale_mat = bone_lengths.unsqueeze(-1) * scale_mat
# print("scale_mat", scale_mat, scale_mat.shape)
return scale_mat
def get_trans_mat_with_translation(self, trans_mat_without_scale_translation, local_coords_after_unpose, bones, bone_lengths):
# print("---- get trans mat with translation -----")
# print("trans_mat_without_scale_translation", trans_mat_without_scale_translation.shape)
# print("bones", bones.shape)
# print("bone_lengths", bone_lengths.shape)
translation = local_coords_after_unpose * bone_lengths
# translation = translation.unsqueeze(-1)
# print("translation", translation.shape)
# print(translation)
# add translation along kinematic chain
# no root
lev_1 = [0, 1, 2, 3, 4]
lev_2 = [5, 6, 7, 8, 9]
lev_3 = [10, 11, 12, 13, 14]
lev_4 = [15, 16, 17, 18, 19]
root_trans = translation[:, lev_1] * 0.
lev_1_trans = translation[:, lev_1]
lev_2_trans = translation[:, lev_2] + lev_1_trans
lev_3_trans = translation[:, lev_3] + lev_2_trans
lev_4_trans = translation[:, lev_4] + lev_3_trans
# print("lev_1_trans", lev_1_trans)
# print("lev_3_trans", lev_3_trans, lev_3_trans.shape)
# final_trans = torch.cat([lev_1_trans, lev_2_trans, lev_3_trans, lev_4_trans], dim=1)
final_trans = torch.cat([root_trans, lev_1_trans, lev_2_trans, lev_3_trans], dim=1)
# print("final trans", final_trans, final_trans.shape)
final_trans = final_trans.unsqueeze(-1)
trans_mat = torch.cat([trans_mat_without_scale_translation, final_trans], dim=3)
# print("trans_mat", trans_mat.shape)
last_row = torch.tensor([0., 0., 0., 1.], device=trans_mat.device).repeat(*trans_mat.shape[:2], 1 , 1)
trans_mat = torch.cat([trans_mat, last_row], dim=2)
# start_point = (bones[idx] * bone_lengths[idx])
# print("---- END get trans mat with translation -----")
return trans_mat
# def get_trans_mat_kinematic_chain(self, trans_mat_3_4):
# print("**** kinematic chain ****")
# trans_mat = trans_mat_3_4
# last_row = torch.tensor([0., 0., 0., 1.], device=trans_mat_3_4.device).repeat(*trans_mat_3_4.shape[:2], 1 , 1)
# trans_mat = torch.cat([trans_mat_3_4, last_row], dim=2)
# print("trans_mat", trans_mat.shape)
# # no root
# lev_1 = [0, 1, 2, 3, 4]
# lev_2 = [5, 6, 7, 8, 9]
# lev_3 = [10, 11, 12, 13, 14]
# lev_4 = [15, 16, 17, 18, 19]
# lev_1_mat = trans_mat[:, lev_1, : , :]
# lev_2_mat = torch.matmul(trans_mat[:, lev_2, : , :], lev_1_mat)
# lev_3_mat = torch.matmul(trans_mat[:, lev_3, : , :], lev_2_mat)
# lev_4_mat = torch.matmul(trans_mat[:, lev_4, : , :], lev_3_mat)
# print("lev_1_mat", lev_1_mat.shape)
# print("lev_4_mat", lev_4_mat.shape)
# final_mat = torch.cat([lev_1_mat, lev_2_mat, lev_3_mat, lev_4_mat], dim=1)
# # trans_mat = final_mat
# print("**** END kinematic chain ****")
# return trans_mat
def from_3x3_mat_to_4x4(self, mat_3x3):
last_col = torch.zeros(1, device=mat_3x3.device).repeat(*mat_3x3.shape[:2], 3, 1)
mat_3x4 = torch.cat([mat_3x3, last_col], dim=3)
# print("mat_3x3", mat_3x3.shape)
last_row = torch.tensor([0., 0., 0., 1.], device=mat_3x4.device).repeat(*mat_3x4.shape[:2], 1 , 1)
mat_4x4 = torch.cat([mat_3x4, last_row], dim=2)
return mat_4x4
def compute_adjusted_transpose(self, local_cs, rot_mat):
lev_1 = [0, 1, 2, 3, 4]
lev_2 = [5, 6, 7, 8, 9]
lev_3 = [10, 11, 12, 13, 14]
lev_4 = [15, 16, 17, 18, 19]
lev_1_cs = local_cs[:, lev_1]
lev_2_cs = local_cs[:, lev_2]
lev_2_rot = rot_mat[:, lev_2]
lev_3_cs = torch.matmul(lev_2_rot, local_cs[:, lev_3])
lev_3_rot = torch.matmul(rot_mat[:, lev_3], lev_2_rot)
lev_4_cs = torch.matmul(lev_3_rot, local_cs[:, lev_4])
# lev_3_rot = torch.matmul(rot_mat[:, lev_3], lev_2_rot)
adjust_cs = torch.cat([lev_1_cs, lev_2_cs, lev_3_cs, lev_4_cs], dim=1)
        local_cs_transpose = torch.transpose(local_cs, -2, -1) + 0
        local_cs_transpose[:, lev_3] = torch.matmul(local_cs_transpose[:, lev_3], lev_2_rot)
        local_cs_transpose[:, lev_4] = torch.matmul(local_cs_transpose[:, lev_4], lev_3_rot)
        transpose_cs = torch.transpose(adjust_cs, -2, -1)
        # transpose_cs = torch.inverse(adjust_cs)
        return local_cs_transpose  # transpose_cs
def normalize_root_planes(self, bones, bone_lengths):
root = torch.zeros([3])
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d', title='bones')
plot_local_coord(bones, bone_lengths, root, ax, show=False)
bones_ori = bones + 0
# import pdb; pdb.set_trace()
# fig = plt.figure()
# ax = fig.add_subplot(111, projection='3d', title='bones')
root_plane_norm_mat = torch.eye(3, device=self.dev).repeat(bones.shape[0], 20, 1, 1)
# canonical_angle defines angles between root bone planes
# angle between (n0,n1), (n1,n2), (n2,n3)
# canonical_angle = np.array([0.8, 0.2, 0.2])
canonical_angle = self.root_plane_angles
# canonical_angle = 0
# Use the plane between middle(2) and ring(3) finger as reference plane (ref n2)
n2 = torch.cross(bones[:, 3], bones[:, 2])
# ring and middle (plane n1)
n1 = torch.cross(bones[:, 2], bones[:, 1])
n1_n2_angle = angle2(n1, n2)
# Rotate index finger root bone, apply the same transformation to thumb
index_trans = rotation_matrix(n1_n2_angle - canonical_angle[1], bones[:, 2])
root_plane_norm_mat[:, 1] = index_trans
root_plane_norm_mat[:, 0] = index_trans
# new_index = rotate(bones[:, 1], bones[:, 2], n1_n2_angle - canonical_angle)
# new_thumb = rotate(bones[:, 0], bones[:, 2], n1_n2_angle - canonical_angle)
bones = torch.matmul(root_plane_norm_mat, bones.unsqueeze(-1)).squeeze(-1)
# plot_local_coord(bones, bone_lengths, root, ax)
# thumb and index (plane n0)
n1_after_adjust = torch.cross(bones[:, 2], bones[:, 1])
n0 = torch.cross(bones[:, 1], bones[:, 0])
n0_n1_angle = angle2(n0, n1_after_adjust)
thumb_trans = rotation_matrix(n0_n1_angle - canonical_angle[0], bones[:, 1])
root_plane_norm_mat[:, 0] = torch.matmul(thumb_trans, root_plane_norm_mat[:, 0])
bones[:, 0] = torch.matmul(thumb_trans, bones[:, 0].unsqueeze(-1)).squeeze(-1)
# new_thumb = rotate(bones[:, 0], bones[:, 1], n0_n1_angle - canonical_angle)
# bones[:, 0] = new_thumb
# plot_local_coord(bones, bone_lengths, root, ax)
# ring and pinky (plane n4)
n3 = torch.cross(bones[:, 4], bones[:, 3])
n2_n3_angle = angle2(n2, n3)
pinky_trans = rotation_matrix(canonical_angle[2] - n2_n3_angle, bones[:, 3])
root_plane_norm_mat[:, 4] = torch.matmul(pinky_trans, root_plane_norm_mat[:, 4])
bones[:, 4] = torch.matmul(pinky_trans, bones[:, 4].unsqueeze(-1)).squeeze(-1)
# new_pinky = rotate(bones[:, 4], bones[:, 3], canonical_angle - n2_n3_angle)
# bones[:, 4] = new_pinky
# plot_local_coord(bones, bone_lengths, root, ax, show=False)
# Propagate rotations along kinematic chains
for i in range(5):
for j in range(3):
root_plane_norm_mat[:, (j+1)*5 + i] = root_plane_norm_mat[:, i]
new_bones = torch.matmul(root_plane_norm_mat, bones_ori.unsqueeze(-1)).squeeze(-1)
plot_local_coord(new_bones, bone_lengths, root, ax)
return new_bones, root_plane_norm_mat
def normalize_root_bone_angles(self, bones, bone_lengths):
root = torch.zeros([3])
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d', title='bones angle')
plot_local_coord(bones, bone_lengths, root, ax, show=False)
bones_ori = bones + 0
canonical_angle = 0.2 # 0.1
# angle between (t,i), (i,m), (m,r), (r,p)
# canonical_angle = np.array([0.4, 0.2, 0.2, 0.2])
canonical_angle = self.root_bone_angles
root_angle_norm_mat = torch.eye(3, device=self.dev).repeat(bones.shape[0], 20, 1, 1)
# canonical_angle defines angles between adjacent bones
# Use middle finger (f2) as reference
# Index finger (f1), apply the same transformation to thumb (f0)
n1 = cross(bones[:, 1], bones[:, 2], do_normalize=True)
f2_f1_angle = angle2(bones[:, 2], bones[:, 1])
index_trans = rotation_matrix(f2_f1_angle - canonical_angle[1], n1)
# new_index = rotate(bones[:, 1], n1, f2_f1_angle - canonical_angle)
root_angle_norm_mat[:, 1] = index_trans
root_angle_norm_mat[:, 0] = index_trans
bones[:, 1] = torch.matmul(index_trans, bones[:, 1].unsqueeze(-1)).squeeze(-1)
bones[:, 0] = torch.matmul(index_trans, bones[:, 0].unsqueeze(-1)).squeeze(-1)
# plot_local_coord(bones, bone_lengths, root, ax)
# Thumb (f0)
n0 = cross(bones[:, 0], bones[:, 1], do_normalize=True)
f1_f0_angle = angle2(bones[:, 1], bones[:, 0])
thumb_trans = rotation_matrix(f1_f0_angle - canonical_angle[0], n0)
root_angle_norm_mat[:, 0] = torch.matmul(thumb_trans, root_angle_norm_mat[:, 0])
bones[:, 0] = torch.matmul(thumb_trans, bones[:, 0].unsqueeze(-1)).squeeze(-1)
# plot_local_coord(bones, bone_lengths, root, ax)
# Ring finger (f3), apply the same transformation to pinky finger (f4)
n2 = cross(bones[:, 2], bones[:, 3], do_normalize=True)
f3_f2_angle = angle2(bones[:, 3], bones[:, 2])
ring_trans = rotation_matrix(canonical_angle[2] - f3_f2_angle, n2)
root_angle_norm_mat[:, 3] = ring_trans
root_angle_norm_mat[:, 4] = ring_trans
bones[:, 3] = torch.matmul(ring_trans, bones[:, 3].unsqueeze(-1)).squeeze(-1)
bones[:, 4] = torch.matmul(ring_trans, bones[:, 4].unsqueeze(-1)).squeeze(-1)
# plot_local_coord(bones, bone_lengths, root, ax)
# Pinky finger (f4)
n3 = cross(bones[:, 3], bones[:, 4], do_normalize=True)
f4_f3_angle = angle2(bones[:, 4], bones[:, 3])
pinky_trans = rotation_matrix(canonical_angle[3] - f4_f3_angle, n3)
root_angle_norm_mat[:, 4] = torch.matmul(pinky_trans, root_angle_norm_mat[:, 4])
bones[:, 4] = torch.matmul(pinky_trans, bones[:, 4].unsqueeze(-1)).squeeze(-1)
# plot_local_coord(bones, bone_lengths, root, ax)
# Propagate rotations along kinematic chains
for i in range(5):
for j in range(3):
root_angle_norm_mat[:, (j+1)*5 + i] = root_angle_norm_mat[:, i]
new_bones = torch.matmul(root_angle_norm_mat, bones_ori.unsqueeze(-1)).squeeze(-1)
plot_local_coord(bones, bone_lengths, root, ax)
return new_bones, root_angle_norm_mat
def forward(self, joints, kp3d_is_right):
assert joints.size(1) == 21, "Number of joints needs to be 21"
nrb_idx = self.nrb_idx
# Pre-process the joints
joints = self.preprocess_joints(joints, kp3d_is_right)
# Compute the bone vectors
bones, bone_lengths, kp_to_bone_mat = self.kp3D_to_bones(joints)
# Normalize the root bone planes
plane_normalized_bones, root_plane_norm_mat = self.normalize_root_planes(bones, bone_lengths)
# Normalize angles between root bones
angle_normalized_bones, root_angle_norm_mat = self.normalize_root_bone_angles(plane_normalized_bones, bone_lengths)
bones = angle_normalized_bones
# Combine plane normalization and angle normalization
root_bones_norm_mat = torch.matmul(root_angle_norm_mat, root_plane_norm_mat) # root_plane_norm_mat
# Compute the local coordinate systems for each bone
        # This assumes the root bones are fixed
local_cs = self.compute_local_coordinate_system(bones)
# Compute the local coordinates
local_coords = self.compute_local_coordinates(bones, local_cs)
# root = torch.zeros([1,21,3])
root = torch.zeros([3])
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d', title='local_coords')
# plot_local_coord_system(local_cs, bone_lengths, root, ax)
plot_local_coord(local_coords, bone_lengths, root, ax)
        # Compute the rotation around the y and rotated-x axes
rot_angles = self.compute_rot_angles(local_coords)
# print("rot angles", rot_angles)
# Compute rotation matrix
rot_mat = self.compute_rotation_matrix(rot_angles, local_coords)
# print("rot mat", rot_mat.shape)
# print("local_cs", local_cs.shape)
        # local_cs_transpose = torch.transpose(local_cs, -2, -1)
        local_cs_transpose = self.compute_adjusted_transpose(local_cs, rot_mat)
        # print("local_cs_transpose", local_cs_transpose.shape)
        # should_be_i = torch.matmul(local_cs_transpose, local_cs)
        # print("sanity check", should_be_i)
        # print("sanity check transpose", torch.bmm(local_cs_transpose, local_cs))
        trans_mat_without_scale_translation = torch.matmul(local_cs_transpose, torch.matmul(rot_mat, local_cs))
# print("trans_mat_without_scale_translation", trans_mat_without_scale_translation)
####
# local_coords_no_back_proj = self.compute_local_coordinates(bones, torch.matmul(rot_mat, local_cs))
# print("--- local_coords_no_back_proj ---", local_coords_no_back_proj)
# fig = plt.figure()
# ax = fig.add_subplot(111, projection='3d')
# plot_local_coord(local_coords_no_back_proj, bone_lengths, root, ax)
# Compute local coordinates of each bone after unposing to adjust keypoint translation
local_coords_after_unpose = self.compute_local_coordinates(bones, trans_mat_without_scale_translation)
# fig = plt.figure()
# ax = fig.add_subplot(111, projection='3d')
# # # plot_local_coord(local_coords_no_back_proj, bone_lengths, root, ax)
# plot_local_coord(local_coords_after_unpose, bone_lengths, root, ax) # , show=False)
#### normal transpose
        # local_cs_normal_transpose = torch.transpose(local_cs, -2, -1)
        # trans_mat_normal_transpose = torch.matmul(local_cs_normal_transpose, torch.matmul(rot_mat, local_cs))
        # # print("local_cs_transpose", local_cs_transpose.shape)
# local_coords_normal_transpose = self.compute_local_coordinates(bones, trans_mat_normal_transpose)
# fig = plt.figure()
# ax = fig.add_subplot(111, projection='3d')
# plot_local_coord(local_coords_no_back_proj, bone_lengths, root, ax)
# print("normal transpose")
# plot_local_coord(local_coords_normal_transpose, bone_lengths, root, ax)
# print("bone_lengths", bone_lengths, bone_lengths.shape)
# scale_mat = self.get_scale_mat_from_bone_lengths(bone_lengths)
# print("scale_mat", scale_mat, scale_mat.shape)
# trans_mat_without_translation = torch.matmul(scale_mat, trans_mat_without_scale_translation)
# This return 3 x 4 transformation matrix with translation
# trans_mat_with_translation = self.get_trans_mat_with_translation(
# trans_mat_without_scale_translation, local_coords_after_unpose, bones, bone_lengths)
# This return 4 x 4 transformation matrix
# trans_mat_kinematic_chain = self.get_trans_mat_kinematic_chain(trans_mat_with_translation)
trans_mat_without_scale_translation = self.from_3x3_mat_to_4x4(trans_mat_without_scale_translation)
# Convert bones back to keypoints
inv_scale_trans = self.compute_bone_to_kp_mat(bone_lengths, local_coords_after_unpose)
# Combine everything into one transformation matrix from posed keypoints to unposed keypoints
# import pdb; pdb.set_trace()
# root_bones_norm_mat
trans_mat = torch.matmul(self.from_3x3_mat_to_4x4(root_bones_norm_mat), kp_to_bone_mat)
trans_mat = torch.matmul(trans_mat_without_scale_translation, trans_mat)
# trans_mat = torch.matmul(trans_mat_without_scale_translation, kp_to_bone_mat)
trans_mat = torch.matmul(inv_scale_trans, trans_mat)
        # Add root keypoint transformation
# import pdb; pdb.set_trace()
root_trans = torch.eye(4, device=trans_mat.device).reshape(1, 1, 4, 4).repeat(trans_mat.shape[0], 1, 1, 1)
trans_mat = torch.cat([root_trans, trans_mat], dim=1)
bone_lengths = torch.cat([torch.ones([trans_mat.shape[0], 1, 1], device=trans_mat.device), bone_lengths], dim=1)
# Compute the angle loss
# def interval_loss(x, min_v, max_v):
# if min_v.dim() == 1:
# min_v = min_v.unsqueeze(0)
# max_v = max_v.unsqueeze(0)
# zero = self.zero
# return (torch.max(min_v - x, zero) + torch.max(x - max_v, zero))
# Discard the root bones
nrb_rot_angles = rot_angles[:, nrb_idx]
# Compute the polygon distance
# poly_d = self.polygon_distance(nrb_rot_angles)
# Compute the final loss
# per_batch_loss = poly_d.mean(1)
# angle_loss = per_batch_loss.mean()
# Storage for debug purposes
# self.per_batch_loss = per_batch_loss.detach()
self.bones = bones.detach()
self.local_cs = local_cs.detach()
self.local_coords = local_coords.detach()
self.nrb_rot_angles = nrb_rot_angles.detach()
# self.loss_per_sample = poly_d.detach()
return trans_mat , bone_lengths # rot_mat # rot_angles # angle_loss
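# Usage sketch (comment-only; note that forward() currently opens matplotlib
# debug plots, so run it with a display or with plt.ion() enabled):
# >>> converter = PoseConverter(dev=torch.device('cpu'))
# >>> joints = torch.rand(1, 21, 3)
# >>> is_right = torch.ones(1)
# >>> trans_mat, bone_lengths = converter(joints, is_right)
# >>> trans_mat.shape, bone_lengths.shape
# (torch.Size([1, 21, 4, 4]), torch.Size([1, 21, 1]))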
if __name__ == '__main__':
import sys
sys.path.append('.')
import matplotlib.pyplot as plt
plt.ion()
from pose.utils.visualization_2 import plot_fingers
import yaml
from tqdm import tqdm
from prototypes.utils import get_data_reader
dev = torch.device('cpu')
# Load constraints
cfg_path = "hp_params/all_params.yaml"
    hand_constraints = yaml.safe_load(open(cfg_path).read())
    # Hand parameters
    for k,v in hand_constraints.items():
        if isinstance(v, list):
            hand_constraints[k] = torch.from_numpy(np.array(v)).float()
    angle_poly = hand_constraints['convex_hull']
    # NOTE: AngleLoss is not defined in this file; this legacy test block
    # predates the PoseConverter refactor and will not run as-is
    angle_loss = AngleLoss(angle_poly, dev=dev)
##### Consistency test. Make sure loss is 0 for all samples
# WARNING: This will fail because we approximate the angle polygon
# Get data reader
data_reader = get_data_reader(ds_name='stb', is_train=True)
# Make sure error is 0 for all samples of the training set
tol = 1e-8
for i in tqdm(range(len(data_reader))):
sample = data_reader[i]
kp3d = sample["joints3d"].view(-1,21,3)
is_right = sample["kp3d_is_right"].view(-1,1)
loss = angle_loss(kp3d, is_right)
if loss > tol:
print("ERROR")
plot_fingers(kp3d[0])
import pdb;pdb.set_trace()
        # Shifting shouldn't cause an issue either
kp3d_center = kp3d - kp3d[:,0:1]
loss = angle_loss(kp3d_center, is_right)
if loss > tol:
print("ERROR")
plot_fingers(kp3d_center[0])
import pdb;pdb.set_trace()
# Scaling should be 0 error too
kp3d_scale = kp3d * 10
loss = angle_loss(kp3d_scale, is_right)
if loss > tol:
print("ERROR")
plot_fingers(kp3d_scale[0])
import pdb;pdb.set_trace()
##### SGD Test
# torch.manual_seed(4)
# x = torch.zeros(1,21,3)
# x[:,1:6] = torch.rand(1,5,3)
# is_right = torch.tensor(1.0).view(1,1)
# x.requires_grad_()
# print_freq = 100
# lr = 1e-1
# ax = None
# i = 0
# while True:
# # while i < 100:
# loss = root_bone_loss(x, is_right)
# loss.backward()
# if (i % print_freq) == 0:
# print("It: %d\tLoss: %.08f" % (i,loss.item()))
# to_plot = x[0].clone()
# ax = plot_fingers(to_plot, ax=ax, set_view=False)
# to_plot = to_plot.detach().numpy()
# ax.plot(to_plot[1:6,0], to_plot[1:6,1], to_plot[1:6,2], 'b')
# plt.show()
# plt.pause(0.001)
# if i == 0:
# # Pause for initial conditions
# input()
# with torch.no_grad():
# x = x - lr * x.grad
# x.requires_grad_()
# i += 1
| 54,446 | 40.753834 | 130 | py |
Im2Hands | Im2Hands-main/dependencies/halo/halo_adapter/converter.py | # ------------------------------------------------------------------------------
# Copyright (c) 2019 Adrian Spurr
# Licensed under the GPL License.
# Written by Adrian Spurr
# ------------------------------------------------------------------------------
import numpy as np
import torch
import torch.nn as nn
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from dependencies.halo.halo_adapter.transform_utils import xyz_to_xyz1, pad34_to_44
eps = torch.tensor(1e-6) # epsilon
def batch_dot_product(batch_1, batch_2, keepdim=False):
""" Performs the batch-wise dot product
"""
# n_elem = batch_1.size(1)
# batch_size = batch_1.size(0)
# # Idx of the diagonal
# diag_idx = torch.tensor([[i,i] for i in range(n_elem)]).long()
# # Perform for each element of batch a matmul
# batch_prod = torch.matmul(batch_1, batch_2.transpose(2,1))
# # Extract the diagonal
# batch_dot_prod = batch_prod[:, diag_idx[:,0], diag_idx[:,1]]
# if keepdim:
# batch_dot_prod = batch_dot_prod.reshape(batch_size, -1, 1)
batch_dot_prod = (batch_1 * batch_2).sum(-1, keepdim=keepdim)
return batch_dot_prod
def rotate_axis_angle(v, k, theta):
# Rotate v around k by theta using rodrigues rotation formula
v_rot = v * torch.cos(theta) + \
torch.cross(k,v.float())*torch.sin(theta) + \
k*batch_dot_product(k,v, True)*(1-torch.cos(theta))
return v_rot
def clip_values(x, min_v, max_v):
clipped = torch.min(torch.max(x, min_v), max_v)
return clipped
def pyt2np(x):
if isinstance(x, torch.Tensor):
x = x.cpu().detach().numpy()
return x
def normalize(bv, eps=1e-8): # epsilon
"""
Normalizes the last dimension of bv such that it has unit length in
euclidean sense
"""
eps_mat = torch.tensor(eps, device=bv.device)
norm = torch.max(torch.norm(bv, dim=-1, keepdim=True), eps_mat)
bv_n = bv / norm
return bv_n
def angle2(v1, v2):
"""
Numerically stable way of calculating angles.
See: https://scicomp.stackexchange.com/questions/27689/numerically-stable-way-of-computing-angles-between-vectors
"""
eps = 1e-10 # epsilon
eps_mat = torch.tensor([eps], device=v1.device)
n_v1 = v1 / torch.max(torch.norm(v1, dim=-1, keepdim=True), eps_mat)
n_v2 = v2 / torch.max(torch.norm(v2, dim=-1, keepdim=True), eps_mat)
a = 2 * torch.atan2(
torch.norm(n_v1 - n_v2, dim=-1), torch.norm(n_v1 + n_v2, dim=-1)
)
return a
def signed_angle(v1, v2, ref):
"""
    Calculate the signed angle of v1 with respect to v2.
    The sign is positive if v1 x v2 points in the same direction as ref.
"""
def dot(x, y):
return (x * y).sum(-1)
angles = angle2(v1, v2)
cross_v1_v2 = cross(v1, v2)
# Compute sign
cond = (dot(ref, cross_v1_v2) < 0).float()
angles = cond * (-angles) + (1 - cond) * angles
# import pdb; pdb.set_trace()
return angles
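# Example (comment-only): the angle from x to y about +z is positive and flips
# sign when the argument order is reversed.
# >>> x = torch.tensor([[1., 0., 0.]])
# >>> y = torch.tensor([[0., 1., 0.]])
# >>> z = torch.tensor([[0., 0., 1.]])
# >>> signed_angle(x, y, z)  # ~ tensor([1.5708], dtype=torch.float64)
# >>> signed_angle(y, x, z)  # ~ tensor([-1.5708], dtype=torch.float64)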
def get_alignment_mat(v1, v2):
"""
Returns the rotation matrix R, such that R*v1 points in the same direction as v2
"""
axis = cross(v1, v2, do_normalize=True)
ang = angle2(v1, v2)
R = rotation_matrix(ang, axis)
return R
def transform_to_canonical(kp3d, is_right, skeleton='bmc'):
"""Undo global translation and rotation
"""
normalization_mat = compute_canonical_transform(kp3d.double(), is_right.double(), skeleton=skeleton)
kp3d = xyz_to_xyz1(kp3d)
# import pdb
# pdb.set_trace()
kp3d_canonical = torch.matmul(normalization_mat.unsqueeze(1), kp3d.unsqueeze(-1))
kp3d_canonical = kp3d_canonical.squeeze(-1)
# Pad T from 3x4 mat to 4x4 mat
normalization_mat = pad34_to_44(normalization_mat)
return kp3d_canonical, normalization_mat
def compute_canonical_transform(kp3d, is_right, skeleton='bmc'):
"""
Returns a transformation matrix T which when applied to kp3d performs the following
operations:
1) Center at the root (kp3d[:,0])
2) Rotate such that the middle root bone points towards the y-axis
3) Rotates around the x-axis such that the YZ-projection of the normal of the plane
spanned by middle and index root bone points towards the z-axis
"""
assert len(kp3d.shape) == 3, "kp3d need to be BS x 21 x 3"
assert is_right.shape[0] == kp3d.shape[0]
is_right = is_right.type(torch.bool)
dev = kp3d.device
bs = kp3d.shape[0]
kp3d = kp3d.clone().detach()
# Flip so that we compute the correct transformations below
kp3d[~is_right, :, 1] *= -1
# Align root
tx = kp3d[:, 0, 0]
ty = kp3d[:, 0, 1]
tz = kp3d[:, 0, 2]
# Translation
T_t = torch.zeros((bs, 3, 4), device=dev)
T_t[:, 0, 3] = -tx
T_t[:, 1, 3] = -ty
T_t[:, 2, 3] = -tz
T_t[:, 0, 0] = 1
T_t[:, 1, 1] = 1
T_t[:, 2, 2] = 1
# Align middle root bone with -y-axis
# x_axis = torch.tensor([[1.0, 0.0, 0.0]], device=dev).expand(bs, 3) # FIXME
y_axis = torch.tensor([[0.0, -1.0, 0.0]], device=dev).expand(bs, 3)
v_mrb = normalize(kp3d[:, 3] - kp3d[:, 0])
R_1 = get_alignment_mat(v_mrb, y_axis)
# Align x-y plane along plane spanned by index and middle root bone of the hand
# after R_1 has been applied to it
v_irb = normalize(kp3d[:, 2] - kp3d[:, 0])
normal = cross(v_mrb, v_irb).view(-1, 1, 3)
normal_rot = torch.matmul(normal, R_1.transpose(1, 2)).view(-1, 3)
z_axis = torch.tensor([[0.0, 0.0, 1.0]], device=dev).expand(bs, 3)
R_2 = get_alignment_mat(normal_rot, z_axis)
# Include the flipping into the transformation
T_t[~is_right, 1, 1] = -1
# Compute the canonical transform
T = torch.bmm(R_2.double(), torch.bmm(R_1.double(), T_t.double()))
return T
def set_equal_xyz_scale(ax, X, Y, Z):
max_range = np.array([X.max() - X.min(), Y.max() - Y.min(), Z.max() - Z.min()]).max() / 2.0
mid_x = (X.max() + X.min()) * 0.5
mid_y = (Y.max() + Y.min()) * 0.5
mid_z = (Z.max() + Z.min()) * 0.5
ax.set_xlim(mid_x - max_range, mid_x + max_range)
ax.set_ylim(mid_y - max_range, mid_y + max_range)
ax.set_zlim(mid_z - max_range, mid_z + max_range)
return ax
def set_axes_equal(ax: plt.Axes):
"""Set 3D plot axes to equal scale.
Make axes of 3D plot have equal scale so that spheres appear as
spheres and cubes as cubes. Required since `ax.axis('equal')`
and `ax.set_aspect('equal')` don't work on 3D.
"""
limits = np.array([
ax.get_xlim3d(),
ax.get_ylim3d(),
ax.get_zlim3d(),
])
origin = np.mean(limits, axis=1)
radius = 0.5 * np.max(np.abs(limits[:, 1] - limits[:, 0]))
_set_axes_radius(ax, origin, radius)
def _set_axes_radius(ax, origin, radius):
x, y, z = origin
ax.set_xlim3d([x - radius, x + radius])
ax.set_ylim3d([y - radius, y + radius])
ax.set_zlim3d([z - radius, z + radius])
def plot_local_coord_system(local_cs, bones, bone_lengths, root, ax):
local_cs = pyt2np(local_cs.squeeze())
bones = pyt2np(bones.squeeze())
bone_lengths = pyt2np(bone_lengths.squeeze())
root = pyt2np(root.squeeze())
col = ['r','g','b']
n_fingers = 5
n_b_per_f = 4
print("local_cs", local_cs)
for k in range(n_fingers):
start_point = root.copy()
for i in range(n_b_per_f):
idx = i * n_fingers + k
for j in range(3):
ax.quiver(start_point[0], start_point[1], start_point[2],
local_cs[idx, j, 0], local_cs[idx, j, 1], local_cs[idx, j, 2],
color=col[j], length=10.1)
old_start_point = start_point.copy()
start_point += (bones[idx] * bone_lengths[idx])
ax.plot([old_start_point[0], start_point[0]],
[old_start_point[1], start_point[1]],
[old_start_point[2], start_point[2]],
color="black")
set_axes_equal(ax)
plt.show()
def plot_local_coord(local_coords, bone_lengths, root, ax, show=True):
local_coords = pyt2np(local_coords.squeeze())
# bones = pyt2np(bones.squeeze())
bone_lengths = pyt2np(bone_lengths.squeeze())
root = pyt2np(root.squeeze())
# root = root[0]
print("root", root.shape)
col = ['r', 'g', 'b']
n_fingers = 5
n_b_per_f = 4
# print("local_cs", local_coords)
# import pdb; pdb.set_trace()
for k in range(n_fingers):
start_point = root.copy()
for i in range(n_b_per_f):
idx = i * n_fingers + k
# for j in range(3):
# print("local_coords[idx]", local_coords[idx])
local_bone = (local_coords[idx] * bone_lengths[idx])
target_point = start_point + local_bone
# print("start_point", start_point, "to", local_bone)
cc = 'r' if show else 'b'
ax.plot([start_point[0], target_point[0]],
[start_point[1], target_point[1]],
[start_point[2], target_point[2]],
color=cc)
ax.scatter([target_point[0]], [target_point[1]], [target_point[2]], s=10.0)
# start_point += (bones[idx] * bone_lengths[idx])
start_point += (local_bone)
set_axes_equal(ax)
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('Z')
if show:
plt.show()
def rotation_matrix(angles, axis):
"""
Converts Rodrigues rotation formula into a rotation matrix
"""
eps = torch.tensor(1e-6) # epsilon
# print("norm", torch.abs(torch.sum(axis ** 2, dim=-1) - 1))
try:
assert torch.any(
torch.abs(torch.sum(axis ** 2, dim=-1) - 1) < eps
), "axis must have unit norm"
    except AssertionError:
        # NOTE: torch.any makes this a loose check; it passes as long as at
        # least one axis in the batch has unit norm, and only warns otherwise
        print("Warning: axis does not have unit norm")
# import pdb
# pdb.set_trace()
dev = angles.device
batch_size = angles.shape[0]
sina = torch.sin(angles).view(batch_size, 1, 1)
cosa_1_minus = (1 - torch.cos(angles)).view(batch_size, 1, 1)
a_batch = axis.view(batch_size, 3)
o = torch.zeros((batch_size, 1), device=dev)
a0 = a_batch[:, 0:1]
a1 = a_batch[:, 1:2]
a2 = a_batch[:, 2:3]
cprod = torch.cat((o, -a2, a1, a2, o, -a0, -a1, a0, o), 1).view(batch_size, 3, 3)
I = torch.eye(3, device=dev).view(1, 3, 3)
R1 = cprod * sina
R2 = cprod.bmm(cprod) * cosa_1_minus
R = I + R1 + R2
return R
def cross(bv_1, bv_2, do_normalize=False):
"""
Computes the cross product of the last dimension between bv_1 and bv_2.
If normalize is True, it normalizes the vector to unit length.
"""
cross_prod = torch.cross(bv_1.double(), bv_2.double(), dim=-1)
if do_normalize:
cross_prod = normalize(cross_prod)
return cross_prod
def rotate(v, ax, rad):
"""
Uses Rodrigues rotation formula
Rotates the vectors in v around the axis in ax by rad radians. These
operations are applied on the last dim of the arguments. The parameter rad
is given in radian
"""
# print("v", v, v.shape)
# print("ax", ax, ax.shape)
# print("rad", rad)
sin = torch.sin
cos = torch.cos
v_rot = (
v * cos(rad) + cross(ax, v) * sin(rad) + ax * batch_dot_product(ax, v, True) * (1 - cos(rad))
)
return v_rot
class PoseConverter(nn.Module):
def __init__(self, store=False, dev='cpu', straight_hand=True, canonical_pose=None):
super().__init__()
# assert angle_poly.shape[0] == 15
if not dev:
dev = torch.device("cuda:0" if torch.cuda.is_available() else 'cpu')
self.store = store
self.dev = dev
# self.angle_poly = angle_poly.to(dev)
self.idx_1 = torch.arange(1,21).to(dev).long()
self.idx_2 = torch.zeros(20).to(dev).long()
self.idx_2[:5] = 0
self.idx_2[5:] = torch.arange(1,16)
# For preprocess joints
self.shift_factor = 0 # 0.5
# For poly distance
# n_poly = angle_poly.shape[0]
# n_vert = angle_poly.shape[1]
# idx_1 = torch.arange(n_vert)
# idx_2 = idx_1.clone()
# idx_2[:-1] = idx_1[1:]
# idx_2[-1] = idx_1[0]
# # n_poly x n_edges x 2
# v1 = angle_poly[:,idx_1].to(dev)
# v2 = angle_poly[:,idx_2].to(dev)
# # n_poly x n_edges x 2
# edges = (v2 - v1).view(1, n_poly, n_vert, 2)
# Store
# self.v1 = v1
# self.v2 = v2
# self.n_poly = n_poly
# self.n_vert = n_vert
# self.edges = edges
self.dot = lambda x,y: (x*y).sum(-1)
# self.l2 = self.dot(edges,edges)
self.zero = torch.zeros((1), device=dev)
self.one = torch.ones((1), device=dev)
# For angle computation
self.rb_idx = torch.arange(5, device=dev).long()
self.nrb_idx_list = []
for i in range(2, 4):
self.nrb_idx_list += [torch.arange(i*5, (i+1)*5, device=dev).long()]
self.nrb_idx = torch.arange(5,20, device=dev).long()
self.one = torch.ones((1), device=dev)
self.zero = torch.zeros((1), device=dev)
self.eps_mat = torch.tensor(1e-9, device=dev) # epsilon
self.eps = eps
self.eps_poly = 1e-2
self.y_axis = torch.tensor([[[0,1.,0]]], device=dev)
self.x_axis = torch.tensor([[[1.,0,0]]], device=dev)
self.z_axis = torch.tensor([[[0],[0],[1.]]], device=dev)
self.xz_mat = torch.tensor([[[[1.,0,0],[0,0,0],[0,0,1]]]], device=dev)
self.yz_mat = torch.tensor([[[[0,0,0],[0,1.,0],[0,0,1]]]], device=dev)
self.flipLR = torch.tensor([[[-1.,1.,1.]]], device=dev)
self.bones = None
self.bone_lengths = None
self.local_cs = None
self.local_coords = None
self.rot_angles = None
if canonical_pose is not None:
self.initialize_canonical_pose(canonical_pose)
else:
# initialize canonical pose with some value
self.root_plane_angles = np.array([0.8, 0.2, 0.2]) # np.array([0.0, 0.0, 0.0])
self.root_bone_angles = np.array([0.4, 0.2, 0.2, 0.2])
if straight_hand:
# For NASA - no additional rotation
self.canonical_rot_angles = torch.tensor([[[0, 0],
[0, 0],
[0, 0],
[0, 0],
[0, 0],
[0, 0],
[0, 0],
[0, 0],
[0, 0],
[0, 0],
[0, 0],
[0, 0],
[0, 0],
[0, 0],
[0, 0],
[0, 0],
[0, 0],
[0, 0],
[0, 0],
[0, 0]]]
)
else:
# # For MANO
self.canonical_rot_angles = torch.tensor([[[-6.8360e-01, 2.8175e-01],
[-1.3016e+00, -1.4236e-03],
[-1.5708e+00, -1.4236e-03],
[-1.8425e+00, 7.4600e-02],
[-2.0746e+00, 1.8700e-01],
[-4.2529e-01, 1.4156e-01],
[-1.5473e-01, 1.8678e-01],
[-7.1449e-02, 1.4358e-01],
[-8.7801e-02, -1.5822e-01],
[-1.4013e-01, 3.4336e-02],
[ 3.3824e-01, 1.6999e-01],
[ 1.8830e-01, 4.8844e-02],
[ 1.1238e-01, -8.7551e-03],
[ 1.1125e-01, 1.6023e-01],
[ 5.3791e-02, -4.2887e-02],
[-1.0314e-01, -1.2587e-02],
[-9.8003e-02, -1.7479e-01],
[-6.6223e-02, 2.9800e-02],
[-2.4101e-01, -5.5725e-02],
[-1.3926e-01, -8.2840e-02]]]
)
def initialize_canonical_pose(self, canonical_joints):
        # canonical_joints must be of size [B, 21, 3]
# Pre-process the joints
joints = self.preprocess_joints(canonical_joints, torch.ones(canonical_joints.shape[0], device=canonical_joints.device))
# Compute the bone vectors
bones, bone_lengths, kp_to_bone_mat = self.kp3D_to_bones(joints)
self.root_plane_angles = self._compute_root_plane_angle(bones) # np.array([0.8, 0.2, 0.2])
self.root_bone_angles = self._compute_root_bone_angle(bones) # np.array([0.4, 0.2, 0.2, 0.2])
# Compute the local coordinate systems for each bone
        # This assumes the root bones are fixed
local_cs = self.compute_local_coordinate_system(bones)
# Compute the local coordinates
local_coords = self.compute_local_coordinates(bones, local_cs)
        # Compute the rotation around the y and rotated-x axis
self.canonical_rot_angles = self.compute_rot_angles(local_coords)
        # check dimensions
        # import pdb; pdb.set_trace()
def _compute_root_plane_angle(self, bones):
root_plane_angle = np.zeros(3)
# canonical_angle defines angles between root bone planes
# angle between (n0,n1), (n1,n2), (n2,n3)
# middle and ring (plane n2)
n2 = torch.cross(bones[:, 3], bones[:, 2])
# index and middle (plane n1)
n1 = torch.cross(bones[:, 2], bones[:, 1])
root_plane_angle[1] = angle2(n1, n2).squeeze(0)
# thumb and index (plane n0)
n0 = torch.cross(bones[:, 1], bones[:, 0])
root_plane_angle[0] = angle2(n0, n1).squeeze(0)
        # ring and pinky (plane n3)
n3 = torch.cross(bones[:, 4], bones[:, 3])
root_plane_angle[2] = angle2(n2, n3).squeeze(0)
return root_plane_angle
def _compute_root_bone_angle(self, bones):
root_bone_angles = np.zeros(4)
root_bone_angles[0] = angle2(bones[:, 1], bones[:, 0]).squeeze(0)
root_bone_angles[1] = angle2(bones[:, 2], bones[:, 1]).squeeze(0)
root_bone_angles[2] = angle2(bones[:, 3], bones[:, 2]).squeeze(0)
root_bone_angles[3] = angle2(bones[:, 4], bones[:, 3]).squeeze(0)
return root_bone_angles
def kp3D_to_bones(self, kp_3D):
"""
Converts from joints to bones
"""
dev = self.dev
eps_mat = self.eps_mat
batch_size = kp_3D.shape[0]
bones = kp_3D[:, self.idx_1] - kp_3D[:, self.idx_2] # .detach()
bone_lengths = torch.max(torch.norm(bones, dim=2, keepdim=True), eps_mat)
bones = bones / bone_lengths
translate = torch.eye(4, device=dev).repeat(batch_size, 20, 1, 1)
# print("translate", translate.shape)
# print("kp 3D", kp_3D.shape)
translate[:, :20, :3, 3] = -1. * kp_3D[:, self.idx_2]
# print(translate.detach())
scale = torch.eye(4, device=dev).repeat(batch_size, 20, 1, 1)
# print("bone_lengths", bone_lengths.shape)
scale = scale * 1. / bone_lengths.unsqueeze(-1)
scale[:, :, 3, 3] = 1.0
# print("scale", scale)
kp_to_bone_mat = torch.matmul(scale.double(), translate.double())
# print(kp_to_bone_mat)
# import pdb;pdb.set_trace()
# assert False
return bones, bone_lengths, kp_to_bone_mat
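    def _kp_to_bone_check(self, kp_3D):
        # Illustrative sketch (added for clarity, not used by the pipeline;
        # assumes kp_3D lives on self.dev): applying kp_to_bone_mat to the
        # homogeneous child keypoints should reproduce the unit bone vectors.
        bones, bone_lengths, kp_to_bone_mat = self.kp3D_to_bones(kp_3D)
        child = kp_3D[:, self.idx_1]                                   # [B, 20, 3]
        child_h = torch.cat([child, torch.ones_like(child[..., :1])], dim=-1)
        rec = torch.matmul(kp_to_bone_mat, child_h.double().unsqueeze(-1))
        rec = rec.squeeze(-1)[..., :3]
        assert torch.allclose(rec.float(), bones.float(), atol=1e-5)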
def compute_bone_to_kp_mat(self, bone_lengths, local_coords_canonical):
bone_to_kp_mat = torch.eye(4, device=bone_lengths.device).repeat(*bone_lengths.shape[:2], 1, 1)
# scale
bone_to_kp_mat = bone_to_kp_mat * bone_lengths.unsqueeze(-1)
bone_to_kp_mat[:, :, 3, 3] = 1.0
# print("bone_to_kp_mat", bone_to_kp_mat, bone_to_kp_mat.shape)
# assert False
# add translation along kinematic chain
# no root
lev_1 = [0, 1, 2, 3, 4]
lev_2 = [5, 6, 7, 8, 9]
lev_3 = [10, 11, 12, 13, 14]
lev_4 = [15, 16, 17, 18, 19]
bones_scaled = local_coords_canonical * bone_lengths
# print("bone length shape", bone_lengths.shape)
lev_1_trans = torch.zeros([bone_lengths.shape[0], 5, 3], device=bone_lengths.device)
lev_2_trans = bones_scaled[:, lev_1]
lev_3_trans = bones_scaled[:, lev_2] + lev_2_trans
lev_4_trans = bones_scaled[:, lev_3] + lev_3_trans
translation = torch.cat([lev_1_trans, lev_2_trans, lev_3_trans, lev_4_trans], dim=1)
# print("translation", translation.shape)
bone_to_kp_mat[:, :, :3, 3] = translation # [:, :, :]
# print(bone_to_kp_mat)
# assert False
return bone_to_kp_mat
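    def _chain_translation_sketch(self):
        # Illustrative sketch of the level-wise accumulation above (added for
        # clarity, not called anywhere): with unit bone lengths and all bones
        # along +z, the per-level translations are 0, 1, 2, 3 along z.
        bl = torch.ones(1, 20, 1)
        lc = torch.zeros(1, 20, 3)
        lc[..., 2] = 1.0
        mat = self.compute_bone_to_kp_mat(bl, lc)
        # joint 15 is the first bone of the deepest level (lev_4)
        assert torch.allclose(mat[0, 15, :3, 3], torch.tensor([0., 0., 3.]))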
def compute_local_coordinate_system(self, bones):
dev = self.dev
dot = batch_dot_product
n_fingers = 5
batch_size = bones.size(0)
n_bones = bones.size(1)
rb_idx = self.rb_idx
# rb_idx_bin = self.rb_idx_bin
nrb_idx_list = self.nrb_idx_list
one = self.one
eps = self.eps
eps_mat = self.eps_mat
xz_mat = self.xz_mat
z_axis = self.z_axis
bs = bones.size(0)
y_axis = self.y_axis.repeat(bs, 5, 1)
x_axis = self.x_axis.repeat(bs, 5, 1)
# Get the root bones
root_bones = bones[:, rb_idx]
# Compute the plane normals for each neighbouring root bone pair
# Compute the plane normals directly
plane_normals = torch.cross(root_bones[:,:-1], root_bones[:,1:], dim=2)
# Compute the plane normals flipped (sometimes gives better grad)
# WARNING: Uncomment flipping below
# plane_normals = torch.cross(root_bones[:,1:], root_bones[:,:-1], dim=2)
# Normalize them
plane_norms = torch.norm(plane_normals, dim=2, keepdim=True)
plane_norms = torch.max(plane_norms, eps_mat)
plane_normals = plane_normals / plane_norms
# Define the normals of the planes on which the fingers reside (model assump.)
finger_plane_norms = torch.zeros((batch_size, n_fingers, 3), device=dev)
finger_plane_norms[:,0] = plane_normals[:,0]
finger_plane_norms[:,1] = plane_normals[:,1]
finger_plane_norms[:,2] = (plane_normals[:,1] + plane_normals[:,2]) / 2
finger_plane_norms[:,3] = (plane_normals[:,2] + plane_normals[:,3]) / 2
finger_plane_norms[:,4] = plane_normals[:,3]
# Flip the normals s.t they look towards the palm of the hands
# finger_plane_norms = -finger_plane_norms
# Root bones are in the global coordinate system
coord_systems = torch.zeros((batch_size, n_bones, 3, 3), device=dev)
# Root bone coordinate systems
coord_systems[:, rb_idx] = torch.eye(3, device=dev)
# Root child bone coordinate systems
z = bones[:, rb_idx]
y = torch.cross(bones[:,rb_idx].double(), finger_plane_norms.double())
x = torch.cross(y,z)
# Normalize to unit length
x_norm = torch.max(torch.norm(x, dim=2, keepdim=True), eps_mat)
x = x / x_norm
y_norm = torch.max(torch.norm(y, dim=2, keepdim=True), eps_mat)
y = y / y_norm
# Parent bone is already normalized
# z = z / torch.norm(z, dim=2, keepdim=True)
# Assign them to the coordinate system
coord_systems[:, rb_idx + 5, 0] = x.float()
coord_systems[:, rb_idx + 5, 1] = y.float()
coord_systems[:, rb_idx + 5, 2] = z.float()
# Construct the remaining bone coordinate systems iteratively
# TODO This can be potentially sped up by rotating the root child bone
# instead of the parent bone
for i in range(2, 4):
idx = nrb_idx_list[i - 2]
bone_vec_grandparent = bones[:, idx - 2 * 5]
bone_vec_parent = bones[:, idx - 1 * 5]
# bone_vec_child = bones[:,idx]
p_coord = coord_systems[:, idx - 1 * 5]
###### IF BONES ARE STRAIGHT LINE
# Transform into local coordinates
lbv_1 = torch.matmul(p_coord.float(), bone_vec_grandparent.unsqueeze(-1).float())
lbv_2 = torch.matmul(p_coord.float(), bone_vec_parent.unsqueeze(-1).float())
###### Angle_xz
# Project onto local xz plane
lbv_2_xz = torch.matmul(xz_mat, lbv_2).squeeze(-1)
lbv_2 = lbv_2.squeeze(-1)
# Compute the dot product
dot_prod_xz = torch.matmul(lbv_2_xz, z_axis).squeeze(-1)
# If dot product is close to zero, set it to zero
cond_0 = (torch.abs(dot_prod_xz) < 1e-6).float()
dot_prod_xz = cond_0 * 0 + (1 - cond_0) * dot_prod_xz
            # Compute the norm and make sure it's non-zero
norm_xz = torch.max(torch.norm(lbv_2_xz, dim=-1), eps_mat)
# Normalize the dot product
dot_prod_xz = dot_prod_xz / norm_xz
# Clip such that we do not get NaNs during GD
dot_prod_xz = clip_values(dot_prod_xz, -one+eps, one-eps)
# Compute the angle from the z-axis
angle_xz = torch.acos(dot_prod_xz)
# If lbv2_xz is on the -x side, we interpret it as -angle
cond_1 = ((lbv_2_xz[:,:,0] + 1e-6) < 0).float()
angle_xz = cond_1 * (-angle_xz) + (1-cond_1) * angle_xz
###### Angle_yz
# Compute the normalized dot product
dot_prod_yz = batch_dot_product(lbv_2_xz, lbv_2).squeeze(-1)
dot_prod_yz = dot_prod_yz / norm_xz
dot_prod_yz = clip_values(dot_prod_yz, -one+eps, one-eps)
# Compute the angle from the projected bone
angle_yz = torch.acos(dot_prod_yz)
# If bone is on -y side, we interpret it as -angle
cond_2 = ((lbv_2[:,:,1] + 1e-6) < 0).float()
angle_yz = cond_2 * (-angle_yz) + (1-cond_2) * angle_yz
###### Compute the local coordinate system
angle_xz = angle_xz.unsqueeze(-1)
angle_yz = angle_yz.unsqueeze(-1)
# Transform rotation axis to global
rot_axis_xz = torch.matmul(p_coord.transpose(2,3),
y_axis.unsqueeze(-1))
rot_axis_y = rotate_axis_angle(x_axis, y_axis, angle_xz)
rot_axis_y = torch.matmul(p_coord.transpose(2,3),
rot_axis_y.unsqueeze(-1))
rot_axis_y = rot_axis_y.squeeze(-1)
rot_axis_xz = rot_axis_xz.squeeze(-1)
cond = (torch.abs(angle_xz) < eps).float()
x = cond*x + (1-cond)*rotate_axis_angle(x, rot_axis_xz, angle_xz)
y = cond*y + (1-cond)*rotate_axis_angle(y, rot_axis_xz, angle_xz)
z = cond*z + (1-cond)*rotate_axis_angle(z, rot_axis_xz, angle_xz)
# Rotate around rotated x/-x
cond = (torch.abs(angle_yz) < eps).float()
x = cond*x + (1-cond)*rotate_axis_angle(x, rot_axis_y, -angle_yz)
y = cond*y + (1-cond)*rotate_axis_angle(y, rot_axis_y, -angle_yz)
z = cond*z + (1-cond)*rotate_axis_angle(z, rot_axis_y, -angle_yz)
coord_systems[:, idx, 0] = x.float()
coord_systems[:, idx, 1] = y.float()
coord_systems[:, idx, 2] = z.float()
return coord_systems.detach()
def compute_local_coordinates(self, bones, coord_systems):
local_coords = torch.matmul(coord_systems, bones.unsqueeze(-1))
return local_coords.squeeze(-1)
def compute_rot_angles(self, local_coords):
n_bones = local_coords.size(1)
z_axis = self.z_axis
xz_mat = self.xz_mat
yz_mat = self.yz_mat
one = self.one
eps = self.eps
eps_mat = self.eps_mat
# Compute the flexion angle
# Project bone onto the xz-plane
proj_xz = torch.matmul(xz_mat, local_coords.unsqueeze(-1)).squeeze(-1)
norm_xz = torch.max(torch.norm(proj_xz, dim=-1), eps_mat)
dot_prod_xz = torch.matmul(proj_xz, z_axis).squeeze(-1)
cond_0 = (torch.abs(dot_prod_xz) < 1e-6).float()
dot_prod_xz = cond_0 * 0 + (1-cond_0) * dot_prod_xz
dot_prod_xz = dot_prod_xz / norm_xz
dot_prod_xz = clip_values(dot_prod_xz, -one+eps, one-eps)
# Compute the angle from the z-axis
angle_xz = torch.acos(dot_prod_xz)
# If proj_xz is on the -x side, we interpret it as -angle
cond_1 = ((proj_xz[:,:,0] + 1e-6) < 0).float()
angle_xz = cond_1 * (-angle_xz) + (1-cond_1) * angle_xz
# Compute the abduction angle
dot_prod_yz = batch_dot_product(proj_xz, local_coords).squeeze(-1)
dot_prod_yz = dot_prod_yz / norm_xz
dot_prod_yz = clip_values(dot_prod_yz, -one+eps, one-eps)
# Compute the angle from the projected bone
angle_yz = torch.acos(dot_prod_yz)
# If bone is on y side, we interpret it as -angle
cond_2 = ((local_coords[:,:,1] + 1e-6) > 0).float()
angle_yz = cond_2 * (-angle_yz) + (1-cond_2) * angle_yz
# Concatenate both matrices
rot_angles = torch.cat((angle_xz.unsqueeze(-1), angle_yz.unsqueeze(-1)),
dim=-1)
return rot_angles
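    def _rot_angles_zero_check(self):
        # Illustrative sketch (added for clarity): a bone aligned with its
        # local z-axis should yield flexion/abduction angles close to zero,
        # up to the clipping epsilon. Assumes the module-level `eps` is small.
        coords = torch.zeros(1, 20, 3, device=self.dev)
        coords[..., 2] = 1.0
        angles = self.compute_rot_angles(coords)
        assert angles.abs().max() < 1e-2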
def preprocess_joints(self, joints, is_right):
"""
This function does the following:
- Move palm-centered root to wrist-centered root
- Root-center (for easier flipping)
- Flip left hands to right
"""
# Had to formulate it this way such that backprop works
joints_pp = 0 + joints
# Vector from palm to wrist (simplified expression on paper)
vec = joints[:,0] - joints[:,3]
vec = vec / torch.norm(vec, dim=1, keepdim=True)
        # This is a BUG !!!!
# Shift palm in direction wrist with factor shift_factor
joints_pp[:,0] = joints[:,0] + self.shift_factor * vec
# joints_pp[:,0] = 2*joints[:,0] - joints[:,3]
# joints_pp = joints_pp - joints_pp[:,0]
# if not kp3d_is_right:
# joints_pp = joints_pp * torch.tensor([[-1.,1.,1.]]).view(-1,1,3)
# Flip left handed joints
is_right = is_right.view(-1,1,1)
joints_pp = joints_pp * is_right + (1-is_right) * joints_pp * self.flipLR
# DEBUG
# joints = joints.clone()
# palm = joints[:,0]
# middle_mcp = joints[:,3]
# wrist = 2*palm - middle_mcp
# joints[:,0] = wrist
# # Root-center (for easier flipping)
# joints = joints - joints[:,0]
# # Flip if left hand
# if not kp3d_is_right:
# joints[:,:,0] = joints[:,:,0] * -1
# import pdb;pdb.set_trace()
return joints_pp
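    def _preprocess_flip_sketch(self, joints):
        # Illustrative sketch (added for clarity; assumes shift_factor == 0 as
        # set in __init__, and joints on self.dev): left hands (is_right == 0)
        # are mirrored along x by flipLR, so flipping twice recovers the input.
        is_left = torch.zeros(joints.shape[0], device=joints.device)
        once = self.preprocess_joints(joints, is_left)
        twice = self.preprocess_joints(once, is_left)
        assert torch.allclose(twice, joints, atol=1e-6)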
# def polygon_distance(self, angles):
# """
# Computes the distance of p_b[:,i] to polys[i]
# """
# # Slack variable due to numerical imprecision
# eps = self.eps_poly
# # Batch-dot prod
# dot = self.dot
# polys = self.angle_poly
# n_poly = self.n_poly
# n_vert = self.n_vert
# v1 = self.v1
# v2 = self.v2
# edges = self.edges
# zero = self.zero
# one = self.one
# l2 = self.l2
# # Check if the polygon contains the point
# # batch_size x n_poly x n_edges x 2
# # Distance of P[:,i] to all of poly[i] edges
# line = angles.view(-1,n_poly,1,2) - v1.view(1,n_poly,n_vert,2)
# # n_points x n_vertices x 1
# cross_prod = edges[:,:,:,0]*line[:,:,:,1] - edges[:,:,:,1]*line[:,:,:,0]
# # Reduce along the n_vertices dim
# contains = (cross_prod >= -eps)
# contains = contains.sum(dim=-1)
# contains = (contains==n_vert).float()
# t = torch.max(zero, torch.min(one, dot(edges,line) / l2)).unsqueeze(-1)
# proj = v1 + t * edges
# angles = angles.view(-1, n_poly,1,2)
# # Compute distance over all vertices
# d = torch.sum(
# torch.abs(torch.cos(angles) - torch.cos(proj)) +
# torch.abs(torch.sin(angles) - torch.sin(proj)),
# dim=-1)
# # Get the min
# D, _ = torch.min(d, dim=-1)
# # Assign 0 for points that are contained in the polygon
# d = (contains * 0 + (1-contains) * D) ** 2
# return d
def compute_rotation_matrix(self, rot_angles, bone_local):
''' rot_angles [BS, bone, 2 (flexion angle, abduction angle)]
'''
batch_size, bone, xy_size = rot_angles.shape
rot_angles_flat = rot_angles.reshape(batch_size * bone, 2)
bone_local_flat = bone_local.reshape(batch_size * bone, 3)
# mano canonical pose
canonical_rot_flat = self.canonical_rot_angles.repeat(batch_size, 1, 1).to(rot_angles_flat.device)
canonical_rot_flat = canonical_rot_flat.reshape(batch_size * bone, 2)
x = torch.zeros([batch_size * bone, 3], device=rot_angles_flat.device)
y = torch.zeros([batch_size * bone, 3], device=rot_angles_flat.device)
z = torch.zeros([batch_size * bone, 3], device=rot_angles_flat.device)
x[:, 0] = 1.
y[:, 1] = 1.
z[:, 2] = 1.
rotated_x = rotate(x, y, rot_angles_flat[:, 0].unsqueeze(1))
# print("rotated x", rotated_x, rotated_x.shape)
# print("bone local", bone_local_flat, bone_local_flat.shape)
# reverse transform starts here
# abduction
b_local_1 = rotate(bone_local_flat, rotated_x, -rot_angles_flat[:, 1].unsqueeze(1))
# flexion
b_local_2 = rotate(b_local_1, y, -rot_angles_flat[:, 0].unsqueeze(1))
# print("sanity check", (b_local_2 - z).abs().max())
# assert (b_local_2 - z).abs().max() < torch.tensor(1e-5)
abduction_angle = (-rot_angles_flat[:, 1] + canonical_rot_flat[:, 1]).unsqueeze(1)
# abduction_angle = (-rot_angles_flat[:, 1]).unsqueeze(1)
r_1 = rotation_matrix(abduction_angle, rotated_x)
flexion_angle = (-rot_angles_flat[:, 0] + canonical_rot_flat[:, 0]).unsqueeze(1)
# flexion_angle = (-rot_angles_flat[:, 0]).unsqueeze(1)
r_2 = rotation_matrix(flexion_angle, y)
# print("abduction angle", abduction_angle.shape)
# assert False
# print("r_1", r_1.shape)
# print("r_2", r_2.shape)
        r = torch.bmm(r_2.float(), r_1.float())
r = r.reshape(batch_size, bone, 3, 3)
# mask root bones rotation
r[:, :5] = torch.eye(3, device=r.device)
# print("final r", r)
# x_angle = rot_angles[:, :, 0]
# y_angle = rot_angles[:, :, 1]
# print("rot_angles", rot_angles.shape)
# print("x_angle", x_angle.shape)
# print("y_angle", y_angle.shape)
# rot_x_mat = get_rot_mat_x(x_angle)
# rot_y_mat = get_rot_mat_y(y_angle)
# rot_x_y = torch.matmul(rot_y_mat, rot_x_mat)
return r # rot_x_y
def get_scale_mat_from_bone_lengths(self, bone_lengths):
scale_mat = torch.eye(3, device=bone_lengths.device).repeat(*bone_lengths.shape[:2], 1, 1)
# print("eye", scale_mat, scale_mat.shape)
scale_mat = bone_lengths.unsqueeze(-1) * scale_mat
# print("scale_mat", scale_mat, scale_mat.shape)
return scale_mat
def get_trans_mat_with_translation(self, trans_mat_without_scale_translation, local_coords_after_unpose, bones, bone_lengths):
# print("---- get trans mat with translation -----")
# print("trans_mat_without_scale_translation", trans_mat_without_scale_translation.shape)
# print("bones", bones.shape)
# print("bone_lengths", bone_lengths.shape)
translation = local_coords_after_unpose * bone_lengths
# translation = translation.unsqueeze(-1)
# print("translation", translation.shape)
# print(translation)
# add translation along kinematic chain
# no root
lev_1 = [0, 1, 2, 3, 4]
lev_2 = [5, 6, 7, 8, 9]
lev_3 = [10, 11, 12, 13, 14]
lev_4 = [15, 16, 17, 18, 19]
root_trans = translation[:, lev_1] * 0.
lev_1_trans = translation[:, lev_1]
lev_2_trans = translation[:, lev_2] + lev_1_trans
lev_3_trans = translation[:, lev_3] + lev_2_trans
lev_4_trans = translation[:, lev_4] + lev_3_trans
# print("lev_1_trans", lev_1_trans)
# print("lev_3_trans", lev_3_trans, lev_3_trans.shape)
# final_trans = torch.cat([lev_1_trans, lev_2_trans, lev_3_trans, lev_4_trans], dim=1)
final_trans = torch.cat([root_trans, lev_1_trans, lev_2_trans, lev_3_trans], dim=1)
# print("final trans", final_trans, final_trans.shape)
final_trans = final_trans.unsqueeze(-1)
trans_mat = torch.cat([trans_mat_without_scale_translation, final_trans], dim=3)
# print("trans_mat", trans_mat.shape)
last_row = torch.tensor([0., 0., 0., 1.], device=trans_mat.device).repeat(*trans_mat.shape[:2], 1 , 1)
trans_mat = torch.cat([trans_mat, last_row], dim=2)
# start_point = (bones[idx] * bone_lengths[idx])
# print("---- END get trans mat with translation -----")
return trans_mat
# def get_trans_mat_kinematic_chain(self, trans_mat_3_4):
# print("**** kinematic chain ****")
# trans_mat = trans_mat_3_4
# last_row = torch.tensor([0., 0., 0., 1.], device=trans_mat_3_4.device).repeat(*trans_mat_3_4.shape[:2], 1 , 1)
# trans_mat = torch.cat([trans_mat_3_4, last_row], dim=2)
# print("trans_mat", trans_mat.shape)
# # no root
# lev_1 = [0, 1, 2, 3, 4]
# lev_2 = [5, 6, 7, 8, 9]
# lev_3 = [10, 11, 12, 13, 14]
# lev_4 = [15, 16, 17, 18, 19]
# lev_1_mat = trans_mat[:, lev_1, : , :]
# lev_2_mat = torch.matmul(trans_mat[:, lev_2, : , :], lev_1_mat)
# lev_3_mat = torch.matmul(trans_mat[:, lev_3, : , :], lev_2_mat)
# lev_4_mat = torch.matmul(trans_mat[:, lev_4, : , :], lev_3_mat)
# print("lev_1_mat", lev_1_mat.shape)
# print("lev_4_mat", lev_4_mat.shape)
# final_mat = torch.cat([lev_1_mat, lev_2_mat, lev_3_mat, lev_4_mat], dim=1)
# # trans_mat = final_mat
# print("**** END kinematic chain ****")
# return trans_mat
def from_3x3_mat_to_4x4(self, mat_3x3):
last_col = torch.zeros(1, device=mat_3x3.device).repeat(*mat_3x3.shape[:2], 3, 1)
mat_3x4 = torch.cat([mat_3x3, last_col], dim=3)
# print("mat_3x3", mat_3x3.shape)
last_row = torch.tensor([0., 0., 0., 1.], device=mat_3x4.device).repeat(*mat_3x4.shape[:2], 1 , 1)
mat_4x4 = torch.cat([mat_3x4, last_row], dim=2)
return mat_4x4
def compute_adjusted_transpose(self, local_cs, rot_mat):
lev_1 = [0, 1, 2, 3, 4]
lev_2 = [5, 6, 7, 8, 9]
lev_3 = [10, 11, 12, 13, 14]
lev_4 = [15, 16, 17, 18, 19]
lev_1_cs = local_cs[:, lev_1]
lev_2_cs = local_cs[:, lev_2]
lev_2_rot = rot_mat[:, lev_2]
lev_3_cs = torch.matmul(lev_2_rot, local_cs[:, lev_3])
lev_3_rot = torch.matmul(rot_mat[:, lev_3], lev_2_rot)
lev_4_cs = torch.matmul(lev_3_rot, local_cs[:, lev_4])
# lev_3_rot = torch.matmul(rot_mat[:, lev_3], lev_2_rot)
adjust_cs = torch.cat([lev_1_cs, lev_2_cs, lev_3_cs, lev_4_cs], dim=1)
        local_cs_transpose = torch.transpose(local_cs, -2, -1) + 0
        local_cs_transpose[:, lev_3] = torch.matmul(local_cs_transpose[:, lev_3], lev_2_rot)
        local_cs_transpose[:, lev_4] = torch.matmul(local_cs_transpose[:, lev_4], lev_3_rot)
        transpose_cs = torch.transpose(adjust_cs, -2, -1)
        # transpose_cs = torch.inverse(adjust_cs)
        return local_cs_transpose  # transpose_cs
def normalize_root_planes(self, bones, bone_lengths):
# root = torch.zeros([3])
# fig = plt.figure()
# ax = fig.add_subplot(111, projection='3d', title='bones')
# plot_local_coord(bones, bone_lengths, root, ax, show=False)
bones_ori = bones + 0
root_plane_norm_mat = torch.eye(3, device=self.dev).repeat(bones.shape[0], 20, 1, 1)
# canonical_angle defines angles between root bone planes
# angle between (n0,n1), (n1,n2), (n2,n3)
canonical_angle = self.root_plane_angles
# canonical_angle = np.array([0.8, 0.2, 0.2])
bones_0 = bones[:, 0]
bones_1 = bones[:, 1]
bones_2 = bones[:, 2]
bones_3 = bones[:, 3]
bones_4 = bones[:, 4]
# Use the plane between index(1) and middle(2) finger as reference plane (ref n1)
# The normal of this plane is pointing toward y
n1 = torch.cross(bones_2, bones_1)
# Thumb and index (plane 0)
n0 = torch.cross(bones_1, bones_0)
n0_n1_angle = signed_angle(n0, n1, bones_1)
# Rotate thumb root bone
thumb_trans = rotation_matrix(n0_n1_angle - canonical_angle[0], bones_1)
root_plane_norm_mat[:, 0] = thumb_trans
# bones_plot = torch.matmul(root_plane_norm_mat, bones_ori.unsqueeze(-1)).squeeze(-1)
# plot_local_coord(bones_plot, bone_lengths, root, ax, show=True)
# Middle and ring (plane 2)
n2 = torch.cross(bones_3, bones_2)
n2_n1_angle = signed_angle(n2, n1, bones_2)
# Rotate ring finger root bone, apply the same transformation to pinky
ring_trans = rotation_matrix(n2_n1_angle + canonical_angle[1], bones_2)
bones_3 = torch.matmul(ring_trans, bones_3.unsqueeze(-1)).squeeze(-1)
bones_4 = torch.matmul(ring_trans, bones_4.unsqueeze(-1)).squeeze(-1)
root_plane_norm_mat[:, 3] = ring_trans
root_plane_norm_mat[:, 4] = ring_trans
# bones_plot = torch.matmul(root_plane_norm_mat, bones_ori.unsqueeze(-1)).squeeze(-1)
# plot_local_coord(bones_plot, bone_lengths, root, ax, show=True)
# Ring and pinky (plane 3)
n3 = torch.cross(bones_4, bones_3)
n2 = torch.cross(bones_3, bones_2)
n3_n2_angle = signed_angle(n3, n2, bones_3)
        # Rotate pinky root bone (composed on top of the ring-finger transform)
pinky_trans = rotation_matrix(n3_n2_angle + canonical_angle[2], bones_3)
root_plane_norm_mat[:, 4] = torch.matmul(pinky_trans, ring_trans)
# bones_plot = torch.matmul(root_plane_norm_mat, bones_ori.unsqueeze(-1)).squeeze(-1)
# plot_local_coord(bones_plot, bone_lengths, root, ax, show=True)
# Propagate rotations along kinematic chains
for i in range(5):
for j in range(3):
root_plane_norm_mat[:, (j+1)*5 + i] = root_plane_norm_mat[:, i]
new_bones = torch.matmul(root_plane_norm_mat.double(), bones_ori.unsqueeze(-1).double()).squeeze(-1)
# plot_local_coord(new_bones, bone_lengths, root, ax)
# import pdb; pdb.set_trace()
return new_bones, root_plane_norm_mat
def normalize_root_bone_angles(self, bones, bone_lengths):
# root = torch.zeros([3])
# fig = plt.figure()
# ax = fig.add_subplot(111, projection='3d', title='bones angle')
# plot_local_coord(bones, bone_lengths, root, ax, show=False)
bones_ori = bones + 0
        # canonical_angle = 0.2  # 0.1 (old scalar value, superseded below)
        # angle between (t,i), (i,m), (m,r), (r,p)
        # canonical_angle = self.root_bone_angles
        canonical_angle = np.array([0.4, 0.2, 0.2, 0.2])
bones_0 = bones[:, 0]
bones_1 = bones[:, 1]
bones_2 = bones[:, 2]
bones_3 = bones[:, 3]
bones_4 = bones[:, 4]
root_angle_norm_mat = torch.eye(3, device=self.dev).repeat(bones.shape[0], 20, 1, 1)
# canonical_angle defines angles between adjacent bones
# Use middle finger (f2) as reference
# Plane normals always pointing out of the back of the hand
# Index finger (f1), apply the same transformation to thumb (f0)
n1 = cross(bones_2, bones_1, do_normalize=True)
f2_f1_angle = signed_angle(bones_2, bones_1, n1)
index_trans = rotation_matrix(canonical_angle[1] - f2_f1_angle, n1)
root_angle_norm_mat[:, 1] = index_trans
root_angle_norm_mat[:, 0] = index_trans
bones_1 = torch.matmul(index_trans, bones_1.unsqueeze(-1)).squeeze(-1)
bones_0 = torch.matmul(index_trans, bones_0.unsqueeze(-1)).squeeze(-1)
# bones_plot = torch.matmul(root_angle_norm_mat, bones_ori.unsqueeze(-1)).squeeze(-1)
# plot_local_coord(bones_plot, bone_lengths, root, ax, show=True)
# Thumb (f0)
n0 = cross(bones_1, bones_0, do_normalize=True)
f1_f0_angle = signed_angle(bones_1, bones_0, n0)
thumb_trans = rotation_matrix(canonical_angle[0] - f1_f0_angle, n0)
root_angle_norm_mat[:, 0] = torch.matmul(thumb_trans, index_trans)
bones_0 = torch.matmul(thumb_trans, bones_0.unsqueeze(-1)).squeeze(-1)
# bones_plot = torch.matmul(root_angle_norm_mat, bones_ori.unsqueeze(-1)).squeeze(-1)
# plot_local_coord(bones_plot, bone_lengths, root, ax, show=True)
# Ring finger (f3), apply the same transformation to pinky finger (f4)
# Notice the sign change in rotation_matrix()
n2 = cross(bones_3, bones_2, do_normalize=True)
f3_f2_angle = signed_angle(bones_3, bones_2, n2)
ring_trans = rotation_matrix(f3_f2_angle - canonical_angle[2], n2)
root_angle_norm_mat[:, 3] = ring_trans
root_angle_norm_mat[:, 4] = ring_trans
bones_3 = torch.matmul(ring_trans, bones_3.unsqueeze(-1)).squeeze(-1)
bones_4 = torch.matmul(ring_trans, bones_4.unsqueeze(-1)).squeeze(-1)
# bones_plot = torch.matmul(root_angle_norm_mat, bones_ori.unsqueeze(-1)).squeeze(-1)
# plot_local_coord(bones_plot, bone_lengths, root, ax, show=True)
# Pinky finger (f4)
n3 = cross(bones_4, bones_3, do_normalize=True)
f4_f3_angle = signed_angle(bones_4, bones_3, n3)
pinky_trans = rotation_matrix(f4_f3_angle - canonical_angle[3], n3)
root_angle_norm_mat[:, 4] = torch.matmul(pinky_trans, ring_trans)
bones_4 = torch.matmul(pinky_trans, bones_4.unsqueeze(-1)).squeeze(-1)
# bones_plot = torch.matmul(root_angle_norm_mat, bones_ori.unsqueeze(-1)).squeeze(-1)
# plot_local_coord(bones_plot, bone_lengths, root, ax, show=True)
# Propagate rotations along kinematic chains
for i in range(5):
for j in range(3):
root_angle_norm_mat[:, (j+1)*5 + i] = root_angle_norm_mat[:, i]
new_bones = torch.matmul(root_angle_norm_mat.double(), bones_ori.unsqueeze(-1).double()).squeeze(-1)
# plot_local_coord(new_bones, bone_lengths, root, ax, show=True)
# import pdb; pdb.set_trace()
return new_bones, root_angle_norm_mat
def forward(self, joints, kp3d_is_right, return_rot_only=False):
assert joints.size(1) == 21, "Number of joints needs to be 21"
nrb_idx = self.nrb_idx
# Pre-process the joints
joints = self.preprocess_joints(joints, kp3d_is_right)
# Compute the bone vectors
bones, bone_lengths, kp_to_bone_mat = self.kp3D_to_bones(joints)
bone_tmp = bones + 0
# New normalization
# Normalize the root bone planes
plane_normalized_bones, root_plane_norm_mat = self.normalize_root_planes(bones, bone_lengths)
# Normalize angles between root bones
angle_normalized_bones, root_angle_norm_mat = self.normalize_root_bone_angles(plane_normalized_bones, bone_lengths)
bones = angle_normalized_bones
# Plot root bone normalization
# root = torch.zeros([3])
# fig = plt.figure()
# ax = fig.add_subplot(111, projection='3d', title='bones angle')
# plot_local_coord(bones, bone_lengths, root, ax, show=False)
# plot_local_coord(angle_normalized_bones, bone_lengths, root, ax, show=True)
# Combine plane normalization and angle normalization
root_bones_norm_mat = torch.matmul(root_angle_norm_mat, root_plane_norm_mat) # root_plane_norm_mat
# print("root_bones_norm_mat", root_bones_norm_mat.shape)
# root_bones_norm_mat = torch.eye(3, device=bones.device).reshape(1, 1, 3, 3).repeat(*bones.shape[0:2], 1, 1)
# print("root_bones_norm_mat", root_bones_norm_mat.shape)
# Compute the local coordinate systems for each bone
        # This assumes the root bones are fixed
local_cs = self.compute_local_coordinate_system(bones.double())
# Compute the local coordinates
local_coords = self.compute_local_coordinates(bones.float(), local_cs.float())
# root = torch.zeros([1,21,3])
root = torch.zeros([3])
# fig = plt.figure()
# ax = fig.add_subplot(111, projection='3d', title='local_coords')
# # plot_local_coord_system(local_cs, bone_lengths, root, ax)
# plot_local_coord(local_coords, bone_lengths, root, ax)
        # Compute the rotation around the y and rotated-x axis
rot_angles = self.compute_rot_angles(local_coords)
if return_rot_only:
nrb_rot_angles = rot_angles[:, nrb_idx]
return nrb_rot_angles
# print("rot angles", rot_angles)
# Compute rotation matrix
rot_mat = self.compute_rotation_matrix(rot_angles.float(), local_coords.float())
# print("rot mat", rot_mat.shape)
# print("local_cs", local_cs.shape)
        # local_cs_transpose = torch.transpose(local_cs, -2, -1)
        local_cs_transpose = self.compute_adjusted_transpose(local_cs, rot_mat)
        # print("local_cs_transpose", local_cs_transpose.shape)
        # should_be_i = torch.matmul(local_cs_transpose, local_cs)
        # print("sanity check", should_be_i)
        # print("sanity check transpose", torch.bmm(local_cs_transpose, local_cs))
        trans_mat_without_scale_translation = torch.matmul(local_cs_transpose, torch.matmul(rot_mat, local_cs))
# print("trans_mat_without_scale_translation", trans_mat_without_scale_translation)
####
# local_coords_no_back_proj = self.compute_local_coordinates(bones, torch.matmul(rot_mat, local_cs))
# print("--- local_coords_no_back_proj ---", local_coords_no_back_proj)
# fig = plt.figure()
# ax = fig.add_subplot(111, projection='3d')
# plot_local_coord(local_coords_no_back_proj, bone_lengths, root, ax)
# Compute local coordinates of each bone after unposing to adjust keypoint translation
local_coords_after_unpose = self.compute_local_coordinates(bones.float(), trans_mat_without_scale_translation.float())
# fig = plt.figure()
# ax = fig.add_subplot(111, projection='3d')
# # # plot_local_coord(local_coords_no_back_proj, bone_lengths, root, ax)
# plot_local_coord(local_coords_after_unpose, bone_lengths, root, ax) # , show=False)
#### normal transpose
# loacl_cs_normal_transpose = torch.transpose(local_cs, -2, -1)
# trans_mat_normal_transpose = torch.matmul(loacl_cs_normal_transpose, torch.matmul(rot_mat, local_cs))
# # print("loacl_cs_transpose", loacl_cs_transpose.shape)
# local_coords_normal_transpose = self.compute_local_coordinates(bones, trans_mat_normal_transpose)
# fig = plt.figure()
# ax = fig.add_subplot(111, projection='3d')
# plot_local_coord(local_coords_no_back_proj, bone_lengths, root, ax)
# print("normal transpose")
# plot_local_coord(local_coords_normal_transpose, bone_lengths, root, ax)
# print("bone_lengths", bone_lengths, bone_lengths.shape)
# scale_mat = self.get_scale_mat_from_bone_lengths(bone_lengths)
# print("scale_mat", scale_mat, scale_mat.shape)
# trans_mat_without_translation = torch.matmul(scale_mat, trans_mat_without_scale_translation)
# This return 3 x 4 transformation matrix with translation
# trans_mat_with_translation = self.get_trans_mat_with_translation(
# trans_mat_without_scale_translation, local_coords_after_unpose, bones, bone_lengths)
# This return 4 x 4 transformation matrix
# trans_mat_kinematic_chain = self.get_trans_mat_kinematic_chain(trans_mat_with_translation)
trans_mat_without_scale_translation = self.from_3x3_mat_to_4x4(trans_mat_without_scale_translation)
# Convert bones back to keypoints
inv_scale_trans = self.compute_bone_to_kp_mat(bone_lengths, local_coords_after_unpose)
# Combine everything into one transformation matrix from posed keypoints to unposed keypoints
# import pdb; pdb.set_trace()
# root_bones_norm_mat
trans_mat = torch.matmul(self.from_3x3_mat_to_4x4(root_bones_norm_mat.float()), kp_to_bone_mat.float())
trans_mat = torch.matmul(trans_mat_without_scale_translation, trans_mat)
# trans_mat = torch.matmul(trans_mat_without_scale_translation, kp_to_bone_mat)
trans_mat = torch.matmul(inv_scale_trans.double(), trans_mat.double())
# Add root keypoint tranformation
# import pdb; pdb.set_trace()
root_trans = torch.eye(4, device=trans_mat.device).reshape(1, 1, 4, 4).repeat(trans_mat.shape[0], 1, 1, 1)
trans_mat = torch.cat([root_trans, trans_mat], dim=1)
bone_lengths = torch.cat([torch.ones([trans_mat.shape[0], 1, 1], device=trans_mat.device), bone_lengths], dim=1)
# Compute the angle loss
# def interval_loss(x, min_v, max_v):
# if min_v.dim() == 1:
# min_v = min_v.unsqueeze(0)
# max_v = max_v.unsqueeze(0)
# zero = self.zero
# return (torch.max(min_v - x, zero) + torch.max(x - max_v, zero))
# Discard the root bones
nrb_rot_angles = rot_angles[:, nrb_idx]
# Compute the polygon distance
# poly_d = self.polygon_distance(nrb_rot_angles)
# Compute the final loss
# per_batch_loss = poly_d.mean(1)
# angle_loss = per_batch_loss.mean()
# Storage for debug purposes
# self.per_batch_loss = per_batch_loss.detach()
self.bones = bones.detach()
self.local_cs = local_cs.detach()
self.local_coords = local_coords.detach()
self.nrb_rot_angles = nrb_rot_angles.detach()
# self.loss_per_sample = poly_d.detach()
return trans_mat, bone_lengths # rot_mat # rot_angles # angle_loss
if __name__ == '__main__':
import sys
sys.path.append('.')
import matplotlib.pyplot as plt
plt.ion()
from pose.utils.visualization_2 import plot_fingers
import yaml
from tqdm import tqdm
from prototypes.utils import get_data_reader
dev = torch.device('cpu')
# Load constraints
cfg_path = "hp_params/all_params.yaml"
    with open(cfg_path) as f:
        hand_constraints = yaml.safe_load(f)
# Hand parameters
for k,v in hand_constraints.items():
if isinstance(v, list):
hand_constraints[k] = torch.from_numpy(np.array(v)).float()
angle_poly = hand_constraints['convex_hull']
    # NOTE: AngleLoss is not defined in this file; it is assumed to come from
    # an earlier revision of this module or an external import.
    angle_loss = AngleLoss(angle_poly, dev=dev)
##### Consistency test. Make sure loss is 0 for all samples
# WARNING: This will fail because we approximate the angle polygon
# Get data reader
data_reader = get_data_reader(ds_name='stb', is_train=True)
# Make sure error is 0 for all samples of the training set
tol = 1e-8
for i in tqdm(range(len(data_reader))):
sample = data_reader[i]
kp3d = sample["joints3d"].view(-1,21,3)
is_right = sample["kp3d_is_right"].view(-1,1)
loss = angle_loss(kp3d, is_right)
        # import pdb;pdb.set_trace()
if loss > tol:
print("ERROR")
plot_fingers(kp3d[0])
import pdb;pdb.set_trace()
        # Shifting shouldn't cause an issue either
kp3d_center = kp3d - kp3d[:,0:1]
loss = angle_loss(kp3d_center, is_right)
if loss > tol:
print("ERROR")
plot_fingers(kp3d_center[0])
import pdb;pdb.set_trace()
# Scaling should be 0 error too
kp3d_scale = kp3d * 10
loss = angle_loss(kp3d_scale, is_right)
if loss > tol:
print("ERROR")
plot_fingers(kp3d_scale[0])
import pdb;pdb.set_trace()
##### SGD Test
# torch.manual_seed(4)
# x = torch.zeros(1,21,3)
# x[:,1:6] = torch.rand(1,5,3)
# is_right = torch.tensor(1.0).view(1,1)
# x.requires_grad_()
# print_freq = 100
# lr = 1e-1
# ax = None
# i = 0
# while True:
# # while i < 100:
# loss = root_bone_loss(x, is_right)
# loss.backward()
# if (i % print_freq) == 0:
# print("It: %d\tLoss: %.08f" % (i,loss.item()))
# to_plot = x[0].clone()
# ax = plot_fingers(to_plot, ax=ax, set_view=False)
# to_plot = to_plot.detach().numpy()
# ax.plot(to_plot[1:6,0], to_plot[1:6,1], to_plot[1:6,2], 'b')
# plt.show()
# plt.pause(0.001)
# if i == 0:
# # Pause for initial conditions
# input()
# with torch.no_grad():
# x = x - lr * x.grad
# x.requires_grad_()
# i += 1
| 56,863 | 40.9042 | 130 | py |
Im2Hands | Im2Hands-main/dependencies/halo/halo_adapter/interface.py | # For interfacing with the HALO mesh model code
import argparse
import trimesh
import numpy as np
import os
import torch
import sys
sys.path.insert(0, "../../halo_base")
from artihand import config  # , data
from artihand.checkpoints import CheckpointIO
def get_halo_model(config_file):
'''
Args:
config_file (str): HALO config file
'''
no_cuda = False
print("config_file", config_file)
cfg = config.load_config(config_file, '../halo_base/configs/default.yaml')
is_cuda = (torch.cuda.is_available() and not no_cuda)
device = torch.device("cuda" if is_cuda else "cpu")
# Model
model = config.get_model(cfg, device=device)
out_dir = cfg['training']['out_dir']
checkpoint_io = CheckpointIO(out_dir, model=model)
checkpoint_io.load(cfg['test']['model_file'])
# print(checkpoint_io.module_dict['model'])
# print(model.state_dict().keys())
# Generator
generator = config.get_generator(model, cfg, device=device)
# print("upsampling", generator.upsampling_steps)
return model, generator
def convert_joints(joints, source, target):
halo_joint_to_mano = np.array([0, 13, 14, 15, 16, 1, 2, 3, 17, 4, 5, 6, 18, 10, 11, 12, 19, 7, 8, 9, 20])
mano_joint_to_halo = np.array([0, 5, 6, 7, 9, 10, 11, 17, 18, 19, 13, 14, 15, 1, 2, 3, 4, 8, 12, 16, 20])
mano_joint_to_biomech = np.array([0, 1, 5, 9, 13, 17, 2, 6, 10, 14, 18, 3, 7, 11, 15, 19, 4, 8, 12, 16, 20])
biomech_joint_to_mano = np.array([0, 1, 6, 11, 16, 2, 7, 12, 17, 3, 8, 13, 18, 4, 9, 14, 19, 5, 10, 15, 20])
halo_joint_to_biomech = np.array([0, 13, 1, 4, 10, 7, 14, 2, 5, 11, 8, 15, 3, 6, 12, 9, 16, 17, 18, 19, 20])
# halo_joint_to_biomech = np.array([0, 11, 15, 19, 4, 1, 5, 9, 8, 13, 17, 2, 12, 18, 3, 7, 16, 6, 10, 14, 20])
biomech_joint_to_halo = np.array([0, 2, 7, 12, 3, 8, 13, 5, 10, 15, 4, 9, 14, 1, 6, 11, 16, 17, 18, 19, 20])
if source == 'halo' and target == 'biomech':
# conn = nasa_joint_to_mano[mano_joint_to_biomech]
# print(conn)
# tmp_mano = joints[:, halo_joint_to_mano]
# out = tmp_mano[:, mano_joint_to_biomech]
return joints[:, halo_joint_to_biomech]
if source == 'biomech' and target == 'halo':
return joints[:, biomech_joint_to_halo]
if source == 'mano' and target == 'biomech':
return joints[:, mano_joint_to_biomech]
if source == 'biomech' and target == 'mano':
return joints[:, biomech_joint_to_mano]
if source == 'halo' and target == 'mano':
return joints[:, halo_joint_to_mano]
if source == 'mano' and target == 'halo':
return joints[:, mano_joint_to_halo]
print("-- Undefined convertion. Return original tensor --")
return joints
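# Illustrative round-trip sketch (added helper, not part of the original API):
# converting an ordering and back should be the identity, since the index
# arrays above are inverse permutations of each other.
def _convert_joints_roundtrip_check():
    joints = torch.rand(2, 21, 3)
    back = convert_joints(convert_joints(joints, 'mano', 'biomech'),
                          'biomech', 'mano')
    assert torch.allclose(joints, back)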
def change_axes(keypoints, source='mano', target='halo'):
"""Swap axes to match that of NASA
"""
# Swap axes from local_cs to NASA
kps_halo = keypoints + 0
kps_halo[..., 0] = keypoints[..., 1]
kps_halo[..., 1] = keypoints[..., 2]
kps_halo[..., 2] = keypoints[..., 0]
mat = torch.zeros([4, 4], device=keypoints.device)
mat[0, 1] = 1.
mat[1, 2] = 1.
mat[2, 0] = 1.
mat[3, 3] = 1.
return kps_halo, mat
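# Illustrative consistency sketch (added helper, not called anywhere): the 4x4
# matrix returned by `change_axes` should perform the same swap as the tensor
# indexing above when applied to homogeneous keypoints.
def _change_axes_consistency_check():
    kps = torch.rand(1, 21, 3)
    kps_halo, mat = change_axes(kps)
    kps_h = torch.cat([kps, torch.ones_like(kps[..., :1])], dim=-1)
    swapped = torch.matmul(mat, kps_h.unsqueeze(-1)).squeeze(-1)[..., :3]
    assert torch.allclose(swapped, kps_halo)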
def get_bone_lengths(joints, source='biomech', target='halo'):
''' To get the bone lengths for halo inputs
'''
joints = convert_joints(joints, source=source, target=target)
bones_idx = np.array([
(0, 4), # use distance from root to middle finger as palm bone length
(1, 2),
(2, 3),
(3, 17),
(4, 5),
(5, 6),
(6, 18),
(7, 8),
(8, 9),
(9, 20),
(10, 11),
(11, 12),
(12, 19),
(13, 14),
(14, 15),
(15, 16)
])
bones = joints[:, bones_idx[:, 0]] - joints[:, bones_idx[:, 1]]
bone_lengths = torch.norm(bones, dim=-1)
return bone_lengths
def scale_halo_trans_mat(trans_mat, scale=0.4):
''' Scale the transformation matrices to match the scale of HALO.
Maybe this should be in the HALO package.
Args:
trans_mat: Transformation matrices that are already inverted (from pose to unpose)
'''
# Transform meta data
# Assume that the transformation matrices are already inverted
scale_mat = torch.eye(4, device=trans_mat.device).reshape(1, 1, 4, 4).repeat(trans_mat.shape[0], 1, 1, 1) * scale
scale_mat[:, :, 3, 3] = 1.
nasa_input = torch.matmul(trans_mat.double(), scale_mat.double())
# (optional) scale canonical pose by the same global scale to make learning occupancy function easier
canonical_scale_mat = torch.eye(4, device=trans_mat.device).reshape(1, 1, 4, 4).repeat(trans_mat.shape[0], 1, 1, 1) / scale
canonical_scale_mat[:, :, 3, 3] = 1.
nasa_input = torch.matmul(canonical_scale_mat.double(), nasa_input.double())
return nasa_input
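# Illustrative sketch (added helper, not part of the original module): because
# the global scale and the canonical counter-scale above are reciprocal, a
# batch of identity transforms should pass through unchanged.
def _scale_halo_identity_check():
    mats = torch.eye(4).reshape(1, 1, 4, 4).repeat(2, 16, 1, 1)
    out = scale_halo_trans_mat(mats, scale=0.4)
    assert torch.allclose(out.float(), mats)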
| 4,857 | 35.253731 | 127 | py |
Im2Hands | Im2Hands-main/dependencies/halo/utils/visualize.py | import numpy as np
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from mpl_toolkits.mplot3d.art3d import Poly3DCollection
# from torchvision.utils import save_image
# import im2mesh.common as common
# def visualize_data(data, data_type, out_file):
# r''' Visualizes the data with regard to its type.
# Args:
# data (tensor): batch of data
# data_type (string): data type (img, voxels or pointcloud)
# out_file (string): output file
# '''
# if data_type == 'trans_matrix':
# visualize_transmatrix(data, out_file=out_file)
# elif data_type == 'img':
# if data.dim() == 3:
# data = data.unsqueeze(0)
# save_image(data, out_file, nrow=4)
# elif data_type == 'voxels':
# visualize_voxels(data, out_file=out_file)
# elif data_type == 'pointcloud':
# visualize_pointcloud(data, out_file=out_file)
# elif data_type is None or data_type == 'idx':
# pass
# else:
# raise ValueError('Invalid data_type "%s"' % data_type)
def set_ax_limits(ax, lim=100.0):  # earlier defaults: lim=10.0, 0.1
ax.set_xlim3d(-lim, lim)
ax.set_ylim3d(-lim, lim)
ax.set_zlim3d(-lim, lim)
def plot_skeleton_single_view(joints, joint_order='biomech', object_points=None,
color='r', ax=None, show=True):
    if ax is None:
        fig = plt.figure()
        ax = fig.add_subplot(projection='3d')
# set_ax_limits(ax)
# Skeleton definition
mano_joint_parent = np.array([0, 0, 1, 2, 3, 0, 5, 6, 7, 0, 9, 10, 11, 0, 13, 14, 15, 0, 17, 18, 19])
biomech_joint_parent = np.array([0, 0, 0, 0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15])
halo_joint_parent = np.array([0, 0, 1, 2, 0, 4, 5, 0, 7, 8, 0, 10, 11, 0, 13, 14, 15, 3, 6, 12, 9])
    if joint_order == 'mano':
        joint_parent = mano_joint_parent
    elif joint_order == 'biomech':
        joint_parent = biomech_joint_parent
    elif joint_order == 'halo':
        joint_parent = halo_joint_parent
    else:
        raise ValueError('Unknown joint order: %s' % joint_order)
if object_points is not None:
ax.scatter(object_points[:, 0], object_points[:, 1], object_points[:, 2], alpha=0.1, c='r')
ax.scatter(joints[:, 0], joints[:, 1], joints[:, 2], alpha=0.8, c='b')
b_start_loc = joints[joint_parent] # joints[biomech_joint_parent]
b_end_loc = joints
for b in range(21):
if b == 1:
cur_color = 'g'
else:
cur_color = color
ax.plot([b_start_loc[b, 0], b_end_loc[b, 0]],
[b_start_loc[b, 1], b_end_loc[b, 1]],
[b_start_loc[b, 2], b_end_loc[b, 2]], color=cur_color)
    if show:
        plt.show()
    # plt.close(fig)
def get_joint_parent(joint_order):
if joint_order == 'mano':
return np.array([0, 0, 1, 2, 3, 0, 5, 6, 7, 0, 9, 10, 11, 0, 13, 14, 15, 0, 17, 18, 19])
if joint_order == 'biomech':
return np.array([0, 0, 0, 0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15])
if joint_order == 'halo':
return np.array([0, 0, 1, 2, 0, 4, 5, 0, 7, 8, 0, 10, 11, 0, 13, 14, 15, 3, 6, 12, 9])
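# Illustrative usage sketch (hypothetical example, not called anywhere):
# drawing a random skeleton in the 'biomech' ordering with the helpers below.
def _plot_skeleton_example():
    joints = np.random.rand(21, 3) * 100.0
    fig = plt.figure()
    ax = fig.add_subplot(projection='3d')
    plot_skeleton(ax, joints, get_joint_parent('biomech'), color='g')
    plt.close(fig)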
def plot_skeleton(ax, joints, joint_parent, color, object_points=None):
if object_points is not None:
ax.scatter(object_points[:, 0], object_points[:, 1], object_points[:, 2], alpha=0.1, c='r')
ax.scatter(joints[:, 0], joints[:, 1], joints[:, 2], alpha=0.8, c='b')
b_start_loc = joints[joint_parent] # joints[biomech_joint_parent]
b_end_loc = joints
n_joint = 21
if len(joints) == 16:
n_joint = 16
# import pdb; pdb.set_trace()
for b in range(n_joint):
ax.plot([b_start_loc[b, 0], b_end_loc[b, 0]],
[b_start_loc[b, 1], b_end_loc[b, 1]],
[b_start_loc[b, 2], b_end_loc[b, 2]], color=color)
def visualise_skeleton(joints, object_points=None, joint_order='biomech', out_file=None,
color='g', title=None, show=False):
# Skeleton definition
mano_joint_parent = np.array([0, 0, 1, 2, 3, 0, 5, 6, 7, 0, 9, 10, 11, 0, 13, 14, 15, 0, 17, 18, 19])
biomech_joint_parent = np.array([0, 0, 0, 0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15])
halo_joint_parent = np.array([0, 0, 1, 2, 0, 4, 5, 0, 7, 8, 0, 10, 11, 0, 13, 14, 15, 3, 6, 12, 9])
    if joint_order == 'mano':
        joint_parent = mano_joint_parent
    elif joint_order == 'biomech':
        joint_parent = biomech_joint_parent
    elif joint_order == 'halo':
        joint_parent = halo_joint_parent
    else:
        raise ValueError('Unknown joint order: %s' % joint_order)
# Create plot
fig = plt.figure(figsize=(13.5, 9))
if title is not None:
fig.suptitle(title)
# ax = fig.add_subplot(111, projection='3d')
# ax = fig.gca(projection=Axes3D.name)
# set_ax_limits(ax)
for i in range(6):
ax = fig.add_subplot(2, 3, i + 1, projection='3d')
set_ax_limits(ax)
ax.view_init(elev=10., azim=i * 60)
plot_skeleton(ax, joints, joint_parent, color, object_points=object_points)
if out_file is not None:
plt.savefig(out_file)
if show:
plt.show()
# if show or (out_file is not None):
plt.close(fig)
def display_mano_hand(hand_info, mano_faces=None, ax=None, alpha=0.2, show=True):
"""
Displays hand batch_idx in batch of hand_info, hand_info as returned by
generate_random_hand
"""
if ax is None:
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
verts, joints = hand_info['verts'], hand_info['joints']
rest_joints = hand_info['rest_joints']
# verts_joints_assoc = hand_info['verts_assoc']
# import pdb; pdb.set_trace()
visualize_bone = 13
# rest_verts = hand_info['rest_verts']
if mano_faces is None:
ax.scatter(verts[:, 0], verts[:, 1], verts[:, 2], alpha=0.1)
else:
mesh = Poly3DCollection(verts[mano_faces], alpha=alpha)
face_color = (141 / 255, 184 / 255, 226 / 255)
edge_color = (50 / 255, 50 / 255, 50 / 255)
mesh.set_edgecolor(edge_color)
mesh.set_facecolor(face_color)
ax.add_collection3d(mesh)
# print("Joints", joints)
print("joint shape", joints.shape)
ax.scatter(joints[:, 0], joints[:, 1], joints[:, 2], color='r')
# ax.scatter(joints[:16, 0], joints[:16, 1], joints[:16, 2], color='r')
ax.scatter(rest_joints[:, 0], rest_joints[:, 1], rest_joints[:, 2], color='g')
# ax.scatter(rest_joints[:4, 0], rest_joints[:4, 1], rest_joints[:4, 2], color='g')
# ax.scatter(rest_joints[4:, 0], rest_joints[4:, 1], rest_joints[4:, 2], color='b')
# visualize only some part
# seleceted = verts_joints_assoc[:-1] == visualize_bone
# ax.scatter(verts[seleceted, 0], verts[seleceted, 1], verts[seleceted, 2], color='black', alpha=0.5)
# cam_equal_aspect_3d(ax, verts.numpy())
cam_equal_aspect_3d(ax, verts)
# cam_equal_aspect_3d(ax, rest_joints.numpy())
if show:
plt.show()
def cam_equal_aspect_3d(ax, verts, flip_x=False):
"""
Centers view on cuboid containing hand and flips y and z axis
and fixes azimuth
"""
extents = np.stack([verts.min(0), verts.max(0)], axis=1)
sz = extents[:, 1] - extents[:, 0]
centers = np.mean(extents, axis=1)
maxsize = max(abs(sz))
r = maxsize / 2
if flip_x:
ax.set_xlim(centers[0] + r, centers[0] - r)
else:
ax.set_xlim(centers[0] - r, centers[0] + r)
# Invert y and z axis
ax.set_ylim(centers[1] + r, centers[1] - r)
ax.set_zlim(centers[2] + r, centers[2] - r)
# def visualize_voxels(voxels, out_file=None, show=False):
# r''' Visualizes voxel data.
# Args:
# voxels (tensor): voxel data
# out_file (string): output file
# show (bool): whether the plot should be shown
# '''
# # Use numpy
# voxels = np.asarray(voxels)
# # Create plot
# fig = plt.figure()
# ax = fig.gca(projection=Axes3D.name)
# voxels = voxels.transpose(2, 0, 1)
# ax.voxels(voxels, edgecolor='k')
# ax.set_xlabel('Z')
# ax.set_ylabel('X')
# ax.set_zlabel('Y')
# ax.view_init(elev=30, azim=45)
# if out_file is not None:
# plt.savefig(out_file)
# if show:
# plt.show()
# plt.close(fig)
# def visualize_transmatrix(voxels, out_file=None, show=False):
# r''' Visualizes voxel data.
# Args:
# voxels (tensor): voxel data
# out_file (string): output file
# show (bool): whether the plot should be shown
# '''
# # Use numpy
# voxels = np.asarray(voxels)
# # Create plot
# fig = plt.figure()
# ax = fig.gca(projection=Axes3D.name)
# voxels = voxels.transpose(2, 0, 1)
# ax.voxels(voxels, edgecolor='k')
# ax.set_xlabel('Z')
# ax.set_ylabel('X')
# ax.set_zlabel('Y')
# ax.view_init(elev=30, azim=45)
# if out_file is not None:
# plt.savefig(out_file)
# if show:
# plt.show()
# plt.close(fig)
# def visualize_pointcloud(points, normals=None,
# out_file=None, show=False):
# r''' Visualizes point cloud data.
# Args:
# points (tensor): point data
# normals (tensor): normal data (if existing)
# out_file (string): output file
# show (bool): whether the plot should be shown
# '''
# # Use numpy
# points = np.asarray(points)
# # Create plot
# fig = plt.figure()
# ax = fig.gca(projection=Axes3D.name)
# ax.scatter(points[:, 2], points[:, 0], points[:, 1])
# if normals is not None:
# ax.quiver(
# points[:, 2], points[:, 0], points[:, 1],
# normals[:, 2], normals[:, 0], normals[:, 1],
# length=0.1, color='k'
# )
# ax.set_xlabel('Z')
# ax.set_ylabel('X')
# ax.set_zlabel('Y')
# ax.set_xlim(-0.5, 0.5)
# ax.set_ylim(-0.5, 0.5)
# ax.set_zlim(-0.5, 0.5)
# ax.view_init(elev=30, azim=45)
# if out_file is not None:
# plt.savefig(out_file)
# if show:
# plt.show()
# plt.close(fig)
# def visualise_projection(
# self, points, world_mat, camera_mat, img, output_file='out.png'):
# r''' Visualizes the transformation and projection to image plane.
# The first points of the batch are transformed and projected to the
# respective image. After performing the relevant transformations, the
# visualization is saved in the provided output_file path.
# Arguments:
# points (tensor): batch of point cloud points
# world_mat (tensor): batch of matrices to rotate pc to camera-based
# coordinates
# camera_mat (tensor): batch of camera matrices to project to 2D image
# plane
# img (tensor): tensor of batch GT image files
# output_file (string): where the output should be saved
# '''
# points_transformed = common.transform_points(points, world_mat)
# points_img = common.project_to_camera(points_transformed, camera_mat)
# pimg2 = points_img[0].detach().cpu().numpy()
# image = img[0].cpu().numpy()
# plt.imshow(image.transpose(1, 2, 0))
# plt.plot(
# (pimg2[:, 0] + 1)*image.shape[1]/2,
# (pimg2[:, 1] + 1) * image.shape[2]/2, 'x')
# plt.savefig(output_file) | 11,311 | 34.684543 | 106 | py |
Im2Hands | Im2Hands-main/dependencies/halo/data/inference.py | import os
import logging
from torch.utils import data
import numpy as np
import yaml
import pickle
import torch
import trimesh
from models.data.input_helpers import random_rotate, rot_mat_by_angle
logger = logging.getLogger(__name__)
class InferenceDataset(data.Dataset):
''' Dataset class for inference. Only object meshes are available
'''
def __init__(self, dataset_folder, input_helpers=None, split=None, sample_surface=True,
no_except=True, transforms=None, return_idx=False, use_bps=False, random_rotate=False):
        ''' Initialization of the inference dataset (object meshes only).
Args:
dataset_folder (str): dataset folder
input_helpers dict[(callable)]: helpers for data loading
split (str): which split is used
no_except (bool): no exception
transform dict{(callable)}: transformation applied to data points
            return_idx (bool): whether to return index
'''
# Attributes
self.dataset_folder = dataset_folder
self.input_helpers = input_helpers
self.split = split
self.no_except = no_except
self.transforms = transforms
self.return_idx = return_idx
self.use_bps = use_bps
self.random_rotate = random_rotate
self.sample_surface = sample_surface
self.pc_sample = 600
# # Get all models
split_file = os.path.join(dataset_folder, 'datalist.txt')
with open(split_file, 'r') as f:
self.object_names = f.read().strip().split('\n')
# Use groundtruth rots and trans
self.use_gt_rots = False
object_names_full = []
obj_rots = []
obj_trans = []
if self.use_gt_rots:
for obj in self.object_names:
rot_file = os.path.join(dataset_folder, '..', 'grab_test', os.path.splitext(obj)[0])
with open(rot_file + '.pickle', 'rb') as p_file:
obj_meta = pickle.load(p_file)
for idx in range(len(obj_meta['rotmat'])):
object_names_full.append(obj)
obj_rots.append(obj_meta['rotmat'][idx])
obj_trans.append(obj_meta['trans_obj'][idx])
else:
n_sample = 10 # for ho3d # 5 for obman
for obj in self.object_names:
for i in range(n_sample):
object_names_full.append(obj)
self.object_names = object_names_full
self.obj_rots = obj_rots
self.obj_trans = obj_trans
self.use_inside_points = False
if self.use_inside_points:
# Load inside points
sampled_point_path = '../data/grab/test_sample_vol.npz'
sample_points = np.load(sampled_point_path)
points_dict = {}
for k in sample_points.files:
points_dict[k] = sample_points[k]
self.sample_points = points_dict
def __len__(self):
''' Returns the length of the dataset.
'''
return len(self.object_names) # len(self.models)
def __getitem__(self, idx):
''' Returns an item of the dataset.
Args:
idx (int): ID of data point
'''
filename = os.path.join(self.dataset_folder, self.object_names[idx])
data = {}
if self.sample_surface:
input_mesh = trimesh.load(filename, process=False)
surface_points = trimesh.sample.sample_surface(input_mesh, self.pc_sample)[0]
surface_points = torch.from_numpy(surface_points).float()
        else:
            # surface_points = torch.from_numpy(load_points(filename)).float()
            raise NotImplementedError('sample_surface=False needs a point loader here')
if self.random_rotate:
# data['object_points'], data['hand_joints'], data['rot_mat'] = random_rotate(data['object_points'], data['hand_joints'])
x_angle = np.random.rand() * np.pi * 2.0
y_angle = np.random.rand() * np.pi * 2.0
z_angle = np.random.rand() * np.pi * 2.0
data['rot_mat'] = rot_mat_by_angle(x_angle, y_angle, z_angle)
# data['rot_mat'] = random_rotate(data['object_points'], data['hand_joints'])
else:
# data['rot_mat'] = self.obj_rots[idx]
data['rot_mat'] = np.eye(3)
rotated_surface_points = torch.matmul(torch.from_numpy(data['rot_mat']).float(), surface_points.unsqueeze(-1)).squeeze(-1)
surface_points = rotated_surface_points
data['mesh_path'] = filename
data['hand_joints'] = np.zeros([21, 3])
# data['rot_mat'] = np.eye(3)
data['object_points'] = surface_points
if self.use_bps:
data['scale'] = 100.0
data['obj_center'] = 0.0
# import pdb; pdb.set_trace()
bps_name = os.path.join(self.dataset_folder, 'bps_' + os.path.splitext(self.object_names[idx])[0] + '.npy')
obj_bps = np.load(bps_name)
data['object_bps'] = torch.from_numpy(obj_bps)
else:
data['scale'] = 100.0
data['obj_center'] = np.array([0., 0., 0.])
# data['object_points'], data['obj_center'] = self.obj_to_origin(surface_points)
data['object_points'], data['obj_center'] = self.scale_to_cm(data['object_points'], data['obj_center'], data['scale'])
# Get insdie points
if self.use_inside_points:
obj_name_no_ext = os.path.splitext(self.object_names[idx])[0]
inside_points = self.sample_points[obj_name_no_ext]
keep_idx = np.random.choice(len(inside_points), 2000, replace=False) # 600
inside_points = inside_points[keep_idx]
# inside_points = np.inside_points * self.rot_mats[idx].T
inside_points = np.matmul(data['rot_mat'], np.expand_dims(inside_points, -1)).squeeze(-1)
data['inside_points'] = inside_points * 100.0 - data['obj_center']
if self.return_idx:
data['idx'] = idx
return data
def obj_to_origin(self, object_points):
# obj_center = object_points.mean(axis=0)
min_p, _ = object_points.min(0)
max_p, _ = object_points.max(0)
obj_center = (max_p + min_p) / 2.0
return object_points - obj_center, obj_center
def scale_to_cm(self, object_points, obj_center, scale):
return object_points * scale, obj_center * scale
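    def _center_and_scale_check(self, object_points):
        # Illustrative sketch (added for clarity): after obj_to_origin, the
        # bounding-box centre of the cloud sits at the origin, and scale_to_cm
        # then converts metres to centimetres.
        centered, center = self.obj_to_origin(object_points)
        bbox_mid = (centered.min(0)[0] + centered.max(0)[0]) / 2.0
        assert torch.allclose(bbox_mid, torch.zeros(3), atol=1e-6)
        scaled, _ = self.scale_to_cm(centered, center, 100.0)
        assert torch.allclose(scaled, centered * 100.0)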
def get_model_dict(self, idx):
return self.object_names[idx]
def test_model_complete(self, category, model):
''' Tests if model is complete.
Args:
model (str): modelname
'''
model_path = os.path.join(self.dataset_folder, category, model)
files = os.listdir(model_path)
        # NOTE: self.fields is never set on this class; this check is a
        # leftover from the original occupancy-network dataset code.
        for field_name, field in self.fields.items():
if not field.check_complete(files):
                logger.warning('Field "%s" is incomplete: %s'
                               % (field_name, model_path))
return False
return True
| 7,009 | 38.382022 | 133 | py |
Im2Hands | Im2Hands-main/dependencies/halo/data/utils.py | import os
import numpy as np
from torch.utils import data
def collate_remove_none(batch):
''' Collater that puts each data field into a tensor with outer dimension
batch size.
Args:
batch: batch
'''
batch = list(filter(lambda x: x is not None, batch))
return data.dataloader.default_collate(batch)
def worker_init_fn(worker_id):
''' Worker init function to ensure true randomness.
'''
random_data = os.urandom(4)
base_seed = int.from_bytes(random_data, byteorder="big")
np.random.seed(base_seed + worker_id) | 568 | 24.863636 | 77 | py |
Im2Hands | Im2Hands-main/dependencies/halo/data/obman.py | import os
import logging
from torch.utils import data
import numpy as np
import yaml
import pickle
import torch
from scipy.spatial import distance
from models.data.input_helpers import random_rotate
from models.utils import visualize as vis
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
logger = logging.getLogger(__name__)
class ObmanDataset(data.Dataset):
''' Obman dataset class.
'''
def __init__(self, dataset_folder, input_helpers=None, split=None,
no_except=True, transforms=None, return_idx=False, use_bps=False, random_rotate=False):
        ''' Initialization of the 3D articulated hand dataset.
Args:
dataset_folder (str): dataset folder
input_helpers dict[(callable)]: helpers for data loading
split (str): which split is used
no_except (bool): no exception
transform dict{(callable)}: transformation applied to data points
            return_idx (bool): whether to return index
'''
# Attributes
self.dataset_folder = dataset_folder
self.input_helpers = input_helpers
self.split = split
self.no_except = no_except
self.transforms = transforms
self.return_idx = return_idx
self.use_bps = use_bps
self.obman_data = False
if self.use_bps:
split_file = os.path.join(dataset_folder, split + '_bps.npz')
all_data = np.load(split_file)
self.object_bps = all_data["object_bps"]
elif self.obman_data:
split_file = os.path.join(dataset_folder, split + '.pkl')
with open(split_file, "rb") as data_file:
all_data = pickle.load(data_file)
self.hand_joints = all_data["hand_joints3d"]
self.object_points = all_data["object_points3d"]
else:
split_file = os.path.join(dataset_folder, split + '_hand.npz')
all_data = np.load(split_file)
self.hand_joints = all_data["hand_joints3d"]
self.object_points = all_data["object_points3d"]
self.closest_point_idx = all_data["closest_obj_point_idx"]
self.closest_point_dist = all_data["closest_obj_point_dist"]
self.obj_names = all_data["obj_name"]
self.rot_mats = all_data["rot_mat"]
# load sample point inside
sample_point_file = os.path.join(dataset_folder, split + '_sample_vol.npz')
sample_points = np.load(sample_point_file)
points_dict = {}
for k in sample_points.files:
points_dict[k] = sample_points[k]
self.sample_points = points_dict
self.hand_verts = all_data["hand_verts"]
if split == 'val':
val_keep = 1500
if len(self.hand_joints) > val_keep:
keep_idx = np.random.choice(len(self.hand_joints), val_keep, replace=False)
if self.obman_data:
keep_hand_joints = []
keep_object_points = []
for idx in keep_idx:
keep_hand_joints.append(self.hand_joints[idx])
keep_object_points.append(self.object_points[idx])
self.hand_joints = keep_hand_joints
self.object_points = keep_object_points
else:
self.hand_joints = self.hand_joints[keep_idx]
self.object_points = self.object_points[keep_idx]
self.closest_point_idx = self.closest_point_idx[keep_idx]
self.closest_point_dist = self.closest_point_dist[keep_idx]
self.hand_verts = self.hand_verts[keep_idx]
if self.use_bps:
self.object_bps = self.object_bps[keep_idx]
def __len__(self):
''' Returns the length of the dataset.
'''
return len(self.hand_joints) # len(self.models)
def __getitem__(self, idx):
''' Returns an item of the dataset.
Args:
idx (int): ID of data point
'''
data = {}
data['mesh_path'] = ''
data['object_points'] = self.object_points[idx]
data['hand_joints'] = self.hand_joints[idx]
data['object_points'], data['hand_joints'], data['obj_center'] = self.obj_to_origin(data['object_points'], data['hand_joints'])
if not self.obman_data:
data['closest_point_idx'] = self.closest_point_idx[idx]
data['closest_point_dist'] = self.closest_point_dist[idx]
# hand verts
data['hand_verts'] = self.hand_verts[idx] - data['obj_center']
# Get points sampled inside the mesh for occupancy query
inside_points = self.sample_points[self.obj_names[idx]]
keep_idx = np.random.choice(len(inside_points), 600, replace=False)
inside_points = inside_points[keep_idx]
# inside_points = np.inside_points * self.rot_mats[idx].T
inside_points = np.matmul(self.rot_mats[idx], np.expand_dims(inside_points, -1)).squeeze(-1)
data['inside_points'] = inside_points - data['obj_center']
if self.use_bps:
data['object_bps'] = self.object_bps[idx]
else:
data = self.scale_to_cm(data)
data = self.gen_refine_training_data(data)
if self.return_idx:
data['idx'] = idx
return data
def gen_refine_training_data(self, data):
hand_joints, obj_points = data['hand_joints'], data['object_points']
        mu = 0.0
        scale = 0.5  # noise std; joints are in cm at this point, so 0.5 = 5 mm
noise = np.random.normal(mu, scale, 15 * 3)
noise = noise.reshape((15, 3))
noisy_joints = hand_joints.copy()
noisy_joints[6:] = noisy_joints[6:] + noise
        trans_noise = np.random.rand() * 2.0  # global translation noise, up to 2 cm (an earlier version used 0.5)
noisy_joints = noisy_joints + trans_noise
data['noisy_joints'] = noisy_joints
        tip_idx = np.array([4, 8, 12, 16, 20])  # fingertip joint indices (currently unused below)
        p2p_dist = distance.cdist(noisy_joints, obj_points)
        p2p_dist = p2p_dist.min(axis=1)  # per-joint distance to the closest object point
        data['tip_dists'] = p2p_dist  # despite the name, this holds distances for all joints
return data
def obj_to_origin(self, object_points, hand_joints):
min_p = object_points.min(0)
max_p = object_points.max(0)
obj_center = (max_p + min_p) / 2.0
# print("obj center", obj_center)
return object_points - obj_center, hand_joints - obj_center, obj_center
def scale_to_cm(self, data_dict):
scale = 100.0
data_dict['object_points'] = data_dict['object_points'] * scale
data_dict['hand_joints'] = data_dict['hand_joints'] * scale
# Hand verts
data_dict['hand_verts'] = data_dict['hand_verts'] * scale
return data_dict
    def get_model_dict(self, idx):
        # The original returned self.models[idx], but self.models is never set
        # in this class; obj_names holds the per-sample object identifiers.
        return self.obj_names[idx]
    def test_model_complete(self, category, model):
        ''' Tests if model is complete.
        Args:
            category (str): category name
            model (str): model name
        '''
model_path = os.path.join(self.dataset_folder, category, model)
files = os.listdir(model_path)
for field_name, field in self.fields.items():
if not field.check_complete(files):
                logger.warning('Field "%s" is incomplete: %s'
                               % (field_name, model_path))
return False
return True
| 7,466 | 38.094241 | 135 | py |
Im2Hands | Im2Hands-main/dependencies/airnets/AIRnet.py | '''
AIR-Nets
Author: Simon Giebenhain
Code: https://github.com/SimonGiebenhain/AIR-Nets
'''
import torch
import torch.nn as nn
import torch.nn.functional as functional
import numpy as np
from time import time
import torch.nn.functional as F
import os
import math
import dependencies.airnets.pointnet2_ops_lib.pointnet2_ops.pointnet2_utils as pointnet2_utils
def fibonacci_sphere(samples=1):
'''
Code from https://stackoverflow.com/questions/9600801/evenly-distributing-n-points-on-a-sphere
Args:
samples: number of samples
Returns:
Points evenly distributed on the unit sphere
'''
points = []
phi = math.pi * (3. - math.sqrt(5.)) # golden angle in radians
for i in range(samples):
y = 1 - (i / float(samples - 1)) * 2 # y goes from 1 to -1
radius = math.sqrt(1 - y * y) # radius at y
theta = phi * i # golden angle increment
x = math.cos(theta) * radius
z = math.sin(theta) * radius
points.append(np.array([x, y, z]))
return np.stack(points, axis=0)
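def _check_fibonacci_sphere(samples=100):
    """Added sanity-check sketch (not in the original code): by construction
    x^2 + y^2 + z^2 = 1, so every returned point lies on the unit sphere."""
    pts = fibonacci_sphere(samples)
    assert np.allclose(np.linalg.norm(pts, axis=1), 1.0)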
def square_distance(src, dst):
"""
Code from: https://github.com/qq456cvb/Point-Transformers/blob/master/pointnet_util.py
Calculate Euclid distance between each two points.
src^T * dst = xn * xm + yn * ym + zn * zm;
sum(src^2, dim=-1) = xn*xn + yn*yn + zn*zn;
sum(dst^2, dim=-1) = xm*xm + ym*ym + zm*zm;
dist = (xn - xm)^2 + (yn - ym)^2 + (zn - zm)^2
= sum(src**2,dim=-1)+sum(dst**2,dim=-1)-2*src^T*dst
Input:
src: source points, [B, N, C]
dst: target points, [B, M, C]
Output:
dist: per-point square distance, [B, N, M]
"""
return torch.sum((src[:, :, None] - dst[:, None]) ** 2, dim=-1)
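def _demo_square_distance():
    """Added shape sketch (illustrative): src [B, N, C] and dst [B, M, C]
    produce a [B, N, M] matrix of pairwise squared distances."""
    src = torch.zeros(2, 5, 3)
    dst = torch.ones(2, 7, 3)
    d = square_distance(src, dst)
    # each coordinate differs by 1, summed over C=3 dimensions -> 3.0
    assert d.shape == (2, 5, 7) and torch.allclose(d, torch.full_like(d, 3.0))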
def index_points(points, idx):
"""
Code from: https://github.com/qq456cvb/Point-Transformers/blob/master/pointnet_util.py
Input:
points: input points data, [B, N, C]
idx: sample index data, [B, S, [K]]
Return:
new_points:, indexed points data, [B, S, [K], C]
"""
raw_size = idx.size()
idx = idx.reshape(raw_size[0], -1)
res = torch.gather(points, 1, idx[..., None].expand(-1, -1, points.size(-1)))
return res.reshape(*raw_size, -1)
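def _demo_index_points():
    """Added gather sketch (illustrative): pick rows 0 and 2 of each cloud."""
    points = torch.arange(12.).reshape(1, 4, 3)   # [B=1, N=4, C=3]
    idx = torch.tensor([[0, 2]])                  # [B=1, S=2]
    out = index_points(points, idx)               # -> [1, 2, 3]
    assert out.shape == (1, 2, 3) and torch.equal(out[0, 1], points[0, 2])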
class TransitionDown(nn.Module):
"""
High-level wrapper for different downsampling mechanisms (also called set abstraction mechanisms).
    In general the point cloud is subsampled to produce a lower-cardinality point cloud (usually using farthest point
    sampling (FPS)). Around each of the resulting points (called central points here) a local neighborhood is
    formed, from which features are aggregated. How features are aggregated can differ; usually this is based on
    max-pooling. This work introduces an attention-based alternative.
    Attributes:
        npoint: desired number of points for the output point cloud
        nneigh: size of neighborhood
        dim: input and internal feature dimensionality
        type: decides which method to use; options are 'attentive' and 'maxpool'
"""
def __init__(self, npoint, nneighbor, dim, type='attentive') -> None:
super().__init__()
if type == 'attentive':
self.sa = TransformerSetAbstraction(npoint, nneighbor, dim)
elif type == 'maxpool':
self.sa = PointNetSetAbstraction(npoint, nneighbor, dim, dim)
else:
raise ValueError('Set Abstraction type ' + type + ' unknown!')
def forward(self, xyz, feats):
"""
Executes the downsampling (set abstraction)
:param xyz: positions of points
:param feats: features of points
:return: downsampled version, tuple of (xyz_new, feats_new)
"""
ret = self.sa(xyz, feats)
return ret
class TransformerBlock(nn.Module):
"""
Module for local and global vector self attention, as proposed in the Point Transformer paper.
Attributes:
d_model (int): number of input, output and internal dimensions
k (int): number of points among which local attention is calculated
pos_only (bool): When set to True only positional features are used
group_all (bool): When true full instead of local attention is calculated
"""
def __init__(self, d_model, k, pos_only=False, group_all=False) -> None:
super().__init__()
self.pos_only = pos_only
self.bn = nn.BatchNorm1d(d_model)
self.fc_delta = nn.Sequential(
nn.Linear(3, d_model),
nn.ReLU(),
nn.Linear(d_model, d_model)
)
self.fc_gamma = nn.Sequential(
nn.Linear(d_model, d_model),
nn.ReLU(),
nn.Linear(d_model, d_model)
)
self.w_qs = nn.Linear(d_model, d_model, bias=False)
self.w_ks = nn.Linear(d_model, d_model, bias=False)
self.w_vs = nn.Linear(d_model, d_model, bias=False)
self.k = k
self.group_all = group_all
def forward(self, xyz, feats=None):
"""
:param xyz [b x n x 3]: positions in point cloud
:param feats [b x n x d]: features in point cloud
:return:
new_features [b x n x d]:
"""
with torch.no_grad():
# full attention
if self.group_all:
b, n, _ = xyz.shape
knn_idx = torch.arange(n, device=xyz.device).unsqueeze(0).unsqueeze(1).repeat(b, n, 1)
# local attention using KNN
else:
dists = square_distance(xyz, xyz)
knn_idx = dists.argsort()[:, :, :self.k] # b x n x k
knn_xyz = index_points(xyz, knn_idx)
if not self.pos_only:
ori_feats = feats
x = feats
q_attn = self.w_qs(x)
k_attn = index_points(self.w_ks(x), knn_idx)
v_attn = index_points(self.w_vs(x), knn_idx)
pos_encode = self.fc_delta(xyz[:, :, None] - knn_xyz) # b x n x k x d
if not self.pos_only:
attn = self.fc_gamma(q_attn[:, :, None] - k_attn + pos_encode)
else:
attn = self.fc_gamma(pos_encode)
attn = functional.softmax(attn, dim=-2) # b x n x k x d
if not self.pos_only:
res = torch.einsum('bmnf,bmnf->bmf', attn, v_attn + pos_encode)
else:
res = torch.einsum('bmnf,bmnf->bmf', attn, pos_encode)
if not self.pos_only:
res = res + ori_feats
res = self.bn(res.permute(0, 2, 1)).permute(0, 2, 1)
return res
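def _demo_transformer_block():
    """Added usage sketch (assumed shapes; not part of the original code):
    local vector self-attention maps per-point features to per-point features."""
    block = TransformerBlock(d_model=32, k=8).eval()  # eval() freezes BatchNorm stats
    xyz, feats = torch.rand(2, 64, 3), torch.rand(2, 64, 32)
    out = block(xyz, feats)
    assert out.shape == (2, 64, 32)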
class CrossTransformerBlock(nn.Module):
def __init__(self, dim_inp, dim, nneigh=7, reduce_dim=True, separate_delta=True):
super().__init__()
# dim_inp = dim
# dim = dim # // 2
self.dim = dim
self.nneigh = nneigh
self.separate_delta = separate_delta
self.fc_delta = nn.Sequential(
nn.Linear(3, dim),
nn.ReLU(),
nn.Linear(dim, dim)
)
#if self.separate_delta:
# self.fc_delta2 = nn.Sequential(
# nn.Linear(3, dim),
# nn.ReLU(),
# nn.Linear(dim, dim)
#
# )
self.fc_gamma = nn.Sequential(
nn.Linear(dim, dim),
nn.ReLU(),
nn.Linear(dim, dim)
)
self.w_k_global = nn.Linear(dim_inp, dim, bias=False)
self.w_v_global = nn.Linear(dim_inp, dim, bias=False)
self.w_qs = nn.Linear(dim_inp, dim, bias=False)
self.w_ks = nn.Linear(dim_inp, dim, bias=False)
self.w_vs = nn.Linear(dim_inp, dim, bias=False)
if not reduce_dim:
self.fc = nn.Linear(dim, dim_inp)
self.reduce_dim = reduce_dim
# xyz_q: B x n_queries x 3
# lat_rep: B x dim
# xyz: B x n_anchors x 3,
# points: B x n_anchors x dim
def forward(self, xyz_q, lat_rep, xyz, points):
with torch.no_grad():
dists = square_distance(xyz_q, xyz)
## knn group
knn_idx = dists.argsort()[:, :, :self.nneigh] # b x nQ x k
#print(knn_idx.shape)
#knn = KNN(k=self.nneigh, transpose_mode=True)
#_, knn_idx = knn(xyz, xyz_q) # B x npoint x K
##
#print(knn_idx.shape)
b, nQ, _ = xyz_q.shape
# b, nK, dim = points.shape
if len(lat_rep.shape) == 2:
q_attn = self.w_qs(lat_rep).unsqueeze(1).repeat(1, nQ, 1)
k_global = self.w_k_global(lat_rep).unsqueeze(1).repeat(1, nQ, 1).unsqueeze(2)
v_global = self.w_v_global(lat_rep).unsqueeze(1).repeat(1, nQ, 1).unsqueeze(2)
else:
q_attn = self.w_qs(lat_rep)
k_global = self.w_k_global(lat_rep).unsqueeze(2)
v_global = self.w_v_global(lat_rep).unsqueeze(2)
k_attn = index_points(self.w_ks(points),
knn_idx) # b, nQ, k, dim # self.w_ks(points).unsqueeze(1).repeat(1, nQ, 1, 1)
k_attn = torch.cat([k_attn, k_global], dim=2)
v_attn = index_points(self.w_vs(points), knn_idx) # #self.w_vs(points).unsqueeze(1).repeat(1, nQ, 1, 1)
v_attn = torch.cat([v_attn, v_global], dim=2)
xyz = index_points(xyz, knn_idx) # xyz = xyz.unsqueeze(1).repeat(1, nQ, 1, 1)
pos_encode = self.fc_delta(xyz_q[:, :, None] - xyz) # b x nQ x k x dim
pos_encode = torch.cat([pos_encode, torch.zeros([b, nQ, 1, self.dim], device=pos_encode.device)],
dim=2) # b, nQ, k+1, dim
if self.separate_delta:
pos_encode2 = self.fc_delta(xyz_q[:, :, None] - xyz) # b x nQ x k x dim
pos_encode2 = torch.cat([pos_encode2, torch.zeros([b, nQ, 1, self.dim], device=pos_encode2.device)],
dim=2) # b, nQ, k+1, dim
else:
pos_encode2 = pos_encode
attn = self.fc_gamma(q_attn[:, :, None] - k_attn + pos_encode)
attn = functional.softmax(attn, dim=-2) # b x nQ x k+1 x dim
res = torch.einsum('bmnf,bmnf->bmf', attn, v_attn + pos_encode2) # b x nQ x dim
if not self.reduce_dim:
res = self.fc(res)
return res
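def _demo_cross_transformer_block():
    """Added usage sketch (assumed shapes): each query attends to its nneigh
    nearest anchors plus one global token derived from the latent code."""
    ct = CrossTransformerBlock(dim_inp=64, dim=48, nneigh=7)
    xyz_q = torch.rand(2, 100, 3)                            # query positions
    lat_rep = torch.rand(2, 64)                              # global latent vector
    xyz, feats = torch.rand(2, 20, 3), torch.rand(2, 20, 64)
    out = ct(xyz_q, lat_rep, xyz, feats)
    assert out.shape == (2, 100, 48)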
class ElementwiseMLP(nn.Module):
"""
Simple MLP, consisting of two linear layers, a skip connection and batch norm.
More specifically: linear -> BN -> ReLU -> linear -> BN -> ReLU -> resCon -> BN
Sorry for that many norm layers. I'm sure not all are needed!
At some point it was just too late to change it to something proper!
"""
def __init__(self, dim):
super().__init__()
self.conv1 = nn.Conv1d(dim, dim, 1)
self.bn1 = nn.BatchNorm1d(dim)
self.conv2 = nn.Conv1d(dim, dim, 1)
self.bn2 = nn.BatchNorm1d(dim)
self.bn3 = nn.BatchNorm1d(dim)
def forward(self, x):
"""
:param x: [B x n x d]
:return: [B x n x d]
"""
x = x.permute(0, 2, 1)
return self.bn3(x + F.relu(self.bn2(self.conv2(F.relu(self.bn1(self.conv1(x))))))).permute(0, 2, 1)
class ResnetBlockFC(nn.Module):
''' Fully connected ResNet Block class.
Copied from https://github.com/autonomousvision/convolutional_occupancy_networks
Args:
size_in (int): input dimension
size_out (int): output dimension
size_h (int): hidden dimension
'''
def __init__(self, size_in, size_out=None, size_h=None):
super().__init__()
# Attributes
if size_out is None:
size_out = size_in
if size_h is None:
size_h = min(size_in, size_out)
self.size_in = size_in
self.size_h = size_h
self.size_out = size_out
# Submodules
self.fc_0 = nn.Linear(size_in, size_h)
self.fc_1 = nn.Linear(size_h, size_out)
self.actvn = nn.ReLU()
if size_in == size_out:
self.shortcut = None
else:
self.shortcut = nn.Linear(size_in, size_out, bias=False)
# Initialization
nn.init.zeros_(self.fc_1.weight)
def forward(self, x):
net = self.fc_0(self.actvn(x))
dx = self.fc_1(self.actvn(net))
if self.shortcut is not None:
x_s = self.shortcut(x)
else:
x_s = x
return x_s + dx
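# Added note: ResnetBlockFC(128) maps [..., 128] -> [..., 128] with an identity
# shortcut, while ResnetBlockFC(128, 64) learns a linear shortcut to change the
# width. Since fc_1 is zero-initialized, the block starts out as (roughly) the
# shortcut mapping alone.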
class PointNetSetAbstraction(nn.Module):
"""
Set Abstraction Module, as used in PointNet++
Uses FPS for downsampling, kNN groupings and maxpooling to abstract the group/neighborhood
Attributes:
npoint (int): Output cardinality
nneigh (int): Size of local grouings/neighborhoods
in_channel (int): input dimensionality
dim (int): internal and output dimensionality
"""
def __init__(self, npoint, nneigh, in_channel, dim):
super(PointNetSetAbstraction, self).__init__()
self.npoint = npoint
self.nneigh = nneigh
self.fc1 = nn.Linear(in_channel, dim)
self.conv1 = nn.Conv1d(dim, dim, 1)
self.conv2 = nn.Conv1d(dim, dim, 1)
self.bn1 = nn.BatchNorm1d(dim)
self.bn2 = nn.BatchNorm1d(dim)
self.bn = nn.BatchNorm1d(dim)
def forward(self, xyz, points):
"""
Input:
xyz: input points position data, [B, N, C]
points: input points data, [B, N, C]
Return:
new_xyz: sampled points position data, [B, S, C]
new_points_concat: sample points feature data, [B, S, D']
"""
with torch.no_grad():
fps_idx = pointnet2_utils.furthest_point_sample(xyz, self.npoint).long()
new_xyz = index_points(xyz, fps_idx)
points = self.fc1(points)
points_ori = index_points(points, fps_idx)
points = points.permute(0, 2, 1)
points = points + F.relu(self.bn2(self.conv2(F.relu(self.bn1(self.conv1(points))))))
points = points.permute(0, 2, 1)
with torch.no_grad():
dists = square_distance(new_xyz, xyz) # B x npoint x N
idx = dists.argsort()[:, :, :self.nneigh] # B x npoint x K
grouped_points = index_points(points, idx)
new_points = points_ori + torch.max(grouped_points, 2)[0]
new_points = self.bn(new_points.permute(0, 2, 1)).permute(0, 2, 1)
return new_xyz, new_points
#TODO: can I share some code with PTB??
class TransformerSetAbstraction(nn.Module):
"""
Newly proposed attention based set abstraction module.
Uses cross attention from central point to its neighbors instead of maxpooling.
Attributes:
npoint (int): Output cardinality of point cloud
nneigh (int): size of neighborhoods
dim (int): input, internal and output dimensionality
"""
def __init__(self, npoint, nneigh, dim):
super(TransformerSetAbstraction, self).__init__()
self.npoint = npoint
self.nneigh = nneigh
self.bnorm0 = nn.BatchNorm1d(dim)
self.bnorm1 = nn.BatchNorm1d(dim)
self.bnorm2 = nn.BatchNorm1d(dim)
self.bn1 = nn.BatchNorm1d(dim)
self.conv1 = nn.Conv1d(dim, dim, 1)
self.conv2 = nn.Conv1d(dim, dim, 1)
self.fc_delta1 = nn.Sequential(
nn.Linear(3, dim),
nn.ReLU(),
nn.Linear(dim, dim)
)
self.fc_gamma1 = nn.Sequential(
nn.Linear(dim, dim),
nn.ReLU(),
nn.Linear(dim, dim)
)
self.fc_gamma2 = nn.Sequential(
nn.Linear(dim, dim),
nn.ReLU(),
nn.Linear(dim, dim)
)
self.w_qs = nn.Linear(dim, dim, bias=False)
self.w_ks = nn.Linear(dim, dim, bias=False)
self.w_vs = nn.Linear(dim, dim, bias=False)
self.w_qs2 = nn.Linear(dim, dim, bias=False)
self.w_ks2 = nn.Linear(dim, dim, bias=False)
self.w_vs2 = nn.Linear(dim, dim, bias=False)
def forward(self, xyz, points):
"""
        Input: featurized point clouds of cardinality N
xyz: input points position data, [B, N, 3]
points: input points data, [B, N, dim]
Return: downsampled point cloud of cardinality npoint
new_xyz: sampled points position data, [B, npoint, 3]
new_points_concat: sample points feature data, [B, npoint, dim]
"""
B, N, C = xyz.shape
with torch.no_grad():
fps_idx = pointnet2_utils.furthest_point_sample(xyz, self.npoint)
new_xyz = index_points(xyz, fps_idx.long())
with torch.no_grad():
dists = square_distance(new_xyz, xyz) # B x npoint x N
idx = dists.argsort()[:, :, :self.nneigh] # B x npoint x K
q_attn = index_points(self.w_qs(points), fps_idx.long())
k_attn = index_points(self.w_ks(points), idx)
v_attn = index_points(self.w_vs(points), idx)
grouped_xyz = index_points(xyz, idx)
pos_encode = self.fc_delta1(grouped_xyz - new_xyz.view(B, self.npoint, 1, C)) # b x n x k x f
attn = self.fc_gamma1(q_attn[:, :, None] - k_attn + pos_encode)
attn = functional.softmax(attn, dim=-2) # b x n x k x f
res1 = torch.einsum('bmnf,bmnf->bmf', attn, v_attn + pos_encode)
res1 = res1 + self.conv2(F.relu(self.bn1(self.conv1(res1.permute(0, 2, 1))))).permute(0, 2, 1)
res1 = self.bnorm0(res1.permute(0, 2, 1)).permute(0, 2, 1)
q_attn = self.w_qs2(res1)
k_attn = index_points(self.w_ks2(points), idx)
v_attn = index_points(self.w_vs2(points), idx)
attn = self.fc_gamma2(q_attn[:, :, None] - k_attn + pos_encode)
attn = functional.softmax(attn, dim=-2) # b x n x k x f
res2 = torch.einsum('bmnf,bmnf->bmf', attn, v_attn + pos_encode)
new_points = self.bnorm1((res1 + res2).permute(0, 2, 1)).permute(0, 2, 1)
new_points = new_points + index_points(points, fps_idx.long())
new_points = self.bnorm2(new_points.permute(0, 2, 1)).permute(0, 2, 1)
return new_xyz, new_points
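# Added usage sketch (illustrative values; requires the CUDA pointnet2_ops
# extension for furthest point sampling):
#   sa = TransformerSetAbstraction(npoint=256, nneigh=16, dim=64).cuda()
#   xyz, feats = torch.rand(2, 1024, 3).cuda(), torch.rand(2, 1024, 64).cuda()
#   new_xyz, new_feats = sa(xyz, feats)   # -> [2, 256, 3], [2, 256, 64]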
class PointTransformerEncoderV2(nn.Module):
"""
AIR-Net encoder.
Attributes:
npoints_per_layer [int]: cardinalities of point clouds for each layer
nneighbor int: number of neighbors in local vector attention (also in TransformerSetAbstraction)
nneighbor_reduced int: number of neighbors in very first TransformerBlock
nfinal_transformers int: number of full attention layers
d_transformer int: dimensionality of model
d_reduced int: dimensionality of very first layers
full_SA bool: if False, local self attention is used in final layers
has_features bool: True, when input has signed-distance value for each point
"""
def __init__(self, npoints_per_layer, nneighbor, nneighbor_reduced, nfinal_transformers,
d_transformer, d_reduced,
full_SA=False, has_features=False):
super().__init__()
self.d_reduced = d_reduced
self.d_transformer = d_transformer
self.has_features = has_features
self.fc_middle = nn.Sequential(
nn.Linear(d_transformer, d_transformer),
nn.ReLU(),
nn.Linear(d_transformer, d_transformer)
)
if self.has_features:
self.enc_sdf = nn.Linear(32+2, d_reduced)
self.transformer_begin = TransformerBlock(d_reduced, nneighbor_reduced,
pos_only=not self.has_features)
self.transition_downs = nn.ModuleList()
self.transformer_downs = nn.ModuleList()
self.elementwise = nn.ModuleList()
#self.transformer_downs2 = nn.ModuleList() #compensate
#self.elementwise2 = nn.ModuleList() # compensate
self.elementwise_extras = nn.ModuleList()
if not d_reduced == d_transformer:
self.fc1 = nn.Linear(d_reduced, d_transformer)
for i in range(len(npoints_per_layer) - 1):
old_npoints = npoints_per_layer[i]
new_npoints = npoints_per_layer[i + 1]
if i == 0:
dim = d_reduced
else:
dim = d_transformer
self.transition_downs.append(
TransitionDown(new_npoints, min(nneighbor, old_npoints), dim) # , type='single_step') #, type='maxpool')#, type='single_step')
)
self.elementwise_extras.append(ElementwiseMLP(dim))
self.transformer_downs.append(
TransformerBlock(dim, min(nneighbor, new_npoints))
)
self.elementwise.append(ElementwiseMLP(d_transformer))
#self.transformer_downs2.append(
# TransformerBlock(dim, min(nneighbor, new_npoints))
#) # compensate
#self.elementwise2.append(ElementwiseMLP(dim)) # compensate
self.final_transformers = nn.ModuleList()
self.final_elementwise = nn.ModuleList()
for i in range(nfinal_transformers):
self.final_transformers.append(
TransformerBlock(d_transformer, 2 * nneighbor, group_all=full_SA)
)
for i in range(nfinal_transformers):
self.final_elementwise.append(
ElementwiseMLP(dim=d_transformer)
)
def forward(self, xyz, intermediate_out_path=None):
"""
:param xyz [B x n x 3] (or [B x n x 4], but then has_features=True): input point cloud
        :param intermediate_out_path: path where the point cloud is stored after every deformation
        :return: global latent representation [b x d_transformer]
                 xyz [B x npoints_per_layer[-1] x 3]: anchor positions
                 feats [B x npoints_per_layer[-1] x d_transformer]: local latent vectors
"""
if intermediate_out_path is not None:
intermediates = {}
intermediates['Input'] = xyz[0, :, :].cpu().numpy()
if self.has_features:
#print(xyz[:, :, 3:].shape)
feats = self.enc_sdf(xyz[:, :, 3:])
xyz = xyz[:, :, :3].contiguous()
feats = self.transformer_begin(xyz, feats)
else:
feats = self.transformer_begin(xyz)
for i in range(len(self.transition_downs)):
xyz, feats = self.transition_downs[i](xyz, feats)
if intermediate_out_path is not None:
intermediates['SetAbs{}'.format(i)] = xyz[0, :, :].cpu().numpy()
feats = self.elementwise_extras[i](feats)
feats = self.transformer_downs[i](xyz, feats)
if intermediate_out_path is not None:
intermediates['PTB{}'.format(i)] = xyz[0, :, :].cpu().numpy()
#feats = self.transformer_downs2[i](xyz, feats) #compensate: dense
#feats = self.elementwise2[i](feats) #compensate: dense
if i == 0 and not self.d_reduced == self.d_transformer:
feats = self.fc1(feats)
feats = self.elementwise[i](feats)
#feats = self.transformer_downs2[i](xyz, feats) #compensate: sparse
#feats = self.elementwise2[i](feats) #compensate: sparse
for i, att_block in enumerate(self.final_transformers):
feats = att_block(xyz, feats)
#if i < len(self.final_elementwise):
feats = self.final_elementwise[i](feats)
if intermediate_out_path is not None:
intermediates['fullPTB{}'.format(i)] = xyz[0, :, :].cpu().numpy()
if intermediate_out_path is not None:
if not os.path.exists(intermediate_out_path):
os.makedirs(intermediate_out_path)
np.savez(intermediate_out_path + '/intermediate_pcs.npz', **intermediates)
# max pooling
lat_vec = feats.max(dim=1)[0]
return {'z': self.fc_middle(lat_vec), 'anchors': xyz, 'anchor_feats': feats}
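# Added encoder sketch (illustrative hyperparameters; CUDA extension assumed
# for the FPS-based downsampling inside TransitionDown):
#   enc = PointTransformerEncoderV2(npoints_per_layer=[1024, 256, 64],
#                                   nneighbor=16, nneighbor_reduced=10,
#                                   nfinal_transformers=3, d_transformer=256,
#                                   d_reduced=120, full_SA=True).cuda()
#   out = enc(torch.rand(2, 1024, 3).cuda())
#   # out['z']: [2, 256], out['anchors']: [2, 64, 3], out['anchor_feats']: [2, 64, 256]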
class PointNetEncoder(nn.Module):
"""
PointNet++-style encoder. Used in ablation experiments.
Attributes:
npoints_per_layer [int]: cardinality of point cloud for each layer
nneighbor int: number of neighbors for set abstraction
d_transformer int: internal dimensions
"""
def __init__(self, npoints_per_layer, nneighbor, d_transformer, nfinal_transformers):
super().__init__()
self.d_transformer = d_transformer
self.fc_middle = nn.Sequential(
nn.Linear(d_transformer, d_transformer),
nn.ReLU(),
nn.Linear(d_transformer, d_transformer)
)
self.fc_begin = nn.Sequential(
nn.Linear(3, d_transformer),
nn.ReLU(),
nn.Linear(d_transformer, d_transformer)
)
self.transition_downs = nn.ModuleList()
self.elementwise = nn.ModuleList()
for i in range(len(npoints_per_layer) - 1):
old_npoints = npoints_per_layer[i]
new_npoints = npoints_per_layer[i + 1]
self.transition_downs.append(
TransitionDown(new_npoints, min(nneighbor, old_npoints), d_transformer, type='maxpool')
)
self.elementwise.append(ElementwiseMLP(d_transformer))
# full self attention layers
self.final_transformers = nn.ModuleList()
self.final_elementwise = nn.ModuleList()
for i in range(nfinal_transformers):
self.final_transformers.append(
TransformerBlock(d_transformer, -1, group_all=True)
)
for i in range(nfinal_transformers):
self.final_elementwise.append(
ElementwiseMLP(dim=d_transformer)
)
def forward(self, xyz):
"""
        :param xyz [B x n x 3]: input point cloud
        :return: global latent representation [b x d_transformer]
                 xyz [B x npoints_per_layer[-1] x 3]: anchor positions
                 feats [B x npoints_per_layer[-1] x d_transformer]: local latent vectors
"""
feats = self.fc_begin(xyz)
for i in range(len(self.transition_downs)):
xyz, feats = self.transition_downs[i](xyz, feats)
feats = self.elementwise[i](feats)
for i, att_block in enumerate(self.final_transformers):
feats = att_block(xyz, feats)
feats = self.final_elementwise[i](feats)
# max pooling
lat_vec = feats.max(dim=1)[0]
return {'z': self.fc_middle(lat_vec), 'anchors': xyz, 'anchor_feats': feats}
class PointTransformerDecoderOcc(nn.Module):
"""
AIR-Net decoder
Attributes:
dim_inp int: dimensionality of encoding (global and local latent vectors)
dim int: internal dimensionality
nneigh int: number of nearest anchor points to draw information from
hidden_dim int: hidden dimensionality of final feed-forward network
n_blocks int: number of blocks in feed forward network
"""
def __init__(self, dim_inp, dim, nneigh=7, hidden_dim=64, n_blocks=5, return_feature=False):
super().__init__()
self.dim = dim
self.n_blocks = n_blocks
self.return_feature = return_feature
self.ct1 = CrossTransformerBlock(dim_inp, dim, nneigh=nneigh)
#self.fc_glob = nn.Linear(dim_inp, dim)
# WARNING! Ablation
self.init_enc = nn.Linear(dim+64, hidden_dim)
#self.init_enc = nn.Linear(dim+32, hidden_dim)
self.blocks = nn.ModuleList([
ResnetBlockFC(hidden_dim) for i in range(n_blocks)
])
self.fc_c = nn.ModuleList([
nn.Linear(dim, hidden_dim) for i in range(n_blocks)
])
self.fc_out = nn.Linear(hidden_dim, 1)
        self.actvn = torch.tanh  # F.tanh is deprecated in favor of torch.tanh
def forward(self, xyz_q, encoding):
"""
        TODO: update comment to include the encoding dict
:param xyz_q [B x n_queries x 3]: queried 3D coordinates
:param lat_rep [B x dim_inp]: global latent vectors
:param xyz [B x n_anchors x 3]: anchor positions
        :param feats [B x n_anchors x dim_inp]: local latent vectors
:return: occ [B x n_queries]: occupancy probability for each queried 3D coordinate
"""
lat_rep = encoding['z']
xyz = encoding['anchors']
feats = encoding['anchor_feats']
xyz_q, xyz_q_feat = xyz_q[:, :, :3], xyz_q[:, :, 3:]
lat_rep = self.ct1(xyz_q, lat_rep, xyz, feats) # + self.fc_glob(lat_rep).unsqueeze(1).repeat(1, xyz_q.shape[1], 1) +
cat_lat_rep = torch.cat((lat_rep, xyz_q_feat), dim=2)
net = self.init_enc(cat_lat_rep)
# incorporate it here
for i in range(self.n_blocks):
net = net + self.fc_c[i](lat_rep)
net = self.blocks[i](net)
if not self.return_feature:
occ = self.fc_out(self.actvn(net))
else:
#occ = net
occ = self.fc_out(net)
return occ
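# Added decoder sketch (illustrative values): note that this variant expects
# each query to carry a 64-d feature next to its 3D position, matching
# init_enc = nn.Linear(dim + 64, hidden_dim) above:
#   dec = PointTransformerDecoderOcc(dim_inp=256, dim=200, nneigh=7, hidden_dim=128)
#   xyz_q = torch.cat([torch.rand(2, 500, 3), torch.rand(2, 500, 64)], dim=-1)
#   occ = dec(xyz_q, encoding)   # encoding as returned by the encoder -> [2, 500, 1]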
class PointTransformerDecoderInterp(nn.Module):
"""
    Decoder based on interpolating features between local latent vectors.
    Gaussian kernel regression is used for the interpolation of features.
    Code adapted from https://github.com/autonomousvision/convolutional_occupancy_networks
    Attributes:
        dim_inp: input dimensionality
        hidden_dim: dimensionality of the feed-forward network
        n_blocks: number of blocks in the feed-forward network
        var (float): variance for the Gaussian kernel
"""
def __init__(self, dim_inp, dim, hidden_dim=50, n_blocks=5):
super().__init__()
self.n_blocks = n_blocks
self.fc0 = nn.Linear(dim_inp, dim)
self.fc1 = nn.Linear(dim, hidden_dim)
self.blocks = nn.ModuleList([
ResnetBlockFC(hidden_dim) for i in range(n_blocks)
])
self.fc_c = nn.ModuleList([
nn.Linear(dim, hidden_dim) for i in range(n_blocks)
])
self.fc_out = nn.Linear(hidden_dim, 1)
self.actvn = F.relu
self.var = 0.2**2
def sample_point_feature(self, q, p, fea):
# q: B x M x 3
# p: B x N x 3
# fea: B x N x c_dim
# p, fea = c
        # distance between each query point and the point cloud
        dist = -((p.unsqueeze(1).expand(-1, q.size(1), -1, -1) - q.unsqueeze(2)).norm(dim=3) + 10e-6) ** 2
        weight = (dist / self.var).exp()  # Gaussian kernel
# weight normalization
weight = weight / weight.sum(dim=2).unsqueeze(-1)
c_out = weight @ fea # B x M x c_dim
return c_out
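    # Added note: the interpolation above is Nadaraya-Watson kernel regression
    # with an isotropic Gaussian kernel, w_ij ~ exp(-(||q_i - p_j|| + 1e-5)^2 / var),
    # normalized over j, so c_out[i] = sum_j w_ij * fea[j].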
def forward(self, xyz_q, encoding):
"""
        :param xyz_q [B x n_queries x 3]: queried 3D positions
        :param xyz [B x n_anchors x 3]: anchor positions
        :param feats [B x n_anchors x dim_inp]: anchor features
        :return: occ [B x n_queries]: occupancy predictions/probabilities
"""
xyz = encoding['anchors']
feats = encoding['anchor_feats']
lat_rep = self.fc0(self.sample_point_feature(xyz_q, xyz, feats))
net = self.fc1(F.relu(lat_rep))
for i in range(self.n_blocks):
net = net + self.fc_c[i](lat_rep)
net = self.blocks[i](net)
occ = self.fc_out(self.actvn(net))
return occ
class PointTransformerDecoderLDIF(nn.Module):
"""
    Decoder based on interpolating features between local latent vectors.
    Gaussian kernel regression is used for the interpolation of features.
    Code adapted from https://github.com/autonomousvision/convolutional_occupancy_networks
    Attributes:
        dim_inp: input dimensionality
        hidden_dim: dimensionality of the feed-forward network
        n_blocks: number of blocks in the feed-forward network
        var (float): variance for the Gaussian kernel
"""
def __init__(self, dim_inp, dim, hidden_dim=50, n_blocks=5):
super().__init__()
self.n_blocks = n_blocks
        self.fc_scale = nn.Linear(dim_inp, 3)
self.fc_rot = nn.Linear(dim_inp, 3)
self.fc0 = nn.Linear(dim_inp+3, dim)
self.fc1 = nn.Linear(dim, hidden_dim)
self.blocks = nn.ModuleList([
ResnetBlockFC(hidden_dim) for i in range(n_blocks)
])
self.fc_c = nn.ModuleList([
nn.Linear(dim, hidden_dim) for i in range(n_blocks)
])
self.fc_out = nn.Linear(hidden_dim, 1)
self.actvn = F.relu
def euler2mat(self, angles):
x_angle = angles[:, :, 0]
y_angle = angles[:, :, 1]
z_angle = angles[:, :, 2]
cosz = torch.cos(z_angle)
sinz = torch.sin(z_angle)
z_rot = torch.zeros_like(z_angle).unsqueeze(-1).unsqueeze(-1).repeat(1, 1, 3, 3)
z_rot[:, :, 0, 0] = cosz
z_rot[:, :, 0, 1] = -sinz
z_rot[:, :, 1, 0] = sinz
z_rot[:, :, 1, 1] = cosz
z_rot[:, :, 2, 2] = 1
cosy = torch.cos(y_angle)
siny = torch.sin(y_angle)
y_rot = torch.zeros_like(y_angle).unsqueeze(-1).unsqueeze(-1).repeat(1, 1, 3, 3)
y_rot[:, :, 0, 0] = cosy
y_rot[:, :, 0, 2] = siny
y_rot[:, :, 1, 1] = 1
y_rot[:, :, 2, 0] = -siny
y_rot[:, :, 2, 2] = cosy
rot = torch.matmul(z_rot, y_rot)
cosx = torch.cos(x_angle)
sinx = torch.sin(x_angle)
x_rot = torch.zeros_like(x_angle).unsqueeze(-1).unsqueeze(-1).repeat(1, 1, 3, 3)
x_rot[:, :, 0, 0] = 1
x_rot[:, :, 1, 1] = cosx
x_rot[:, :, 1, 2] = -sinx
x_rot[:, :, 2, 1] = sinx
x_rot[:, :, 2, 2] = cosx
return torch.matmul(rot, x_rot)
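    # Added note: euler2mat composes R = Rz @ Ry @ Rx, i.e. a rotation about the
    # fixed x-axis first, then y, then z (extrinsic x-y-z convention).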
def compute_weight(self, q, p, f):
# q: B x M x 3
# p: B x N x 3
# f: B x N x hidden_dim
#TODO: think of better activation function?
        scale = torch.eye(3, device=f.device) * (0.005 + torch.sigmoid(self.fc_scale(f)).unsqueeze(-1).repeat(1, 1, 1, 3))  # B x N x 3 x 3
        rot = self.euler2mat(2 * math.pi * torch.sigmoid(self.fc_rot(f)))  # B x N x 3 -> B x N x 3 x 3
#cov = torch.matmul(scale, rot)
cov = scale
cov_inv = torch.inverse(cov)
cov_det = torch.det(cov)
        # distance between each query point and the point cloud
delta = p.unsqueeze(1).expand(-1, q.size(1), -1, -1) - q.unsqueeze(2)
tmp = torch.matmul(cov_inv.unsqueeze(1), delta.unsqueeze(-1)).squeeze()
#m = torch.matmul(delta.unsqueeze(-2), tmp).squeeze()
m = (delta * tmp).sum(-1)
dist = (-1/2*m).exp()
weight = (dist / (2*math.pi)**3 * cov_det.unsqueeze(1)) # B x M x N
# weight normalization
#weight = weight / weight.sum(dim=2).unsqueeze(-1)
return weight, delta, rot
def forward(self, xyz_q, encoding):
"""
        :param xyz_q [B x n_queries x 3]: queried 3D positions
        :param xyz [B x n_anchors x 3]: anchor positions
        :param feats [B x n_anchors x dim_inp]: anchor features
        :return: occ [B x n_queries]: occupancy predictions/probabilities
"""
xyz = encoding['anchors']
feats = encoding['anchor_feats']
weights, delta, rot = self.compute_weight(xyz_q, xyz, feats) # B x n_q x n_a
feats = feats.unsqueeze(1).repeat(1, xyz_q.shape[1], 1, 1)
loc_coord = torch.matmul(rot.unsqueeze(1), delta.unsqueeze(-1)).squeeze()
#loc_coord = delta.squeeze()
feats = torch.cat([feats.squeeze(), loc_coord], dim=-1)
lat_rep = self.fc0(feats)
net = self.fc1(F.relu(lat_rep))
for i in range(self.n_blocks):
net = net + self.fc_c[i](lat_rep)
net = self.blocks[i](net)
occs = self.fc_out(self.actvn(net)).squeeze() # B x n_q x n_a
occ = (occs * weights).sum(dim=-1)
return occ
def get_encoder(CFG):
CFG_enc = CFG['encoder']
if CFG_enc['type'] == 'airnet':
encoder = PointTransformerEncoderV2(npoints_per_layer=CFG_enc['npoints_per_layer'],
nneighbor=CFG_enc['encoder_nneigh'],
nneighbor_reduced=CFG_enc['encoder_nneigh_reduced'],
nfinal_transformers=CFG_enc['nfinal_trans'],
d_transformer=CFG_enc['encoder_attn_dim'],
d_reduced=CFG_enc['encoder_attn_dim_reduced'],
full_SA=CFG_enc.get('full_SA', True))
elif CFG_enc['type'] == 'pointnet++':
encoder = PointNetEncoder(npoints_per_layer=CFG_enc['npoints_per_layer'],
nneighbor=CFG_enc['encoder_nneigh'],
d_transformer=CFG_enc['encoder_attn_dim'],
nfinal_transformers=CFG_enc['nfinal_transformers'])
else:
raise ValueError('Unrecognized encoder type: ' + CFG_enc['type'])
return encoder
def get_decoder(CFG):
CFG_enc = CFG['encoder']
CFG_dec = CFG['decoder']
if CFG_dec['type'] == 'airnet':
decoder = PointTransformerDecoderOcc(dim_inp=CFG_enc['encoder_attn_dim'],
dim=CFG_dec['decoder_attn_dim'],
nneigh=CFG_dec['decoder_nneigh'],
hidden_dim=CFG_dec['decoder_hidden_dim'])
elif CFG_dec['type'] == 'interp':
print('Using interpolation-based decoder!')
decoder = PointTransformerDecoderInterp(dim_inp=CFG_enc['encoder_attn_dim'],
dim=CFG_dec['decoder_attn_dim'], #TODO remove this unnecessary param
hidden_dim=CFG_dec['decoder_hidden_dim'])
elif CFG_dec['type'] == 'ldif':
        print('Using LDIF-style decoder!')
decoder = PointTransformerDecoderLDIF(dim_inp=CFG_enc['encoder_attn_dim'],
dim=CFG_dec['decoder_attn_dim'], #TODO ??remove this unnecessary param??
hidden_dim=CFG_dec['decoder_hidden_dim'])
else:
raise ValueError('Decoder type "{}" not implemented!'.format(CFG_dec['type']))
return decoder
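# Added example configuration (hypothetical values; the key names follow the
# lookups in get_encoder/get_decoder above):
#   CFG = {'encoder': {'type': 'airnet', 'npoints_per_layer': [1024, 256, 64],
#                      'encoder_nneigh': 16, 'encoder_nneigh_reduced': 10,
#                      'nfinal_trans': 3, 'encoder_attn_dim': 256,
#                      'encoder_attn_dim_reduced': 120, 'full_SA': True},
#          'decoder': {'type': 'airnet', 'decoder_attn_dim': 200,
#                      'decoder_nneigh': 7, 'decoder_hidden_dim': 128}}
#   encoder, decoder = get_encoder(CFG), get_decoder(CFG)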
| 38,269 | 35.692234 | 143 | py |
Im2Hands | Im2Hands-main/dependencies/airnets/pointnet2_ops_lib/setup.py | import glob
import os
import os.path as osp
from setuptools import find_packages, setup
from torch.utils.cpp_extension import BuildExtension, CUDAExtension
this_dir = osp.dirname(osp.abspath(__file__))
_ext_src_root = osp.join("pointnet2_ops", "_ext-src")
_ext_sources = glob.glob(osp.join(_ext_src_root, "src", "*.cpp")) + glob.glob(
osp.join(_ext_src_root, "src", "*.cu")
)
_ext_headers = glob.glob(osp.join(_ext_src_root, "include", "*"))
requirements = ["torch>=1.4"]
exec(open(osp.join("pointnet2_ops", "_version.py")).read())
os.environ["TORCH_CUDA_ARCH_LIST"] = "3.7+PTX;5.0;6.0;6.1;6.2;7.0;7.5"
setup(
name="pointnet2_ops",
version=__version__,
author="Erik Wijmans",
packages=find_packages(),
install_requires=requirements,
ext_modules=[
CUDAExtension(
name="pointnet2_ops._ext",
sources=_ext_sources,
extra_compile_args={
"cxx": ["-O3"],
"nvcc": ["-O3", "-Xfatbin", "-compress-all"],
},
include_dirs=[osp.join(this_dir, _ext_src_root, "include")],
)
],
cmdclass={"build_ext": BuildExtension},
include_package_data=True,
)
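# Added note (assumed workflow): build and install with `pip install .` from
# this directory; the C++/CUDA sources above are compiled for every
# architecture listed in TORCH_CUDA_ARCH_LIST.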
| 1,185 | 28.65 | 78 | py |
Im2Hands | Im2Hands-main/dependencies/airnets/pointnet2_ops_lib/pointnet2_ops/pointnet2_utils.py | import torch
import torch.nn as nn
import warnings
from torch.autograd import Function
from typing import *
try:
import pointnet2_ops._ext as _ext
except ImportError:
from torch.utils.cpp_extension import load
import glob
import os.path as osp
import os
warnings.warn("Unable to load pointnet2_ops cpp extension. JIT Compiling.")
_ext_src_root = osp.join(osp.dirname(__file__), "_ext-src")
_ext_sources = glob.glob(osp.join(_ext_src_root, "src", "*.cpp")) + glob.glob(
osp.join(_ext_src_root, "src", "*.cu")
)
_ext_headers = glob.glob(osp.join(_ext_src_root, "include", "*"))
os.environ["TORCH_CUDA_ARCH_LIST"] = "3.7+PTX;5.0;6.0;6.1;6.2;7.0;7.5"
_ext = load(
"_ext",
sources=_ext_sources,
extra_include_paths=[osp.join(_ext_src_root, "include")],
extra_cflags=["-O3"],
extra_cuda_cflags=["-O3", "-Xfatbin", "-compress-all"],
with_cuda=True,
)
class FurthestPointSampling(Function):
@staticmethod
def forward(ctx, xyz, npoint):
# type: (Any, torch.Tensor, int) -> torch.Tensor
r"""
Uses iterative furthest point sampling to select a set of npoint features that have the largest
minimum distance
Parameters
----------
xyz : torch.Tensor
(B, N, 3) tensor where N > npoint
npoint : int32
number of features in the sampled set
Returns
-------
torch.Tensor
(B, npoint) tensor containing the set
"""
out = _ext.furthest_point_sampling(xyz, npoint)
ctx.mark_non_differentiable(out)
return out
@staticmethod
def backward(ctx, grad_out):
return ()
furthest_point_sample = FurthestPointSampling.apply
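# Added illustrative call (requires a CUDA build of the extension):
#   xyz = torch.rand(8, 1024, 3).cuda()
#   idx = furthest_point_sample(xyz, 256)   # integer index tensor, [8, 256]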
class GatherOperation(Function):
@staticmethod
def forward(ctx, features, idx):
# type: (Any, torch.Tensor, torch.Tensor) -> torch.Tensor
r"""
Parameters
----------
features : torch.Tensor
(B, C, N) tensor
idx : torch.Tensor
(B, npoint) tensor of the features to gather
Returns
-------
torch.Tensor
(B, C, npoint) tensor
"""
ctx.save_for_backward(idx, features)
return _ext.gather_points(features, idx)
@staticmethod
def backward(ctx, grad_out):
idx, features = ctx.saved_tensors
N = features.size(2)
grad_features = _ext.gather_points_grad(grad_out.contiguous(), idx, N)
return grad_features, None
gather_operation = GatherOperation.apply
class ThreeNN(Function):
@staticmethod
def forward(ctx, unknown, known):
# type: (Any, torch.Tensor, torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]
r"""
Find the three nearest neighbors of unknown in known
Parameters
----------
        unknown : torch.Tensor
            (B, n, 3) tensor of query points (the set to interpolate onto)
        known : torch.Tensor
            (B, m, 3) tensor of reference points (the set searched for neighbors)
Returns
-------
dist : torch.Tensor
(B, n, 3) l2 distance to the three nearest neighbors
idx : torch.Tensor
(B, n, 3) index of 3 nearest neighbors
"""
dist2, idx = _ext.three_nn(unknown, known)
dist = torch.sqrt(dist2)
ctx.mark_non_differentiable(dist, idx)
return dist, idx
@staticmethod
def backward(ctx, grad_dist, grad_idx):
return ()
three_nn = ThreeNN.apply
class ThreeInterpolate(Function):
@staticmethod
def forward(ctx, features, idx, weight):
        # type: (Any, torch.Tensor, torch.Tensor, torch.Tensor) -> torch.Tensor
r"""
        Performs weighted linear interpolation over the 3 nearest features
Parameters
----------
features : torch.Tensor
(B, c, m) Features descriptors to be interpolated from
idx : torch.Tensor
(B, n, 3) three nearest neighbors of the target features in features
weight : torch.Tensor
(B, n, 3) weights
Returns
-------
torch.Tensor
(B, c, n) tensor of the interpolated features
"""
ctx.save_for_backward(idx, weight, features)
return _ext.three_interpolate(features, idx, weight)
@staticmethod
def backward(ctx, grad_out):
# type: (Any, torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]
r"""
Parameters
----------
grad_out : torch.Tensor
            (B, c, n) tensor with gradients of outputs
Returns
-------
grad_features : torch.Tensor
(B, c, m) tensor with gradients of features
None
None
"""
idx, weight, features = ctx.saved_tensors
m = features.size(2)
grad_features = _ext.three_interpolate_grad(
grad_out.contiguous(), idx, weight, m
)
return grad_features, torch.zeros_like(idx), torch.zeros_like(weight)
three_interpolate = ThreeInterpolate.apply
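# Added feature-propagation sketch (illustrative; CUDA build assumed), mirroring
# how PointnetFPModule uses these ops:
#   dist, idx = three_nn(unknown_xyz, known_xyz)        # both [B, n, 3]
#   weight = 1.0 / (dist + 1e-8)
#   weight = weight / weight.sum(dim=2, keepdim=True)   # inverse-distance weights
#   feats_up = three_interpolate(known_feats, idx, weight)  # [B, C, n]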
class GroupingOperation(Function):
@staticmethod
def forward(ctx, features, idx):
# type: (Any, torch.Tensor, torch.Tensor) -> torch.Tensor
r"""
Parameters
----------
features : torch.Tensor
(B, C, N) tensor of features to group
idx : torch.Tensor
            (B, npoint, nsample) tensor containing the indices of features to group with
Returns
-------
torch.Tensor
(B, C, npoint, nsample) tensor
"""
ctx.save_for_backward(idx, features)
return _ext.group_points(features, idx)
@staticmethod
def backward(ctx, grad_out):
# type: (Any, torch.tensor) -> Tuple[torch.Tensor, torch.Tensor]
r"""
Parameters
----------
grad_out : torch.Tensor
(B, C, npoint, nsample) tensor of the gradients of the output from forward
Returns
-------
torch.Tensor
(B, C, N) gradient of the features
None
"""
idx, features = ctx.saved_tensors
N = features.size(2)
grad_features = _ext.group_points_grad(grad_out.contiguous(), idx, N)
return grad_features, torch.zeros_like(idx)
grouping_operation = GroupingOperation.apply
class BallQuery(Function):
@staticmethod
def forward(ctx, radius, nsample, xyz, new_xyz):
# type: (Any, float, int, torch.Tensor, torch.Tensor) -> torch.Tensor
r"""
Parameters
----------
radius : float
radius of the balls
nsample : int
maximum number of features in the balls
xyz : torch.Tensor
(B, N, 3) xyz coordinates of the features
new_xyz : torch.Tensor
(B, npoint, 3) centers of the ball query
Returns
-------
torch.Tensor
            (B, npoint, nsample) tensor with the indices of the features that form the query balls
"""
output = _ext.ball_query(new_xyz, xyz, radius, nsample)
ctx.mark_non_differentiable(output)
return output
@staticmethod
def backward(ctx, grad_out):
return ()
ball_query = BallQuery.apply
class QueryAndGroup(nn.Module):
r"""
Groups with a ball query of radius
Parameters
---------
radius : float32
Radius of ball
nsample : int32
Maximum number of features to gather in the ball
"""
def __init__(self, radius, nsample, use_xyz=True):
# type: (QueryAndGroup, float, int, bool) -> None
super(QueryAndGroup, self).__init__()
self.radius, self.nsample, self.use_xyz = radius, nsample, use_xyz
def forward(self, xyz, new_xyz, features=None):
        # type: (QueryAndGroup, torch.Tensor, torch.Tensor, torch.Tensor) -> Tuple[torch.Tensor]
r"""
Parameters
----------
xyz : torch.Tensor
xyz coordinates of the features (B, N, 3)
new_xyz : torch.Tensor
            centroids (B, npoint, 3)
features : torch.Tensor
Descriptors of the features (B, C, N)
Returns
-------
new_features : torch.Tensor
(B, 3 + C, npoint, nsample) tensor
"""
idx = ball_query(self.radius, self.nsample, xyz, new_xyz)
xyz_trans = xyz.transpose(1, 2).contiguous()
grouped_xyz = grouping_operation(xyz_trans, idx) # (B, 3, npoint, nsample)
grouped_xyz -= new_xyz.transpose(1, 2).unsqueeze(-1)
if features is not None:
grouped_features = grouping_operation(features, idx)
if self.use_xyz:
new_features = torch.cat(
[grouped_xyz, grouped_features], dim=1
) # (B, C + 3, npoint, nsample)
else:
new_features = grouped_features
else:
            assert (
                self.use_xyz
            ), "Cannot group: features is None and use_xyz is False, so there is nothing to gather!"
new_features = grouped_xyz
return new_features
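# Added grouping sketch (illustrative values; CUDA extension required):
#   qg = QueryAndGroup(radius=0.2, nsample=32)
#   xyz = torch.rand(2, 1024, 3).cuda()
#   new_xyz = xyz[:, :256].contiguous()        # e.g. FPS centroids
#   feats = torch.rand(2, 64, 1024).cuda()
#   grouped = qg(xyz, new_xyz, feats)          # -> [2, 3 + 64, 256, 32]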
class GroupAll(nn.Module):
r"""
Groups all features
Parameters
---------
"""
def __init__(self, use_xyz=True):
# type: (GroupAll, bool) -> None
super(GroupAll, self).__init__()
self.use_xyz = use_xyz
def forward(self, xyz, new_xyz, features=None):
# type: (GroupAll, torch.Tensor, torch.Tensor, torch.Tensor) -> Tuple[torch.Tensor]
r"""
Parameters
----------
xyz : torch.Tensor
xyz coordinates of the features (B, N, 3)
new_xyz : torch.Tensor
Ignored
features : torch.Tensor
Descriptors of the features (B, C, N)
Returns
-------
new_features : torch.Tensor
(B, C + 3, 1, N) tensor
"""
grouped_xyz = xyz.transpose(1, 2).unsqueeze(2)
if features is not None:
grouped_features = features.unsqueeze(2)
if self.use_xyz:
new_features = torch.cat(
[grouped_xyz, grouped_features], dim=1
) # (B, 3 + C, 1, N)
else:
new_features = grouped_features
else:
new_features = grouped_xyz
return new_features
| 10,396 | 26.360526 | 103 | py |
Im2Hands | Im2Hands-main/dependencies/airnets/pointnet2_ops_lib/pointnet2_ops/pointnet2_modules.py | from typing import List, Optional, Tuple
import torch
import torch.nn as nn
import torch.nn.functional as F
from pointnet2_ops import pointnet2_utils
def build_shared_mlp(mlp_spec: List[int], bn: bool = True):
layers = []
for i in range(1, len(mlp_spec)):
layers.append(
nn.Conv2d(mlp_spec[i - 1], mlp_spec[i], kernel_size=1, bias=not bn)
)
if bn:
layers.append(nn.BatchNorm2d(mlp_spec[i]))
layers.append(nn.ReLU(True))
return nn.Sequential(*layers)
class _PointnetSAModuleBase(nn.Module):
def __init__(self):
super(_PointnetSAModuleBase, self).__init__()
self.npoint = None
self.groupers = None
self.mlps = None
def forward(
self, xyz: torch.Tensor, features: Optional[torch.Tensor]
) -> Tuple[torch.Tensor, torch.Tensor]:
r"""
Parameters
----------
xyz : torch.Tensor
(B, N, 3) tensor of the xyz coordinates of the features
features : torch.Tensor
            (B, C, N) tensor of the descriptors of the features
Returns
-------
new_xyz : torch.Tensor
(B, npoint, 3) tensor of the new features' xyz
new_features : torch.Tensor
(B, \sum_k(mlps[k][-1]), npoint) tensor of the new_features descriptors
"""
new_features_list = []
xyz_flipped = xyz.transpose(1, 2).contiguous()
new_xyz = (
pointnet2_utils.gather_operation(
xyz_flipped, pointnet2_utils.furthest_point_sample(xyz, self.npoint)
)
.transpose(1, 2)
.contiguous()
if self.npoint is not None
else None
)
for i in range(len(self.groupers)):
new_features = self.groupers[i](
xyz, new_xyz, features
) # (B, C, npoint, nsample)
new_features = self.mlps[i](new_features) # (B, mlp[-1], npoint, nsample)
new_features = F.max_pool2d(
new_features, kernel_size=[1, new_features.size(3)]
) # (B, mlp[-1], npoint, 1)
new_features = new_features.squeeze(-1) # (B, mlp[-1], npoint)
new_features_list.append(new_features)
return new_xyz, torch.cat(new_features_list, dim=1)
class PointnetSAModuleMSG(_PointnetSAModuleBase):
r"""Pointnet set abstrction layer with multiscale grouping
Parameters
----------
npoint : int
Number of features
radii : list of float32
list of radii to group with
nsamples : list of int32
Number of samples in each ball query
mlps : list of list of int32
Spec of the pointnet before the global max_pool for each scale
bn : bool
Use batchnorm
"""
def __init__(self, npoint, radii, nsamples, mlps, bn=True, use_xyz=True):
# type: (PointnetSAModuleMSG, int, List[float], List[int], List[List[int]], bool, bool) -> None
super(PointnetSAModuleMSG, self).__init__()
assert len(radii) == len(nsamples) == len(mlps)
self.npoint = npoint
self.groupers = nn.ModuleList()
self.mlps = nn.ModuleList()
for i in range(len(radii)):
radius = radii[i]
nsample = nsamples[i]
self.groupers.append(
pointnet2_utils.QueryAndGroup(radius, nsample, use_xyz=use_xyz)
if npoint is not None
else pointnet2_utils.GroupAll(use_xyz)
)
mlp_spec = mlps[i]
if use_xyz:
mlp_spec[0] += 3
self.mlps.append(build_shared_mlp(mlp_spec, bn))
class PointnetSAModule(PointnetSAModuleMSG):
r"""Pointnet set abstrction layer
Parameters
----------
npoint : int
Number of features
radius : float
Radius of ball
nsample : int
Number of samples in the ball query
mlp : list
Spec of the pointnet before the global max_pool
bn : bool
Use batchnorm
"""
def __init__(
self, mlp, npoint=None, radius=None, nsample=None, bn=True, use_xyz=True
):
# type: (PointnetSAModule, List[int], int, float, int, bool, bool) -> None
super(PointnetSAModule, self).__init__(
mlps=[mlp],
npoint=npoint,
radii=[radius],
nsamples=[nsample],
bn=bn,
use_xyz=use_xyz,
)
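# Added SA-module sketch (illustrative values; CUDA extension required). Note
# that mlp[0] is the input feature width, which grows by 3 when use_xyz=True:
#   sa = PointnetSAModule(mlp=[64, 64, 128], npoint=256, radius=0.2, nsample=32).cuda()
#   xyz, feats = torch.rand(2, 1024, 3).cuda(), torch.rand(2, 64, 1024).cuda()
#   new_xyz, new_feats = sa(xyz, feats)        # -> [2, 256, 3], [2, 128, 256]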
class PointnetFPModule(nn.Module):
r"""Propigates the features of one set to another
Parameters
----------
mlp : list
Pointnet module parameters
bn : bool
Use batchnorm
"""
def __init__(self, mlp, bn=True):
# type: (PointnetFPModule, List[int], bool) -> None
super(PointnetFPModule, self).__init__()
self.mlp = build_shared_mlp(mlp, bn=bn)
def forward(self, unknown, known, unknow_feats, known_feats):
# type: (PointnetFPModule, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor) -> torch.Tensor
r"""
Parameters
----------
unknown : torch.Tensor
(B, n, 3) tensor of the xyz positions of the unknown features
known : torch.Tensor
(B, m, 3) tensor of the xyz positions of the known features
        unknow_feats : torch.Tensor
            (B, C1, n) tensor of the features to be propagated to
        known_feats : torch.Tensor
            (B, C2, m) tensor of features to be propagated
Returns
-------
new_features : torch.Tensor
(B, mlp[-1], n) tensor of the features of the unknown features
"""
if known is not None:
dist, idx = pointnet2_utils.three_nn(unknown, known)
dist_recip = 1.0 / (dist + 1e-8)
norm = torch.sum(dist_recip, dim=2, keepdim=True)
weight = dist_recip / norm
interpolated_feats = pointnet2_utils.three_interpolate(
known_feats, idx, weight
)
else:
interpolated_feats = known_feats.expand(
*(known_feats.size()[0:2] + [unknown.size(1)])
)
if unknow_feats is not None:
new_features = torch.cat(
[interpolated_feats, unknow_feats], dim=1
) # (B, C2 + C1, n)
else:
new_features = interpolated_feats
new_features = new_features.unsqueeze(-1)
new_features = self.mlp(new_features)
return new_features.squeeze(-1)
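# Added FP-module sketch (illustrative values; CUDA extension required):
# propagate 128-d features from 256 "known" points onto 1024 "unknown" points,
# concatenating 64-d skip features, so mlp[0] = 128 + 64:
#   fp = PointnetFPModule(mlp=[128 + 64, 128]).cuda()
#   up = fp(unknown_xyz, known_xyz, unknown_feats, known_feats)  # -> [B, 128, 1024]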
| 6,530 | 30.1 | 106 | py |
Im2Hands | Im2Hands-main/dependencies/airnets/pointnet2_ops_lib/build/lib.linux-x86_64-3.8/pointnet2_ops/pointnet2_utils.py | import torch
import torch.nn as nn
import warnings
from torch.autograd import Function
from typing import *
try:
import pointnet2_ops._ext as _ext
except ImportError:
from torch.utils.cpp_extension import load
import glob
import os.path as osp
import os
warnings.warn("Unable to load pointnet2_ops cpp extension. JIT Compiling.")
_ext_src_root = osp.join(osp.dirname(__file__), "_ext-src")
_ext_sources = glob.glob(osp.join(_ext_src_root, "src", "*.cpp")) + glob.glob(
osp.join(_ext_src_root, "src", "*.cu")
)
_ext_headers = glob.glob(osp.join(_ext_src_root, "include", "*"))
os.environ["TORCH_CUDA_ARCH_LIST"] = "3.7+PTX;5.0;6.0;6.1;6.2;7.0;7.5"
_ext = load(
"_ext",
sources=_ext_sources,
extra_include_paths=[osp.join(_ext_src_root, "include")],
extra_cflags=["-O3"],
extra_cuda_cflags=["-O3", "-Xfatbin", "-compress-all"],
with_cuda=True,
)
class FurthestPointSampling(Function):
@staticmethod
def forward(ctx, xyz, npoint):
# type: (Any, torch.Tensor, int) -> torch.Tensor
r"""
Uses iterative furthest point sampling to select a set of npoint features that have the largest
minimum distance
Parameters
----------
xyz : torch.Tensor
(B, N, 3) tensor where N > npoint
npoint : int32
number of features in the sampled set
Returns
-------
torch.Tensor
(B, npoint) tensor containing the set
"""
out = _ext.furthest_point_sampling(xyz, npoint)
ctx.mark_non_differentiable(out)
return out
@staticmethod
def backward(ctx, grad_out):
return ()
furthest_point_sample = FurthestPointSampling.apply
class GatherOperation(Function):
@staticmethod
def forward(ctx, features, idx):
# type: (Any, torch.Tensor, torch.Tensor) -> torch.Tensor
r"""
Parameters
----------
features : torch.Tensor
(B, C, N) tensor
idx : torch.Tensor
(B, npoint) tensor of the features to gather
Returns
-------
torch.Tensor
(B, C, npoint) tensor
"""
ctx.save_for_backward(idx, features)
return _ext.gather_points(features, idx)
@staticmethod
def backward(ctx, grad_out):
idx, features = ctx.saved_tensors
N = features.size(2)
grad_features = _ext.gather_points_grad(grad_out.contiguous(), idx, N)
return grad_features, None
gather_operation = GatherOperation.apply
class ThreeNN(Function):
@staticmethod
def forward(ctx, unknown, known):
# type: (Any, torch.Tensor, torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]
r"""
Find the three nearest neighbors of unknown in known
Parameters
----------
unknown : torch.Tensor
(B, n, 3) tensor of known features
known : torch.Tensor
(B, m, 3) tensor of unknown features
Returns
-------
dist : torch.Tensor
(B, n, 3) l2 distance to the three nearest neighbors
idx : torch.Tensor
(B, n, 3) index of 3 nearest neighbors
"""
dist2, idx = _ext.three_nn(unknown, known)
dist = torch.sqrt(dist2)
ctx.mark_non_differentiable(dist, idx)
return dist, idx
@staticmethod
def backward(ctx, grad_dist, grad_idx):
return ()
three_nn = ThreeNN.apply
class ThreeInterpolate(Function):
@staticmethod
def forward(ctx, features, idx, weight):
# type(Any, torch.Tensor, torch.Tensor, torch.Tensor) -> Torch.Tensor
r"""
Performs weight linear interpolation on 3 features
Parameters
----------
features : torch.Tensor
(B, c, m) Features descriptors to be interpolated from
idx : torch.Tensor
(B, n, 3) three nearest neighbors of the target features in features
weight : torch.Tensor
(B, n, 3) weights
Returns
-------
torch.Tensor
(B, c, n) tensor of the interpolated features
"""
ctx.save_for_backward(idx, weight, features)
return _ext.three_interpolate(features, idx, weight)
@staticmethod
def backward(ctx, grad_out):
# type: (Any, torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]
r"""
Parameters
----------
grad_out : torch.Tensor
(B, c, n) tensor with gradients of ouputs
Returns
-------
grad_features : torch.Tensor
(B, c, m) tensor with gradients of features
None
None
"""
idx, weight, features = ctx.saved_tensors
m = features.size(2)
grad_features = _ext.three_interpolate_grad(
grad_out.contiguous(), idx, weight, m
)
return grad_features, torch.zeros_like(idx), torch.zeros_like(weight)
three_interpolate = ThreeInterpolate.apply
class GroupingOperation(Function):
@staticmethod
def forward(ctx, features, idx):
# type: (Any, torch.Tensor, torch.Tensor) -> torch.Tensor
r"""
Parameters
----------
features : torch.Tensor
(B, C, N) tensor of features to group
idx : torch.Tensor
(B, npoint, nsample) tensor containing the indicies of features to group with
Returns
-------
torch.Tensor
(B, C, npoint, nsample) tensor
"""
ctx.save_for_backward(idx, features)
return _ext.group_points(features, idx)
@staticmethod
def backward(ctx, grad_out):
# type: (Any, torch.tensor) -> Tuple[torch.Tensor, torch.Tensor]
r"""
Parameters
----------
grad_out : torch.Tensor
(B, C, npoint, nsample) tensor of the gradients of the output from forward
Returns
-------
torch.Tensor
(B, C, N) gradient of the features
None
"""
idx, features = ctx.saved_tensors
N = features.size(2)
grad_features = _ext.group_points_grad(grad_out.contiguous(), idx, N)
return grad_features, torch.zeros_like(idx)
grouping_operation = GroupingOperation.apply
class BallQuery(Function):
@staticmethod
def forward(ctx, radius, nsample, xyz, new_xyz):
# type: (Any, float, int, torch.Tensor, torch.Tensor) -> torch.Tensor
r"""
Parameters
----------
radius : float
radius of the balls
nsample : int
maximum number of features in the balls
xyz : torch.Tensor
(B, N, 3) xyz coordinates of the features
new_xyz : torch.Tensor
(B, npoint, 3) centers of the ball query
Returns
-------
torch.Tensor
(B, npoint, nsample) tensor with the indicies of the features that form the query balls
"""
output = _ext.ball_query(new_xyz, xyz, radius, nsample)
ctx.mark_non_differentiable(output)
return output
@staticmethod
def backward(ctx, grad_out):
return ()
ball_query = BallQuery.apply
class QueryAndGroup(nn.Module):
r"""
Groups with a ball query of radius
Parameters
---------
radius : float32
Radius of ball
nsample : int32
Maximum number of features to gather in the ball
"""
def __init__(self, radius, nsample, use_xyz=True):
# type: (QueryAndGroup, float, int, bool) -> None
super(QueryAndGroup, self).__init__()
self.radius, self.nsample, self.use_xyz = radius, nsample, use_xyz
def forward(self, xyz, new_xyz, features=None):
# type: (QueryAndGroup, torch.Tensor. torch.Tensor, torch.Tensor) -> Tuple[Torch.Tensor]
r"""
Parameters
----------
xyz : torch.Tensor
xyz coordinates of the features (B, N, 3)
new_xyz : torch.Tensor
centriods (B, npoint, 3)
features : torch.Tensor
Descriptors of the features (B, C, N)
Returns
-------
new_features : torch.Tensor
(B, 3 + C, npoint, nsample) tensor
"""
idx = ball_query(self.radius, self.nsample, xyz, new_xyz)
xyz_trans = xyz.transpose(1, 2).contiguous()
grouped_xyz = grouping_operation(xyz_trans, idx) # (B, 3, npoint, nsample)
grouped_xyz -= new_xyz.transpose(1, 2).unsqueeze(-1)
if features is not None:
grouped_features = grouping_operation(features, idx)
if self.use_xyz:
new_features = torch.cat(
[grouped_xyz, grouped_features], dim=1
) # (B, C + 3, npoint, nsample)
else:
new_features = grouped_features
else:
assert (
self.use_xyz
), "Cannot have not features and not use xyz as a feature!"
new_features = grouped_xyz
return new_features
class GroupAll(nn.Module):
r"""
Groups all features
Parameters
---------
"""
def __init__(self, use_xyz=True):
# type: (GroupAll, bool) -> None
super(GroupAll, self).__init__()
self.use_xyz = use_xyz
def forward(self, xyz, new_xyz, features=None):
# type: (GroupAll, torch.Tensor, torch.Tensor, torch.Tensor) -> Tuple[torch.Tensor]
r"""
Parameters
----------
xyz : torch.Tensor
xyz coordinates of the features (B, N, 3)
new_xyz : torch.Tensor
Ignored
features : torch.Tensor
Descriptors of the features (B, C, N)
Returns
-------
new_features : torch.Tensor
(B, C + 3, 1, N) tensor
"""
grouped_xyz = xyz.transpose(1, 2).unsqueeze(2)
if features is not None:
grouped_features = features.unsqueeze(2)
if self.use_xyz:
new_features = torch.cat(
[grouped_xyz, grouped_features], dim=1
) # (B, 3 + C, 1, N)
else:
new_features = grouped_features
else:
new_features = grouped_xyz
return new_features
| 10,396 | 26.360526 | 103 | py |
Im2Hands | Im2Hands-main/dependencies/airnets/pointnet2_ops_lib/build/lib.linux-x86_64-3.8/pointnet2_ops/pointnet2_modules.py | from typing import List, Optional, Tuple
import torch
import torch.nn as nn
import torch.nn.functional as F
from pointnet2_ops import pointnet2_utils
def build_shared_mlp(mlp_spec: List[int], bn: bool = True):
layers = []
for i in range(1, len(mlp_spec)):
layers.append(
nn.Conv2d(mlp_spec[i - 1], mlp_spec[i], kernel_size=1, bias=not bn)
)
if bn:
layers.append(nn.BatchNorm2d(mlp_spec[i]))
layers.append(nn.ReLU(True))
return nn.Sequential(*layers)
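# Example: build_shared_mlp([67, 64, 128]) yields
#   Conv2d(67, 64, 1) -> BatchNorm2d(64) -> ReLU -> Conv2d(64, 128, 1) -> BatchNorm2d(128) -> ReLU,
# i.e. a shared per-point MLP applied over the (npoint, nsample) grid via 1x1 convolutions.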
class _PointnetSAModuleBase(nn.Module):
def __init__(self):
super(_PointnetSAModuleBase, self).__init__()
self.npoint = None
self.groupers = None
self.mlps = None
def forward(
self, xyz: torch.Tensor, features: Optional[torch.Tensor]
) -> Tuple[torch.Tensor, torch.Tensor]:
r"""
Parameters
----------
xyz : torch.Tensor
(B, N, 3) tensor of the xyz coordinates of the features
features : torch.Tensor
            (B, C, N) tensor of the descriptors of the features
Returns
-------
new_xyz : torch.Tensor
(B, npoint, 3) tensor of the new features' xyz
new_features : torch.Tensor
(B, \sum_k(mlps[k][-1]), npoint) tensor of the new_features descriptors
"""
new_features_list = []
xyz_flipped = xyz.transpose(1, 2).contiguous()
new_xyz = (
pointnet2_utils.gather_operation(
xyz_flipped, pointnet2_utils.furthest_point_sample(xyz, self.npoint)
)
.transpose(1, 2)
.contiguous()
if self.npoint is not None
else None
)
for i in range(len(self.groupers)):
new_features = self.groupers[i](
xyz, new_xyz, features
) # (B, C, npoint, nsample)
new_features = self.mlps[i](new_features) # (B, mlp[-1], npoint, nsample)
new_features = F.max_pool2d(
new_features, kernel_size=[1, new_features.size(3)]
) # (B, mlp[-1], npoint, 1)
new_features = new_features.squeeze(-1) # (B, mlp[-1], npoint)
new_features_list.append(new_features)
return new_xyz, torch.cat(new_features_list, dim=1)
class PointnetSAModuleMSG(_PointnetSAModuleBase):
r"""Pointnet set abstrction layer with multiscale grouping
Parameters
----------
npoint : int
Number of features
radii : list of float32
list of radii to group with
nsamples : list of int32
Number of samples in each ball query
mlps : list of list of int32
Spec of the pointnet before the global max_pool for each scale
bn : bool
Use batchnorm
"""
def __init__(self, npoint, radii, nsamples, mlps, bn=True, use_xyz=True):
# type: (PointnetSAModuleMSG, int, List[float], List[int], List[List[int]], bool, bool) -> None
super(PointnetSAModuleMSG, self).__init__()
assert len(radii) == len(nsamples) == len(mlps)
self.npoint = npoint
self.groupers = nn.ModuleList()
self.mlps = nn.ModuleList()
for i in range(len(radii)):
radius = radii[i]
nsample = nsamples[i]
self.groupers.append(
pointnet2_utils.QueryAndGroup(radius, nsample, use_xyz=use_xyz)
if npoint is not None
else pointnet2_utils.GroupAll(use_xyz)
)
mlp_spec = mlps[i]
if use_xyz:
mlp_spec[0] += 3
self.mlps.append(build_shared_mlp(mlp_spec, bn))
class PointnetSAModule(PointnetSAModuleMSG):
r"""Pointnet set abstrction layer
Parameters
----------
npoint : int
Number of features
radius : float
Radius of ball
nsample : int
Number of samples in the ball query
mlp : list
Spec of the pointnet before the global max_pool
bn : bool
Use batchnorm
"""
def __init__(
self, mlp, npoint=None, radius=None, nsample=None, bn=True, use_xyz=True
):
# type: (PointnetSAModule, List[int], int, float, int, bool, bool) -> None
super(PointnetSAModule, self).__init__(
mlps=[mlp],
npoint=npoint,
radii=[radius],
nsamples=[nsample],
bn=bn,
use_xyz=use_xyz,
)
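# Usage sketch (assumes the CUDA extension is built; with use_xyz=True the first
# conv sees 64 + 3 input channels because mlp_spec[0] is incremented above):
#   sa = PointnetSAModule(mlp=[64, 64, 128], npoint=512, radius=0.2, nsample=32).cuda()
#   xyz = torch.rand(2, 1024, 3).cuda()
#   feats = torch.rand(2, 64, 1024).cuda()
#   new_xyz, new_feats = sa(xyz, feats)    # (2, 512, 3), (2, 128, 512)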
class PointnetFPModule(nn.Module):
r"""Propigates the features of one set to another
Parameters
----------
mlp : list
Pointnet module parameters
bn : bool
Use batchnorm
"""
def __init__(self, mlp, bn=True):
# type: (PointnetFPModule, List[int], bool) -> None
super(PointnetFPModule, self).__init__()
self.mlp = build_shared_mlp(mlp, bn=bn)
def forward(self, unknown, known, unknow_feats, known_feats):
# type: (PointnetFPModule, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor) -> torch.Tensor
r"""
Parameters
----------
unknown : torch.Tensor
(B, n, 3) tensor of the xyz positions of the unknown features
known : torch.Tensor
(B, m, 3) tensor of the xyz positions of the known features
        unknow_feats : torch.Tensor
            (B, C1, n) tensor of the features to be propagated to
        known_feats : torch.Tensor
            (B, C2, m) tensor of features to be propagated
Returns
-------
new_features : torch.Tensor
            (B, mlp[-1], n) tensor of the propagated features at the unknown points
"""
if known is not None:
dist, idx = pointnet2_utils.three_nn(unknown, known)
dist_recip = 1.0 / (dist + 1e-8)
norm = torch.sum(dist_recip, dim=2, keepdim=True)
weight = dist_recip / norm
interpolated_feats = pointnet2_utils.three_interpolate(
known_feats, idx, weight
)
else:
            interpolated_feats = known_feats.expand(
                *(list(known_feats.size()[0:2]) + [unknown.size(1)])
            )
if unknow_feats is not None:
new_features = torch.cat(
[interpolated_feats, unknow_feats], dim=1
) # (B, C2 + C1, n)
else:
new_features = interpolated_feats
new_features = new_features.unsqueeze(-1)
new_features = self.mlp(new_features)
return new_features.squeeze(-1)
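# A pure-PyTorch reference for the inverse-distance interpolation used above
# (a sketch mirroring pointnet2_utils.three_interpolate; the fused CUDA op is faster):
def three_interpolate_reference(feats, idx, weight):
    # feats: (B, C, M), idx: (B, n, 3), weight: (B, n, 3) -> (B, C, n)
    B = feats.shape[0]
    batch = torch.arange(B, device=feats.device).view(B, 1, 1)
    neighbors = feats[batch, :, idx.long()]          # (B, n, 3, C)
    out = (neighbors * weight.unsqueeze(-1)).sum(2)  # (B, n, C)
    return out.transpose(1, 2).contiguous()          # (B, C, n)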
| 6,530 | 30.1 | 106 | py |
Im2Hands | Im2Hands-main/artihand/checkpoints.py | import os
import urllib
import torch
from torch.utils import model_zoo
class CheckpointIO(object):
''' CheckpointIO class.
It handles saving and loading checkpoints.
Args:
checkpoint_dir (str): path where checkpoints are saved
'''
def __init__(self, checkpoint_dir='./chkpts', initialize_from=None,
initialization_file_name='model_best.pt', **kwargs):
self.module_dict = kwargs
self.checkpoint_dir = checkpoint_dir
self.initialize_from = initialize_from
self.initialization_file_name = initialization_file_name
if not os.path.exists(checkpoint_dir):
os.makedirs(checkpoint_dir)
def register_modules(self, **kwargs):
''' Registers modules in current module dictionary.
'''
self.module_dict.update(kwargs)
def save(self, filename, **kwargs):
''' Saves the current module dictionary.
Args:
filename (str): name of output file
'''
if not os.path.isabs(filename):
filename = os.path.join(self.checkpoint_dir, filename)
outdict = kwargs
for k, v in self.module_dict.items():
outdict[k] = v.state_dict()
torch.save(outdict, filename)
def load(self, filename, strict=True):
'''Loads a module dictionary from local file or url.
Args:
filename (str): name of saved module dictionary
'''
if is_url(filename):
return self.load_url(filename)
else:
return self.load_file(filename, strict)
def load_file(self, filename, strict=True):
'''Loads a module dictionary from file.
Args:
filename (str): name of saved module dictionary
'''
if not os.path.isabs(filename):
filename = os.path.join(self.checkpoint_dir, filename)
if os.path.exists(filename):
print(filename)
print('=> Loading checkpoint from local file...')
if not strict:
state_dict = torch.load(filename)
self.module_dict['model'].load_state_dict(state_dict, strict=False)
elif 'init_occ.pt' in filename:
state_dict = torch.load(filename)['model']
self.module_dict['model'].load_state_dict(state_dict, strict=False)
old_state_dict = state_dict
state_dict = {'init_occ.' + k: v for k, v in old_state_dict.items()}
self.module_dict['model'].load_state_dict(state_dict, strict=False)
elif 'ref_halo_baseline' in filename:
state_dict = torch.load(filename)['model']
self.module_dict['model'].load_state_dict(state_dict, strict=False)
old_state_dict = state_dict
state_dict = {k.replace('decoder.', 'left_refinement_decoder.'): v for k, v in old_state_dict.items() if 'decoder.' in k}
self.module_dict['model'].load_state_dict(state_dict, strict=False)
state_dict = {k.replace('decoder.', 'right_refinement_decoder.'): v for k, v in old_state_dict.items() if 'decoder.' in k}
self.module_dict['model'].load_state_dict(state_dict, strict=False)
elif 'halo_baseline' in filename:
state_dict = torch.load(filename)['model']
self.module_dict['model'].load_state_dict(state_dict, strict=False)
old_state_dict = state_dict
state_dict = {k.replace('decoder.', 'left_decoder.'): v for k, v in old_state_dict.items() if 'decoder.' in k}
self.module_dict['model'].load_state_dict(state_dict, strict=False)
state_dict = {k.replace('decoder.', 'right_decoder.'): v for k, v in old_state_dict.items() if 'decoder.' in k}
self.module_dict['model'].load_state_dict(state_dict, strict=False)
elif 'intaghand_baseline' in filename:
old_state_dict = torch.load(filename)
state_dict = {k.replace('module.encoder.', 'image_encoder.'): v for k, v in old_state_dict.items() if 'module.encoder' in k}
self.module_dict['model'].load_state_dict(state_dict, strict=False)
else:
state_dict = torch.load(filename)
scalars = self.parse_state_dict(state_dict, strict=strict)
return scalars
else:
if self.initialize_from is not None:
self.initialize_weights()
raise FileExistsError
def load_url(self, url):
'''Load a module dictionary from url.
Args:
url (str): url to saved model
'''
print(url)
print('=> Loading checkpoint from url...')
state_dict = model_zoo.load_url(url, progress=True)
scalars = self.parse_state_dict(state_dict)
return scalars
def parse_state_dict(self, state_dict, strict=True):
'''Parse state_dict of model and return scalars.
Args:
state_dict (dict): State dict of model
'''
for k, v in self.module_dict.items():
if k in state_dict:
if k == 'model':
v.load_state_dict(state_dict[k], strict=strict)
else:
v.load_state_dict(state_dict[k])
else:
print('Warning: Could not find %s in checkpoint!' % k)
scalars = {k: v for k, v in state_dict.items()
if k not in self.module_dict}
return scalars
def initialize_weights(self):
''' Initializes the model weights from another model file.
'''
        print('Initializing weights from model %s' % self.initialize_from)
filename_in = os.path.join(
self.initialize_from, self.initialization_file_name)
model_state_dict = self.module_dict.get('model').state_dict()
model_dict = self.module_dict.get('model').state_dict()
model_keys = set([k for (k, v) in model_dict.items()])
init_model_dict = torch.load(filename_in)['model']
init_model_k = set([k for (k, v) in init_model_dict.items()])
for k in model_keys:
if ((k in init_model_k) and (model_state_dict[k].shape ==
init_model_dict[k].shape)):
model_state_dict[k] = init_model_dict[k]
self.module_dict.get('model').load_state_dict(model_state_dict)
def is_url(url):
''' Checks if input is url.'''
scheme = urllib.parse.urlparse(url).scheme
return scheme in ('http', 'https')
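# Usage sketch (model and optimizer are ordinary torch modules/optimizers):
#   ckpt_io = CheckpointIO('./out/checkpoints', model=model, optimizer=optimizer)
#   ckpt_io.save('model.pt', epoch_it=epoch_it, it=it)
#   scalars = ckpt_io.load('model.pt')   # -> {'epoch_it': ..., 'it': ...}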
| 6,735 | 37.056497 | 140 | py |
Im2Hands | Im2Hands-main/artihand/training.py | # from im2mesh import icp
import numpy as np
from collections import defaultdict
from tqdm import tqdm
class BaseTrainer(object):
''' Base trainer class.
'''
def evaluate(self, val_loader, subset=1):
''' Performs an evaluation.
Args:
val_loader (dataloader): pytorch dataloader
'''
eval_list = defaultdict(list)
for data in tqdm(val_loader):
eval_step_dict = self.eval_step(data)
for k, v in eval_step_dict.items():
eval_list[k].append(v)
eval_dict = {k: np.mean(v) for k, v in eval_list.items()}
return eval_dict
def train_step(self, *args, **kwargs):
''' Performs a training step.
'''
raise NotImplementedError
def eval_step(self, *args, **kwargs):
''' Performs an evaluation step.
'''
raise NotImplementedError
def visualize(self, *args, **kwargs):
''' Performs visualization.
'''
raise NotImplementedError
| 1,025 | 23.428571 | 65 | py |
Im2Hands | Im2Hands-main/artihand/config.py | import yaml
from torchvision import transforms
from artihand import data
from artihand import nasa
method_dict = {
'nasa': nasa
}
# General config
def load_config(path, default_path=None):
''' Loads config file.
Args:
path (str): path to config file
default_path (bool): whether to use default path
'''
# Load configuration from file itself
with open(path, 'r') as f:
cfg_special = yaml.load(f, Loader=yaml.FullLoader )
# Check if we should inherit from a config
inherit_from = cfg_special.get('inherit_from')
# If yes, load this config first as default
# If no, use the default_path
if inherit_from is not None:
cfg = load_config(inherit_from, default_path)
elif default_path is not None:
with open(default_path, 'r') as f:
cfg = yaml.load(f, Loader=yaml.FullLoader)
else:
cfg = dict()
# Include main configuration
update_recursive(cfg, cfg_special)
return cfg
def update_recursive(dict1, dict2):
''' Update two config dictionaries recursively.
Args:
dict1 (dict): first dictionary to be updated
dict2 (dict): second dictionary which entries should be used
'''
for k, v in dict2.items():
if k not in dict1:
dict1[k] = dict()
if isinstance(v, dict):
update_recursive(dict1[k], v)
else:
dict1[k] = v
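# Example of the recursive merge: if default.yaml defines
#   training: {lr: 1.0e-4, batch_size: 32}
# and exp.yaml sets `inherit_from: default.yaml` plus training: {lr: 1.0e-3},
# then load_config('exp.yaml') yields training == {lr: 1.0e-3, batch_size: 32};
# nested keys are overridden individually instead of replacing whole sub-dicts.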
# Models
def get_model(cfg, device=None, dataset=None):
''' Returns the model instance.
Args:
cfg (dict): config dictionary
device (device): pytorch device
dataset (dataset): dataset
'''
method = cfg['method']
model = method_dict[method].config.get_model(
cfg, device=device, dataset=dataset)
return model
# Trainer
def get_trainer(model, optimizer, cfg, device):
''' Returns a trainer instance.
Args:
model (nn.Module): the model which is used
optimizer (optimizer): pytorch optimizer
cfg (dict): config dictionary
device (device): pytorch device
'''
method = cfg['method']
trainer = method_dict[method].config.get_trainer(
model, optimizer, cfg, device)
return trainer
# Generator for final mesh extraction
def get_generator(model, cfg, device):
''' Returns a generator instance.
Args:
model (nn.Module): the model which is used
cfg (dict): config dictionary
device (device): pytorch device
'''
method = cfg['method']
generator = method_dict[method].config.get_generator(model, cfg, device)
return generator
# Dataset
def get_dataset(mode, cfg, splits=1, split_idx=0):
''' Returns the dataset.
Args:
        mode (str): which split to use ('train', 'val' or 'test')
        cfg (dict): config dictionary
        splits (int): number of subsets to partition the chosen split into
        split_idx (int): index of the subset to load
'''
method = cfg['method']
dataset_type = cfg['data']['dataset']
dataset_folder = cfg['data']['path']
splits_num = splits
splits = {
'train': cfg['data']['train_split'],
'val': cfg['data']['val_split'],
'test': cfg['data']['test_split'],
}
split = splits[mode]
if dataset_type == 'init_occ_hands':
img_folder = cfg['data']['img_path']
# Method specific data
data_loader_helpers = method_dict[method].config.get_data_helpers(mode, cfg)
transforms_dict = method_dict[method].config.get_data_transforms(mode, cfg)
# Input data
input_helper = get_inputs_helper(mode, cfg)
if input_helper is not None:
data_loader_helpers['inputs'] = input_helper
dataset = data.InitOccSampleHandDataset(
img_folder, dataset_folder, data_loader_helpers,
transforms=transforms_dict,
split=split,
no_except=False,
subset=splits_num,
subset_idx=split_idx
)
elif dataset_type == 'ref_occ_hands':
img_folder = cfg['data']['img_path']
# Method specific data
data_loader_helpers = method_dict[method].config.get_data_helpers(mode, cfg)
transforms_dict = method_dict[method].config.get_data_transforms(mode, cfg)
# Input data
input_helper = get_inputs_helper(mode, cfg)
if input_helper is not None:
data_loader_helpers['inputs'] = input_helper
dataset = data.RefOccSampleHandDataset(
img_folder, dataset_folder, data_loader_helpers,
transforms=transforms_dict,
split=split,
no_except=False,
subset=splits_num,
subset_idx=split_idx
)
elif dataset_type == 'kpts_ref_hands':
img_folder = cfg['data']['img_path']
# Method specific data
data_loader_helpers = method_dict[method].config.get_data_helpers(mode, cfg)
transforms_dict = method_dict[method].config.get_data_transforms(mode, cfg)
# Input data
input_helper = get_inputs_helper(mode, cfg)
if input_helper is not None:
data_loader_helpers['inputs'] = input_helper
dataset = data.KptsRefSampleHandDataset(
img_folder, dataset_folder, data_loader_helpers,
transforms=transforms_dict,
split=split,
no_except=False,
subset=splits_num,
subset_idx=split_idx
)
else:
raise ValueError('Invalid dataset "%s"' % cfg['data']['dataset'])
return dataset
def get_inputs_helper(mode, cfg):
    ''' Returns the input data helper.
Args:
mode (str): the mode which is used
cfg (dict): config dictionary
'''
input_type = cfg['data']['input_type']
with_transforms = cfg['data']['with_transforms']
    if input_type is None:
        inputs_helper = None
elif input_type == 'trans_matrix':
if cfg['model']['use_sdf']:
inputs_helper = data.TransMatInputHelperSdf(
cfg['data']['transmat_file'],
use_bone_length=cfg['model']['use_bone_length'],
unpackbits=cfg['data']['points_unpackbits']
)
else:
inputs_helper = data.TransMatInputHelper(
cfg['data']['transmat_file'],
use_bone_length=cfg['model']['use_bone_length'],
unpackbits=cfg['data']['points_unpackbits']
)
else:
raise ValueError(
'Invalid input type (%s)' % input_type)
return inputs_helper
def get_preprocessor(cfg, dataset=None, device=None):
''' Returns preprocessor instance.
Args:
cfg (dict): config dictionary
dataset (dataset): dataset
device (device): pytorch device
'''
p_type = cfg['preprocessor']['type']
cfg_path = cfg['preprocessor']['config']
model_file = cfg['preprocessor']['model_file']
    if p_type == 'psgn':
        # `preprocess` is not imported at module level; import it lazily here
        # (assumes the im2mesh package provides PSGNPreprocessor, as in occupancy_networks)
        from im2mesh import preprocess
        preprocessor = preprocess.PSGNPreprocessor(
cfg_path=cfg_path,
pointcloud_n=cfg['data']['pointcloud_n'],
dataset=dataset,
device=device,
model_file=model_file,
)
elif p_type is None:
preprocessor = None
else:
raise ValueError('Invalid Preprocessor %s' % p_type)
return preprocessor
| 7,339 | 27.449612 | 84 | py |
Im2Hands | Im2Hands-main/artihand/checkpoints_legacy.py | import os
import urllib
import torch
from torch.utils import model_zoo
class CheckpointIO(object):
''' CheckpointIO class.
It handles saving and loading checkpoints.
Args:
checkpoint_dir (str): path where checkpoints are saved
'''
def __init__(self, checkpoint_dir='./chkpts', **kwargs):
self.module_dict = kwargs
self.checkpoint_dir = checkpoint_dir
if not os.path.exists(checkpoint_dir):
os.makedirs(checkpoint_dir)
def register_modules(self, **kwargs):
''' Registers modules in current module dictionary.
'''
self.module_dict.update(kwargs)
def save(self, filename, **kwargs):
''' Saves the current module dictionary.
Args:
filename (str): name of output file
'''
if not os.path.isabs(filename):
filename = os.path.join(self.checkpoint_dir, filename)
outdict = kwargs
for k, v in self.module_dict.items():
outdict[k] = v.state_dict()
torch.save(outdict, filename)
def load(self, filename):
'''Loads a module dictionary from local file or url.
Args:
filename (str): name of saved module dictionary
'''
if is_url(filename):
return self.load_url(filename)
else:
return self.load_file(filename)
def load_file(self, filename):
'''Loads a module dictionary from file.
Args:
filename (str): name of saved module dictionary
'''
if not os.path.isabs(filename):
filename = os.path.join(self.checkpoint_dir, filename)
if os.path.exists(filename):
print(filename)
print('=> Loading checkpoint from local file...')
state_dict = torch.load(filename)
scalars = self.parse_state_dict(state_dict)
return scalars
else:
raise FileExistsError
def load_url(self, url):
'''Load a module dictionary from url.
Args:
url (str): url to saved model
'''
print(url)
print('=> Loading checkpoint from url...')
state_dict = model_zoo.load_url(url, progress=True)
scalars = self.parse_state_dict(state_dict)
return scalars
def parse_state_dict(self, state_dict):
'''Parse state_dict of model and return scalars.
Args:
state_dict (dict): State dict of model
'''
for k, v in self.module_dict.items():
if k in state_dict:
v.load_state_dict(state_dict[k])
else:
print('Warning: Could not find %s in checkpoint!' % k)
scalars = {k: v for k, v in state_dict.items()
if k not in self.module_dict}
return scalars
def is_url(url):
scheme = urllib.parse.urlparse(url).scheme
return scheme in ('http', 'https') | 2,962 | 28.63 | 70 | py |
Im2Hands | Im2Hands-main/artihand/diff_operators.py | import torch
from torch.autograd import grad
def hessian(y, x):
''' hessian of y wrt x
y: shape (meta_batch_size, num_observations, channels)
x: shape (meta_batch_size, num_observations, 2)
'''
meta_batch_size, num_observations = y.shape[:2]
grad_y = torch.ones_like(y[..., 0]).to(y.device)
h = torch.zeros(meta_batch_size, num_observations, y.shape[-1], x.shape[-1], x.shape[-1]).to(y.device)
for i in range(y.shape[-1]):
# calculate dydx over batches for each feature value of y
dydx = grad(y[..., i], x, grad_y, create_graph=True)[0]
# calculate hessian on y for each x value
for j in range(x.shape[-1]):
h[..., i, j, :] = grad(dydx[..., j], x, grad_y, create_graph=True)[0][..., :]
status = 0
if torch.any(torch.isnan(h)):
status = -1
return h, status
def laplace(y, x):
    # renamed local variable so it does not shadow the imported `grad`
    grad_y = gradient(y, x)
    return divergence(grad_y, x)
def divergence(y, x):
div = 0.
for i in range(y.shape[-1]):
div += grad(y[..., i], x, torch.ones_like(y[..., i]), create_graph=True)[0][..., i:i+1]
return div
def gradient(y, x, grad_outputs=None):
if grad_outputs is None:
grad_outputs = torch.ones_like(y)
grad = torch.autograd.grad(y, [x], grad_outputs=grad_outputs, create_graph=True)[0]
return grad
def jacobian(y, x):
''' jacobian of y wrt x '''
meta_batch_size, num_observations = y.shape[:2]
jac = torch.zeros(meta_batch_size, num_observations, y.shape[-1], x.shape[-1]).to(y.device) # (meta_batch_size*num_points, 2, 2)
for i in range(y.shape[-1]):
# calculate dydx over batches for each feature value of y
y_flat = y[...,i].view(-1, 1)
jac[:, :, i, :] = grad(y_flat, x, torch.ones_like(y_flat), create_graph=True)[0]
status = 0
if torch.any(torch.isnan(jac)):
status = -1
return jac, status
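# Quick sanity check for gradient() (a sketch; run manually, not at import time):
#   x = torch.randn(4, 3, requires_grad=True)
#   y = (x ** 2).sum(-1, keepdim=True)
#   g = gradient(y, x)                 # analytically equals 2 * x
#   assert torch.allclose(g, 2 * x)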
| 1,892 | 30.55 | 132 | py |
Im2Hands | Im2Hands-main/artihand/nasa/training.py | import os
from tqdm import trange
import torch
from torch.nn import functional as F
from torch import distributions as dist
from im2mesh.common import (
compute_iou, make_3d_grid
)
from artihand.utils import visualize as vis
from artihand.training import BaseTrainer
from artihand import diff_operators
# For debugging
# from matplotlib import pyplot as plt
# import matplotlib
# matplotlib.use('TkAgg')
# from mpl_toolkits.mplot3d import Axes3D
# from artihand.utils.visualize import visualize_pointcloud
class Trainer(BaseTrainer):
''' Trainer object for the Occupancy Network.
Args:
model (nn.Module): Occupancy Network model
optimizer (optimizer): pytorch optimizer object
skinning_loss_weight (float): skinning loss weight for part model
device (device): pytorch device
input_type (str): input type
vis_dir (str): visualization directory
threshold (float): threshold value
eval_sample (bool): whether to evaluate samples
'''
def __init__(self, model, optimizer, skinning_loss_weight=0, device=None,
input_type='img', use_sdf=False, vis_dir=None, threshold=0.5, eval_sample=False):
self.model = model
self.optimizer = optimizer
self.skinning_loss_weight = skinning_loss_weight
self.use_sdf = use_sdf
self.device = device
self.input_type = input_type
self.vis_dir = vis_dir
self.threshold = threshold
self.eval_sample = eval_sample
self.mse_loss = torch.nn.MSELoss()
if vis_dir is not None and not os.path.exists(vis_dir):
os.makedirs(vis_dir)
def train_step(self, data):
''' Performs a training step.
Args:
data (dict): data dictionary
'''
self.model.train()
self.optimizer.zero_grad()
loss, loss_dict = self.compute_loss(data)
loss.backward()
self.optimizer.step()
return loss_dict # loss.item()
def eval_step(self, data):
''' Performs an evaluation step.
Args:
data (dict): data dictionary
'''
self.model.eval()
device = self.device
threshold = self.threshold
eval_dict = {}
# Data
# points = data.get('points').to(device)
# occ = data.get('occ').to(device)
inputs = data.get('inputs').to(device)
voxels_occ = data.get('voxels')
points_iou = data.get('points_iou.points').to(device)
occ_iou = data.get('points_iou.occ').to(device)
if self.model.use_bone_length:
bone_lengths = data.get('bone_lengths').to(device)
else:
bone_lengths = None
kwargs = {}
# with torch.no_grad():
# elbo, rec_error, kl = self.model.compute_elbo(
# points, occ, inputs, **kwargs)
# eval_dict['loss'] = -elbo.mean().item()
# eval_dict['rec_error'] = rec_error.mean().item()
# eval_dict['kl'] = kl.mean().item()
# Compute iou
batch_size = points_iou.size(0)
with torch.no_grad():
p_out = self.model(points_iou, inputs, bone_lengths=bone_lengths,
sample=self.eval_sample, **kwargs)
occ_iou_np = (occ_iou >= 0.5).cpu().numpy()
if self.use_sdf:
occ_iou_hat_np = (p_out <= threshold).cpu().numpy()
else:
occ_iou_hat_np = (p_out >= threshold).cpu().numpy()
iou = compute_iou(occ_iou_np, occ_iou_hat_np).mean()
eval_dict['iou'] = iou
# import pdb; pdb.set_trace()
# import numpy as np
# sample_ids = np.random.choice(occ_iou_np.shape[1], 1024)
# point_vis = points_iou[0, sample_ids].cpu().numpy()
# point_in = points_iou[0, occ_iou_np[0, :]]
# point_in_ids = np.random.choice(point_in.shape[0], 1024)
# point_in_vis = point_in[point_in_ids].cpu().numpy()
# point_pred_in = points_iou[0, occ_iou_hat_np[0, :]]
# point_pred_in_ids = np.random.choice(point_pred_in.shape[0], 1024)
# point_pred_in_vis = point_pred_in[point_pred_in_ids].cpu().numpy()
# fig = plt.figure()
# ax = fig.gca(projection=Axes3D.name)
# ax.scatter(point_vis[:, 2], point_vis[:, 0], point_vis[:, 1], color='b')
# ax.scatter(point_in_vis[:, 2], point_in_vis[:, 0], point_in_vis[:, 1], color='r')
# # ax.scatter(surface_points[:, 2], surface_points[:, 0], surface_points[:, 1], color='orange')
# plt.show()
# Estimate voxel iou
if voxels_occ is not None:
voxels_occ = voxels_occ.to(device)
points_voxels = make_3d_grid(
(-0.5 + 1/64,) * 3, (0.5 - 1/64,) * 3, (32,) * 3)
points_voxels = points_voxels.expand(
batch_size, *points_voxels.size())
points_voxels = points_voxels.to(device)
with torch.no_grad():
p_out = self.model(points_voxels, inputs,
sample=self.eval_sample, **kwargs)
voxels_occ_np = (voxels_occ >= 0.5).cpu().numpy()
occ_hat_np = (p_out.probs >= threshold).cpu().numpy()
iou_voxels = compute_iou(voxels_occ_np, occ_hat_np).mean()
eval_dict['iou_voxels'] = iou_voxels
return eval_dict
def visualize(self, data):
''' Performs a visualization step for the data.
Args:
data (dict): data dictionary
'''
device = self.device
batch_size = data['inputs'].size(0)
inputs = data.get('inputs').to(device)
if self.model.use_bone_length:
bone_lengths = data.get('bone_lengths').to(device)
else:
bone_lengths = None
shape = (32, 32, 32)
# shape = (64, 64, 64)
p = make_3d_grid([-0.5] * 3, [0.5] * 3, shape).to(device)
p = p.expand(batch_size, *p.size())
kwargs = {}
with torch.no_grad():
p_r = self.model(p, inputs, bone_lengths=bone_lengths, sample=self.eval_sample, **kwargs)
occ_hat = p_r.view(batch_size, *shape)
if self.use_sdf:
voxels_out = (occ_hat <= self.threshold).cpu().numpy()
else:
voxels_out = (occ_hat >= self.threshold).cpu().numpy()
for i in trange(batch_size):
input_img_path = os.path.join(self.vis_dir, '%03d_in.png' % i)
# vis.visualize_data(
# inputs[i].cpu(), self.input_type, input_img_path)
vis.visualize_voxels(
voxels_out[i], os.path.join(self.vis_dir, '%03d.png' % i))
def compute_skinning_loss(self, c, data, bone_lengths=None):
        ''' Computes skinning loss for part-based regularization.
Args:
c (tensor): encoded latent vector
data (dict): data dictionary
bone_lengths (tensor): bone lengths
'''
device = self.device
p = data.get('mesh_verts').to(device)
labels = data.get('mesh_vert_labels').to(device)
batch_size, points_size, p_dim = p.size()
kwargs = {}
pred = self.model.decode(p, c, bone_lengths=bone_lengths, reduce_part=False, **kwargs)
labels = labels.long()
# print("label", labels.size(), labels.type())
if self.use_sdf:
level_set = 0.0
else:
level_set = 0.5
labels = F.one_hot(labels, num_classes=pred.size(-1)).float()
labels = labels * level_set
# print("label", labels.size(), labels.type())
# print("label size", *labels.size())
pred = pred.view(batch_size, points_size, pred.size(-1))
# print("pred", pred.size())
# print("occ", occ)
sk_loss = self.mse_loss(pred, labels)
return sk_loss
def compute_sdf_loss(self, pred_sdf, input_points, surface_normals, surface_point_size):
        ''' Computes the SDF losses: surface, off-surface, normal-alignment,
        and eikonal (unit-gradient-norm) terms.
        Args:
            pred_sdf (tensor): predicted SDF values at surface and off-surface points
            input_points (tensor): concatenated surface and off-surface query points
            surface_normals (tensor): ground-truth normals at the surface points
            surface_point_size (int): number of surface points at the front of input_points
        '''
# gt_sdf = gt['sdf']
# gt_normals = gt['normals']
# coords = model_output['model_in']
# pred_sdf = model_output['model_out']
# gradient = diff_operators.gradient(pred_sdf, coords)
# Wherever boundary_values is not equal to zero, we interpret it as a boundary constraint.
# Surface points on the left, off-surface on the right
# sdf_constraint = torch.where(gt_sdf != -1, pred_sdf, torch.zeros_like(pred_sdf))
# inter_constraint = torch.where(gt_sdf != -1, torch.zeros_like(pred_sdf), torch.exp(-1e2 * torch.abs(pred_sdf)))
# normal_constraint = torch.where(gt_sdf != -1, 1 - F.cosine_similarity(gradient, gt_normals, dim=-1)[..., None],
# torch.zeros_like(gradient[..., :1]))
# grad_constraint = torch.abs(gradient.norm(dim=-1) - 1)
gradient = diff_operators.gradient(pred_sdf, input_points)
surface_gradient = gradient[:, :surface_point_size]
surface_pred = pred_sdf[:, :surface_point_size]
off_pred = pred_sdf[:, surface_point_size:]
# import pdb; pdb.set_trace()
sdf_constraint = surface_pred
inter_constraint = torch.exp(-1e2 * torch.abs(off_pred))
normal_constraint = 1 - F.cosine_similarity(surface_gradient, surface_normals, dim=-1)[..., None]
# normal_constraint = torch.zeros_like(gradient[..., :1])
grad_constraint = torch.abs(gradient.norm(dim=-1) - 1)
# Exp # Lapl
# -----------------
return {'sdf': torch.abs(sdf_constraint).mean() * 3e3, # 1e4 # 3e3
'inter': inter_constraint.mean() * 1e2, # 1e2 # 1e3
'normal_constraint': normal_constraint.mean() * 1e2, # 1e2, # 1e2
'grad_constraint': grad_constraint.mean() * 5e1} # 1e1 # 5e1
# inter = 3e3 for ReLU-PE
def compute_loss(self, data):
''' Computes the loss.
Args:
data (dict): data dictionary
'''
device = self.device
# Point input
if self.use_sdf:
points = data.get('points').to(device)
normals = data.get('normals').to(device)
off_points = data.get('off_points').to(device)
points.requires_grad_(True)
normals.requires_grad_(True)
off_points.requires_grad_(True)
# import pdb; pdb.set_trace()
surface_point_size = points.shape[1]
input_points = torch.cat([points, off_points], 1)
# points_np = points[0, :512].detach().cpu().numpy()
# normals_np = normals[0, :512].detach().cpu().numpy()
# off_points_np = off_points[0, :512].detach().cpu().numpy()
# surface_points = data.get('mesh_verts')[0, :512].detach().cpu().numpy()
# import pdb; pdb.set_trace()
# fig = plt.figure()
# ax = fig.gca(projection=Axes3D.name)
# ax.scatter(points_np[:, 2], points_np[:, 0], points_np[:, 1], color='b')
# # ax.scatter(off_points_np[:, 2], off_points_np[:, 0], off_points_np[:, 1], color='r')
# ax.scatter(surface_points[:, 2], surface_points[:, 0], surface_points[:, 1], color='orange')
# mesh_verts
# visualize_pointcloud(points_np, normals_np, off_points_np, show=True)
# visualize_pointcloud(points_np, None, off_points_np, show=True)
else:
p = data.get('points').to(device)
occ = data.get('occ').to(device)
# fig = plt.figure()
# ax = fig.gca(projection=Axes3D.name)
# points_np = p[0, :].detach().cpu().numpy()
# in_points = points_np[(occ[0] > 0.5).cpu().numpy(), :]
# out_points = points_np[(occ[0] < 0.5).cpu().numpy(), :]
# ax.scatter(out_points[:, 2], out_points[:, 0], out_points[:, 1])
# ax.scatter(in_points[:, 2], in_points[:, 0], in_points[:, 1])
# # ax.scatter(off_points_np[0, :, 2], off_points_np[0, :, 0], off_points_np[0, :, 1], color='r')
# # visualize_pointcloud(points_np, normals_np, off_points_np, show=True)
# print("max x %.3f, max y %.3f, max z %.3f" % (p[0, :, 0].max(), p[0, :, 1].max(), p[0, :, 2].max()))
# print("min x %.3f, min y %.3f, min z %.3f" % (p[0, :, 0].min(), p[0, :, 1].min(), p[0, :, 2].min()))
# ax.set_xlabel('Z')
# ax.set_ylabel('X')
# ax.set_zlabel('Y')
# ax.set_xlim(-0.55, 0.55)
# ax.set_ylim(-0.55, 0.55)
# ax.set_zlim(-0.55, 0.55)
# plt.show()
# import pdb; pdb.set_trace()
# Encoder inputs
# inputs = data.get('joints_trans', torch.empty(p.size(0), 0)).to(device)
# inputs = data.get('inputs', torch.empty(p.size(0), 0)).to(device)
inputs = data.get('inputs').to(device)
# joints = data.get('joints').to(device)
kwargs = {}
if self.model.use_bone_length:
bone_lengths = data.get('bone_lengths').to(device)
else:
bone_lengths = None
loss_dict = {}
c = self.model.encode_inputs(inputs)
# General points
# logits = self.model.decode(p, c, **kwargs).logits
# loss_i = F.binary_cross_entropy_with_logits(
# logits, occ, reduction='none')
# loss = loss + loss_i.sum(-1).mean()
if self.use_sdf:
pred = self.model.decode(input_points, c, bone_lengths=bone_lengths, **kwargs)
losses = self.compute_sdf_loss(pred, input_points, normals, surface_point_size)
loss = 0.
# (sdf, inter, normal_constraint, grad_constraint)
for loss_name, loss_value in losses.items():
loss += loss_value.mean()
loss_dict[loss_name] = loss_value.item()
else:
pred = self.model.decode(p, c, bone_lengths=bone_lengths, **kwargs)
# print("pred", pred)
# print("occ", occ)
loss = self.mse_loss(pred, occ)
loss_dict['occ'] = loss.item()
if self.skinning_loss_weight > 0:
sk_loss = self.compute_skinning_loss(c, data, bone_lengths=bone_lengths)
loss_dict['skin'] = sk_loss.item()
loss = loss + self.skinning_loss_weight * sk_loss
# import pdb; pdb.set_trace()
loss_dict['total'] = loss.item()
return loss, loss_dict
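# Usage sketch (batches come from a dataloader built via artihand.config.get_dataset):
#   trainer = Trainer(model, optimizer, device=device, vis_dir='out/vis')
#   for batch in train_loader:
#       loss_dict = trainer.train_step(batch)
#       print('total loss: %.4f' % loss_dict['total'])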
| 14,705 | 37.904762 | 121 | py |
Im2Hands | Im2Hands-main/artihand/nasa/config.py | import os
import torch
import torch.distributions as dist
from torch import nn
from artihand import data
from artihand import config
from artihand.nasa import models, training, generation
from artihand.nasa import init_occ_training, ref_occ_training, kpts_ref_training
def get_model(cfg, device=None, dataset=None, **kwargs):
''' Return the Occupancy Network model.
Args:
cfg (dict): imported yaml config
device (device): pytorch device
dataset (dataset): dataset
'''
decoder = cfg['model']['decoder']
encoder = cfg['model']['encoder']
encoder_latent = cfg['model']['encoder_latent']
dim = cfg['data']['dim']
z_dim = cfg['model']['z_dim']
c_dim = cfg['model']['c_dim']
decoder_c_dim = cfg['model']['decoder_c_dim']
decoder_kwargs = cfg['model']['decoder_kwargs']
encoder_kwargs = cfg['model']['encoder_kwargs']
encoder_latent_kwargs = cfg['model']['encoder_latent_kwargs']
decoder_part_output = (decoder == 'piece_rigid' or decoder == 'piece_deform' or decoder == 'piece_deform_pifu')
use_bone_length = cfg['model']['use_bone_length']
decoder = models.decoder_dict[decoder](
decoder_c_dim,
use_bone_length=use_bone_length,
**decoder_kwargs
)
if cfg['model']['type'] == 'init_occ' or cfg['model']['type'] == 'ref_occ':
left_decoder = cfg['model']['decoder']
left_decoder = models.decoder_dict[left_decoder](
decoder_c_dim,
use_bone_length=use_bone_length,
add_feature_dim=19,
add_feature_layer_idx=2,
**decoder_kwargs
)
right_decoder = cfg['model']['decoder']
right_decoder = models.decoder_dict[right_decoder](
decoder_c_dim,
use_bone_length=use_bone_length,
add_feature_dim=19,
add_feature_layer_idx=2,
**decoder_kwargs
)
model = models.ArticulatedHandNetInitOcc(
left_decoder, right_decoder,
device=device
)
if cfg['model']['type'] == 'ref_occ':
init_occ_estimator = model
if cfg['model']['type'] == 'ref_occ':
decoder_key = second_decoder = cfg['model']['decoder']
model = models.ArticulatedHandNetRefOcc(
init_occ_estimator, device=device
)
elif cfg['model']['type'] == 'kpts_ref':
model = models.ArticulatedHandNetKptsRef(
device=device
)
return model
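# Usage sketch ('configs/kpts_ref.yaml' is a hypothetical config path):
#   cfg = artihand.config.load_config('configs/kpts_ref.yaml')
#   model = get_model(cfg, device=torch.device('cuda'))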
def get_trainer(model, optimizer, cfg, device, **kwargs):
''' Returns the trainer object.
Args:
model (nn.Module): the Occupancy Network model
optimizer (optimizer): pytorch optimizer object
cfg (dict): imported yaml config
device (device): pytorch device
'''
threshold = cfg['test']['threshold']
out_dir = cfg['training']['out_dir']
vis_dir = os.path.join(out_dir, 'vis')
input_type = cfg['data']['input_type']
skinning_loss_weight = cfg['model']['skinning_weight']
use_sdf = cfg['model']['use_sdf']
if cfg['model']['type'] == 'init_occ':
trainer = init_occ_training.Trainer(
model, optimizer, skinning_loss_weight=skinning_loss_weight,
device=device, input_type=input_type,
threshold=threshold,
eval_sample=cfg['training']['eval_sample'],
)
elif cfg['model']['type'] == 'ref_occ':
trainer = ref_occ_training.Trainer(
model, optimizer, skinning_loss_weight=skinning_loss_weight,
device=device, input_type=input_type,
threshold=threshold,
eval_sample=cfg['training']['eval_sample'],
)
elif cfg['model']['type'] == 'kpts_ref':
trainer = kpts_ref_training.Trainer(
model, optimizer, skinning_loss_weight=skinning_loss_weight,
device=device, input_type=input_type,
threshold=threshold,
eval_sample=cfg['training']['eval_sample'],
)
return trainer
def get_generator(model, cfg, device, **kwargs):
''' Returns the generator object.
Args:
model (nn.Module): Occupancy Network model
cfg (dict): imported yaml config
device (device): pytorch device
'''
preprocessor = config.get_preprocessor(cfg, device=device)
generator = generation.Generator3D(
model,
device=device,
threshold=cfg['test']['threshold'],
resolution0=cfg['generation']['resolution_0'],
upsampling_steps=cfg['generation']['upsampling_steps'],
sample=cfg['generation']['use_sampling'],
refinement_step=cfg['generation']['refinement_step'],
with_color_labels=cfg['generation']['vert_labels'],
convert_to_canonical=cfg['generation']['convert_to_canonical'],
simplify_nfaces=cfg['generation']['simplify_nfaces'],
preprocessor=preprocessor,
)
return generator
def get_prior_z(cfg, device, **kwargs):
''' Returns prior distribution for latent code z.
Args:
cfg (dict): imported yaml config
device (device): pytorch device
'''
z_dim = cfg['model']['z_dim']
p0_z = dist.Normal(
torch.zeros(z_dim, device=device),
torch.ones(z_dim, device=device)
)
return p0_z
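# The prior is a diagonal unit Gaussian over the z_dim-dimensional latent space, e.g.
#   z = get_prior_z(cfg, device).sample((batch_size,))   # (batch_size, z_dim)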
def get_data_fields(mode, cfg):
''' Returns the data fields.
Args:
mode (str): the mode which is used
cfg (dict): imported yaml config
'''
points_transform = data.SubsamplePoints(cfg['data']['points_subsample'])
with_transforms = cfg['model']['use_camera']
fields = {}
fields['points'] = data.PointsField(
cfg['data']['points_file'], points_transform,
with_transforms=with_transforms,
unpackbits=cfg['data']['points_unpackbits'],
)
if mode in ('val', 'test'):
points_iou_file = cfg['data']['points_iou_file']
voxels_file = cfg['data']['voxels_file']
if points_iou_file is not None:
fields['points_iou'] = data.PointsField(
points_iou_file,
with_transforms=with_transforms,
unpackbits=cfg['data']['points_unpackbits'],
)
if voxels_file is not None:
fields['voxels'] = data.VoxelsField(voxels_file)
return fields
def get_data_helpers(mode, cfg):
    ''' Returns the data helpers.
Args:
mode (str): the mode which is used
cfg (dict): imported yaml config
'''
with_transforms = cfg['model']['use_camera']
helpers = {}
if mode in ('val', 'test'):
points_iou_file = cfg['data']['points_iou_file']
voxels_file = cfg['data']['voxels_file']
if points_iou_file is not None:
helpers['points_iou'] = data.PointsHelper(
points_iou_file,
with_transforms=with_transforms,
unpackbits=cfg['data']['points_unpackbits'],
)
# if voxels_file is not None:
# fields['voxels'] = data.VoxelsField(voxels_file)
return helpers
def get_data_transforms(mode, cfg):
''' Returns the data transform dict of callable.
Args:
mode (str): the mode which is used
cfg (dict): imported yaml config
'''
transform_dict = {}
if cfg['model']['use_sdf']:
transform_dict['points'] = data.SubsamplePointcloud(cfg['data']['points_subsample'])
# transform_dict['off_points'] = data.SubsampleOffPoint(cfg['data']['points_subsample'])
transform_dict['off_points'] = data.SampleOffPoint(cfg['data']['points_subsample'])
else:
transform_dict['points'] = data.SubsamplePoints(cfg['data']['points_subsample'])
if (cfg['model']['decoder'] == 'piece_rigid' or
cfg['model']['decoder'] == 'piece_deform' or
cfg['model']['decoder'] == 'piece_deform_pifu'):
transform_dict['mesh_points'] = data.SubsampleMeshVerts(cfg['data']['mesh_verts_subsample'])
# transform_dict['reshape_occ'] = data.ReshapeOcc(cfg['data']['mesh_verts_subsample'])
return transform_dict
| 8,082 | 32.127049 | 115 | py |
Im2Hands | Im2Hands-main/artihand/nasa/ref_occ_training.py | import os
import sys
import torch
import torch.nn as nn
import numpy as np
from torch.nn import functional as F
from torch import distributions as dist
from tqdm import trange
from im2mesh.common import (
compute_iou, make_3d_grid
)
from artihand.utils import visualize as vis
from artihand.training import BaseTrainer
from artihand import diff_operators
from dependencies.halo.halo_adapter.transform_utils import xyz_to_xyz1
class Trainer(BaseTrainer):
''' Trainer object for the Occupancy Network.'''
def __init__(self, model, optimizer, skinning_loss_weight=0, device=None,
input_type='img', threshold=0.5, eval_sample=False):
self.model = model
self.optimizer = optimizer
self.skinning_loss_weight = skinning_loss_weight
self.device = device
self.input_type = input_type
self.threshold = threshold
self.eval_sample = eval_sample
self.mse_loss = torch.nn.MSELoss()
def train_step(self, data):
''' Performs a training step.
Args:
data (dict): data dictionary
'''
self.model.train()
self.model.init_occ.eval()
self.optimizer.zero_grad()
loss, loss_dict = self.compute_loss(data)
loss.backward()
self.optimizer.step()
return loss_dict
def eval_step(self, data):
''' Performs an evaluation step.
Args:
data (dict): data dictionary
'''
self.model.eval()
device = self.device
threshold = self.threshold
eval_dict = {}
img, camera_params, mano_data, _ = data
inputs = {'left': mano_data['left'].get('inputs').to(device),
'right': mano_data['right'].get('inputs').to(device)}
points = {'left': mano_data['left'].get('points_iou.points').to(device),
'right': mano_data['right'].get('points_iou.points').to(device)}
anchor_points = {'left': mano_data['left'].get('anchor_points').to(device),
'right': mano_data['right'].get('anchor_points').to(device)}
root_rot_mat = {'left': mano_data['left'].get('root_rot_mat').to(device),
'right': mano_data['right'].get('root_rot_mat').to(device)}
bone_lengths = {'left': mano_data['left'].get('bone_lengths').to(device),
'right': mano_data['right'].get('bone_lengths').to(device)}
occ_iou = {'left': mano_data['left'].get('points_iou.occ').to(device),
'right': mano_data['right'].get('points_iou.occ').to(device)}
kwargs = {}
with torch.no_grad():
left_occ, right_occ = self.model(img, camera_params, inputs, points, anchor_points, root_rot_mat, bone_lengths, sample=self.eval_sample, **kwargs)
left_occ_iou_np = (occ_iou['left'] >= 0.5).cpu().numpy()
right_occ_iou_np = (occ_iou['right'] >= 0.5).cpu().numpy()
left_occ_iou_hat_np = (left_occ >= threshold).cpu().numpy()
right_occ_iou_hat_np = (right_occ >= threshold).cpu().numpy()
left_iou = compute_iou(left_occ_iou_np, left_occ_iou_hat_np).mean()
right_iou = compute_iou(right_occ_iou_np, right_occ_iou_hat_np).mean()
eval_dict['iou'] = (left_iou + right_iou) / 2
return eval_dict
def compute_loss(self, data):
''' Computes the loss.
Args:
data (dict): data dictionary
'''
device = self.device
self.model = self.model.to(device)
threshold = self.threshold
img, camera_params, mano_data, _ = data
inputs = {'left': mano_data['left'].get('inputs').to(device),
'right': mano_data['right'].get('inputs').to(device)}
points = {'left': mano_data['left'].get('points').to(device),
'right': mano_data['right'].get('points').to(device)}
anchor_points = {'left': mano_data['left'].get('anchor_points').to(device),
'right': mano_data['right'].get('anchor_points').to(device)}
root_rot_mat = {'left': mano_data['left'].get('root_rot_mat').to(device),
'right': mano_data['right'].get('root_rot_mat').to(device)}
bone_lengths = {'left': mano_data['left'].get('bone_lengths').to(device),
'right': mano_data['right'].get('bone_lengths').to(device)}
occ = {'left': mano_data['left'].get('occ').to(device),
'right': mano_data['right'].get('occ').to(device)}
kwargs = {}
left_occ, right_occ, pen_left_occ, pen_right_occ = self.model(img, camera_params, inputs, points, anchor_points, root_rot_mat, bone_lengths, pen=True, sample=self.eval_sample, **kwargs)
loss_dict = {}
occ_loss = self.mse_loss(left_occ, occ['left']) + self.mse_loss(right_occ, occ['right'])
loss_dict['occ'] = occ_loss.item()
        # Penetration penalty: penalize points predicted to be occupied by both hands
        penetration_loss = (left_occ * pen_right_occ) + (right_occ * pen_left_occ)
        penetration_loss = penetration_loss.sum()
        loss_dict['pen'] = penetration_loss.item()
loss = occ_loss + 0.001 * penetration_loss
loss_dict['total'] = loss.item()
return loss, loss_dict
| 5,242 | 32.825806 | 193 | py |
Im2Hands | Im2Hands-main/artihand/nasa/generation.py | import torch
import torch.nn as nn
import torch.optim as optim
from torch import autograd
import numpy as np
from tqdm import trange
import trimesh
from im2mesh.utils import libmcubes
from im2mesh.common import make_3d_grid
from im2mesh.utils.libsimplify import simplify_mesh
from im2mesh.utils.libmise import MISE
import time
from skimage import measure
class Generator3D(object):
''' Generator class for Occupancy Networks.
It provides functions to generate the final mesh as well refining options.
Args:
model (nn.Module): trained Occupancy Network model
points_batch_size (int): batch size for points evaluation
threshold (float): threshold value
refinement_step (int): number of refinement steps
device (device): pytorch device
resolution0 (int): start resolution for MISE
upsampling steps (int): number of upsampling steps
with_normals (bool): whether normals should be estimated
padding (float): how much padding should be used for MISE
sample (bool): whether z should be sampled
with_color_labels (bool): whether to assign part-color to the output mesh vertices
convert_to_canonical (bool): whether to reconstruct mesh in canonical pose (for debugging)
simplify_nfaces (int): number of faces the mesh should be simplified to
preprocessor (nn.Module): preprocessor for inputs
'''
def __init__(self, model, points_batch_size=1000000,
threshold=0.45, refinement_step=0, device=None,
resolution0=16, upsampling_steps=3,
with_normals=False, padding=0.1, sample=False,
with_color_labels=False,
convert_to_canonical=False,
simplify_nfaces=None,
preprocessor=None):
self.model = model.to(device)
self.points_batch_size = points_batch_size
self.refinement_step = refinement_step
self.threshold = threshold
self.device = device
self.resolution0 = resolution0
self.upsampling_steps = upsampling_steps
self.with_normals = with_normals
self.padding = padding
self.sample = sample
self.with_color_labels = with_color_labels
self.convert_to_canonical = convert_to_canonical
self.simplify_nfaces = simplify_nfaces
self.preprocessor = preprocessor
self.bone_colors = np.array([
(119, 41, 191, 255), (75, 170, 46, 255), (116, 61, 134, 255), (44, 121, 216, 255), (250, 191, 216, 255), (129, 64, 130, 255),
(71, 242, 184, 255), (145, 60, 43, 255), (51, 68, 187, 255), (208, 250, 72, 255), (104, 155, 87, 255), (189, 8, 224, 255),
(193, 172, 145, 255), (72, 93, 70, 255), (28, 203, 124, 255), (131, 207, 80, 255)
], dtype=np.uint8
)
def init_occ_generate_mesh(self, data, threshold=0.5):
''' Generates the output mesh.
Args:
data (tensor): data tensor
return_stats (bool): whether stats should be returned
'''
self.model.eval()
device = self.device
stats_dict = {}
kwargs = {}
img, camera_params, mano_data, idx = data
inputs = {'left': mano_data['left'].get('inputs').to(device),
'right': mano_data['right'].get('inputs').to(device)}
root_rot_mat = {'left': mano_data['left'].get('root_rot_mat').to(device),
'right': mano_data['right'].get('root_rot_mat').to(device)}
bone_lengths = {'left': mano_data['left'].get('bone_lengths').to(device),
'right': mano_data['right'].get('bone_lengths').to(device)}
hms, mask, dp, img_fmaps, hms_fmaps, dp_fmaps = self.model.image_encoder(img.cuda())
img_f, hms_f, dp_f = img_fmaps[-1], hms_fmaps[-1], dp_fmaps[-1]
img_feat = torch.cat((hms_f, dp_f), 1)
img_feat = self.model.image_final_layer(img_feat)
if threshold is None:
threshold = self.threshold
t0 = time.time()
# Compute bounding box size
box_size = 1 + self.padding
# Shortcut
if self.upsampling_steps == 0:
nx = self.resolution0
pointsf = box_size * make_3d_grid(
(-0.5,)*3, (0.5,)*3, (nx,)*3
)
            left_values, right_values = self.init_occ_eval_points(
                img_feat, camera_params, inputs, (pointsf, pointsf),
                root_rot_mat, bone_lengths, **kwargs)
            left_value_grid = left_values.cpu().numpy().reshape(nx, nx, nx)
            right_value_grid = right_values.cpu().numpy().reshape(nx, nx, nx)
else:
left_mesh_extractor = MISE(
self.resolution0, self.upsampling_steps, threshold)
right_mesh_extractor = MISE(
self.resolution0, self.upsampling_steps, threshold)
left_points = left_mesh_extractor.query()
right_points = right_mesh_extractor.query()
while left_points.shape[0] != 0 or right_points.shape[0] != 0:
# Query points
left_pointsf = torch.FloatTensor(left_points).to(self.device)
right_pointsf = torch.FloatTensor(right_points).to(self.device)
# Normalize to bounding box
left_pointsf = left_pointsf / left_mesh_extractor.resolution
left_pointsf = box_size * (left_pointsf - 0.5)
right_pointsf = right_pointsf / right_mesh_extractor.resolution
right_pointsf = box_size * (right_pointsf - 0.5)
# Evaluate model and update
left_values, right_values = self.init_occ_eval_points(img_feat, camera_params, inputs, (left_pointsf, right_pointsf), root_rot_mat, bone_lengths=bone_lengths, **kwargs)
left_values = left_values.cpu().numpy()
right_values = right_values.cpu().numpy()
left_values = left_values.astype(np.float64)
right_values = right_values.astype(np.float64)
left_mesh_extractor.update(left_points, left_values)
right_mesh_extractor.update(right_points, right_values)
left_points = left_mesh_extractor.query()
right_points = right_mesh_extractor.query()
left_value_grid = left_mesh_extractor.to_dense()
right_value_grid = right_mesh_extractor.to_dense()
# Extract mesh
stats_dict['time (eval points)'] = time.time() - t0
left_mesh = self.extract_mesh(left_value_grid, inputs['left'], bone_lengths['left'], stats_dict=stats_dict, threshold=threshold)
        right_mesh = self.extract_mesh(right_value_grid, inputs['right'], bone_lengths['right'], stats_dict=stats_dict, threshold=threshold)
return left_mesh, right_mesh
def ref_occ_generate_mesh(self, data, threshold=0.45):
''' Generates the output mesh.
Args:
data (tensor): data tensor
return_stats (bool): whether stats should be returned
'''
self.model.eval()
device = self.device
stats_dict = {}
kwargs = {}
img, camera_params, mano_data, idx = data
inputs = {'left': mano_data['left'].get('inputs').to(device),
'right': mano_data['right'].get('inputs').to(device)}
anchor_points = {'left': mano_data['left'].get('anchor_points').to(device),
'right': mano_data['right'].get('anchor_points').to(device)}
root_rot_mat = {'left': mano_data['left'].get('root_rot_mat').to(device),
'right': mano_data['right'].get('root_rot_mat').to(device)}
bone_lengths = {'left': mano_data['left'].get('bone_lengths').to(device),
'right': mano_data['right'].get('bone_lengths').to(device)}
img = img.cuda()
hms, mask, dp, img_fmaps, hms_fmaps, dp_fmaps = self.model.image_encoder(img)
hms_global = self.model.hms_global_layer(hms_fmaps[0]).squeeze(-1).squeeze(-1)
dp_global = self.model.dp_global_layer(dp_fmaps[0]).squeeze(-1).squeeze(-1)
img_global = torch.cat([hms_global, dp_global], 1)
img_f = nn.functional.interpolate(img_fmaps[-1], size=[256, 256], mode='bilinear')
hms_f = nn.functional.interpolate(hms_fmaps[-1], size=[256, 256], mode='bilinear')
dp_f = nn.functional.interpolate(dp_fmaps[-1], size=[256, 256], mode='bilinear')
img_feat = torch.cat((hms_f, dp_f), 1)
img_feat = self.model.image_final_layer(img_feat)
img_feat = (img_feat, img_global)
if threshold is None:
threshold = self.threshold
t0 = time.time()
# Compute bounding box size
box_size = 1 + self.padding
# Shortcut
if self.upsampling_steps == 0:
nx = self.resolution0
pointsf = box_size * make_3d_grid(
(-0.5,)*3, (0.5,)*3, (nx,)*3
)
            left_values, right_values = self.ref_occ_eval_points(
                img, img_feat, camera_params, inputs, (pointsf, pointsf),
                anchor_points, root_rot_mat, bone_lengths, **kwargs)
            left_value_grid = left_values.cpu().numpy().reshape(nx, nx, nx)
            right_value_grid = right_values.cpu().numpy().reshape(nx, nx, nx)
else:
left_mesh_extractor = MISE(
self.resolution0, self.upsampling_steps, threshold)
right_mesh_extractor = MISE(
self.resolution0, self.upsampling_steps, threshold)
left_points = left_mesh_extractor.query()
right_points = right_mesh_extractor.query()
while left_points.shape[0] != 0 or right_points.shape[0] != 0:
# Query points
left_pointsf = torch.FloatTensor(left_points).to(self.device)
right_pointsf = torch.FloatTensor(right_points).to(self.device)
# Normalize to bounding box
left_pointsf = left_pointsf / left_mesh_extractor.resolution
left_pointsf = box_size * (left_pointsf - 0.5)
right_pointsf = right_pointsf / right_mesh_extractor.resolution
right_pointsf = box_size * (right_pointsf - 0.5)
# Evaluate model and update
left_values, right_values = self.ref_occ_eval_points(img, img_feat, camera_params, inputs, (left_pointsf, right_pointsf), anchor_points, root_rot_mat, bone_lengths=bone_lengths, **kwargs)
left_values = left_values.cpu().numpy()
right_values = right_values.cpu().numpy()
left_values = left_values.astype(np.float64)
right_values = right_values.astype(np.float64)
left_mesh_extractor.update(left_points, left_values)
right_mesh_extractor.update(right_points, right_values)
left_points = left_mesh_extractor.query()
right_points = right_mesh_extractor.query()
left_value_grid = left_mesh_extractor.to_dense()
right_value_grid = right_mesh_extractor.to_dense()
# Extract mesh
stats_dict['time (eval points)'] = time.time() - t0
left_mesh = self.extract_mesh(left_value_grid, inputs['left'], bone_lengths['left'], stats_dict=stats_dict, threshold=threshold)
        right_mesh = self.extract_mesh(right_value_grid, inputs['right'], bone_lengths['right'], stats_dict=stats_dict, threshold=threshold)
return left_mesh, right_mesh
def generate_mesh(self, data, return_stats=True, threshold=None, pointcloud=False, return_intermediate=False, e2e=False):
''' Generates the output mesh.
Args:
data (tensor): data tensor
return_stats (bool): whether stats should be returned
'''
self.model.eval()
device = self.device
stats_dict = {}
inputs = data.get('inputs', torch.empty(1, 0)).to(device)
bone_lengths = data.get('bone_lengths')
if bone_lengths is not None:
bone_lengths = bone_lengths.to(device)
kwargs = {}
# Preprocess if requires # currently - none
if self.preprocessor is not None:
print('check - preprocess')
t0 = time.time()
with torch.no_grad():
inputs = self.preprocessor(inputs)
stats_dict['time (preprocess)'] = time.time() - t0
# Encode inputs - this is actually identity function (input - output not changed)
t0 = time.time()
with torch.no_grad():
c = self.model.encode_inputs(inputs)
stats_dict['time (encode inputs)'] = time.time() - t0
#print(c.size())
# z = self.model.get_z_from_prior((1,), sample=self.sample).to(device)
mesh = self.generate_from_latent(c, bone_lengths=bone_lengths, stats_dict=stats_dict, threshold=threshold, pointcloud=pointcloud, return_intermediate = return_intermediate, **kwargs)
if return_stats:
return mesh, stats_dict
else:
return mesh
def generate_from_latent(self, c=None, bone_lengths=None, stats_dict={}, threshold=None, pointcloud=False, return_intermediate=False, side=None, **kwargs):
''' Generates mesh from latent.
Args:
# z (tensor): latent code z
c (tensor): latent conditioned code c
stats_dict (dict): stats dictionary
'''
# threshold = np.log(self.threshold) - np.log(1. - self.threshold)
if threshold is None:
threshold = self.threshold
t0 = time.time()
# Compute bounding box size
box_size = 1 + self.padding
# Shortcut
if self.upsampling_steps == 0:
nx = self.resolution0
pointsf = box_size * make_3d_grid(
(-0.5,)*3, (0.5,)*3, (nx,)*3
)
# values = self.eval_points(pointsf, z, c, **kwargs).cpu().numpy()
values = self.eval_points(pointsf, c, bone_lengths=bone_lengths, **kwargs).cpu().numpy()
value_grid = values.reshape(nx, nx, nx)
else:
mesh_extractor = MISE(
self.resolution0, self.upsampling_steps, threshold)
points = mesh_extractor.query()
# center = torch.FloatTensor([-0.15, 0.0, 0.0]).to(self.device)
# box_size = 0.8
while points.shape[0] != 0:
# Query points
pointsf = torch.FloatTensor(points).to(self.device)
# Normalize to bounding box
pointsf = pointsf / mesh_extractor.resolution
pointsf = box_size * (pointsf - 0.5)
# Evaluate model and update
# import pdb; pdb.set_trace()
values = self.eval_points(
pointsf, c, bone_lengths=bone_lengths, side=side, **kwargs).cpu().numpy()
# import pdb; pdb.set_trace()
# values = self.eval_points(
# pointsf, z, c, **kwargs).cpu().numpy()
values = values.astype(np.float64)
mesh_extractor.update(points, values)
points = mesh_extractor.query()
value_grid = mesh_extractor.to_dense()
# Extract mesh
stats_dict['time (eval points)'] = time.time() - t0
# mesh = self.extract_mesh(value_grid, z, c, stats_dict=stats_dict)
if return_intermediate:
return value_grid
if not pointcloud:
mesh = self.extract_mesh(value_grid, c, bone_lengths=bone_lengths, stats_dict=stats_dict, threshold=threshold)
else:
mesh = self.extract_pointcloud(value_grid, c, bone_lengths=bone_lengths, stats_dict=stats_dict, threshold=threshold)
return mesh
def init_occ_eval_points(self, img_feat, camera_params, c, p, root_rot_mat, bone_lengths, **kwargs):
''' Evaluates the occupancy values for the points.
Args:
p (tensor): points
# z (tensor): latent code z
c (tensor): latent conditioned code c
'''
left_p, right_p = p
left_p_split = torch.split(left_p, self.points_batch_size)
right_p_split = torch.split(right_p, self.points_batch_size)
left_occ_hats = []
right_occ_hats = []
assert len(left_p_split) == len(right_p_split)
for idx in range(len(right_p_split)):
left_pi = left_p_split[idx]
right_pi = right_p_split[idx]
left_pi = left_pi.unsqueeze(0).to(self.device)
right_pi = right_pi.unsqueeze(0).to(self.device)
p = {'left': left_pi, 'right': right_pi}
with torch.no_grad():
left_occ_hat, right_occ_hat = self.model.decode(img_feat, camera_params, c, p, root_rot_mat, bone_lengths, **kwargs)
left_occ_hats.append(left_occ_hat.squeeze(0).detach().cpu())
right_occ_hats.append(right_occ_hat.squeeze(0).detach().cpu())
left_occ_hat = torch.cat(left_occ_hats, dim=0)
right_occ_hat = torch.cat(right_occ_hats, dim=0)
return left_occ_hat, right_occ_hat
def ref_occ_eval_points(self, img, img_feat, camera_params, c, p, anchor_points, root_rot_mat, bone_lengths, **kwargs):
''' Evaluates the occupancy values for the points.
Args:
p (tensor): points
# z (tensor): latent code z
c (tensor): latent conditioned code c
'''
left_p, right_p = p
left_p_split = torch.split(left_p, self.points_batch_size)
right_p_split = torch.split(right_p, self.points_batch_size)
left_occ_hats = []
right_occ_hats = []
assert len(left_p_split) == len(right_p_split)
for idx in range(len(right_p_split)):
left_pi = left_p_split[idx]
right_pi = right_p_split[idx]
left_pi = left_pi.unsqueeze(0).to(self.device)
right_pi = right_pi.unsqueeze(0).to(self.device)
p = {'left': left_pi, 'right': right_pi}
with torch.no_grad():
left_occ_hat, right_occ_hat = self.model(img, camera_params, c, p, anchor_points, root_rot_mat, bone_lengths, img_feat=img_feat, test=True, **kwargs)
left_occ_hats.append(left_occ_hat.squeeze(0).detach().cpu())
right_occ_hats.append(right_occ_hat.squeeze(0).detach().cpu())
left_occ_hat = torch.cat(left_occ_hats, dim=0)
right_occ_hat = torch.cat(right_occ_hats, dim=0)
return left_occ_hat, right_occ_hat
def extract_mesh(self, occ_hat, c=None, bone_lengths=None, stats_dict=dict(), threshold=None):
''' Extracts the mesh from the predicted occupancy grid.
Args:
    occ_hat (tensor): predicted occupancy grid
    c (tensor): latent conditioned code c
    bone_lengths (tensor): bone lengths
    stats_dict (dict): stats dictionary
'''
# Some short hands
n_x, n_y, n_z = occ_hat.shape
box_size = 1 + self.padding
if threshold is None:
threshold = self.threshold
# Make sure that mesh is watertight
t0 = time.time()
occ_hat_padded = np.pad(
occ_hat, 1, 'constant', constant_values=-1e6)
vertices, triangles = libmcubes.marching_cubes(
occ_hat_padded, threshold)
stats_dict['time (marching cubes)'] = time.time() - t0
# Strange behaviour in libmcubes: vertices are shifted by 0.5
vertices -= 0.5
# Undo padding
vertices -= 1
# Normalize to bounding box
vertices /= np.array([n_x-1, n_y-1, n_z-1])
vertices = box_size * (vertices - 0.5)
if vertices.shape[0] == 0:
mesh = trimesh.Trimesh(vertices, triangles)
return mesh
# Get point colors
if self.with_color_labels:
vert_labels = self.eval_point_colors(vertices, c, bone_lengths=bone_lengths)
vertex_colors = self.bone_colors[vert_labels]
if self.convert_to_canonical:
    vertices = self.convert_mesh_to_canonical(vertices, c, vert_labels)
else:
vertex_colors = None
# Estimate normals if needed
if self.with_normals and not vertices.shape[0] == 0:
t0 = time.time()
normals = self.estimate_normals(vertices, c)
stats_dict['time (normals)'] = time.time() - t0
else:
normals = None
# Create mesh
mesh = trimesh.Trimesh(vertices, triangles,
                       vertex_normals=normals,
                       vertex_colors=vertex_colors,
                       process=False)
# Directly return if mesh is empty
if vertices.shape[0] == 0:
return mesh
# TODO: normals are lost here
if self.simplify_nfaces is not None:
t0 = time.time()
mesh = simplify_mesh(mesh, self.simplify_nfaces, 5.)
stats_dict['time (simplify)'] = time.time() - t0
# Refine mesh
if self.refinement_step > 0:
t0 = time.time()
self.refine_mesh(mesh, occ_hat, c)
stats_dict['time (refine)'] = time.time() - t0
return mesh
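# Illustrative sketch (not part of the original code): the de-normalization in
# extract_mesh maps marching-cubes grid coordinates back to world space. A
# minimal standalone version, assuming a cubic grid of resolution `n` and the
# padded unit box used above:
def _example_grid_to_world(vertices, n, box_size):
    vertices = vertices - 0.5       # undo the libmcubes half-voxel shift
    vertices = vertices - 1         # undo the 1-voxel constant padding
    vertices = vertices / (n - 1)   # normalize to [0, 1]
    return box_size * (vertices - 0.5)  # center in [-box_size/2, box_size/2]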
def convert_mesh_to_canonical(self, vertices, trans_mat, vert_labels):
''' Converts the mesh vertices back to canonical pose using the input
transformation matrices and the per-vertex labels.
Args:
    vertices (numpy array): vertices of the mesh
    trans_mat (tensor): per-bone transformation matrices (without projection)
    vert_labels (tensor): labels indicating which sub-model each vertex belongs to.
'''
pointsf = torch.FloatTensor(vertices).to(self.device)
# homogeneous coordinates: [V, 3] -> [V, 4, 1]
pointsf = torch.cat([pointsf, pointsf.new_ones(pointsf.shape[0], 1)], dim=1)
pointsf = pointsf.unsqueeze(2)
# select the transformation matrix of the bone each vertex belongs to
vert_trans_mat = trans_mat[0, vert_labels]
new_vertices = torch.matmul(vert_trans_mat, pointsf)
vertices = new_vertices[:, :3].squeeze(2).detach().cpu().numpy()
return vertices
def estimate_normals(self, vertices, c=None):
''' Estimates the normals by computing the gradient of the objective.
Args:
vertices (numpy array): vertices of the mesh
# z (tensor): latent code z
c (tensor): latent conditioned code c
'''
device = self.device
vertices = torch.FloatTensor(vertices)
vertices_split = torch.split(vertices, self.points_batch_size)
normals = []
# z, c = z.unsqueeze(0), c.unsqueeze(0)
c = c.unsqueeze(0)
for vi in vertices_split:
vi = vi.unsqueeze(0).to(device)
vi.requires_grad_()
# occ_hat = self.model.decode(vi, z, c).logits
occ_hat = self.model.decode(vi, c)
out = occ_hat.sum()
out.backward()
ni = -vi.grad
ni = ni / torch.norm(ni, dim=-1, keepdim=True)
ni = ni.squeeze(0).cpu().numpy()
normals.append(ni)
normals = np.concatenate(normals, axis=0)
return normals
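# Illustrative sketch (not part of the original code): normals as the negative,
# normalized gradient of the implicit function, shown on a toy "occupancy"
# where the answer is known analytically (a sphere). Mirrors the autograd
# pattern used in estimate_normals above.
def _example_gradient_normals():
    pts = torch.randn(8, 3, requires_grad=True)
    occ = -(pts ** 2).sum(dim=-1)   # toy inside-ness score: larger inside
    occ.sum().backward()
    ni = -pts.grad                  # points towards decreasing occupancy
    ni = ni / torch.norm(ni, dim=-1, keepdim=True)
    return ni                       # equals pts / ||pts|| for the sphere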
def refine_mesh(self, mesh, occ_hat, c=None):
''' Refines the predicted mesh.
Args:
mesh (trimesh object): predicted mesh
occ_hat (tensor): predicted occupancy grid
# z (tensor): latent code z
c (tensor): latent conditioned code c
'''
self.model.eval()
# Some shorthands
n_x, n_y, n_z = occ_hat.shape
assert(n_x == n_y == n_z)
# threshold = np.log(self.threshold) - np.log(1. - self.threshold)
threshold = self.threshold
# Vertex parameter
v0 = torch.FloatTensor(mesh.vertices).to(self.device)
v = torch.nn.Parameter(v0.clone())
# Faces of mesh
faces = torch.LongTensor(mesh.faces).to(self.device)
# Start optimization
optimizer = optim.RMSprop([v], lr=1e-4)
for it_r in trange(self.refinement_step):
optimizer.zero_grad()
# Loss
face_vertex = v[faces]
eps = np.random.dirichlet((0.5, 0.5, 0.5), size=faces.shape[0])
eps = torch.FloatTensor(eps).to(self.device)
face_point = (face_vertex * eps[:, :, None]).sum(dim=1)
face_v1 = face_vertex[:, 1, :] - face_vertex[:, 0, :]
face_v2 = face_vertex[:, 2, :] - face_vertex[:, 1, :]
face_normal = torch.cross(face_v1, face_v2)
face_normal = face_normal / \
(face_normal.norm(dim=1, keepdim=True) + 1e-10)
face_value = torch.sigmoid(
# self.model.decode(face_point.unsqueeze(0), z, c).logits
self.model.decode(face_point.unsqueeze(0), c)
)
normal_target = -autograd.grad(
[face_value.sum()], [face_point], create_graph=True)[0]
normal_target = \
normal_target / \
(normal_target.norm(dim=1, keepdim=True) + 1e-10)
loss_target = (face_value - threshold).pow(2).mean()
loss_normal = \
(face_normal - normal_target).pow(2).sum(dim=1).mean()
loss = loss_target + 0.01 * loss_normal
# Update
loss.backward()
optimizer.step()
mesh.vertices = v.data.cpu().numpy()
return mesh
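# Illustrative sketch (not part of the original code): refine_mesh samples a
# random point on each triangle with Dirichlet(0.5, 0.5, 0.5) barycentric
# weights and compares the geometric face normal (a cross product) with the
# implicit-surface normal. Minimal standalone version:
def _example_face_point_and_normal(face_vertex):
    # face_vertex: [F, 3, 3] tensor of triangle vertices
    eps = np.random.dirichlet((0.5, 0.5, 0.5), size=face_vertex.shape[0])
    eps = torch.FloatTensor(eps)
    point = (face_vertex * eps[:, :, None]).sum(dim=1)  # barycentric sample
    v1 = face_vertex[:, 1, :] - face_vertex[:, 0, :]
    v2 = face_vertex[:, 2, :] - face_vertex[:, 1, :]
    normal = torch.cross(v1, v2)
    return point, normal / (normal.norm(dim=1, keepdim=True) + 1e-10)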
| 25,868 | 38.494656 | 203 | py |
Im2Hands | Im2Hands-main/artihand/nasa/kpts_ref_training.py | import os
import sys
import torch
import torch.nn as nn
import numpy as np
from torch.nn import functional as F
from torch import distributions as dist
from tqdm import trange
from im2mesh.common import (
compute_iou, make_3d_grid
)
from artihand.utils import visualize as vis
from artihand.training import BaseTrainer
from artihand import diff_operators
from dependencies.halo.halo_adapter.transform_utils import xyz_to_xyz1
def preprocess_joints(left_joints, right_joints, camera_params, root_rot_mat, return_mid=False):
# preprocess left joints
#left_joints = torch.bmm(xyz_to_xyz1(left_joints.double()), root_rot_mat['left'].double())[:, :, :3]
left_joints = left_joints + (camera_params['left_root_xyz'].cuda().unsqueeze(1))
left_joints = left_joints * torch.Tensor([-1., 1., 1.]).cuda()
left_joints = torch.bmm(left_joints, camera_params['R'].transpose(1,2).double().cuda()) + camera_params['T'].double().cuda().unsqueeze(1)
# preprocess right joints
#right_joints = torch.bmm(xyz_to_xyz1(right_joints.double()), root_rot_mat['right'].double())[:, :, :3]
right_joints = right_joints + (camera_params['right_root_xyz'].cuda().unsqueeze(1))
right_joints = torch.bmm(right_joints, camera_params['R'].transpose(1,2).double().cuda()) + camera_params['T'].double().cuda().unsqueeze(1)
# normalize
left_mid_joint = left_joints[:, 4, :].unsqueeze(1)
left_joints = left_joints - left_mid_joint
right_joints = right_joints - left_mid_joint
if return_mid:
return left_joints*1000, right_joints*1000, left_mid_joint
return left_joints*1000, right_joints*1000
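# Illustrative sketch (not part of the original code): preprocess_joints uses
# the row-vector camera convention x' = x @ R^T + T, then centers on joint 4
# and converts meters to millimeters. Toy usage with identity extrinsics
# (shapes are assumptions for illustration):
def _example_camera_convention():
    joints = torch.randn(1, 21, 3)                   # assumed 21 joints, meters
    R, T = torch.eye(3).unsqueeze(0), torch.zeros(1, 3)
    cam = torch.bmm(joints, R.transpose(1, 2)) + T.unsqueeze(1)
    cam = cam - cam[:, 4, :].unsqueeze(1)            # center on reference joint
    return cam * 1000                                # meters -> millimeters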
class Trainer(BaseTrainer):
''' Trainer object for the Occupancy Network.
Args:
model (nn.Module): Occupancy Network model
optimizer (optimizer): pytorch optimizer object
skinning_loss_weight (float): skinning loss weight for part model
device (device): pytorch device
input_type (str): input type
threshold (float): threshold value
eval_sample (bool): whether to evaluate samples
'''
def __init__(self, model, optimizer, skinning_loss_weight=0, device=None,
input_type='img', threshold=0.5, eval_sample=False):
self.model = model
self.optimizer = optimizer
self.skinning_loss_weight = skinning_loss_weight
self.device = device
self.input_type = input_type
self.threshold = threshold
self.eval_sample = eval_sample
self.loss = torch.nn.MSELoss()
def train_step(self, data):
''' Performs a training step.
Args:
data (dict): data dictionary
'''
self.model.train()
self.optimizer.zero_grad()
loss, loss_dict = self.compute_loss(data)
loss.backward()
self.optimizer.step()
return loss_dict
def eval_step(self, data):
''' Performs an evaluation step.
Args:
data (dict): data dictionary
'''
self.model.eval()
device = self.device
threshold = self.threshold
eval_dict = {}
img, camera_params, mano_data, _ = data
joints_gt = {'left': mano_data['left'].get('joints').to(device),
'right': mano_data['right'].get('joints').to(device)}
joints = {'left': mano_data['left'].get('pred_joints').to(device),
'right': mano_data['right'].get('pred_joints').to(device)}
root_rot_mat = {'left': mano_data['left'].get('root_rot_mat').to(device),
                'right': mano_data['right'].get('root_rot_mat').to(device)}
kwargs = {}
# joint space conversion & normalization
left_joints, right_joints = preprocess_joints(joints['left'], joints['right'], camera_params, root_rot_mat)
left_joints_gt, right_joints_gt = preprocess_joints(joints_gt['left'], joints_gt['right'], camera_params, root_rot_mat)
in_joints = {'left': left_joints, 'right': right_joints}
with torch.no_grad():
left_joints_pred, right_joints_pred = self.model(img, camera_params, in_joints, **kwargs)
left_joints_pred = left_joints_pred - left_joints_pred[:, 4, :].unsqueeze(1)
right_joints_pred = right_joints_pred - right_joints_pred[:, 4, :].unsqueeze(1)
left_joints_gt = left_joints_gt - left_joints_gt[:, 4, :].unsqueeze(1)
right_joints_gt = right_joints_gt - right_joints_gt[:, 4, :].unsqueeze(1)
left_joints = left_joints - left_joints[:, 4, :].unsqueeze(1)
right_joints = right_joints - right_joints[:, 4, :].unsqueeze(1)
eval_dict['joint_err'] = self.loss(left_joints_pred.to(torch.float64), left_joints_gt).item() \
+ self.loss(right_joints_pred.to(torch.float64), right_joints_gt).item()
joints_num = left_joints_pred.shape[0]
left_jpe, right_jpe = 0., 0.
in_left_jpe, in_right_jpe = 0., 0.
for i in range(joints_num):
left_jpe += torch.linalg.norm((left_joints_pred[i] - left_joints_gt[i]), ord=2, dim=-1).mean().item()
right_jpe += torch.linalg.norm((right_joints_pred[i] - right_joints_gt[i]), ord=2, dim=-1).mean().item()
in_left_jpe += torch.linalg.norm((left_joints[i] - left_joints_gt[i]), ord=2, dim=-1).mean().item()
in_right_jpe += torch.linalg.norm((right_joints[i] - right_joints_gt[i]), ord=2, dim=-1).mean().item()
left_jpe /= joints_num
right_jpe /= joints_num
in_left_jpe /= joints_num
in_right_jpe /= joints_num
eval_dict['jpe'] = (left_jpe + right_jpe) * 0.5
eval_dict['in_jpe'] = (in_left_jpe + in_right_jpe) * 0.5
return eval_dict
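# Illustrative sketch (not part of the original code): the per-sample joint
# position error above is the mean Euclidean distance over joints (MPJPE):
def _example_mpjpe(pred, gt):
    # pred, gt: [J, 3] joint positions in the same (centered) frame
    return torch.linalg.norm(pred - gt, ord=2, dim=-1).mean().item()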
def compute_loss(self, data):
''' Computes the loss.
Args:
data (dict): data dictionary
'''
device = self.device
self.model = self.model.to(device)
threshold = self.threshold
img, camera_params, mano_data, _ = data
joints_gt = {'left': mano_data['left'].get('joints').to(device),
'right': mano_data['right'].get('joints').to(device)}
joints = {'left': mano_data['left'].get('pred_joints').to(device),
'right': mano_data['right'].get('pred_joints').to(device)}
root_rot_mat = {'left': mano_data['left'].get('root_rot_mat').to(device),
                'right': mano_data['right'].get('root_rot_mat').to(device)}
kwargs = {}
# joint space conversion & normalization
left_joints, right_joints = preprocess_joints(joints['left'], joints['right'], camera_params, root_rot_mat)
left_joints_gt, right_joints_gt = preprocess_joints(joints_gt['left'], joints_gt['right'], camera_params, root_rot_mat)
in_joints = {'left': left_joints, 'right': right_joints}
left_joints_pred, right_joints_pred = self.model(img, camera_params, in_joints, **kwargs)
left_joints_pred = left_joints_pred - left_joints_pred[:, 4, :].unsqueeze(1)
right_joints_pred = right_joints_pred - right_joints_pred[:, 4, :].unsqueeze(1)
left_joints_gt = left_joints_gt - left_joints_gt[:, 4, :].unsqueeze(1)
right_joints_gt = right_joints_gt - right_joints_gt[:, 4, :].unsqueeze(1)
loss = self.loss(left_joints_pred.to(torch.float64), left_joints_gt) \
+ self.loss(right_joints_pred.to(torch.float64), right_joints_gt)
loss_dict = {}
loss_dict['total'] = loss.item()
return loss, loss_dict
| 8,788 | 38.236607 | 143 | py |
Im2Hands | Im2Hands-main/artihand/nasa/init_occ_training.py | import os
import sys
import torch
import torch.nn as nn
import numpy as np
from torch.nn import functional as F
from torch import distributions as dist
from tqdm import trange
from im2mesh.common import (
compute_iou, make_3d_grid
)
from artihand.utils import visualize as vis
from artihand.training import BaseTrainer
from artihand import diff_operators
from dependencies.halo.halo_adapter.transform_utils import xyz_to_xyz1
class Trainer(BaseTrainer):
''' Trainer object for the Occupancy Network.'''
def __init__(self, model, optimizer, skinning_loss_weight=0, device=None,
input_type='img', threshold=0.5, eval_sample=False):
self.model = model
self.optimizer = optimizer
self.skinning_loss_weight = skinning_loss_weight
self.device = device
self.input_type = input_type
self.threshold = threshold
self.eval_sample = eval_sample
self.mse_loss = torch.nn.MSELoss()
def train_step(self, data):
''' Performs a training step.
Args:
data (dict): data dictionary
'''
self.model.train()
self.optimizer.zero_grad()
loss, loss_dict = self.compute_loss(data)
loss.backward()
self.optimizer.step()
return loss_dict
def eval_step(self, data):
''' Performs an evaluation step.
Args:
data (dict): data dictionary
'''
self.model.eval()
device = self.device
threshold = self.threshold
eval_dict = {}
img, camera_params, mano_data, _ = data
inputs = {'left': mano_data['left'].get('inputs').to(device),
'right': mano_data['right'].get('inputs').to(device)}
points = {'left': mano_data['left'].get('points_iou.points').to(device),
'right': mano_data['right'].get('points_iou.points').to(device)}
root_rot_mat = {'left': mano_data['left'].get('root_rot_mat').to(device),
'right': mano_data['right'].get('root_rot_mat').to(device)}
bone_lengths = {'left': mano_data['left'].get('bone_lengths').to(device),
'right': mano_data['right'].get('bone_lengths').to(device)}
occ_iou = {'left': mano_data['left'].get('points_iou.occ').to(device),
'right': mano_data['right'].get('points_iou.occ').to(device)}
kwargs = {}
with torch.no_grad():
left_occ, right_occ = self.model(img, camera_params, inputs, points, root_rot_mat, bone_lengths, sample=self.eval_sample, **kwargs)
# evaluation
left_occ_iou_np = (occ_iou['left'] >= 0.5).cpu().numpy()
right_occ_iou_np = (occ_iou['right'] >= 0.5).cpu().numpy()
left_occ_iou_hat_np = (left_occ >= threshold).cpu().numpy()
right_occ_iou_hat_np = (right_occ >= threshold).cpu().numpy()
left_iou = compute_iou(left_occ_iou_np, left_occ_iou_hat_np).mean()
right_iou = compute_iou(right_occ_iou_np, right_occ_iou_hat_np).mean()
eval_dict['iou'] = (left_iou + right_iou) / 2
return eval_dict
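# Illustrative sketch (not part of the original code): compute_iou reduces to
# intersection-over-union of the binarized occupancies. Minimal numpy version:
def _example_occupancy_iou(occ_a, occ_b, threshold=0.5):
    a, b = occ_a >= threshold, occ_b >= threshold
    union = np.logical_or(a, b).sum()
    return np.logical_and(a, b).sum() / max(union, 1)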
def compute_skinning_loss(self, c, data, bone_lengths=None):
''' Computes skinning loss for part-base regularization.'''
device = self.device
p = data.get('mesh_verts').to(device)
labels = data.get('mesh_vert_labels').to(device)
batch_size, points_size, p_dim = p.size()
kwargs = {}
pred = self.model.decode(p, c, bone_lengths=bone_lengths, reduce_part=False, **kwargs)
labels = labels.long()
level_set = 0.5
labels = F.one_hot(labels, num_classes=pred.size(-1)).float()
labels = labels * level_set
pred = pred.view(batch_size, points_size, pred.size(-1))
sk_loss = self.mse_loss(pred, labels)
return sk_loss
def compute_loss(self, data):
''' Computes the loss.
Args:
data (dict): data dictionary
'''
device = self.device
self.model = self.model.to(device)
threshold = self.threshold
img, camera_params, mano_data, _ = data
inputs = {'left': mano_data['left'].get('inputs').to(device),
'right': mano_data['right'].get('inputs').to(device)}
points = {'left': mano_data['left'].get('points').to(device),
'right': mano_data['right'].get('points').to(device)}
root_rot_mat = {'left': mano_data['left'].get('root_rot_mat').to(device),
'right': mano_data['right'].get('root_rot_mat').to(device)}
bone_lengths = {'left': mano_data['left'].get('bone_lengths').to(device),
'right': mano_data['right'].get('bone_lengths').to(device)}
occ = {'left': mano_data['left'].get('occ').to(device),
'right': mano_data['right'].get('occ').to(device)}
kwargs = {}
left_occ, right_occ = self.model(img, camera_params, inputs, points, root_rot_mat, bone_lengths, sample=self.eval_sample, **kwargs)
loss_dict = {}
occ_loss = self.mse_loss(left_occ, occ['left']) + self.mse_loss(right_occ, occ['right'])
loss_dict['occ'] = occ_loss.item()
loss = occ_loss
if self.skinning_loss_weight > 0:
    # NOTE: `c` (the conditioned latent code) is assumed to come from an
    # encoder; it is not computed in this method as written.
    sk_loss = self.compute_skinning_loss(c, mano_data, bone_lengths=bone_lengths)
    loss_dict['skin'] = sk_loss.item()
    loss = loss + self.skinning_loss_weight * sk_loss
loss_dict['total'] = loss.item()
return loss, loss_dict
| 5,651 | 31.113636 | 143 | py |
Im2Hands | Im2Hands-main/artihand/nasa/models/core_init_occ.py | import sys
import torch
import torch.nn as nn
from torch import distributions as dist
from dependencies.halo.halo_adapter.converter import PoseConverter, transform_to_canonical
from dependencies.halo.halo_adapter.interface import (get_halo_model, convert_joints, change_axes, scale_halo_trans_mat)
from dependencies.halo.halo_adapter.projection import get_projection_layer
from dependencies.halo.halo_adapter.transform_utils import xyz_to_xyz1
from dependencies.intaghand.models.encoder import ResNetSimple
from dependencies.intaghand.models.model_attn.img_attn import *
from dependencies.intaghand.models.model_attn.self_attn import *
class ArticulatedHandNetInitOcc(nn.Module):
''' Occupancy Network class.'''
def __init__(self, left_decoder, right_decoder, device=None):
super().__init__()
self.image_encoder = ResNetSimple(model_type='resnet50',
pretrained=True,
fmapDim=[128, 128, 128, 128],
handNum=2,
heatmapDim=21)
self.image_final_layer = nn.Conv2d(256, 32, 1)
self.left_pt_embeddings = nn.Sequential(nn.Conv1d(3, 32, 1),
nn.ReLU(),
nn.Dropout(0.01),
nn.Conv1d(32, 16, 1))
self.right_pt_embeddings = nn.Sequential(nn.Conv1d(3, 32, 1),
nn.ReLU(),
nn.Dropout(0.01),
nn.Conv1d(32, 16, 1))
self.img_ex_left = img_ex(64, 32, # img_size, img_f_dim
4, 32, # grid_size, grid_f_dim
19, # verts_f_dim
n_heads=2,
dropout=0.01)
self.img_ex_right = img_ex(64, 32, # img_size, img_f_dim
4, 32, # grid_size, grid_f_dim
19, # verts_f_dim
n_heads=2,
dropout=0.01)
self.left_decoder = left_decoder.to(device)
self.right_decoder = right_decoder.to(device)
self._device = device
def forward(self, img, camera_params, inputs, p, root_rot_mat, bone_lengths, sample=True, pen=False, **kwargs):
''' Performs a forward pass through the network.'''
# `inputs` holds per-hand latent codes ('left'/'right'); it is passed through to decode() unchanged
hms, mask, dp, img_fmaps, hms_fmaps, dp_fmaps = self.image_encoder(img.cuda())
img_feat = torch.cat((hms_fmaps[-1], dp_fmaps[-1]), 1)
img_feat = self.image_final_layer(img_feat)
left_p_r, right_p_r = self.decode(img_feat, camera_params, inputs, p, root_rot_mat, bone_lengths, pen=pen, **kwargs)
return left_p_r, right_p_r
def decode(self, img_feat, camera_params, c, p, root_rot_mat, bone_lengths, reduce_part=True, return_model_indices=False, test=False, pen=False, **kwargs):
''' Returns occupancy probabilities for the sampled points.'''
for side in ['left', 'right']:
# swap query points for penetration check during refined occupancy estimation
if pen:
if side == 'left':
side = 'right'
else:
side = 'left'
img_p = p[side] * 0.4
if test:
img_p = img_p * 0.4
img_p = torch.bmm(xyz_to_xyz1(img_p), root_rot_mat[side])[:, :, :3]
img_p = img_p + (camera_params[f'{side}_root_xyz'].cuda().unsqueeze(1))
if side == 'left':
img_p = img_p * torch.Tensor([-1., 1., 1.]).cuda()
if img_p.shape[1] != 0:
sub_p = p[side] / 0.4
batch_size, points_size = sub_p.shape[0], sub_p.shape[1]
sub_p = sub_p.reshape(batch_size * points_size, -1)
# finish swapping
if pen:
if side == 'left':
side = 'right'
else:
side = 'left'
batch_size = img_p.shape[0]
if img_p.shape[1] != 0:
img_p = torch.bmm(img_p, camera_params['R'].transpose(1,2).cuda()) + camera_params['T'].cuda().unsqueeze(1)
root_z = img_p[:, 0, 2]
for i in range(batch_size):
img_p[i, :, 2] = img_p[i, :, 2] - root_z[i]
if side == 'left':
pt_feat = self.left_pt_embeddings(img_p.transpose(1,2))
pt_feat = torch.cat((img_p, pt_feat.transpose(1,2)), 2)
local_img_feat = self.img_ex_left(img_feat, pt_feat)
else:
pt_feat = self.right_pt_embeddings(img_p.transpose(1,2))
pt_feat = torch.cat((img_p, pt_feat.transpose(1,2)), 2)
local_img_feat = self.img_ex_right(img_feat, pt_feat)
local_img_feat = local_img_feat.reshape(local_img_feat.shape[0] * local_img_feat.shape[1], -1)
p_feat = torch.cat((sub_p, local_img_feat), 1)
local_c = c[side].repeat_interleave(points_size, dim=0)
local_bone_lengths = bone_lengths[side].repeat_interleave(points_size, dim=0)
if side == 'left':
left_batch_size, left_points_size = batch_size, points_size
left_p_r = self.left_decoder(p_feat.float(), local_c.float(), local_bone_lengths.float(), reduce_part=reduce_part)
left_p_r = self.left_decoder.sigmoid(left_p_r)
else:
right_batch_size, right_points_size = batch_size, points_size
right_p_r = self.right_decoder(p_feat.float(), local_c.float(), local_bone_lengths.float(), reduce_part=reduce_part)
right_p_r = self.right_decoder.sigmoid(right_p_r)
else:
if side == 'left':
left_batch_size, left_points_size = 1, 0
left_p_r = torch.empty((1, 0)).cuda()
else:
right_batch_size, right_points_size = 1, 0
right_p_r = torch.empty((1, 0)).cuda()
if reduce_part:
if right_points_size > 0:
right_p_r, _ = right_p_r.max(1, keepdim=True)
if left_points_size > 0:
left_p_r, _ = left_p_r.max(1, keepdim=True)
left_p_r = left_p_r.reshape(left_batch_size, left_points_size)
right_p_r = right_p_r.reshape(right_batch_size, right_points_size)
if test:
    # NOTE: this branch expects full query sets (`left_p`/`right_p`) and
    # validity masks (`left_valid`/`right_valid`) from caller-side masking
    # logic that is not constructed in this method.
    final_left_p_r = torch.zeros((left_p.shape[1])).cuda()
    final_left_p_r[left_valid[0]] = left_p_r.squeeze()
    final_right_p_r = torch.zeros((right_p.shape[1])).cuda()
    final_right_p_r[right_valid[0]] = right_p_r.squeeze()
    return final_left_p_r.unsqueeze(0), final_right_p_r.unsqueeze(0)
return left_p_r, right_p_r
def to(self, device):
''' Puts the model to the device.
Args:
device (device): pytorch device
'''
model = super().to(device)
model._device = device
return model
| 7,445 | 41.067797 | 159 | py |
Im2Hands | Im2Hands-main/artihand/nasa/models/core_ref_occ.py | import sys
import torch
import torch.nn as nn
from torch import distributions as dist
from torch.nn.functional import grid_sample
from im2mesh.common import make_3d_grid
from dependencies.halo.halo_adapter.transform_utils import xyz_to_xyz1
from dependencies.intaghand.models.encoder import ResNetSimple
from dependencies.intaghand.models.model_attn.img_attn import *
from dependencies.intaghand.models.model_attn.self_attn import *
from dependencies.airnets.AIRnet import PointTransformerEncoderV2, PointTransformerDecoderOcc
def extract_local_img_feat(img_feat, camera_params, p, root_rot_mat, side='right', anchor=False, test=False):
if anchor:
p = p / 1000
p = p.float()
if side == 'left':
img_p = p + (camera_params[f'{side}_root_xyz'].cuda().unsqueeze(1)) * torch.Tensor([-1., 1., 1.]).cuda()
else:
img_p = p + (camera_params[f'{side}_root_xyz'].cuda().unsqueeze(1))
else:
p = p * 0.4
img_p = torch.bmm(xyz_to_xyz1(p), root_rot_mat)[:, :, :3]
img_p = img_p + (camera_params[f'{side}_root_xyz'].cuda().unsqueeze(1))
if side == 'left':
img_p = img_p * torch.Tensor([-1., 1., 1.]).cuda()
img_p = torch.bmm(img_p, camera_params['R'].transpose(1,2).cuda()) + camera_params['T'].cuda().unsqueeze(1)
img_coor_p = img_p  # keep a copy in camera coordinates before pixel projection
img_p = torch.bmm(img_p * 1000, camera_params['camera'].transpose(1,2).cuda().float())
proj_img_p = torch.zeros((img_p.shape[0], img_p.shape[1], 2)).cuda()
for i in range(proj_img_p.shape[0]):
proj_img_p[i] = img_p[i, :, :2] / img_p[i, :, 2:]
proj_img_p = (proj_img_p - 128) / 128
sub_img_feat = grid_sample(img_feat, proj_img_p.unsqueeze(2)[:,:,:,:2], align_corners=True)[:, :, :, 0]
sub_img_feat = sub_img_feat.permute(0,2,1)
return sub_img_feat, img_coor_p * torch.Tensor([1., -1., -1.]).cuda()
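# Illustrative sketch (not part of the original code): the projection above
# maps camera-space points to pixels with the intrinsics, then rescales pixel
# coordinates to the [-1, 1] range expected by grid_sample (a 256x256 feature
# map is assumed, matching the (x - 128) / 128 normalization used above):
def _example_project_and_sample(feat, pts_cam, K):
    # feat: [B, C, 256, 256], pts_cam: [B, N, 3], K: [B, 3, 3]
    p = torch.bmm(pts_cam, K.transpose(1, 2))
    uv = p[..., :2] / p[..., 2:]                 # perspective divide
    uv = (uv - 128) / 128                        # pixels -> [-1, 1]
    return grid_sample(feat, uv.unsqueeze(2), align_corners=True)[..., 0]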
class ArticulatedHandNetRefOcc(nn.Module):
''' Occupancy Network class.
Args:
device (device): torch device
'''
def __init__(self, init_occ_estimator, device=None):
super().__init__()
self.init_occ = init_occ_estimator
self.image_encoder = ResNetSimple(model_type='resnet50',
pretrained=True,
fmapDim=[128, 128, 128, 128],
handNum=2,
heatmapDim=21)
self.image_final_layer = nn.Conv2d(256, 32, 1)
self.hms_global_layer = nn.Conv2d(128, 128, 8)
self.dp_global_layer = nn.Conv2d(128, 128, 8)
self.trans_enc = PointTransformerEncoderV2(npoints_per_layer=[512, 256, 128], nneighbor=16, nneighbor_reduced=16, nfinal_transformers=3, d_transformer=256, d_reduced=120, full_SA=True, has_features=True)
self.trans_dec = PointTransformerDecoderOcc(dim_inp=256, dim=200, nneigh=9, hidden_dim=32, return_feature=True)
self.context_enc = nn.Sequential(nn.Linear(256*3, 256))
self._device = device
def forward(self, img, camera_params, inputs, p, anchor_points, root_rot_mat, bone_lengths, sample=True, pen=False, img_feat=None, test=False, **kwargs):
''' Performs a forward pass through the network.'''
img = img.cuda()
if img_feat is None:
hms, mask, dp, img_fmaps, hms_fmaps, dp_fmaps = self.image_encoder(img)
hms_global = self.hms_global_layer(hms_fmaps[0]).squeeze(-1).squeeze(-1)
dp_global = self.dp_global_layer(dp_fmaps[0]).squeeze(-1).squeeze(-1)
img_global = torch.cat([hms_global, dp_global], 1)
img_f = nn.functional.interpolate(img_fmaps[-1], size=[256, 256], mode='bilinear')
hms_f = nn.functional.interpolate(hms_fmaps[-1], size=[256, 256], mode='bilinear')
dp_f = nn.functional.interpolate(dp_fmaps[-1], size=[256, 256], mode='bilinear')
img_feat = torch.cat((hms_f, dp_f), 1)
img_feat = self.image_final_layer(img_feat)
else:
img_feat, img_global = img_feat
left_query_img_feat, left_query_pts = extract_local_img_feat(img_feat, camera_params, p['left'], root_rot_mat['left'], side='left', test=test)
right_query_img_feat, right_query_pts = extract_local_img_feat(img_feat, camera_params, p['right'], root_rot_mat['right'], side='right', test=test)
query_pts = {'left': left_query_pts, 'right': right_query_pts}
query_img_feat = {'left': left_query_img_feat, 'right': right_query_img_feat}
# swap query points for penetration check later
if pen:
pen_query_pts = {'left': right_query_pts, 'right': left_query_pts}
pen_query_img_feat = {'left': right_query_img_feat, 'right': left_query_img_feat}
with torch.no_grad():
left_p_r, right_p_r = self.init_occ(img, camera_params, inputs, p, root_rot_mat, bone_lengths, sample=sample)
if pen:
pen_left_p_r, pen_right_p_r = self.init_occ(img, camera_params, inputs, p, root_rot_mat, bone_lengths, sample=sample, pen=True)
init_p_r = {'left': left_p_r, 'right': right_p_r}
if pen:
pen_init_p_r = {'left': pen_left_p_r, 'right': pen_right_p_r}
left_anchor_img_feat, left_anchor_pts = extract_local_img_feat(img_feat, camera_params, anchor_points['left'], root_rot_mat['left'], side='left', anchor=True)
right_anchor_img_feat, right_anchor_pts = extract_local_img_feat(img_feat, camera_params, anchor_points['right'], root_rot_mat['right'], side='right', anchor=True)
left_labels = torch.FloatTensor([1, 0]).unsqueeze(0).repeat_interleave(left_anchor_img_feat.shape[0], dim=0).cuda()
left_labels = left_labels.unsqueeze(1).repeat_interleave(left_anchor_img_feat.shape[1], 1)
left_anchor_feat = torch.cat([left_anchor_img_feat, left_labels], 2)
right_labels = torch.FloatTensor([0, 1]).unsqueeze(0).repeat_interleave(right_anchor_img_feat.shape[0], dim=0).cuda()
right_labels = right_labels.unsqueeze(1).repeat_interleave(right_anchor_img_feat.shape[1], 1)
right_anchor_feat = torch.cat([right_anchor_img_feat, right_labels], 2)
# normalize
min_xyz = torch.min(torch.cat([left_anchor_pts, right_anchor_pts], 1), 1)[0]
max_xyz = torch.max(torch.cat([left_anchor_pts, right_anchor_pts], 1), 1)[0]
center_xyz = (max_xyz.unsqueeze(1) + min_xyz.unsqueeze(1)) / 2
left_anchor_pts -= center_xyz
right_anchor_pts -= center_xyz
query_pts['left'] -= center_xyz
query_pts['right'] -= center_xyz
left_pt_feat = self.trans_enc(torch.cat((left_anchor_pts, left_anchor_feat), 2))
right_pt_feat = self.trans_enc(torch.cat((right_anchor_pts, right_anchor_feat), 2))
anchor_feat = {'left': left_pt_feat, 'right': right_pt_feat}
ref_left_p_r, ref_right_p_r = self.decode(img_feat, camera_params, inputs, query_pts, query_img_feat, init_p_r, anchor_feat, img_global, root_rot_mat, bone_lengths, **kwargs)
if pen:
pen_ref_left_p_r, pen_ref_right_p_r = self.decode(img_feat, camera_params, inputs, pen_query_pts, pen_query_img_feat, pen_init_p_r, anchor_feat, img_global, root_rot_mat, bone_lengths, **kwargs)
return ref_left_p_r, ref_right_p_r, pen_ref_left_p_r, pen_ref_right_p_r
return ref_left_p_r, ref_right_p_r
def decode(self, img_feat, camera_params, c, p, p_img_feat, init_p_r, anchor_feat, img_global_feat, root_rot_mat, bone_lengths, **kwargs):
''' Returns occupancy probabilities for the sampled points.'''
left_z = anchor_feat['left']['z']
right_z = anchor_feat['right']['z']
anchor_feat['left']['z'] = self.context_enc(torch.cat((left_z, right_z, img_global_feat), 1))
anchor_feat['right']['z'] = self.context_enc(torch.cat((right_z, left_z, img_global_feat), 1))
left_p_feat = torch.cat((p['left'], p_img_feat['left'], init_p_r['left'].unsqueeze(-1).repeat_interleave(32, dim=-1)), 2)
right_p_feat = torch.cat((p['right'], p_img_feat['right'], init_p_r['right'].unsqueeze(-1).repeat_interleave(32, dim=-1)), 2)
if left_p_feat.shape[1] > 0:
    left_res_occ = self.trans_dec(left_p_feat, anchor_feat['left']).squeeze(-1)
else:
    left_res_occ = torch.empty(0).cuda()  # no left-hand query points
if right_p_feat.shape[1] > 0:
    right_res_occ = self.trans_dec(right_p_feat, anchor_feat['right']).squeeze(-1)
else:
    right_res_occ = torch.empty(0).cuda()  # no right-hand query points
final_left_occ = torch.sigmoid(left_res_occ)
final_right_occ = torch.sigmoid(right_res_occ)
return final_left_occ, final_right_occ
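# Illustrative sketch (not part of the original code): the context encoder
# above conditions each hand on its own latent, the other hand's latent, and
# a global image feature, so the two occupancies are mutually aware:
def _example_two_hand_context(z_left, z_right, img_global, enc):
    left_ctx = enc(torch.cat((z_left, z_right, img_global), 1))
    right_ctx = enc(torch.cat((z_right, z_left, img_global), 1))
    return left_ctx, right_ctx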
def to(self, device):
''' Puts the model to the device.
Args:
device (device): pytorch device
'''
model = super().to(device)
model._device = device
return model
| 9,465 | 42.824074 | 211 | py |
Im2Hands | Im2Hands-main/artihand/nasa/models/decoder.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
class SimpleDecoder(nn.Module):
def __init__(
self,
latent_size,
dims,
dropout=None,
dropout_prob=0.0,
norm_layers=(),
latent_in=(),
weight_norm=False,
# xyz_in_all=None,
use_sigmoid=False,
latent_dropout=False,
):
super(SimpleDecoder, self).__init__()
dims = [latent_size + 3] + dims + [1]
self.num_layers = len(dims)
self.norm_layers = norm_layers
self.latent_in = latent_in
self.latent_dropout = latent_dropout
if self.latent_dropout:
self.lat_dp = nn.Dropout(0.2)
# self.xyz_in_all = xyz_in_all
self.weight_norm = weight_norm
for layer in range(0, self.num_layers - 1):
if layer + 1 in latent_in:
out_dim = dims[layer + 1] - dims[0]
else:
out_dim = dims[layer + 1]
# if self.xyz_in_all and layer != self.num_layers - 2:
# out_dim -= 3
if weight_norm and layer in self.norm_layers:
setattr(
self,
"lin" + str(layer),
nn.utils.weight_norm(nn.Linear(dims[layer], out_dim)),
)
else:
setattr(self, "lin" + str(layer), nn.Linear(dims[layer], out_dim))
if (
(not weight_norm)
and self.norm_layers is not None
and layer in self.norm_layers
):
setattr(self, "bn" + str(layer), nn.LayerNorm(out_dim))
# print(dims[layer], out_dim)
self.use_sigmoid = use_sigmoid
if use_sigmoid:
self.sigmoid = nn.Sigmoid()
# self.relu = nn.ReLU()
self.relu = nn.LeakyReLU(0.1)
self.dropout_prob = dropout_prob
self.dropout = dropout
# self.th = nn.Tanh()
# input: N x (L+3)
def forward(self, xyz, latent, reduce_part=False):
batch_size = xyz.size(0)
# print("latent size", latent.size())
# print(latent)
# reshape from [batch_size, 16, 4, 4] to [batch_size, 256]
latent = latent.reshape(batch_size, -1)
# print("latent size", latent.size())
# print(latent)
# print("xyz size", xyz.size())
input = torch.cat([latent, xyz], 1)
x = input
for layer in range(0, self.num_layers - 1):
lin = getattr(self, "lin" + str(layer))
if layer in self.latent_in:
x = torch.cat([x, input], 1)
# elif layer != 0 and self.xyz_in_all:
# x = torch.cat([x, xyz], 1)
x = lin(x)
# last layer Sigmoid
if layer == self.num_layers - 2 and self.use_sigmoid:
x = self.sigmoid(x)
if layer < self.num_layers - 2:
if (
self.norm_layers is not None
and layer in self.norm_layers
and not self.weight_norm
):
bn = getattr(self, "bn" + str(layer))
x = bn(x)
x = self.relu(x)
if self.dropout is not None and layer in self.dropout:
x = F.dropout(x, p=self.dropout_prob, training=self.training)
# if hasattr(self, "th"):
# x = self.th(x)
return x
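# Illustrative sketch (not part of the original code): the `latent_in`
# mechanism above re-concatenates the raw [latent, xyz] input at selected
# depths, a DeepSDF-style skip connection. Toy two-layer version:
def _example_latent_skip(inp, lin0, lin1):
    # inp: [B, D]; lin0: D -> H; lin1: (H + D) -> 1
    h = torch.relu(lin0(inp))
    return lin1(torch.cat([h, inp], dim=1))  # skip re-injects the input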
class PiecewiseRigidDecoder(nn.Module):
def __init__(
self,
latent_size,
dims,
num_bones=16,
projection=None,
dropout=None,
dropout_prob=0.0,
norm_layers=(),
latent_in=(),
weight_norm=False,
smooth_max=False,
use_sigmoid=False,
latent_dropout=False,
):
super(PiecewiseRigidDecoder, self).__init__()
if projection is not None:
dims = [3] + dims + [1]
else:
dims = [latent_size + 3] + dims + [1]
self.num_layers = len(dims)
self.num_bones = num_bones
self.projection = projection
self.norm_layers = norm_layers
self.latent_in = latent_in
self.latent_dropout = latent_dropout
if self.latent_dropout:
self.lat_dp = nn.Dropout(0.2)
self.weight_norm = weight_norm
for bone in range(self.num_bones):
for layer in range(0, self.num_layers - 1):
# if layer + 1 in latent_in:
# out_dim = dims[layer + 1] - dims[0]
# else:
# out_dim = dims[layer + 1]
if layer in latent_in:
in_dim = dims[layer] + dims[0]
else:
in_dim = dims[layer]
out_dim = dims[layer + 1]
# print(in_dim, out_dim)
if weight_norm and layer in self.norm_layers:
setattr(
self,
"lin" + str(bone) + "_" + str(layer),
nn.utils.weight_norm(nn.Linear(in_dim, out_dim)),
# nn.utils.weight_norm(nn.Conv1d(dims[layer], out_dim, 1)),
)
else:
setattr(self, "lin" + str(bone) + "_" + str(layer), nn.Linear(in_dim, out_dim))
# setattr(self, "lin" + str(bone) + "_" + str(layer), nn.Conv1d(dims[layer], out_dim, 1))
if (
(not weight_norm)
and self.norm_layers is not None
and layer in self.norm_layers
):
setattr(self, "bn" + str(bone) + "_" + str(layer), nn.LayerNorm(out_dim))
self.smooth_max = smooth_max
self.use_sigmoid = use_sigmoid
if use_sigmoid:
self.sigmoid = nn.Sigmoid()
# self.relu = nn.ReLU()
self.relu = nn.LeakyReLU(0.1)
self.dropout_prob = dropout_prob
self.dropout = dropout
# self.th = nn.Tanh()
# input: N x (L+3)
def forward(self, xyz, latent, reduce_part=True):
batch_size = xyz.size(0)
# print("xyz", xyz)
if self.projection == None:
# [batch_size, 16, 4, 4] -> [batch_size, 16, tranMat(16)]
latent = latent.view(batch_size, latent.size(1), 16)
# [batch_size, 3] -> [batch_size, joints(16), 3]
xyz = xyz.unsqueeze(1).expand(-1, latent.size(1), -1)
# [batch_size, joints(16), xyz(3) + tranMat(16)]
input = torch.cat([latent, xyz], 2)
elif self.projection == 'x':
# concat 1 for homogeneous points. [3] -> [4,1]
xyz = torch.cat([xyz, torch.ones(batch_size, 1, device=xyz.device)], 1).unsqueeze(-1)
# print(xyz.size())
# [batch_size, joints(16), 4, 4] x [batch_size, 1, 4, 1] -> [batch_size, 16, 4, 1]
input = torch.matmul(latent, xyz.unsqueeze(1))
input = input[:, :, :3, 0]
# final input [batch_size, joints(16), projection(3)]
output = torch.zeros([input.size(0), self.num_bones], device=input.device)
for bone in range(self.num_bones):
input_i = input[:, bone, :]
x = input[:, bone, :]
# print('x size', x.size())
for layer in range(0, self.num_layers - 1):
x_prev = x
lin = getattr(self, "lin" + str(bone) + "_" + str(layer))
if layer in self.latent_in:
x = torch.cat([x, input_i], 1)
# x = lin(x)
x_out = lin(x)
# last layer Sigmoid
# if layer == self.num_layers - 2 and self.use_sigmoid:
# x_out = self.sigmoid(x_out)
if layer < self.num_layers - 2:
if (
self.norm_layers is not None
and layer in self.norm_layers
and not self.weight_norm
):
bn = getattr(self, "bn" + str(bone) + "_" + str(layer))
x_out = bn(x_out)
x_out = self.relu(x_out)
if self.dropout is not None and layer in self.dropout:
x_out = F.dropout(x_out, p=self.dropout_prob, training=self.training)
# residual connection
if layer > 0:
x_out = x_out + x_prev
x = x_out
# if hasattr(self, "th"):
# x = self.th(x)
output[:, bone] = x[:, 0]
if self.smooth_max and reduce_part:
# print("before", output.size())
output = output.logsumexp(1, keepdim=True)
# print("after", output.size())
# Sigmoid
if self.use_sigmoid:
output = self.sigmoid(output)
return output # x
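# Illustrative sketch (not part of the original code): the smooth-max
# reduction above uses logsumexp, a differentiable upper bound on the hard
# max over per-bone outputs:
def _example_smooth_max():
    per_part = torch.tensor([[0.1, 2.0, -1.0]])
    hard = per_part.max(1, keepdim=True).values   # 2.0
    soft = per_part.logsumexp(1, keepdim=True)    # ~2.18, >= the hard max
    return hard, soft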
class Sine(nn.Module):
def __init__(self):
    super().__init__()
def forward(self, input):
# See paper sec. 3.2, final paragraph, and supplement Sec. 1.5 for discussion of factor 30
return torch.sin(30 * input)
class TwoLayerNet(torch.nn.Module):
def __init__(self, D_in, H, D_out):
"""
Create a two-layer network with ReLU activation.
"""
super(TwoLayerNet, self).__init__()
self.linear1 = torch.nn.Linear(D_in, H)
self.linear2 = torch.nn.Linear(H, D_out)
def forward(self, x):
h_relu = self.linear1(x).clamp(min=0)
y_pred = self.linear2(h_relu)
return y_pred
class PosEncoder(nn.Module):
'''Module to add positional encoding.'''
def __init__(self, in_features=3):
super().__init__()
self.in_features = in_features
if self.in_features == 3:
self.num_frequencies = 10
self.out_dim = in_features + 2 * in_features * self.num_frequencies
def forward(self, coords):
coords = coords.view(coords.shape[0], -1, self.in_features)
coords_pos_enc = coords
for i in range(self.num_frequencies):
for j in range(self.in_features):
c = coords[..., j]
sin = torch.unsqueeze(torch.sin((2 ** i) * np.pi * c), -1)
cos = torch.unsqueeze(torch.cos((2 ** i) * np.pi * c), -1)
coords_pos_enc = torch.cat((coords_pos_enc, sin, cos), axis=-1)
return coords_pos_enc.reshape(coords.shape[0], -1, self.out_dim)
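# Illustrative sketch (not part of the original code): for 3-D inputs with 10
# frequencies, the encoder above emits 3 + 2 * 3 * 10 = 63 features per point,
# appending sin and cos of (2**i * pi * x) for each axis and octave i:
def _example_pos_enc_dim(in_features=3, num_frequencies=10):
    return in_features + 2 * in_features * num_frequencies  # 63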
class PiecewiseDeformableDecoderPIFu(nn.Module):
def __init__(
self,
latent_size,
dims,
use_bone_length=False,
bone_latent_size=0,
num_bones=16,
projection=None,
global_projection=None,
global_pose_projection_size=0,
dropout=None,
dropout_prob=0.0,
norm_layers=(),
latent_in=(),
weight_norm=False,
smooth_max=False,
use_sigmoid=False,
latent_dropout=False,
combine_final=False,
positional_encoding=False,
actv='leakyrelu',
add_feature_dim=32,
add_feature_layer_idx=1
):
super(PiecewiseDeformableDecoderPIFu, self).__init__()
self.add_feature_dim = add_feature_dim
self.add_feature_layer_idx = add_feature_layer_idx
# global pose projection type
if global_projection == 'o':
# origin
latent_size = 3
elif global_projection is None:
# no projection
latent_size = 4 * 4
if positional_encoding:
self.posi_encoder = PosEncoder()
xyz_size = 3 + 3 * 2 * 10
else:
self.posi_encoder = None
xyz_size = 3
# NOTE: xyz_size is forced to 3 here, overriding the positional-encoding size
# above (this override was marked as temporary upstream)
xyz_size = 3
# global pose sub-space projection
self.global_pose_projection_size = global_pose_projection_size
if global_pose_projection_size > 0:
for i in range(num_bones):
setattr(self, "global_proj" + str(i), nn.Linear(latent_size * num_bones, global_pose_projection_size))
dims = [xyz_size + global_pose_projection_size] + dims + [1]
else:
# self.global_pose_projection_layer = None
if projection is not None:
dims = [xyz_size + latent_size * num_bones] + dims + [1]
else:
dims = [xyz_size + latent_size + latent_size * num_bones] + dims + [1]
if use_bone_length:
dims[0] = dims[0] + 1
# bone_latent: the latent size of the vector encoding all bone lengths
self.bone_latent_size = bone_latent_size # 16
if bone_latent_size > 0:
dims[0] = dims[0] + self.bone_latent_size
self.bone_encoder = TwoLayerNet(num_bones, 40, self.bone_latent_size)
self.use_bone_length = use_bone_length
self.num_layers = len(dims)
self.num_bones = num_bones
self.projection = projection
self.global_projection = global_projection
self.norm_layers = norm_layers
self.latent_in = latent_in
self.latent_dropout = latent_dropout
if self.latent_dropout:
self.lat_dp = nn.Dropout(0.2)
# combine final output in the last layer
self.combine_final = combine_final
if self.combine_final:
self.combine_final_layer = nn.Linear(dims[-2] * num_bones, 1)
self.weight_norm = weight_norm
# Part model
for bone in range(self.num_bones):
for layer in range(0, self.num_layers - 1):
# if layer + 1 in latent_in:
# out_dim = dims[layer + 1] - dims[0]
# else:
# out_dim = dims[layer + 1]
if layer in latent_in:
in_dim = dims[layer] + dims[0]
else:
in_dim = dims[layer]
if layer == self.add_feature_layer_idx:
in_dim += self.add_feature_dim
out_dim = dims[layer + 1]
if weight_norm and layer in self.norm_layers:
setattr(
self,
"lin" + str(bone) + "_" + str(layer),
nn.utils.weight_norm(nn.Linear(in_dim, out_dim)),
# nn.utils.weight_norm(nn.Conv1d(dims[layer], out_dim, 1)),
)
else:
setattr(self, "lin" + str(bone) + "_" + str(layer), nn.Linear(in_dim, out_dim))
# setattr(self, "lin" + str(bone) + "_" + str(layer), nn.Conv1d(dims[layer], out_dim, 1))
if (
(not weight_norm)
and self.norm_layers is not None
and layer in self.norm_layers
):
setattr(self, "bn" + str(bone) + "_" + str(layer), nn.LayerNorm(out_dim))
if actv == "siren":
if layer == 0:
getattr(self, "lin" + str(bone) + "_" + str(layer)).apply(first_layer_sine_init)
else:
getattr(self, "lin" + str(bone) + "_" + str(layer)).apply(sine_init)
self.smooth_max = smooth_max
self.use_sigmoid = use_sigmoid
if use_sigmoid:
self.sigmoid = nn.Sigmoid()
# self.relu = nn.ReLU()
if actv == "siren":
self.actv = Sine()
else:
self.actv = nn.LeakyReLU(0.1)
self.dropout_prob = dropout_prob
self.dropout = dropout
# self.th = nn.Tanh()
# input: N x (L+3)
def forward(self, in_feat, latent, bone_lengths=None, reduce_part=True):
# print("xyz", xyz, xyz.shape)
batch_size = in_feat.size(0)
xyz = in_feat[:, :3]
img_feat = in_feat[:, 3:]
# print("batch_size", batch_size)
# Query point projection. Should be (B^-1)(x) by default.
if self.projection is None:
# [batch_size, 16, 4, 4] -> [batch_size, 16, tranMat(16)]
latent_reshape = latent.view(batch_size, latent.size(1), 16)
# [batch_size, 3] -> [batch_size, joints(16), 3]
xyz = xyz.unsqueeze(1).expand(-1, latent_reshape.size(1), -1)
# [batch_size, joints(16), xyz(3) + tranMat(16)]
input = torch.cat([latent_reshape, xyz], 2)
elif self.projection == 'x':
# (B^-1)(x)
# concat 1 for homogeneous points. [3] -> [4,1]
xyz = torch.cat([xyz, torch.ones(batch_size, 1, device=xyz.device)], 1).unsqueeze(-1)
# print(xyz.size())
# [batch_size, joints(16), 4, 4] x [batch_size, 1, 4, 1] -> [batch_size, 16, 4, 1]
input = torch.matmul(latent.double(), xyz.unsqueeze(1).double())
input = input[:, :, :3, 0]
# final input shape [batch_size, joints(16), projection(3)]
#elif self.projection == 'o':
# # (B^-1)(o)
# pass
# Positional encoding
if self.posi_encoder is not None:
# import pdb; pdb.set_trace()
input = self.posi_encoder(input)
# global latent code projection
if self.global_projection == 'o':
# collections of (B^-1)(o)
global_latent = latent[:, :, :3, 3]
# print("global_latent", global_latent)
global_latent = global_latent.reshape(batch_size, -1)
# print("global size", global_latent.size())
else:
# no projection, just flatten the transformation matrix
global_latent = latent.reshape(batch_size, -1)
# global latent code sub-space projection
# if self.global_pose_projection_layer:
# global_latent = self.global_pose_projection_layer(global_latent)
# Compute global bone length encoding
if self.use_bone_length and self.bone_latent_size > 0:
# print("bone length shape", bone_lengths.shape)
bone_latent = self.bone_encoder(bone_lengths.float())
# print("bone latent shape", bone_latent.shape)
output = torch.zeros([input.size(0), self.num_bones], device=input.device)
# For combining final latent
last_layer_latents = []
## Input to each sub model is [x; (local bone length); (global bone length latent); global latent]
# Input to each sub model is [x(3, fixed); local bone length(1, fixed); global bone length latent(16); global latent(8)]
for bone in range(self.num_bones):
input_i = input[:, bone, :]
# print("input shape", input.shape)
x = input[:, bone, :]
# print("x shape", x.shape)
# concat bone length
# print("bone length shape", bone_lengths.shape)
# print("bone length shape", bone_lengths[:, bone].shape)
if self.use_bone_length:
x = torch.cat([x, bone_lengths[:, bone].unsqueeze(-1)], axis=1)
if self.bone_latent_size > 0:
# NOTE: img_feat is injected later (at add_feature_layer_idx), not here
x = torch.cat([x, bone_latent], axis=1)
# print("x after global bone latent", x.shape)
# print('x size', x.size())
# print('global latent', global_latent.size())
# Per-bone global subspace projection
if self.global_pose_projection_size > 0:
global_proj = getattr(self, "global_proj" + str(bone))
# print("global latent code size", global_latent.size())
projected_global_latent = global_proj(global_latent)
x = torch.cat([x, projected_global_latent], 1)
else:
x = torch.cat([x, global_latent], 1)
# print('x before model', x.shape)
for idx, layer in enumerate(range(0, self.num_layers - 1)):
x_prev = x
lin = getattr(self, "lin" + str(bone) + "_" + str(layer))
if layer in self.latent_in:
x = torch.cat([x, input_i], 1)
if idx == self.add_feature_layer_idx:
x = torch.cat([x, img_feat], 1)
if layer == self.num_layers - 2 and self.combine_final:
last_layer_latents.append(x)
# print(x.shape)
# x = lin(x)
x_out = lin(x.float())
# last layer
# if layer == self.num_layers - 2:
# Smooth max log-sum-exp
if layer < self.num_layers - 2:
# residual connection
if layer > 0:
x_out = x_out + x_prev
if (
self.norm_layers is not None
and layer in self.norm_layers
and not self.weight_norm
):
bn = getattr(self, "bn" + str(bone) + "_" + str(layer))
x_out = bn(x_out)
x_out = self.actv(x_out)
if self.dropout is not None and layer in self.dropout:
x_out = F.dropout(x_out, p=self.dropout_prob, training=self.training)
x = x_out
# if hasattr(self, "th"):
# x = self.th(x)
# print("x_out", x.size())
output[:, bone] = x[:, 0]
if self.combine_final:
    output = self.combine_final_layer(torch.cat(last_layer_latents, dim=-1))
# (an alternative smooth-max/sigmoid reduction over parts is disabled here;
#  reducing the per-bone outputs is left to the caller)
return output
class PiecewiseDeformableDecoder(nn.Module):
def __init__(
self,
latent_size,
dims,
use_bone_length=False,
bone_latent_size=0,
num_bones=16,
projection=None,
global_projection=None,
global_pose_projection_size=0,
dropout=None,
dropout_prob=0.0,
norm_layers=(),
latent_in=(),
weight_norm=False,
smooth_max=False,
use_sigmoid=False,
latent_dropout=False,
combine_final=False,
positional_encoding=False,
actv='leakyrelu',
):
super(PiecewiseDeformableDecoder, self).__init__()
# global pose projection type
if global_projection == 'o':
# origin
latent_size = 3
elif global_projection is None:
# no projection
latent_size = 4 * 4
if positional_encoding:
self.posi_encoder = PosEncoder()
xyz_size = 3 + 3 * 2 * 10
else:
self.posi_encoder = None
xyz_size = 3
# global pose sub-space projection
self.global_pose_projection_size = global_pose_projection_size
if global_pose_projection_size > 0:
for i in range(num_bones):
setattr(self, "global_proj" + str(i), nn.Linear(latent_size * num_bones, global_pose_projection_size))
dims = [xyz_size + global_pose_projection_size] + dims + [1]
else:
# self.global_pose_projection_layer = None
if projection is not None:
dims = [xyz_size + latent_size * num_bones] + dims + [1]
else:
dims = [xyz_size + latent_size + latent_size * num_bones] + dims + [1]
if use_bone_length:
dims[0] = dims[0] + 1
# bone_latent: the latent size of the vector encoding all bone lengths
self.bone_latent_size = bone_latent_size # 16
if bone_latent_size > 0:
dims[0] = dims[0] + self.bone_latent_size
self.bone_encoder = TwoLayerNet(num_bones, 40, self.bone_latent_size)
self.use_bone_length = use_bone_length
self.num_layers = len(dims)
self.num_bones = num_bones
self.projection = projection
self.global_projection = global_projection
self.norm_layers = norm_layers
self.latent_in = latent_in
self.latent_dropout = latent_dropout
if self.latent_dropout:
self.lat_dp = nn.Dropout(0.2)
# combine final output in the last layer
self.combine_final = combine_final
if self.combine_final:
self.combine_final_layer = nn.Linear(dims[-2] * num_bones, 1)
self.weight_norm = weight_norm
# Part model
for bone in range(self.num_bones):
for layer in range(0, self.num_layers - 1):
# if layer + 1 in latent_in:
# out_dim = dims[layer + 1] - dims[0]
# else:
# out_dim = dims[layer + 1]
if layer in latent_in:
in_dim = dims[layer] + dims[0]
else:
in_dim = dims[layer]
out_dim = dims[layer + 1]
# print(in_dim, out_dim)
if weight_norm and layer in self.norm_layers:
setattr(
self,
"lin" + str(bone) + "_" + str(layer),
nn.utils.weight_norm(nn.Linear(in_dim, out_dim)),
# nn.utils.weight_norm(nn.Conv1d(dims[layer], out_dim, 1)),
)
else:
setattr(self, "lin" + str(bone) + "_" + str(layer), nn.Linear(in_dim, out_dim))
# setattr(self, "lin" + str(bone) + "_" + str(layer), nn.Conv1d(dims[layer], out_dim, 1))
if (
(not weight_norm)
and self.norm_layers is not None
and layer in self.norm_layers
):
setattr(self, "bn" + str(bone) + "_" + str(layer), nn.LayerNorm(out_dim))
if actv == "siren":
if layer == 0:
getattr(self, "lin" + str(bone) + "_" + str(layer)).apply(first_layer_sine_init)
else:
getattr(self, "lin" + str(bone) + "_" + str(layer)).apply(sine_init)
self.smooth_max = smooth_max
self.use_sigmoid = use_sigmoid
if use_sigmoid:
self.sigmoid = nn.Sigmoid()
# self.relu = nn.ReLU()
if actv == "siren":
self.actv = Sine()
else:
self.actv = nn.LeakyReLU(0.1)
self.dropout_prob = dropout_prob
self.dropout = dropout
# self.th = nn.Tanh()
# input: N x (L+3)
def forward(self, xyz, latent, bone_lengths=None, reduce_part=True):
# print("xyz", xyz, xyz.shape)
batch_size = xyz.size(0)
# print("batch_size", batch_size)
# Query point projection. Should be (B^-1)(x) by default.
if self.projection is None:
# [batch_size, 16, 4, 4] -> [batch_size, 16, tranMat(16)]
latent_reshape = latent.view(batch_size, latent.size(1), 16)
# [batch_size, 3] -> [batch_size, joints(16), 3]
xyz = xyz.unsqueeze(1).expand(-1, latent_reshape.size(1), -1)
# [batch_size, joints(16), xyz(3) + tranMat(16)]
input = torch.cat([latent_reshape, xyz], 2)
elif self.projection == 'x':
# (B^-1)(x)
# concat 1 for homogeneous points. [3] -> [4,1]
xyz = torch.cat([xyz, torch.ones(batch_size, 1, device=xyz.device)], 1).unsqueeze(-1)
# print(xyz.size())
# [batch_size, joints(16), 4, 4] x [batch_size, 1, 4, 1] -> [batch_size, 16, 4, 1]
input = torch.matmul(latent.double(), xyz.unsqueeze(1).double())
input = input[:, :, :3, 0]
# final input shape [batch_size, joints(16), projection(3)]
#elif self.projection == 'o':
# # (B^-1)(o)
# pass
# Positional encoding
if self.posi_encoder is not None:
# import pdb; pdb.set_trace()
input = self.posi_encoder(input)
# global latent code projection
if self.global_projection == 'o':
# collections of (B^-1)(o)
global_latent = latent[:, :, :3, 3]
# print("global_latent", global_latent)
global_latent = global_latent.reshape(batch_size, -1)
# print("global size", global_latent.size())
else:
# no projection, just flatten the transformation matrix
global_latent = latent.reshape(batch_size, -1)
# global latent code sub-space projection
# if self.global_pose_projection_layer:
# global_latent = self.global_pose_projection_layer(global_latent)
# Compute global bone length encoding
if self.use_bone_length and self.bone_latent_size > 0:
# print("bone length shape", bone_lengths.shape)
bone_latent = self.bone_encoder(bone_lengths.float())
# print("bone latent shape", bone_latent.shape)
output = torch.zeros([input.size(0), self.num_bones], device=input.device)
# For combining final latent
last_layer_latents = []
## Input to each sub model is [x; (local bone length); (global bone length latent); global latent]
# Input to each sub model is [x(3, fixed); local bone length(1, fixed); global bone length latent(16); global latent(8)]
for bone in range(self.num_bones):
input_i = input[:, bone, :]
# print("input shape", input.shape)
x = input[:, bone, :]
# print("x shape", x.shape)
# concat bone length
# print("bone length shape", bone_lengths.shape)
# print("bone length shape", bone_lengths[:, bone].shape)
if self.use_bone_length:
x = torch.cat([x, bone_lengths[:, bone].unsqueeze(-1)], axis=1)
if self.bone_latent_size > 0:
x = torch.cat([x, bone_latent], axis=1)
# print("x after global bone latent", x.shape)
# print('x size', x.size())
# print('global latent', global_latent.size())
# Per-bone global subspace projection
if self.global_pose_projection_size > 0:
global_proj = getattr(self, "global_proj" + str(bone))
# print("global latent code size", global_latent.size())
projected_global_latent = global_proj(global_latent)
x = torch.cat([x, projected_global_latent], 1)
else:
x = torch.cat([x, global_latent], 1)
# print('x before model', x.shape)
for layer in range(0, self.num_layers - 1):
x_prev = x
lin = getattr(self, "lin" + str(bone) + "_" + str(layer))
if layer in self.latent_in:
x = torch.cat([x, input_i], 1)
if layer == self.num_layers - 2 and self.combine_final:
last_layer_latents.append(x)
# print(x.shape)
# x = lin(x)
x_out = lin(x.float())
# last layer
# if layer == self.num_layers - 2:
# Smooth max log-sum-exp
if layer < self.num_layers - 2:
# residual connection
if layer > 0:
x_out = x_out + x_prev
if (
self.norm_layers is not None
and layer in self.norm_layers
and not self.weight_norm
):
bn = getattr(self, "bn" + str(bone) + "_" + str(layer))
x_out = bn(x_out)
x_out = self.actv(x_out)
if self.dropout is not None and layer in self.dropout:
x_out = F.dropout(x_out, p=self.dropout_prob, training=self.training)
x = x_out
# if hasattr(self, "th"):
# x = self.th(x)
# print("x_out", x.size())
output[:, bone] = x[:, 0]
if self.combine_final:
# import pdb; pdb.set_trace()
output = self.combine_final_layer(torch.cat(last_layer_latents, dim=-1))
# import pdb; pdb.set_trace()
# print("output shape", output.size())
# if self.smooth_max and reduce_part:
# print("before", output.size())
# output = output.logsumexp(1, keepdim=True)
# print("after", output.size())
# # Sigmoid
# if self.use_sigmoid:
# print("sigmoid")
# output = self.sigmoid(output)
# output = nn.Softmax(dim=1)(output)
# print(output[0])
# print(output)
# output, _ = output.max(1, keepdim=True)
# print("----output", output.size())
# print("-------", x.size())
return output # x
def sine_init(m):
with torch.no_grad():
if hasattr(m, 'weight'):
num_input = m.weight.size(-1)
# See supplement Sec. 1.5 for discussion of factor 30
m.weight.uniform_(-np.sqrt(6 / num_input) / 30, np.sqrt(6 / num_input) / 30)
def first_layer_sine_init(m):
with torch.no_grad():
if hasattr(m, 'weight'):
num_input = m.weight.size(-1)
# See paper sec. 3.2, final paragraph, and supplement Sec. 1.5 for discussion of factor 30
m.weight.uniform_(-1 / num_input, 1 / num_input)
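# Hedged usage sketch (illustrative addition, not part of the original
# module): applies the SIREN initialisers above to a small MLP. The
# `_demo_sine_mlp` name is an assumption introduced for this example; it
# relies on the module-level numpy import that `sine_init` itself uses.
def _demo_sine_mlp():
    import torch.nn as nn
    net = nn.Sequential(nn.Linear(3, 64), nn.Linear(64, 64), nn.Linear(64, 1))
    # First layer gets the wider 1/num_input range, later layers the
    # sqrt(6 / num_input) / 30 range discussed in the comments above.
    net[0].apply(first_layer_sine_init)
    for layer in list(net)[1:]:
        layer.apply(sine_init)
    return net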
class SdfDecoder(nn.Module):
def __init__(
self,
latent_size,
dims,
use_bone_length=False,
bone_latent_size=0,
num_bones=16,
projection=None,
global_projection=None,
global_pose_projection_size=0,
dropout=None,
dropout_prob=0.0,
norm_layers=(),
latent_in=(),
weight_norm=False,
smooth_max=False,
use_sigmoid=False,
latent_dropout=False,
combine_final=False,
positional_encoding=False,
actv='leakyrelu',
):
super(SdfDecoder, self).__init__()
# global pose projection type
if global_projection == 'o':
# origin
latent_size = 3
elif global_projection is None:
# no projection
latent_size = 4 * 4
if positional_encoding:
self.posi_encoder = PosEncoder()
xyz_size = 3 + 3 * 2 * 10
else:
self.posi_encoder = None
xyz_size = 3
# global pose sub-space projection
self.global_pose_projection_size = global_pose_projection_size
if global_pose_projection_size > 0:
for i in range(num_bones):
setattr(self, "global_proj" + str(i), nn.Linear(latent_size * num_bones, global_pose_projection_size))
# dims = [xyz_size + global_pose_projection_size] + dims + [1]
dims = [(xyz_size + global_pose_projection_size) * num_bones] + dims + [1]
# print("use global projection")
else:
# self.global_pose_projection_layer = None
if projection is not None:
dims = [xyz_size + latent_size * num_bones] + dims + [1]
else:
dims = [xyz_size + latent_size + latent_size * num_bones] + dims + [1]
# print("dims", dims)
if use_bone_length:
# dims[0] = dims[0] + 1
# bone_latent: the latent size of the vector encoding all bone lengths
self.bone_latent_size = bone_latent_size # 16
if bone_latent_size > 0:
dims[0] = dims[0] + self.bone_latent_size
self.bone_encoder = TwoLayerNet(num_bones, 40, self.bone_latent_size)
self.use_bone_length = use_bone_length
self.num_layers = len(dims)
self.num_bones = num_bones
self.projection = projection
self.global_projection = global_projection
self.norm_layers = norm_layers
self.latent_in = latent_in
self.latent_dropout = latent_dropout
if self.latent_dropout:
self.lat_dp = nn.Dropout(0.2)
self.weight_norm = weight_norm
for layer in range(0, self.num_layers - 1):
# if layer + 1 in latent_in:
# out_dim = dims[layer + 1] - dims[0]
# else:
# out_dim = dims[layer + 1]
if layer in latent_in:
in_dim = dims[layer] + dims[0]
else:
in_dim = dims[layer]
out_dim = dims[layer + 1]
# print(in_dim, out_dim)
setattr(self, "lin" + "_" + str(layer), nn.Linear(in_dim, out_dim))
# setattr(self, "lin" + str(bone) + "_" + str(layer), nn.Conv1d(dims[layer], out_dim, 1))
self.smooth_max = smooth_max
self.use_sigmoid = use_sigmoid
if use_sigmoid:
self.sigmoid = nn.Sigmoid()
# self.relu = nn.ReLU()
if actv == "siren":
self.actv = Sine()
else:
self.actv = nn.LeakyReLU(0.1)
self.dropout_prob = dropout_prob
self.dropout = dropout
# self.th = nn.Tanh()
# input: N x (L+3)
def forward(self, xyz, latent, bone_lengths=None, reduce_part=True):
# print("xyz", xyz, xyz.shape)
batch_size = xyz.size(0)
# print("batch_size", batch_size)
# Query point projection. Should be (B^-1)(x) by default.
if self.projection is None:
# [batch_size, 16, 4, 4] -> [batch_size, 16, tranMat(16)]
latent_reshape = latent.view(batch_size, latent.size(1), 16)
# [batch_size, 3] -> [batch_size, joints(16), 3]
xyz = xyz.unsqueeze(1).expand(-1, latent_reshape.size(1), -1)
# [batch_size, joints(16), xyz(3) + tranMat(16)]
input = torch.cat([latent_reshape, xyz], 2)
elif self.projection == 'x':
# (B^-1)(x)
# concat 1 for homogeneous points. [3] -> [4,1]
xyz = torch.cat([xyz, torch.ones(batch_size, 1, device=xyz.device)], 1).unsqueeze(-1)
# print(xyz.size())
# [batch_size, joints(16), 4, 4] x [batch_size, 1, 4, 1] -> [batch_size, 16, 4, 1]
input = torch.matmul(latent, xyz.unsqueeze(1))
input = input[:, :, :3, 0]
# final input shape [batch_size, joints(16), projection(3)]
# Positional encoding
if self.posi_encoder is not None:
# import pdb; pdb.set_trace()
input = self.posi_encoder(input)
# global latent code projection
if self.global_projection == 'o':
# collections of (B^-1)(o)
global_latent = latent[:, :, :3, 3]
# print("global_latent", global_latent)
global_latent = global_latent.reshape(batch_size, -1)
# print("global size", global_latent.size())
else:
# no projection, just flatten the transformation matrix
global_latent = latent.reshape(batch_size, -1)
# global latent code sub-space projection
# if self.global_pose_projection_layer:
# global_latent = self.global_pose_projection_layer(global_latent)
# Compute global bone length encoding
if self.use_bone_length and self.bone_latent_size > 0:
# print("bone length shape", bone_lengths.shape)
bone_latent = self.bone_encoder(bone_lengths)
# print("bone latent shape", bone_latent.shape)
output = torch.zeros([input.size(0), self.num_bones], device=input.device)
bone_input_list = []
## Input to each sub model is [x; (local bone length); (global bone length latent); global latent]
# Input to each sub model is [x(3, fixed); local bone length(1, fixed); global bone length latent(16); global latent(8)]
for bone in range(self.num_bones):
            input_i = input[:, bone, :]
            # print("input shape", input.shape)
            x = input_i
            # print("x shape", x.shape)
# concat bone length
# print("bone length shape", bone_lengths.shape)
# print("bone length shape", bone_lengths[:, bone].shape)
# print('x size', x.size())
# print('global latent', global_latent.size())
# Per-bone global subspace projection
if self.global_pose_projection_size > 0:
global_proj = getattr(self, "global_proj" + str(bone))
# print("global latent code size", global_latent.size())
projected_global_latent = global_proj(global_latent)
x = torch.cat([x, projected_global_latent], 1)
else:
x = torch.cat([x, global_latent], 1)
bone_input_list.append(x)
# import pdb; pdb.set_trace()
x = torch.cat(bone_input_list, 1)
if self.use_bone_length:
# x = torch.cat([x, bone_lengths[:, bone].unsqueeze(-1)], axis=1)
if self.bone_latent_size > 0:
x = torch.cat([x, bone_latent], axis=1)
# print("x after global bone latent", x.shape)
# print('x before model', x.shape)
for layer in range(0, self.num_layers - 1):
x_prev = x
lin = getattr(self, "lin" + "_" + str(layer))
if layer in self.latent_in:
x = torch.cat([x, input_i], 1)
x_out = lin(x)
# last layer
# if layer == self.num_layers - 2:
# Smooth max log-sum-exp
if layer < self.num_layers - 2:
# residual connection
if layer > 0:
x_out = x_out + x_prev
x_out = self.actv(x_out)
if self.dropout is not None and layer in self.dropout:
x_out = F.dropout(x_out, p=self.dropout_prob, training=self.training)
x = x_out
# import pdb; pdb.set_trace()
# # if hasattr(self, "th"):
# # x = self.th(x)
# # print("x_out", x.size())
# output[:, bone] = x[:, 0]
# import pdb; pdb.set_trace()
# print("output shape", output.size())
# if self.smooth_max and reduce_part:
# print("before", output.size())
# output = output.logsumexp(1, keepdim=True)
# print("after", output.size())
# # Sigmoid
# if self.use_sigmoid:
# print("sigmoid")
# output = self.sigmoid(output)
# output = nn.Softmax(dim=1)(output)
# print(output[0])
# print(output)
# output, _ = output.max(1, keepdim=True)
# print("----output", output.size())
# print("-------", x.size())
return x_out # output # x
| 44,495 | 37.259673 | 128 | py |
Im2Hands | Im2Hands-main/artihand/nasa/models/core_kpts_ref.py | import sys
import torch
import torch.nn as nn
from torch import distributions as dist
from dependencies.intaghand.models.encoder import ResNetSimple
from dependencies.intaghand.models.model_attn.img_attn import *
from dependencies.intaghand.models.model_attn.self_attn import *
from dependencies.intaghand.models.model_attn.gcn import GraphLayer
def hand_joint_graph(v_num=21):
    '''
    Adjacency matrix of the 21-joint hand skeleton; the fingers are
    connected to each other only through the root joint.
    '''
graph = torch.zeros((v_num,v_num))
edges = torch.tensor([[0,13],
[13,14],
[14,15],
[15,16],
[0,1],
[1,2],
[2,3],
[3,17],
[0,4],
[4,5],
[5,6],
[6,18],
[0,10],
[10,11],
[11,12],
[12,19],
[0,7],
[7,8],
[8,9],
[9,20]])
graph[edges[:,0], edges[:,1]] = 1.0
return graph
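# Hedged usage sketch (illustrative addition): builds the hand skeleton
# adjacency and symmetrises it, the usual preprocessing before deriving a
# graph Laplacian for GraphLayer. `_demo_hand_joint_graph` is not an
# original API of this module.
def _demo_hand_joint_graph():
    graph = hand_joint_graph()
    assert graph.shape == (21, 21)
    # 20 directed edges span the 21 joints as a tree.
    assert int(graph.sum().item()) == 20
    return graph + graph.t()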
class DualGraphLayer(nn.Module):
def __init__(self,
verts_in_dim=48,
verts_in_dim_2=48,
verts_out_dim=16,
graph_L_Left=None,
graph_L_Right=None,
graph_k=2,
graph_layer_num=3,
img_size=64,
img_f_dim=64,
grid_size=8,
grid_f_dim=64,
n_heads=4,
dropout=0,
is_inter_attn=True
):
super().__init__()
self.verts_num = graph_L_Left.shape[0]
self.verts_in_dim = verts_in_dim
self.img_size = img_size
self.img_f_dim = img_f_dim
self.inter_attn = is_inter_attn
self.graph_left = GraphLayer(verts_in_dim, verts_out_dim,
graph_L_Left, graph_k, graph_layer_num,
dropout)
self.graph_right = GraphLayer(verts_in_dim, verts_out_dim,
graph_L_Right, graph_k, graph_layer_num,
dropout)
self.img_ex_left = img_ex(img_size, img_f_dim,
grid_size, grid_f_dim,
verts_in_dim_2,
n_heads=n_heads,
dropout=dropout)
self.img_ex_right = img_ex(img_size, img_f_dim,
grid_size, grid_f_dim,
verts_in_dim_2,
n_heads=n_heads,
dropout=dropout)
def forward(self, left_joint_ft, right_joint_ft, img_f):
BS1, V, f = left_joint_ft.shape
assert V == self.verts_num
assert f == self.verts_in_dim
BS2, V, f = right_joint_ft.shape
assert V == self.verts_num
assert f == self.verts_in_dim
BS3, C, H, W = img_f.shape
assert C == self.img_f_dim
assert H == self.img_size
assert W == self.img_size
assert BS1 == BS2
assert BS2 == BS3
BS = BS1
left_joint_ft_graph = self.graph_left(left_joint_ft)
right_joint_ft_graph = self.graph_right(right_joint_ft)
left_joint_ft = self.img_ex_left(img_f, torch.cat([left_joint_ft, left_joint_ft_graph], 2))
right_joint_ft = self.img_ex_right(img_f, torch.cat([right_joint_ft, right_joint_ft_graph], 2))
return left_joint_ft, right_joint_ft
class ArticulatedHandNetKptsRef(nn.Module):
''' Keypoint Refinement Network class.
Args:
device (device): torch device
'''
def __init__(self, device='cuda'):
super().__init__()
self._device = device
verts_in_dim = [48, 64, 80, 96]
verts_in_dim_2 = [64, 80, 96, 112]
verts_out_dim = [16, 16, 16, 16]
graph_L_Left = [
hand_joint_graph().to(device),
] * 4
graph_L_Right = [
hand_joint_graph().to(device),
] * 4
graph_k =[2, 2, 2, 2]
graph_layer_num = [2, 2, 2, 2]
img_size = [64, 64, 64, 64]
img_f_dim = [256, 256, 256, 256]
grid_size = [8, 8, 8, 8]
grid_f_dim = [64, 82, 96, 112]
n_heads = 4
self.image_encoder = ResNetSimple(model_type='resnet50',
pretrained=True,
fmapDim=[128, 128, 128, 128],
handNum=2,
heatmapDim=21)
self.img_proj_layer = nn.Conv2d(512, 256, 1)
self.img_final_layer = nn.Conv2d(256, 32, 1)
self.left_id_emb = nn.Embedding(21, 16)
self.right_id_emb = nn.Embedding(21, 16)
self.left_pt_emb = nn.Sequential(nn.Conv1d(3, 32, 1),
nn.ReLU(),
nn.Dropout(0.01),
nn.Conv1d(32, 32, 1))
self.right_pt_emb = nn.Sequential(nn.Conv1d(3, 32, 1),
nn.ReLU(),
nn.Dropout(0.01),
nn.Conv1d(32, 32, 1))
self.left_reg = nn.Sequential(nn.Conv1d(112, 32, 1),
nn.ReLU(),
nn.Dropout(0.01),
nn.Conv1d(32, 3, 1))
self.right_reg = nn.Sequential(nn.Conv1d(112, 32, 1),
nn.ReLU(),
nn.Dropout(0.01),
nn.Conv1d(32, 3, 1))
self._device = device
self.gcn_layers = nn.ModuleList()
for i in range(len(verts_in_dim)):
self.gcn_layers.append(DualGraphLayer(verts_in_dim=verts_in_dim[i],
verts_in_dim_2=verts_in_dim_2[i],
verts_out_dim=verts_out_dim[i],
graph_L_Left=graph_L_Left[i].detach().cpu().numpy(),
graph_L_Right=graph_L_Right[i].detach().cpu().numpy(),
graph_k=graph_k[i],
graph_layer_num=graph_layer_num[i],
img_size=img_size[i],
img_f_dim=img_f_dim[i],
grid_size=grid_size[i],
grid_f_dim=grid_f_dim[i],
n_heads=n_heads,
dropout=0.01))
def forward(self, img, camera_params, joints, **kwargs):
        ''' Performs a forward pass through the network.
        Args:
            img (tensor): batch of input images
            camera_params (dict): camera parameters
            joints (dict): initial 'left'/'right' joint predictions
        '''
# 2D feature generate
batch_size = img.shape[0]
hms, mask, dp, img_fmaps, hms_fmaps, dp_fmaps = self.image_encoder(img.cuda())
img_f, hms_f, dp_f = img_fmaps[-1], hms_fmaps[-1], dp_fmaps[-1]
# position embedding
joint_ids = torch.arange(21, dtype=torch.long, device=self._device)
joint_ids = joint_ids.unsqueeze(0).repeat(batch_size, 1)
left_id_emb = self.left_id_emb(joint_ids)
right_id_emb = self.right_id_emb(joint_ids)
# point embedding
left_pt_emb = self.left_pt_emb(joints['left'].transpose(1,2).float())
left_joint_ft = torch.cat((left_id_emb, left_pt_emb.transpose(1,2)), 2) # [32, 21, 48]
right_pt_emb = self.right_pt_emb(joints['right'].transpose(1,2).float())
right_joint_ft = torch.cat((right_id_emb, right_pt_emb.transpose(1,2)), 2) # [32, 21, 48]
img_ft = torch.cat((img_f, hms_f, dp_f), 1)
img_ft = self.img_proj_layer(img_ft) # [bs, 64, 64, 64]
for i in range(len(self.gcn_layers)):
left_joint_ft, right_joint_ft = self.gcn_layers[i](left_joint_ft, right_joint_ft, img_ft)
left_joint_res = self.left_reg(left_joint_ft.transpose(1,2)) # 32, 3, 21
        right_joint_res = self.right_reg(right_joint_ft.transpose(1,2))
'''
import open3d as o3d
pcd = o3d.geometry.PointCloud()
pcd.points = o3d.utility.Vector3dVector(left_joints[0].cpu().detach().numpy())
o3d.io.write_point_cloud("debug/left_joints_org.ply", pcd)
pcd.points = o3d.utility.Vector3dVector(right_joints[0].cpu().detach().numpy())
o3d.io.write_point_cloud("debug/right_joints_org.ply", pcd)
'''
#import pdb; pdb.set_trace()
left_joints = left_joint_res.transpose(1,2)
right_joints = right_joint_res.transpose(1,2)
return left_joints, right_joints
def to(self, device):
''' Puts the model to the device.
Args:
device (device): pytorch device
'''
model = super().to(device)
model._device = device
return model
| 9,607 | 37.432 | 104 | py |
Im2Hands | Im2Hands-main/artihand/utils/visualize.py | import numpy as np
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from torchvision.utils import save_image
import im2mesh.common as common
def visualize_data(data, data_type, out_file):
r''' Visualizes the data with regard to its type.
Args:
data (tensor): batch of data
        data_type (string): data type (trans_matrix, img, voxels or pointcloud)
out_file (string): output file
'''
if data_type == 'trans_matrix':
visualize_transmatrix(data, out_file=out_file)
elif data_type == 'img':
if data.dim() == 3:
data = data.unsqueeze(0)
save_image(data, out_file, nrow=4)
elif data_type == 'voxels':
visualize_voxels(data, out_file=out_file)
elif data_type == 'pointcloud':
visualize_pointcloud(data, out_file=out_file)
elif data_type is None or data_type == 'idx':
pass
else:
raise ValueError('Invalid data_type "%s"' % data_type)
def visualize_voxels(voxels, out_file=None, show=False):
r''' Visualizes voxel data.
Args:
voxels (tensor): voxel data
out_file (string): output file
show (bool): whether the plot should be shown
'''
# Use numpy
voxels = np.asarray(voxels)
# Create plot
fig = plt.figure()
ax = fig.gca(projection=Axes3D.name)
voxels = voxels.transpose(2, 0, 1)
ax.voxels(voxels, edgecolor='k')
ax.set_xlabel('Z')
ax.set_ylabel('X')
ax.set_zlabel('Y')
ax.view_init(elev=30, azim=45)
if out_file is not None:
plt.savefig(out_file)
if show:
plt.show()
plt.close(fig)
def visualize_transmatrix(voxels, out_file=None, show=False):
    r''' Visualizes transformation-matrix data by rendering it as voxels.
    Args:
        voxels (tensor): voxelized transformation-matrix data
out_file (string): output file
show (bool): whether the plot should be shown
'''
# Use numpy
voxels = np.asarray(voxels)
# Create plot
fig = plt.figure()
ax = fig.gca(projection=Axes3D.name)
voxels = voxels.transpose(2, 0, 1)
ax.voxels(voxels, edgecolor='k')
ax.set_xlabel('Z')
ax.set_ylabel('X')
ax.set_zlabel('Y')
ax.view_init(elev=30, azim=45)
if out_file is not None:
plt.savefig(out_file)
if show:
plt.show()
plt.close(fig)
def visualize_pointcloud(points, normals=None, off_surface_points=None,
out_file=None, show=False):
r''' Visualizes point cloud data.
Args:
points (tensor): point data
normals (tensor): normal data (if existing)
out_file (string): output file
show (bool): whether the plot should be shown
'''
# Use numpy
points = np.asarray(points)
# Create plot
fig = plt.figure()
ax = fig.gca(projection=Axes3D.name)
ax.scatter(points[:, 2], points[:, 0], points[:, 1])
if off_surface_points is not None:
ax.scatter(off_surface_points[:, 2], off_surface_points[:, 0], off_surface_points[:, 1], )
if normals is not None:
ax.quiver(
points[:, 2], points[:, 0], points[:, 1],
normals[:, 2], normals[:, 0], normals[:, 1],
length=0.1, color='k'
)
ax.set_xlabel('Z')
ax.set_ylabel('X')
ax.set_zlabel('Y')
ax.set_xlim(-0.5, 0.5)
ax.set_ylim(-0.5, 0.5)
ax.set_zlim(-0.5, 0.5)
ax.view_init(elev=30, azim=45)
if out_file is not None:
plt.savefig(out_file)
if show:
plt.show()
plt.close(fig)
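# Hedged usage sketch (illustrative addition): renders a random point cloud
# with unit normals through the helper above. The output path is a
# placeholder, and `_demo_visualize_pointcloud` is not an original API.
def _demo_visualize_pointcloud(out_file='pointcloud_demo.png'):
    points = np.random.uniform(-0.4, 0.4, size=(256, 3))
    normals = np.random.randn(256, 3)
    normals /= np.linalg.norm(normals, axis=1, keepdims=True)
    visualize_pointcloud(points, normals=normals, out_file=out_file)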
def visualise_projection(
self, points, world_mat, camera_mat, img, output_file='out.png'):
r''' Visualizes the transformation and projection to image plane.
The first points of the batch are transformed and projected to the
respective image. After performing the relevant transformations, the
visualization is saved in the provided output_file path.
Arguments:
points (tensor): batch of point cloud points
world_mat (tensor): batch of matrices to rotate pc to camera-based
coordinates
camera_mat (tensor): batch of camera matrices to project to 2D image
plane
img (tensor): tensor of batch GT image files
output_file (string): where the output should be saved
'''
points_transformed = common.transform_points(points, world_mat)
points_img = common.project_to_camera(points_transformed, camera_mat)
pimg2 = points_img[0].detach().cpu().numpy()
image = img[0].cpu().numpy()
plt.imshow(image.transpose(1, 2, 0))
plt.plot(
(pimg2[:, 0] + 1)*image.shape[1]/2,
(pimg2[:, 1] + 1) * image.shape[2]/2, 'x')
plt.savefig(output_file)
| 4,668 | 30.33557 | 98 | py |
Im2Hands | Im2Hands-main/artihand/data/ref_occ_sample_hands.py | import os
import sys
import json
import pickle
import logging
import trimesh
import torch
import torchvision.transforms
import numpy as np
import cv2 as cv
import open3d as o3d
from glob import glob
from torch.utils import data
from manopth.manolayer import ManoLayer
from dependencies.halo.halo_adapter.converter import PoseConverter, transform_to_canonical
from dependencies.halo.halo_adapter.interface import (get_halo_model, convert_joints, change_axes, scale_halo_trans_mat)
from dependencies.halo.halo_adapter.projection import get_projection_layer
from dependencies.halo.halo_adapter.transform_utils import xyz_to_xyz1
from dependencies.intaghand.dataset.dataset_utils import IMG_SIZE, HAND_BBOX_RATIO, HEATMAP_SIGMA, HEATMAP_SIZE, cut_img
logger = logging.getLogger(__name__)
def get_bone_lengths(joints):
bones = np.array([
(0,4),
(1,2),
(2,3),
(3,17),
(4,5),
(5,6),
(6,18),
(7,8),
(8,9),
(9,20),
(10,11),
(11,12),
(12,19),
(13,14),
(14,15),
(15,16)
])
bone_length = joints[bones[:,0]] - joints[bones[:,1]]
bone_length = np.linalg.norm(bone_length, axis=1)
return bone_length
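# Hedged usage sketch (illustrative addition): get_bone_lengths expects 21
# HALO-ordered joints and returns the 16 bone lengths used as decoder
# conditioning; random joints are used here only to show the shapes.
def _demo_get_bone_lengths():
    joints = np.random.randn(21, 3)
    lengths = get_bone_lengths(joints)
    assert lengths.shape == (16,)
    return lengths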
# Codes adopted from HALO
def preprocess_joints(joints, side='right', scale=0.4):
    '''Converts HALO-ordered joints into per-bone unposing transformations,
    bone lengths, and the global root rotation matrix.'''
permute_mat = [0, 5, 6, 7, 9, 10, 11, 17, 18, 19, 13, 14, 15, 1, 2, 3, 4, 8, 12, 16, 20]
joints -= joints[0]
joints = joints[permute_mat]
if side == 'left':
joints *= [-1, 1, 1]
org_joints = joints
joints = torch.Tensor(joints).unsqueeze(0)
joints = convert_joints(joints, source='halo', target='biomech')
is_right_vec = torch.ones(joints.shape[0])
pose_converter = PoseConverter()
palm_align_kps_local_cs, glo_rot_right = transform_to_canonical(joints.double(), is_right=is_right_vec)
palm_align_kps_local_cs_nasa_axes, swap_axes_mat = change_axes(palm_align_kps_local_cs)
rot_then_swap_mat = torch.matmul(swap_axes_mat.unsqueeze(0), glo_rot_right.float()).unsqueeze(0)
trans_mat_pc, _ = pose_converter(palm_align_kps_local_cs_nasa_axes, is_right_vec)
trans_mat_pc = convert_joints(trans_mat_pc, source='biomech', target='halo')
joints_for_nasa_input = [0, 2, 3, 17, 5, 6, 18, 8, 9, 20, 11, 12, 19, 14, 15, 16]
trans_mat_pc = trans_mat_pc[:, joints_for_nasa_input]
org_joints = torch.matmul(rot_then_swap_mat.squeeze(), xyz_to_xyz1(torch.Tensor(org_joints)).unsqueeze(-1))[:, :3, 0]
bone_lengths = torch.Tensor(get_bone_lengths(org_joints)).squeeze()
trans_mat_pc_all = trans_mat_pc
unpose_mat = scale_halo_trans_mat(trans_mat_pc_all)
scale_mat = torch.eye(4) * scale
scale_mat[3, 3] = 1.
unpose_mat = torch.matmul(unpose_mat, scale_mat.double()).squeeze()
return unpose_mat, torch.Tensor(bone_lengths).squeeze(0), rot_then_swap_mat.squeeze()
class RefOccSampleHandDataset(data.Dataset):
def __init__(self, data_path, anno_path, input_helpers, split=None,
no_except=True, transforms=None, subset=1, subset_idx=0):
assert split in ['train', 'test', 'val']
self.split = split
self.subset = subset
self.subset_idx = subset_idx
self.input_helpers = input_helpers
self.no_except = no_except
self.transforms = transforms
self.data_path = data_path
self.anno_path = anno_path
self.size = len(glob(os.path.join(data_path, split, 'anno', '*.pkl')))
self.normalize_img = torchvision.transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
with open(os.path.join(anno_path, split, 'InterHand2.6M_%s_MANO_NeuralAnnot.json' % split)) as f:
self.annot_file = json.load(f)
self.right_mano_layer = ManoLayer(
mano_root='/workspace/mano_v1_2/models', use_pca=False, ncomps=45, flat_hand_mean=False)
self.left_mano_layer = ManoLayer(
mano_root='/workspace/mano_v1_2/models', use_pca=False, ncomps=45, flat_hand_mean=False, side='left')
def __len__(self):
return self.size // self.subset
def __getitem__(self, idx):
idx = idx * self.subset + self.subset_idx
img_path = os.path.join(self.data_path, self.split, 'img', '{}.jpg'.format(idx))
        img = cv.imread(img_path)
img = cv.resize(img, (IMG_SIZE, IMG_SIZE))
imgTensor = torch.tensor(cv.cvtColor(img, cv.COLOR_BGR2RGB), dtype=torch.float32) / 255
imgTensor = imgTensor.permute(2, 0, 1)
imgTensor = self.normalize_img(imgTensor)
with open(os.path.join(self.data_path, self.split, 'anno', '{}.pkl'.format(idx)), 'rb') as file:
data = pickle.load(file)
pred_left_joints = np.asarray(o3d.io.read_point_cloud(os.path.join(self.data_path, self.split, 'pred_joints', '%07d_left.ply' % idx)).points)
pred_right_joints = np.asarray(o3d.io.read_point_cloud(os.path.join(self.data_path, self.split, 'pred_joints', '%07d_right.ply' % idx)).points)
pred_left_shape = trimesh.load(os.path.join(self.data_path, self.split, 'pred_shapes', '%07d_left.obj' % idx))
pred_right_shape = trimesh.load(os.path.join(self.data_path, self.split, 'pred_shapes', '%07d_right.obj' % idx))
camera_params = {}
camera_params['R'] = data['camera']['R']
camera_params['T'] = data['camera']['t']
camera_params['camera'] = data['camera']['camera']
capture_idx = data['image']['capture']
frame_idx = data['image']['frame_idx']
seq_name = data['image']['seq_name']
split_path = os.path.join(self.anno_path, self.split)
mano_data = {'right': {}, 'left': {}}
for side in ['right', 'left']:
for field_name, input_helper in self.input_helpers.items():
try:
model = '%s_%s_%s' % (capture_idx, frame_idx, side) # !!! TO BE MODIFIED
field_data = input_helper.load(split_path, model)
except Exception:
if self.no_except:
                        logger.warning(
                            'Error occurred when loading field %s of model %s'
                            % (field_name, model)
)
return None
else:
raise
if isinstance(field_data, dict):
for k, v in field_data.items():
if k is None:
mano_data[side][field_name] = v
elif field_name == 'inputs':
mano_data[side][k] = v
else:
mano_data[side]['%s.%s' % (field_name, k)] = v
else:
mano_data[side][field_name] = field_data
camera_params['right_root_xyz'] = mano_data['right']['root_xyz']
camera_params['left_root_xyz'] = mano_data['left']['root_xyz']
if self.transforms is not None:
for side in ['right', 'left']:
for tran_name, tran in self.transforms.items():
mano_data[side] = tran(mano_data[side])
mano_data[side]['idx'] = idx
left_inputs, left_bone_lengths, left_root_rot_mat = preprocess_joints(pred_left_joints, side='left')
right_inputs, right_bone_lengths, right_root_rot_mat = preprocess_joints(pred_right_joints)
left_anchor_points = trimesh.sample.sample_surface_even(pred_left_shape, 512)[0] # sample_surface_even
right_anchor_points = trimesh.sample.sample_surface_even(pred_right_shape, 512)[0]
if left_anchor_points.shape[0] != 512:
left_anchor_points = np.concatenate((left_anchor_points, left_anchor_points[:512-left_anchor_points.shape[0]]), 0)
if right_anchor_points.shape[0] != 512:
right_anchor_points = np.concatenate((right_anchor_points, right_anchor_points[:512-right_anchor_points.shape[0]]), 0)
mano_data['left']['pred_joints'] = pred_left_joints
mano_data['left']['inputs'] = left_inputs
mano_data['left']['bone_lengths'] = left_bone_lengths
mano_data['left']['root_rot_mat'] = left_root_rot_mat
mano_data['left']['mid_joint'] = (pred_left_joints - pred_left_joints[0])[9]
mano_data['left']['anchor_points'] = left_anchor_points
mano_data['right']['pred_joints'] = pred_right_joints
mano_data['right']['inputs'] = right_inputs
mano_data['right']['bone_lengths'] = right_bone_lengths
mano_data['right']['root_rot_mat'] = right_root_rot_mat
mano_data['right']['mid_joint'] = (pred_right_joints - pred_right_joints[0])[9]
mano_data['right']['anchor_points'] = right_anchor_points
camera_params['img_path'] = img_path
return imgTensor, camera_params, mano_data, idx
| 8,986 | 38.244541 | 151 | py |
Im2Hands | Im2Hands-main/artihand/data/init_occ_sample_hands.py | import os
import sys
import json
import pickle
import logging
import torch
import torchvision.transforms
import numpy as np
import cv2 as cv
import open3d as o3d
from glob import glob
from torch.utils import data
from manopth.manolayer import ManoLayer
from dependencies.halo.halo_adapter.converter import PoseConverter, transform_to_canonical
from dependencies.halo.halo_adapter.interface import (get_halo_model, convert_joints, change_axes, scale_halo_trans_mat)
from dependencies.halo.halo_adapter.projection import get_projection_layer
from dependencies.halo.halo_adapter.transform_utils import xyz_to_xyz1
from dependencies.intaghand.dataset.dataset_utils import IMG_SIZE, HAND_BBOX_RATIO, HEATMAP_SIGMA, HEATMAP_SIZE, cut_img
logger = logging.getLogger(__name__)
def get_bone_lengths(joints):
bones = np.array([
(0,4),
(1,2),
(2,3),
(3,17),
(4,5),
(5,6),
(6,18),
(7,8),
(8,9),
(9,20),
(10,11),
(11,12),
(12,19),
(13,14),
(14,15),
(15,16)
])
bone_length = joints[bones[:,0]] - joints[bones[:,1]]
bone_length = np.linalg.norm(bone_length, axis=1)
return bone_length
# Codes adopted from HALO
def preprocess_joints(joints, side='right', scale=0.4):
permute_mat = [0, 5, 6, 7, 9, 10, 11, 17, 18, 19, 13, 14, 15, 1, 2, 3, 4, 8, 12, 16, 20]
joints -= joints[0]
joints = joints[permute_mat]
if side == 'left':
joints *= [-1, 1, 1]
org_joints = joints
joints = torch.Tensor(joints).unsqueeze(0)
joints = convert_joints(joints, source='halo', target='biomech')
is_right_vec = torch.ones(joints.shape[0])
pose_converter = PoseConverter()
palm_align_kps_local_cs, glo_rot_right = transform_to_canonical(joints.double(), is_right=is_right_vec)
palm_align_kps_local_cs_nasa_axes, swap_axes_mat = change_axes(palm_align_kps_local_cs)
rot_then_swap_mat = torch.matmul(swap_axes_mat.unsqueeze(0), glo_rot_right.float()).unsqueeze(0)
trans_mat_pc, _ = pose_converter(palm_align_kps_local_cs_nasa_axes, is_right_vec)
trans_mat_pc = convert_joints(trans_mat_pc, source='biomech', target='halo')
joints_for_nasa_input = [0, 2, 3, 17, 5, 6, 18, 8, 9, 20, 11, 12, 19, 14, 15, 16]
trans_mat_pc = trans_mat_pc[:, joints_for_nasa_input]
org_joints = torch.matmul(rot_then_swap_mat.squeeze(), xyz_to_xyz1(torch.Tensor(org_joints)).unsqueeze(-1))[:, :3, 0]
bone_lengths = torch.Tensor(get_bone_lengths(org_joints)).squeeze()
trans_mat_pc_all = trans_mat_pc
unpose_mat = scale_halo_trans_mat(trans_mat_pc_all)
scale_mat = torch.eye(4) * scale
scale_mat[3, 3] = 1.
unpose_mat = torch.matmul(unpose_mat, scale_mat.double()).squeeze()
return unpose_mat, torch.Tensor(bone_lengths).squeeze(0), rot_then_swap_mat.squeeze()
class InitOccSampleHandDataset(data.Dataset):
def __init__(self, data_path, anno_path, input_helpers, split=None,
no_except=True, transforms=None, subset=1, subset_idx=0):
assert split in ['train', 'test', 'val']
self.split = split
self.subset = subset
self.subset_idx = subset_idx
self.input_helpers = input_helpers
self.no_except = no_except
self.transforms = transforms
self.data_path = data_path
self.anno_path = anno_path
self.size = len(glob(os.path.join(data_path, split, 'anno', '*.pkl')))
self.normalize_img = torchvision.transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
with open(os.path.join(anno_path, split, 'InterHand2.6M_%s_MANO_NeuralAnnot.json' % split)) as f:
self.annot_file = json.load(f)
self.right_mano_layer = ManoLayer(
mano_root='/workspace/mano_v1_2/models', use_pca=False, ncomps=45, flat_hand_mean=False)
self.left_mano_layer = ManoLayer(
mano_root='/workspace/mano_v1_2/models', use_pca=False, ncomps=45, flat_hand_mean=False, side='left')
def __len__(self):
return self.size // self.subset
def __getitem__(self, idx):
idx = idx * self.subset + self.subset_idx
img_path = os.path.join(self.data_path, self.split, 'img', '{}.jpg'.format(idx))
        img = cv.imread(img_path)
img = cv.resize(img, (IMG_SIZE, IMG_SIZE))
imgTensor = torch.tensor(cv.cvtColor(img, cv.COLOR_BGR2RGB), dtype=torch.float32) / 255
imgTensor = imgTensor.permute(2, 0, 1)
imgTensor = self.normalize_img(imgTensor)
with open(os.path.join(self.data_path, self.split, 'anno', '{}.pkl'.format(idx)), 'rb') as file:
data = pickle.load(file)
pred_left_joints = np.asarray(o3d.io.read_point_cloud(os.path.join(self.data_path, self.split, 'pred_joints', '%07d_left.ply' % idx)).points)
pred_right_joints = np.asarray(o3d.io.read_point_cloud(os.path.join(self.data_path, self.split, 'pred_joints', '%07d_right.ply' % idx)).points)
camera_params = {}
camera_params['R'] = data['camera']['R']
camera_params['T'] = data['camera']['t']
camera_params['camera'] = data['camera']['camera']
capture_idx = data['image']['capture']
frame_idx = data['image']['frame_idx']
seq_name = data['image']['seq_name']
split_path = os.path.join(self.anno_path, self.split)
mano_data = {'right': {}, 'left': {}}
for side in ['right', 'left']:
for field_name, input_helper in self.input_helpers.items():
try:
model = '%s_%s_%s' % (capture_idx, frame_idx, side) # !!! TO BE MODIFIED
field_data = input_helper.load(split_path, model)
except Exception:
if self.no_except:
                        logger.warning(
                            'Error occurred when loading field %s of model %s'
                            % (field_name, model)
)
return None
else:
raise
if isinstance(field_data, dict):
for k, v in field_data.items():
if k is None:
mano_data[side][field_name] = v
elif field_name == 'inputs':
mano_data[side][k] = v
else:
mano_data[side]['%s.%s' % (field_name, k)] = v
else:
mano_data[side][field_name] = field_data
camera_params['right_root_xyz'] = mano_data['right']['root_xyz']
camera_params['left_root_xyz'] = mano_data['left']['root_xyz']
if self.transforms is not None:
for side in ['right', 'left']:
for tran_name, tran in self.transforms.items():
mano_data[side] = tran(mano_data[side])
mano_data[side]['idx'] = idx
left_inputs, left_bone_lengths, left_root_rot_mat = preprocess_joints(pred_left_joints, side='left')
right_inputs, right_bone_lengths, right_root_rot_mat = preprocess_joints(pred_right_joints)
mano_data['left']['pred_joints'] = pred_left_joints
mano_data['left']['inputs'] = left_inputs
mano_data['left']['bone_lengths'] = left_bone_lengths
mano_data['left']['root_rot_mat'] = left_root_rot_mat
mano_data['right']['pred_joints'] = pred_right_joints
mano_data['right']['inputs'] = right_inputs
mano_data['right']['bone_lengths'] = right_bone_lengths
mano_data['right']['root_rot_mat'] = right_root_rot_mat
mano_data['left']['img_path'] = img_path
return imgTensor, camera_params, mano_data, idx
| 7,874 | 35.971831 | 151 | py |
Im2Hands | Im2Hands-main/artihand/data/utils.py | import os
import numpy as np
from torch.utils import data
def collate_remove_none(batch):
''' Collater that puts each data field into a tensor with outer dimension
batch size.
Args:
batch: batch
'''
batch = list(filter(lambda x: x is not None, batch))
return data.dataloader.default_collate(batch)
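# Hedged usage sketch (illustrative addition): wires collate_remove_none and
# worker_init_fn (defined below) into a DataLoader so that samples returning
# None, e.g. on field-loading errors, are dropped from the batch.
def _demo_dataloader(dataset, batch_size=4):
    loader = data.DataLoader(
        dataset, batch_size=batch_size, num_workers=2, shuffle=True,
        collate_fn=collate_remove_none, worker_init_fn=worker_init_fn)
    return loader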
def worker_init_fn(worker_id):
''' Worker init function to ensure true randomness.
'''
random_data = os.urandom(4)
base_seed = int.from_bytes(random_data, byteorder="big")
    np.random.seed(base_seed + worker_id)
| 568 | 24.863636 | 77 | py |
Im2Hands | Im2Hands-main/artihand/data/kpts_ref_sample_hands.py | import os
import sys
import json
import pickle
import logging
import torch
import torchvision.transforms
import numpy as np
import cv2 as cv
import open3d as o3d
from glob import glob
from torch.utils import data
from manopth.manolayer import ManoLayer
from dependencies.halo.halo_adapter.converter import PoseConverter, transform_to_canonical
from dependencies.halo.halo_adapter.interface import (get_halo_model, convert_joints, change_axes, scale_halo_trans_mat)
from dependencies.halo.halo_adapter.projection import get_projection_layer
from dependencies.halo.halo_adapter.transform_utils import xyz_to_xyz1
from dependencies.intaghand.dataset.dataset_utils import IMG_SIZE, HAND_BBOX_RATIO, HEATMAP_SIGMA, HEATMAP_SIZE, cut_img
logger = logging.getLogger(__name__)
def get_bone_lengths(joints):
bones = np.array([
(0,4),
(1,2),
(2,3),
(3,17),
(4,5),
(5,6),
(6,18),
(7,8),
(8,9),
(9,20),
(10,11),
(11,12),
(12,19),
(13,14),
(14,15),
(15,16)
])
bone_length = joints[bones[:,0]] - joints[bones[:,1]]
bone_length = np.linalg.norm(bone_length, axis=1)
return bone_length
# Codes adopted from HALO
def preprocess_joints(joints, side='right', scale=0.4):
permute_mat = [0, 5, 6, 7, 9, 10, 11, 17, 18, 19, 13, 14, 15, 1, 2, 3, 4, 8, 12, 16, 20]
joints -= joints[0]
joints = joints[permute_mat]
if side == 'left':
joints *= [-1, 1, 1]
org_joints = joints
joints = torch.Tensor(joints).unsqueeze(0)
joints = convert_joints(joints, source='halo', target='biomech')
is_right_vec = torch.ones(joints.shape[0])
pose_converter = PoseConverter()
palm_align_kps_local_cs, glo_rot_right = transform_to_canonical(joints.double(), is_right=is_right_vec)
palm_align_kps_local_cs_nasa_axes, swap_axes_mat = change_axes(palm_align_kps_local_cs)
rot_then_swap_mat = torch.matmul(swap_axes_mat.unsqueeze(0), glo_rot_right.float()).unsqueeze(0)
trans_mat_pc, _ = pose_converter(palm_align_kps_local_cs_nasa_axes, is_right_vec)
trans_mat_pc = convert_joints(trans_mat_pc, source='biomech', target='halo')
joints_for_nasa_input = [0, 2, 3, 17, 5, 6, 18, 8, 9, 20, 11, 12, 19, 14, 15, 16]
trans_mat_pc = trans_mat_pc[:, joints_for_nasa_input]
org_joints = torch.matmul(rot_then_swap_mat.squeeze(), xyz_to_xyz1(torch.Tensor(org_joints)).unsqueeze(-1))[:, :3, 0]
bone_lengths = torch.Tensor(get_bone_lengths(org_joints)).squeeze()
trans_mat_pc_all = trans_mat_pc
unpose_mat = scale_halo_trans_mat(trans_mat_pc_all)
scale_mat = torch.eye(4) * scale
scale_mat[3, 3] = 1.
unpose_mat = torch.matmul(unpose_mat, scale_mat.double()).squeeze()
return unpose_mat, torch.Tensor(bone_lengths).squeeze(0), rot_then_swap_mat.squeeze()
class KptsRefSampleHandDataset(data.Dataset):
def __init__(self, data_path, anno_path, input_helpers, split=None,
no_except=True, transforms=None, subset=1, subset_idx=0):
assert split in ['train', 'test', 'val']
self.split = split
self.subset = subset
self.subset_idx = subset_idx
self.input_helpers = input_helpers
self.no_except = no_except
self.transforms = transforms
self.data_path = data_path
self.anno_path = anno_path
self.size = len(glob(os.path.join(data_path, split, 'anno', '*.pkl')))
self.normalize_img = torchvision.transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
with open(os.path.join(anno_path, split, 'InterHand2.6M_%s_MANO_NeuralAnnot.json' % split)) as f:
self.annot_file = json.load(f)
self.right_mano_layer = ManoLayer(
mano_root='/workspace/mano_v1_2/models', use_pca=False, ncomps=45, flat_hand_mean=False)
self.left_mano_layer = ManoLayer(
mano_root='/workspace/mano_v1_2/models', use_pca=False, ncomps=45, flat_hand_mean=False, side='left')
def __len__(self):
return self.size // self.subset
def __getitem__(self, idx):
idx = idx * self.subset + self.subset_idx
img_path = os.path.join(self.data_path, self.split, 'img', '{}.jpg'.format(idx))
        img = cv.imread(img_path)
img = cv.resize(img, (IMG_SIZE, IMG_SIZE))
imgTensor = torch.tensor(cv.cvtColor(img, cv.COLOR_BGR2RGB), dtype=torch.float32) / 255
imgTensor = imgTensor.permute(2, 0, 1)
imgTensor = self.normalize_img(imgTensor)
with open(os.path.join(self.data_path, self.split, 'anno', '{}.pkl'.format(idx)), 'rb') as file:
data = pickle.load(file)
pred_left_joints = np.asarray(o3d.io.read_point_cloud(os.path.join(self.data_path, self.split, 'pred_joints_before_ref', '%07d_left.ply' % idx)).points)
pred_right_joints = np.asarray(o3d.io.read_point_cloud(os.path.join(self.data_path, self.split, 'pred_joints_before_ref', '%07d_right.ply' % idx)).points)
gt_left_joints = np.asarray(o3d.io.read_point_cloud(os.path.join(self.data_path, self.split, 'gt_joints', '%07d_left.ply' % idx)).points)
gt_right_joints = np.asarray(o3d.io.read_point_cloud(os.path.join(self.data_path, self.split, 'gt_joints', '%07d_right.ply' % idx)).points)
camera_params = {}
camera_params['R'] = data['camera']['R']
camera_params['T'] = data['camera']['t']
camera_params['camera'] = data['camera']['camera']
capture_idx = data['image']['capture']
frame_idx = data['image']['frame_idx']
seq_name = data['image']['seq_name']
split_path = os.path.join(self.anno_path, self.split)
mano_data = {'right': {}, 'left': {}}
for side in ['right', 'left']:
for field_name, input_helper in self.input_helpers.items():
try:
model = '%s_%s_%s' % (capture_idx, frame_idx, side) # !!! TO BE MODIFIED
field_data = input_helper.load(split_path, model)
except Exception:
if self.no_except:
                        logger.warning(
                            'Error occurred when loading field %s of model %s'
                            % (field_name, model)
)
return None
else:
raise
if isinstance(field_data, dict):
for k, v in field_data.items():
if k is None:
mano_data[side][field_name] = v
elif field_name == 'inputs':
mano_data[side][k] = v
else:
mano_data[side]['%s.%s' % (field_name, k)] = v
else:
mano_data[side][field_name] = field_data
camera_params['right_root_xyz'] = mano_data['right']['root_xyz']
camera_params['left_root_xyz'] = mano_data['left']['root_xyz']
if self.transforms is not None:
for side in ['right', 'left']:
for tran_name, tran in self.transforms.items():
mano_data[side] = tran(mano_data[side])
mano_data[side]['idx'] = idx
left_inputs, left_bone_lengths, left_root_rot_mat = preprocess_joints(pred_left_joints, side='left')
right_inputs, right_bone_lengths, right_root_rot_mat = preprocess_joints(pred_right_joints)
# preprocess gt joints
permute_mat = [0, 5, 6, 7, 9, 10, 11, 17, 18, 19, 13, 14, 15, 1, 2, 3, 4, 8, 12, 16, 20]
pred_left_joints -= pred_left_joints[0]
pred_left_joints = pred_left_joints[permute_mat]
pred_left_joints *= [-1., 1., 1.]
pred_right_joints -= pred_right_joints[0]
pred_right_joints = pred_right_joints[permute_mat]
gt_left_joints -= gt_left_joints[0]
gt_left_joints = gt_left_joints[permute_mat]
gt_left_joints *= [-1., 1., 1.]
gt_right_joints -= gt_right_joints[0]
gt_right_joints = gt_right_joints[permute_mat]
#gt_left_joints = torch.matmul(left_root_rot_mat.squeeze().cpu(), xyz_to_xyz1(torch.Tensor(gt_left_joints).cpu()).unsqueeze(-1))[:, :3, 0]
#gt_right_joints = torch.matmul(right_root_rot_mat.squeeze().cpu(), xyz_to_xyz1(torch.Tensor(gt_right_joints).cpu()).unsqueeze(-1))[:, :3, 0]
mano_data['left']['pred_joints'] = pred_left_joints
mano_data['left']['root_rot_mat'] = left_root_rot_mat
mano_data['left']['joints'] = gt_left_joints
mano_data['right']['pred_joints'] = pred_right_joints
mano_data['right']['root_rot_mat'] = right_root_rot_mat
mano_data['right']['joints'] = gt_right_joints
camera_params['img_path'] = img_path
return imgTensor, camera_params, mano_data, idx
| 8,990 | 37.75431 | 162 | py |
Im2Hands | Im2Hands-main/artihand/data/transforms.py | import numpy as np
import torch
# Transforms
class PointcloudNoise(object):
''' Point cloud noise transformation class.
It adds noise to point cloud data.
Args:
stddev (int): standard deviation
'''
def __init__(self, stddev):
self.stddev = stddev
def __call__(self, data):
''' Calls the transformation.
Args:
data (dictionary): data dictionary
'''
data_out = data.copy()
points = data[None]
noise = self.stddev * np.random.randn(*points.shape)
noise = noise.astype(np.float32)
data_out[None] = points + noise
return data_out
class SubsamplePointcloud(object):
''' Point cloud subsampling transformation class.
It subsamples the point cloud data.
Args:
N (int): number of points to be subsampled
'''
def __init__(self, N):
self.N = N
def __call__(self, data):
''' Calls the transformation.
Args:
data (dict): data dictionary
'''
data_out = data.copy()
points = data['points']
normals = data['normals']
indices = np.random.randint(points.shape[0], size=self.N)
data_out['points'] = points[indices, :]
data_out['normals'] = normals[indices, :]
return data_out
class SubsampleOffPoint(object):
''' Point subsampling transformation class.
It subsamples the points that are not on the surface.
Args:
N (int): number of points to be subsampled
'''
def __init__(self, N):
self.N = N
def __call__(self, data):
''' Calls the transformation.
Args:
data (dict): data dictionary
'''
data_out = data.copy()
off_points = data['off_points']
indices = np.random.randint(off_points.shape[0], size=self.N)
data_out['off_points'] = off_points[indices, :]
return data_out
class SampleOffPoint(object):
    ''' Transformation class for sampling off-surface points inside the bounding box.
    It samples off-surface points uniformly in [-0.5, 0.5]^3.
    Args:
        N (int): number of off-surface points to sample
'''
def __init__(self, N):
self.N = N
def __call__(self, data):
''' Calls the transformation.
Args:
data (dict): data dictionary
'''
data_out = data.copy()
off_surface_coords = np.random.uniform(-0.5, 0.5, size=(self.N, 3))
data_out['off_points'] = torch.from_numpy(off_surface_coords).float()
return data_out
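# Hedged usage sketch (illustrative addition): shows the dictionary contract
# of SampleOffPoint; the transform leaves existing keys untouched and adds
# 'off_points' sampled uniformly in the unit cube centred at the origin.
def _demo_sample_off_point():
    tran = SampleOffPoint(N=128)
    data_out = tran({'points': np.zeros((10, 3), dtype=np.float32)})
    assert data_out['off_points'].shape == (128, 3)
    return data_out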
class SubsamplePoints(object):
''' Points subsampling transformation class.
It subsamples the points data.
Args:
N (int): number of points to be subsampled
'''
def __init__(self, N):
self.N = N
def __call__(self, data):
''' Calls the transformation.
Args:
data (dictionary): data dictionary
'''
points = data['points']
occ = data['occ']
data_out = data.copy()
if isinstance(self.N, int):
idx = np.random.randint(points.shape[0], size=self.N)
data_out.update({
'points': points[idx, :],
'occ': occ[idx],
})
else:
Nt_out, Nt_in = self.N
occ_binary = (occ >= 0.5)
points0 = points[~occ_binary]
points1 = points[occ_binary]
idx0 = np.random.randint(points0.shape[0], size=Nt_out)
idx1 = np.random.randint(points1.shape[0], size=Nt_in)
points0 = points0[idx0, :]
points1 = points1[idx1, :]
points = np.concatenate([points0, points1], axis=0)
occ0 = np.zeros(Nt_out, dtype=np.float32)
occ1 = np.ones(Nt_in, dtype=np.float32)
occ = np.concatenate([occ0, occ1], axis=0)
volume = occ_binary.sum() / len(occ_binary)
volume = volume.astype(np.float32)
data_out.update({
'points': points,
'occ': occ,
'volume': volume,
})
return data_out
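# Hedged usage sketch (illustrative addition): with an (N_out, N_in) tuple,
# SubsamplePoints draws fixed numbers of outside and inside points and adds
# the occupied volume fraction, as implemented above.
def _demo_subsample_points():
    rng = np.random.RandomState(0)
    data_in = {
        'points': rng.uniform(-0.5, 0.5, size=(1024, 3)).astype(np.float32),
        'occ': (rng.rand(1024) > 0.5).astype(np.float32),
    }
    data_out = SubsamplePoints((64, 64))(data_in)
    assert data_out['points'].shape == (128, 3)
    assert data_out['occ'].shape == (128,)
    return data_out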
class SubsampleMeshVerts(object):
''' Mesh vertices subsampling transformation class.
It subsamples the mesh vertices data along with theirs labels.
Args:
N (int): number of vertices to be subsampled
'''
def __init__(self, N):
self.N = N
def __call__(self, data):
''' Calls the transformation.
Args:
data (dict): data dictionary
'''
data_out = data.copy()
mesh_verts = data['mesh_verts']
mesh_vert_labels = data['mesh_vert_labels']
idx = np.random.randint(mesh_verts.shape[0], size=self.N)
data_out.update({
'mesh_verts': mesh_verts[idx, :],
'mesh_vert_labels': mesh_vert_labels[idx],
})
return data_out
class ReshapeOcc(object):
''' Occupancy vector transformation class.
    It reshapes the occupancy vector.
Args:
N (int): number of vertices to be subsampled
'''
def __init__(self, N):
self.N = N
def __call__(self, data):
''' Calls the transformation.
Args:
data (dict): data dictionary
'''
data_out = data.copy()
mesh_verts = data['mesh_verts']
mesh_vert_labels = data['mesh_vert_labels']
idx = np.random.randint(mesh_verts.shape[0], size=self.N)
data_out.update({
'mesh_verts': mesh_verts[idx, :],
'mesh_vert_labels': mesh_vert_labels[idx],
})
        return data_out
| 5,631 | 24.6 | 84 | py |
Im2Hands | Im2Hands-main/im2mesh/checkpoints.py | import os
import urllib
import torch
from torch.utils import model_zoo
class CheckpointIO(object):
''' CheckpointIO class.
It handles saving and loading checkpoints.
Args:
checkpoint_dir (str): path where checkpoints are saved
'''
def __init__(self, checkpoint_dir='./chkpts', **kwargs):
self.module_dict = kwargs
self.checkpoint_dir = checkpoint_dir
if not os.path.exists(checkpoint_dir):
os.makedirs(checkpoint_dir)
def register_modules(self, **kwargs):
''' Registers modules in current module dictionary.
'''
self.module_dict.update(kwargs)
def save(self, filename, **kwargs):
''' Saves the current module dictionary.
Args:
filename (str): name of output file
'''
if not os.path.isabs(filename):
filename = os.path.join(self.checkpoint_dir, filename)
outdict = kwargs
for k, v in self.module_dict.items():
outdict[k] = v.state_dict()
torch.save(outdict, filename)
def load(self, filename):
'''Loads a module dictionary from local file or url.
Args:
filename (str): name of saved module dictionary
'''
if is_url(filename):
return self.load_url(filename)
else:
return self.load_file(filename)
def load_file(self, filename):
'''Loads a module dictionary from file.
Args:
filename (str): name of saved module dictionary
'''
if not os.path.isabs(filename):
filename = os.path.join(self.checkpoint_dir, filename)
if os.path.exists(filename):
print(filename)
print('=> Loading checkpoint from local file...')
state_dict = torch.load(filename)
scalars = self.parse_state_dict(state_dict)
return scalars
else:
            raise FileNotFoundError(filename)
def load_url(self, url):
'''Load a module dictionary from url.
Args:
url (str): url to saved model
'''
print(url)
print('=> Loading checkpoint from url...')
state_dict = model_zoo.load_url(url, progress=True)
scalars = self.parse_state_dict(state_dict)
return scalars
def parse_state_dict(self, state_dict):
'''Parse state_dict of model and return scalars.
Args:
state_dict (dict): State dict of model
'''
for k, v in self.module_dict.items():
if k in state_dict:
v.load_state_dict(state_dict[k])
else:
print('Warning: Could not find %s in checkpoint!' % k)
scalars = {k: v for k, v in state_dict.items()
if k not in self.module_dict}
return scalars
def is_url(url):
scheme = urllib.parse.urlparse(url).scheme
return scheme in ('http', 'https')
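# Hedged usage sketch (illustrative addition): a typical save/load round
# trip; the file name and the `model`/`optimizer` keyword names are
# placeholders chosen for this example.
def _demo_checkpoint_io(model, optimizer, checkpoint_dir='./chkpts'):
    checkpoint_io = CheckpointIO(checkpoint_dir, model=model,
                                 optimizer=optimizer)
    checkpoint_io.save('model.pt', epoch_it=0, it=0)
    scalars = checkpoint_io.load('model.pt')  # returns {'epoch_it': 0, 'it': 0}
    return scalars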
| 2,963 | 28.346535 | 70 | py |
Im2Hands | Im2Hands-main/im2mesh/training.py | # from im2mesh import icp
import numpy as np
from collections import defaultdict
from tqdm import tqdm
class BaseTrainer(object):
''' Base trainer class.
'''
def evaluate(self, val_loader):
''' Performs an evaluation.
Args:
val_loader (dataloader): pytorch dataloader
'''
eval_list = defaultdict(list)
for data in tqdm(val_loader):
eval_step_dict = self.eval_step(data)
for k, v in eval_step_dict.items():
eval_list[k].append(v)
eval_dict = {k: np.mean(v) for k, v in eval_list.items()}
return eval_dict
def train_step(self, *args, **kwargs):
''' Performs a training step.
'''
raise NotImplementedError
def eval_step(self, *args, **kwargs):
''' Performs an evaluation step.
'''
raise NotImplementedError
def visualize(self, *args, **kwargs):
''' Performs visualization.
'''
raise NotImplementedError
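# Hedged minimal subclass sketch (illustrative addition): BaseTrainer only
# needs eval_step to return a dict of scalars; evaluate() then averages each
# entry over the validation loader. `_DemoTrainer` is not an original API.
class _DemoTrainer(BaseTrainer):
    def train_step(self, data):
        return 0.0

    def eval_step(self, data):
        return {'loss': 0.0}

    def visualize(self, data):
        pass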
| 1,014 | 23.756098 | 65 | py |
Im2Hands | Im2Hands-main/im2mesh/layers.py | import torch
import torch.nn as nn
# Resnet Blocks
class ResnetBlockFC(nn.Module):
''' Fully connected ResNet Block class.
Args:
size_in (int): input dimension
size_out (int): output dimension
size_h (int): hidden dimension
'''
def __init__(self, size_in, size_out=None, size_h=None):
super().__init__()
# Attributes
if size_out is None:
size_out = size_in
if size_h is None:
size_h = min(size_in, size_out)
self.size_in = size_in
self.size_h = size_h
self.size_out = size_out
# Submodules
self.fc_0 = nn.Linear(size_in, size_h)
self.fc_1 = nn.Linear(size_h, size_out)
self.actvn = nn.ReLU()
if size_in == size_out:
self.shortcut = None
else:
self.shortcut = nn.Linear(size_in, size_out, bias=False)
# Initialization
nn.init.zeros_(self.fc_1.weight)
def forward(self, x):
net = self.fc_0(self.actvn(x))
dx = self.fc_1(self.actvn(net))
if self.shortcut is not None:
x_s = self.shortcut(x)
else:
x_s = x
return x_s + dx
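# Hedged usage sketch (illustrative addition): the block maps size_in to
# size_out and learns a linear shortcut whenever the two sizes differ.
def _demo_resnet_block_fc():
    block = ResnetBlockFC(size_in=32, size_out=64)
    out = block(torch.randn(8, 32))
    assert out.shape == (8, 64)
    return out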
class CResnetBlockConv1d(nn.Module):
''' Conditional batch normalization-based Resnet block class.
Args:
        c_dim (int): dimension of latent conditioned code c
size_in (int): input dimension
size_out (int): output dimension
size_h (int): hidden dimension
norm_method (str): normalization method
legacy (bool): whether to use legacy blocks
'''
def __init__(self, c_dim, size_in, size_h=None, size_out=None,
norm_method='batch_norm', legacy=False):
super().__init__()
# Attributes
if size_h is None:
size_h = size_in
if size_out is None:
size_out = size_in
self.size_in = size_in
self.size_h = size_h
self.size_out = size_out
# Submodules
if not legacy:
self.bn_0 = CBatchNorm1d(
c_dim, size_in, norm_method=norm_method)
self.bn_1 = CBatchNorm1d(
c_dim, size_h, norm_method=norm_method)
else:
self.bn_0 = CBatchNorm1d_legacy(
c_dim, size_in, norm_method=norm_method)
self.bn_1 = CBatchNorm1d_legacy(
c_dim, size_h, norm_method=norm_method)
self.fc_0 = nn.Conv1d(size_in, size_h, 1)
self.fc_1 = nn.Conv1d(size_h, size_out, 1)
self.actvn = nn.ReLU()
if size_in == size_out:
self.shortcut = None
else:
self.shortcut = nn.Conv1d(size_in, size_out, 1, bias=False)
# Initialization
nn.init.zeros_(self.fc_1.weight)
def forward(self, x, c):
net = self.fc_0(self.actvn(self.bn_0(x, c)))
dx = self.fc_1(self.actvn(self.bn_1(net, c)))
if self.shortcut is not None:
x_s = self.shortcut(x)
else:
x_s = x
return x_s + dx
class ResnetBlockConv1d(nn.Module):
''' 1D-Convolutional ResNet block class.
Args:
size_in (int): input dimension
size_out (int): output dimension
size_h (int): hidden dimension
'''
def __init__(self, size_in, size_h=None, size_out=None):
super().__init__()
# Attributes
if size_h is None:
size_h = size_in
if size_out is None:
size_out = size_in
self.size_in = size_in
self.size_h = size_h
self.size_out = size_out
# Submodules
self.bn_0 = nn.BatchNorm1d(size_in)
self.bn_1 = nn.BatchNorm1d(size_h)
self.fc_0 = nn.Conv1d(size_in, size_h, 1)
self.fc_1 = nn.Conv1d(size_h, size_out, 1)
self.actvn = nn.ReLU()
if size_in == size_out:
self.shortcut = None
else:
self.shortcut = nn.Conv1d(size_in, size_out, 1, bias=False)
# Initialization
nn.init.zeros_(self.fc_1.weight)
def forward(self, x):
net = self.fc_0(self.actvn(self.bn_0(x)))
dx = self.fc_1(self.actvn(self.bn_1(net)))
if self.shortcut is not None:
x_s = self.shortcut(x)
else:
x_s = x
return x_s + dx
# Utility modules
class AffineLayer(nn.Module):
''' Affine layer class.
Args:
c_dim (tensor): dimension of latent conditioned code c
dim (int): input dimension
'''
def __init__(self, c_dim, dim=3):
super().__init__()
self.c_dim = c_dim
self.dim = dim
# Submodules
self.fc_A = nn.Linear(c_dim, dim * dim)
self.fc_b = nn.Linear(c_dim, dim)
self.reset_parameters()
def reset_parameters(self):
nn.init.zeros_(self.fc_A.weight)
nn.init.zeros_(self.fc_b.weight)
with torch.no_grad():
self.fc_A.bias.copy_(torch.eye(3).view(-1))
self.fc_b.bias.copy_(torch.tensor([0., 0., 2.]))
def forward(self, x, p):
assert(x.size(0) == p.size(0))
assert(p.size(2) == self.dim)
batch_size = x.size(0)
A = self.fc_A(x).view(batch_size, 3, 3)
b = self.fc_b(x).view(batch_size, 1, 3)
out = p @ A + b
return out
class CBatchNorm1d(nn.Module):
''' Conditional batch normalization layer class.
Args:
c_dim (int): dimension of latent conditioned code c
f_dim (int): feature dimension
norm_method (str): normalization method
'''
def __init__(self, c_dim, f_dim, norm_method='batch_norm'):
super().__init__()
self.c_dim = c_dim
self.f_dim = f_dim
self.norm_method = norm_method
# Submodules
self.conv_gamma = nn.Conv1d(c_dim, f_dim, 1)
self.conv_beta = nn.Conv1d(c_dim, f_dim, 1)
if norm_method == 'batch_norm':
self.bn = nn.BatchNorm1d(f_dim, affine=False)
elif norm_method == 'instance_norm':
self.bn = nn.InstanceNorm1d(f_dim, affine=False)
        elif norm_method == 'group_norm':
            # PyTorch has no nn.GroupNorm1d; a single-group nn.GroupNorm is assumed here
            self.bn = nn.GroupNorm(1, f_dim, affine=False)
else:
raise ValueError('Invalid normalization method!')
self.reset_parameters()
def reset_parameters(self):
nn.init.zeros_(self.conv_gamma.weight)
nn.init.zeros_(self.conv_beta.weight)
nn.init.ones_(self.conv_gamma.bias)
nn.init.zeros_(self.conv_beta.bias)
def forward(self, x, c):
assert(x.size(0) == c.size(0))
assert(c.size(1) == self.c_dim)
# c is assumed to be of size batch_size x c_dim x T
if len(c.size()) == 2:
c = c.unsqueeze(2)
# Affine mapping
gamma = self.conv_gamma(c)
beta = self.conv_beta(c)
# Batchnorm
net = self.bn(x)
out = gamma * net + beta
return out
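# Hedged usage sketch (illustrative addition): features are
# (batch, f_dim, T) and the conditioning code is (batch, c_dim); gamma and
# beta are predicted from the code via 1x1 convolutions.
def _demo_cbatchnorm1d():
    cbn = CBatchNorm1d(c_dim=128, f_dim=256)
    x = torch.randn(8, 256, 100)
    c = torch.randn(8, 128)
    out = cbn(x, c)
    assert out.shape == x.shape
    return out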
class CBatchNorm1d_legacy(nn.Module):
''' Conditional batch normalization legacy layer class.
Args:
c_dim (int): dimension of latent conditioned code c
f_dim (int): feature dimension
norm_method (str): normalization method
'''
def __init__(self, c_dim, f_dim, norm_method='batch_norm'):
super().__init__()
self.c_dim = c_dim
self.f_dim = f_dim
self.norm_method = norm_method
# Submodules
self.fc_gamma = nn.Linear(c_dim, f_dim)
self.fc_beta = nn.Linear(c_dim, f_dim)
if norm_method == 'batch_norm':
self.bn = nn.BatchNorm1d(f_dim, affine=False)
elif norm_method == 'instance_norm':
self.bn = nn.InstanceNorm1d(f_dim, affine=False)
        elif norm_method == 'group_norm':
            # PyTorch has no nn.GroupNorm1d; a single-group nn.GroupNorm is assumed here
            self.bn = nn.GroupNorm(1, f_dim, affine=False)
else:
raise ValueError('Invalid normalization method!')
self.reset_parameters()
def reset_parameters(self):
nn.init.zeros_(self.fc_gamma.weight)
nn.init.zeros_(self.fc_beta.weight)
nn.init.ones_(self.fc_gamma.bias)
nn.init.zeros_(self.fc_beta.bias)
def forward(self, x, c):
batch_size = x.size(0)
# Affine mapping
gamma = self.fc_gamma(c)
beta = self.fc_beta(c)
gamma = gamma.view(batch_size, self.f_dim, 1)
beta = beta.view(batch_size, self.f_dim, 1)
# Batchnorm
net = self.bn(x)
out = gamma * net + beta
return out
| 8,471 | 28.213793 | 71 | py |
Im2Hands | Im2Hands-main/im2mesh/config.py | import yaml
from torchvision import transforms
from im2mesh import data
from im2mesh import onet, r2n2, psgn, pix2mesh, dmc
from im2mesh import preprocess
method_dict = {
'onet': onet,
'r2n2': r2n2,
'psgn': psgn,
'pix2mesh': pix2mesh,
'dmc': dmc,
}
# General config
def load_config(path, default_path=None):
''' Loads config file.
Args:
path (str): path to config file
        default_path (str): path to a default config file used as fallback
'''
# Load configuration from file itself
with open(path, 'r') as f:
        # yaml.load without an explicit Loader is deprecated and unsafe
        cfg_special = yaml.load(f, Loader=yaml.FullLoader)
# Check if we should inherit from a config
inherit_from = cfg_special.get('inherit_from')
# If yes, load this config first as default
# If no, use the default_path
if inherit_from is not None:
cfg = load_config(inherit_from, default_path)
elif default_path is not None:
with open(default_path, 'r') as f:
            cfg = yaml.load(f, Loader=yaml.FullLoader)
else:
cfg = dict()
# Include main configuration
update_recursive(cfg, cfg_special)
return cfg
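# A minimal usage sketch for load_config (illustrative; the file names and
# keys below are assumptions, not files shipped with the repo):
#
#   # default.yaml:      method: onet
#   #                    data: {path: data/ShapeNet}
#   # experiment.yaml:   inherit_from: default.yaml
#   #                    data: {path: data/custom}
#   cfg = load_config('experiment.yaml', default_path='default.yaml')
#
# Here cfg['method'] is inherited from default.yaml while cfg['data']['path']
# is overridden to 'data/custom' by the experiment config.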
def update_recursive(dict1, dict2):
    ''' Updates dict1 with the entries of dict2, recursing into nested
    dictionaries.

    Args:
        dict1 (dict): dictionary to be updated in place
        dict2 (dict): dictionary whose entries take precedence
    '''
    for k, v in dict2.items():
        if isinstance(v, dict):
            # Create (or reset) the nested dict only when recursing into one;
            # this also guards against dict1[k] holding a scalar
            if not isinstance(dict1.get(k), dict):
                dict1[k] = dict()
            update_recursive(dict1[k], v)
        else:
            dict1[k] = v
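# A worked example of update_recursive (illustrative):
#
#   d1 = {'model': {'dim': 3, 'hidden': 256}, 'lr': 1e-4}
#   d2 = {'model': {'hidden': 128}}
#   update_recursive(d1, d2)
#   # d1 == {'model': {'dim': 3, 'hidden': 128}, 'lr': 1e-4}
#
# Nested dictionaries are merged key by key; scalar values are overwritten.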
# Models
def get_model(cfg, device=None, dataset=None):
''' Returns the model instance.
Args:
cfg (dict): config dictionary
device (device): pytorch device
dataset (dataset): dataset
'''
method = cfg['method']
model = method_dict[method].config.get_model(
cfg, device=device, dataset=dataset)
return model
# Trainer
def get_trainer(model, optimizer, cfg, device):
''' Returns a trainer instance.
Args:
model (nn.Module): the model which is used
optimizer (optimizer): pytorch optimizer
cfg (dict): config dictionary
device (device): pytorch device
'''
method = cfg['method']
trainer = method_dict[method].config.get_trainer(
model, optimizer, cfg, device)
return trainer
# Generator for final mesh extraction
def get_generator(model, cfg, device):
''' Returns a generator instance.
Args:
model (nn.Module): the model which is used
cfg (dict): config dictionary
device (device): pytorch device
'''
method = cfg['method']
generator = method_dict[method].config.get_generator(model, cfg, device)
return generator
# Datasets
def get_dataset(mode, cfg, return_idx=False, return_category=False):
    ''' Returns the dataset.

    Args:
        mode (str): which split to use ('train', 'val' or 'test')
        cfg (dict): config dictionary
        return_idx (bool): whether to include an index field
        return_category (bool): whether to include a category field
    '''
method = cfg['method']
dataset_type = cfg['data']['dataset']
dataset_folder = cfg['data']['path']
categories = cfg['data']['classes']
# Get split
splits = {
'train': cfg['data']['train_split'],
'val': cfg['data']['val_split'],
'test': cfg['data']['test_split'],
}
split = splits[mode]
# Create dataset
if dataset_type == 'Shapes3D':
# Dataset fields
# Method specific fields (usually correspond to output)
fields = method_dict[method].config.get_data_fields(mode, cfg)
# Input fields
inputs_field = get_inputs_field(mode, cfg)
if inputs_field is not None:
fields['inputs'] = inputs_field
if return_idx:
fields['idx'] = data.IndexField()
if return_category:
fields['category'] = data.CategoryField()
dataset = data.Shapes3dDataset(
dataset_folder, fields,
split=split,
categories=categories,
)
elif dataset_type == 'kitti':
dataset = data.KittiDataset(
dataset_folder, img_size=cfg['data']['img_size'],
return_idx=return_idx
)
elif dataset_type == 'online_products':
dataset = data.OnlineProductDataset(
dataset_folder, img_size=cfg['data']['img_size'],
classes=cfg['data']['classes'],
max_number_imgs=cfg['generation']['max_number_imgs'],
return_idx=return_idx, return_category=return_category
)
elif dataset_type == 'images':
dataset = data.ImageDataset(
dataset_folder, img_size=cfg['data']['img_size'],
return_idx=return_idx,
)
else:
raise ValueError('Invalid dataset "%s"' % cfg['data']['dataset'])
return dataset
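# A minimal usage sketch for get_dataset (illustrative; the config paths are
# assumptions, and `import torch` is needed in addition to the imports above):
#
#   cfg = load_config('configs/onet.yaml', 'configs/default.yaml')
#   train_dataset = get_dataset('train', cfg)
#   train_loader = torch.utils.data.DataLoader(
#       train_dataset, batch_size=64, shuffle=True, num_workers=4)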
def get_inputs_field(mode, cfg):
''' Returns the inputs fields.
Args:
mode (str): the mode which is used
cfg (dict): config dictionary
'''
input_type = cfg['data']['input_type']
if input_type is None:
inputs_field = None
elif input_type == 'img':
if mode == 'train' and cfg['data']['img_augment']:
resize_op = transforms.RandomResizedCrop(
cfg['data']['img_size'], (0.75, 1.), (1., 1.))
else:
            resize_op = transforms.Resize(cfg['data']['img_size'])
transform = transforms.Compose([
resize_op, transforms.ToTensor(),
])
with_camera = cfg['data']['img_with_camera']
        # Sample a random view during training, a fixed one otherwise
        random_view = (mode == 'train')
inputs_field = data.ImagesField(
cfg['data']['img_folder'], transform,
with_camera=with_camera, random_view=random_view
)
elif input_type == 'pointcloud':
transform = transforms.Compose([
data.SubsamplePointcloud(cfg['data']['pointcloud_n']),
data.PointcloudNoise(cfg['data']['pointcloud_noise'])
])
with_transforms = cfg['data']['with_transforms']
inputs_field = data.PointCloudField(
cfg['data']['pointcloud_file'], transform,
with_transforms=with_transforms
)
elif input_type == 'voxels':
inputs_field = data.VoxelsField(
cfg['data']['voxels_file']
)
elif input_type == 'idx':
inputs_field = data.IndexField()
else:
raise ValueError(
'Invalid input type (%s)' % input_type)
return inputs_field
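# The config keys read by get_inputs_field depend on the input type; for
# input_type == 'pointcloud', for instance, it expects roughly the following
# (assumed from the code above, not an exhaustive schema):
#
#   data:
#     input_type: pointcloud
#     pointcloud_n: 2048
#     pointcloud_noise: 0.005
#     pointcloud_file: pointcloud.npz
#     with_transforms: false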
def get_preprocessor(cfg, dataset=None, device=None):
''' Returns preprocessor instance.
Args:
cfg (dict): config dictionary
dataset (dataset): dataset
device (device): pytorch device
'''
p_type = cfg['preprocessor']['type']
cfg_path = cfg['preprocessor']['config']
model_file = cfg['preprocessor']['model_file']
if p_type == 'psgn':
preprocessor = preprocess.PSGNPreprocessor(
cfg_path=cfg_path,
pointcloud_n=cfg['data']['pointcloud_n'],
dataset=dataset,
device=device,
model_file=model_file,
)
elif p_type is None:
preprocessor = None
else:
raise ValueError('Invalid Preprocessor %s' % p_type)
return preprocessor
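# A minimal end-to-end sketch of how these helpers compose (illustrative;
# the config paths and hyperparameters are assumptions):
#
#   import torch
#   cfg = load_config('configs/onet.yaml', 'configs/default.yaml')
#   device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
#   dataset = get_dataset('train', cfg)
#   model = get_model(cfg, device=device, dataset=dataset)
#   optimizer = torch.optim.Adam(model.parameters(), lr=1e-4)
#   trainer = get_trainer(model, optimizer, cfg, device)
#   generator = get_generator(model, cfg, device)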
| 7,343 | 27.355212 | 76 | py |