code stringlengths 17 6.64M |
|---|
def patch_replication_callback(data_parallel):
    """Monkey-patch an existing `DataParallel` object so that every
    replication also triggers the synchronization callbacks.

    Useful when you have a customized `DataParallel` implementation.

    Examples:
        > sync_bn = SynchronizedBatchNorm1d(10, eps=1e-5, affine=False)
        > sync_bn = DataParallel(sync_bn, device_ids=[0, 1])
        > patch_replication_callback(sync_bn)
        # this is equivalent to
        > sync_bn = SynchronizedBatchNorm1d(10, eps=1e-5, affine=False)
        > sync_bn = DataParallelWithCallback(sync_bn, device_ids=[0, 1])
    """
    assert isinstance(data_parallel, DataParallel)

    original_replicate = data_parallel.replicate

    @functools.wraps(original_replicate)
    def replicate_with_callback(module, device_ids):
        # Replicate as before, then notify the replicas.
        replicas = original_replicate(module, device_ids)
        execute_replication_callbacks(replicas)
        return replicas

    data_parallel.replicate = replicate_with_callback
|
def as_numpy(v):
    """Convert a tensor or autograd ``Variable`` to a CPU numpy array."""
    tensor = v.data if isinstance(v, Variable) else v
    return tensor.cpu().numpy()
|
class TorchTestCase(unittest.TestCase):
    """Test-case base class with a tensor closeness assertion helper."""

    def assertTensorClose(self, a, b, atol=0.001, rtol=0.001):
        """Assert that tensors ``a`` and ``b`` are element-wise close.

        Args:
            a, b: tensors (or Variables) to compare.
            atol: absolute tolerance forwarded to ``np.allclose``.
            rtol: relative tolerance forwarded to ``np.allclose``.
        """
        (npa, npb) = (as_numpy(a), as_numpy(b))
        # BUGFIX: `rtol` was accepted but never forwarded to `np.allclose`
        # (which silently used its default of 1e-05); it is now honored.
        self.assertTrue(np.allclose(npa, npb, atol=atol, rtol=rtol), 'Tensor close check failed\n{}\n{}\nadiff={}, rdiff={}'.format(a, b, np.abs((npa - npb)).max(), np.abs(((npa - npb) / np.fmax(npa, 1e-05))).max()))
|
def index_dataset(dataset):
    """Group sample indices by their dataset label (e.g. pascal/atr/cihp).

    :param dataset: object with a ``datasets_lbl`` sequence and ``__len__``
    :return: dict mapping each label to the list of indices carrying it
    """
    indices_by_label = {}
    for idx in range(len(dataset)):
        label = dataset.datasets_lbl[idx]
        indices_by_label.setdefault(label, []).append(idx)
    return indices_by_label
|
def sample_from_class(dataset, class_id):
    """Return a uniformly random entry from ``dataset[class_id]``."""
    candidates = dataset[class_id]
    return candidates[random.randrange(len(candidates))]
|
def sampler_npair_K(batch_size, dataset, K=2, label_random_list=(0, 0, 1, 1, 2, 2, 2)):
    """Yield batches of sample indices drawn from randomly chosen classes.

    :param batch_size: number of indices per yielded batch
    :param dataset: dataset with per-sample labels (see ``index_dataset``)
    :param K: unused; kept for backward compatibility
    :param label_random_list: pool of class labels; a label's multiplicity
        controls its sampling probability
    :yields: lists of ``batch_size`` sample indices
    """
    # FIX: default changed from a mutable list literal to a tuple — the
    # shared-mutable-default pitfall; values and behavior are unchanged.
    images_by_class = index_dataset(dataset)
    num_batches = int(math.ceil(len(dataset) * 1.0 / batch_size))
    for _ in range(num_batches):
        example_indices = []
        for _ in range(batch_size):
            # Draw a class per element, then a sample from that class.
            class_label = label_random_list[random.randrange(len(label_random_list))]
            example_indices.append(sample_from_class(images_by_class, class_label))
        yield example_indices
|
def sampler_(images_by_class, batch_size, dataset, K=2, label_random_list=(0, 0, 1, 1)):
    """Sample ``batch_size`` indices, all drawn from one randomly chosen class.

    :param images_by_class: dict label -> list of indices (see ``index_dataset``)
    :param batch_size: number of indices to return
    :param dataset: unused; kept for backward compatibility
    :param K: unused; kept for backward compatibility
    :param label_random_list: pool of class labels; a label's multiplicity
        controls the probability of choosing that class
    :return: list of ``batch_size`` indices from a single class
    """
    # FIX: default changed from a mutable list literal to a tuple; also
    # dropped the dead `for class_label_ind in [a]` comprehension clause.
    chosen_label = label_random_list[random.randrange(len(label_random_list))]
    return [sample_from_class(images_by_class, chosen_label) for _ in range(batch_size)]
|
class cusSampler(torch.utils.data.sampler.Sampler):
    """Samples elements randomly from a given list of indices, without replacement.

    Arguments:
        dataset: dataset with per-sample labels (see ``index_dataset``)
        batchsize: number of indices produced per iteration
        label_random_list: pool of class labels; a label's multiplicity
            controls the probability of drawing that class
    """
    def __init__(self, dataset, batchsize, label_random_list=(0, 1, 1, 1, 2, 2, 2)):
        # FIX: default changed from a mutable list literal to a tuple.
        self.images_by_class = index_dataset(dataset)
        self.batch_size = batchsize
        self.dataset = dataset
        self.label_random_list = label_random_list
        # Nominal number of batches covering the dataset once.
        self.len = int(math.ceil(len(dataset) * 1.0 / batchsize))

    def __iter__(self):
        # BUGFIX: `label_random_list` used to be passed positionally into
        # the `K` slot of `sampler_` and was therefore silently ignored;
        # pass it by keyword so it is actually used.
        return iter(sampler_(self.images_by_class, self.batch_size, self.dataset,
                             label_random_list=self.label_random_list))

    def __len__(self):
        return self.len
|
def shuffle_cus(d1=20, d2=10, d3=5, batch=2):
    """Shuffle indices of three concatenated datasets while keeping every
    mini-batch drawn from a single dataset.

    The global index space is ``[0, d1 + d2 + d3)``: the first ``d1``
    indices belong to dataset 1, the next ``d2`` to dataset 2 and the last
    ``d3`` to dataset 3. Each group is shuffled internally, cut into
    chunks of ``batch`` indices, and the chunks are emitted in random
    order (trailing remainders that do not fill a chunk are dropped).
    """
    groups = [list(range(d1)),
              list(range(d1, d1 + d2)),
              list(range(d1 + d2, d1 + d2 + d3))]
    for group in groups:
        random.shuffle(group)
    chunk_counts = [d1 // batch, d2 // batch, d3 // batch]
    order = list(range(sum(chunk_counts)))
    random.shuffle(order)
    result = []
    for chunk_idx in order:
        # Map the global chunk index to (group, local chunk index).
        if chunk_idx < chunk_counts[0]:
            group, local = groups[0], chunk_idx
        elif chunk_idx < chunk_counts[0] + chunk_counts[1]:
            group, local = groups[1], chunk_idx - chunk_counts[0]
        else:
            group, local = groups[2], chunk_idx - chunk_counts[0] - chunk_counts[1]
        result += group[local * batch:(local + 1) * batch]
    return result
|
def shuffle_cus_balance(d1=20, d2=10, d3=5, batch=2, balance_index=1):
    """Like ``shuffle_cus`` but balances group sizes first.

    After each group is shuffled internally, every group other than
    ``balance_index`` is truncated so it is no longer than the balance
    group. Chunks of ``batch`` indices (each wholly from one group) are
    then emitted in random order; trailing remainders are dropped.
    """
    groups = [list(range(d1)),
              list(range(d1, d1 + d2)),
              list(range(d1 + d2, d1 + d2 + d3))]
    for group in groups:
        random.shuffle(group)
    # Truncate the non-balance groups down to the balance group's size.
    target_len = len(groups[balance_index])
    for idx in range(len(groups)):
        if idx != balance_index and len(groups[idx]) > target_len:
            groups[idx] = groups[idx][:target_len]
    chunk_counts = [len(group) // batch for group in groups]
    order = list(range(sum(chunk_counts)))
    random.shuffle(order)
    result = []
    for chunk_idx in order:
        # Map the global chunk index to (group, local chunk index).
        if chunk_idx < chunk_counts[0]:
            group, local = groups[0], chunk_idx
        elif chunk_idx < chunk_counts[0] + chunk_counts[1]:
            group, local = groups[1], chunk_idx - chunk_counts[0]
        else:
            group, local = groups[2], chunk_idx - chunk_counts[0] - chunk_counts[1]
        result += group[local * batch:(local + 1) * batch]
    return result
|
class Sampler_uni(torch.utils.data.sampler.Sampler):
    """Sampler yielding indices from ``shuffle_cus`` (or its balanced
    variant when ``balance_id`` is given) over three datasets of sizes
    ``num1``, ``num2`` and ``num3``.
    """
    def __init__(self, num1, num2, num3, batchsize, balance_id=None):
        self.num1 = num1
        self.num2 = num2
        self.num3 = num3
        self.batchsize = batchsize
        self.balance_id = balance_id

    def __iter__(self):
        if self.balance_id is None:
            indices = shuffle_cus(self.num1, self.num2, self.num3, self.batchsize)
        else:
            indices = shuffle_cus_balance(self.num1, self.num2, self.num3,
                                          self.batchsize, balance_index=self.balance_id)
        return iter(indices)

    def __len__(self):
        # NOTE(review): with balancing enabled the nominal length assumes
        # all three groups end up truncated to `num1` — confirm vs. usage.
        if self.balance_id is not None:
            return self.num1 * 3
        return self.num1 + self.num2 + self.num3
|
def main():
    """Entry point: evaluate predicted label images against ground truth."""
    image_paths, label_paths = init_path()
    show_result(compute_hist(image_paths, label_paths))
|
def init_path():
    """Collect prediction/label image paths for the validation id list.

    Reads one file id per line from the validation list and builds parallel
    lists of prediction and ground-truth PNG paths.

    :return: tuple ``(image_paths, label_paths)``
    """
    list_file = './human/list/val_id.txt'
    file_names = []
    # BUGFIX: the list file used to be opened in binary mode ('rb'), which
    # yields `bytes` lines and makes `file_name + '.png'` raise TypeError
    # on Python 3; open it as text instead.
    with open(list_file, 'r') as f:
        for fn in f:
            file_names.append(fn.strip())
    image_dir = './human/features/attention/val/results/'
    label_dir = './human/data/labels/'
    image_paths = []
    label_paths = []
    for file_name in file_names:
        image_paths.append(os.path.join(image_dir, (file_name + '.png')))
        label_paths.append(os.path.join(label_dir, (file_name + '.png')))
    return (image_paths, label_paths)
|
def fast_hist(lbl, pred, n_cls):
    """Accumulate a confusion matrix between label and prediction maps.

    :param lbl: ground-truth label array
    :param pred: predicted label array (same shape as ``lbl``)
    :param n_cls: number of classes
    :return: ``(n_cls, n_cls)`` confusion matrix; rows are ground truth,
        columns are predictions
    """
    valid = (lbl >= 0) & (lbl < n_cls)
    combined = n_cls * lbl[valid].astype(int) + pred[valid]
    return np.bincount(combined, minlength=n_cls ** 2).reshape(n_cls, n_cls)
|
def compute_hist(images, labels, n_cls=20):
    """Accumulate the confusion matrix over pairs of result/label images.

    :param images: paths to predicted label images
    :param labels: paths to ground-truth label images
    :param n_cls: number of classes
    :return: ``(n_cls, n_cls)`` summed confusion matrix
    """
    hist = np.zeros((n_cls, n_cls))
    for (img_path, label_path) in zip(images, labels):
        label = Image.open(label_path)
        label_array = np.array(label, dtype=np.int32)
        image = Image.open(img_path)
        image_array = np.array(image, dtype=np.int32)
        gtsz = label_array.shape
        imgsz = image_array.shape
        if (not (gtsz == imgsz)):
            # Resize the prediction to the ground-truth resolution.
            # BUGFIX: `Image.ANTIALIAS` was removed in Pillow 10.0;
            # `Image.LANCZOS` has been its documented equivalent since
            # Pillow 2.7. NOTE(review): interpolating label maps blends
            # class ids — NEAREST may be intended; confirm before changing.
            image = image.resize((gtsz[1], gtsz[0]), Image.LANCZOS)
            image_array = np.array(image, dtype=np.int32)
        hist += fast_hist(label_array, image_array, n_cls)
    return hist
|
def show_result(hist):
    """Print overall accuracy, per-class pixel accuracy, per-class IoU and
    the frequency-weighted accuracy from a confusion matrix (rows are
    ground truth, columns are predictions)."""
    classes = ['background', 'hat', 'hair', 'glove', 'sunglasses', 'upperclothes', 'dress', 'coat', 'socks', 'pants', 'jumpsuits', 'scarf', 'skirt', 'face', 'leftArm', 'rightArm', 'leftLeg', 'rightLeg', 'leftShoe', 'rightShoe']
    correct = np.diag(hist)
    gt_total = hist.sum(1)
    print('=' * 50)
    print('>>>', 'overall accuracy', correct.sum() / hist.sum())
    print('-' * 50)
    print('Accuracy for each class (pixel accuracy):')
    for i in range(20):
        print('%-15s: %f' % (classes[i], correct[i] / gt_total[i]))
    per_class_acc = correct / gt_total
    print('>>>', 'mean accuracy', np.nanmean(per_class_acc))
    print('-' * 50)
    iu = correct / (gt_total + hist.sum(0) - correct)
    for i in range(20):
        print('%-15s: %f' % (classes[i], iu[i]))
    print('>>>', 'mean IU', np.nanmean(iu))
    print('-' * 50)
    freq = gt_total / hist.sum()
    print('>>>', 'fwavacc', (freq[freq > 0] * iu[freq > 0]).sum())
    print('=' * 50)
|
def get_iou(pred, lbl, n_cls):
    """Compute mean IoU between batched prediction and label tensors.

    Tensors must live on the CPU.

    :param pred: predicted label tensor; first dim is the batch
    :param lbl: ground-truth label tensor; first dim is the batch
    :param n_cls: number of classes
    :return: mean IoU over classes (NaN classes ignored)
    """
    hist = np.zeros((n_cls, n_cls))
    for i, j in zip(range(pred.size(0)), range(lbl.size(0))):
        hist += fast_hist(lbl[j].data.numpy(), pred[i].data.numpy(), n_cls)
    correct = np.diag(hist)
    gt_total = hist.sum(1)
    iu = correct / (gt_total + hist.sum(0) - correct)
    miou = np.nanmean(iu)
    print('>>>', 'mean IU', miou)
    print('-' * 50)
    return miou
|
def get_iou_from_list(pred, lbl, n_cls):
    """Compute mean IoU from lists of CPU tensors, printing summary stats.

    :param pred: list of predicted label tensors
    :param lbl: list of ground-truth label tensors
    :param n_cls: number of classes
    :return: mean IoU over classes (NaN classes ignored)
    """
    hist = np.zeros((n_cls, n_cls))
    for idx in range(min(len(pred), len(lbl))):
        hist += fast_hist(lbl[idx].data.numpy(), pred[idx].data.numpy(), n_cls)
    correct = np.diag(hist)
    gt_total = hist.sum(1)
    print('>>>', 'overall accuracy', correct.sum() / hist.sum())
    print('-' * 50)
    iu = correct / (gt_total + hist.sum(0) - correct)
    miou = np.nanmean(iu)
    print('>>>', 'mean IU', miou)
    print('-' * 50)
    print('>>>', 'mean accuracy', np.nanmean(correct / gt_total))
    print('-' * 50)
    return miou
|
class Logger(object):
    """Training logger: saves visualization images to disk and, on rank 0,
    tracks loss histories (pickled to ``losses.pkl``) and writes scalars
    and images to TensorBoard.
    """
    def __init__(self, args, experiment_dir):
        super(Logger, self).__init__()
        # Per-phase counters of how many times output_logs() was called.
        self.num_iter = {'train': 0, 'test': 0}
        self.no_disk_write_ops = args.no_disk_write_ops
        self.rank = args.rank
        if (not self.no_disk_write_ops):
            self.experiment_dir = experiment_dir
            for phase in ['train', 'test']:
                os.makedirs(((experiment_dir / 'images') / phase), exist_ok=True)
            self.to_image = transforms.ToPILImage()
            if (args.rank == 0):
                # Resume the loss history when continuing training of the
                # same experiment; otherwise start from scratch.
                if ((args.which_epoch != 'none') and (args.init_experiment_dir == '')):
                    self.losses = pickle.load(open((self.experiment_dir / 'losses.pkl'), 'rb'))
                else:
                    self.losses = {}
                # NOTE(review): writes to the absolute path '/tensorboard'
                # — presumably a container mount point; confirm for local runs.
                self.writer = tensorboardX.SummaryWriter('/tensorboard')

    def output_logs(self, phase, visuals, losses, time):
        # Log one iteration: save the visuals image, record losses, and
        # print a summary line (scalar logging/printing is rank-0 only).
        if (not self.no_disk_write_ops):
            self.num_iter[phase] += 1
            self.to_image(visuals).save((((self.experiment_dir / 'images') / phase) / ('%04d_%02d.jpg' % (self.num_iter[phase], self.rank))))
            if (self.rank != 0):
                return
            self.writer.add_image(f'results_{phase}', visuals, self.num_iter[phase])
            for (key, value) in losses.items():
                if (key in self.losses):
                    self.losses[key].append(value)
                else:
                    self.losses[key] = [value]
                self.writer.add_scalar(f'{key}_{phase}', value, self.num_iter[phase])
            # Persist the updated loss history every logged iteration.
            pickle.dump(self.losses, open((self.experiment_dir / 'losses.pkl'), 'wb'))
        elif (self.rank != 0):
            return
        print((', '.join((('%s: %.3f' % (key, value)) for (key, value) in losses.items())) + (', time: %.3f' % time)))

    def set_num_iter(self, train_iter, test_iter):
        # Restore the iteration counters (e.g. when resuming training).
        self.num_iter = {'train': train_iter, 'test': test_iter}
|
class LossWrapper(nn.Module):
    """Hinge adversarial loss with plain ('gan'), relativistic ('rgan') or
    relativistic-average ('ragan') pairing of discriminator scores."""
    @staticmethod
    def get_args(parser):
        parser.add('--adv_pred_type', type=str, default='ragan', choices=['gan', 'rgan', 'ragan'])
        parser.add('--adv_loss_weight', type=float, default=0.5)

    def __init__(self, args):
        super(LossWrapper, self).__init__()

        # Map raw discriminator scores to "predictions" per GAN flavor.
        def preds_gan(real_scores, fake_scores):
            return real_scores, fake_scores

        def preds_rgan(real_scores, fake_scores):
            return real_scores - fake_scores, fake_scores - real_scores

        def preds_ragan(real_scores, fake_scores):
            return real_scores - fake_scores.mean(), fake_scores - real_scores.mean()

        self.get_preds = {'gan': preds_gan, 'rgan': preds_rgan, 'ragan': preds_ragan}[args.adv_pred_type]
        # Discriminator hinge loss.
        self.loss_dis = lambda real_preds, fake_preds: torch.relu(1 - real_preds).mean() + torch.relu(1 + fake_preds).mean()
        # Generator loss: symmetric hinge for relativistic variants,
        # plain non-saturating hinge otherwise.
        if 'r' in args.adv_pred_type:
            self.loss_gen = lambda real_preds, fake_preds: torch.relu(1 - fake_preds).mean() + torch.relu(1 + real_preds).mean()
        else:
            self.loss_gen = lambda real_preds, fake_preds: -fake_preds.mean()
        self.weight = args.adv_loss_weight

    def forward(self, data_dict, losses_dict):
        real_scores = data_dict['real_scores']
        fake_scores = data_dict['fake_scores_dis']
        real_preds, fake_preds = self.get_preds(real_scores, fake_scores)
        losses_dict['D_ADV'] = self.loss_dis(real_preds, fake_preds) * self.weight
        # Generator pass: stop gradients through the real scores.
        real_scores = real_scores.detach()
        real_preds, fake_preds = self.get_preds(real_scores, data_dict['fake_scores_gen'])
        losses_dict['G_ADV'] = self.loss_gen(real_preds, fake_preds) * self.weight
        return losses_dict
|
class LossWrapper(nn.Module):
    """Feature-matching loss: distance between discriminator features of
    real and generated images, averaged over feature blocks."""
    @staticmethod
    def get_args(parser):
        parser.add('--fem_loss_type', type=str, default='l1', help='l1|mse')
        parser.add('--fem_loss_weight', type=float, default=10.0)

    def __init__(self, args):
        super(LossWrapper, self).__init__()
        self.loss = {'mse': F.mse_loss, 'l1': F.l1_loss}[args.fem_loss_type]
        self.weight = args.fem_loss_weight

    def forward(self, data_dict, losses_dict):
        real_feats_gen = data_dict['real_feats_gen']
        fake_feats_gen = data_dict['fake_feats_gen']
        # Real features are detached: only the generator receives gradients.
        total = sum(self.loss(fake_feats, real_feats.detach())
                    for real_feats, fake_feats in zip(real_feats_gen, fake_feats_gen))
        losses_dict['G_FM'] = total / len(real_feats_gen) * self.weight
        return losses_dict
|
class LossWrapper(nn.Module):
    """Pixelwise reconstruction loss applied to one or more
    (prediction, target) tensor pairs taken from ``data_dict``."""
    @staticmethod
    def get_args(parser):
        parser.add('--pix_loss_type', type=str, default='l1')
        parser.add('--pix_loss_weights', type=str, default='10.0', help='comma separated floats')
        parser.add('--pix_loss_apply_to', type=str, default='pred_target_delta_lf_rgbs, target_imgs', help='can specify multiple tensor names from data_dict')
        parser.add('--pix_loss_names', type=str, default='L1', help='name for each loss')

    def __init__(self, args):
        super(LossWrapper, self).__init__()
        # Each apply_to entry is a (prediction_name, target_name) pair.
        self.apply_to = [rn_utils.parse_str_to_list(s, sep=',') for s in rn_utils.parse_str_to_list(args.pix_loss_apply_to, sep=';')]
        self.loss = {'mse': F.mse_loss, 'l1': F.l1_loss, 'ce': F.cross_entropy}[args.pix_loss_type]
        self.weights = rn_utils.parse_str_to_list(args.pix_loss_weights, value_type=float)
        self.names = rn_utils.parse_str_to_list(args.pix_loss_names)

    def forward(self, data_dict, losses_dict):
        for i, (tensor_name, target_tensor_name) in enumerate(self.apply_to):
            fake_imgs = data_dict[tensor_name]
            real_imgs = data_dict[target_tensor_name]
            # Fold batch and time dims together.
            b, t = fake_imgs.shape[:2]
            fake_imgs = fake_imgs.view(b * t, *fake_imgs.shape[2:])
            # Match precision when training in half precision.
            if 'HalfTensor' in fake_imgs.type():
                real_imgs = real_imgs.type(fake_imgs.type())
            real_imgs = real_imgs.view(b * t, *real_imgs.shape[2:])
            losses_dict['G_' + self.names[i]] = self.loss(fake_imgs, real_imgs.detach()) * self.weights[i]
        return losses_dict
|
class LossWrapper(nn.Module):
    """Segmentation loss (BCE-with-logits or a log-dice variant) applied
    to (prediction, target) tensor pairs taken from ``data_dict``."""
    @staticmethod
    def get_args(parser):
        parser.add('--seg_loss_type', type=str, default='bce')
        parser.add('--seg_loss_weights', type=float, default=10.0)
        parser.add('--seg_loss_apply_to', type=str, default='pred_target_inf_segs_logits, target_segs', help='can specify multiple tensor names from data_dict')
        parser.add('--seg_loss_names', type=str, default='BCE', help='name for each loss')

    def __init__(self, args):
        super(LossWrapper, self).__init__()
        # Each apply_to entry is a (prediction_name, target_name) pair.
        self.apply_to = [rn_utils.parse_str_to_list(s, sep=',') for s in rn_utils.parse_str_to_list(args.seg_loss_apply_to, sep=';')]
        self.names = rn_utils.parse_str_to_list(args.seg_loss_names, sep=',')

        def log_dice(fake_seg, real_seg):
            # Negative log of the (squared-norm) Dice overlap.
            return torch.log((fake_seg ** 2).sum() + (real_seg ** 2).sum()) - torch.log((2 * fake_seg * real_seg).sum())

        self.loss = {'bce': F.binary_cross_entropy_with_logits, 'dice': log_dice}[args.seg_loss_type]
        self.weights = args.seg_loss_weights
        self.eps = args.eps

    def forward(self, data_dict, losses_dict):
        for i, (tensor_name, target_tensor_name) in enumerate(self.apply_to):
            fake_segs = data_dict[tensor_name]
            real_segs = data_dict[target_tensor_name]
            # Fold batch and time dims together.
            b, t = fake_segs.shape[:2]
            fake_segs = fake_segs.view(b * t, *fake_segs.shape[2:])
            # Match precision when training in half precision.
            if 'HalfTensor' in fake_segs.type():
                real_segs = real_segs.type(fake_segs.type())
            real_segs = real_segs.view(b * t, *real_segs.shape[2:])
            losses_dict['G_' + self.names[i]] = self.loss(fake_segs, real_segs) * self.weights
        return losses_dict
|
class LossWrapper(nn.Module):
    """SSIM between predicted and target images, optionally masked by
    segmentations and optionally computed without gradients."""
    @staticmethod
    def get_args(parser):
        parser.add('--ssm_use_masks', action='store_true', help='use masks before application of the loss')
        parser.add('--ssm_calc_grad', action='store_true', help='if True, the loss is differentiable')

    def __init__(self, args):
        super(LossWrapper, self).__init__()
        self.calc_grad = args.ssm_calc_grad
        self.use_masks = args.ssm_use_masks
        self.loss = SSIM()

    def forward(self, data_dict, losses_dict):
        real_imgs = data_dict['target_imgs']
        fake_imgs = data_dict['pred_target_imgs']
        # Fold batch and time dims together.
        b, t, c, h, w = real_imgs.shape
        real_imgs = real_imgs.view(-1, c, h, w)
        fake_imgs = fake_imgs.view(-1, c, h, w)
        if self.use_masks:
            segs = data_dict['real_segs'].view(b * t, -1, h, w)
            real_imgs = real_imgs * segs
            fake_imgs = fake_imgs * segs
        if self.calc_grad:
            loss = self.loss(fake_imgs, real_imgs)
        else:
            # Metric-only mode: no gradients flow through SSIM.
            with torch.no_grad():
                loss = self.loss(fake_imgs.detach(), real_imgs)
        losses_dict['G_SSIM'] = loss.mean()
        return losses_dict
|
def gaussian(window_size, sigma):
    """Return a 1-D Gaussian kernel of length ``window_size``, centered on
    ``window_size // 2`` and normalized to sum to one."""
    center = window_size // 2
    weights = [exp(-((x - center) ** 2) / float(2 * sigma ** 2)) for x in range(window_size)]
    kernel = torch.Tensor(weights)
    return kernel / kernel.sum()
|
def create_window(window_size, channel):
    """Build a ``(channel, 1, window_size, window_size)`` Gaussian window
    (sigma fixed at 1.5) suitable for grouped 2-D convolution."""
    kernel_1d = gaussian(window_size, 1.5).unsqueeze(1)
    # Outer product gives the separable 2-D kernel.
    kernel_2d = kernel_1d.mm(kernel_1d.t()).float().unsqueeze(0).unsqueeze(0)
    return Variable(kernel_2d.expand(channel, 1, window_size, window_size).contiguous())
|
def _ssim(img1, img2, window, window_size, channel, size_average=True):
    """Core SSIM computation between two image batches using a precomputed
    Gaussian ``window``; returns the overall mean if ``size_average`` else
    one value per image."""
    pad = window_size // 2
    mu1 = F.conv2d(img1, window, padding=pad, groups=channel)
    mu2 = F.conv2d(img2, window, padding=pad, groups=channel)
    mu1_sq = mu1.pow(2)
    mu2_sq = mu2.pow(2)
    mu1_mu2 = mu1 * mu2
    # Local (co)variances via E[x*y] - E[x]E[y].
    sigma1_sq = F.conv2d(img1 * img1, window, padding=pad, groups=channel) - mu1_sq
    sigma2_sq = F.conv2d(img2 * img2, window, padding=pad, groups=channel) - mu2_sq
    sigma12 = F.conv2d(img1 * img2, window, padding=pad, groups=channel) - mu1_mu2
    # Stabilization constants (dynamic range assumed to be 1).
    C1 = 0.01 ** 2
    C2 = 0.03 ** 2
    ssim_map = ((2 * mu1_mu2 + C1) * (2 * sigma12 + C2)) / ((mu1_sq + mu2_sq + C1) * (sigma1_sq + sigma2_sq + C2))
    if size_average:
        return ssim_map.mean()
    return ssim_map.mean(1).mean(1).mean(1)
|
class SSIM(torch.nn.Module):
    """SSIM as a torch module; caches the Gaussian window and rebuilds it
    whenever the input channel count or dtype changes."""
    def __init__(self, window_size=11, size_average=True):
        super(SSIM, self).__init__()
        self.window_size = window_size
        self.size_average = size_average
        self.channel = 1
        self.window = create_window(window_size, self.channel)

    def forward(self, img1, img2):
        channel = img1.size(1)
        if channel == self.channel and self.window.data.type() == img1.data.type():
            window = self.window
        else:
            # Rebuild and cache the window for the new channel count/dtype.
            window = create_window(self.window_size, channel)
            if img1.is_cuda:
                window = window.cuda(img1.get_device())
            window = window.type_as(img1)
            self.window = window
            self.channel = channel
        return _ssim(img1, img2, window, self.window_size, channel, self.size_average)
|
class LossWrapper(nn.Module):
    """Warping regularization: penalizes the magnitude of predicted delta
    tensors, with a weight that decays every ``decay_schedule`` iterations
    until it bottoms out at ``eps`` (at which point the loss is skipped)."""
    @staticmethod
    def get_args(parser):
        parser.add('--wpr_loss_type', type=str, default='l1')
        parser.add('--wpr_loss_weight', type=float, default=10.0)
        parser.add('--wpr_loss_weight_decay', type=float, default=0.9, help='multiplicative decay of loss weight')
        parser.add('--wpr_loss_decay_schedule', type=int, default=50, help='num iters after which decay happends')
        parser.add('--wpr_loss_apply_to', type=str, default='pred_target_delta_uvs', help='tensors this loss is applied to')

    def __init__(self, args):
        super(LossWrapper, self).__init__()
        self.apply_to = rn_utils.parse_str_to_list(args.wpr_loss_apply_to)
        self.eps = args.eps
        self.reg_type = args.wpr_loss_type
        self.weight = args.wpr_loss_weight
        self.weight_decay = args.wpr_loss_weight_decay
        self.decay_schedule = args.wpr_loss_decay_schedule
        self.num_iters = 0

    def forward(self, data_dict, losses_dict):
        # Decay the weight once per schedule period, clamped at eps.
        if (self.num_iters == self.decay_schedule):
            self.weight = max((self.weight * self.weight_decay), self.eps)
            self.num_iters = 1
        # Fully decayed: the loss is effectively disabled.
        if (self.weight == self.eps):
            return losses_dict
        loss = 0
        for tensor_name in self.apply_to:
            if (self.reg_type == 'l1'):
                loss += data_dict[tensor_name].abs().mean()
            else:
                # BUGFIX: a bare `raise` with no active exception raises
                # "RuntimeError: No active exception to re-raise"; raise a
                # descriptive error instead. NotImplementedError subclasses
                # RuntimeError, so existing handlers keep working.
                raise NotImplementedError('unsupported wpr_loss_type: %s' % self.reg_type)
        loss /= len(self.apply_to)
        losses_dict['G_WPR'] = (loss * self.weight)
        if (self.weight_decay != 1.0):
            self.num_iters += 1
        return losses_dict
|
class NetworkWrapper(nn.Module):
    """Discriminator wrapper: scores real and generated images and stores
    the adversarial scores plus intermediate features in ``data_dict`` for
    the downstream loss wrappers.
    """
    @staticmethod
    def get_args(parser):
        parser.add('--dis_num_channels', default=64, type=int, help='minimum number of channels')
        parser.add('--dis_max_channels', default=512, type=int, help='maximum number of channels')
        parser.add('--dis_no_stickman', action='store_true', help='do not input stickman into the discriminator')
        parser.add('--dis_num_blocks', default=6, type=int, help='number of convolutional blocks')
        parser.add('--dis_output_tensor_size', default=8, type=int, help='spatial size of the last tensor')
        parser.add('--dis_norm_layer_type', default='bn', type=str, help='norm layer inside the discriminator')
        parser.add('--dis_activation_type', default='leakyrelu', type=str, help='activation layer inside the discriminator')
        parser.add('--dis_downsampling_type', default='avgpool', type=str, help='downsampling layer inside the discriminator')
        parser.add('--dis_fake_imgs_name', default='pred_target_imgs', type=str, help='name of the tensor with fake images')

    def __init__(self, args):
        super(NetworkWrapper, self).__init__()
        self.args = args
        self.net = Discriminator(args)

    def forward(self, data_dict: dict, net_names_to_train: list, all_networks: dict) -> dict:
        real_inputs = data_dict['target_imgs']
        fake_inputs = data_dict[self.args.dis_fake_imgs_name]
        if (not self.args.dis_no_stickman):
            # Condition on the pose "stickman" drawing by concatenation
            # along the channel dim (dim 2 of (b, t, c, h, w)).
            real_inputs = torch.cat([real_inputs, data_dict['target_stickmen']], 2)
            fake_inputs = torch.cat([fake_inputs, data_dict['target_stickmen']], 2)
        (b, t, c, h, w) = real_inputs.shape
        real_inputs = real_inputs.view((- 1), c, h, w)
        fake_inputs = fake_inputs.view((- 1), c, h, w)
        # --- Discriminator pass: unfreeze D's parameters and detach the
        # fakes so no gradient reaches the generator in this pass.
        for p in self.parameters():
            p.requires_grad = True
        inputs = torch.cat([real_inputs, fake_inputs.detach()])
        (scores_dis, _) = self.net(inputs)
        # NOTE(review): split(b) into exactly two chunks assumes t == 1;
        # with t > 1 this would produce more than two chunks — confirm
        # against the dataloader.
        (real_scores, fake_scores_dis) = scores_dis.split(b)
        data_dict['real_scores'] = real_scores
        data_dict['fake_scores_dis'] = fake_scores_dis
        # --- Generator pass: freeze D's parameters; the fakes are NOT
        # detached so gradients flow into the generator.
        for p in self.parameters():
            p.requires_grad = False
        inputs = torch.cat([real_inputs, fake_inputs])
        (scores_gen, feats_gen) = self.net(inputs)
        (_, fake_scores_gen) = scores_gen.split(b)
        # Split each block's features into real/fake halves for the
        # feature-matching loss.
        feats = [feats_block.split(b) for feats_block in feats_gen]
        (real_feats_gen, fake_feats_gen) = map(list, zip(*feats))
        data_dict['fake_scores_gen'] = fake_scores_gen
        data_dict['real_feats_gen'] = real_feats_gen
        data_dict['fake_feats_gen'] = fake_feats_gen
        return data_dict

    @torch.no_grad()
    def visualize_outputs(self, data_dict):
        # Only the stickman conditioning is visualized by this wrapper.
        visuals = []
        if ('target_stickmen' in data_dict.keys()):
            visuals += [data_dict['target_stickmen']]
        return visuals

    def __repr__(self):
        # Append the total parameter count to the network's repr.
        num_params = 0
        for p in self.net.parameters():
            num_params += p.numel()
        output = self.net.__repr__()
        output += '\n'
        output += ('Number of parameters: %d' % num_params)
        return output
|
class Discriminator(nn.Module):
    """Convolutional discriminator: strided ResBlocks down to
    ``dis_output_tensor_size``, optional extra non-strided blocks, then a
    1x1 conv producing per-location scores. Also returns the intermediate
    block features (for feature matching)."""
    def __init__(self, args):
        super(Discriminator, self).__init__()
        num_down_blocks = int(math.log(args.image_size // args.dis_output_tensor_size, 2))
        out_channels = args.dis_num_channels
        # Stickman conditioning adds 3 extra input channels unless disabled.
        in_channels = 3 + 3 * (not args.dis_no_stickman)
        self.first_conv = nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=3, stride=1, padding=1)
        self.blocks = nn.ModuleList()
        for i in range(1, num_down_blocks + 1):
            in_channels = out_channels
            out_channels = min(int(args.dis_num_channels * 2 ** i), args.dis_max_channels)
            self.blocks.append(utils.ResBlock(in_channels=in_channels, out_channels=out_channels, stride=2, eps=args.eps, activation_type=args.dis_activation_type, norm_layer_type=args.dis_norm_layer_type, resize_layer_type=args.dis_downsampling_type, return_feats=True))
        for i in range(num_down_blocks + 1, args.dis_num_blocks + 1):
            self.blocks.append(utils.ResBlock(in_channels=out_channels, out_channels=out_channels, eps=args.eps, activation_type=args.dis_activation_type, norm_layer_type=args.dis_norm_layer_type, resize_layer_type='none', return_feats=True))
        norm_layer = utils.norm_layers[args.dis_norm_layer_type]
        activation = utils.activations[args.dis_activation_type]
        self.final_block = nn.Sequential(norm_layer(out_channels, None, eps=args.eps), activation(inplace=True))
        self.linear = nn.Conv2d(out_channels, 1, 1)

    def forward(self, inputs):
        outputs = self.first_conv(inputs)
        feats = []
        for block in self.blocks:
            outputs, block_feats = block(outputs)
            feats += block_feats
        outputs = self.final_block(outputs)
        return self.linear(outputs), feats
|
class NetworkWrapper(nn.Module):
    """Identity-embedder wrapper: computes identity embeddings from the
    source images and stores them in ``data_dict['source_idt_embeds']``.
    """
    @staticmethod
    def get_args(parser):
        parser.add('--emb_num_channels', default=64, type=int, help='minimum number of channels')
        parser.add('--emb_max_channels', default=512, type=int, help='maximum number of channels')
        parser.add('--emb_no_stickman', action='store_true', help='do not input stickman into the embedder')
        parser.add('--emb_output_tensor_size', default=8, type=int, help='spatial size of the last tensor')
        parser.add('--emb_norm_layer_type', default='none', type=str, help='norm layer inside the embedder')
        parser.add('--emb_activation_type', default='leakyrelu', type=str, help='activation layer inside the embedder')
        parser.add('--emb_downsampling_type', default='avgpool', type=str, help='downsampling layer inside the embedder')
        parser.add('--emb_apply_masks', default='True', type=rn_utils.str2bool, choices=[True, False], help='apply segmentation masks to source ground-truth images')

    def __init__(self, args):
        super(NetworkWrapper, self).__init__()
        self.args = args
        self.net = Embedder(args)

    def forward(self, data_dict: dict, networks_to_train: list, all_networks: dict) -> dict:
        """Modify ``data_dict`` in place to contain the identity embeddings
        for the source images (under 'source_idt_embeds')."""
        # Disable autograd while this embedder is not being trained.
        if ('identity_embedder' not in networks_to_train):
            prev = torch.is_grad_enabled()
            torch.set_grad_enabled(False)
        inputs = data_dict['source_imgs']
        (b, n) = inputs.shape[:2]
        if self.args.emb_apply_masks:
            # Keep the masked foreground, fill the background with -1
            # (presumably images are normalized to [-1, 1] — confirm).
            inputs = ((inputs * data_dict['source_segs']) + ((- 1) * (1 - data_dict['source_segs'])))
        if (not self.args.emb_no_stickman):
            # Concatenate the stickman drawing along the channel dim.
            inputs = torch.cat([inputs, data_dict['source_stickmen']], 2)
        source_embeds = self.net(inputs)
        if ('identity_embedder' not in networks_to_train):
            # Restore the previous autograd state.
            torch.set_grad_enabled(prev)
        data_dict['source_idt_embeds'] = source_embeds
        return data_dict

    @torch.no_grad()
    def visualize_outputs(self, data_dict):
        # Visualize the (detached) source images and, if present, stickmen.
        visuals = [data_dict['source_imgs'].detach()]
        if ('source_stickmen' in data_dict.keys()):
            visuals += [data_dict['source_stickmen']]
        return visuals

    def __repr__(self):
        # Append the total parameter count to the network's repr.
        num_params = 0
        for p in self.net.parameters():
            num_params += p.numel()
        output = self.net.__repr__()
        output += '\n'
        output += ('Number of parameters: %d' % num_params)
        return output
|
class Embedder(nn.Module):
    """Encoder-decoder that produces multi-scale identity embeddings from
    the source frames; per-frame embeddings are averaged over the ``n``
    source frames unless the encoder already aggregated them.
    """
    def __init__(self, args):
        super(Embedder, self).__init__()
        # Numbers of strided encoder / decoder blocks are derived from the
        # ratios between the image size and the respective tensor sizes.
        num_enc_blocks = int(math.log((args.image_size // args.emb_output_tensor_size), 2))
        num_dec_blocks = int(math.log((args.image_size // args.tex_input_tensor_size), 2))
        out_channels = args.emb_num_channels
        # Stickman conditioning adds 3 input channels unless disabled.
        layers = [nn.Conv2d(in_channels=(3 + (3 * (not args.emb_no_stickman))), out_channels=out_channels, kernel_size=3, stride=1, padding=1)]
        for i in range(1, (num_enc_blocks + 1)):
            in_channels = out_channels
            out_channels = min(int((args.emb_num_channels * (2 ** i))), args.emb_max_channels)
            # The final encoder block gets output_aggregated=True —
            # presumably it pools over the source frames; exact semantics
            # live in nt_utils.ResBlock (confirm there).
            layers += [nt_utils.ResBlock(in_channels=in_channels, out_channels=out_channels, stride=2, eps=args.eps, activation_type=args.emb_activation_type, norm_layer_type=args.emb_norm_layer_type, resize_layer_type=args.emb_downsampling_type, frames_per_person=args.num_source_frames, output_aggregated=(i == num_enc_blocks))]
        self.enc = nn.Sequential(*layers)
        layers = []
        for i in range((num_dec_blocks - 1), (- 1), (- 1)):
            in_channels = out_channels
            out_channels = min(int((args.tex_num_channels * (2 ** i))), args.tex_max_channels)
            layers += [nt_utils.ResBlock(in_channels=in_channels, out_channels=out_channels, eps=args.eps, activation_type=args.emb_activation_type, norm_layer_type=args.emb_norm_layer_type, resize_layer_type='none', return_feats=True)]
        self.dec_blocks = nn.ModuleList(layers)

    def forward(self, inputs):
        # inputs: (batch, num_source_frames, channels, h, w)
        (b, n, c, h, w) = inputs.shape
        outputs = self.enc(inputs.view((- 1), c, h, w))
        embeds = []
        for block in self.dec_blocks:
            (outputs, embeds_block) = block(outputs)
            embeds += embeds_block
        # If the encoder did not already aggregate over frames (leading dim
        # is still b * n), average the embeddings across the source frames.
        if (embeds[0].shape[0] == (b * n)):
            embeds = [embeds_block.view(b, n, *embeds_block.shape[1:]).mean(dim=1) for embeds_block in embeds]
        return embeds
|
class NetworkWrapper(nn.Module):
    """Pose-embedder wrapper: turns keypoint/pose vectors into spatial
    pose embeddings (optionally harmonically encoded) for the target and,
    optionally, the source frames.
    """
    @staticmethod
    def get_args(parser):
        parser.add('--pse_num_channels', default=256, type=int, help='number of intermediate channels')
        parser.add('--pse_num_blocks', default=4, type=int, help='number of encoding blocks')
        parser.add('--pse_in_channels', default=394, type=int, help='number of channels in either latent pose (if present) of keypoints')
        parser.add('--pse_emb_source_pose', action='store_true', help='predict embeddings for the source pose')
        parser.add('--pse_norm_layer_type', default='none', type=str, help='norm layer inside the pose embedder')
        parser.add('--pse_activation_type', default='leakyrelu', type=str, help='activation layer inside the pose embedder')
        parser.add('--pse_use_harmonic_enc', action='store_true', help='encode keypoints with harmonics')
        parser.add('--pse_num_harmonics', default=4, type=int, help='number of frequencies used')

    def __init__(self, args):
        super(NetworkWrapper, self).__init__()
        self.args = args
        self.net = PoseEmbedder(args)
        if self.args.pse_use_harmonic_enc:
            # Frequencies pi * 2^k for k in [0, num_harmonics).
            frequencies = ((torch.ones(args.pse_num_harmonics) * np.pi) * (2 ** torch.arange(args.pse_num_harmonics)))
            frequencies = frequencies[(None, None)]
            self.register_buffer('frequencies', frequencies)

    def forward(self, data_dict: dict, networks_to_train: list, all_networks: dict) -> dict:
        """Modify ``data_dict`` in place to contain the pose embeddings for
        the target and (optionally) source images."""
        # Disable autograd while this embedder is not being trained.
        if ('keypoints_embedder' not in networks_to_train):
            prev = torch.is_grad_enabled()
            torch.set_grad_enabled(False)
        target_poses = data_dict['target_poses']
        (b, t) = target_poses.shape[:2]
        target_poses = target_poses.view((b * t), (- 1))
        if self.args.pse_use_harmonic_enc:
            # Harmonic (sin/cos) positional encoding of the pose vector.
            target_poses = (target_poses[(..., None)] * self.frequencies).view((b * t), (- 1))
            target_poses = torch.cat([torch.sin(target_poses), torch.cos(target_poses)], dim=1)
        if self.args.pse_emb_source_pose:
            source_poses = data_dict['source_poses']
            n = source_poses.shape[1]
            source_poses = source_poses.view((b * n), (- 1))
            if self.args.pse_use_harmonic_enc:
                # BUGFIX: this reshape previously used (b * t) — the TARGET
                # frame count — instead of (b * n), which breaks whenever
                # the source frame count differs from the target's.
                source_poses = (source_poses[(..., None)] * self.frequencies).view((b * n), (- 1))
                source_poses = torch.cat([torch.sin(source_poses), torch.cos(source_poses)], dim=1)
        target_embeds = self.net(target_poses)
        if self.args.pse_emb_source_pose:
            source_embeds = self.net(source_poses)
        if ('keypoints_embedder' not in networks_to_train):
            # Restore the previous autograd state.
            torch.set_grad_enabled(prev)
        data_dict['target_pose_embeds'] = target_embeds.view(b, t, *target_embeds.shape[1:])
        if self.args.pse_emb_source_pose:
            data_dict['source_pose_embeds'] = source_embeds.view(b, n, *source_embeds.shape[1:])
        return data_dict

    @torch.no_grad()
    def visualize_outputs(self, data_dict):
        # Pose embeddings have no direct visualization.
        visuals = []
        return visuals

    def __repr__(self):
        # Append the total parameter count to the network's repr.
        num_params = 0
        for p in self.net.parameters():
            num_params += p.numel()
        output = self.net.__repr__()
        output += '\n'
        output += ('Number of parameters: %d' % num_params)
        return output
|
class PoseEmbedder(nn.Module):
    """MLP mapping a flattened (possibly harmonic-encoded) pose vector to a
    spatial feature map of shape (num_channels, spatial_size, spatial_size)."""

    def __init__(self, args):
        super(PoseEmbedder, self).__init__()
        self.num_channels = args.inf_max_channels
        self.spatial_size = args.inf_input_tensor_size
        norm_layer = utils.norm_layers[args.pse_norm_layer_type]
        activation = utils.activations[args.pse_activation_type]
        # Harmonic encoding doubles channels (sin + cos) per harmonic.
        if args.pse_use_harmonic_enc:
            in_channels = args.pse_in_channels * args.pse_num_harmonics * 2
        else:
            in_channels = args.pse_in_channels
        out_channels = self.num_channels * self.spatial_size ** 2
        # With a single block, the only linear layer must already produce the
        # full output size; otherwise use the configured hidden width.
        hidden = out_channels if args.pse_num_blocks == 1 else args.pse_num_channels
        layers = [nn.Linear(in_channels, hidden)]
        for _ in range(1, args.pse_num_blocks - 1):
            if args.pse_norm_layer_type != 'none':
                layers.append(norm_layer(hidden, None, eps=args.eps))
            layers.append(activation(inplace=True))
            layers.append(nn.Linear(hidden, hidden))
        if args.pse_num_blocks != 1:
            if args.pse_norm_layer_type != 'none':
                layers.append(norm_layer(hidden, None, eps=args.eps))
            layers.append(activation(inplace=True))
            layers.append(nn.Linear(hidden, out_channels))
        self.mlp = nn.Sequential(*layers)

    def forward(self, inputs):
        """Embed flattened pose vectors and reshape them to spatial maps."""
        flat = self.mlp(inputs)
        return flat.view(-1, self.num_channels, self.spatial_size, self.spatial_size)
|
class TrainingWrapper(object):
    """Top-level training harness.

    Parses CLI options, optionally sets up multi-GPU (apex DDP) training,
    loads/saves checkpoints, redirects logging, and runs the epoch-based
    train/test loop via a pluggable runner module.
    """

    @staticmethod
    def get_args(parser):
        """Register harness-level CLI options, then let the chosen dataset and
        runner modules register theirs. Returns the same parser."""
        parser.add('--project_dir', default='.', type=str, help='root directory of the code')
        parser.add('--torch_home', default='', type=str, help='directory used for storage of the checkpoints')
        parser.add('--experiment_name', default='test', type=str, help='name of the experiment used for logging')
        parser.add('--dataloader_name', default='voxceleb2', type=str, help='name of the file in dataset directory which is used for data loading')
        parser.add('--dataset_name', default='voxceleb2_512px', type=str, help='name of the dataset in the data root folder')
        parser.add('--data_root', default='.', type=str, help='root directory of the data')
        parser.add('--debug', action='store_true', help='turn on the debug mode: fast epoch, useful for testing')
        parser.add('--runner_name', default='default', type=str, help='class that wraps the models and performs training and inference steps')
        parser.add('--no_disk_write_ops', action='store_true', help='avoid doing write operations to disk')
        parser.add('--redirect_print_to_file', action='store_true', help='redirect stdout and stderr to file')
        parser.add('--random_seed', default=0, type=int, help='used for initialization of pytorch and numpy seeds')
        parser.add('--init_experiment_dir', default='', type=str, help='directory of the experiment used for the initialization of the networks')
        parser.add('--init_networks', default='', type=str, help='list of networks to intialize')
        parser.add('--init_which_epoch', default='none', type=str, help='epoch to initialize from')
        parser.add('--which_epoch', default='none', type=str, help='epoch to continue training from')
        parser.add('--num_gpus', default=1, type=int, help='>1 enables DDP')
        parser.add('--num_epochs', default=1000, type=int, help='number of epochs for training')
        parser.add('--checkpoint_freq', default=25, type=int, help='frequency of checkpoints creation in epochs')
        parser.add('--test_freq', default=5, type=int, help='frequency of testing in epochs')
        parser.add('--batch_size', default=1, type=int, help='batch size across all GPUs')
        parser.add('--num_workers_per_process', default=20, type=int, help='number of workers used for data loading in each process')
        parser.add('--skip_test', action='store_true', help='do not perform testing')
        parser.add('--calc_stats', action='store_true', help='calculate batch norm standing stats')
        parser.add('--visual_freq', default=(- 1), type=int, help='in iterations, -1 -- output logs every epoch')
        parser.add('--use_half', action='store_true', help='enable half precision calculation')
        parser.add('--use_closure', action='store_true', help='use closure function during optimization (required by LBFGS)')
        parser.add('--use_apex', action='store_true', help='enable apex')
        parser.add('--amp_opt_level', default='O0', type=str, help='full/mixed/half precision, refer to apex.amp docs')
        parser.add('--amp_loss_scale', default='dynamic', type=str, help='fixed or dynamic loss scale')
        parser.add('--local_rank', default=0, type=int)
        parser.add('--rank', default=0, type=int)
        parser.add('--world_size', default=1, type=int)
        parser.add('--train_size', default=1, type=int)
        # Partial parse: only the options above are needed to pick the
        # dataset/runner modules, whose own options are added next.
        (args, _) = parser.parse_known_args()
        # Must be set before any torch-hub/pretrained-model download.
        os.environ['TORCH_HOME'] = args.torch_home
        importlib.import_module(f'datasets.{args.dataloader_name}').DatasetWrapper.get_args(parser)
        importlib.import_module(f'runners.{args.runner_name}').RunnerWrapper.get_args(parser)
        return parser

    def __init__(self, args, runner=None):
        """Set up seeds, (optional) distributed training, experiment
        directories, log redirection, the runner, and checkpoint loading.

        Args:
            args: parsed argument namespace (see ``get_args``).
            runner: optional pre-built runner; when None it is constructed
                from ``args.runner_name``.
        """
        super(TrainingWrapper, self).__init__()
        # Allow HTTPS downloads (e.g. pretrained weights) without cert checks.
        ssl._create_default_https_context = ssl._create_unverified_context
        torch.backends.cudnn.benchmark = True
        torch.manual_seed(args.random_seed)
        torch.cuda.manual_seed_all(args.random_seed)
        if ((args.num_gpus > 1) and (args.num_gpus <= 8)):
            # Single-node DDP: one process per GPU, rank taken from local_rank.
            args.rank = args.local_rank
            args.world_size = args.num_gpus
            torch.cuda.set_device(args.local_rank)
            torch.distributed.init_process_group(backend='nccl', init_method='env://')
        elif (args.num_gpus > 8):
            # NOTE(review): bare `raise` with no active exception produces a
            # RuntimeError; multi-node (>8 GPU) setups are unsupported here.
            raise
        project_dir = pathlib.Path(args.project_dir)
        self.checkpoints_dir = (((project_dir / 'runs') / args.experiment_name) / 'checkpoints')
        if (not args.no_disk_write_ops):
            os.makedirs(self.checkpoints_dir, exist_ok=True)
        self.experiment_dir = ((project_dir / 'runs') / args.experiment_name)
        if (not args.no_disk_write_ops):
            if args.redirect_print_to_file:
                # Per-rank stdout/stderr files to keep DDP logs separate.
                logs_dir = (self.experiment_dir / 'logs')
                os.makedirs(logs_dir, exist_ok=True)
                sys.stdout = open(os.path.join(logs_dir, f'stdout_{args.rank}.txt'), 'w')
                sys.stderr = open(os.path.join(logs_dir, f'stderr_{args.rank}.txt'), 'w')
            if (args.rank == 0):
                print(args)
                # Persist the full argument list for reproducibility.
                with open((self.experiment_dir / 'args.txt'), 'wt') as args_file:
                    for (k, v) in sorted(vars(args).items()):
                        args_file.write(('%s: %s\n' % (str(k), str(v))))
        self.runner = runner
        if (self.runner is None):
            self.runner = importlib.import_module(f'runners.{args.runner_name}').RunnerWrapper(args)
        # Networks whose weights are initialized from another experiment.
        init_networks = (rn_utils.parse_str_to_list(args.init_networks) if args.init_networks else {})
        networks_to_train = self.runner.nets_names_to_train
        if ((args.init_which_epoch != 'none') and args.init_experiment_dir):
            for net_name in init_networks:
                self.runner.nets[net_name].load_state_dict(torch.load(((pathlib.Path(args.init_experiment_dir) / 'checkpoints') / f'{args.init_which_epoch}_{net_name}.pth'), map_location='cpu'))
        if (args.which_epoch != 'none'):
            # Resume remaining trainable networks from this experiment's dir.
            for net_name in networks_to_train:
                if (net_name not in init_networks):
                    self.runner.nets[net_name].load_state_dict(torch.load((self.checkpoints_dir / f'{args.which_epoch}_{net_name}.pth'), map_location='cpu'))
        if (args.num_gpus > 0):
            self.runner.cuda()
        if (args.rank == 0):
            print(self.runner)

    def train(self, args):
        """Run the full training loop: per-epoch train pass, periodic test
        pass, periodic checkpointing. Returns the runner."""
        if args.use_apex:
            from apex import amp
            amp.init(False)
        train_dataloader = ds_utils.get_dataloader(args, 'train')
        if (not args.skip_test):
            test_dataloader = ds_utils.get_dataloader(args, 'test')
        # `model` may later be wrapped by DDP; `runner` stays the raw module.
        model = runner = self.runner
        if args.use_half:
            runner.half()
        opts = runner.get_optimizers(args)
        if ((args.which_epoch != 'none') and (not args.init_experiment_dir)):
            # Resume optimizer state alongside the network weights.
            for (net_name, opt) in opts.items():
                opt.load_state_dict(torch.load((self.checkpoints_dir / f'{args.which_epoch}_opt_{net_name}.pth'), map_location='cpu'))
        if (args.use_apex and (args.num_gpus > 0) and (args.num_gpus <= 8)):
            # amp.initialize needs parallel lists of nets and optimizers;
            # sort names so the pairing is deterministic across runs.
            (nets_list, opts_list) = ([], [])
            for net_name in sorted(opts.keys()):
                nets_list.append(runner.nets[net_name])
                opts_list.append(opts[net_name])
            loss_scale = (float(args.amp_loss_scale) if (args.amp_loss_scale != 'dynamic') else args.amp_loss_scale)
            (nets_list, opts_list) = amp.initialize(nets_list, opts_list, opt_level=args.amp_opt_level, num_losses=1, loss_scale=loss_scale)
            for (net_name, net, opt) in zip(sorted(opts.keys()), nets_list, opts_list):
                runner.nets[net_name] = net
                opts[net_name] = opt
            if ((args.which_epoch != 'none') and (not args.init_experiment_dir) and os.path.exists((self.checkpoints_dir / f'{args.which_epoch}_amp.pth'))):
                amp.load_state_dict(torch.load((self.checkpoints_dir / f'{args.which_epoch}_amp.pth'), map_location='cpu'))
        if ((args.num_gpus > 1) and (args.num_gpus <= 8)):
            from apex import parallel
            model = parallel.DistributedDataParallel(runner, delay_allreduce=True)
        # Epoch numbering is 1-based; resume from the epoch after `which_epoch`.
        epoch_start = (1 if (args.which_epoch == 'none') else (int(args.which_epoch) + 1))
        train_iter = (epoch_start - 1)
        if (args.visual_freq != (- 1)):
            # NOTE(review): float division -- train_iter becomes a float here;
            # presumably only used as a logging counter. Confirm with Logger.
            train_iter /= args.visual_freq
        logger = Logger(args, self.experiment_dir)
        logger.set_num_iter(train_iter=train_iter, test_iter=((epoch_start - 1) // args.test_freq))
        if (args.debug and (not args.use_apex)):
            # Anomaly detection is incompatible with apex, so only enable it
            # for plain-precision debug runs.
            torch.autograd.set_detect_anomaly(True)
        total_iters = 1
        for epoch in range(epoch_start, (args.num_epochs + 1)):
            if (args.rank == 0):
                print(('epoch %d' % epoch))
            model.train()
            time_start = time.time()
            train_dataloader.dataset.shuffle()
            for (i, data_dict) in enumerate(train_dataloader, 1):
                # NOTE(review): condition is duplicated -- equivalent to a
                # single `args.num_gpus > 0` check.
                if ((args.num_gpus > 0) and (args.num_gpus > 0)):
                    for (key, value) in data_dict.items():
                        data_dict[key] = value.cuda()
                if args.use_half:
                    for (key, value) in data_dict.items():
                        data_dict[key] = value.half()
                # Logs are emitted on the last batch of the epoch, or every
                # `visual_freq` iterations when that option is set.
                output_logs = (i == len(train_dataloader))
                if (args.visual_freq != (- 1)):
                    output_logs = (not (total_iters % args.visual_freq))
                output_visuals = (output_logs and (not args.no_disk_write_ops))
                for opt in opts.values():
                    opt.zero_grad()
                if (not args.use_closure):
                    loss = model(data_dict)
                    closure = None
                if (args.use_apex and (args.num_gpus > 0) and (args.num_gpus <= 8)):
                    with amp.scale_loss(loss, opts.values()) as scaled_loss:
                        scaled_loss.backward()
                elif (not args.use_closure):
                    loss.backward()
                else:
                    # LBFGS-style optimizers re-evaluate the model themselves.
                    def closure():
                        loss = model(data_dict)
                        loss.backward()
                        return loss
                for opt in opts.values():
                    opt.step(closure)
                if output_logs:
                    logger.output_logs('train', runner.output_visuals(), runner.output_losses(), (time.time() - time_start))
                    if args.debug:
                        break
                if (args.visual_freq != (- 1)):
                    total_iters += 1
                    total_iters %= args.visual_freq
            train_dataloader.dataset.epoch += 1
            if (epoch % args.test_freq):
                continue
            if (not args.skip_test):
                if args.calc_stats:
                    # Recompute BatchNorm standing statistics before eval.
                    runner.calculate_batchnorm_stats(train_dataloader, args.debug)
                time_start = time.time()
                model.eval()
                for data_dict in test_dataloader:
                    if (args.num_gpus > 0):
                        for (key, value) in data_dict.items():
                            data_dict[key] = value.cuda()
                    with torch.no_grad():
                        model(data_dict)
                    if args.debug:
                        break
                logger.output_logs('test', runner.output_visuals(), runner.output_losses(), (time.time() - time_start))
            if ((epoch % args.checkpoint_freq) and (not args.debug)):
                continue
            # Checkpointing: rank 0 only, networks + optimizers (+ amp state).
            if ((args.rank == 0) and (not args.no_disk_write_ops)):
                with torch.no_grad():
                    for net_name in runner.nets_names_to_train:
                        torch.save(runner.nets[net_name].state_dict(), (self.checkpoints_dir / f'{epoch}_{net_name}.pth'))
                        torch.save(opts[net_name].state_dict(), (self.checkpoints_dir / f'{epoch}_opt_{net_name}.pth'))
                if args.use_apex:
                    torch.save(amp.state_dict(), (self.checkpoints_dir / f'{epoch}_amp.pth'))
        return runner
|
def get_hparams_defaults():
    """Return a fresh copy of the default hyper-parameter config node."""
    defaults = hparams.clone()
    return defaults
|
def update_hparams(hparams_file):
    """Return the default config overridden by the values in a YAML file."""
    cfg = get_hparams_defaults()
    cfg.merge_from_file(hparams_file)
    return cfg.clone()
|
def update_hparams_from_dict(cfg_dict):
    """Return the default config overridden by values from a dict-like object."""
    base = get_hparams_defaults()
    overrides = base.load_cfg(str(cfg_dict))
    base.merge_from_other_cfg(overrides)
    return base.clone()
|
def get_grid_search_configs(config, excluded_keys=None):
    """Expand a config dict whose list-valued entries define a grid search.

    :param config: (possibly nested) dict of configuration values; list
        values are treated as grid-search axes.
    :param excluded_keys: flattened keys whose list values should NOT be
        expanded (kept as one list-valued setting instead).
    :return: tuple ``(experiments, hyper_params)`` where ``experiments`` is a
        list of unflattened config dicts (one per grid point) and
        ``hyper_params`` lists the flattened keys that were varied.
    """
    # BUGFIX: mutable default argument -- a shared [] would leak state
    # between calls if it were ever mutated.
    if excluded_keys is None:
        excluded_keys = []

    def bool_to_string(x: Union[(List[bool], bool)]) -> Union[(List[str], str)]:
        """Convert a bool or list of bools to their string representations."""
        if isinstance(x, bool):
            return [str(x)]
        for (i, j) in enumerate(x):
            x[i] = str(j)
        return x
    flattened_config_dict = flatten(config, reducer='path')
    hyper_params = []
    for (k, v) in flattened_config_dict.items():
        if isinstance(v, list):
            if (k in excluded_keys):
                # Collapse to a single '+'-joined value; split back below.
                flattened_config_dict[k] = ['+'.join(v)]
            elif (len(v) > 1):
                hyper_params += [k]
        # Booleans are stringified so that the product/comparison logic below
        # treats every axis uniformly; they are converted back afterwards.
        # BUGFIX: guard against an empty list before indexing v[0].
        if (isinstance(v, list) and v and isinstance(v[0], bool)):
            flattened_config_dict[k] = bool_to_string(v)
        if (not isinstance(v, list)):
            if isinstance(v, bool):
                flattened_config_dict[k] = bool_to_string(v)
            else:
                flattened_config_dict[k] = [v]
    (keys, values) = zip(*flattened_config_dict.items())
    # Cartesian product over all axes -> one flat dict per experiment.
    experiments = [dict(zip(keys, v)) for v in itertools.product(*values)]
    for (exp_id, exp) in enumerate(experiments):
        for param in excluded_keys:
            exp[param] = exp[param].strip().split('+')
        for (param_name, param_value) in exp.items():
            # Convert stringified booleans back to real booleans.
            if (isinstance(param_value, list) and param_value and (param_value[0] in ['True', 'False'])):
                exp[param_name] = [(True if (x == 'True') else False) for x in param_value]
            if (param_value in ['True', 'False']):
                if (param_value == 'True'):
                    exp[param_name] = True
                else:
                    exp[param_name] = False
        experiments[exp_id] = unflatten(exp, splitter='path')
    return (experiments, hyper_params)
|
def run_grid_search_experiments(cfg_id, cfg_file, script='main.py'):
    """Load a grid-search YAML config, select the ``cfg_id``-th experiment,
    set up its log directory, and return the resolved config node.

    :param cfg_id: index of the grid point to run.
    :param cfg_file: path to the YAML file describing the grid.
    :param script: kept for interface compatibility (unused here).
    :return: the fully-resolved config node for this experiment.
    """
    # BUGFIX: close the file handle (the original leaked it) and pass an
    # explicit Loader -- yaml.load without one is deprecated and unsafe.
    with open(cfg_file) as f:
        cfg = yaml.load(f, Loader=yaml.FullLoader)
    (different_configs, hyperparams) = get_grid_search_configs(cfg, excluded_keys=[])
    logger.info(f'''Grid search hparams:
{hyperparams}''')
    different_configs = [update_hparams_from_dict(c) for c in different_configs]
    logger.info(f'======> Number of experiment configurations is {len(different_configs)}')
    config_to_run = CN(different_configs[cfg_id])
    logtime = time.strftime('%d-%m-%Y_%H-%M-%S')
    logdir = f'{logtime}_{config_to_run.EXP_NAME}'

    def get_from_dict(mapping, keys):
        # Walk a nested mapping following the list of keys.
        return reduce(operator.getitem, keys, mapping)
    # Build a compact experiment id from the varied hyper-parameters.
    exp_id = ''
    for hp in hyperparams:
        v = get_from_dict(different_configs[cfg_id], hp.split('/'))
        exp_id += f"{hp.replace('/', '.').replace('_', '').lower()}-{v}"
    config_to_run.EXP_ID = f'{config_to_run.EXP_NAME}'
    if exp_id:
        logdir += f'_{exp_id}'
        config_to_run.EXP_ID += f'/{exp_id}'
    logdir = os.path.join(config_to_run.LOG_DIR, config_to_run.METHOD, config_to_run.EXP_NAME, logdir)
    os.makedirs(logdir, exist_ok=True)
    # NOTE(review): the source YAML is copied to the root LOG_DIR, not to the
    # per-run logdir, so concurrent runs overwrite it -- confirm intended.
    shutil.copy(src=cfg_file, dst=os.path.join(config_to_run.LOG_DIR, 'config.yaml'))
    config_to_run.LOG_DIR = logdir

    def save_dict_to_yaml(obj, filename, mode='w'):
        # Dump the resolved config so the run is reproducible.
        with open(filename, mode) as f:
            yaml.dump(obj, f, default_flow_style=False)
    save_dict_to_yaml(unflatten(flatten(config_to_run)), os.path.join(config_to_run.LOG_DIR, 'config_to_run.yaml'))
    return config_to_run
|
class MixedDataset(torch.utils.data.Dataset):
    """Dataset that samples from several pose datasets with fixed ratios.

    Each ``__getitem__`` first draws a dataset according to ``self.partition``
    (EFT data settings: 50% H36M / 30% in-the-wild / 20% MPI-INF, or
    30/40/10/20 when 3DPW is included) and then indexes into it modulo its
    length.
    """

    def __init__(self, options, method, **kwargs):
        if options.TRAIN_3DPW:
            self.dataset_list = ['h36m', 'coco', 'mpi-inf-3dhp', '3dpw']
            self.dataset_dict = {'h36m': 0, 'coco': 1, 'mpi-inf-3dhp': 2, '3dpw': 3}
        else:
            self.dataset_list = ['h36m', 'coco', 'mpi-inf-3dhp']
            self.dataset_dict = {'h36m': 0, 'coco': 1, 'mpi-inf-3dhp': 2}
        logger.info(f'Datasets used for training --> {self.dataset_list}')
        self.datasets = [BaseDataset(options, method, ds, **kwargs) for ds in self.dataset_list]
        # Epoch length equals the size of the largest constituent dataset.
        # (IDIOM: dropped the unused total_length / length_itw locals.)
        self.length = max([len(ds) for ds in self.datasets])
        # Per-dataset sampling probabilities, stored as a cumulative
        # distribution for the roulette-wheel draw in __getitem__.
        if options.TRAIN_3DPW:
            self.partition = [0.3, 0.4, 0.1, 0.2]
        else:
            self.partition = [0.5, 0.3, 0.2]
        self.partition = np.array(self.partition).cumsum()

    def __getitem__(self, index):
        # Pick a dataset with probability given by the partition; the cumsum
        # ends at 1.0 and np.random.rand() < 1.0, so a branch always matches.
        p = np.random.rand()
        for i in range(len(self.dataset_list)):
            if (p <= self.partition[i]):
                return self.datasets[i][(index % len(self.datasets[i]))]

    def __len__(self):
        return self.length
|
def _make_divisible(v, divisor, min_value=None):
'\n This function is taken from the original tf repo.\n It ensures that all layers have a channel number that is divisible by 8\n It can be seen here:\n https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py\n :param v:\n :param divisor:\n :param min_value:\n :return:\n '
if (min_value is None):
min_value = divisor
new_v = max(min_value, ((int((v + (divisor / 2))) // divisor) * divisor))
if (new_v < (0.9 * v)):
new_v += divisor
return new_v
|
class ConvBNReLU(nn.Sequential):
    """Conv2d (no bias) -> normalization -> ReLU6, with 'same'-style padding."""

    def __init__(self, in_planes, out_planes, kernel_size=3, stride=1, groups=1, norm_layer=None):
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        # Padding that preserves spatial size for odd kernels at stride 1.
        pad = (kernel_size - 1) // 2
        super(ConvBNReLU, self).__init__(
            nn.Conv2d(in_planes, out_planes, kernel_size, stride, pad, groups=groups, bias=False),
            norm_layer(out_planes),
            nn.ReLU6(inplace=True),
        )
|
class InvertedResidual(nn.Module):
    """MobileNetV2 inverted residual block: expand -> depthwise -> project."""

    def __init__(self, inp, oup, stride, expand_ratio, norm_layer=None):
        super(InvertedResidual, self).__init__()
        assert stride in [1, 2]
        self.stride = stride
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        hidden_dim = int(round(inp * expand_ratio))
        # Residual connection only when the block preserves shape.
        self.use_res_connect = (self.stride == 1) and (inp == oup)
        layers = []
        if expand_ratio != 1:
            # 1x1 pointwise expansion.
            layers.append(ConvBNReLU(inp, hidden_dim, kernel_size=1, norm_layer=norm_layer))
        layers.extend([
            # 3x3 depthwise convolution.
            ConvBNReLU(hidden_dim, hidden_dim, stride=stride, groups=hidden_dim, norm_layer=norm_layer),
            # 1x1 linear projection (no activation after it).
            nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False),
            norm_layer(oup),
        ])
        self.conv = nn.Sequential(*layers)

    def forward(self, x):
        out = self.conv(x)
        return x + out if self.use_res_connect else out
|
class MobileNetV2(nn.Module):
    def __init__(self, num_classes=1000, width_mult=1.0, inverted_residual_setting=None, round_nearest=8, block=None, norm_layer=None):
        """
        MobileNet V2 main class (feature extractor only -- note there is no
        classifier head; ``num_classes`` is accepted but unused here).

        Args:
            num_classes (int): Number of classes
            width_mult (float): Width multiplier - adjusts number of channels in each layer by this amount
            inverted_residual_setting: Network structure
            round_nearest (int): Round the number of channels in each layer to be a multiple of this number
                Set to 1 to turn off rounding
            block: Module specifying inverted residual building block for mobilenet
            norm_layer: Module specifying the normalization layer to use
        """
        super(MobileNetV2, self).__init__()
        if (block is None):
            block = InvertedResidual
        if (norm_layer is None):
            norm_layer = nn.BatchNorm2d
        input_channel = 32
        last_channel = 1280
        if (inverted_residual_setting is None):
            # Rows are (t, c, n, s): expansion factor, output channels,
            # number of repeats, stride of the first repeat.
            inverted_residual_setting = [[1, 16, 1, 1], [6, 24, 2, 2], [6, 32, 3, 2], [6, 64, 4, 2], [6, 96, 3, 1], [6, 160, 3, 2], [6, 320, 1, 1]]
        if ((len(inverted_residual_setting) == 0) or (len(inverted_residual_setting[0]) != 4)):
            raise ValueError('inverted_residual_setting should be non-empty or a 4-element list, got {}'.format(inverted_residual_setting))
        # Scale channel counts by width_mult, rounding to round_nearest.
        input_channel = _make_divisible((input_channel * width_mult), round_nearest)
        self.last_channel = _make_divisible((last_channel * max(1.0, width_mult)), round_nearest)
        features = [ConvBNReLU(3, input_channel, stride=2, norm_layer=norm_layer)]
        for (t, c, n, s) in inverted_residual_setting:
            output_channel = _make_divisible((c * width_mult), round_nearest)
            for i in range(n):
                # Only the first block in each group downsamples.
                stride = (s if (i == 0) else 1)
                features.append(block(input_channel, output_channel, stride, expand_ratio=t, norm_layer=norm_layer))
                input_channel = output_channel
        features.append(ConvBNReLU(input_channel, self.last_channel, kernel_size=1, norm_layer=norm_layer))
        self.features = nn.Sequential(*features)
        # Standard MobileNetV2 weight initialization.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out')
                if (m.bias is not None):
                    nn.init.zeros_(m.bias)
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.ones_(m.weight)
                nn.init.zeros_(m.bias)
            elif isinstance(m, nn.Linear):
                nn.init.normal_(m.weight, 0, 0.01)
                nn.init.zeros_(m.bias)

    def _forward_impl(self, x):
        # Returns the final feature map; no pooling or classification.
        x = self.features(x)
        return x

    def forward(self, x):
        return self._forward_impl(x)
|
def mobilenet_v2(pretrained=False, progress=True, **kwargs):
    """Construct a MobileNetV2 architecture from
    `"MobileNetV2: Inverted Residuals and Linear Bottlenecks" <https://arxiv.org/abs/1801.04381>`_.

    Args:
        pretrained (bool): If True, load ImageNet weights (non-strict).
        progress (bool): If True, display a download progress bar on stderr.
    """
    net = MobileNetV2(**kwargs)
    if pretrained:
        weights = load_state_dict_from_url(model_urls['mobilenet_v2'], progress=progress)
        # strict=False: this backbone has no classifier head to match.
        net.load_state_dict(weights, strict=False)
    return net
|
def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
    """3x3 convolution, padding equal to dilation (keeps size at stride 1)."""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=dilation,
        groups=groups,
        bias=False,
        dilation=dilation,
    )
|
def conv1x1(in_planes, out_planes, stride=1):
    """1x1 convolution (channel projection), no bias."""
    return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
|
class BasicBlock(nn.Module):
    """ResNet basic block: two 3x3 convs with an identity shortcut."""
    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1, base_width=64, dilation=1, norm_layer=None):
        super(BasicBlock, self).__init__()
        norm_layer = nn.BatchNorm2d if norm_layer is None else norm_layer
        if groups != 1 or base_width != 64:
            raise ValueError('BasicBlock only supports groups=1 and base_width=64')
        if dilation > 1:
            raise NotImplementedError('Dilation > 1 not supported in BasicBlock')
        # The first conv may downsample spatially; the second keeps the shape.
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = norm_layer(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = norm_layer(planes)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        shortcut = x if self.downsample is None else self.downsample(x)
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        out += shortcut
        return self.relu(out)
|
class Bottleneck(nn.Module):
    """ResNet bottleneck: 1x1 reduce, 3x3, 1x1 expand (x4) + shortcut."""
    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1, base_width=64, dilation=1, norm_layer=None):
        super(Bottleneck, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        # Width of the middle 3x3 conv, scaled by base_width/groups (ResNeXt).
        width = int(planes * (base_width / 64.0)) * groups
        self.conv1 = conv1x1(inplanes, width)
        self.bn1 = norm_layer(width)
        self.conv2 = conv3x3(width, width, stride, groups, dilation)
        self.bn2 = norm_layer(width)
        self.conv3 = conv1x1(width, planes * self.expansion)
        self.bn3 = norm_layer(planes * self.expansion)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        shortcut = x if self.downsample is None else self.downsample(x)
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        out += shortcut
        return self.relu(out)
|
class ResNet(nn.Module):
    """ResNet backbone (torchvision-style) without avgpool/fc head.

    ``forward`` returns the layer4 feature map rather than class logits.
    """

    def __init__(self, block, layers, num_classes=1000, zero_init_residual=False, groups=1, width_per_group=64, replace_stride_with_dilation=None, norm_layer=None):
        # num_classes is accepted for interface compatibility but unused:
        # this variant has no classification head.
        super(ResNet, self).__init__()
        if (norm_layer is None):
            norm_layer = nn.BatchNorm2d
        self._norm_layer = norm_layer
        self.inplanes = 64
        self.dilation = 1
        if (replace_stride_with_dilation is None):
            # Each element says whether to replace the 2x2 stride of the
            # corresponding stage (layer2..layer4) with dilation instead.
            replace_stride_with_dilation = [False, False, False]
        if (len(replace_stride_with_dilation) != 3):
            raise ValueError('replace_stride_with_dilation should be None or a 3-element tuple, got {}'.format(replace_stride_with_dilation))
        self.groups = groups
        self.base_width = width_per_group
        # Stem: 7x7 stride-2 conv + maxpool (overall 4x downsample).
        self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3, bias=False)
        self.bn1 = norm_layer(self.inplanes)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2, dilate=replace_stride_with_dilation[0])
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2, dilate=replace_stride_with_dilation[1])
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2, dilate=replace_stride_with_dilation[2])
        # Standard ResNet initialization.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
        if zero_init_residual:
            # Zeroing the last BN in each residual branch makes every block
            # start as an identity mapping.
            for m in self.modules():
                if isinstance(m, Bottleneck):
                    nn.init.constant_(m.bn3.weight, 0)
                elif isinstance(m, BasicBlock):
                    nn.init.constant_(m.bn2.weight, 0)

    def _make_layer(self, block, planes, blocks, stride=1, dilate=False):
        """Build one ResNet stage of ``blocks`` residual blocks.

        The first block may downsample (via ``stride``) and/or project the
        shortcut; subsequent blocks keep the shape.
        """
        norm_layer = self._norm_layer
        downsample = None
        previous_dilation = self.dilation
        if dilate:
            # Trade stride for dilation to keep spatial resolution.
            self.dilation *= stride
            stride = 1
        if ((stride != 1) or (self.inplanes != (planes * block.expansion))):
            downsample = nn.Sequential(conv1x1(self.inplanes, (planes * block.expansion), stride), norm_layer((planes * block.expansion)))
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample, self.groups, self.base_width, previous_dilation, norm_layer))
        self.inplanes = (planes * block.expansion)
        for _ in range(1, blocks):
            layers.append(block(self.inplanes, planes, groups=self.groups, base_width=self.base_width, dilation=self.dilation, norm_layer=norm_layer))
        return nn.Sequential(*layers)

    def _forward_impl(self, x):
        # Stem followed by the four stages; returns the layer4 feature map.
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        return x

    def forward(self, x):
        return self._forward_impl(x)
|
def _resnet(arch, block, layers, pretrained, progress, **kwargs):
    """Instantiate a ResNet variant and optionally load ImageNet weights."""
    net = ResNet(block, layers, **kwargs)
    if pretrained:
        weights = load_state_dict_from_url(model_urls[arch], progress=progress)
        # strict=False: this backbone has no fc head to match.
        net.load_state_dict(weights, strict=False)
    return net
|
def resnet18(pretrained=False, progress=True, **kwargs):
    """ResNet-18 model from
    `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    return _resnet('resnet18', BasicBlock, [2, 2, 2, 2], pretrained, progress, **kwargs)
|
def resnet34(pretrained=False, progress=True, **kwargs):
    """ResNet-34 model from
    `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    return _resnet('resnet34', BasicBlock, [3, 4, 6, 3], pretrained, progress, **kwargs)
|
def resnet50(pretrained=False, progress=True, **kwargs):
    """ResNet-50 model from
    `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    return _resnet('resnet50', Bottleneck, [3, 4, 6, 3], pretrained, progress, **kwargs)
|
def resnet101(pretrained=False, progress=True, **kwargs):
    """ResNet-101 model from
    `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    return _resnet('resnet101', Bottleneck, [3, 4, 23, 3], pretrained, progress, **kwargs)
|
def resnet152(pretrained=False, progress=True, **kwargs):
    """ResNet-152 model from
    `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    return _resnet('resnet152', Bottleneck, [3, 8, 36, 3], pretrained, progress, **kwargs)
|
def resnext50_32x4d(pretrained=False, progress=True, **kwargs):
    """ResNeXt-50 32x4d model from
    `"Aggregated Residual Transformation for Deep Neural Networks" <https://arxiv.org/pdf/1611.05431.pdf>`_.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    # 32 groups of width 4 in each bottleneck's 3x3 conv.
    kwargs.update(groups=32, width_per_group=4)
    return _resnet('resnext50_32x4d', Bottleneck, [3, 4, 6, 3], pretrained, progress, **kwargs)
|
def resnext101_32x8d(pretrained=False, progress=True, **kwargs):
    """ResNeXt-101 32x8d model from
    `"Aggregated Residual Transformation for Deep Neural Networks" <https://arxiv.org/pdf/1611.05431.pdf>`_.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    # 32 groups of width 8 in each bottleneck's 3x3 conv.
    kwargs.update(groups=32, width_per_group=8)
    return _resnet('resnext101_32x8d', Bottleneck, [3, 4, 23, 3], pretrained, progress, **kwargs)
|
def wide_resnet50_2(pretrained=False, progress=True, **kwargs):
    """Wide ResNet-50-2 model from
    `"Wide Residual Networks" <https://arxiv.org/pdf/1605.07146.pdf>`_.

    Same as ResNet-50 except every bottleneck's inner 3x3 conv is twice as
    wide; outer 1x1 channel counts are unchanged.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    kwargs.update(width_per_group=64 * 2)
    return _resnet('wide_resnet50_2', Bottleneck, [3, 4, 6, 3], pretrained, progress, **kwargs)
|
def wide_resnet101_2(pretrained=False, progress=True, **kwargs):
    """Wide ResNet-101-2 model from
    `"Wide Residual Networks" <https://arxiv.org/pdf/1605.07146.pdf>`_.

    Same as ResNet-101 except every bottleneck's inner 3x3 conv is twice as
    wide; outer 1x1 channel counts are unchanged.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    kwargs.update(width_per_group=64 * 2)
    return _resnet('wide_resnet101_2', Bottleneck, [3, 4, 23, 3], pretrained, progress, **kwargs)
|
def get_backbone_info(backbone):
    """Return output-channel count and downsample rate for a backbone name.

    Raises KeyError for unknown backbone names.
    """
    wide_resnets = [
        'resnet50', 'resnet101', 'resnet152', 'resnext50_32x4d',
        'resnext101_32x8d', 'wide_resnet50_2', 'wide_resnet101_2',
    ]
    info = {name: {'n_output_channels': 2048, 'downsample_rate': 4} for name in wide_resnets}
    info['resnet18'] = {'n_output_channels': 512, 'downsample_rate': 4}
    info['resnet34'] = {'n_output_channels': 512, 'downsample_rate': 4}
    info['mobilenet_v2'] = {'n_output_channels': 1280, 'downsample_rate': 4}
    return info[backbone]
|
class hmr_head(nn.Module):
    """Iterative SMPL regression head (HMR-style).

    Pools backbone features and refines pose/shape/camera estimates over
    ``n_iter`` steps, starting from mean SMPL parameters loaded from disk.
    """

    def __init__(self, num_input_features, smpl_mean_params=SMPL_MEAN_PARAMS):
        super(hmr_head, self).__init__()
        # 24 joints x 6D continuous rotation representation.
        npose = (24 * 6)
        self.avgpool = nn.AvgPool2d(7, stride=1)
        # Input: pooled features + current pose + 10 shape coeffs + 3 cam params.
        self.fc1 = nn.Linear(((num_input_features + npose) + 13), 1024)
        self.drop1 = nn.Dropout()
        self.fc2 = nn.Linear(1024, 1024)
        self.drop2 = nn.Dropout()
        self.decpose = nn.Linear(1024, npose)
        self.decshape = nn.Linear(1024, 10)
        self.deccam = nn.Linear(1024, 3)
        # Small-gain init keeps early iterations close to the mean parameters.
        nn.init.xavier_uniform_(self.decpose.weight, gain=0.01)
        nn.init.xavier_uniform_(self.decshape.weight, gain=0.01)
        nn.init.xavier_uniform_(self.deccam.weight, gain=0.01)
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = ((m.kernel_size[0] * m.kernel_size[1]) * m.out_channels)
                m.weight.data.normal_(0, math.sqrt((2.0 / n)))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
        # Mean SMPL parameters: the starting point of the iterative regression.
        mean_params = np.load(smpl_mean_params)
        init_pose = torch.from_numpy(mean_params['pose'][:]).unsqueeze(0)
        init_shape = torch.from_numpy(mean_params['shape'][:].astype('float32')).unsqueeze(0)
        init_cam = torch.from_numpy(mean_params['cam']).unsqueeze(0)
        self.register_buffer('init_pose', init_pose)
        self.register_buffer('init_shape', init_shape)
        self.register_buffer('init_cam', init_cam)

    def forward(self, features, init_pose=None, init_shape=None, init_cam=None, n_iter=3):
        """Iteratively regress SMPL pose (as rotation matrices), shape and
        camera from a backbone feature map.

        Returns a dict with keys 'pred_pose' (B, 24, 3, 3), 'pred_cam' (B, 3)
        and 'pred_shape' (B, 10).
        """
        batch_size = features.shape[0]
        # Fall back to the registered mean parameters when no init is given.
        if (init_pose is None):
            init_pose = self.init_pose.expand(batch_size, (- 1))
        if (init_shape is None):
            init_shape = self.init_shape.expand(batch_size, (- 1))
        if (init_cam is None):
            init_cam = self.init_cam.expand(batch_size, (- 1))
        xf = self.avgpool(features)
        xf = xf.view(xf.size(0), (- 1))
        pred_pose = init_pose
        pred_shape = init_shape
        pred_cam = init_cam
        # Each iteration predicts residual updates to pose/shape/camera.
        for i in range(n_iter):
            xc = torch.cat([xf, pred_pose, pred_shape, pred_cam], 1)
            xc = self.fc1(xc)
            xc = self.drop1(xc)
            xc = self.fc2(xc)
            xc = self.drop2(xc)
            pred_pose = (self.decpose(xc) + pred_pose)
            pred_shape = (self.decshape(xc) + pred_shape)
            pred_cam = (self.deccam(xc) + pred_cam)
        # Convert the 6D rotation representation to full rotation matrices.
        pred_rotmat = rot6d_to_rotmat(pred_pose).view(batch_size, 24, 3, 3)
        output = {'pred_pose': pred_rotmat, 'pred_cam': pred_cam, 'pred_shape': pred_shape}
        return output
|
class HMR(nn.Module):
    """End-to-end HMR model: CNN backbone + iterative SMPL regressor head."""

    def __init__(self, backbone='resnet50', img_res=224, pretrained=None):
        super(HMR, self).__init__()
        # NOTE(review): eval() resolves the backbone constructor by name from
        # module scope; only safe while `backbone` comes from trusted config.
        self.backbone = eval(backbone)(pretrained=True)
        self.head = hmr_head(num_input_features=get_backbone_info(backbone)['n_output_channels'])
        self.smpl = smpl_head(img_res=img_res)
        if (pretrained is not None):
            self.load_pretrained(pretrained)

    def forward(self, images):
        """Run backbone + head, decode SMPL, and merge both output dicts."""
        features = self.backbone(images)
        hmr_output = self.head(features)
        smpl_output = self.smpl(rotmat=hmr_output['pred_pose'], shape=hmr_output['pred_shape'], cam=hmr_output['pred_cam'], normalize_joints2d=True)
        smpl_output.update(hmr_output)
        return smpl_output

    def load_pretrained(self, file):
        """Load backbone/head weights from a checkpoint in any of three layouts.

        Tries, in order: {'model': state_dict}, a Lightning
        {'state_dict': ...} (with prefixes stripped by prepare_statedict),
        and finally a bare state dict (SMPL init buffers added).
        """
        logger.info(f'Loading pretrained weights from {file}')
        # Load the file once instead of up to three times, and narrow the
        # bare `except:` clauses so real errors (I/O, unpickling,
        # KeyboardInterrupt) are no longer silently swallowed.
        ckpt = torch.load(file)
        try:
            state_dict = ckpt['model']
        except (KeyError, TypeError):
            try:
                state_dict = prepare_statedict(ckpt['state_dict'])
            except (KeyError, TypeError):
                state_dict = add_smpl_params_to_dict(ckpt)
        self.backbone.load_state_dict(state_dict, strict=False)
        self.head.load_state_dict(state_dict, strict=False)
|
def get_distance_matrix(target):
    """Euclidean distance transform of the complement of a binary mask.

    Each cell of the result holds the distance to the nearest cell where
    `target` is non-zero (cells inside the mask get 0).
    """
    return ndimage.distance_transform_edt((1 - target))
|
def distance_transform_loss(predict, dist_mat):
    """Distance-matrix loss: penalize predicted mass far from the target.

    Sum of predict * dist_mat, normalized by sum(predict)**1.5 plus a small
    epsilon to avoid division by zero on an all-zero prediction.
    """
    weighted_sum = (predict * dist_mat).sum()
    normalizer = predict.sum() ** 1.5
    return weighted_sum / (normalizer + 1e-06)
|
def neg_iou_loss(predict, target):
    """Negative (soft) IoU loss between two same-shaped mask tensors.

    IoU is computed per sample (all dims except dim 0 are reduced), then
    averaged; returns 1 - mean IoU so lower is better.
    """
    assert (predict.shape == target.shape), 'Target and Predict should have same shape'
    reduce_dims = tuple(range(1, predict.ndimension()))
    intersection = (predict * target).sum(reduce_dims)
    union = (predict + target - predict * target).sum(reduce_dims) + 1e-06
    mean_iou = (intersection / union).sum() / intersection.nelement()
    return 1.0 - mean_iou
|
def dsr_mc_loss(predict, target, dist_mat, loss_type='DistM', silhouette=False):
    """DSR-MC loss dispatcher.

    'DistM' applies the distance-matrix loss to the first 3 prediction
    channels; 'nIOU' applies negative IoU using either the 4th channel
    (silhouette=True) or the mean of the first 3 channels. Any other
    loss_type logs a warning and yields 0.
    """
    if loss_type == 'DistM':
        return distance_transform_loss(predict[:3], dist_mat)
    if loss_type == 'nIOU':
        mask = predict[3] if silhouette else predict[:3].mean(0)
        return neg_iou_loss(mask, target[0])
    logger.warning(f'Not a valid DSR_MC Loss - use DistM/nIOU')
    return 0
|
class RandomSampler(Sampler):
    """Random sampler that can resume mid-epoch from a checkpoint.

    `dataset_perm` is the full epoch permutation (saved into checkpoints);
    `perm` is the portion still to iterate this epoch.
    """

    def __init__(self, data_source, checkpoint):
        self.data_source = data_source
        if ((checkpoint is not None) and (checkpoint['dataset_perm'] is not None)):
            # Resume: skip the batches consumed before the checkpoint.
            self.dataset_perm = checkpoint['dataset_perm']
            self.perm = self.dataset_perm[(checkpoint['batch_size'] * checkpoint['batch_idx']):]
        else:
            # Fresh epoch: draw one permutation and iterate exactly that one.
            # Bug fix: previously `perm` was a *second*, independent
            # permutation, so a resume from the saved `dataset_perm` replayed
            # different samples than were actually served.
            self.dataset_perm = torch.randperm(len(self.data_source)).tolist()
            self.perm = self.dataset_perm

    def __iter__(self):
        return iter(self.perm)

    def __len__(self):
        return len(self.perm)
|
class SequentialSampler(Sampler):
    """In-order sampler that can resume mid-epoch from a checkpoint."""

    def __init__(self, data_source, checkpoint):
        self.data_source = data_source
        resuming = (checkpoint is not None) and (checkpoint['dataset_perm'] is not None)
        if resuming:
            # Skip the batches already consumed before the checkpoint.
            self.dataset_perm = checkpoint['dataset_perm']
            skip = checkpoint['batch_size'] * checkpoint['batch_idx']
            self.perm = self.dataset_perm[skip:]
        else:
            self.dataset_perm = list(range(len(self.data_source)))
            self.perm = self.dataset_perm

    def __iter__(self):
        return iter(self.perm)

    def __len__(self):
        return len(self.perm)
|
class CheckpointDataLoader(DataLoader):
    """DataLoader that can resume training from an arbitrary point within an epoch.

    A checkpoint dict carrying 'dataset_perm', 'batch_size' and 'batch_idx'
    lets the sampler skip the batches already consumed.
    """

    def __init__(self, dataset, checkpoint=None, batch_size=1, shuffle=False, num_workers=0, pin_memory=False, drop_last=True, timeout=0, worker_init_fn=None):
        # Both samplers understand the checkpoint layout and resume mid-epoch.
        if shuffle:
            sampler = RandomSampler(dataset, checkpoint)
        else:
            sampler = SequentialSampler(dataset, checkpoint)
        self.checkpoint_batch_idx = checkpoint['batch_idx'] if checkpoint is not None else 0
        # Bug fix: `worker_init_fn` was accepted but a hard-coded None was
        # forwarded to DataLoader, silently discarding the caller's function.
        super(CheckpointDataLoader, self).__init__(dataset, sampler=sampler, shuffle=False, batch_size=batch_size, num_workers=num_workers, drop_last=drop_last, pin_memory=pin_memory, timeout=timeout, worker_init_fn=worker_init_fn)
|
def copy_code(output_folder, curr_folder, code_folder='code'):
    """Snapshot the source tree into the experiment output folder.

    Copies all top-level .py files plus the 'dsr' and 'configs' packages
    (skipping caches/temp files) into `output_folder/code_folder`.
    """
    dest_root = osp.join(output_folder, code_folder)
    if not osp.exists(dest_root):
        os.makedirs(dest_root)
    logger.info('Copying main files ...')
    for fname in os.listdir(curr_folder):
        if fname.endswith('.py'):
            shutil.copy2(osp.join(curr_folder, fname), osp.join(dest_root, fname))
    logger.info('Copying the rest of the source code ...')
    for sub in ('dsr', 'configs'):
        src = osp.join(curr_folder, sub)
        dst = osp.join(dest_root, osp.split(src)[1])
        # replace any stale snapshot from a previous run
        if os.path.exists(dst):
            shutil.rmtree(dst)
        shutil.copytree(src, dst, ignore=ignore_patterns('*.pyc', 'tmp*', '__pycache__'))
|
def prepare_statedict(state_dict):
    """Strip 'model.', 'backbone.' and 'head.' prefixes from checkpoint keys.

    Mirrors the wrapping applied when the model was saved inside a Lightning
    module, so the weights load into the bare backbone/head. Key order is
    preserved.
    """
    from collections import OrderedDict
    cleaned = OrderedDict()
    for key, value in state_dict.items():
        new_key = key
        # same order and substring semantics as the original three ifs
        for token, prefix in (('model', 'model.'), ('backbone', 'backbone.'), ('head', 'head.')):
            if token in new_key:
                new_key = new_key.replace(prefix, '')
        cleaned[new_key] = value
    return cleaned
|
def add_smpl_params_to_dict(state_dict):
    """Inject the mean SMPL pose/shape/cam as init buffers into a state dict.

    Used when loading a bare state dict that lacks the hmr_head init buffers.
    Mutates and returns `state_dict`.
    """
    mean = np.load(SMPL_MEAN_PARAMS)
    state_dict['init_pose'] = torch.from_numpy(mean['pose'][:]).unsqueeze(0)
    state_dict['init_shape'] = torch.from_numpy(mean['shape'][:].astype('float32')).unsqueeze(0)
    state_dict['init_cam'] = torch.from_numpy(mean['cam']).unsqueeze(0)
    return state_dict
|
def set_seed(seed_value):
    """Seed all RNGs through pytorch-lightning; negative values disable seeding."""
    if seed_value < 0:
        return
    logger.info(f'Seed value for the experiment {seed_value}')
    os.environ['PYTHONHASHSEED'] = str(seed_value)
    pl.trainer.seed_everything(seed_value)
|
def load_pretrained_model(model, pt_file, strict=False, overwrite_shape_mismatch=True):
    """Load a Lightning checkpoint's 'state_dict' into `model`.

    On a RuntimeError (shape mismatch) and overwrite_shape_mismatch=True,
    drop every mismatched key and retry non-strictly; otherwise re-raise.
    """
    state_dict = torch.load(pt_file)['state_dict']
    try:
        model.load_state_dict(state_dict, strict=strict)
    except RuntimeError:
        if not overwrite_shape_mismatch:
            raise RuntimeError('there are shape inconsistencies between pretrained ckpt and current ckpt')
        model_state = model.state_dict()
        filtered = state_dict.copy()
        for key in state_dict.keys():
            if key not in model_state:
                continue
            if model_state[key].shape != state_dict[key].shape:
                logger.warning(f'size mismatch for "{key}": copying a param with shape {state_dict[key].shape} from checkpoint, the shape in current model is {model_state[key].shape}')
                del filtered[key]
        model.load_state_dict(filtered, strict=False)
|
def main(hparams):
    # Entry point: configure logging, snapshot the code, build the Lightning
    # module and run either testing or training depending on hparams.RUN_TEST.
    log_dir = hparams.LOG_DIR
    device = ('cuda' if torch.cuda.is_available() else 'cpu')
    set_seed(hparams.SEED_VALUE)
    # mirror console logging into a file next to the experiment outputs
    logger.add(os.path.join(log_dir, 'train.log'), level='INFO', colorize=False)
    # snapshot the source tree for reproducibility
    copy_code(output_folder=log_dir, curr_folder=os.path.dirname(os.path.abspath(__file__)))
    # NOTE(review): get_device_properties raises for device == 'cpu' — this
    # line assumes CUDA is available despite the fallback above; confirm.
    logger.info(torch.cuda.get_device_properties(device))
    logger.info(f'''Hyperparameters:
{hparams}''')
    model = LitModule(hparams=hparams).to(device)
    # checkpointing / tensorboard only when PL logging is enabled
    ckpt_callback = False
    tb_logger = None
    if (hparams.PL_LOGGING == True):
        ckpt_callback = ModelCheckpoint(monitor='val_loss', verbose=True, save_top_k=5, mode='min')
        tb_logger = TensorBoardLogger(save_dir=log_dir, name='tb_logs')
    # NOTE(review): log_save_interval / checkpoint_callback /
    # reload_dataloaders_every_epoch / log_gpu_memory target an older
    # pytorch-lightning API; newer PL versions renamed or removed them.
    trainer = pl.Trainer(gpus=1, logger=tb_logger, max_epochs=hparams.TRAINING.MAX_EPOCHS, log_save_interval=hparams.TRAINING.LOG_SAVE_INTERVAL, terminate_on_nan=True, default_root_dir=log_dir, check_val_every_n_epoch=hparams.TRAINING.CHECK_VAL_EVERY_N_EPOCH, checkpoint_callback=ckpt_callback, reload_dataloaders_every_epoch=hparams.TRAINING.RELOAD_DATALOADERS_EVERY_EPOCH, resume_from_checkpoint=hparams.TRAINING.RESUME, num_sanity_val_steps=0, log_gpu_memory=True)
    if hparams.RUN_TEST:
        logger.info('*** Started testing ***')
        trainer.test(model=model)
    else:
        logger.info('*** Started training ***')
        trainer.fit(model)
        trainer.test()
|
def match_ann(fileName):
    """Classify the right-hand sign in one OpenPose keypoint JSON file.

    Reads the first person's right-hand keypoints; if the summed keypoint
    confidence clears an empirical threshold, normalizes the points,
    rebuilds the label encoder/scaler from the sqlite dataset and runs the
    module-global `model` classifier.

    Returns:
        The predicted label string, or 'no confidence' when the detection
        is too weak.
    """
    # Parse OpenPose output; context manager closes the file handle
    # (it was previously leaked).
    with open(fileName) as fp:
        js = json.load(fp)
    for items in js['people']:
        handRight = items['hand_right_keypoints_2d']
        confPoints = helper.confidencePoints(handRight)
        confidence = helper.confidence(confPoints)
        # empirical confidence threshold for a usable detection
        if confidence > 10.2:
            handPoints = helper.removePoints(handRight)
            # span from point 0 to point 9 (indices 18/19 in the flat list);
            # presumably a scale reference — TODO confirm
            p1 = [handPoints[0], handPoints[1]]
            p2 = [handPoints[18], handPoints[19]]
            distance = math.sqrt(((p1[0] - p2[0]) ** 2) + ((p1[1] - p2[1]) ** 2))
            # NOTE(review): scalePoints result is unused — kept for parity
            (Result, Points) = scale.scalePoints(handPoints, distance)
            (handRightResults, handRightPoints) = move.centerPoints(handPoints)
            # Fetch training features/labels from the dataset db; close the
            # connection afterwards (it was previously leaked).
            connection = sqlite3.connect('data\\db\\main_dataset.db')
            try:
                crsr = connection.cursor()
                sql = 'SELECT x1,y1'
                for x in range(2, 22):
                    sql = sql + ',x' + str(x) + ',y' + str(x)
                sql = sql + ' FROM rightHandDataset WHERE 1'
                crsr.execute(sql)
                feature_res = crsr.fetchall()
                crsr.execute('SELECT label FROM rightHandDataset WHERE 1')
                label_res = crsr.fetchall()
            finally:
                connection.close()
            features = list(np.asarray(feature_res))
            labels = list(label_res)
            # Rebuild encoder + scaler as at training time.
            # NOTE(review): refitting on every call is wasteful, and the
            # scaler is fit on a *random* split — consider persisting both
            # alongside the trained model.
            le = preprocessing.LabelEncoder()
            label_encoded = to_categorical(le.fit_transform(labels))
            (X_train, X_test, y_train, y_test) = train_test_split(features, label_encoded, test_size=0.2)
            scaler = StandardScaler().fit(X_train)
            X_train = scaler.transform(X_train)
            X_test = scaler.transform(X_test)
            y_pred = model.predict(scaler.transform(np.array([handRightResults])))
            C = np.argmax(y_pred)
            result = le.inverse_transform([C])
            return result[0]
        else:
            return 'no confidence'
|
def signal_handler(signal, frame):
    """SIGINT handler: best-effort removal of temp folders, then exit 0."""
    for folder in ('Keypoints', 'gui\\captured_images', 'gui\\temp_images'):
        shutil.rmtree(folder, ignore_errors=True, onerror=handleRemoveReadonly)
    print('All done')
    sys.exit(0)
|
def handleRemoveReadonly(func, path, exc):
    """shutil.rmtree onerror callback: clear the read-only bit and retry.

    Args:
        func: the os function that failed (os.rmdir / os.remove).
        path: the path it failed on.
        exc: sys.exc_info()-style tuple from rmtree.
    """
    excvalue = exc[1]
    if ((func in (os.rmdir, os.remove)) and (excvalue.errno == errno.EACCES)):
        # make the path writable for everyone, then retry the failed op
        os.chmod(path, ((stat.S_IRWXU | stat.S_IRWXG) | stat.S_IRWXO))
        func(path)
    else:
        # Fix: re-raise the original error instead of a bare, info-free
        # Exception (OSError is an Exception subclass, so existing broad
        # handlers still catch it).
        raise excvalue
|
def plotPose(posePoints, handRightPoints, handLeftPoints):
    """Draw the body skeleton and both hand skeletons on the black background.

    Each *Points argument is an indexable sequence of (x, y) tuples (falsy
    entries mark missing keypoints). Returns the rendered BGR image.
    """
    POSE_PAIRS = [[1, 0], [1, 2], [1, 5], [2, 3], [3, 4], [5, 6], [6, 7], [1, 8], [0, 15], [15, 17], [0, 16], [16, 18]]
    HAND_PAIRS = [[0, 1], [1, 2], [2, 3], [3, 4], [0, 5], [5, 6], [6, 7], [7, 8], [0, 9], [9, 10], [10, 11], [11, 12], [0, 13], [13, 14], [14, 15], [15, 16], [0, 17], [17, 18], [18, 19], [19, 20]]
    colors = [[0, 0, 130], [0, 0, 175], [0, 0, 210], [0, 0, 250], [0, 200, 160], [0, 180, 150], [0, 230, 186], [0, 255, 255], [82, 201, 8], [82, 204, 0], [92, 230, 0], [102, 252, 6], [197, 88, 17], [204, 82, 0], [179, 71, 0], [227, 94, 5], [204, 0, 163], [200, 0, 163], [196, 0, 163], [230, 0, 184]]
    frame = cv2.imread('PSL\\BLACK_background.jpg')
    # Body links: skip a link when either endpoint is missing or at (0, 0).
    # As before, the color index advances only for links actually drawn.
    color_idx = 0
    for a, b in POSE_PAIRS:
        pa, pb = posePoints[a], posePoints[b]
        if pa and pb and pa[0] != 0 and pa[1] != 0 and pb[0] != 0 and pb[1] != 0:
            cv2.line(frame, pa, pb, colors[color_idx], 10)
            cv2.circle(frame, pa, 5, (0, 0, 255), thickness=10, lineType=cv2.FILLED)
            cv2.circle(frame, pb, 5, (255, 255, 255), thickness=15, lineType=cv2.FILLED)
            color_idx += 1
    # Hands: identical link set and colors, right hand first, then left.
    for hand in (handRightPoints, handLeftPoints):
        color_idx = 0
        for a, b in HAND_PAIRS:
            pa, pb = hand[a], hand[b]
            if pa and pb:
                cv2.line(frame, pa, pb, colors[color_idx], 10)
                cv2.circle(frame, pa, 5, (0, 0, 255), thickness=3, lineType=cv2.FILLED)
                cv2.circle(frame, pb, 5, (255, 255, 255), thickness=4, lineType=cv2.FILLED)
                color_idx += 1
    return frame
|
@eel.expose
def capture_alphabet_dataset(sec):
    # Capture `sec` seconds of OpenPose hand keypoints, delete low-confidence
    # frames, render each kept frame to a JPG for the GUI, and record the JSON
    # file names so delete_Image() can remove individual captures later.
    global remfileNames
    '\n ----------------------Start OpenPoseDemo.exe----------------------\n --render_pose 0 --display 0\n '
    # launch the OpenPose demo writing per-frame JSON keypoints
    os.chdir('bin\\openpose')
    print('Starting OpenPose')
    subprocess.Popen('bin\\OpenPoseDemo.exe --hand --write_json ..\\..\\Keypoints --number_people_max 1', shell=True)
    os.chdir('..\\..')
    '\n ----------------------Creating temp folder----------------------\n '
    # working folders + a seed keypoints file copied into place
    dirName = 'Keypoints'
    init_file = 'PSL\\000000000000_keypoints.json'
    try:
        os.mkdir(dirName)
        os.mkdir('gui\\captured_images')
        os.mkdir('gui\\temp_images')
        shutil.copy(init_file, dirName)
        print('Directory ', dirName, ' Created ')
    except FileExistsError:
        print('Directory ', dirName, ' already exists')
    '\n ----------------------Live View----------------------\n '
    # let OpenPose run for `sec` seconds without blocking the eel event loop,
    # then kill the demo process (Windows-only taskkill)
    t = (time.time() + sec)
    while (time.time() <= t):
        eel.sleep(0.05)
    os.system('taskkill /f /im OpenPoseDemo.exe')
    '\n ---------------------- Auto Remove files----------------------\n '
    # drop frames whose summed right-hand confidence is below threshold
    conf_thershold = 10
    fileNames = []
    for entry in os.scandir('Keypoints'):
        if entry.is_file():
            if (os.path.splitext(entry)[1] == '.json'):
                fileNames.append(entry.name)
    for x in range(len(fileNames)):
        js = json.loads(open(('Keypoints\\' + fileNames[x])).read())
        for items in js['people']:
            handRight = items['hand_right_keypoints_2d']
            confPoints = helper.confidencePoints(handRight)
            confidence = helper.confidence(confPoints)
            print(confidence)
            if (confidence < conf_thershold):
                os.remove(('Keypoints\\' + fileNames[x]))
    '\n ----------------------plot and save----------------------\n '
    # re-scan the surviving JSON files and render each hand skeleton to a JPG
    background = 'big_background.png'
    fileNames = []
    for entry in os.scandir('Keypoints'):
        if entry.is_file():
            if (os.path.splitext(entry)[1] == '.json'):
                fileNames.append(entry.name)
    frame = cv2.imread(background)
    i = 1
    for x in range(len(fileNames)):
        js = json.loads(open(('Keypoints\\' + fileNames[x])).read())
        for items in js['people']:
            handRight = items['hand_right_keypoints_2d']
            handPoints = helper.removePoints(handRight)
            # span from point 0 to point 9 (flat indices 18/19); presumably a
            # scale reference for normalization — TODO confirm
            p1 = [handPoints[0], handPoints[1]]
            p2 = [handPoints[18], handPoints[19]]
            distance = math.sqrt((((p1[0] - p2[0]) ** 2) + ((p1[1] - p2[1]) ** 2)))
            (Result, Points) = scale.dummy_scalePoints(handPoints, distance)
            (handRightResults, handRightPoints) = move.dummy_centerPoints(Result)
            frame = plot.plot_dataset(handRightPoints, 'black')
            cv2.imwrite((('gui\\captured_images\\' + str(i)) + '.jpg'), frame)
            i += 1
    '\n ----------------------get ref to delete files----------------------\n '
    # remember the JSON names so delete_Image() can map GUI index -> file
    for entry in os.scandir('Keypoints'):
        if entry.is_file():
            if (os.path.splitext(entry)[1] == '.json'):
                remfileNames.append(entry.name)
    '\n ----------------------end capture_alphabet_dataset(sec)----------------------\n '
|
@eel.expose
def getFileCount():
    """Return the number of captured-image entries as a string for the GUI."""
    return str(sum(1 for _ in os.scandir('gui\\captured_images')))
|
@eel.expose
def delete_Image(i):
    """Delete the i-th captured frame: its keypoint JSON and its GUI thumbnail.

    `i` is the 1-based index shown in the GUI; `remfileNames` holds the JSON
    names recorded by capture_alphabet_dataset().
    """
    global remfileNames
    print(remfileNames)
    try:
        os.remove('Keypoints\\' + remfileNames[(i - 1)])
        os.remove('gui\\captured_images\\' + str(i) + '.jpg')
    except (IndexError, TypeError, OSError):
        # Narrowed from a bare `except:` — still tolerates a stale index or a
        # missing file, but no longer hides KeyboardInterrupt/SystemExit.
        print('file not found')
|
@eel.expose
def getlabel(a):
    """Save the captured keypoints under the dataset folder matching label `a`.

    Finds the alphabet subfolder named like the stripped label, copies the
    whole 'Keypoints' folder into a new timestamped directory inside it, then
    removes the temporary capture folders.
    """
    label = a.strip()
    print(label)
    for entry in os.scandir('data\\datasets\\alphabets_dataset'):
        if entry.name == label:
            timestamp = str(datetime.timestamp(datetime.now()))
            dir_name = 'data\\datasets\\alphabets_dataset\\' + entry.name + '\\' + timestamp
            try:
                os.mkdir(dir_name)
                print('Directory ', dir_name, ' Created ')
            except FileExistsError:
                print('Directory ', dir_name, ' already exists')
            # reuse dir_name instead of rebuilding the identical path
            copy_tree('Keypoints', dir_name)
    # Remove the temp capture folders (runs whether or not a label matched).
    try:
        shutil.rmtree('Keypoints', ignore_errors=True, onerror=handleRemoveReadonly)
        shutil.rmtree('gui\\captured_images', ignore_errors=True, onerror=handleRemoveReadonly)
        shutil.rmtree('gui\\temp_images', ignore_errors=True, onerror=handleRemoveReadonly)
        print('Keypoints_temp folder removed')
    except OSError:
        # Narrowed from a bare `except:`; cleanup stays best-effort.
        print('not removed')
|
@eel.expose
def db_train():
    """GUI hook: kick off a model re-training pass over the database."""
    retrain.re_train(1)
|
def signal_handler(signal, frame):
    """SIGINT handler: drop the temporary Keypoints folder and exit cleanly."""
    shutil.rmtree('Keypoints', ignore_errors=True, onerror=handleRemoveReadonly)
    print('All done')
    sys.exit(0)
|
def handleRemoveReadonly(func, path, exc):
    """shutil.rmtree onerror callback: clear the read-only bit and retry.

    Args:
        func: the os function that failed (os.rmdir / os.remove).
        path: the path it failed on.
        exc: sys.exc_info()-style tuple from rmtree.
    """
    excvalue = exc[1]
    if ((func in (os.rmdir, os.remove)) and (excvalue.errno == errno.EACCES)):
        # make the path writable for everyone, then retry the failed op
        os.chmod(path, ((stat.S_IRWXU | stat.S_IRWXG) | stat.S_IRWXO))
        func(path)
    else:
        # Fix: re-raise the original error instead of a bare, info-free
        # Exception (OSError is an Exception subclass, so existing broad
        # handlers still catch it).
        raise excvalue
|
def json_files(Dir):
    """Collect .json files and sub-directories up to three levels deep.

    Pre-order DFS, same traversal depth as the original nested scandir
    loops: directories at depth 0-2 are recorded, but depth-2 directories
    are not descended into.

    Returns:
        (files, fileNames, folders): full .json paths, their basenames,
        and every directory path encountered.
    """
    files = []
    fileNames = []
    folders = []

    def _scan(path, depth):
        for entry in os.scandir(path):
            if entry.is_dir():
                folders.append(entry.path)
                # original code only descended two levels below Dir
                if depth < 2:
                    _scan(entry.path, depth + 1)
            elif entry.is_file():
                if os.path.splitext(entry)[1] == '.json':
                    files.append(entry.path)
                    fileNames.append(entry.name)

    _scan(Dir, 0)
    return (files, fileNames, folders)
|
def removePoints(handRight):
    """Drop the confidence channel from an OpenPose keypoint triple list.

    Input is [x0, y0, c0, x1, y1, c1, ...]; output is [x0, y0, x1, y1, ...].
    """
    xs = handRight[0::3]
    ys = handRight[1::3]
    flat = []
    # index pairing (not zip) so malformed lengths fail the same way
    for i in range(len(xs)):
        flat.append(xs[i])
        flat.append(ys[i])
    return flat
|
def getCoordPoints(handRight):
    """Convert an OpenPose [x, y, c, ...] triple list into int (x, y) tuples."""
    xs = handRight[0::3]
    ys = handRight[1::3]
    return [(int(xs[i]), int(ys[i])) for i in range(len(xs))]
|
def confidencePoints(handRight):
    """Extract the confidence values (every third element, starting at index 2)."""
    return list(handRight[2::3])
|
def confidence(handRight):
    """Return the total of the keypoint confidence values.

    Fixed: no longer shadows the builtin `sum`, and an empty list now
    returns 0 instead of raising IndexError.
    """
    return sum(handRight)
|
def seperate_points(handRight):
    """Flatten [(x, y), ...] point pairs into [x0, y0, x1, y1, ...]."""
    flat = []
    for point in handRight:
        flat.append(point[0])
        flat.append(point[1])
    return flat
|
def join_points(handRight):
    """Pair a flat [x0, y0, x1, y1, ...] list into int (x, y) tuples."""
    xs = handRight[0::2]
    ys = handRight[1::2]
    # index pairing (not zip) so odd-length input fails the same way
    return [(int(xs[i]), int(ys[i])) for i in range(len(xs))]
|
def isolatePoints(handRight):
    """Translate flat [x0, y0, ...] coords so min-x/min-y map to 10.

    Returns (flat, points): the shifted flat list and the matching list of
    int (x, y) tuples.
    """
    xs = handRight[0::2]
    ys = handRight[1::2]
    # shift = (min - 10), so the smallest coordinate lands at 10
    shift_x = min(xs, key=float) - 10
    shift_y = min(ys, key=float) - 10
    flat = []
    points = []
    for i in range(len(xs)):
        x = xs[i] - shift_x
        y = ys[i] - shift_y
        points.append((int(x), int(y)))
        flat.append(x)
        flat.append(y)
    return (flat, points)
|
def centerPoints(handRight):
    """Translate flat [x0, y0, ...] coords so the first point lands on (150, 150).

    Returns (flat, points): the shifted flat list and the matching list of
    int (x, y) tuples.
    """
    ref_x = 150
    ref_y = 150
    xs = handRight[0::2]
    ys = handRight[1::2]
    # offset of the anchor (first point) from the reference location
    dx = xs[0] - ref_x
    dy = ys[0] - ref_y
    flat = []
    points = []
    for i in range(len(xs)):
        x = xs[i] - dx
        y = ys[i] - dy
        points.append((int(x), int(y)))
        flat.append(x)
        flat.append(y)
    return (flat, points)
|
def dummy_centerPoints(handRight):
    """Like centerPoints, but anchors the first point on (600, 600).

    Used for the larger dataset-rendering canvas. Returns (flat, points).
    """
    ref_x = 600
    ref_y = 600
    xs = handRight[0::2]
    ys = handRight[1::2]
    dx = xs[0] - ref_x
    dy = ys[0] - ref_y
    flat = []
    points = []
    for i in range(len(xs)):
        x = xs[i] - dx
        y = ys[i] - dy
        points.append((int(x), int(y)))
        flat.append(x)
        flat.append(y)
    return (flat, points)
|
def movePoints(handRight, addX, addY):
    """Shift every point by (addX, addY), anchored on the first point.

    Returns (flat, points): the shifted flat list and the matching list of
    int (x, y) tuples.
    """
    # reference = first point moved by the requested offsets
    ref_x = handRight[0] + addX
    ref_y = handRight[1] + addY
    xs = handRight[0::2]
    ys = handRight[1::2]
    # computed via the reference (not simplified to -addX/-addY) to keep
    # the exact floating-point arithmetic of the original
    dx = xs[0] - ref_x
    dy = ys[0] - ref_y
    flat = []
    points = []
    for i in range(len(xs)):
        x = xs[i] - dx
        y = ys[i] - dy
        points.append((int(x), int(y)))
        flat.append(x)
        flat.append(y)
    return (flat, points)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.