code stringlengths 17 6.64M |
|---|
class VGG(nn.Module):
    """VGG19-feature perceptual loss.

    Compares SR and HR images in the feature space of a pretrained VGG19,
    truncated after relu2_2 (``conv_index='22'``) or relu5_4
    (``conv_index='54'``).

    Args:
        conv_index: '22' or '54', selecting the truncation point.
        rgb_range: value range of the input images (e.g. 1 or 255).
    """

    def __init__(self, conv_index, rgb_range=1):
        super(VGG, self).__init__()
        vgg_features = models.vgg19(pretrained=True).features
        modules = [m for m in vgg_features]
        if conv_index == '22':
            self.vgg = nn.Sequential(*modules[:8])
        elif conv_index == '54':
            self.vgg = nn.Sequential(*modules[:35])
        else:
            # Fail fast: the original left self.vgg undefined for other
            # values, deferring the failure to the first forward() call.
            raise ValueError("conv_index must be '22' or '54', got {!r}".format(conv_index))
        # ImageNet statistics, scaled to the working RGB range.
        vgg_mean = (0.485, 0.456, 0.406)
        vgg_std = (0.229 * rgb_range, 0.224 * rgb_range, 0.225 * rgb_range)
        self.sub_mean = common.MeanShift(rgb_range, vgg_mean, vgg_std)
        # Freeze the feature extractor. The original `self.vgg.requires_grad
        # = False` only created a plain attribute on the Module and did NOT
        # stop gradients from flowing into the VGG weights.
        for p in self.vgg.parameters():
            p.requires_grad = False

    def forward(self, sr, hr):
        """Return the MSE between VGG features of `sr` and (detached) `hr`."""
        def _forward(x):
            x = self.sub_mean(x)
            return self.vgg(x)
        vgg_sr = _forward(sr)
        with torch.no_grad():
            vgg_hr = _forward(hr.detach())
        return F.mse_loss(vgg_sr, vgg_hr)
|
def default_conv(in_channels, out_channels, kernel_size, bias=True):
    """Return a 'same'-padded 2-D convolution (odd kernel sizes)."""
    padding = kernel_size // 2
    return nn.Conv2d(in_channels, out_channels, kernel_size, padding=padding, bias=bias)
|
class MeanShift(nn.Conv2d):
    """Fixed 1x1 convolution that subtracts (sign=-1) or adds back (sign=+1)
    a per-channel RGB mean, divided by a per-channel std.

    Args:
        rgb_range: value range of the images (scales the mean).
        rgb_mean: per-channel mean, in [0, 1].
        rgb_std: per-channel std divisor.
        sign: -1 to normalize, +1 to de-normalize.
    """

    def __init__(self, rgb_range, rgb_mean, rgb_std, sign=(- 1)):
        super(MeanShift, self).__init__(3, 3, kernel_size=1)
        std = torch.Tensor(rgb_std)
        # Identity mixing scaled by 1/std per output channel.
        self.weight.data = torch.eye(3).view(3, 3, 1, 1)
        self.weight.data.div_(std.view(3, 1, 1, 1))
        self.bias.data = ((sign * rgb_range) * torch.Tensor(rgb_mean))
        self.bias.data.div_(std)
        # Freeze the layer. The original `self.requires_grad = False` only
        # set a plain attribute and left the parameters trainable.
        for p in self.parameters():
            p.requires_grad = False
|
class BasicBlock(nn.Sequential):
    """Conv -> optional BatchNorm -> optional activation, as an nn.Sequential."""

    def __init__(self, in_channels, out_channels, kernel_size, stride=1, bias=True, bn=False, act=nn.ReLU(True)):
        layers = [
            nn.Conv2d(in_channels, out_channels, kernel_size,
                      padding=(kernel_size // 2), stride=stride, bias=bias)
        ]
        if bn:
            layers.append(nn.BatchNorm2d(out_channels))
        if act is not None:
            layers.append(act)
        super(BasicBlock, self).__init__(*layers)
|
class ResBlock(nn.Module):
    """Residual block: conv -> act -> conv, with a skip connection whose
    branch output is scaled by `res_scale` before being added to the input.
    """

    def __init__(self, conv, n_feat, kernel_size, bias=True, bn=False, act=nn.ReLU(True), res_scale=1):
        super(ResBlock, self).__init__()
        layers = []
        for idx in range(2):
            layers.append(conv(n_feat, n_feat, kernel_size, bias=bias))
            if bn:
                layers.append(nn.BatchNorm2d(n_feat))
            # Activation only between the two convolutions.
            if idx == 0:
                layers.append(act)
        self.body = nn.Sequential(*layers)
        self.res_scale = res_scale

    def forward(self, x):
        branch = self.body(x).mul(self.res_scale)
        return branch + x
|
class Upsampler(nn.Sequential):
    """Sub-pixel (PixelShuffle) upsampler.

    Supports any power-of-two scale (as repeated x2 stages) or scale 3;
    anything else raises NotImplementedError.
    """

    def __init__(self, conv, scale, n_feat, bn=False, act=False, bias=True):
        layers = []
        if (scale & (scale - 1)) == 0:
            # Power of two: stack log2(scale) conv + PixelShuffle(2) stages.
            for _ in range(int(math.log(scale, 2))):
                layers.append(conv(n_feat, 4 * n_feat, 3, bias))
                layers.append(nn.PixelShuffle(2))
                if bn:
                    layers.append(nn.BatchNorm2d(n_feat))
                if act:
                    layers.append(act())
        elif scale == 3:
            layers.append(conv(n_feat, 9 * n_feat, 3, bias))
            layers.append(nn.PixelShuffle(3))
            if bn:
                layers.append(nn.BatchNorm2d(n_feat))
            if act:
                layers.append(act())
        else:
            raise NotImplementedError
        super(Upsampler, self).__init__(*layers)
|
def make_model(args, parent=False):
    """Framework factory hook: build a DDBPN model from parsed arguments."""
    model = DDBPN(args)
    return model
|
def projection_conv(in_channels, out_channels, scale, up=True):
    """Return the (transposed) convolution used by DBPN projection units.

    Kernel/stride/padding follow the DBPN paper's settings per scale;
    only scales 2, 4 and 8 are supported (KeyError otherwise).
    """
    kernel_size, stride, padding = {
        2: (6, 2, 2),
        4: (8, 4, 2),
        8: (12, 8, 2),
    }[scale]
    conv_f = nn.ConvTranspose2d if up else nn.Conv2d
    return conv_f(in_channels, out_channels, kernel_size, stride=stride, padding=padding)
|
class DenseProjection(nn.Module):
    """DBPN projection unit (up- or down-projection).

    Optionally applies a 1x1 bottleneck to `nr` channels first, then
    project -> back-project -> project the residual, and sums the two
    projections.
    """

    def __init__(self, in_channels, nr, scale, up=True, bottleneck=True):
        super(DenseProjection, self).__init__()
        if bottleneck:
            self.bottleneck = nn.Sequential(nn.Conv2d(in_channels, nr, 1), nn.PReLU(nr))
            inter_channels = nr
        else:
            self.bottleneck = None
            inter_channels = in_channels
        self.conv_1 = nn.Sequential(projection_conv(inter_channels, nr, scale, up), nn.PReLU(nr))
        self.conv_2 = nn.Sequential(projection_conv(nr, inter_channels, scale, not up), nn.PReLU(inter_channels))
        self.conv_3 = nn.Sequential(projection_conv(inter_channels, nr, scale, up), nn.PReLU(nr))

    def forward(self, x):
        if self.bottleneck is not None:
            x = self.bottleneck(x)
        projected = self.conv_1(x)
        back = self.conv_2(projected)
        residual = back.sub(x)
        correction = self.conv_3(residual)
        return projected.add(correction)
|
class DDBPN(nn.Module):
    """Dense Deep Back-Projection Network (D-DBPN).

    Alternates dense up- and down-projection units and reconstructs the SR
    image from the concatenation of all high-resolution feature maps.
    """

    def __init__(self, args):
        super(DDBPN, self).__init__()
        # Single-scale network: only the first requested scale is used.
        scale = args.scale[0]
        n0 = 128  # channels of the first feature-extraction conv
        nr = 32   # channels produced by each projection unit
        self.depth = 6  # number of up-projection stages
        rgb_mean = (0.4488, 0.4371, 0.404)  # DIV2K channel means
        rgb_std = (1.0, 1.0, 1.0)
        self.sub_mean = common.MeanShift(args.rgb_range, rgb_mean, rgb_std)
        # Initial feature extraction: 3x3 conv to n0, then 1x1 reduce to nr.
        initial = [nn.Conv2d(args.n_colors, n0, 3, padding=1), nn.PReLU(n0), nn.Conv2d(n0, nr, 1), nn.PReLU(nr)]
        self.initial = nn.Sequential(*initial)
        self.upmodules = nn.ModuleList()
        self.downmodules = nn.ModuleList()
        # Dense connectivity: each stage consumes the concatenation of all
        # previous outputs, so input channels grow by nr per stage. The
        # bottleneck flag (i > 1 / i != 0) is off for the first unit(s),
        # which already receive nr channels.
        channels = nr
        for i in range(self.depth):
            self.upmodules.append(DenseProjection(channels, nr, scale, True, (i > 1)))
            if (i != 0):
                channels += nr
        channels = nr
        for i in range((self.depth - 1)):
            self.downmodules.append(DenseProjection(channels, nr, scale, False, (i != 0)))
            channels += nr
        # Reconstruct the image from all `depth` high-res feature maps.
        reconstruction = [nn.Conv2d((self.depth * nr), args.n_colors, 3, padding=1)]
        self.reconstruction = nn.Sequential(*reconstruction)
        self.add_mean = common.MeanShift(args.rgb_range, rgb_mean, rgb_std, 1)

    def forward(self, x):
        x = self.sub_mean(x)
        x = self.initial(x)
        h_list = []  # high-resolution features from up-projections
        l_list = []  # low-resolution features from down-projections
        for i in range((self.depth - 1)):
            if (i == 0):
                l = x
            else:
                l = torch.cat(l_list, dim=1)
            h_list.append(self.upmodules[i](l))
            l_list.append(self.downmodules[i](torch.cat(h_list, dim=1)))
        # Final up-projection consumes every low-res feature computed so far.
        h_list.append(self.upmodules[(- 1)](torch.cat(l_list, dim=1)))
        out = self.reconstruction(torch.cat(h_list, dim=1))
        out = self.add_mean(out)
        return out
|
def make_model(args, parent=False):
    """Framework factory hook: build an EDSR model from parsed arguments."""
    model = EDSR(args)
    return model
|
class EDSR(nn.Module):
    """Enhanced Deep Super-Resolution network (EDSR).

    Structure: head conv -> residual body (with a global skip connection)
    -> pixel-shuffle upsampling tail. Mean shift layers normalize by the
    DIV2K channel means on the way in and out.
    """

    def __init__(self, args, conv=common.default_conv):
        super(EDSR, self).__init__()
        n_resblock = args.n_resblocks
        n_feats = args.n_feats
        kernel_size = 3
        scale = args.scale[0]
        act = nn.ReLU(True)
        rgb_mean = (0.4488, 0.4371, 0.404)  # DIV2K channel means
        rgb_std = (1.0, 1.0, 1.0)
        self.sub_mean = common.MeanShift(args.rgb_range, rgb_mean, rgb_std)
        m_head = [conv(args.n_colors, n_feats, kernel_size)]
        m_body = [common.ResBlock(conv, n_feats, kernel_size, act=act, res_scale=args.res_scale) for _ in range(n_resblock)]
        m_body.append(conv(n_feats, n_feats, kernel_size))
        m_tail = [common.Upsampler(conv, scale, n_feats, act=False), conv(n_feats, args.n_colors, kernel_size)]
        self.add_mean = common.MeanShift(args.rgb_range, rgb_mean, rgb_std, 1)
        self.head = nn.Sequential(*m_head)
        self.body = nn.Sequential(*m_body)
        self.tail = nn.Sequential(*m_tail)

    def forward(self, x):
        x = self.sub_mean(x)
        x = self.head(x)
        res = self.body(x)
        res += x  # global residual connection around the body
        x = self.tail(res)
        x = self.add_mean(x)
        return x

    def load_state_dict(self, state_dict, strict=True):
        """Load weights while tolerating mismatches in the scale-specific tail.

        Lets checkpoints trained at a different upscaling factor be loaded:
        any copy failure or unexpected key whose name contains 'tail' is
        skipped silently; all other mismatches raise.
        """
        own_state = self.state_dict()
        for (name, param) in state_dict.items():
            if (name in own_state):
                if isinstance(param, nn.Parameter):
                    param = param.data
                try:
                    own_state[name].copy_(param)
                except Exception:
                    # Only tail layers are allowed to differ in shape.
                    if (name.find('tail') == (- 1)):
                        raise RuntimeError('While copying the parameter named {}, whose dimensions in the model are {} and whose dimensions in the checkpoint are {}.'.format(name, own_state[name].size(), param.size()))
            elif strict:
                if (name.find('tail') == (- 1)):
                    raise KeyError('unexpected key "{}" in state_dict'.format(name))
|
def make_model(args, parent=False):
    """Framework factory hook: build an MDSR model from parsed arguments."""
    model = MDSR(args)
    return model
|
class MDSR(nn.Module):
    """Multi-scale Deep Super-Resolution network (MDSR).

    Shares one residual body across scales; each scale in `args.scale` gets
    its own pre-processing blocks and its own upsampler, selected at runtime
    via `set_scale`.
    """

    def __init__(self, args, conv=common.default_conv):
        super(MDSR, self).__init__()
        n_resblocks = args.n_resblocks
        n_feats = args.n_feats
        kernel_size = 3
        self.scale_idx = 0  # which scale branch forward() uses
        act = nn.ReLU(True)
        rgb_mean = (0.4488, 0.4371, 0.404)  # DIV2K channel means
        rgb_std = (1.0, 1.0, 1.0)
        self.sub_mean = common.MeanShift(args.rgb_range, rgb_mean, rgb_std)
        m_head = [conv(args.n_colors, n_feats, kernel_size)]
        # One scale-specific pre-processing branch (two 5x5 res-blocks) per scale.
        self.pre_process = nn.ModuleList([nn.Sequential(common.ResBlock(conv, n_feats, 5, act=act), common.ResBlock(conv, n_feats, 5, act=act)) for _ in args.scale])
        m_body = [common.ResBlock(conv, n_feats, kernel_size, act=act) for _ in range(n_resblocks)]
        m_body.append(conv(n_feats, n_feats, kernel_size))
        # One upsampler per scale.
        self.upsample = nn.ModuleList([common.Upsampler(conv, s, n_feats, act=False) for s in args.scale])
        m_tail = [conv(n_feats, args.n_colors, kernel_size)]
        self.add_mean = common.MeanShift(args.rgb_range, rgb_mean, rgb_std, 1)
        self.head = nn.Sequential(*m_head)
        self.body = nn.Sequential(*m_body)
        self.tail = nn.Sequential(*m_tail)

    def forward(self, x):
        x = self.sub_mean(x)
        x = self.head(x)
        x = self.pre_process[self.scale_idx](x)
        res = self.body(x)
        res += x  # global residual connection around the shared body
        x = self.upsample[self.scale_idx](res)
        x = self.tail(x)
        x = self.add_mean(x)
        return x

    def set_scale(self, scale_idx):
        # Select which pre-process/upsample branch subsequent forwards use.
        self.scale_idx = scale_idx
|
def set_template(args):
    """Overwrite fields of `args` in place according to the training template.

    Template matching is by substring, so several templates can combine
    (e.g. 'EDSR_paper_jpeg' applies both the jpeg and EDSR_paper presets).
    """
    name = args.template
    if 'jpeg' in name:
        args.data_train = 'DIV2K_jpeg'
        args.data_test = 'DIV2K_jpeg'
        args.epochs = 200
        args.lr_decay = 100
    if 'EDSR_paper' in name:
        args.model = 'EDSR'
        args.n_resblocks = 32
        args.n_feats = 256
        args.res_scale = 0.1
    if 'MDSR' in name:
        args.model = 'MDSR'
        args.patch_size = 48
        args.epochs = 650
    if 'DDBPN' in name:
        args.model = 'DDBPN'
        args.patch_size = 128
        args.scale = '4'
        args.data_test = 'Set5'
        args.batch_size = 20
        args.epochs = 1000
        args.lr_decay = 500
        args.gamma = 0.1
        args.weight_decay = 0.0001
        args.loss = '1*MSE'
    if 'GAN' in name:
        args.epochs = 200
        args.lr = 5e-05
        args.lr_decay = 150
|
class Trainer():
    """Training / evaluation loop driver.

    Wires together the data loaders, model, loss, optimizer, scheduler and
    the checkpoint helper `ckp`. The model is expected to return three
    outputs (sr, sr_refine1, sr_refine2); all three are supervised against
    the same HR target.
    """

    def __init__(self, args, loader, my_model, my_loss, ckp):
        self.args = args
        self.scale = args.scale
        self.ckp = ckp
        self.loader_train = loader.loader_train
        self.loader_test = loader.loader_test
        self.model = my_model
        self.loss = my_loss
        self.optimizer = utility.make_optimizer(args, self.model)
        self.scheduler = utility.make_scheduler(args, self.optimizer)
        # Resuming: restore the optimizer state and fast-forward the
        # scheduler to the epoch recorded in the checkpoint log.
        if (self.args.load != '.'):
            self.optimizer.load_state_dict(torch.load(os.path.join(ckp.dir, 'optimizer.pt')))
            for _ in range(len(ckp.log)):
                self.scheduler.step()
        # Loss of the previous epoch; used by the batch-skip heuristic below.
        self.error_last = 100000000.0

    def train(self):
        """Run one training epoch."""
        self.scheduler.step()
        self.loss.step()
        epoch = (self.scheduler.last_epoch + 1)
        lr = self.scheduler.get_lr()[0]
        self.ckp.write_log('[Epoch {}]\tLearning rate: {:.2e}'.format(epoch, Decimal(lr)))
        self.loss.start_log()
        self.model.train()
        (timer_data, timer_model) = (utility.timer(), utility.timer())
        # NOTE: inside the loop `lr` is rebound from the learning rate to the
        # low-resolution image batch.
        for (batch, (lr, hr, _, idx_scale)) in enumerate(self.loader_train):
            (lr, hr) = self.prepare([lr, hr])
            timer_data.hold()
            timer_model.tic()
            self.optimizer.zero_grad()
            (sr, sr_refine1, sr_refine2) = self.model(lr, idx_scale)
            # Supervise the main output and both refinement outputs equally.
            loss = ((self.loss(sr, hr) + self.loss(sr_refine1, hr)) + self.loss(sr_refine2, hr))
            # Skip the parameter update if the loss exploded relative to the
            # previous epoch (guards against destabilizing outlier batches).
            if (loss.item() < (self.args.skip_threshold * self.error_last)):
                loss.backward()
                self.optimizer.step()
            else:
                print('Skip this batch {}! (Loss: {})'.format((batch + 1), loss.item()))
            timer_model.hold()
            if (((batch + 1) % self.args.print_every) == 0):
                self.ckp.write_log('[{}/{}]\t{}\t{:.1f}+{:.1f}s'.format(((batch + 1) * self.args.batch_size), len(self.loader_train.dataset), self.loss.display_loss(batch), timer_model.release(), timer_data.release()))
            timer_data.tic()
        self.loss.end_log(len(self.loader_train))
        self.error_last = self.loss.log[((- 1), (- 1))]

    def test(self):
        """Evaluate PSNR on the test loader for every configured scale."""
        epoch = (self.scheduler.last_epoch + 1)
        self.ckp.write_log('\nEvaluation:')
        self.ckp.add_log(torch.zeros(1, len(self.scale)))
        self.model.eval()
        timer_test = utility.timer()
        with torch.no_grad():
            for (idx_scale, scale) in enumerate(self.scale):
                eval_acc = 0
                eval_acc_refine1 = 0
                eval_acc_refine2 = 0
                self.loader_test.dataset.set_scale(idx_scale)
                tqdm_test = tqdm(self.loader_test, ncols=80)
                for (idx_img, (lr, hr, filename, _)) in enumerate(tqdm_test):
                    filename = filename[0]
                    # Benchmark sets without ground truth ship a 1-element
                    # placeholder HR tensor; skip metric computation then.
                    no_eval = (hr.nelement() == 1)
                    if (not no_eval):
                        (lr, hr) = self.prepare([lr, hr])
                    else:
                        lr = self.prepare([lr])[0]
                    (sr, sr_refine1, sr_refine2) = self.model(lr, idx_scale)
                    sr = utility.quantize(sr, self.args.rgb_range)
                    sr_refine1 = utility.quantize(sr_refine1, self.args.rgb_range)
                    sr_refine2 = utility.quantize(sr_refine2, self.args.rgb_range)
                    save_list = [sr]
                    if (not no_eval):
                        eval_acc += utility.calc_psnr(sr, hr, scale, self.args.rgb_range, benchmark=self.loader_test.dataset.benchmark)
                        eval_acc_refine1 += utility.calc_psnr(sr_refine1, hr, scale, self.args.rgb_range, benchmark=self.loader_test.dataset.benchmark)
                        eval_acc_refine2 += utility.calc_psnr(sr_refine2, hr, scale, self.args.rgb_range, benchmark=self.loader_test.dataset.benchmark)
                        save_list.extend([sr_refine1, sr_refine2, lr, hr])
                    if self.args.save_results:
                        self.ckp.save_results(filename, save_list, scale)
                # Log the mean PSNR of the main output for this scale.
                self.ckp.log[((- 1), idx_scale)] = (eval_acc / len(self.loader_test))
                best = self.ckp.log.max(0)
                self.ckp.write_log('[{} x{}]\tPSNR: {:.3f}, PSNR of refine1: {:.3f}, PSNR of refine2: {:.3f} (Best: {:.3f} @epoch {})'.format(self.args.data_test, scale, self.ckp.log[((- 1), idx_scale)], (eval_acc_refine1 / len(self.loader_test)), (eval_acc_refine2 / len(self.loader_test)), best[0][idx_scale], (best[1][idx_scale] + 1)))
        self.ckp.write_log('Total time: {:.2f}s\n'.format(timer_test.toc()), refresh=True)
        if (not self.args.test_only):
            self.ckp.save(self, epoch, is_best=((best[1][0] + 1) == epoch))

    def prepare(self, l, volatile=False):
        """Move a list of tensors to the target device (and half precision
        if requested). `volatile` is unused, kept for caller compatibility.
        """
        device = torch.device(('cpu' if self.args.cpu else 'cuda'))

        def _prepare(tensor):
            if (self.args.precision == 'half'):
                tensor = tensor.half()
            return tensor.to(device)
        return [_prepare(_l) for _l in l]

    def terminate(self):
        """Return True when the run should stop (test-only runs test once)."""
        if self.args.test_only:
            self.test()
            return True
        else:
            epoch = (self.scheduler.last_epoch + 1)
            return (epoch >= self.args.epochs)
|
def angular_error(gt_mesh_name, gen_mesh_name, sample_num):
    """Symmetric normal-consistency metric between two meshes.

    Samples `sample_num` points on each mesh surface, matches every sample
    to its nearest sample on the other mesh with a KD-tree, and averages the
    cosine similarity between the face normals of the matched pairs in both
    directions. (The previous docstring described a chamfer distance; this
    function actually measures normal agreement.)

    Args:
        gt_mesh_name: path to the ground-truth mesh (loadable by trimesh).
        gen_mesh_name: path to the generated mesh.
        sample_num: number of surface samples per mesh.

    Returns:
        (str_ang, cos_sim): a formatted report line and the symmetric mean
        absolute cosine similarity.
    """
    gt_mesh = trimesh.load_mesh(gt_mesh_name)
    gen_mesh = trimesh.load_mesh(gen_mesh_name)
    (gt_points, gt_face_index) = trimesh.sample.sample_surface(gt_mesh, sample_num)
    (gen_points, gen_face_index) = trimesh.sample.sample_surface(gen_mesh, sample_num)
    # Normal of the face each sample landed on.
    gt_normals = gt_mesh.face_normals[gt_face_index]
    gen_normals = gen_mesh.face_normals[gen_face_index]
    # gt -> gen direction.
    gen_points_kd_tree = KDTree(gen_points)
    (gt2gen_dist, gt2gen_vert_ids) = gen_points_kd_tree.query(gt_points)
    gt2gen_closest_normals_on_gen = gen_normals[gt2gen_vert_ids]
    # Row-wise dot product of (unit) normals == cosine similarity.
    gt2gen_cos_sim = np.mean(np.einsum('nk,nk->n', gt_normals, gt2gen_closest_normals_on_gen))
    # gen -> gt direction.
    gt_points_kd_tree = KDTree(gt_points)
    (gen2gt_dist, gen2gt_vert_ids) = gt_points_kd_tree.query(gen_points)
    gen2gt_closest_normals_on_gen = gt_normals[gen2gt_vert_ids]
    gen2gt_cos_sim = np.mean(np.einsum('nk,nk->n', gen_normals, gen2gt_closest_normals_on_gen))
    cos_sim = ((np.abs(gt2gen_cos_sim) + np.abs(gen2gt_cos_sim)) / 2)
    str_ang = f'''angle: {gt2gen_cos_sim:.6f} {gen2gt_cos_sim:.6f} {cos_sim:.6f}
'''
    return (str_ang, cos_sim)
|
def print_matching(list_a, list_b):
    """Print each zipped pair from the two lists, then a summary line with
    the number of matches and both list lengths."""
    matched = 0
    for matched, (a, b) in enumerate(zip(list_a, list_b), start=1):
        print(f'Matched {a} and {b}')
    print(f'Matched {matched} of {len(list_a)} and {len(list_b)}')
|
def res2str(name_a, name_b, res_a2b, res_b2a, ms):
    """Normalize pymeshlab Hausdorff results by bounding-box diagonal and
    format them into a report string.

    Expects `ms` to still hold the per-sample error meshes saved by the two
    `hausdorff_distance(savesample=True)` calls in `compare_meshes`
    (mesh indices 3 and 5). Distances are divided by each run's
    `diag_mesh_0` before aggregation.

    Returns:
        (str_all, sym_hausdorff, haus_a2b, haus_b2a,
         sym_chamfer, cham_a2b, cham_b2a)
    """
    # Per-vertex distance fields stored on the saved sample meshes.
    a2b_error_field = ms.mesh(3).vertex_quality_array()
    b2a_error_field = ms.mesh(5).vertex_quality_array()
    # Normalize by the sampled mesh's bounding-box diagonal.
    a2b_error_field /= res_a2b['diag_mesh_0']
    b2a_error_field /= res_b2a['diag_mesh_0']
    # Hausdorff = max point-wise distance; symmetric = max of both directions.
    dist_Haus_a2b = a2b_error_field.max()
    dist_Haus_b2a = b2a_error_field.max()
    dist_symHausd = max(dist_Haus_a2b, dist_Haus_b2a)
    # (Squared) chamfer = mean squared distance; symmetric = average.
    dist_Cham_a2b = (a2b_error_field ** 2).mean()
    dist_Cham_b2a = (b2a_error_field ** 2).mean()
    dist_symChamf = ((dist_Cham_a2b + dist_Cham_b2a) / 2)
    str_nma = f'''name_a: {name_a}
'''
    str_nmb = f'''name_b: {name_b}
'''
    str_itm = f'''---- a2b b2a sym
'''
    str_hau = f'''haus: {dist_Haus_a2b:.6e} {dist_Haus_b2a:.6e} {dist_symHausd:.6e}
'''
    str_chm = f'''chamfer: {dist_Cham_a2b:.6e} {dist_Cham_b2a:.6e} {dist_symChamf:.6e}
'''
    str_dg0 = f'''diag a: {res_a2b['diag_mesh_0']:.6e}
'''
    str_dg1 = f'''diag b: {res_a2b['diag_mesh_1']:.6e}
'''
    str_num = f'''n_samples: {res_a2b['n_samples']}
'''
    str_all = (((((((str_nma + str_nmb) + str_itm) + str_hau) + str_chm) + str_dg0) + str_dg1) + str_num)
    return (str_all, dist_symHausd, dist_Haus_a2b, dist_Haus_b2a, dist_symChamf, dist_Cham_a2b, dist_Cham_b2a)
|
def compare_meshes(meshfile_a, meshfile_b, sample_num):
    """Compute bbox-normalized symmetric Hausdorff and squared-chamfer
    distances between two mesh files using pymeshlab surface sampling.

    Returns:
        (str_res, d_haus, d_cham): formatted report string plus the
        symmetric Hausdorff and chamfer values (see res2str).
    """
    ms = pymeshlab.MeshSet()
    ms.load_new_mesh(meshfile_a)
    ms.load_new_mesh(meshfile_b)
    # Face-sampled Hausdorff in both directions; savesample=True keeps the
    # per-sample error meshes that res2str reads back (indices 3 and 5).
    res_a2b = ms.hausdorff_distance(sampledmesh=0, targetmesh=1, savesample=True, samplevert=False, sampleedge=False, samplefauxedge=False, sampleface=True, samplenum=sample_num)
    res_b2a = ms.hausdorff_distance(sampledmesh=1, targetmesh=0, savesample=True, samplevert=False, sampleedge=False, samplefauxedge=False, sampleface=True, samplenum=sample_num)
    (str_res, d_haus, d_haus_a2b, d_haus_b2a, d_cham, d_cham_a2b, d_cham_b2a) = res2str(meshfile_a, meshfile_b, res_a2b, res_b2a, ms)
    del ms  # release the MeshSet (holds all loaded/sampled meshes)
    return (str_res, d_haus, d_cham)
|
def broyden(g, x_init, J_inv_init, max_steps=50, cvg_thresh=1e-05, dvg_thresh=1, eps=1e-06):
    """Find roots of g(x) = 0 with a batched Broyden quasi-Newton iteration.

    Implemented based on https://github.com/locuslab/deq.

    Tensor shape abbreviation: N = number of points, D = space dimension.

    Args:
        g (function): function whose roots are sought; maps [N, D, 1] ->
            values for the points selected by its boolean `mask` argument.
        x_init (tensor): initial value of the parameters. shape: [N, D, 1]
        J_inv_init (tensor): initial inverse Jacobians. shape: [N, D, D]
        max_steps (int, optional): max number of iterations. Defaults to 50.
        cvg_thresh (float, optional): convergence threshold on the residual
            norm. Defaults to 1e-5.
        dvg_thresh (float, optional): divergence threshold. Defaults to 1.
        eps (float, optional): small number added to the denominator to
            prevent numerical error. Defaults to 1e-6.

    Returns:
        dict with:
            'result' (tensor): best root estimate per point. [N, D, 1]
            'diff' (tensor): corresponding residual norm. [N]
            'valid_ids' (tensor): mask of converged points. [N]
    """
    x = x_init.clone().detach()
    J_inv = J_inv_init.clone().detach()
    ids_val = torch.ones(x.shape[0]).bool()
    gx = g(x, mask=ids_val)
    update = (- J_inv.bmm(gx))
    x_opt = x.clone()
    gx_norm_opt = torch.linalg.norm(gx.squeeze((- 1)), dim=(- 1))
    delta_gx = torch.zeros_like(gx)
    delta_x = torch.zeros_like(x)
    ids_val = torch.ones_like(gx_norm_opt).bool()
    for solvestep in range(max_steps):
        # Quasi-Newton step for the still-active points only. On the first
        # pass `update` covers all points; afterwards it was computed for the
        # active subset, matching delta_x[ids_val].
        delta_x[ids_val] = update
        x[ids_val] += delta_x[ids_val]
        # NOTE(review): g(x, mask=ids_val) appears to return values only for
        # the masked points — confirm against callers.
        delta_gx[ids_val] = (g(x, mask=ids_val) - gx[ids_val])
        gx[ids_val] += delta_gx[ids_val]
        # Track the lowest-residual iterate seen per point.
        gx_norm = torch.linalg.norm(gx.squeeze((- 1)), dim=(- 1))
        ids_opt = (gx_norm < gx_norm_opt)
        gx_norm_opt[ids_opt] = gx_norm.clone().detach()[ids_opt]
        x_opt[ids_opt] = x.clone().detach()[ids_opt]
        # Stay active while not yet converged and not diverged.
        ids_val = ((gx_norm_opt > cvg_thresh) & (gx_norm < dvg_thresh))
        if (ids_val.sum() <= 0):
            break
        # Rank-1 (Sherman-Morrison style) update of the inverse Jacobian.
        vT = delta_x[ids_val].transpose((- 1), (- 2)).bmm(J_inv[ids_val])
        a = (delta_x[ids_val] - J_inv[ids_val].bmm(delta_gx[ids_val]))
        b = vT.bmm(delta_gx[ids_val])
        # Keep the denominator bounded away from zero (sign-preserving).
        b[(b >= 0)] += eps
        b[(b < 0)] -= eps
        u = (a / b)
        ubmmvT = u.bmm(vT)
        J_inv[ids_val] += ubmmvT
        update = (- J_inv[ids_val].bmm(gx[ids_val]))
    return {'result': x_opt, 'diff': gx_norm_opt, 'valid_ids': (gx_norm_opt < cvg_thresh)}
|
def calculate_iou(gt, prediction):
    """Intersection-over-union of two boolean (or 0/1) tensors."""
    overlap = torch.logical_and(gt, prediction).sum()
    combined = torch.logical_or(gt, prediction).sum()
    return overlap / combined
|
class VertexJointSelector(nn.Module):
    """Appends extra keypoints (face, optional feet and finger tips) picked
    from the mesh vertices to the regressed joints.

    Args:
        vertex_ids: dict mapping keypoint names (e.g. 'nose', 'lthumb') to
            vertex indices. Required despite the None default.
        use_hands: include the 10 finger-tip vertices.
        use_feet_keypoints: include the 6 foot keypoints.
    """

    def __init__(self, vertex_ids=None, use_hands=True, use_feet_keypoints=True, **kwargs):
        super(VertexJointSelector, self).__init__()
        # Collect the index groups with a single consistent dtype. The
        # original mixed int64/int32 arrays and concatenated them onto an
        # empty (float64) list, relying on to_tensor's dtype argument to
        # cast the result back to an integer tensor.
        idx_groups = [np.array([vertex_ids['nose'], vertex_ids['reye'], vertex_ids['leye'], vertex_ids['rear'], vertex_ids['lear']], dtype=np.int64)]
        if use_feet_keypoints:
            idx_groups.append(np.array([vertex_ids['LBigToe'], vertex_ids['LSmallToe'], vertex_ids['LHeel'], vertex_ids['RBigToe'], vertex_ids['RSmallToe'], vertex_ids['RHeel']], dtype=np.int64))
        if use_hands:
            self.tip_names = ['thumb', 'index', 'middle', 'ring', 'pinky']
            tips_idxs = [vertex_ids[hand_id + tip_name]
                         for hand_id in ['l', 'r']
                         for tip_name in self.tip_names]
            idx_groups.append(np.array(tips_idxs, dtype=np.int64))
        extra_joints_idxs = np.concatenate(idx_groups)
        # Buffer: moves with the module across devices, not trainable.
        self.register_buffer('extra_joints_idxs', to_tensor(extra_joints_idxs, dtype=torch.long))

    def forward(self, vertices, joints):
        """Gather the extra vertices and append them after the joints.

        vertices: (B, V, C), joints: (B, J, C) -> (B, J + n_extra, C).
        (C is typically 3 — confirmed only by callers.)
        """
        extra_joints = torch.index_select(vertices, 1, self.extra_joints_idxs)
        joints = torch.cat([joints, extra_joints], dim=1)
        return joints
|
def chamfer_loss_separate(output, target, weight=10000.0, phase='train', debug=False):
    """Directional chamfer terms between two point sets.

    In 'train' phase returns the raw per-point distances and nearest-neighbor
    indices; otherwise returns the per-cloud mean distances scaled by `weight`.
    """
    from chamferdist.chamferdist import ChamferDistance
    cdist = ChamferDistance()
    model2scan, scan2model, idx1, idx2 = cdist(output, target)
    if phase == 'train':
        return model2scan, scan2model, idx1, idx2
    return (torch.mean(model2scan, dim=(- 1)) * weight,
            torch.mean(scan2model, dim=(- 1)) * weight)
|
def normal_loss(output_normals, target_normals, nearest_idx, weight=1.0, phase='train'):
    """L1 discrepancy between predicted and GT normals on the nearest-neighbor
    pairs found by chamfer distance. Input normals are assumed unit-length.

    Returns (loss, gathered_target_normals); the loss is a scalar mean in
    'train' phase and a per-sample tensor otherwise.
    """
    gather_idx = nearest_idx.expand(3, (- 1), (- 1)).permute([1, 2, 0]).long()
    target_normals_chosen = torch.gather(target_normals, dim=1, index=gather_idx)
    assert (output_normals.shape == target_normals_chosen.shape)
    if phase == 'train':
        lnormal = F.l1_loss(output_normals, target_normals_chosen, reduction='mean')
    else:
        lnormal = F.l1_loss(output_normals, target_normals_chosen, reduction='none')
        lnormal = lnormal.mean((- 1)).mean((- 1))
    return (lnormal, target_normals_chosen)
|
def color_loss(output_colors, target_colors, nearest_idx, weight=1.0, phase='train', excl_holes=False):
    """L1 discrepancy between predicted and GT colors on nearest-neighbor
    pairs (see normal_loss). With `excl_holes`, points whose gathered target
    color sums to zero (holes) are masked out of the loss.
    """
    gather_idx = nearest_idx.expand(3, (- 1), (- 1)).permute([1, 2, 0]).long()
    target_colors_chosen = torch.gather(target_colors, dim=1, index=gather_idx)
    assert (output_colors.shape == target_colors_chosen.shape)
    if excl_holes:
        # Zero-sum target color marks a hole; exclude it from the loss.
        colorsum = target_colors_chosen.sum((- 1))
        mask = (colorsum != 0).float().unsqueeze((- 1))
    else:
        mask = 1.0
    lcolor = F.l1_loss(output_colors, target_colors_chosen, reduction='none')
    lcolor = lcolor * mask
    if phase == 'train':
        lcolor = lcolor.mean()
    else:
        lcolor = lcolor.mean((- 1)).mean((- 1))
    return (lcolor, target_colors_chosen)
|
class GaussianSmoothing(nn.Module):
    """
    Apply gaussian smoothing on a 1d, 2d or 3d tensor. Filtering is performed
    seperately for each channel in the input using a depthwise convolution.
    Arguments:
        channels (int, sequence): Number of channels of the input tensors. Output will
            have this number of channels as well.
        kernel_size (int, sequence): Size of the gaussian kernel.
        sigma (float, sequence): Standard deviation of the gaussian kernel.
        dim (int, optional): The number of dimensions of the data.
            Default value is 2 (spatial).
    """

    def __init__(self, channels=3, kernel_size=3, sigma=1.0, dim=2):
        super(GaussianSmoothing, self).__init__()
        if isinstance(kernel_size, numbers.Number):
            kernel_size = [kernel_size] * dim
        if isinstance(sigma, numbers.Number):
            sigma = [sigma] * dim
        # Build a separable Gaussian kernel as the outer product of 1-D
        # Gaussians along each dimension.
        kernel = 1
        meshgrids = torch.meshgrid([torch.arange(size, dtype=torch.float32) for size in kernel_size])
        for (size, std, mgrid) in zip(kernel_size, sigma, meshgrids):
            mean = (size - 1) / 2
            # Proper Gaussian density exp(-(x-mu)^2 / (2 sigma^2)). The
            # original computed exp(-((x-mu)/(2 sigma))^2), which silently
            # widened the kernel to an effective sigma of sqrt(2)*sigma.
            kernel *= (1 / (std * math.sqrt(2 * math.pi))) * torch.exp(-(((mgrid - mean) / std) ** 2) / 2)
        # Normalize to sum 1 so constants are preserved exactly.
        kernel = kernel / torch.sum(kernel)
        kernel = kernel.view(1, 1, *kernel.size())
        kernel = kernel.repeat(channels, *([1] * (kernel.dim() - 1)))
        self.kernel_size = kernel_size[0]
        self.dim = dim
        # Buffer: follows .to()/.cuda() but is never trained.
        self.register_buffer('weight', kernel)
        self.groups = channels  # depthwise: one kernel per channel
        if dim == 1:
            self.conv = F.conv1d
        elif dim == 2:
            self.conv = F.conv2d
        elif dim == 3:
            self.conv = F.conv3d
        else:
            raise RuntimeError('Only 1, 2 and 3 dimensions are supported. Received {}.'.format(dim))

    def forward(self, input):
        """
        Apply gaussian filter to input.
        Arguments:
            input (torch.Tensor): Input to apply gaussian filter on.
        Returns:
            filtered (torch.Tensor): Filtered output.
        """
        # Reflect-pad so the output spatial size matches the input.
        pad_size = self.kernel_size // 2
        if self.dim == 1:
            pad = F.pad(input, (pad_size, pad_size), mode='reflect')
        elif self.dim == 2:
            pad = F.pad(input, (pad_size, pad_size, pad_size, pad_size), mode='reflect')
        elif self.dim == 3:
            pad = F.pad(input, (pad_size, pad_size, pad_size, pad_size, pad_size, pad_size), mode='reflect')
        return self.conv(pad, weight=self.weight.type_as(input), groups=self.groups)
|
class CBatchNorm2d(nn.Module):
    """ Conditional batch normalization layer class.
    Borrowed from Occupancy Network repo: https://github.com/autonomousvision/occupancy_networks
    Args:
        c_dim (int): dimension of latent conditioned code c
        f_channels (int): number of channels of the feature maps
        norm_method (str): normalization method
    """

    def __init__(self, c_dim, f_channels, norm_method='batch_norm'):
        super().__init__()
        self.c_dim = c_dim
        self.f_channels = f_channels
        self.norm_method = norm_method
        # 1x1 convs map the conditioning code to per-channel scale and shift.
        self.conv_gamma = nn.Conv1d(c_dim, f_channels, 1)
        self.conv_beta = nn.Conv1d(c_dim, f_channels, 1)
        if norm_method == 'batch_norm':
            self.bn = nn.BatchNorm2d(f_channels, affine=False)
        elif norm_method == 'instance_norm':
            self.bn = nn.InstanceNorm2d(f_channels, affine=False)
        elif norm_method == 'group_norm':
            # The original referenced nn.GroupNorm2d, which does not exist in
            # torch and raised AttributeError when this branch was selected.
            # A single-group GroupNorm is the nearest valid substitute;
            # adjust num_groups if finer grouping is intended.
            self.bn = nn.GroupNorm(1, f_channels, affine=False)
        else:
            raise ValueError('Invalid normalization method!')
        self.reset_parameters()

    def reset_parameters(self):
        # Zero weights with gamma-bias 1 and beta-bias 0: the layer starts
        # out as plain (unconditional) normalization.
        nn.init.zeros_(self.conv_gamma.weight)
        nn.init.zeros_(self.conv_beta.weight)
        nn.init.ones_(self.conv_gamma.bias)
        nn.init.zeros_(self.conv_beta.bias)

    def forward(self, x, c):
        assert (x.size(0) == c.size(0))
        assert (c.size(1) == self.c_dim)
        if len(c.size()) == 2:
            c = c.unsqueeze(2)  # (B, c_dim) -> (B, c_dim, 1) for Conv1d
        gamma = self.conv_gamma(c).unsqueeze((- 1))  # (B, f_channels, 1, 1)
        beta = self.conv_beta(c).unsqueeze((- 1))
        net = self.bn(x)
        out = ((gamma * net) + beta)
        return out
|
class Conv2DBlock(nn.Module):
    """Encoder block: optional LeakyReLU -> Conv2d -> optional BatchNorm.

    Note the pre-activation order: the activation (in-place LeakyReLU 0.2)
    is applied to the *input* before the convolution.
    """

    def __init__(self, input_nc, output_nc, kernel_size=4, stride=2, padding=1, use_bias=False, use_bn=True, use_relu=True):
        super(Conv2DBlock, self).__init__()
        self.use_bn = use_bn
        self.use_relu = use_relu
        self.conv = nn.Conv2d(input_nc, output_nc, kernel_size=kernel_size,
                              stride=stride, padding=padding, bias=use_bias)
        if use_bn:
            self.bn = nn.BatchNorm2d(output_nc, affine=False)
        self.relu = nn.LeakyReLU(0.2, inplace=True)

    def forward(self, x):
        out = self.relu(x) if self.use_relu else x
        out = self.conv(out)
        if self.use_bn:
            out = self.bn(out)
        return out
|
class UpConv2DBlock(nn.Module):
    """Decoder block: ReLU -> upsampling (deconv or bilinear+conv) ->
    optional BatchNorm -> optional dropout -> optional skip concatenation.
    """

    def __init__(self, input_nc, output_nc, kernel_size=4, stride=2, padding=1, use_bias=False, use_bn=True, up_mode='upconv', use_dropout=False):
        super(UpConv2DBlock, self).__init__()
        assert up_mode in ('upconv', 'upsample')
        self.use_bn = use_bn
        self.use_dropout = use_dropout
        self.relu = nn.ReLU()
        if up_mode == 'upconv':
            self.up = nn.ConvTranspose2d(input_nc, output_nc, kernel_size=kernel_size,
                                         stride=stride, padding=padding, bias=use_bias)
        else:
            self.up = nn.Sequential(
                nn.Upsample(mode='bilinear', scale_factor=2, align_corners=False),
                nn.Conv2d(input_nc, output_nc, kernel_size=3, padding=1, stride=1),
            )
        if use_bn:
            self.bn = nn.BatchNorm2d(output_nc, affine=False)
        if use_dropout:
            self.drop = nn.Dropout(0.5)

    def forward(self, x, skip_input=None):
        out = self.up(self.relu(x))
        if self.use_bn:
            out = self.bn(out)
        if self.use_dropout:
            out = self.drop(out)
        if skip_input is not None:
            out = torch.cat([out, skip_input], 1)
        return out
|
class GeomConvLayers(nn.Module):
    """
    A few convolutional layers to smooth the geometric feature tensor
    """

    def __init__(self, input_nc=16, hidden_nc=16, output_nc=16, use_relu=False):
        super().__init__()
        self.use_relu = use_relu
        # Three 5x5 same-padding convolutions; spatial size is preserved.
        self.conv1 = nn.Conv2d(input_nc, hidden_nc, kernel_size=5, stride=1, padding=2, bias=False)
        self.conv2 = nn.Conv2d(hidden_nc, hidden_nc, kernel_size=5, stride=1, padding=2, bias=False)
        self.conv3 = nn.Conv2d(hidden_nc, output_nc, kernel_size=5, stride=1, padding=2, bias=False)
        if use_relu:
            self.relu = nn.LeakyReLU(0.2, inplace=True)

    def forward(self, x):
        # Activation after the first two convs only; the last is linear.
        out = self.conv1(x)
        if self.use_relu:
            out = self.relu(out)
        out = self.conv2(out)
        if self.use_relu:
            out = self.relu(out)
        return self.conv3(out)
|
class GeomConvBottleneckLayers(nn.Module):
    """
    A u-net-like small bottleneck network for smoothing the geometric feature tensor
    """

    def __init__(self, input_nc=16, hidden_nc=16, output_nc=16, use_relu=False):
        super().__init__()
        self.use_relu = use_relu
        # Encoder: three stride-2 downsampling convolutions (1/8 resolution).
        self.conv1 = nn.Conv2d(input_nc, hidden_nc, kernel_size=4, stride=2, padding=1, bias=False)
        self.conv2 = nn.Conv2d(hidden_nc, 2 * hidden_nc, kernel_size=4, stride=2, padding=1, bias=False)
        self.conv3 = nn.Conv2d(2 * hidden_nc, 4 * hidden_nc, kernel_size=4, stride=2, padding=1, bias=False)
        # Decoder: three stride-2 transposed convolutions back to full size.
        self.up1 = nn.ConvTranspose2d(4 * hidden_nc, 2 * hidden_nc, kernel_size=4, stride=2, padding=1, bias=False)
        self.up2 = nn.ConvTranspose2d(2 * hidden_nc, hidden_nc, kernel_size=4, stride=2, padding=1, bias=False)
        self.up3 = nn.ConvTranspose2d(hidden_nc, output_nc, kernel_size=4, stride=2, padding=1, bias=False)

    def forward(self, x):
        # No skip connections and no nonlinearities: a plain conv pipeline.
        out = x
        for layer in (self.conv1, self.conv2, self.conv3, self.up1, self.up2, self.up3):
            out = layer(out)
        return out
|
class GaussianSmoothingLayers(nn.Module):
    """
    use a fixed, not-trainable gaussian smoother layers for smoothing the geometric feature tensor
    """

    def __init__(self, channels=16, kernel_size=5, sigma=1.0):
        super().__init__()
        # Bug fix: the `sigma` argument was previously ignored — every stage
        # was hard-coded to sigma=1.0. It is now forwarded to all layers.
        self.conv1 = GaussianSmoothing(channels, kernel_size=kernel_size, sigma=sigma, dim=2)
        self.conv2 = GaussianSmoothing(channels, kernel_size=kernel_size, sigma=sigma, dim=2)
        self.conv3 = GaussianSmoothing(channels, kernel_size=kernel_size, sigma=sigma, dim=2)

    def forward(self, x):
        # Three successive smoothing passes; spatial size is preserved.
        x = self.conv1(x)
        x = self.conv2(x)
        x = self.conv3(x)
        return x
|
class UnetNoCond5DS(nn.Module):
    """Unconditional U-Net with 5 downsampling levels.

    When `return_2branches` is set, a second decoder head (upconvN4/upconvN5)
    branches off after the third upsampling stage and a pair of outputs is
    returned.
    """

    def __init__(self, input_nc=3, output_nc=3, nf=64, up_mode='upconv', use_dropout=False, return_lowres=False, return_2branches=False):
        super().__init__()
        assert up_mode in ('upconv', 'upsample')
        self.return_lowres = return_lowres
        self.return_2branches = return_2branches
        # Encoder: five stride-2 blocks; first has no BN/activation.
        self.conv1 = Conv2DBlock(input_nc, nf, 4, 2, 1, use_bias=False, use_bn=False, use_relu=False)
        self.conv2 = Conv2DBlock(1 * nf, 2 * nf, 4, 2, 1, use_bias=False, use_bn=True)
        self.conv3 = Conv2DBlock(2 * nf, 4 * nf, 4, 2, 1, use_bias=False, use_bn=True)
        self.conv4 = Conv2DBlock(4 * nf, 8 * nf, 4, 2, 1, use_bias=False, use_bn=True)
        self.conv5 = Conv2DBlock(8 * nf, 8 * nf, 4, 2, 1, use_bias=False, use_bn=False)
        # Decoder: skip concatenations double the input channels per stage.
        self.upconv1 = UpConv2DBlock(8 * nf, 8 * nf, 4, 2, 1, up_mode=up_mode)
        self.upconv2 = UpConv2DBlock(8 * nf * 2, 4 * nf, 4, 2, 1, up_mode=up_mode, use_dropout=use_dropout)
        self.upconv3 = UpConv2DBlock(4 * nf * 2, 2 * nf, 4, 2, 1, up_mode=up_mode, use_dropout=use_dropout)
        self.upconv4 = UpConv2DBlock(2 * nf * 2, 1 * nf, 4, 2, 1, up_mode=up_mode)
        self.upconv5 = UpConv2DBlock(1 * nf * 2, output_nc, 4, 2, 1, use_bn=False, use_bias=True, up_mode=up_mode)
        if return_2branches:
            # Second head, sharing features up to the third decoder stage.
            self.upconvN4 = UpConv2DBlock(2 * nf * 2, 1 * nf, 4, 2, 1, up_mode=up_mode)
            self.upconvN5 = UpConv2DBlock(1 * nf * 2, output_nc, 4, 2, 1, use_bn=False, use_bias=True, up_mode='upconv')

    def forward(self, x):
        # Encoder pass.
        e1 = self.conv1(x)
        e2 = self.conv2(e1)
        e3 = self.conv3(e2)
        e4 = self.conv4(e3)
        e5 = self.conv5(e4)
        # Decoder pass with skip connections.
        u1 = self.upconv1(e5, e4)
        u2 = self.upconv2(u1, e3)
        u3 = self.upconv3(u2, e2)
        u4 = self.upconv4(u3, e1)
        u5 = self.upconv5(u4)
        if self.return_2branches:
            n4 = self.upconvN4(u3, e1)
            n5 = self.upconvN5(n4)
            return (u5, n5)
        return u5
|
class UnetNoCond6DS(nn.Module):
    """U-Net with 6 down-sampling stages and no conditioning input.

    When ``return_2branches`` is set, a second decoder head (``upconvN*``)
    shares the encoder and decoder stages up to ``u4`` with the main head.
    """
    def __init__(self, input_nc=3, output_nc=3, nf=64, up_mode='upconv', use_dropout=False, return_lowres=False, return_2branches=False):
        super(UnetNoCond6DS, self).__init__()
        assert (up_mode in ('upconv', 'upsample'))
        self.return_lowres = return_lowres
        self.return_2branches = return_2branches
        # Encoder: each Conv2DBlock halves the spatial resolution.
        self.conv1 = Conv2DBlock(input_nc, nf, 4, 2, 1, use_bias=False, use_bn=False, use_relu=False)
        self.conv2 = Conv2DBlock((1 * nf), (2 * nf), 4, 2, 1, use_bias=False, use_bn=True)
        self.conv3 = Conv2DBlock((2 * nf), (4 * nf), 4, 2, 1, use_bias=False, use_bn=True)
        self.conv4 = Conv2DBlock((4 * nf), (8 * nf), 4, 2, 1, use_bias=False, use_bn=True)
        self.conv5 = Conv2DBlock((8 * nf), (8 * nf), 4, 2, 1, use_bias=False, use_bn=True)
        self.conv6 = Conv2DBlock((8 * nf), (8 * nf), 4, 2, 1, use_bias=False, use_bn=False)
        # Shared decoder stages (input widths include skip connections).
        self.upconv1 = UpConv2DBlock((8 * nf), (8 * nf), 4, 2, 1, up_mode=up_mode)
        self.upconv2 = UpConv2DBlock(((8 * nf) * 2), (8 * nf), 4, 2, 1, up_mode=up_mode, use_dropout=use_dropout)
        self.upconv3 = UpConv2DBlock(((8 * nf) * 2), (8 * nf), 4, 2, 1, up_mode=up_mode, use_dropout=use_dropout)
        self.upconv4 = UpConv2DBlock(((4 * nf) * 3), (4 * nf), 4, 2, 1, up_mode=up_mode, use_dropout=use_dropout)
        # Main head uses bilinear upsampling regardless of `up_mode`.
        self.upconvC5 = UpConv2DBlock(((2 * nf) * 3), (2 * nf), 4, 2, 1, up_mode='upsample')
        self.upconvC6 = UpConv2DBlock(((1 * nf) * 3), output_nc, 4, 2, 1, use_bn=False, use_bias=True, up_mode='upsample')
        if return_2branches:
            # Second head uses transposed convolutions; its output is
            # hard-coded to 3 channels (not output_nc) — presumably a
            # normal-map head; confirm against callers.
            self.upconvN5 = UpConv2DBlock(((2 * nf) * 3), (2 * nf), 4, 2, 1, up_mode='upconv')
            self.upconvN6 = UpConv2DBlock(((1 * nf) * 3), 3, 4, 2, 1, use_bn=False, use_bias=True, up_mode='upconv')
    def forward(self, x):
        # Encoder pass.
        d1 = self.conv1(x)
        d2 = self.conv2(d1)
        d3 = self.conv3(d2)
        d4 = self.conv4(d3)
        d5 = self.conv5(d4)
        d6 = self.conv6(d5)
        # Shared decoder pass with skip connections.
        u1 = self.upconv1(d6, d5)
        u2 = self.upconv2(u1, d4)
        u3 = self.upconv3(u2, d3)
        u4 = self.upconv4(u3, d2)
        # Main (C) head.
        uc5 = self.upconvC5(u4, d1)
        uc6 = self.upconvC6(uc5)
        if self.return_2branches:
            # Second (N) head reuses the shared features u4.
            un5 = self.upconvN5(u4, d1)
            un6 = self.upconvN6(un5)
            return (uc6, un6)
        return uc6
|
class UnetNoCond7DS(nn.Module):
    """U-Net with 7 down-sampling stages and no conditioning input.

    When ``return_2branches`` is set, a second decoder head (``upconvN*``)
    shares the encoder and the first four decoder stages with the main head.
    """

    def __init__(self, input_nc=3, output_nc=3, nf=64, up_mode='upconv', use_dropout=False, return_lowres=False, return_2branches=False):
        super(UnetNoCond7DS, self).__init__()
        assert up_mode in ('upconv', 'upsample')
        self.return_lowres = return_lowres
        self.return_2branches = return_2branches
        # Encoder: 7 stages, each halving the spatial resolution.
        self.conv1 = Conv2DBlock(input_nc, nf, 4, 2, 1, use_bias=False, use_bn=False, use_relu=False)
        self.conv2 = Conv2DBlock(1 * nf, 2 * nf, 4, 2, 1, use_bias=False, use_bn=True)
        self.conv3 = Conv2DBlock(2 * nf, 4 * nf, 4, 2, 1, use_bias=False, use_bn=True)
        self.conv4 = Conv2DBlock(4 * nf, 8 * nf, 4, 2, 1, use_bias=False, use_bn=True)
        self.conv5 = Conv2DBlock(8 * nf, 8 * nf, 4, 2, 1, use_bias=False, use_bn=True)
        self.conv6 = Conv2DBlock(8 * nf, 8 * nf, 4, 2, 1, use_bias=False, use_bn=True)
        self.conv7 = Conv2DBlock(8 * nf, 8 * nf, 4, 2, 1, use_bias=False, use_bn=False)
        # Shared decoder stages.
        self.upconv1 = UpConv2DBlock(8 * nf, 8 * nf, 4, 2, 1, up_mode=up_mode)
        self.upconv2 = UpConv2DBlock(8 * nf * 2, 8 * nf, 4, 2, 1, up_mode=up_mode, use_dropout=use_dropout)
        self.upconv3 = UpConv2DBlock(8 * nf * 2, 8 * nf, 4, 2, 1, up_mode=up_mode, use_dropout=use_dropout)
        self.upconv4 = UpConv2DBlock(8 * nf * 2, 4 * nf, 4, 2, 1, up_mode=up_mode, use_dropout=use_dropout)
        # Main (C) head uses bilinear upsampling regardless of `up_mode`.
        self.upconvC5 = UpConv2DBlock(4 * nf * 3, 2 * nf, 4, 2, 1, up_mode='upsample')
        self.upconvC6 = UpConv2DBlock(2 * nf * 2, 1 * nf, 4, 2, 1, up_mode='upsample')
        self.upconvC7 = UpConv2DBlock(1 * nf * 2, output_nc, 4, 2, 1, use_bn=False, use_bias=True, up_mode='upsample')
        if return_2branches:
            # Second (N) head uses transposed convolutions; output is
            # hard-coded to 3 channels (not output_nc).
            self.upconvN5 = UpConv2DBlock(4 * nf * 3, 2 * nf, 4, 2, 1, up_mode='upconv')
            self.upconvN6 = UpConv2DBlock(2 * nf * 2, 1 * nf, 4, 2, 1, up_mode='upconv')
            self.upconvN7 = UpConv2DBlock(1 * nf * 2, 3, 4, 2, 1, use_bn=False, use_bias=True, up_mode='upconv')

    def forward(self, x):
        # Encoder pass.
        d1 = self.conv1(x)
        d2 = self.conv2(d1)
        d3 = self.conv3(d2)
        d4 = self.conv4(d3)
        d5 = self.conv5(d4)
        d6 = self.conv6(d5)
        d7 = self.conv7(d6)
        # Shared decoder pass with skip connections.
        u1 = self.upconv1(d7, d6)
        u2 = self.upconv2(u1, d5)
        u3 = self.upconv3(u2, d4)
        # Bug fix: this stage previously applied self.upconv3 a second time,
        # leaving the self.upconv4 module declared in __init__ unused.
        u4 = self.upconv4(u3, d3)
        # Main (C) head.
        uc5 = self.upconvC5(u4, d2)
        uc6 = self.upconvC6(uc5, d1)
        uc7 = self.upconvC7(uc6)
        if self.return_2branches:
            # Second (N) head reuses the shared features u4.
            un5 = self.upconvN5(u4, d2)
            un6 = self.upconvN6(un5, d1)
            un7 = self.upconvN7(un6)
            return (uc7, un7)
        return uc7
|
class ShapeDecoder(nn.Module):
    """The "Shape Decoder" in the POP paper Fig. 2; the "shared MLP" of SCALE.

    A point-wise MLP (stack of 1x1 Conv1d layers) with:
    - a DeepSDF-style skip connection re-injecting the input features at the
      5th layer,
    - a branch at the second-to-last layer: one head predicts positions
      (conv6..conv8), the other normals (conv6N..conv8N).
    """

    def __init__(self, in_size, hsize=256, actv_fn='softplus'):
        self.hsize = hsize
        super(ShapeDecoder, self).__init__()
        # NOTE: module creation order is identical to the original so that
        # parameter initialization under a fixed RNG seed is unchanged.
        self.conv1 = torch.nn.Conv1d(in_size, self.hsize, 1)
        self.conv2 = torch.nn.Conv1d(self.hsize, self.hsize, 1)
        self.conv3 = torch.nn.Conv1d(self.hsize, self.hsize, 1)
        self.conv4 = torch.nn.Conv1d(self.hsize, self.hsize, 1)
        # conv5 also consumes the skip-connected input features.
        self.conv5 = torch.nn.Conv1d(self.hsize + in_size, self.hsize, 1)
        self.conv6 = torch.nn.Conv1d(self.hsize, self.hsize, 1)
        self.conv7 = torch.nn.Conv1d(self.hsize, self.hsize, 1)
        self.conv8 = torch.nn.Conv1d(self.hsize, 3, 1)
        self.conv6N = torch.nn.Conv1d(self.hsize, self.hsize, 1)
        self.conv7N = torch.nn.Conv1d(self.hsize, self.hsize, 1)
        self.conv8N = torch.nn.Conv1d(self.hsize, 3, 1)
        self.bn1 = torch.nn.BatchNorm1d(self.hsize)
        self.bn2 = torch.nn.BatchNorm1d(self.hsize)
        self.bn3 = torch.nn.BatchNorm1d(self.hsize)
        self.bn4 = torch.nn.BatchNorm1d(self.hsize)
        self.bn5 = torch.nn.BatchNorm1d(self.hsize)
        self.bn6 = torch.nn.BatchNorm1d(self.hsize)
        self.bn7 = torch.nn.BatchNorm1d(self.hsize)
        self.bn6N = torch.nn.BatchNorm1d(self.hsize)
        self.bn7N = torch.nn.BatchNorm1d(self.hsize)
        self.actv_fn = nn.ReLU() if actv_fn == 'relu' else nn.Softplus()

    def forward(self, x):
        # Trunk: four conv+BN+activation stages.
        h = self.actv_fn(self.bn1(self.conv1(x)))
        h = self.actv_fn(self.bn2(self.conv2(h)))
        h = self.actv_fn(self.bn3(self.conv3(h)))
        h = self.actv_fn(self.bn4(self.conv4(h)))
        # DeepSDF-style skip: re-inject the raw input features.
        h = self.actv_fn(self.bn5(self.conv5(torch.cat([x, h], dim=1))))
        # Position head.
        pos = self.actv_fn(self.bn6(self.conv6(h)))
        pos = self.actv_fn(self.bn7(self.conv7(pos)))
        pos = self.conv8(pos)
        # Normal head.
        nrm = self.actv_fn(self.bn6N(self.conv6N(h)))
        nrm = self.actv_fn(self.bn7N(self.conv7N(nrm)))
        nrm = self.conv8N(nrm)
        return (pos, nrm)
|
class PreDeformer(nn.Module):
    """Small point-wise MLP (stack of 1x1 Conv1d layers) producing a per-point
    `out_size`-dimensional output from `in_size`-dimensional input features.
    """

    def __init__(self, in_size, out_size=3, hsize=64, actv_fn='softplus'):
        self.hsize = hsize
        super(PreDeformer, self).__init__()
        # Module creation order matches the original to keep RNG-seeded
        # parameter initialization identical.
        self.conv1 = torch.nn.Conv1d(in_size, self.hsize, 1)
        self.conv2 = torch.nn.Conv1d(self.hsize, self.hsize, 1)
        self.conv3 = torch.nn.Conv1d(self.hsize, self.hsize, 1)
        self.conv4 = torch.nn.Conv1d(self.hsize, out_size, 1)
        self.bn1 = torch.nn.BatchNorm1d(self.hsize)
        self.bn2 = torch.nn.BatchNorm1d(self.hsize)
        self.bn3 = torch.nn.BatchNorm1d(self.hsize)
        self.actv_fn = nn.ReLU() if actv_fn == 'relu' else nn.Softplus()

    def forward(self, x):
        # Three conv+BN+activation stages, then a linear output convolution.
        h = x
        for conv, bn in ((self.conv1, self.bn1), (self.conv2, self.bn2), (self.conv3, self.bn3)):
            h = self.actv_fn(bn(conv(h)))
        return self.conv4(h)
|
def loadShader(shaderType, shaderFile):
    """Compile a single GLSL shader loaded from a file.

    :param shaderType: GL shader type enum (GL_VERTEX_SHADER, GL_GEOMETRY_SHADER
        or GL_FRAGMENT_SHADER).
    :param shaderFile: Shader source file name, resolved via findFileOrThrow.
    :return: GL shader handle. The handle is returned even when compilation
        fails; failures are reported on stdout (matching previous behavior).
    """
    strFilename = findFileOrThrow(shaderFile)
    print(f'Found shader filename = {strFilename}')
    with open(strFilename, 'r') as f:
        shaderData = f.read()
    shader = glCreateShader(shaderType)
    glShaderSource(shader, shaderData)
    glCompileShader(shader)
    status = glGetShaderiv(shader, GL_COMPILE_STATUS)
    if (status == GL_FALSE):
        strInfoLog = glGetShaderInfoLog(shader)
        # Bug fix: look the name up by equality instead of the former chain of
        # `is` identity comparisons — GL enum constants are not guaranteed to
        # be singleton objects in PyOpenGL.
        type_names = {
            GL_VERTEX_SHADER: 'vertex',
            GL_GEOMETRY_SHADER: 'geometry',
            GL_FRAGMENT_SHADER: 'fragment',
        }
        strShaderType = type_names.get(shaderType, '')
        print(((('Compilation failure for ' + strShaderType) + ' shader:\n') + str(strInfoLog)))
    return shader
|
def createProgram(shaderList):
    """Link the given compiled shaders into a GL program.

    Link failures are reported on stdout; the shaders are detached afterwards
    in either case, and the program handle is always returned.
    """
    program = glCreateProgram()
    for shader_handle in shaderList:
        glAttachShader(program, shader_handle)
    glLinkProgram(program)
    status = glGetProgramiv(program, GL_LINK_STATUS)
    if status == GL_FALSE:
        print('Linker failure: \n' + str(glGetProgramInfoLog(program)))
    for shader_handle in shaderList:
        glDetachShader(program, shader_handle)
    return program
|
def findFileOrThrow(strBasename):
    """Locate a data file by name.

    Searches, in order: the name as given, ``data/<name>`` relative to the
    working directory, and ``data/<name>`` next to this module.

    :raises IOError: if the file is found in none of these locations.
    """
    candidates = (
        strBasename,
        os.path.join('data', strBasename),
        os.path.join(os.path.dirname(os.path.abspath(__file__)), 'data', strBasename),
    )
    for candidate in candidates:
        if os.path.isfile(candidate):
            return candidate
    raise IOError('Could not find target file ' + strBasename)
|
def tensor2numpy(tensor):
    """Convert a torch tensor to a numpy array.

    Non-tensor inputs (e.g. arrays that were already converted) are returned
    unchanged. Previously the function implicitly returned None for them,
    which silently broke downstream exporters.
    """
    if isinstance(tensor, torch.Tensor):
        return tensor.detach().cpu().numpy()
    return tensor
|
def vertex_normal_2_vertex_color(vertex_normal):
    """Map vertex normals to RGB ubyte colors.

    Normals are unit-normalized, then mapped via ``normal * 255 / 2 + 128``.

    :param vertex_normal: (N, 3) numpy array or torch tensor of normals.
    :return: (N, 3) np.ubyte color array.
    """
    import torch
    if torch.is_tensor(vertex_normal):
        vertex_normal = vertex_normal.detach().cpu().numpy()
    normal_length = ((vertex_normal ** 2).sum(1) ** 0.5).reshape((- 1), 1)
    # Bug fix: normalize out-of-place — the previous in-place `/=` mutated
    # the caller's array as a side effect.
    vertex_normal = vertex_normal / normal_length
    color = (((vertex_normal * 255) / 2.0) + 128)
    return color.astype(np.ubyte)
|
def export_ply_with_vquality(filename, v_array=None, f_array=None, vq_array=None):
    """Write an ascii PLY file with optional per-vertex quality.

    :param v_array: (Nv, 3) vertex positions
    :param f_array: (Nf, 3) triangle vertex indices
    :param vq_array: (Nv,) per-vertex quality scalars
    """
    n_verts = 0 if v_array is None else v_array.shape[0]
    n_faces = 0 if f_array is None else f_array.shape[0]
    header = [
        'ply',
        'format ascii 1.0',
        'comment trisst custom',
        f'element vertex {n_verts}',
        'property float x',
        'property float y',
        'property float z',
    ]
    if vq_array is not None:
        header.append('property float quality')
    header += [
        f'element face {n_faces}',
        'property list uchar int vertex_indices',
        'end_header',
    ]
    with open(filename, 'w') as plyfile:
        plyfile.write('\n'.join(header) + '\n')
        for i in range(n_verts):
            # Each value is followed by a space, including the last one before
            # the newline — kept for byte-compatibility with previous output.
            if vq_array is None:
                plyfile.write(f'{v_array[i][0]} {v_array[i][1]} {v_array[i][2]} \n')
            else:
                plyfile.write(f'{v_array[i][0]} {v_array[i][1]} {v_array[i][2]} {vq_array[i]} \n')
        for i in range(n_faces):
            plyfile.write(f'3 {f_array[i][0]} {f_array[i][1]} {f_array[i][2]}\n')
|
def customized_export_ply(outfile_name, v, f=None, v_n=None, v_c=None, f_c=None, e=None):
    """
    Author: Jinlong Yang, jyang@tue.mpg.de

    Exports a point cloud / mesh to an ascii .ply file, with vertex normal and
    color support such that the saved file is correctly displayed in MeshLab.

    :param v: Vertex position, N_v x 3 float numpy array
    :param f: Face, N_f x 3 int numpy array
    :param v_n: Vertex normal, N_v x 3 float numpy array (or torch tensor)
    :param v_c: Vertex color, N_v x (3 or 4) uchar numpy array
    :param f_c: Face color, N_f x (3 or 4) uchar numpy array
    :param e: Edge, N_e x 2 int numpy array
    """
    N_v = v.shape[0]
    assert v.shape[1] == 3
    v_n_flag = v_n is not None
    if v_n_flag:
        assert v_n.shape[0] == N_v
        # Bug fix: the previous check compared type(v_n) against the *string*
        # 'torch.Tensor', which is never true, so torch inputs were never
        # converted to numpy here.
        if torch.is_tensor(v_n):
            v_n = v_n.detach().cpu().numpy()
    v_c_flag = v_c is not None
    if v_c_flag:
        assert v_c.shape[0] == N_v
        if v_c.shape[1] == 3:
            # Pad RGB to RGBA with a fully opaque alpha channel.
            alpha_channel = np.zeros((N_v, 1), dtype=np.ubyte) + 255
            v_c = np.hstack((v_c, alpha_channel))
    N_f = 0
    if f is not None:
        N_f = f.shape[0]
        assert f.shape[1] == 3
    f_c_flag = f_c is not None
    if f_c_flag:
        assert f_c.shape[0] == f.shape[0]
        if f_c.shape[1] == 3:
            alpha_channel = np.zeros((N_f, 1), dtype=np.ubyte) + 255
            f_c = np.hstack((f_c, alpha_channel))
    N_e = 0 if e is None else e.shape[0]
    with open(outfile_name, 'w') as file:
        # --- header ---
        file.write('ply\n')
        file.write('format ascii 1.0\n')
        file.write('element vertex %d\n' % N_v)
        file.write('property float x\n')
        file.write('property float y\n')
        file.write('property float z\n')
        if v_n_flag:
            file.write('property float nx\n')
            file.write('property float ny\n')
            file.write('property float nz\n')
        if v_c_flag:
            file.write('property uchar red\n')
            file.write('property uchar green\n')
            file.write('property uchar blue\n')
            file.write('property uchar alpha\n')
        file.write('element face %d\n' % N_f)
        file.write('property list uchar int vertex_indices\n')
        if f_c_flag:
            file.write('property uchar red\n')
            file.write('property uchar green\n')
            file.write('property uchar blue\n')
            file.write('property uchar alpha\n')
        if N_e != 0:
            file.write('element edge %d\n' % N_e)
            file.write('property int vertex1\n')
            file.write('property int vertex2\n')
        file.write('end_header\n')
        # --- vertex records ---
        if v_n_flag and v_c_flag:
            for i in range(0, N_v):
                file.write('%f %f %f %f %f %f %d %d %d %d\n' % (v[i, 0], v[i, 1], v[i, 2], v_n[i, 0], v_n[i, 1], v_n[i, 2], v_c[i, 0], v_c[i, 1], v_c[i, 2], v_c[i, 3]))
        elif v_n_flag:
            for i in range(0, N_v):
                file.write('%f %f %f %f %f %f\n' % (v[i, 0], v[i, 1], v[i, 2], v_n[i, 0], v_n[i, 1], v_n[i, 2]))
        elif v_c_flag:
            for i in range(0, N_v):
                file.write('%f %f %f %d %d %d %d\n' % (v[i, 0], v[i, 1], v[i, 2], v_c[i, 0], v_c[i, 1], v_c[i, 2], v_c[i, 3]))
        else:
            for i in range(0, N_v):
                file.write('%f %f %f\n' % (v[i, 0], v[i, 1], v[i, 2]))
        # --- face records ---
        if f_c_flag:
            for i in range(0, N_f):
                file.write('3 %d %d %d %d %d %d %d\n' % (f[i, 0], f[i, 1], f[i, 2], f_c[i, 0], f_c[i, 1], f_c[i, 2], f_c[i, 3]))
        else:
            for i in range(0, N_f):
                file.write('3 %d %d %d\n' % (f[i, 0], f[i, 1], f[i, 2]))
        # --- edge records ---
        for i in range(0, N_e):
            file.write('%d %d\n' % (e[i, 0], e[i, 1]))
|
def save_result_examples(save_dir, model_name, result_name, points, normals=None, patch_color=None, texture=None, coarse_pts=None, gt=None, epoch=None):
    """Export predicted point clouds (and optional extra channels) as .ply files.

    All optional outputs reuse the prediction filename with a suffix swap
    (pred_patchcolor / pred_texture / interm / gt).
    """
    from os.path import join
    import numpy as np
    if epoch is None:
        base_fn = '{}_{}_pred.ply'.format(model_name, result_name)
    else:
        base_fn = '{}_epoch{}_{}_pred.ply'.format(model_name, str(epoch).zfill(4), result_name)
    base_fn = join(save_dir, base_fn)
    points = tensor2numpy(points)
    if normals is not None:
        normals = tensor2numpy(normals)
        # Color the cloud by its normals for quick visual inspection.
        customized_export_ply(base_fn, v=points, v_n=normals, v_c=vertex_normal_2_vertex_color(normals))
    if patch_color is not None:
        patch_color = tensor2numpy(patch_color)
        if patch_color.max() < 1.1:
            # Values look normalized to [0, 1]; rescale to ubyte range.
            patch_color = (patch_color * 255.0).astype(np.ubyte)
        customized_export_ply(base_fn.replace('pred.ply', 'pred_patchcolor.ply'), v=points, v_c=patch_color)
    if texture is not None:
        texture = tensor2numpy(texture)
        if texture.max() < 1.1:
            texture = (texture * 255.0).astype(np.ubyte)
        customized_export_ply(base_fn.replace('pred.ply', 'pred_texture.ply'), v=points, v_c=texture)
    if coarse_pts is not None:
        customized_export_ply(base_fn.replace('pred.ply', 'interm.ply'), v=tensor2numpy(coarse_pts))
    if gt is not None:
        customized_export_ply(base_fn.replace('pred.ply', 'gt.ply'), v=tensor2numpy(gt))
|
def adjust_loss_weights(init_weight, current_epoch, mode='decay', start=400, every=20):
    """Schedule a loss weight over training epochs.

    Before epoch `start`: 'decay' keeps init_weight, 'rise' starts at
    init_weight * 1e-6. From `start` on, the weight is multiplied by
    1.05 ('rise') or 0.85 (any other mode, i.e. 'decay') once per `every`
    epochs; every=0 freezes it at init_weight.

    :raises ValueError: for mode 'binary' — previously this path fell through
        every branch and crashed with UnboundLocalError.
    """
    if mode == 'binary':
        # Explicit, documented error instead of the former UnboundLocalError.
        raise ValueError("adjust_loss_weights: mode 'binary' is not implemented")
    if current_epoch < start:
        return init_weight * 1e-06 if mode == 'rise' else init_weight
    if every == 0:
        return init_weight  # no periodic adjustment requested
    factor = 1.05 if mode == 'rise' else 0.85
    return init_weight * (factor ** ((current_epoch - start) // every))
|
class HierarchicalContextAggregationLoss(nn.Module):
    """Implementation of Hierarchical Context Aggregation.

    This loss combines multiple PixelwiseContextual losses with different
    (alpha, beta) scales. Given a descriptor with n-dims and n-losses scales,
    each loss is given n-dims // n-losses channels. Theoretically, each of
    these losses could also have different margins, but in practice they are
    usually equal.

    Attributes can be provided as a single number (same value is used for all
    losses) or as a list (must contain a value for each loss).

    Attributes:
        n_scales (int): Number of PixelwiseContextual losses
        margins (list or float): Target margin distance between positives and negatives
        alphas (list or int): Minimum distance from original positive KeyPoint
        betas (list or int): Maximum distance from original positive KeyPoint
        n_negs (list or int): Number of negative samples to generate

    Methods:
        forward: Compute pixel-wise contrastive loss
        forward_eval: Detailed forward pass for logging
    """
    def __init__(self, n_scales=1, margins=0.5, alphas=None, betas=None, n_negs=10):
        super().__init__()
        self.n_scales = n_scales
        self.margins = margins
        self.alphas = alphas
        self.betas = betas
        self.n_negs = n_negs
        # One PixelwiseContrastiveLoss per scale, built from the attributes above.
        self._losses = self._parse_losses()
        # Ensures the divisibility warning in forward() fires only once.
        self._has_warned = False
    def __repr__(self):
        params = (self.n_scales, self.margins, self.alphas, self.betas, self.n_negs)
        return f'{self.__class__.__qualname__}{params}'
    def __str__(self):
        return '__'.join([f'Loss {i}: {loss}' for (i, loss) in enumerate(self._losses)])
    @staticmethod
    def add_parser_args(parser):
        # CLI flags mirroring the constructor arguments.
        parser.add_argument('--n-scales', default=1, type=int, help='Number of hierarchical sampling strategies')
        parser.add_argument('--margins', default=0.5, type=str, nargs='*', help='List of margins for each Scale')
        parser.add_argument('--alphas', default=None, type=str, nargs='*', help='List of alphas for each Scale')
        parser.add_argument('--betas', default=None, type=str, nargs='*', help='List of betas for each Scale')
        parser.add_argument('--n-negs', default=10, type=str, nargs='*', help='List of n-negs for each Scale')
    def _parse_losses(self):
        """Build one PixelwiseContrastiveLoss per scale from the config attrs."""
        configs = [self.margins, self.alphas, self.betas, self.n_negs]
        # Broadcast scalars to per-scale lists, then regroup per scale into
        # (margin_i, alpha_i, beta_i, n_neg_i) tuples.
        configs = ((c if isinstance(c, (list, tuple)) else ([c] * self.n_scales)) for c in configs)
        configs = [*zip(*configs)]
        if (len(configs) != self.n_scales):
            raise ValueError(f'Invalid number of configurations. ({self.n_scales} vs. {len(configs)}) ')
        return [PixelwiseContrastiveLoss(*c) for c in configs]
    def forward(self, features, labels):
        """Compute pixel-wise contrastive loss.

        :param features: Vertically stacked feature maps (b, n-dim, h*2, w)
        :param labels: Horizontally stacked correspondence KeyPoints (b, n-kpts, 4) -> (x1, y1, x2, y2)
        :return: Loss
        """
        if ((features.shape[1] % self.n_scales) and (not self._has_warned)):
            warn(f'Feature dimensions and scales are not exactly divisible. ({features.shape[1]} and {self.n_scales})')
            self._has_warned = True
        # Each sub-loss sees its own chunk of the descriptor channels.
        feature_chunks = torch.chunk(features, self.n_scales, dim=1)
        return sum((loss(feat, labels) for (feat, loss) in zip(feature_chunks, self._losses)))
|
class PixelwiseContrastiveLoss(nn.Module):
    """Implementation of "pixel-wise" contrastive loss. Contrastive loss
    typically compares two whole images:

        L = (Y) * (1/2 * d**2) + (1 - Y) * (1/2 * max(0, margin - d)**2)

    In this instance, we instead compare pairs of features within those images.
    Positive matches are given by ground truth correspondences between images.
    Negative matches are generated on-the-fly based on provided parameters.

    Attributes:
        margin (float): Target margin distance between positives and negatives
        alpha (int): Minimum distance from original positive KeyPoint
        beta (int): Maximum distance from original positive KeyPoint
        n_neg (int): Number of negative samples to generate

    Methods:
        forward: Compute pixel-wise contrastive loss
        forward_eval: Detailed forward pass for logging
    """
    def __init__(self, margin=0.5, alpha=None, beta=None, n_neg=10):
        super().__init__()
        self.margin = margin
        self.alpha = alpha
        self.beta = beta
        self.n_neg = n_neg
        # Euclidean distance between paired descriptor vectors.
        self._dist = nn.PairwiseDistance()
    def __repr__(self):
        return f'{self.__class__.__qualname__}({self.margin}, {self.alpha}, {self.beta}, {self.n_neg})'
    def __str__(self):
        return f"Min{(self.alpha or 0)}_Max{(self.beta or 'Inf')}"
    @staticmethod
    def create_parser(parser):
        parser.add_argument('--margin', default=0.5, help='Target distance between negative feature embeddings.')
        parser.add_argument('--alpha', default=None, type=float, help='Minimum distance from positive KeyPoint')
        parser.add_argument('--beta', default=None, type=float, help='Maximum distance from positive KeyPoint')
        parser.add_argument('--n_neg', default=10, help='Number of negative samples to generate')
    def forward(self, features, labels):
        """Compute pixel-wise contrastive loss.

        :param features: Vertically stacked feature maps (b, n-dim, h*2, w)
        :param labels: Horizontally stacked correspondence KeyPoints (b, n-kpts, 4) -> (x1, y1, x2, y2)
        :return: Loss
        """
        # Split the vertically stacked maps / horizontally stacked keypoints
        # back into (source, target) pairs.
        (source, target) = torch.chunk(features, 2, dim=(- 2))
        (source_kpts, target_kpts) = torch.chunk(labels, 2, dim=(- 1))
        # Pull matched descriptors together, push generated negatives apart.
        loss = self._positive_loss(source, target, source_kpts, target_kpts)[0]
        loss += self._negative_loss(source, target, source_kpts, target_kpts)[0]
        return loss
    def _calc_distance(self, source, target, source_kpts, target_kpts):
        # Gather per-keypoint descriptor vectors, then measure pairwise distance.
        source_descriptors = ops.extract_kpt_vectors(source, source_kpts).permute([0, 2, 1])
        target_descriptors = ops.extract_kpt_vectors(target, target_kpts).permute([0, 2, 1])
        return self._dist(source_descriptors, target_descriptors)
    def _positive_loss(self, source, target, source_kpts, target_kpts):
        # Quadratic pull term: 1/2 * d^2 over ground-truth correspondences.
        dist = self._calc_distance(source, target, source_kpts, target_kpts)
        loss = ((dist ** 2).mean() / 2)
        return (loss, dist)
    def _negative_loss(self, source, target, source_kpts, target_kpts):
        # Hinge push term: penalize negatives closer than the margin.
        (dsource_kpts, dtarget_kpts) = self._generate_negative_like(source, source_kpts, target_kpts)
        dist = self._calc_distance(source, target, dsource_kpts, dtarget_kpts)
        margin_dist = (self.margin - dist).clamp(min=0.0)
        loss = ((margin_dist ** 2).mean() / 2)
        return (loss, dist)
    def _generate_negative_like(self, other, source_kpts, target_kpts):
        # Repeat each positive pair n_neg times and perturb only the targets.
        source_kpts = source_kpts.repeat([1, self.n_neg, 1])
        target_kpts = target_kpts.repeat([1, self.n_neg, 1])
        target_kpts = self._permute_negatives(target_kpts, other.shape)
        return (source_kpts, target_kpts)
    def _permute_negatives(self, kpts, shape):
        (h, w) = shape[(- 2):]
        # Offset magnitude range: [alpha, beta) when set, else [0, max(h,w)).
        low = (self.alpha if self.alpha else 0)
        high = (self.beta if self.beta else (max(h, w) - low))
        shift = torch.randint_like(kpts, low=low, high=high)
        # Randomize the offset sign per coordinate.
        shift *= torch.sign((torch.rand_like(shift, dtype=torch.float) - 0.5)).short()
        new_kpts = (kpts + shift)
        # Wrap around the image borders — assumes kpts are integer (x, y)
        # coordinates stored as short; TODO confirm against extract_kpt_vectors.
        new_kpts %= torch.tensor((w, h), dtype=torch.short, device=new_kpts.device)
        # Re-clamp the effective displacement to at most `high` after wrapping.
        diffs = (new_kpts - kpts)
        diff_clamp = torch.clamp(diffs, min=(- high), max=high)
        new_kpts += (diff_clamp - diffs)
        return new_kpts
|
class SSIM(nn.Module):
    """Layer computing the weighted SSIM + L1 loss between a pair of images."""

    def __init__(self, ssim_weight=0.85):
        super().__init__()
        self.a = ssim_weight
        self.b = 1 - ssim_weight
        self.pool = nn.AvgPool2d(3, 1)
        self.refl = nn.ReflectionPad2d(1)
        # Standard SSIM stabilizing constants.
        self.C1 = 0.01 ** 2
        self.C2 = 0.03 ** 2

    def forward(self, pred, target):
        # Per-pixel L1 term, averaged over channels.
        l1_term = torch.abs(target - pred).mean(1, keepdim=True)
        # Local statistics via 3x3 average pooling on reflection-padded inputs.
        p = self.refl(pred)
        t = self.refl(target)
        mu_p = self.pool(p)
        mu_t = self.pool(t)
        var_p = self.pool(p ** 2) - mu_p ** 2
        var_t = self.pool(t ** 2) - mu_t ** 2
        cov_pt = self.pool(p * t) - mu_p * mu_t
        numerator = (2 * mu_p * mu_t + self.C1) * (2 * cov_pt + self.C2)
        denominator = (mu_p ** 2 + mu_t ** 2 + self.C1) * (var_p + var_t + self.C2)
        # DSSIM in [0, 1], averaged over channels.
        dssim = torch.clamp((1 - numerator / denominator) / 2, min=0, max=1).mean(1, keepdim=True)
        return self.a * dssim + self.b * l1_term
|
@dataclass(eq=False)
class BaseModel(nn.Module):
    """Base class for PyTorch networks, expanding nn.Module.

    Initialization parameters will be automatically added to the state_dict
    and checked when loading a checkpoint.

    Required:
        :method forward: Model forward pass (PyTorch standard)

    Helpers:
        :classmethod from_ckpt: Restores a model from a checkpoint file. Can make use of overloaded state_dicts.
        :staticmethod add_parser_args: Add any additional arguments required for command line parsing
        :method state_dict: PyTorch state_dict + additional parameters needed for initialization
        :method load_state_dict: Check init parameters match the loaded state_dict and PyTorch load
    """
    def __post_init__(self):
        """Initialize nn.Module after the dataclass-generated __init__ runs."""
        super().__init__()
    def __hash__(self):
        # dataclass(eq=False) would otherwise drop hashing; keep nn.Module's
        # identity-based hash.
        return super().__hash__()
    @classmethod
    def from_ckpt(cls, ckpt_file, key=None, strict=True):
        """Create network from a saved checkpoint.

        :param ckpt_file: File containing saved checkpoint.
        :param key: Function of one argument used to extract the network state_dict (same as built-in "sort" key)
        :param strict: Strictly enforce matching keys between the checkpoint and the model.
        :return: Restored class
        """
        ckpt_dict = torch.load(ckpt_file)
        ckpt_dict = (key(ckpt_dict) if key else ckpt_dict)
        # With strict=False a missing init key is silently suppressed —
        # NOTE(review): in that case `model` is never bound and the call below
        # raises NameError instead; confirm whether this path is ever hit.
        manager = (nullcontext() if strict else suppress(KeyError))
        with manager:
            # Recover the constructor arguments stored by state_dict().
            kwargs = {k: ckpt_dict[k] for k in cls.__dataclass_fields__}
            model = cls(**kwargs)
        model.load_state_dict(ckpt_dict, strict=strict)
        return model
    @classmethod
    def from_dict(cls, in_dict):
        """Instantiate class from dict. Ignores any unrecognized arguments."""
        new_dict = {k: v for (k, v) in in_dict.items() if (k in cls.__dataclass_fields__)}
        return cls(**new_dict)
    @staticmethod
    def add_parser_args(parser):
        """Add required parameters for parsing."""
        raise NotImplementedError
    def state_dict(self, destination=None, prefix='', keep_vars=False):
        # Augment the regular tensor state with the dataclass init parameters
        # so checkpoints are self-describing.
        state_dict = super().state_dict(destination, prefix, keep_vars)
        for k in self.__dataclass_fields__:
            state_dict[k] = getattr(self, k)
        return state_dict
    def load_state_dict(self, state_dict, strict=True):
        # Copy so popping the init parameters does not mutate the caller's dict.
        state_dict = state_dict.copy()
        manager = (nullcontext() if strict else suppress(KeyError))
        for k in self.__dataclass_fields__:
            with manager:
                # Pop the stored init parameter and verify it matches this
                # instance's configuration before loading tensor weights.
                v = state_dict.pop(k)
                assert (self.__dict__[k] == v), f'Parameter "{k}" mismatch. ({self.__dict__[k]} vs. {v})'
        super().load_state_dict(state_dict, strict=strict)
    def forward(self, *args, **kwargs):
        """Network forward pass."""
        raise NotImplementedError
|
class ConvBlock(nn.Module):
    """Reflection-padded convolution with optional BatchNorm, activation and
    dropout, assembled as a named nn.Sequential.
    """

    def __init__(self, in_ch, out_ch, k_size, stride=1, padding=None, dilation=1, *, bias=False, batch_norm=True, momentum=0.1, activation=None, drop_rate=None):
        super().__init__()
        # Falsy padding (None or 0) falls back to a "same"-style default that
        # accounts for dilation; this mirrors the original truthiness check.
        padding = padding or (dilation if dilation > 1 else k_size // 2)
        stages = OrderedDict()
        if padding:
            stages['pad'] = nn.ReflectionPad2d(padding)
        stages['conv'] = nn.Conv2d(in_ch, out_ch, k_size, stride, dilation=dilation, bias=bias)
        if batch_norm:
            stages['bn'] = nn.BatchNorm2d(out_ch, momentum=momentum)
        if activation:
            stages['act'] = activation(inplace=True)
        if drop_rate:
            stages['drop'] = nn.Dropout2d(drop_rate, inplace=True)
        self.layers = nn.Sequential(stages)

    def forward(self, x):
        return self.layers(x)
|
class ResidualBlock(nn.Module):
    """Two stacked ConvBlocks with an identity (or 1x1-projected) skip path."""

    def __init__(self, in_ch, out_ch, stride=1, padding=None, dilation=1, activation=nn.ReLU):
        super().__init__()
        self.block1 = ConvBlock(in_ch, out_ch, 3, stride, padding, dilation, activation=activation)
        self.block2 = ConvBlock(out_ch, out_ch, 3, 1, padding, dilation)
        # Project the input when the output shape changes; otherwise the skip
        # path is the identity.
        needs_projection = stride != 1 or in_ch != out_ch
        self.downsample = ConvBlock(in_ch, out_ch, 1, stride) if needs_projection else None

    def forward(self, x):
        residual = self.downsample(x) if self.downsample else x
        out = self.block2(self.block1(x))
        out += residual
        return out
|
class SimpleConvBlock(nn.Module):
    """Conv3x3 followed by an ELU non-linearity."""

    def __init__(self, in_channels, out_channels):
        super().__init__()
        self.conv = Conv3x3(in_channels, out_channels)
        self.nonlin = nn.ELU(inplace=True)

    def forward(self, x):
        out = self.conv(x)
        return self.nonlin(out)
|
class Conv3x3(nn.Module):
    """3x3 convolution preceded by 1-pixel padding (reflection or zero)."""

    def __init__(self, in_channels, out_channels, use_refl=True):
        super().__init__()
        if use_refl:
            self.pad = nn.ReflectionPad2d(1)
        else:
            self.pad = nn.ZeroPad2d(1)
        self.conv = nn.Conv2d(int(in_channels), int(out_channels), 3)

    def forward(self, x):
        padded = self.pad(x)
        return self.conv(padded)
|
@dataclass(eq=False)
class DeFeatNet(BaseModel):
    """Composite network bundling depth, relative-pose and dense-feature
    sub-networks that share one set of construction parameters."""
    # ResNet encoder depth shared by the depth and pose networks.
    num_layers: int
    # Use a pretrained encoder.
    preres: bool
    # Disparity output scales (range is immutable, so a shared default is safe).
    scales: list = range(4)
    use_skips: bool = True
    # Feature descriptor dimensionality for FeatNet.
    n_dims: int = 3
    spp_branches: list = None
    activation: str = 'relu'
    im_pad: int = None
    norm: bool = True
    def __post_init__(self):
        super().__post_init__()
        # Three sub-networks: dense depth, frame-to-frame pose, dense features.
        self.depth_net = DepthNet(self.num_layers, self.preres, self.scales, self.use_skips)
        self.pose_net = PoseNet(self.num_layers, self.preres)
        self.feat_net = FeatNet(self.n_dims, self.spp_branches, self.activation, self.im_pad, self.norm)
    @staticmethod
    def add_parser_args(parser):
        # The pose network reuses DepthNet's encoder arguments, so only
        # DepthNet and FeatNet contribute flags.
        DepthNet.add_parser_args(parser)
        FeatNet.add_parser_args(parser)
    def forward(self, target_frames, support_frames, support_idxs):
        """
        :param target_frames: Frame we want to predict depth for
        :param support_frames: Previous and/or following frames
        :param support_idxs: Index wrt original frames
        :return: (target disparities, target features, support features, poses)
        """
        target_disps = self.depth_net(target_frames)
        target_features = self.feat_net(target_frames)
        # Run all support frames through the feature net in a single batch,
        # then split the result back per frame.
        support_features = self.feat_net(torch.cat(support_frames, dim=0)).chunk(len(support_frames), dim=0)
        poses = []
        for (idx, sf) in zip(support_idxs, support_frames):
            # Argument order depends on the sign of the support index —
            # presumably to keep the pose direction temporally consistent;
            # confirm against PoseNet's convention.
            inp = ((sf, target_frames) if (idx > 0) else (target_frames, sf))
            poses.append(self.pose_net(*inp))
        return (target_disps, target_features, support_features, poses)
|
@dataclass(eq=False)
class DepthNet(BaseModel):
    """ResNet-encoder / U-Net-decoder disparity network (monodepth-style)."""
    # ResNet encoder depth (18/34/50/101/152).
    num_layers: int
    # Use a pretrained encoder.
    preres: bool = True
    # Scales at which a sigmoid disparity map is produced.
    scales: list = range(4)
    # Concatenate encoder features into the decoder (U-Net skips).
    use_skips: bool = True
    def __post_init__(self):
        super().__post_init__()
        self.enc_ch = np.array([64, 64, 128, 256, 512])
        self.dec_ch = np.array([16, 32, 64, 128, 256])
        if (self.num_layers > 34):
            # Bottleneck ResNets (50+) expand channel counts 4x after the stem.
            self.enc_ch[1:] *= 4
        self.convs = OrderedDict()
        for i in range(4, (- 1), (- 1)):
            # First conv of each decoder stage: reduce channels before upsampling.
            in_ch = (self.enc_ch[(- 1)] if (i == 4) else self.dec_ch[(i + 1)])
            self.convs[('upconv', i, 0)] = SimpleConvBlock(in_ch, self.dec_ch[i])
            # Second conv: fuse the (optional) encoder skip connection.
            in_ch = (self.dec_ch[i] + (self.enc_ch[(i - 1)] if (self.use_skips and (i > 0)) else 0))
            self.convs[('upconv', i, 1)] = SimpleConvBlock(in_ch, self.dec_ch[i])
        for s in self.scales:
            # One single-channel disparity head per requested scale.
            self.convs[('dispconv', s)] = Conv3x3(self.dec_ch[s], 1)
        self.encoder = getattr(models, f'resnet{self.num_layers}')(self.preres)
        # Register the decoder modules with nn.Module via a ModuleList
        # (self.convs itself is a plain OrderedDict used for lookup).
        self.decoder = nn.ModuleList(list(self.convs.values()))
    @staticmethod
    def add_parser_args(parser):
        parser.add_argument('--num-layers', default=18, choices=[18, 34, 50, 101, 152], type=int, help='ResNet encoder layers')
        parser.add_argument('--preres', action='store_true', help='Pretrained ResNet encoder')
        parser.add_argument('--scales', default=range(4), nargs='*', type=int)
        parser.add_argument('--no-skips', action='store_false', dest='use_skips', help='Disable skip connections')
    def forward(self, images):
        (features, outputs) = ([], {})
        # Fixed input normalization — approximately ImageNet mean/std,
        # presumably; confirm against the training pipeline.
        x = ((images - 0.45) / 0.225)
        # Encoder: collect feature maps at each resolution for the skips.
        x = self.encoder.conv1(x)
        x = self.encoder.bn1(x)
        features.append(self.encoder.relu(x))
        features.append(self.encoder.layer1(self.encoder.maxpool(features[(- 1)])))
        features.append(self.encoder.layer2(features[(- 1)]))
        features.append(self.encoder.layer3(features[(- 1)]))
        features.append(self.encoder.layer4(features[(- 1)]))
        x = features[(- 1)]
        # Decoder: upsample from coarsest to finest, emitting disparities at
        # the requested scales.
        for i in range(4, (- 1), (- 1)):
            x = ops.upsample(self.convs[('upconv', i, 0)](x), factor=2, bilinear=False)
            if (self.use_skips and (i > 0)):
                x = torch.cat((x, features[(i - 1)]), dim=1)
            x = self.convs[('upconv', i, 1)](x)
            if (i in self.scales):
                outputs[('disp', i)] = torch.sigmoid(self.convs[('dispconv', i)](x))
        return outputs
|
def discriminator(glove, hidden_size):
    """Build an LSTM discriminator over hypothesis token ids.

    The network embeds the tokens with fixed GloVe embeddings, encodes them
    with an LSTM and outputs a single sigmoid probability; it is compiled
    with binary cross-entropy and Adam.
    """
    hypo_input = Input(shape=(None,), dtype='int32')
    embedded = make_fixed_embeddings(glove, None)(hypo_input)
    encoded = LSTM(hidden_size, inner_activation='sigmoid')(embedded)
    probability = Dense(1, activation='sigmoid')(encoded)
    model = Model([hypo_input], probability)
    model.compile(loss='binary_crossentropy', optimizer='adam')
    return model
|
def adverse_model(discriminator):
    """Wrap a discriminator into an adversarial model.

    The model outputs log D(train) + log(1 - D(hypo)); compiled with the
    custom `minimize` loss, training drives that margin towards zero.
    """
    train_input = Input(shape=(None,), dtype='int32')
    hypo_input = Input(shape=(None,), dtype='int32')

    def margin_opt(inputs):
        assert (len(inputs) == 2), ('Margin Output needs 2 inputs, %d given' % len(inputs))
        # log D(train) + log(1 - D(hypo))
        return K.log(inputs[0]) + K.log(1 - inputs[1])

    scores = [discriminator(train_input), discriminator(hypo_input)]
    margin = Lambda(margin_opt, output_shape=(lambda s: (None, 1)))(scores)
    adversarial_model = Model([train_input, hypo_input], margin)
    adversarial_model.compile(loss=minimize, optimizer='adam')
    return adversarial_model
|
def minimize(y_true, y_pred):
    """Loss that pushes the mean prediction toward zero: |mean(y_pred)|."""
    mean_pred = K.mean(y_pred, axis=(- 1))
    return K.abs(mean_pred)
|
def reinit(ad_model):
    # Recompile to reset optimizer state (fresh Adam moments) between training phases.
    ad_model.compile(loss=minimize, optimizer='adam')
|
class FeedLSTM(LSTM):
    """LSTM whose second recurrent state can be seeded externally.

    Either assign a value at run time with set_state() (stateful usage) or
    pass a symbolic tensor as *feed_layer* to use as the initial value of
    states[1]. Keras-1 API.
    """
    def __init__(self, feed_layer=None, **kwargs):
        # Optional symbolic tensor used as initial states[1].
        self.feed_layer = feed_layer
        self.supports_masking = False
        super(FeedLSTM, self).__init__(**kwargs)
    def set_state(self, noise):
        # Overwrite the second recurrent state in place.
        K.set_value(self.states[1], noise)
    def get_initial_states(self, x):
        # Standard Keras-1 trick: reduce x over time and project with a zero
        # matrix to get a symbolic zero tensor of shape (samples, output_dim).
        initial_state = K.zeros_like(x)
        initial_state = K.sum(initial_state, axis=1)
        reducer = K.zeros((self.input_dim, self.output_dim))
        initial_state = K.dot(initial_state, reducer)
        initial_states = [initial_state for _ in range(len(self.states))]
        if (self.feed_layer is not None):
            # Replace the second zero state with the externally supplied tensor.
            initial_states[1] = self.feed_layer
        return initial_states
|
class LstmAttentionLayer(LSTM):
    """Word-by-word attention LSTM over a pair of encoded sequences.

    Input is a list [hypothesis_states, premise_states], each of shape
    (batch, time, output_dim). At every hypothesis step the layer attends
    over all premise states and feeds the attention summary into the LSTM
    gates. Theano-only: uses `dimshuffle` and `K.T` directly.
    """
    def __init__(self, feed_state=False, **kwargs):
        # When True, a third input x[2] seeds the second recurrent state.
        self.feed_state = feed_state
        self.supports_masking = False
        super(LstmAttentionLayer, self).__init__(**kwargs)
    def get_output_shape_for(self, input_shape):
        # input_shape is a list; output shape follows the first (hypothesis) input.
        if self.return_sequences:
            return (input_shape[0][0], input_shape[0][1], self.output_dim)
        else:
            return (input_shape[0][0], self.output_dim)
    def compute_mask(self, input, mask):
        # Masking is unsupported; drop any incoming mask.
        return None
    def call(self, x, mask=None):
        # NOTE(review): deliberately skips LSTM.call and dispatches to LSTM's
        # base class (presumably Recurrent.call) with the mask removed -- verify.
        return super(LSTM, self).call(x, None)
    def build(self, input_shape):
        """Create attention weights (W_s, W_t, W_a, w_e) plus standard LSTM gates.

        Gate input matrices are (2 * output_dim, output_dim) because each step
        consumes the concatenation [attention_summary, hypothesis_state].
        """
        self.input_spec = [InputSpec(shape=shape) for shape in input_shape]
        input_dim = input_shape[1][2]
        self.input_dim = input_dim
        if self.stateful:
            self.reset_states()
        else:
            # [h, c] recurrent states.
            self.states = [None, None]
        # Attention parameters: premise projection, step projection,
        # previous-hidden projection, and energy vector.
        self.W_s = self.init((self.output_dim, self.output_dim))
        self.W_t = self.init((self.output_dim, self.output_dim))
        self.W_a = self.init((self.output_dim, self.output_dim))
        self.w_e = K.zeros((self.output_dim,))
        # LSTM gate weights over the concatenated [attention, input] vector.
        self.W_i = self.init(((2 * self.output_dim), self.output_dim))
        self.U_i = self.inner_init((self.output_dim, self.output_dim))
        self.b_i = K.zeros((self.output_dim,))
        self.W_f = self.init(((2 * self.output_dim), self.output_dim))
        self.U_f = self.inner_init((self.output_dim, self.output_dim))
        self.b_f = self.forget_bias_init((self.output_dim,))
        self.W_c = self.init(((2 * self.output_dim), self.output_dim))
        self.U_c = self.inner_init((self.output_dim, self.output_dim))
        self.b_c = K.zeros((self.output_dim,))
        self.W_o = self.init(((2 * self.output_dim), self.output_dim))
        self.U_o = self.inner_init((self.output_dim, self.output_dim))
        self.b_o = K.zeros((self.output_dim,))
        self.trainable_weights = [self.W_s, self.W_t, self.W_a, self.w_e, self.W_i, self.U_i, self.b_i, self.W_c, self.U_c, self.b_c, self.W_f, self.U_f, self.b_f, self.W_o, self.U_o, self.b_o]
    def preprocess_input(self, x):
        # Recurrence runs over the hypothesis sequence only.
        return x[0]
    def set_state(self, noise):
        # Overwrite the second recurrent state in place (stateful usage).
        K.set_value(self.states[1], noise)
    def get_constants(self, x):
        # Pass the premise states and their W_s projection to every step.
        return [x[1], K.dot(x[1], self.W_s)]
    def get_initial_states(self, x):
        # Symbolic zeros of shape (samples, output_dim) for each state.
        initial_state = K.zeros_like(x[0])
        initial_state = K.sum(initial_state, axis=1)
        reducer = K.zeros((self.output_dim, self.output_dim))
        initial_state = K.dot(initial_state, reducer)
        initial_states = [initial_state for _ in range(len(self.states))]
        if self.feed_state:
            # Third input tensor seeds the cell state.
            initial_states[1] = x[2]
        return initial_states
    def step(self, x, states):
        # states: [h_prev, c_prev, premise_states, premise_projection].
        h_s = states[2]
        P_j = states[3]
        # Attention energies over all premise positions.
        P_t = K.dot(x, self.W_t)
        P_a = K.dot(states[0], self.W_a)
        sum3 = ((P_j + P_t.dimshuffle((0, 'x', 1))) + P_a.dimshuffle((0, 'x', 1)))
        E_kj = K.tanh(sum3).dot(self.w_e)
        Alpha_kj = K.softmax(E_kj)
        # Attention-weighted premise summary a_k.
        weighted = (h_s * Alpha_kj.dimshuffle((0, 1, 'x')))
        a_k = weighted.sum(axis=1)
        # Gate input is [summary, current hypothesis state].
        m_k = K.T.concatenate([a_k, x], axis=1)
        x_i = (K.dot(m_k, self.W_i) + self.b_i)
        x_f = (K.dot(m_k, self.W_f) + self.b_f)
        x_c = (K.dot(m_k, self.W_c) + self.b_c)
        x_o = (K.dot(m_k, self.W_o) + self.b_o)
        # Standard LSTM gate equations.
        i = self.inner_activation((x_i + K.dot(states[0], self.U_i)))
        f = self.inner_activation((x_f + K.dot(states[0], self.U_f)))
        c = ((f * states[1]) + (i * self.activation((x_c + K.dot(states[0], self.U_c)))))
        o = self.inner_activation((x_o + K.dot(states[0], self.U_o)))
        h = (o * self.activation(c))
        return (h, [h, c])
    def get_config(self):
        config = {'feed_state': self.feed_state}
        base_config = super(LstmAttentionLayer, self).get_config()
        return dict((list(base_config.items()) + list(config.items())))
|
def train(train, dev, model, model_dir, batch_size):
    """Fit *model* with early stopping, best-weights checkpointing and CSV history.

    *train* and *dev* are (premise, hypothesis, labels) triples; artifacts go
    under *model_dir*. Returns the Keras fit history.
    """
    if (not os.path.exists(model_dir)):
        os.makedirs(model_dir)
    stopper = EarlyStopping(patience=2)
    checkpoint = ModelCheckpoint((model_dir + '/model.weights'),
                                 monitor='val_loss', save_best_only=True)
    history = CsvHistory((model_dir + '/history.csv'))
    return model.fit([train[0], train[1]], train[2],
                     batch_size=batch_size, nb_epoch=1000,
                     validation_data=([dev[0], dev[1]], dev[2]),
                     callbacks=[checkpoint, stopper, history])
|
def attention_model(hidden_size, glove):
    """Word-by-word attention entailment model: (premise, hypothesis) -> 3-way softmax."""
    prem_input = Input(shape=(None,), dtype='int32')
    hypo_input = Input(shape=(None,), dtype='int32')
    prem_seq = make_fixed_embeddings(glove, None)(prem_input)
    hypo_seq = make_fixed_embeddings(glove, None)(hypo_input)
    prem_encoded = LSTM(output_dim=hidden_size, return_sequences=True,
                        inner_activation='sigmoid')(prem_seq)
    hypo_encoded = LSTM(output_dim=hidden_size, return_sequences=True,
                        inner_activation='sigmoid')(hypo_seq)
    attended = LstmAttentionLayer(output_dim=hidden_size)([hypo_encoded, prem_encoded])
    probs = Dense(3, activation='softmax')(attended)
    model = Model(input=[prem_input, hypo_input], output=probs)
    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    return model
|
def attention_bnorm_model(hidden_size, glove):
    """Attention entailment model with batch-norm after each encoder and the attention layer."""
    prem_input = Input(shape=(None,), dtype='int32')
    hypo_input = Input(shape=(None,), dtype='int32')
    prem_seq = make_fixed_embeddings(glove, None)(prem_input)
    hypo_seq = make_fixed_embeddings(glove, None)(hypo_input)
    prem_encoded = LSTM(output_dim=hidden_size, return_sequences=True,
                        inner_activation='sigmoid')(prem_seq)
    prem_norm = BatchNormalization()(prem_encoded)
    hypo_encoded = LSTM(output_dim=hidden_size, return_sequences=True,
                        inner_activation='sigmoid')(hypo_seq)
    hypo_norm = BatchNormalization()(hypo_encoded)
    attended = LstmAttentionLayer(output_dim=hidden_size)([hypo_norm, prem_norm])
    att_norm = BatchNormalization()(attended)
    probs = Dense(3, activation='softmax')(att_norm)
    model = Model(input=[prem_input, hypo_input], output=probs)
    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    return model
|
def make_fixed_embeddings(glove, seq_len):
    """Build a frozen Embedding layer from *glove*.

    :param glove: mapping word -> embedding vector; only the values are used,
        in dict iteration order -- assumes indices elsewhere follow the same
        order (TODO confirm against vocabulary construction)
    :param seq_len: fixed input length, or None for variable length
    :return: non-trainable Keras Embedding layer
    """
    # np.array(glove.values()) builds a useless 0-d object array on Python 3;
    # materialize the values as a list first (works on Python 2 as well).
    glove_mat = np.array(list(glove.values()))
    return Embedding(input_dim=glove_mat.shape[0], output_dim=glove_mat.shape[1],
                     weights=[glove_mat], trainable=False, input_length=seq_len)
|
class CsvHistory(Callback):
    """Keras callback that appends per-epoch metrics to a CSV file.

    Writes a header row (epoch + metric names) on the first epoch, then one
    row of 4-decimal metric values per epoch, flushing after each row so the
    file is readable mid-run.
    """
    def __init__(self, filename):
        # The original passed buffering=0, which Python 3 rejects for text
        # mode; explicit flush() below preserves the write-through intent.
        self.file = open(filename, 'a')
        self.writer = csv.writer(self.file)
        self.header = True
    def on_epoch_end(self, epoch, logs={}):
        if self.header:
            # list() is required on Python 3, where dict_keys cannot be
            # concatenated to a list.
            self.writer.writerow((['epoch'] + list(logs.keys())))
            self.header = False
        self.writer.writerow(([epoch] + [('%0.4f' % v) for v in logs.values()]))
        self.file.flush()
    def on_train_end(self, logs={}):
        self.file.close()
|
def merge_result_batches(batches):
    """Concatenate per-output arrays across prediction batches.

    :param batches: non-empty sequence of tuples/lists of np.ndarrays, all
        with the same number of outputs
    :return: list with one concatenated array per output position

    Replaces the original pairwise np.concatenate loop (which re-copied the
    accumulated result for every batch, i.e. quadratic) with a single
    concatenation per output column.
    """
    return [np.concatenate(columns) for columns in zip(*batches)]
|
class HierarchicalSoftmax(Layer):
    """Two-level hierarchical softmax output layer (Theano `h_softmax`).

    Factors output_dim into roughly sqrt(output_dim) classes x outputs per
    class, so evaluating a softmax over V labels costs O(sqrt(V)) rather than
    O(V). Input is [features (batch, time, dim), targets]; targets are only
    consumed when self.trainable is True, in which case the layer emits the
    per-target probability (last dim 1) instead of the full distribution.
    """
    def __init__(self, output_dim, init='glorot_uniform', **kwargs):
        self.init = initializations.get(init)
        self.output_dim = output_dim
        def hshape(n):
            # Factor n into two near-square levels with l1 * l2 >= n.
            from math import sqrt, ceil
            l1 = ceil(sqrt(n))
            l2 = ceil((n / l1))
            return (int(l1), int(l2))
        (self.n_classes, self.n_outputs_per_class) = hshape(output_dim)
        super(HierarchicalSoftmax, self).__init__(**kwargs)
    def build(self, input_shape):
        """Create the class-level (W1, b1) and within-class (W2, b2) weights."""
        self.input_spec = [InputSpec(shape=shape) for shape in input_shape]
        input_dim = self.input_spec[0].shape[(- 1)]
        self.W1 = self.init((input_dim, self.n_classes), name='{}_W1'.format(self.name))
        self.b1 = K.zeros((self.n_classes,), name='{}_b1'.format(self.name))
        self.W2 = self.init((self.n_classes, input_dim, self.n_outputs_per_class), name='{}_W2'.format(self.name))
        self.b2 = K.zeros((self.n_classes, self.n_outputs_per_class), name='{}_b2'.format(self.name))
        self.trainable_weights = [self.W1, self.b1, self.W2, self.b2]
    def get_output_shape_for(self, input_shape):
        # Last dimension is 1 (training, per-target prob) or output_dim
        # (inference), so it is reported as unknown here.
        return (input_shape[0][0], input_shape[0][1], None)
    def call(self, X, mask=None):
        input_shape = self.input_spec[0].shape
        # Flatten (batch, time, dim) -> (batch * time, dim) for h_softmax.
        x = K.reshape(X[0], ((- 1), input_shape[2]))
        # With targets, h_softmax returns only the target probabilities.
        target = (X[1].flatten() if self.trainable else None)
        Y = h_softmax(x, K.shape(x)[0], self.output_dim, self.n_classes, self.n_outputs_per_class, self.W1, self.b1, self.W2, self.b2, target)
        output_dim = (1 if self.trainable else self.output_dim)
        input_length = K.shape(X[0])[1]
        y = K.reshape(Y, ((- 1), input_length, output_dim))
        return y
    def get_config(self):
        config = {'output_dim': self.output_dim, 'init': self.init.__name__}
        base_config = super(HierarchicalSoftmax, self).get_config()
        return dict((list(base_config.items()) + list(config.items())))
|
def hs_categorical_crossentropy(y_true, y_pred):
    # Clip predictions away from 0/1 to keep log() finite; _EPSILON comes from
    # the Keras backend common module.
    y_pred = T.clip(y_pred, _EPSILON, (1.0 - _EPSILON))
    return T.nnet.categorical_crossentropy(y_pred, y_true)
|
def _remove_duplicate(input):
return list(set(input))
|
def build_stage_one_edges(res, graph_voc):
    """Build child->parent edges along each ontology path.

    :param res: list of token paths (leaf first, root last)
    :param graph_voc: Voc with word2idx mapping token -> node index
    :return: edge_idx as [row, col], e.g. [[1,2,3],[0,1,0]]
    """
    edge_idx = []
    for sample in res:
        ids = [graph_voc.word2idx[token] for token in sample]
        edge_idx.extend((ids[i + 1], ids[i]) for i in range(len(ids) - 1))
    edge_idx = _remove_duplicate(edge_idx)
    return [[edge[0] for edge in edge_idx], [edge[1] for edge in edge_idx]]
|
def build_stage_two_edges(res, graph_voc):
    """Build leaf->ancestor edges: each path's first node to every later node.

    :param res: list of token paths (leaf first, root last)
    :param graph_voc: Voc with word2idx mapping token -> node index
    :return: edge_idx as [row, col], e.g. [[1,2,3],[0,1,0]]
    """
    edge_idx = []
    for sample in res:
        ids = [graph_voc.word2idx[token] for token in sample]
        edge_idx.extend((ids[0], ids[i]) for i in range(1, len(ids)))
    edge_idx = _remove_duplicate(edge_idx)
    return [[edge[0] for edge in edge_idx], [edge[1] for edge in edge_idx]]
|
def build_cominbed_edges(res, graph_voc):
    """Union of stage-one (child->parent) and stage-two (leaf->ancestor) edges.

    (Function name typo is kept for callers.)

    :param res: list of token paths (leaf first, root last)
    :param graph_voc: Voc with word2idx mapping token -> node index
    :return: edge_idx as [row, col], e.g. [[1,2,3],[0,1,0]]
    """
    edge_idx = []
    for sample in res:
        ids = [graph_voc.word2idx[token] for token in sample]
        edge_idx.extend((ids[i + 1], ids[i]) for i in range(len(ids) - 1))
        edge_idx.extend((ids[0], ids[i]) for i in range(1, len(ids)))
    edge_idx = _remove_duplicate(edge_idx)
    return [[edge[0] for edge in edge_idx], [edge[1] for edge in edge_idx]]
|
def expand_level2():
    """Expand ICD-9 level-2 range labels into a per-code lookup table.

    Returns a dict mapping every individual code ('003', 'V05', 'E812', ...)
    to the range label it belongs to. Labels are processed in list order, so
    later (more specific) entries overwrite earlier overlapping ranges.
    """
    level2 = ['001-009', '010-018', '020-027', '030-041', '042', '045-049', '050-059', '060-066', '070-079', '080-088', '090-099', '100-104', '110-118', '120-129', '130-136', '137-139', '140-149', '150-159', '160-165', '170-176', '176', '179-189', '190-199', '200-208', '209', '210-229', '230-234', '235-238', '239', '240-246', '249-259', '260-269', '270-279', '280-289', '290-294', '295-299', '300-316', '317-319', '320-327', '330-337', '338', '339', '340-349', '350-359', '360-379', '380-389', '390-392', '393-398', '401-405', '410-414', '415-417', '420-429', '430-438', '440-449', '451-459', '460-466', '470-478', '480-488', '490-496', '500-508', '510-519', '520-529', '530-539', '540-543', '550-553', '555-558', '560-569', '570-579', '580-589', '590-599', '600-608', '610-611', '614-616', '617-629', '630-639', '640-649', '650-659', '660-669', '670-677', '678-679', '680-686', '690-698', '700-709', '710-719', '720-724', '725-729', '730-739', '740-759', '760-763', '764-779', '780-789', '790-796', '797-799', '800-804', '805-809', '810-819', '820-829', '830-839', '840-848', '850-854', '860-869', '870-879', '880-887', '890-897', '900-904', '905-909', '910-919', '920-924', '925-929', '930-939', '940-949', '950-957', '958-959', '960-979', '980-989', '990-995', '996-999', 'V01-V91', 'V01-V09', 'V10-V19', 'V20-V29', 'V30-V39', 'V40-V49', 'V50-V59', 'V60-V69', 'V70-V82', 'V83-V84', 'V85', 'V86', 'V87', 'V88', 'V89', 'V90', 'V91', 'E000-E899', 'E000', 'E001-E030', 'E800-E807', 'E810-E819', 'E820-E825', 'E826-E829', 'E830-E838', 'E840-E845', 'E846-E849', 'E850-E858', 'E860-E869', 'E870-E876', 'E878-E879', 'E880-E888', 'E890-E899', 'E900-E909', 'E910-E915', 'E916-E928', 'E929', 'E930-E949', 'E950-E959', 'E960-E969', 'E970-E978', 'E980-E989', 'E990-E999']
    level2_expand = {}
    for label in level2:
        parts = label.split('-')
        if (len(parts) == 1):
            # Single code, not a range: maps to itself.
            level2_expand[label] = label
            continue
        # V-codes use a 2-digit suffix, E-codes and plain codes a 3-digit one.
        if (label[0] == 'V'):
            (prefix, width, lo, hi) = ('V', 2, int(parts[0][1:]), int(parts[1][1:]))
        elif (label[0] == 'E'):
            (prefix, width, lo, hi) = ('E', 3, int(parts[0][1:]), int(parts[1][1:]))
        else:
            (prefix, width, lo, hi) = ('', 3, int(parts[0]), int(parts[1]))
        for number in range(lo, (hi + 1)):
            level2_expand[('%s%0*d' % (prefix, width, number))] = label
    return level2_expand
|
def build_icd9_tree(unique_codes):
    """Build 4-level ontology paths for ICD-9 codes.

    Each path is [code, 3/4-char prefix, level-2 range label, root].
    Returns (paths, graph_voc) where graph_voc indexes every node seen.
    """
    paths = []
    graph_voc = Voc()
    root_node = 'icd9_root'
    range_lookup = expand_level2()
    for code in unique_codes:
        # E-codes carry a 4-character category prefix, everything else 3.
        prefix = (code[:4] if (code[0] == 'E') else code[:3])
        path = [code, prefix, range_lookup[prefix], root_node]
        graph_voc.add_sentence(path)
        paths.append(path)
    return (paths, graph_voc)
|
def build_atc_tree(unique_codes):
    """Build 5-level ontology paths for ATC codes.

    Each path is [code, code[:4], code[:3], code[:1], root].
    Returns (paths, graph_voc) where graph_voc indexes every node seen.
    """
    paths = []
    graph_voc = Voc()
    root_node = 'atc_root'
    for code in unique_codes:
        path = [code, code[:4], code[:3], code[:1], root_node]
        graph_voc.add_sentence(path)
        paths.append(path)
    return (paths, graph_voc)
|
class BertConfig(object):
    """Configuration class to store the configuration of a `BertModel`."""

    def __init__(self, vocab_size_or_config_json_file, hidden_size=300,
                 num_hidden_layers=2, num_attention_heads=4,
                 intermediate_size=300, hidden_act='relu',
                 hidden_dropout_prob=0.4, attention_probs_dropout_prob=0.1,
                 max_position_embeddings=1, type_vocab_size=2,
                 initializer_range=0.02, graph=False, graph_hidden_size=75,
                 graph_heads=4):
        """Constructs BertConfig.

        Args:
            vocab_size_or_config_json_file: Vocabulary size of `inputs_ids` in
                `BertModel` (int), or the path to a JSON config file (str).
            hidden_size: Size of the encoder layers and the pooler layer.
            num_hidden_layers: Number of hidden layers in the Transformer encoder.
            num_attention_heads: Number of attention heads per attention layer.
            intermediate_size: Size of the "intermediate" (feed-forward) layer.
            hidden_act: Non-linear activation (function or string) in the
                encoder and pooler; "gelu", "relu" and "swish" are supported.
            hidden_dropout_prob: Dropout probability for all fully connected
                layers in the embeddings, encoder, and pooler.
            attention_probs_dropout_prob: Dropout ratio for attention probabilities.
            max_position_embeddings: Maximum sequence length the model may see.
            type_vocab_size: Vocabulary size of `token_type_ids`.
            initializer_range: Stddev of the truncated-normal weight initializer.
            graph: Whether to use ontology (graph) embeddings.
            graph_hidden_size: Output size of the graph attention layer
                (per head -- presumably heads * graph_hidden_size == hidden_size).
            graph_heads: Number of graph attention heads.

        Raises:
            ValueError: if the first argument is neither an int nor a str.
        """
        if isinstance(vocab_size_or_config_json_file, str):
            # Load every key from the JSON file verbatim.
            with open(vocab_size_or_config_json_file, 'r', encoding='utf-8') as reader:
                json_config = json.loads(reader.read())
            for (key, value) in json_config.items():
                self.__dict__[key] = value
        elif isinstance(vocab_size_or_config_json_file, int):
            self.vocab_size = vocab_size_or_config_json_file
            self.hidden_size = hidden_size
            self.num_hidden_layers = num_hidden_layers
            self.num_attention_heads = num_attention_heads
            self.hidden_act = hidden_act
            self.intermediate_size = intermediate_size
            self.hidden_dropout_prob = hidden_dropout_prob
            self.attention_probs_dropout_prob = attention_probs_dropout_prob
            self.max_position_embeddings = max_position_embeddings
            self.type_vocab_size = type_vocab_size
            self.initializer_range = initializer_range
            self.graph = graph
            self.graph_hidden_size = graph_hidden_size
            self.graph_heads = graph_heads
        else:
            # Original message was missing the space after "(int)".
            raise ValueError('First argument must be either a vocabulary size (int) or the path to a pretrained model config file (str)')

    @classmethod
    def from_dict(cls, json_object):
        'Constructs a `BertConfig` from a Python dictionary of parameters.'
        config = BertConfig(vocab_size_or_config_json_file=(- 1))
        for (key, value) in json_object.items():
            config.__dict__[key] = value
        return config

    @classmethod
    def from_json_file(cls, json_file):
        'Constructs a `BertConfig` from a json file of parameters.'
        with open(json_file, 'r', encoding='utf-8') as reader:
            text = reader.read()
        return cls.from_dict(json.loads(text))

    def __repr__(self):
        return str(self.to_json_string())

    def to_dict(self):
        'Serializes this instance to a Python dictionary.'
        output = copy.deepcopy(self.__dict__)
        return output

    def to_json_string(self):
        'Serializes this instance to a JSON string.'
        return (json.dumps(self.to_dict(), indent=2, sort_keys=True) + '\n')
|
class OntologyEmbedding(nn.Module):
    """Embed medical codes by running graph attention over their ontology tree.

    Leaf codes and their ancestors form one graph; a shared GATConv is applied
    twice (stage-one edges, then stage-two edges) to refine a learnable node
    embedding table, and forward() returns the rows for the leaf codes in
    *voc* order.
    """
    def __init__(self, voc, build_tree_func, in_channels=100, out_channels=20, heads=5):
        super(OntologyEmbedding, self).__init__()
        # Build ontology paths and the node vocabulary for all codes in voc.
        (res, graph_voc) = build_tree_func(list(voc.idx2word.values()))
        stage_one_edges = build_stage_one_edges(res, graph_voc)
        stage_two_edges = build_stage_two_edges(res, graph_voc)
        self.edges1 = torch.tensor(stage_one_edges)
        self.edges2 = torch.tensor(stage_two_edges)
        self.graph_voc = graph_voc
        # GAT concatenates its heads, so heads * out_channels must equal
        # in_channels for the two stacked applications to compose.
        assert (in_channels == (heads * out_channels))
        self.g = GATConv(in_channels=in_channels, out_channels=out_channels, heads=heads)
        num_nodes = len(graph_voc.word2idx)
        self.embedding = nn.Parameter(torch.Tensor(num_nodes, in_channels))
        # Map voc's code order onto graph-node order for forward().
        self.idx_mapping = [self.graph_voc.word2idx[word] for word in voc.idx2word.values()]
        self.init_params()

    def get_all_graph_emb(self):
        """Return refined embeddings for every graph node (leaves and ancestors)."""
        emb = self.embedding
        # Two message-passing stages; edge orientation follows
        # build_stage_*_edges (NOTE(review): verify against GATConv's
        # aggregation convention).
        return self.g(self.g(emb, self.edges1.to(emb.device)), self.edges2.to(emb.device))

    def forward(self):
        """Return refined embeddings restricted to the leaf codes, in voc order.

        (The original duplicated get_all_graph_emb's body here; behavior is
        identical.)
        """
        return self.get_all_graph_emb()[self.idx_mapping]

    def init_params(self):
        glorot(self.embedding)
|
class MessagePassing(nn.Module):
    """Base class for creating message passing layers.

    Subclasses implement :meth:`message` and :meth:`update`;
    :meth:`propagate` routes keyword arguments to them by introspected
    parameter name (a ``_i``/``_j`` suffix lifts the tensor to the edge's
    first/second-row endpoints), aggregates the per-edge messages with
    ``scatter_`` and applies the update. See the PyTorch Geometric
    "creating message passing networks" tutorial for background.
    """

    def __init__(self, aggr='add'):
        super(MessagePassing, self).__init__()
        # Introspect subclass signatures once so propagate() can route kwargs.
        # getfullargspec replaces getargspec, which was removed in Python 3.11.
        self.message_args = inspect.getfullargspec(self.message)[0][1:]
        self.update_args = inspect.getfullargspec(self.update)[0][2:]

    def propagate(self, aggr, edge_index, **kwargs):
        """The initial call to start propagating messages.

        Takes in an aggregation scheme (:obj:`"add"`, :obj:`"mean"` or
        :obj:`"max"`), the edge indices, and all additional data which is
        needed to construct messages and to update node embeddings.
        """
        assert (aggr in ['add', 'mean', 'max'])
        kwargs['edge_index'] = edge_index
        size = None
        message_args = []
        for arg in self.message_args:
            if (arg[(- 2):] == '_i'):
                # Lift to the edge's first-row (target-side) node features.
                tmp = kwargs[arg[:(- 2)]]
                size = tmp.size(0)
                message_args.append(tmp[edge_index[0]])
            elif (arg[(- 2):] == '_j'):
                # Lift to the edge's second-row (source-side) node features.
                tmp = kwargs[arg[:(- 2)]]
                size = tmp.size(0)
                message_args.append(tmp[edge_index[1]])
            else:
                message_args.append(kwargs[arg])
        update_args = [kwargs[arg] for arg in self.update_args]
        out = self.message(*message_args)
        # Aggregate per-edge messages onto the edge_index[0] nodes.
        out = scatter_(aggr, out, edge_index[0], dim_size=size)
        out = self.update(out, *update_args)
        return out

    def message(self, x_j):
        """Construct a message per edge; the default passes source features through."""
        return x_j

    def update(self, aggr_out):
        """Update node embeddings from aggregated messages; the default is identity."""
        return aggr_out
|
class GATConv(MessagePassing):
    """Graph attention operator from "Graph Attention Networks"
    (https://arxiv.org/abs/1710.10903).

    Each node attends over itself (self-loop added in forward) and its
    neighbours; per-head attention coefficients come from a LeakyReLU-scored
    linear form over the concatenated transformed endpoint features,
    normalized with a per-target softmax.

    Args:
        in_channels (int): Size of each input sample.
        out_channels (int): Size of each output sample (per head).
        heads (int, optional): Number of multi-head attentions. (default: 1)
        concat (bool, optional): Concatenate head outputs if True, otherwise
            average them. (default: True)
        negative_slope (float, optional): LeakyReLU negative slope. (default: 0.2)
        dropout (float, optional): Dropout probability on the normalized
            attention coefficients. (default: 0)
        bias (bool, optional): If False, no additive bias is learned. (default: True)
    """
    def __init__(self, in_channels, out_channels, heads=1, concat=True, negative_slope=0.2, dropout=0, bias=True):
        super(GATConv, self).__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.heads = heads
        self.concat = concat
        self.negative_slope = negative_slope
        self.dropout = dropout
        # Shared linear transform (all heads packed into one matrix) and the
        # per-head attention vector over concatenated endpoint features.
        self.weight = nn.Parameter(torch.Tensor(in_channels, (heads * out_channels)))
        self.att = nn.Parameter(torch.Tensor(1, heads, (2 * out_channels)))
        if (bias and concat):
            self.bias = nn.Parameter(torch.Tensor((heads * out_channels)))
        elif (bias and (not concat)):
            self.bias = nn.Parameter(torch.Tensor(out_channels))
        else:
            self.register_parameter('bias', None)
        self.reset_parameters()
    def reset_parameters(self):
        # Glorot for weights/attention, zeros for bias (pyg-style init helpers).
        glorot(self.weight)
        glorot(self.att)
        zeros(self.bias)
    def forward(self, x, edge_index):
        """Transform x to (N, heads, out_channels) and propagate over edges (+ self-loops)."""
        edge_index = add_self_loops(edge_index, num_nodes=x.size(0))
        x = torch.mm(x, self.weight).view((- 1), self.heads, self.out_channels)
        return self.propagate('add', edge_index, x=x, num_nodes=x.size(0))
    def message(self, x_i, x_j, edge_index, num_nodes):
        # Attention logits per edge and head, softmax-normalized per target node.
        alpha = (torch.cat([x_i, x_j], dim=(- 1)) * self.att).sum(dim=(- 1))
        alpha = F.leaky_relu(alpha, self.negative_slope)
        alpha = softmax(alpha, edge_index[0], num_nodes)
        # NOTE(review): F.dropout is called without the training flag, so it
        # applies in eval mode as well -- confirm this is intended.
        alpha = F.dropout(alpha, p=self.dropout)
        return (x_j * alpha.view((- 1), self.heads, 1))
    def update(self, aggr_out):
        # Concatenate or average the heads, then add bias if present.
        if (self.concat is True):
            aggr_out = aggr_out.view((- 1), (self.heads * self.out_channels))
        else:
            aggr_out = aggr_out.mean(dim=1)
        if (self.bias is not None):
            aggr_out = (aggr_out + self.bias)
        return aggr_out
    def __repr__(self):
        return '{}({}, {}, heads={})'.format(self.__class__.__name__, self.in_channels, self.out_channels, self.heads)
|
class ConcatEmbeddings(nn.Module):
    """Lookup table stacking special-token, rx-ontology and dx-ontology embeddings."""
    def __init__(self, config, dx_voc, rx_voc):
        super(ConcatEmbeddings, self).__init__()
        # Whatever ids are not dx or rx codes are special tokens.
        n_special = ((config.vocab_size - len(dx_voc.idx2word)) - len(rx_voc.idx2word))
        self.special_embedding = nn.Parameter(torch.Tensor(n_special, config.hidden_size))
        self.rx_embedding = OntologyEmbedding(rx_voc, build_atc_tree,
                                              config.hidden_size,
                                              config.graph_hidden_size,
                                              config.graph_heads)
        self.dx_embedding = OntologyEmbedding(dx_voc, build_icd9_tree,
                                              config.hidden_size,
                                              config.graph_hidden_size,
                                              config.graph_heads)
        self.init_params()
    def forward(self, input_ids):
        # Row order must match the tokenizer's id layout: specials, rx, dx.
        table = torch.cat([self.special_embedding, self.rx_embedding(), self.dx_embedding()], dim=0)
        return table[input_ids]
    def init_params(self):
        glorot(self.special_embedding)
|
class FuseEmbeddings(nn.Module):
    """Sum ontology (code) embeddings with a binary dx/rx type embedding."""
    def __init__(self, config, dx_voc, rx_voc):
        super(FuseEmbeddings, self).__init__()
        self.ontology_embedding = ConcatEmbeddings(config, dx_voc, rx_voc)
        self.type_embedding = nn.Embedding(2, config.hidden_size)
    def forward(self, input_ids, input_types=None, input_positions=None):
        """
        :param input_ids: [B, L] token ids
        :param input_types: [B, L] 0/1 type ids (required despite the default)
        :param input_positions: unused; kept for interface compatibility
        :return: [B, L, H] fused embeddings
        """
        code_emb = self.ontology_embedding(input_ids)
        return (code_emb + self.type_embedding(input_types))
|
class Voc(object):
    """Bidirectional vocabulary: word <-> contiguous integer index."""
    def __init__(self):
        self.idx2word = {}
        self.word2idx = {}
    def add_sentence(self, sentence):
        """Register each previously unseen word with the next free index."""
        for word in sentence:
            if word in self.word2idx:
                continue
            index = len(self.word2idx)
            self.idx2word[index] = word
            self.word2idx[word] = index
|
class EHRTokenizer(object):
    """Runs end-to-end tokenization over the dx/rx vocabulary files."""
    def __init__(self, data_dir, special_tokens=('[PAD]', '[CLS]', '[MASK]')):
        # Shared vocabulary accumulates specials, then rx codes, then dx codes.
        self.vocab = Voc()
        self.vocab.add_sentence(special_tokens)
        self.rx_voc = self.add_vocab(os.path.join(data_dir, 'rx-vocab.txt'))
        self.dx_voc = self.add_vocab(os.path.join(data_dir, 'dx-vocab.txt'))
        # Separate multi-visit vocabularies define the prediction label space.
        self.rx_voc_multi = Voc()
        self.dx_voc_multi = Voc()
        with open(os.path.join(data_dir, 'rx-vocab-multi.txt'), 'r') as fin:
            for line in fin:
                self.rx_voc_multi.add_sentence([line.rstrip('\n')])
        with open(os.path.join(data_dir, 'dx-vocab-multi.txt'), 'r') as fin:
            for line in fin:
                self.dx_voc_multi.add_sentence([line.rstrip('\n')])
    def add_vocab(self, vocab_file):
        """Load codes from *vocab_file* into the shared vocab and a new specific Voc."""
        specific_voc = Voc()
        with open(vocab_file, 'r') as fin:
            for line in fin:
                code = line.rstrip('\n')
                self.vocab.add_sentence([code])
                specific_voc.add_sentence([code])
        return specific_voc
    def convert_tokens_to_ids(self, tokens):
        'Converts a sequence of tokens into ids using the vocab.'
        return [self.vocab.word2idx[token] for token in tokens]
    def convert_ids_to_tokens(self, ids):
        'Converts a sequence of ids in wordpiece tokens using the vocab.'
        return [self.vocab.idx2word[i] for i in ids]
|
class EHRDataset(Dataset):
    """Per-patient multi-visit EHR dataset.

    Each item is one patient: every admission flattened into fixed-length
    [CLS]-prefixed dx and rx token blocks, plus multi-hot dx/rx label vectors
    for every admission after the first.
    """
    def __init__(self, data_pd, tokenizer: EHRTokenizer, max_seq_len):
        self.data_pd = data_pd
        self.tokenizer = tokenizer
        self.seq_len = max_seq_len
        # Counts served samples; used only to log the first few examples.
        self.sample_counter = 0

        def transform_data(data):
            """Group the raw dataframe by patient.

            :param data: dataframe with SUBJECT_ID, ICD9_CODE and ATC4 columns
            :return: dict subject_id -> [[dx_codes, rx_codes] per admission]
            """
            records = {}
            for subject_id in data['SUBJECT_ID'].unique():
                item_df = data[(data['SUBJECT_ID'] == subject_id)]
                patient = []
                for (_, row) in item_df.iterrows():
                    admission = [list(row['ICD9_CODE']), list(row['ATC4'])]
                    patient.append(admission)
                # Prediction needs at least two visits (history -> next visit).
                if (len(patient) < 2):
                    continue
                records[subject_id] = patient
            return records
        self.records = transform_data(data_pd)

    def __len__(self):
        return len(self.records)

    def __getitem__(self, item):
        cur_id = self.sample_counter
        self.sample_counter += 1
        # Relies on dict preserving insertion order for a stable item -> patient map.
        subject_id = list(self.records.keys())[item]

        def fill_to_max(l, seq):
            # Right-pad token list l in place with '[PAD]' up to length seq.
            while (len(l) < seq):
                l.append('[PAD]')
            return l
        'extract input and output tokens\n '
        input_tokens = []
        output_dx_tokens = []
        output_rx_tokens = []
        for (idx, adm) in enumerate(self.records[subject_id]):
            # Each admission contributes one dx block and one rx block, each
            # '[CLS]' + (seq_len - 1) padded codes.
            input_tokens.extend((['[CLS]'] + fill_to_max(list(adm[0]), (self.seq_len - 1))))
            input_tokens.extend((['[CLS]'] + fill_to_max(list(adm[1]), (self.seq_len - 1))))
            # Labels exist for every admission after the first.
            if (idx != 0):
                output_rx_tokens.append(list(adm[1]))
                output_dx_tokens.append(list(adm[0]))
        'convert tokens to id\n '
        input_ids = self.tokenizer.convert_tokens_to_ids(input_tokens)
        output_dx_labels = []
        output_rx_labels = []
        dx_voc_size = len(self.tokenizer.dx_voc_multi.word2idx)
        rx_voc_size = len(self.tokenizer.rx_voc_multi.word2idx)
        # Multi-hot label vectors over the multi-visit vocabularies.
        for tokens in output_dx_tokens:
            tmp_labels = np.zeros(dx_voc_size)
            tmp_labels[list(map((lambda x: self.tokenizer.dx_voc_multi.word2idx[x]), tokens))] = 1
            output_dx_labels.append(tmp_labels)
        for tokens in output_rx_tokens:
            tmp_labels = np.zeros(rx_voc_size)
            tmp_labels[list(map((lambda x: self.tokenizer.rx_voc_multi.word2idx[x]), tokens))] = 1
            output_rx_labels.append(tmp_labels)
        if (cur_id < 5):
            logger.info('*** Example ***')
            logger.info(('subject_id: %s' % subject_id))
            logger.info(('input tokens: %s' % ' '.join([str(x) for x in input_tokens])))
            logger.info(('input_ids: %s' % ' '.join([str(x) for x in input_ids])))
        # Sanity: two blocks (dx + rx) of seq_len tokens per admission.
        assert (len(input_ids) == ((self.seq_len * 2) * len(self.records[subject_id])))
        assert (len(output_dx_labels) == (len(self.records[subject_id]) - 1))
        # Shapes: ids (2 * n_adm, seq_len); labels (n_adm - 1, voc_size).
        cur_tensors = (torch.tensor(input_ids).view((- 1), self.seq_len), torch.tensor(output_dx_labels, dtype=torch.float), torch.tensor(output_rx_labels, dtype=torch.float))
        return cur_tensors
|
def load_dataset(args):
    """Build the tokenizer and the (train, eval, test) EHRDataset triple.

    Reads the multi-visit pickle plus one subject-id list per split from
    args.data_dir.
    """
    data_dir = args.data_dir
    max_seq_len = args.max_seq_length
    tokenizer = EHRTokenizer(data_dir)
    data = pd.read_pickle(os.path.join(data_dir, 'data-multi-visit.pkl'))
    ids_file = [os.path.join(data_dir, name)
                for name in ('train-id.txt', 'eval-id.txt', 'test-id.txt')]

    def load_ids(data, file_name):
        """Select the multi-visit rows whose SUBJECT_ID is listed in *file_name*."""
        with open(file_name, 'r') as f:
            ids = [int(line.rstrip('\n')) for line in f]
        return data[data['SUBJECT_ID'].isin(ids)].reset_index(drop=True)

    datasets = tuple(EHRDataset(load_ids(data, path), tokenizer, max_seq_len)
                     for path in ids_file)
    return (tokenizer, datasets)
|
def main():
    """Entry point: train, evaluate and test the GBERT_Predict medication model.

    Phases are selected via --do_train / --do_eval / --do_test. Evaluation runs
    once per training epoch and checkpoints the model whenever PR-AUC improves;
    testing reloads that best checkpoint.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--model_name', default='GBert-predict', type=str, required=False, help='model name')
    parser.add_argument('--data_dir', default='../data', type=str, required=False, help='The input data dir.')
    parser.add_argument('--pretrain_dir', default='../saved/GBert-pretraining', type=str, required=False, help='pretraining model')
    parser.add_argument('--train_file', default='data-multi-visit.pkl', type=str, required=False, help='training data file.')
    parser.add_argument('--output_dir', default='../saved/', type=str, required=False, help='The output directory where the model checkpoints will be written.')
    parser.add_argument('--use_pretrain', default=False, action='store_true', help='is use pretrain')
    parser.add_argument('--graph', default=False, action='store_true', help='if use ontology embedding')
    parser.add_argument('--therhold', default=0.3, type=float, help='therhold.')
    parser.add_argument('--max_seq_length', default=55, type=int, help='The maximum total input sequence length after WordPiece tokenization. \nSequences longer than this will be truncated, and sequences shorter \nthan this will be padded.')
    parser.add_argument('--do_train', default=False, action='store_true', help='Whether to run training.')
    parser.add_argument('--do_eval', default=True, action='store_true', help='Whether to run on the dev set.')
    parser.add_argument('--do_test', default=True, action='store_true', help='Whether to run on the test set.')
    parser.add_argument('--train_batch_size', default=1, type=int, help='Total batch size for training.')
    parser.add_argument('--learning_rate', default=0.0005, type=float, help='The initial learning rate for Adam.')
    parser.add_argument('--num_train_epochs', default=20.0, type=float, help='Total number of training epochs to perform.')
    parser.add_argument('--no_cuda', action='store_true', help='Whether not to use CUDA when available')
    parser.add_argument('--seed', type=int, default=1203, help='random seed for initialization')
    parser.add_argument('--warmup_proportion', default=0.1, type=float, help='Proportion of training to perform linear learning rate warmup for. E.g., 0.1 = 10%% of training.')
    args = parser.parse_args()
    # All artifacts for this run live under <output_dir>/<model_name>.
    args.output_dir = os.path.join(args.output_dir, args.model_name)
    # Seed every RNG in use so runs are reproducible.
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    device = torch.device(('cuda' if (torch.cuda.is_available() and (not args.no_cuda)) else 'cpu'))
    if ((not args.do_train) and (not args.do_eval)):
        raise ValueError('At least one of `do_train` or `do_eval` must be True.')
    os.makedirs(args.output_dir, exist_ok=True)
    print('Loading Dataset')
    (tokenizer, (train_dataset, eval_dataset, test_dataset)) = load_dataset(args)
    # batch_size is fixed at 1: each "batch" is one subject's whole visit sequence.
    train_dataloader = DataLoader(train_dataset, sampler=RandomSampler(train_dataset), batch_size=1)
    eval_dataloader = DataLoader(eval_dataset, sampler=SequentialSampler(eval_dataset), batch_size=1)
    test_dataloader = DataLoader(test_dataset, sampler=SequentialSampler(test_dataset), batch_size=1)
    print(('Loading Model: ' + args.model_name))
    if args.use_pretrain:
        logger.info('Use Pretraining model')
        model = GBERT_Predict.from_pretrained(args.pretrain_dir, tokenizer=tokenizer)
    else:
        # Fresh model: vocab size comes from the shared token vocabulary.
        config = BertConfig(vocab_size_or_config_json_file=len(tokenizer.vocab.word2idx))
        config.graph = args.graph
        model = GBERT_Predict(config, tokenizer)
    logger.info(('# of model parameters: ' + str(get_n_params(model))))
    model.to(device)
    # Unwrap DataParallel (if any) so we checkpoint the bare module's weights.
    model_to_save = (model.module if hasattr(model, 'module') else model)
    rx_output_model_file = os.path.join(args.output_dir, 'pytorch_model.bin')
    optimizer = Adam(model.parameters(), lr=args.learning_rate)
    global_step = 0
    if args.do_train:
        writer = SummaryWriter(args.output_dir)
        logger.info('***** Running training *****')
        logger.info(' Num examples = %d', len(train_dataset))
        logger.info(' Batch size = %d', 1)
        (dx_acc_best, rx_acc_best) = (0, 0)
        # Model selection metric: precision-recall AUC on the rx (medication) task.
        acc_name = 'prauc'
        dx_history = {'prauc': []}
        rx_history = {'prauc': []}
        for _ in trange(int(args.num_train_epochs), desc='Epoch'):
            print('')
            tr_loss = 0
            (nb_tr_examples, nb_tr_steps) = (0, 0)
            prog_iter = tqdm(train_dataloader, leave=False, desc='Training')
            model.train()
            for (_, batch) in enumerate(prog_iter):
                batch = tuple((t.to(device) for t in batch))
                (input_ids, dx_labels, rx_labels) = batch
                # Drop the leading batch dim of 1 added by the DataLoader.
                (input_ids, dx_labels, rx_labels) = (input_ids.squeeze(dim=0), dx_labels.squeeze(dim=0), rx_labels.squeeze(dim=0))
                (loss, rx_logits) = model(input_ids, dx_labels=dx_labels, rx_labels=rx_labels, epoch=global_step)
                loss.backward()
                tr_loss += loss.item()
                nb_tr_examples += 1
                nb_tr_steps += 1
                prog_iter.set_postfix(loss=('%.4f' % (tr_loss / nb_tr_steps)))
                # One optimizer step per patient (effective batch size 1).
                optimizer.step()
                optimizer.zero_grad()
            writer.add_scalar('train/loss', (tr_loss / nb_tr_steps), global_step)
            global_step += 1
            if args.do_eval:
                print('')
                logger.info('***** Running eval *****')
                model.eval()
                dx_y_preds = []
                dx_y_trues = []
                rx_y_preds = []
                rx_y_trues = []
                for eval_input in tqdm(eval_dataloader, desc='Evaluating'):
                    eval_input = tuple((t.to(device) for t in eval_input))
                    (input_ids, dx_labels, rx_labels) = eval_input
                    (input_ids, dx_labels, rx_labels) = (input_ids.squeeze(), dx_labels.squeeze(), rx_labels.squeeze(dim=0))
                    with torch.no_grad():
                        (loss, rx_logits) = model(input_ids, dx_labels=dx_labels, rx_labels=rx_labels)
                        rx_y_preds.append(t2n(torch.sigmoid(rx_logits)))
                        rx_y_trues.append(t2n(rx_labels))
                print('')
                rx_acc_container = metric_report(np.concatenate(rx_y_preds, axis=0), np.concatenate(rx_y_trues, axis=0), args.therhold)
                for (k, v) in rx_acc_container.items():
                    writer.add_scalar('eval/{}'.format(k), v, global_step)
                # Checkpoint only when the tracked metric improves.
                if (rx_acc_container[acc_name] > rx_acc_best):
                    rx_acc_best = rx_acc_container[acc_name]
                    torch.save(model_to_save.state_dict(), rx_output_model_file)
                with open(os.path.join(args.output_dir, 'bert_config.json'), 'w', encoding='utf-8') as fout:
                    fout.write(model.config.to_json_string())
    if args.do_test:
        logger.info('***** Running test *****')
        logger.info(' Num examples = %d', len(test_dataset))
        logger.info(' Batch size = %d', 1)

        def test(task=0):
            # Reload the best checkpoint written during training.
            model_state_dict = torch.load(rx_output_model_file)
            model.load_state_dict(model_state_dict)
            model.to(device)
            model.eval()
            y_preds = []
            y_trues = []
            for test_input in tqdm(test_dataloader, desc='Testing'):
                test_input = tuple((t.to(device) for t in test_input))
                (input_ids, dx_labels, rx_labels) = test_input
                (input_ids, dx_labels, rx_labels) = (input_ids.squeeze(), dx_labels.squeeze(), rx_labels.squeeze(dim=0))
                with torch.no_grad():
                    (loss, rx_logits) = model(input_ids, dx_labels=dx_labels, rx_labels=rx_labels)
                    y_preds.append(t2n(torch.sigmoid(rx_logits)))
                    y_trues.append(t2n(rx_labels))
            print('')
            acc_container = metric_report(np.concatenate(y_preds, axis=0), np.concatenate(y_trues, axis=0), args.therhold)
            # Mirror the test metrics into TensorBoard when training also ran
            # (writer only exists in that case).
            if args.do_train:
                for (k, v) in acc_container.items():
                    writer.add_scalar('test/{}'.format(k), v, 0)
            return acc_container
        test(task=0)
|
class Voc(object):
    """Bidirectional vocabulary mapping words <-> dense integer ids."""

    def __init__(self):
        # id -> word and word -> id lookups, kept in sync by add_sentence.
        self.idx2word = {}
        self.word2idx = {}

    def add_sentence(self, sentence):
        """Register every unseen word in `sentence`, assigning ids in encounter order."""
        for token in sentence:
            if token in self.word2idx:
                continue
            idx = len(self.word2idx)
            self.idx2word[idx] = token
            self.word2idx[token] = idx
|
class EHRTokenizer(object):
    'Runs end-to-end tokenization'

    def __init__(self, data_dir, special_tokens=('[PAD]', '[CLS]', '[MASK]')):
        # Shared vocabulary holding the special tokens plus every rx and dx code.
        self.vocab = Voc()
        self.vocab.add_sentence(special_tokens)
        self.rx_voc = self.add_vocab(os.path.join(data_dir, 'rx-vocab.txt'))
        self.dx_voc = self.add_vocab(os.path.join(data_dir, 'dx-vocab.txt'))
        # Ids (in the single-visit rx vocab) of every code listed in the
        # multi-visit rx vocabulary file.
        with open(os.path.join(data_dir, 'rx-vocab-multi.txt'), 'r') as fin:
            self.rx_singe2multi = [self.rx_voc.word2idx[code.rstrip('\n')]
                                   for code in fin]

    def add_vocab(self, vocab_file):
        """Read one code per line into both the shared vocab and a task-specific Voc."""
        specific_voc = Voc()
        with open(vocab_file, 'r') as fin:
            for raw in fin:
                code = raw.rstrip('\n')
                self.vocab.add_sentence([code])
                specific_voc.add_sentence([code])
        return specific_voc

    def convert_tokens_to_ids(self, tokens):
        'Converts a sequence of tokens into ids using the vocab.'
        return [self.vocab.word2idx[token] for token in tokens]

    def convert_ids_to_tokens(self, ids):
        'Converts a sequence of ids in wordpiece tokens using the vocab.'
        return [self.vocab.idx2word[i] for i in ids]
|
def save():
    """Load a pretrained TSNE model and run it to export embeddings.

    NOTE(review): `model_name` and `output_dir` are not defined in this
    function — presumably module-level globals; confirm they are set before
    this is called.
    """
    tokenizer = EHRTokenizer(data_dir='../data')
    logger.info('Use Pretraining model')
    tsne_model = TSNE.from_pretrained(model_name, dx_voc=tokenizer.dx_voc, rx_voc=tokenizer.rx_voc)
    tsne_model(output_dir=output_dir)
    logger.info('# of model parameters: ' + str(get_n_params(tsne_model)))
|
def generate_meta(build_tree_func, task, output_path='emb-meta.tsv'):
    """Write a TSV of (name, hierarchy level) for every node of the ontology graph.

    :param build_tree_func: maps leaf codes to (paths, graph Voc) — assumed; confirm
    :param task: 0 selects the diagnosis vocab, anything else the rx vocab
    :param output_path: file-name suffix; prefixed with 'dx-' or 'rx-'
    """
    tokenizer = EHRTokenizer(data_dir='../data')
    voc = tokenizer.dx_voc if task == 0 else tokenizer.rx_voc
    (res, graph_voc) = build_tree_func(list(voc.idx2word.values()))
    # Depth at which each code appears; later paths overwrite earlier ones.
    level_dict = {}
    for path in res:
        for (level, node) in enumerate(path):
            level_dict[node] = level
    prefix = 'dx-' if task == 0 else 'rx-'
    # NOTE(review): `output_dir` is a module-level global here — confirm it is set.
    with open(os.path.join(output_dir, prefix + output_path), 'w') as fout:
        fout.write('name\tlevel\n')
        for word in graph_voc.word2idx:
            fout.write('{}\t{}\n'.format(word, str(level_dict[word])))
|
def generate_meta_for_not_graph(task, output_path='emb-meta.tsv'):
    """Write one vocab code per line (no hierarchy column) as embedding metadata.

    :param task: 0 selects the diagnosis vocab, anything else the rx vocab
    :param output_path: file-name suffix; prefixed with 'dx-' or 'rx-'
    """
    tokenizer = EHRTokenizer(data_dir='../data')
    voc = tokenizer.dx_voc if task == 0 else tokenizer.rx_voc
    prefix = 'dx-' if task == 0 else 'rx-'
    # NOTE(review): `output_dir` is a module-level global here — confirm it is set.
    with open(os.path.join(output_dir, prefix + output_path), 'w') as fout:
        for word in voc.word2idx:
            fout.write('{}\n'.format(word))
|
class HierarchicalContextAggregationLoss(nn.Module):
    """Implementation of Hierarchical Context Aggregation.

    This loss combines multiple PixelwiseContextual losses with different
    (alpha, beta) scales. Given a descriptor with n-dims and n-losses scales,
    each loss is given n-dims//n-losses. Theoretically, each of these losses
    could also have different margins, but in practice they are usually equal.

    Attributes can be provided as a single number (same value is used for all
    losses) or as a list (must contain value to use with each loss).

    Attributes:
        n_scales (int): Number of PixelwiseContextual losses
        margins (list or float): Target margin distance between positives and negatives
        alphas (list or int): Minimum distance from original positive KeyPoint
        betas (list or int): Maximum distance from original positive KeyPoint
        n_negs (list or int): Number of negative samples to generate

    Methods:
        forward: Compute pixel-wise contrastive loss
        forward_eval: Detailed forward pass for logging
    """

    def __init__(self, n_scales=1, margins=0.5, alphas=None, betas=None, n_negs=10):
        super().__init__()
        self.n_scales = n_scales
        self.margins = margins
        self.alphas = alphas
        self.betas = betas
        self.n_negs = n_negs
        (self.features, self.labels) = (None, None)
        # One PixelwiseContrastiveLoss per scale, built from the configs above.
        self._losses = self._parse_losses()
        # Guard so the divisibility warning is emitted at most once.
        self._has_warned = False

    def __repr__(self):
        params = (self.n_scales, self.margins, self.alphas, self.betas, self.n_negs)
        return f'{self.__class__.__qualname__}{params}'

    def __str__(self):
        return '__'.join([f'Loss {i}: {loss}' for (i, loss) in enumerate(self._losses)])

    @staticmethod
    def create_parser(parser):
        """Register this loss' CLI options on `parser`.

        BUGFIX: margins/alphas/betas were declared with ``type=str`` (and
        --n-negs with ``type=str`` despite an int default), so CLI-provided
        values arrived as strings and broke the arithmetic inside
        PixelwiseContrastiveLoss (e.g. ``self.margin - dist``). They are now
        parsed as float/int, matching the numeric defaults and the sibling
        PixelwiseContrastiveLoss.create_parser.
        """
        parser.add_argument('--n-s', default=1, type=int, help='Number of hierarchical sampling strategies')
        parser.add_argument('--margins', default=0.5, type=float, nargs='*', help='List of margins for each Scale')
        parser.add_argument('--alphas', default=None, type=float, nargs='*', help='List of alphas for each Scale')
        parser.add_argument('--betas', default=None, type=float, nargs='*', help='List of betas for each Scale')
        parser.add_argument('--n-negs', default=10, type=int, nargs='*', help='List of n-negs for each Scale')

    def _parse_losses(self):
        """Broadcast scalar configs to `n_scales` and build one loss per scale.

        :raises ValueError: if a list-valued config does not have `n_scales` entries.
        """
        configs = [self.margins, self.alphas, self.betas, self.n_negs]
        # Scalars are repeated for every scale; lists must match n_scales.
        configs = [(c if isinstance(c, (list, tuple)) else ([c] * self.n_scales)) for c in configs]
        configs = [*zip(*configs)]
        if (len(configs) != self.n_scales):
            raise ValueError(f'Invalid number of configurations. ({self.n_scales} vs. {len(configs)}) ')
        return [PixelwiseContrastiveLoss(*c) for c in configs]

    def forward(self, features, labels):
        """ Compute pixel-wise contrastive loss.
        :param features: Vertically stacked feature maps (b, n-dim, h*2, w)
        :param labels: Horizontally stacked correspondence KeyPoints (b, n-kpts, 4) -> (x1, y1, x2, y2)
        :return: Loss
        """
        if ((features.shape[1] % self.n_scales) and (not self._has_warned)):
            warn(f'Feature dimensions and scales are not exactly divisible. ({features.shape[1]} and {self.n_scales})')
            self._has_warned = True
        # Each scale scores its own channel slice of the descriptor.
        feature_chunks = torch.chunk(features, self.n_scales, dim=1)
        return sum([loss(feat, labels) for (feat, loss) in zip(feature_chunks, self._losses)])

    def forward_eval(self, features, labels):
        """Detailed forward pass: total loss plus per-scale scalars/histograms for logging."""
        feature_chunks = torch.chunk(features, self.n_scales, dim=1)
        loss_evals = [loss.forward_eval(feat, labels) for (feat, loss) in zip(feature_chunks, self._losses)]
        (loss, loss_evals) = list(zip(*loss_evals))
        out_loss = sum(loss)
        # Merge the per-scale dicts, namespacing each key by its loss' str().
        output = {}
        for (le, loss) in zip(loss_evals, self._losses):
            for (cat, vals) in le.items():
                if (cat not in output.keys()):
                    output[cat] = {}
                for (k, v) in vals.items():
                    output[cat][f'{k}/{loss}'] = v
        return (out_loss, output)
|
class PixelwiseContrastiveLoss(nn.Module):
    """Implementation of "pixel-wise" contrastive loss. Contrastive loss typically compares two whole images.
    L = (Y) * (1/2 * d**2) + (1 - Y) * (1/2 * max(0, margin - d)**2)

    In this instance, we instead compare pairs of features within those images.
    Positive matches are given by ground truth correspondences between images.
    Negative matches are generated on-the-fly based on provided parameters.

    Attributes:
        margin (float): Target margin distance between positives and negatives
        alpha (int): Minimum distance from original positive KeyPoint
        beta (int): Maximum distance from original positive KeyPoint
        n_neg (int): Number of negative samples to generate

    Methods:
        forward: Compute pixel-wise contrastive loss
        forward_eval: Detailed forward pass for logging
    """

    def __init__(self, margin=0.5, alpha=None, beta=None, n_neg=10):
        super().__init__()
        self.margin = margin
        self.alpha = alpha
        self.beta = beta
        self.n_neg = n_neg
        # Euclidean distance between paired descriptor vectors.
        self._dist = nn.PairwiseDistance()

    def __repr__(self):
        return f'{self.__class__.__qualname__}({self.margin}, {self.alpha}, {self.beta}, {self.n_neg})'

    def __str__(self):
        return f"Min{(self.alpha or 0)}_Max{(self.beta or 'Inf')}"

    @staticmethod
    def create_parser(parser):
        """Register this loss' CLI options on `parser`.

        BUGFIX: ``--margin`` and ``--n_neg`` previously had no ``type=``, so
        argparse delivered CLI values as strings, which crashes the float/int
        arithmetic in the loss; they now parse as float/int like --alpha/--beta.
        """
        parser.add_argument('--margin', default=0.5, type=float, help='Target distance between negative feature embeddings.')
        parser.add_argument('--alpha', default=None, type=float, help='Minimum distance from positive KeyPoint')
        parser.add_argument('--beta', default=None, type=float, help='Maximum distance from positive KeyPoint')
        parser.add_argument('--n_neg', default=10, type=int, help='Number of negative samples to generate')

    def forward(self, features, labels):
        """ Compute pixel-wise contrastive loss.
        :param features: Vertically stacked feature maps (b, n-dim, h*2, w)
        :param labels: Horizontally stacked correspondence KeyPoints (b, n-kpts, 4) -> (x1, y1, x2, y2)
        :return: Loss
        """
        # Split the stacked maps/KeyPoints back into (source, target) halves.
        (source, target) = torch.chunk(features, 2, dim=(- 2))
        (source_kpts, target_kpts) = torch.chunk(labels, 2, dim=(- 1))
        loss = self._positive_loss(source, target, source_kpts, target_kpts)[0]
        loss += self._negative_loss(source, target, source_kpts, target_kpts)[0]
        return loss

    def forward_eval(self, features, labels):
        """Like forward, but also returns scalars/histograms of the pos/neg distances."""
        (source, target) = torch.chunk(features, 2, dim=(- 2))
        (source_kpts, target_kpts) = torch.chunk(labels, 2, dim=(- 1))
        (pos_loss, pos_dist) = self._positive_loss(source, target, source_kpts, target_kpts)
        (neg_loss, neg_dist) = self._negative_loss(source, target, source_kpts, target_kpts)
        loss = (pos_loss + neg_loss).item()
        output = {'scalars': {'loss': loss, 'positive': pos_dist.mean().item(), 'negative': neg_dist.mean().item()}, 'histograms': {'hist_positive': pos_dist.detach().cpu(), 'hist_negative': neg_dist.detach().cpu()}}
        return (loss, output)

    def _calc_distance(self, source, target, source_kpts, target_kpts):
        """Pairwise distance between descriptors sampled at corresponding KeyPoints."""
        source_descriptors = extract_kpt_vectors(source, source_kpts).permute([0, 2, 1])
        target_descriptors = extract_kpt_vectors(target, target_kpts).permute([0, 2, 1])
        return self._dist(source_descriptors, target_descriptors)

    def _positive_loss(self, source, target, source_kpts, target_kpts):
        """Pull matched descriptors together: mean(d**2) / 2."""
        dist = self._calc_distance(source, target, source_kpts, target_kpts)
        loss = ((dist ** 2).mean() / 2)
        return (loss, dist)

    def _negative_loss(self, source, target, source_kpts, target_kpts):
        """Push mismatched descriptors apart: mean(max(0, margin - d)**2) / 2."""
        (dsource_kpts, dtarget_kpts) = self._generate_negative_like(source, source_kpts, target_kpts)
        dist = self._calc_distance(source, target, dsource_kpts, dtarget_kpts)
        margin_dist = (self.margin - dist).clamp(min=0.0)
        loss = ((margin_dist ** 2).mean() / 2)
        return (loss, dist)

    def _generate_negative_like(self, other, source_kpts, target_kpts):
        """Tile the KeyPoints n_neg times and displace the target side randomly."""
        source_kpts = source_kpts.repeat([1, self.n_neg, 1])
        target_kpts = target_kpts.repeat([1, self.n_neg, 1])
        target_kpts = self._permute_negatives(target_kpts, other.shape)
        return (source_kpts, target_kpts)

    def _permute_negatives(self, kpts, shape):
        """Randomly shift KeyPoints by [alpha, beta) pixels in a random direction.

        Assumes `kpts` holds integer (x, y) pixel coordinates in an int16
        tensor — the `.short()` casts below suggest this; TODO confirm.
        """
        (h, w) = shape[(- 2):]
        low = (self.alpha if self.alpha else 0)
        high = (self.beta if self.beta else (max(h, w) - low))
        shift = torch.randint_like(kpts, low=low, high=high)
        # Random sign per coordinate so shifts go in both directions.
        shift *= torch.sign((torch.rand_like(shift, dtype=torch.float) - 0.5)).short()
        new_kpts = (kpts + shift)
        # Wrap around the image bounds (x modulo w, y modulo h).
        new_kpts %= torch.tensor((w, h), dtype=torch.short, device=new_kpts.device)
        # Clamp the effective displacement back to at most `high` after wrapping.
        diffs = (new_kpts - kpts)
        diff_clamp = torch.clamp(diffs, min=(- high), max=high)
        new_kpts += (diff_clamp - diffs)
        return new_kpts
|
def main():
    """Run a SAND model on one image and display the input above its feature map."""
    args = parser.parse_args()
    device = ops.get_device()

    ckpt_file = Path(args.model_path, args.model_name).with_suffix('.pt')
    img_file = Path(args.image_file)

    img_np = imread(img_file)
    img_torch = ops.img2torch(img_np, batched=True).to(device)
    print(f'Image size (np): {img_np.shape}')
    print(f'Image size (torch): {img_torch.shape}')

    model = Sand.from_ckpt(ckpt_file).to(device)
    model.eval()
    with torch.no_grad():
        features_torch = model(img_torch)
    features_np = ops.fmap2img(features_torch).squeeze(0)
    print(f'Feature size (torch): {features_torch.shape}')
    print(f'Feature size (np): {features_np.shape}')

    # Source image on top, descriptor visualization below, axes hidden.
    _, (ax1, ax2) = plt.subplots(2, 1)
    for ax in (ax1, ax2):
        ax.set_xticks([])
        ax.set_yticks([])
    ax1.imshow(img_np)
    ax2.imshow(features_np)
    plt.show()
|
class SiameseSand(nn.Module):
    """Weight-sharing (siamese) wrapper around a single Sand branch.

    The input is two feature maps stacked along the height axis (dim=2);
    each half runs through the same branch and the outputs are re-stacked
    in the original order.
    """

    def __init__(self, n_dims):
        super().__init__()
        self.n_dims = n_dims
        # One branch shared by both halves of the stacked input.
        self.branch = Sand(self.n_dims)

    def forward(self, features):
        top, bottom = torch.chunk(features, 2, dim=2)
        descriptors = [self.branch(half) for half in (top, bottom)]
        return torch.cat(descriptors, dim=2)
|
class Timer():
    'Context manager to time a piece of code, including GPU synchronization.'

    def __init__(self, as_ms=False):
        self.start, self.end = None, None
        # Report milliseconds instead of seconds when requested.
        self.scale = 1000 if as_ms else 1
        self.is_gpu = torch.cuda.is_available()

    def __enter__(self):
        # Drain pending GPU kernels so the timer measures completed work only.
        if self.is_gpu:
            torch.cuda.synchronize()
        self.start = time.perf_counter()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        if self.is_gpu:
            torch.cuda.synchronize()
        self.end = time.perf_counter()

    @property
    def elapsed(self):
        """Scaled elapsed time, or None until the managed block has finished."""
        if not self.end:
            return None
        return self.scale * (self.end - self.start)
|
def main():
    """Demonstrate Timer in seconds mode and milliseconds mode."""
    with Timer() as timer:
        time.sleep(2)
    print(f'{timer.elapsed} secs')

    with Timer(as_ms=True) as timer:
        time.sleep(0.002)
    print(f'{timer.elapsed} ms')
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.