repo stringlengths 1 99 | file stringlengths 13 215 | code stringlengths 12 59.2M | file_length int64 12 59.2M | avg_line_length float64 3.82 1.48M | max_line_length int64 12 2.51M | extension_type stringclasses 1
value |
|---|---|---|---|---|---|---|
Voxurf | Voxurf-main/run.py | import os, sys, copy, glob, json, time, random, argparse, cv2
from shutil import copyfile
from tqdm import tqdm, trange
import math
import mmcv
import imageio
import numpy as np
import trimesh
import logging
import torch
import torch.nn as nn
import torch.nn.functional as F
from datetime import datetime
from lib import utils, dtu_eval
# from torch.utils.tensorboard import SummaryWriter
from lib.load_data import load_data
from lib.utils import rgb_to_luminance, get_sobel, calc_grad, \
GradLoss, write_ply, load_point_cloud, get_root_logger
from torch_efficient_distloss import flatten_eff_distloss
from scipy.spatial.transform import Rotation as Rot
from scipy.spatial.transform import Slerp
def config_parser():
    '''Define command line arguments.

    Returns:
        argparse.ArgumentParser: parser with all training / rendering /
        logging options registered. Defaults are shown in --help output.
    '''
    parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--config', required=True,
                        help='config file path')
    parser.add_argument("--seed", type=int, default=777,
                        help='Random seed')
    parser.add_argument("--no_reload", action='store_true',
                        help='do not reload weights from saved ckpt')
    parser.add_argument("--no_reload_optimizer", action='store_true',
                        help='do not reload optimizer state from saved ckpt')
    parser.add_argument("--ft_path", type=str, default='',
                        help='specific weights npy file to reload for coarse network')
    parser.add_argument("--export_bbox_and_cams_only", type=str, default='',
                        help='export scene bbox and camera poses for debugging and 3d visualization')
    parser.add_argument("--export_coarse_only", type=str, default='')
    parser.add_argument("--export_fine_only", type=str, default='')
    parser.add_argument("--mesh_from_sdf", action='store_true')
    # testing options
    parser.add_argument("--render_only", action='store_true',
                        help='do not optimize, reload weights and render out render_poses path')
    parser.add_argument("--render_test", action='store_true')
    parser.add_argument("--render_train", action='store_true')
    parser.add_argument("--render_video", action='store_true')
    parser.add_argument("--render_video_factor", type=int, default=0,
                        help='downsampling factor to speed up rendering, set 4 or 8 for fast preview')
    parser.add_argument("--eval_ssim", default=True)
    parser.add_argument("--eval_lpips_alex", default=True)
    parser.add_argument("--eval_lpips_vgg", default=True)
    # logging/saving options
    parser.add_argument("--i_print", type=int, default=100,
                        help='frequency of console printout and metric loggin')
    parser.add_argument("--i_validate", type=int, default=10000)
    parser.add_argument("--i_weights", type=int, default=10000,
                        help='frequency of weight ckpt saving')
    parser.add_argument("-s", "--suffix", type=str, default="",
                        help='suffix for exp name')
    parser.add_argument("-p", "--prefix", type=str, default="",
                        help='prefix for exp name')
    parser.add_argument("--load_density_only", type=int, default=1)
    parser.add_argument("--load_expname", type=str, default="")  # dvgo_Statues_original
    parser.add_argument("--sdf_mode", type=str, default="density")
    # NOTE: default was the int 0 while type=str, so the unset value had a
    # different type than any user-provided value; keep it a string.
    parser.add_argument("--scene", type=str, default='0',
                        help='scene / scan identifier (used in paths and filenames)')
    parser.add_argument("--no_dvgo_init", action='store_true')
    parser.add_argument("--run_dvgo_init", action='store_true')
    parser.add_argument("--interpolate", default='')
    parser.add_argument("--extract_color", action='store_true')
    return parser
@torch.no_grad()
def render_viewpoints(model, render_poses, HW, Ks, ndc, render_kwargs,
                      gt_imgs=None, masks=None, savedir=None, render_factor=0, idx=None,
                      eval_ssim=True, eval_lpips_alex=True, eval_lpips_vgg=True,
                      use_bar=True, step=0, rgb_only=False):
    '''Render images for the given viewpoints; run evaluation if gt given.

    Args:
        model: renderer invoked as model(rays_o, rays_d, viewdirs, **render_kwargs).
        render_poses: sequence of camera-to-world matrices.
        HW, Ks: per-view image sizes and intrinsics (same length as render_poses).
        ndc: whether rays are generated in NDC space.
        render_kwargs: forwarded to the model; must contain 'inverse_y'.
        gt_imgs: optional ground-truth images; enables PSNR/SSIM/LPIPS metrics.
        masks: optional foreground masks used for the fg/bg PSNR split.
        savedir: if set, rendered and diagnostic PNGs are written there.
        render_factor: if nonzero, downscale H, W and intrinsics by this factor
            (quantitative metrics are skipped in that case).
        idx: fixed image index for filenames; defaults to the loop index.
        step: training step used as a filename prefix when > 0.
        rgb_only: write RGB renders only and skip all diagnostics.

    Returns:
        (rgbs, disps) stacked as numpy arrays.
    '''
    # Fix: the decorator was previously applied twice; once is sufficient.
    assert len(render_poses) == len(HW) and len(HW) == len(Ks)
    if render_factor != 0:
        # Work on copies so the caller's HW / Ks are not clobbered.
        HW = np.copy(HW)
        Ks = np.copy(Ks)
        HW //= render_factor
        Ks[:, :2, :3] //= render_factor
    rgbs = []
    normals = []
    ins = []
    outs = []
    disps = []
    psnrs = []
    fore_psnrs = []
    bg_psnrs = []
    ssims = []
    lpips_alex = []
    lpips_vgg = []
    render_normal = True
    split_bg = getattr(model, "bg_density", False)
    for i, c2w in enumerate(tqdm(render_poses)):
        H, W = HW[i]
        K = Ks[i]
        rays_o, rays_d, viewdirs = Model.get_rays_of_a_view(
            H, W, K, c2w, ndc, inverse_y=render_kwargs['inverse_y'],
            flip_x=cfg.data.flip_x, flip_y=cfg.data.flip_y)
        keys = ['rgb_marched', 'disp', 'alphainv_cum']
        if render_normal:
            keys.append('normal_marched')
        if split_bg:
            keys.extend(['in_marched', 'out_marched'])
        rays_o = rays_o.flatten(0, -2)
        rays_d = rays_d.flatten(0, -2)
        viewdirs = viewdirs.flatten(0, -2)
        # Render in chunks of 8192 rays to bound peak memory.
        render_result_chunks = [
            {k: v for k, v in model(ro, rd, vd, **render_kwargs).items() if k in keys}
            for ro, rd, vd in zip(rays_o.split(8192, 0), rays_d.split(8192, 0), viewdirs.split(8192, 0))
        ]
        render_result = {
            k: torch.cat([ret[k] for ret in render_result_chunks]).reshape(H, W, -1)
            for k in render_result_chunks[0].keys()
        }
        rgb = render_result['rgb_marched'].cpu().numpy()
        rgbs.append(rgb)
        if rgb_only and savedir is not None:
            imageio.imwrite(os.path.join(savedir, '{:03d}.png'.format(i)), utils.to8b(rgb))
            continue
        disp = render_result['disp'].cpu().numpy()
        disps.append(disp)
        if render_normal:
            normal = render_result['normal_marched'].cpu().numpy()
            normals.append(normal)
        if split_bg:
            inside = render_result['in_marched'].cpu().numpy()
            ins.append(inside)
            outside = render_result['out_marched'].cpu().numpy()
            outs.append(outside)
        if masks is not None:
            if isinstance(masks[i], torch.Tensor):
                mask = masks[i].cpu().numpy()  # .reshape(H, W, 1)
            else:
                mask = masks[i]  # .reshape(H, W, 1)
            if mask.ndim == 2:
                mask = mask.reshape(H, W, 1)
            bg_rgb = rgb * (1 - mask)
            bg_gt = gt_imgs[i] * (1 - mask)
        else:
            mask, bg_rgb, bg_gt = np.ones(rgb.shape[:2]), np.ones(rgb.shape), np.ones(rgb.shape)
        if i == 0:
            logger.info('Testing {} {}'.format(rgb.shape, disp.shape))
        error = None  # per-view error map; only computed when metrics run
        if gt_imgs is not None and render_factor == 0:
            p = -10. * np.log10(np.mean(np.square(rgb - gt_imgs[i])))
            back_p, fore_p = 0., 0.
            if masks is not None:
                back_p = -10. * np.log10(np.sum(np.square(bg_rgb - bg_gt)) / np.sum(1 - mask))
                fore_p = -10. * np.log10(np.sum(np.square(rgb - gt_imgs[i])) / np.sum(mask))
            error = 1 - np.exp(-20 * np.square(rgb - gt_imgs[i]).sum(-1))[..., None].repeat(3, -1)
            print("{} | full-image psnr {:.2f} | foreground psnr {:.2f} | background psnr: {:.2f} ".format(i, p, fore_p, back_p))
            psnrs.append(p)
            fore_psnrs.append(fore_p)
            bg_psnrs.append(back_p)
            if eval_ssim:
                ssims.append(utils.rgb_ssim(rgb, gt_imgs[i], max_val=1))
            if eval_lpips_alex:
                lpips_alex.append(utils.rgb_lpips(rgb, gt_imgs[i], net_name='alex', device='cpu'))
            if eval_lpips_vgg:
                lpips_vgg.append(utils.rgb_lpips(rgb, gt_imgs[i], net_name='vgg', device='cpu'))
        if savedir is not None:
            rgb8 = utils.to8b(rgbs[-1])
            img_id = idx if idx is not None else i  # renamed: 'id' shadowed the builtin
            step_pre = str(step) + '_' if step > 0 else ''
            filename = os.path.join(savedir, step_pre + '{:03d}.png'.format(img_id))
            rendername = os.path.join(savedir, step_pre + 'render_{:03d}.png'.format(img_id))
            gtname = os.path.join(savedir, step_pre + 'gt_{:03d}.png'.format(img_id))
            img8 = rgb8
            if gt_imgs is not None:
                gt8 = utils.to8b(gt_imgs[i])
                imageio.imwrite(gtname, gt8)
                if error is not None:
                    error8 = utils.to8b(error)
                    img8 = np.concatenate([error8, rgb8, gt8], axis=0)
                else:
                    # Fix: 'error' is only computed when render_factor == 0;
                    # previously this path raised a NameError when rendering
                    # at reduced resolution with ground truth supplied.
                    img8 = np.concatenate([rgb8, gt8], axis=0)
            if split_bg and gt_imgs is not None:
                in8 = utils.to8b(ins[-1])
                out8 = utils.to8b(outs[-1])
                img8_2 = np.concatenate([in8, out8], axis=1)
                img8 = np.concatenate([rgb8, gt8], axis=1)
                img8 = np.concatenate([img8, img8_2], axis=0)
            imageio.imwrite(rendername, rgb8)
            imageio.imwrite(filename, img8)
            if render_normal:
                # Rotate world-space normals into the camera frame for display.
                rot = c2w[:3, :3].permute(1, 0).cpu().numpy()
                normal = (rot @ normals[-1][..., None])[..., 0]
                normal = 0.5 - 0.5 * normal
                if masks is not None:
                    normal = normal * mask.mean(-1)[..., None] + (1 - mask)
                normal8 = utils.to8b(normal)
                step_pre = str(step) + '_' if step > 0 else ''
                filename = os.path.join(savedir, step_pre + '{:03d}_normal.png'.format(img_id))
                imageio.imwrite(filename, normal8)
    rgbs = np.array(rgbs)
    disps = np.array(disps)
    if len(psnrs):
        logger.info('Testing psnr {:.2f} (avg) | foreground {:.2f} | background {:.2f}'.format(
            np.mean(psnrs), np.mean(fore_psnrs), np.mean(bg_psnrs)))
        if eval_ssim: logger.info('Testing ssim {} (avg)'.format(np.mean(ssims)))
        if eval_lpips_vgg: logger.info('Testing lpips (vgg) {} (avg)'.format(np.mean(lpips_vgg)))
        if eval_lpips_alex: logger.info('Testing lpips (alex) {} (avg)'.format(np.mean(lpips_alex)))
    return rgbs, disps
def gen_poses_between(pose_0, pose_1, ratio):
    """Interpolate a camera-to-world pose between two given poses.

    The interpolation happens in world-to-camera space: rotations are
    spherically interpolated (slerp) and translations blended linearly,
    then the result is inverted back to camera-to-world.
    """
    w2c_0 = np.linalg.inv(pose_0)
    w2c_1 = np.linalg.inv(pose_1)
    slerp = Slerp([0, 1], Rot.from_matrix(np.stack([w2c_0[:3, :3], w2c_1[:3, :3]])))
    blended = np.eye(4, dtype=np.float32)
    blended[:3, :3] = slerp(ratio).as_matrix()
    blended[:3, 3] = ((1.0 - ratio) * w2c_0 + ratio * w2c_1)[:3, 3]
    return np.linalg.inv(blended)
def interpolate_view(savedir, img_idx_0, img_idx_1, render_poses, HW, Ks, ndc, repeat=1, **render_kwargs):
    """Render a looping video sweeping between two of the given camera poses.

    Writes the individual frames under ``savedir/images_full`` and an mp4
    (forward sweep followed by the reversed sweep) under ``savedir``.
    """
    all_poses = render_poses.cpu().numpy()
    start_pose, end_pose = all_poses[img_idx_0], all_poses[img_idx_1]
    n_frames = 60
    image_dir = os.path.join(savedir, 'images_full')
    os.makedirs(image_dir, exist_ok=True)
    # sine ramp gives an ease-in / ease-out interpolation schedule
    interp_poses = [
        gen_poses_between(start_pose, end_pose,
                          np.sin(((f / n_frames) - 0.5) * np.pi) * 0.5 + 0.5)
        for f in range(n_frames)
    ]
    render_kwargs.update(dict(
        savedir=image_dir,
        eval_ssim=False, eval_lpips_alex=False, eval_lpips_vgg=False,
        rgb_only=True,
    ))
    HW = HW[:1].repeat(len(interp_poses), 0)
    Ks = Ks[:1].repeat(len(interp_poses), 0)
    rgbs, _ = render_viewpoints(render_poses=torch.from_numpy(np.asarray(interp_poses)).cuda(),
                                HW=HW, Ks=Ks, ndc=ndc, **render_kwargs)
    # forward pass then the reverse pass -> seamless ping-pong loop
    images = [rgbs[f] for f in range(n_frames)]
    images += [rgbs[n_frames - f - 1] for f in range(n_frames)]
    imageio.mimwrite(os.path.join(savedir, 'render_{}_{}.mp4'.format(img_idx_0, img_idx_1)),
                     utils.to8b(images), fps=30, quality=8)
def seed_everything():
    '''Seed everything for better reproducibility.
    (some pytorch operation is non-deterministic like the backprop of grid_samples)
    '''
    seed = args.seed
    torch.manual_seed(seed)
    np.random.seed(seed)
    random.seed(seed)
def load_everything(args, cfg):
    '''Load images / poses / camera settings / data split.

    Returns a dict containing only the fields the training pipeline
    consumes, with images/masks moved to GPU tensors.
    '''
    mode = getattr(cfg.data, 'mode', dict())
    if 'train_all' in cfg:
        mode.update(train_all=cfg.train_all)
        print(" * * * Train with all the images: {} * * * ".format(cfg.train_all))
    if 'reso_level' in cfg:
        mode.update(reso_level=cfg.reso_level)
    data_dict = load_data(cfg.data, **mode, white_bg=cfg.data.white_bkgd)
    # drop every field the downstream code does not consume
    kept_keys = {
        'hwf', 'HW', 'Ks', 'near', 'far',
        'i_train', 'i_val', 'i_test', 'irregular_shape',
        'poses', 'render_poses', 'images', 'scale_mats_np', 'masks'}
    for key in list(data_dict):
        if key not in kept_keys:
            del data_dict[key]
    # construct data tensors on GPU (per-image lists when shapes differ)
    if data_dict['irregular_shape']:
        data_dict['images'] = [torch.FloatTensor(im, device='cpu').cuda() for im in data_dict['images']]
        data_dict['masks'] = [torch.FloatTensor(im, device='cpu').cuda() for im in data_dict['masks']]
    else:
        data_dict['images'] = torch.FloatTensor(data_dict['images'], device='cpu').cuda()
        data_dict['masks'] = torch.FloatTensor(data_dict['masks'], device='cpu').cuda()
    data_dict['poses'] = torch.Tensor(data_dict['poses'])
    return data_dict
def compute_bbox_by_cam_frustrm(args, cfg, HW, Ks, poses, i_train, near, far, **kwargs):
    """Compute a scene bounding box enclosing the near/far ray endpoints of
    every training camera frustum."""
    logger.info('compute_bbox_by_cam_frustrm: start')
    xyz_min = torch.Tensor([np.inf, np.inf, np.inf])
    xyz_max = -xyz_min
    for (H, W), K, c2w in zip(HW[i_train], Ks[i_train], poses[i_train]):
        rays_o, rays_d, viewdirs = Model.get_rays_of_a_view(
            H=H, W=W, K=K, c2w=c2w,
            ndc=cfg.data.ndc, inverse_y=cfg.data.inverse_y,
            flip_x=cfg.data.flip_x, flip_y=cfg.data.flip_y)
        # ray endpoints at the near and far planes of this view
        frustum_pts = torch.stack([rays_o + viewdirs * near, rays_o + viewdirs * far])
        xyz_min = torch.minimum(xyz_min, frustum_pts.amin((0, 1, 2)))
        xyz_max = torch.maximum(xyz_max, frustum_pts.amax((0, 1, 2)))
    logger.info('compute_bbox_by_cam_frustrm: xyz_min {}'.format(xyz_min))
    logger.info('compute_bbox_by_cam_frustrm: xyz_max {}'.format(xyz_max))
    logger.info('compute_bbox_by_cam_frustrm: finish')
    return xyz_min, xyz_max
@torch.no_grad()
def compute_bbox_by_coarse_geo(model_class, model_path, thres):
    """Tighten the scene bounding box using a trained coarse model: keep the
    axis-aligned extent of all grid points whose alpha exceeds `thres`."""
    logger.info('compute_bbox_by_coarse_geo: start')
    eps_time = time.time()
    model = utils.load_model(model_class, model_path, strict=False)
    # normalized coordinates spanning the density volume
    nx, ny, nz = model.density.shape[2], model.density.shape[3], model.density.shape[4]
    interp = torch.stack(torch.meshgrid(
        torch.linspace(0, 1, nx),
        torch.linspace(0, 1, ny),
        torch.linspace(0, 1, nz),
    ), -1)
    dense_xyz = model.xyz_min * (1 - interp) + model.xyz_max * interp
    density = model.grid_sampler(dense_xyz, model.density)
    alpha = model.activate_density(density)
    active_xyz = dense_xyz[alpha > thres]
    xyz_min = active_xyz.amin(0)
    xyz_max = active_xyz.amax(0)
    logger.info('compute_bbox_by_coarse_geo: xyz_min {}'.format(xyz_min))
    logger.info('compute_bbox_by_coarse_geo: xyz_max {}'.format(xyz_max))
    eps_time = time.time() - eps_time
    logger.info('compute_bbox_by_coarse_geo: finish (eps time: {} secs)'.format(eps_time))
    return xyz_min, xyz_max
def scene_rep_reconstruction(args, cfg, cfg_model, cfg_train, xyz_min, xyz_max, data_dict, stage, coarse_ckpt_path=None, use_dvgo=False):
    '''Train one reconstruction stage ('coarse' or 'surf').

    Builds the voxel model (DVGO for the coarse/masked setting, Voxurf
    otherwise), optionally restores a checkpoint and/or initializes the SDF
    grid from a previous stage, then runs the main optimization loop with
    ray sampling, rendering losses, TV regularization, lr scheduling,
    periodic validation and a final checkpoint save.

    NOTE(review): the original file's indentation was flattened; nesting
    below is reconstructed from syntax and should be confirmed against the
    upstream source.
    '''
    logger.info("= "*10 + "Begin training state [ {} ]".format(stage) + " ="*10)
    # init
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    if abs(cfg_model.world_bound_scale - 1) > 1e-9:
        # symmetrically enlarge the scene bounding box
        xyz_shift = (xyz_max - xyz_min) * (cfg_model.world_bound_scale - 1) / 2
        xyz_min -= xyz_shift
        xyz_max += xyz_shift
    HW, Ks, near, far, i_train, i_val, i_test, poses, render_poses, images, masks = [
        data_dict[k] for k in [
            'HW', 'Ks', 'near', 'far', 'i_train', 'i_val', 'i_test', 'poses', 'render_poses', 'images', 'masks'
        ]
    ]
    print("Train idx", i_train, "\nTest idx", i_test)
    # find whether there is existing checkpoint path
    last_ckpt_path = os.path.join(cfg.basedir, cfg.expname, f'{stage}_last.tar')
    if args.no_reload:
        reload_ckpt_path = None
    elif args.ft_path:
        reload_ckpt_path = args.ft_path
    elif getattr(cfg_train, 'ft_path', ''):
        reload_ckpt_path = cfg_train.ft_path
    elif os.path.isfile(last_ckpt_path):
        reload_ckpt_path = last_ckpt_path
    else:
        reload_ckpt_path = None
    # init model
    model_kwargs = copy.deepcopy(cfg_model)
    scale_ratio = getattr(cfg_train, 'scale_ratio', 2)
    num_voxels = model_kwargs.pop('num_voxels')
    num_voxels_bg = model_kwargs.pop('num_voxels_bg', num_voxels)
    if len(cfg_train.pg_scale) and not args.render_only:
        # start at reduced resolution; progressive scaling restores it later
        deduce = (scale_ratio**len(cfg_train.pg_scale))
        num_voxels = int(num_voxels / deduce)
        num_voxels_bg = int(num_voxels_bg / deduce)
        logger.info("\n" + "+ "*10 + "start with {} resolution deduction".format(deduce) + " +"*10 + "\n")
    else:
        deduce = 1
    if use_dvgo:
        # use dvgo init for the w/ mask setting
        model = dvgo_ori.DirectVoxGO(
            xyz_min=xyz_min, xyz_max=xyz_max,
            num_voxels=num_voxels,
            num_voxels_bg=num_voxels_bg,
            mask_cache_path=coarse_ckpt_path,
            exppath=os.path.join(cfg.basedir, cfg.expname),
            **model_kwargs)
    else:
        model = Model.Voxurf(
            xyz_min=xyz_min, xyz_max=xyz_max,
            num_voxels=num_voxels,
            num_voxels_bg=num_voxels_bg,
            mask_cache_path=coarse_ckpt_path,
            exppath=os.path.join(cfg.basedir, cfg.expname),
            **model_kwargs)
    if cfg_model.maskout_near_cam_vox:
        # mask voxels closer to training cameras than the near plane
        model.maskout_near_cam_vox(poses[i_train,:3,3], near)
    model = model.to(device)
    # init optimizer
    optimizer = utils.create_optimizer_or_freeze_model(model, cfg_train, global_step=0)
    load_density_from = getattr(cfg_train, 'load_density_from', '')
    load_sdf_from = getattr(cfg_train, 'load_sdf_from', '')
    if load_density_from and stage == 'surf':
        density_ckpt_path = os.path.join(cfg.basedir, load_density_from)
        if args.load_density_only:
            model = utils.load_grid_data(model, density_ckpt_path, deduce)
        else:
            reload_ckpt_path = density_ckpt_path
    if reload_ckpt_path is None:
        logger.info(f'scene_rep_reconstruction ({stage}): train from scratch')
        start = 0
    else:
        logger.info(f'scene_rep_reconstruction ({stage}): reload from {reload_ckpt_path}')
        model, optimizer, start = utils.load_checkpoint(
            model, optimizer, reload_ckpt_path, args.no_reload_optimizer, strict=False)
        logger.info("Restart from iteration {}, model sdf size: {}".format(start, model.sdf.grid.shape))
        # checkpoint filename is '<stage>_last.tar'; a different stage prefix
        # means this is a warm start, so restart the iteration count
        if reload_ckpt_path.split('/')[-1].split('_')[0] != stage:
            start = 0
        if cfg_train.get('load_param', False):
            # optionally re-load parameters (no optimizer state) from load_sdf_from
            model, _, _ = utils.load_checkpoint(
                model, None, cfg_train.load_sdf_from, True, strict=False)
    # init sdf
    if load_sdf_from:
        if hasattr(model, 'init_sdf_from_sdf'):
            sdf_reduce = cfg_train.get('sdf_reduce', 1.0)
            if cfg_train.load_sdf_from == 'auto':
                # default to the coarse-stage output of this experiment
                cfg_train.load_sdf_from = os.path.join(cfg.basedir, cfg.expname0, 'coarse', 'surf_last.tar')
            if cfg_train.get('load_sdf_path', None):
                cfg_train.load_sdf_from = cfg_train.load_sdf_path + 'scan_{}/surf_last.tar'.format(args.scene)
            logger.info("\n" + "+ "*10 + "load sdf from: " + cfg_train.load_sdf_from + "+"*10 + "\n")
            sdf0 = utils.load_grid_data(model, cfg_train.load_sdf_from, name='sdf', return_raw=True)
            model.init_sdf_from_sdf(sdf0, smooth=False, reduce=sdf_reduce)
            if cfg_train.get('load_bg_all', False):
                # also restore background density grid and bg-named weights
                bg_density0 = utils.load_grid_data(model, cfg_train.load_sdf_from, name='bg_density', return_raw=True)
                model.init_bg_density_from_bg_density(bg_density0)
                utils.load_weight_by_name(model, cfg_train.load_sdf_from, name='bg')
            elif cfg_train.get('load_bg_density', False):
                bg_density0 = utils.load_grid_data(model, cfg_train.load_sdf_from, name='bg_density', return_raw=True)
                model.init_bg_density_from_bg_density(bg_density0)
        else:
            # model cannot re-initialize; copy the raw sdf grid instead
            model = utils.load_grid_data(model, cfg_train.load_sdf_from, name='sdf')
            smooth = getattr(model, 'init_sdf_smooth', False)
            if smooth:
                model.sdf = model.smooth_conv(model.sdf)
        # parameters changed -> recreate the optimizer
        optimizer = utils.create_optimizer_or_freeze_model(model, cfg_train, global_step=0)
    elif args.sdf_mode != "density" and load_density_from:
        smooth = getattr(model, 'init_density_smooth', True)
        model.init_sdf_from_density(smooth=smooth, reduce=1)
        # have to recreate the optimizer
        optimizer = utils.create_optimizer_or_freeze_model(model, cfg_train, global_step=0)
    # initial mesh evaluation
    # if stage == 'surf':
    #     gt_eval = 'dtu' in cfg.basedir
    #     validate_mesh(model, resolution=256, world_space=True, prefix="init", scale_mats_np=data_dict['scale_mats_np'], scene=args.scene, gt_eval=gt_eval)
    # init rendering setup
    render_kwargs = {
        'near': data_dict['near'],
        'far': data_dict['far'],
        'bg': 1 if cfg.data.white_bkgd else 0,
        'stepsize': cfg_model.stepsize,
        'inverse_y': cfg.data.inverse_y,
        'flip_x': cfg.data.flip_x,
        'flip_y': cfg.data.flip_y,
    }
    # init batch rays sampler
    def gather_training_rays():
        # Pre-generate all training rays according to the configured sampler.
        # NOTE(review): mask_tr_ori is gathered but not used below.
        if data_dict['irregular_shape']:
            rgb_tr_ori = [images[i].to('cpu' if cfg.data.load2gpu_on_the_fly else device) for i in i_train]
            mask_tr_ori = [masks[i].to('cpu' if cfg.data.load2gpu_on_the_fly else device) for i in i_train]
        else:
            rgb_tr_ori = images[i_train].to('cpu' if cfg.data.load2gpu_on_the_fly else device)
            mask_tr_ori = masks[i_train].to('cpu' if cfg.data.load2gpu_on_the_fly else device)
        if cfg_train.ray_sampler == 'in_maskcache':
            rgb_tr, rays_o_tr, rays_d_tr, viewdirs_tr, imsz = Model.get_training_rays_in_maskcache_sampling(
                rgb_tr_ori=rgb_tr_ori,
                train_poses=poses[i_train],
                HW=HW[i_train], Ks=Ks[i_train],
                ndc=cfg.data.ndc, inverse_y=cfg.data.inverse_y,
                flip_x=cfg.data.flip_x, flip_y=cfg.data.flip_y,
                model=model, render_kwargs=render_kwargs,
            )
        elif cfg_train.ray_sampler == 'flatten':
            rgb_tr, rays_o_tr, rays_d_tr, viewdirs_tr, imsz = Model.get_training_rays_flatten(
                rgb_tr_ori=rgb_tr_ori,
                train_poses=poses[i_train],
                HW=HW[i_train], Ks=Ks[i_train], ndc=cfg.data.ndc, inverse_y=cfg.data.inverse_y,
                flip_x=cfg.data.flip_x, flip_y=cfg.data.flip_y)
        else:
            rgb_tr, rays_o_tr, rays_d_tr, viewdirs_tr, imsz = Model.get_training_rays(
                rgb_tr=rgb_tr_ori,
                train_poses=poses[i_train],
                HW=HW[i_train], Ks=Ks[i_train], ndc=cfg.data.ndc, inverse_y=cfg.data.inverse_y,
                flip_x=cfg.data.flip_x, flip_y=cfg.data.flip_y)
        index_generator = Model.batch_indices_generator(len(rgb_tr), cfg_train.N_rand)
        if cfg_train.ray_sampler == 'patch':
            # patch sampler contains lots of empty spaces, remove them.
            index_generator = Model.batch_indices_generator(len(rgb_tr), 1)
        batch_index_sampler = lambda: next(index_generator)
        return rgb_tr, rays_o_tr, rays_d_tr, viewdirs_tr, imsz, batch_index_sampler
    rgb_tr, rays_o_tr, rays_d_tr, viewdirs_tr, imsz, batch_index_sampler = gather_training_rays()
    if cfg_train.pervoxel_lr:
        def per_voxel_init():
            # scale per-voxel lr by view coverage; kill rarely-seen voxels
            cnt = model.voxel_count_views(
                rays_o_tr=rays_o_tr, rays_d_tr=rays_d_tr, imsz=imsz, near=near, far=far,
                stepsize=cfg_model.stepsize, downrate=cfg_train.pervoxel_lr_downrate,
                irregular_shape=data_dict['irregular_shape'])
            optimizer.set_pervoxel_lr(cnt)
            with torch.no_grad():
                model.density[cnt <= 2] = -100
        per_voxel_init()
    # GOGO
    psnr_lst = []
    weight_lst = []
    mask_lst = []
    bg_mask_lst = []
    weight_sum_lst = []
    weight_nonzero_lst = []
    s_val_lst = []
    time0 = time.time()
    logger.info("start: {} end: {}".format(1 + start, 1 + cfg_train.N_iters))
    for global_step in trange(1+start, 1+cfg_train.N_iters):
        # progress scaling checkpoint
        if global_step in cfg_train.pg_scale:
            if hasattr(model, 'num_voxels_bg'):
                model.scale_volume_grid(model.num_voxels * scale_ratio, model.num_voxels_bg * scale_ratio)
            else:
                model.scale_volume_grid(model.num_voxels * scale_ratio)
            # grid tensors were re-created -> rebuild the optimizer
            optimizer = utils.create_optimizer_or_freeze_model(model, cfg_train, global_step=0)
        # random sample rays
        if cfg_train.ray_sampler in ['flatten', 'in_maskcache']:
            sel_i = batch_index_sampler()
            target = rgb_tr[sel_i]
            rays_o = rays_o_tr[sel_i]
            rays_d = rays_d_tr[sel_i]
            viewdirs = viewdirs_tr[sel_i]
        elif cfg_train.ray_sampler == 'patch':
            # one random square patch from one random image
            sel_b = batch_index_sampler()
            patch_size = cfg_train.N_patch
            sel_r_start = torch.randint(rgb_tr.shape[1] - patch_size, [1])
            sel_c_start = torch.randint(rgb_tr.shape[2] - patch_size, [1])
            sel_r, sel_c = torch.meshgrid(torch.arange(sel_r_start[0], sel_r_start[0] + patch_size),
                                          torch.arange(sel_c_start[0], sel_c_start[0] + patch_size))
            sel_r, sel_c = sel_r.reshape(-1), sel_c.reshape(-1)
            target = rgb_tr[sel_b, sel_r, sel_c]
            rays_o = rays_o_tr[sel_b, sel_r, sel_c]
            rays_d = rays_d_tr[sel_b, sel_r, sel_c]
            viewdirs = viewdirs_tr[sel_b, sel_r, sel_c]
        elif cfg_train.ray_sampler == 'random':
            sel_b = torch.randint(rgb_tr.shape[0], [cfg_train.N_rand])
            sel_r = torch.randint(rgb_tr.shape[1], [cfg_train.N_rand])
            sel_c = torch.randint(rgb_tr.shape[2], [cfg_train.N_rand])
            target = rgb_tr[sel_b, sel_r, sel_c]
            rays_o = rays_o_tr[sel_b, sel_r, sel_c]
            rays_d = rays_d_tr[sel_b, sel_r, sel_c]
            viewdirs = viewdirs_tr[sel_b, sel_r, sel_c]
        else:
            raise NotImplementedError
        if cfg.data.load2gpu_on_the_fly:
            target = target.to(device)
            rays_o = rays_o.to(device)
            rays_d = rays_d.to(device)
            viewdirs = viewdirs.to(device)
        # volume rendering
        render_result = model(rays_o, rays_d, viewdirs, global_step=global_step, **render_kwargs)
        # gradient descent step
        optimizer.zero_grad(set_to_none=True)
        loss = cfg_train.weight_main * F.mse_loss(render_result['rgb_marched'], target)
        psnr = utils.mse2psnr(loss.detach()).item()
        if cfg_train.weight_entropy_last > 0:
            # binary-entropy penalty on the leftover transmittance
            pout = render_result['alphainv_cum'][...,-1].clamp(1e-6, 1-1e-6)
            entropy_last_loss = -(pout*torch.log(pout) + (1-pout)*torch.log(1-pout)).mean()
            loss += cfg_train.weight_entropy_last * entropy_last_loss
        if cfg_train.weight_rgbper > 0:
            # per-point color consistency, weighted by (detached) ray weights
            rgbper = (render_result['raw_rgb'] - target.unsqueeze(-2)).pow(2).sum(-1)
            rgbper_loss = (rgbper * render_result['weights'].detach()).sum(-1).mean()
            loss += cfg_train.weight_rgbper * rgbper_loss
        # TV terms that participate in autograd (ori_tv path)
        if global_step>cfg_train.tv_from and global_step<cfg_train.tv_end and global_step%cfg_train.tv_every==0:
            if cfg_train.weight_tv_density>0:
                tv_terms = getattr(cfg_train, 'tv_terms', dict())
                sdf_tv, smooth_grad_tv = tv_terms['sdf_tv'], tv_terms['smooth_grad_tv']
                if smooth_grad_tv > 0:
                    loss += cfg_train.weight_tv_density * model.density_total_variation(sdf_tv=0, smooth_grad_tv=smooth_grad_tv)
                if getattr(cfg_train, 'ori_tv', False):
                    loss += cfg_train.weight_tv_density * model.density_total_variation(sdf_tv=sdf_tv, smooth_grad_tv=0)
                    weight_tv_k0 = getattr(cfg_train, 'weight_tv_k0')
                    if weight_tv_k0 > 0:
                        k0_tv_terms = getattr(cfg_train, 'k0_tv_terms', dict())
                        loss += cfg_train.weight_tv_k0 * model.k0_total_variation(**k0_tv_terms)
                    if getattr(tv_terms, 'bg_density_tv', 0):
                        loss += cfg_train.weight_tv_density * model.density_total_variation(sdf_tv=0, smooth_grad_tv=0, bg_density_tv=tv_terms['bg_density_tv'])
        if getattr(cfg_train, 'ori_tv', False) and cfg_train.get('weight_bg_tv_k0', 0) >0 and global_step>cfg_train.tv_from and global_step%cfg_train.tv_every==0 and global_step<cfg_train.tv_end:
            bg_k0_tv_terms = getattr(cfg_train, 'bg_k0_tv_terms', dict())
            loss += cfg_train.get('weight_bg_tv_k0', 0) * model.bg_k0_total_variation(**bg_k0_tv_terms)
        if getattr(cfg_train, 'weight_rgb0', 0.) > 0:
            loss += F.mse_loss(render_result['rgb_marched0'], target) * cfg_train.weight_rgb0
        loss.backward()
        # make sure that density has no grad
        # (non-ori_tv path adds TV gradients directly, after backward)
        if global_step>cfg_train.tv_from and global_step<cfg_train.tv_end and global_step%cfg_train.tv_every==0:
            if not getattr(cfg_train, 'ori_tv', False):
                if cfg_train.weight_tv_density>0:
                    tv_terms = getattr(cfg_train, 'tv_terms', dict())
                    sdf_tv = tv_terms['sdf_tv']
                    if sdf_tv > 0:
                        model.sdf_total_variation_add_grad(
                            cfg_train.weight_tv_density * sdf_tv / len(rays_o), global_step < cfg_train.tv_dense_before)
                    bg_density_tv = getattr(tv_terms, 'bg_density_tv', 0)
                    if bg_density_tv > 0:
                        model.bg_density_total_variation_add_grad(
                            cfg_train.weight_tv_density * bg_density_tv / len(rays_o), global_step < cfg_train.tv_dense_before)
                if cfg_train.weight_tv_k0 > 0:
                    model.k0_total_variation_add_grad(
                        cfg_train.weight_tv_k0 / len(rays_o), global_step < cfg_train.tv_dense_before)
                if getattr(cfg_train, 'weight_bg_tv_k0', 0) > 0:
                    model.bg_k0_total_variation_add_grad(
                        cfg_train.weight_bg_tv_k0 / len(rays_o), global_step < cfg_train.tv_dense_before)
        optimizer.step()
        # per-iteration statistics (only when some ray carried weight)
        wm = render_result['weights'].max(-1)[0]
        ws = render_result['weights'].sum(-1)
        if (wm>0).float().mean() > 0:
            psnr_lst.append(psnr)
            weight_lst.append(wm[wm>0].mean().detach().cpu().numpy())
            weight_sum_lst.append(ws[ws>0].mean().detach().cpu().numpy())
            weight_nonzero_lst.append((ws>0).float().mean().detach().cpu().numpy())
            mask_lst.append(render_result['mask'].float().mean().detach().cpu().numpy())
            if 'bg_mask' in render_result:
                bg_mask_lst.append(render_result['bg_mask'].float().mean().detach().cpu().numpy())
        s_val = render_result["s_val"] if "s_val" in render_result else 0
        s_val_lst.append(s_val)
        # writer.add_scalar('train/psnr', psnr, global_step)
        # writer.add_scalar('train/s_val', s_val, global_step)
        # writer.add_scalar('train/mask', mask_lst[-1], global_step)
        global_step_ = global_step - 1
        # update lr
        N_iters = cfg_train.N_iters
        if not getattr(cfg_train, 'cosine_lr', ''):
            # exponential decay: lr drops by 10x every lrate_decay*1000 steps
            decay_steps = cfg_train.lrate_decay * 1000
            decay_factor = 0.1 ** (1/decay_steps)
            for i_opt_g, param_group in enumerate(optimizer.param_groups):
                param_group['lr'] = param_group['lr'] * decay_factor
        else:
            def cosine_lr_func(iter, warm_up_iters, warm_up_min_ratio, max_steps, const_warm_up=False, min_ratio=0):
                # linear (or constant) warm-up followed by cosine annealing
                if iter < warm_up_iters:
                    if not const_warm_up:
                        lr = warm_up_min_ratio + (1 - warm_up_min_ratio) * (iter / warm_up_iters)
                    else:
                        lr = warm_up_min_ratio
                else:
                    lr = (1 + math.cos((iter - warm_up_iters) / (max_steps - warm_up_iters) * math.pi)) * 0.5 * (1 - min_ratio) + min_ratio
                return lr
            def extra_warm_up_func(iter, start_iter, warm_up_iters, warm_up_min_ratio):
                # optional secondary warm-up starting at start_iter
                if iter >= start_iter:
                    extra_lr = warm_up_min_ratio + (1 - warm_up_min_ratio) * (iter - start_iter) / warm_up_iters
                    return min(extra_lr, 1.0)
                else:
                    return 1.0
            warm_up_iters = cfg_train.cosine_lr_cfg.get('warm_up_iters', 0)
            warm_up_min_ratio = cfg_train.cosine_lr_cfg.get('warm_up_min_ratio', 1.0)
            const_warm_up = cfg_train.cosine_lr_cfg.get('const_warm_up', False)
            cos_min_ratio = cfg_train.cosine_lr_cfg.get('cos_min_ratio', False)
            # lr is updated multiplicatively: ratio of schedule at step vs step-1
            if global_step == 0:
                pre_decay_factor = 1.0
            else:
                pre_decay_factor = cosine_lr_func(global_step_ - 1, warm_up_iters, warm_up_min_ratio, N_iters, const_warm_up, cos_min_ratio)
            pos_decay_factor = cosine_lr_func(global_step_, warm_up_iters, warm_up_min_ratio, N_iters, const_warm_up, cos_min_ratio)
            decay_factor = pos_decay_factor / pre_decay_factor
            for i_opt_g, param_group in enumerate(optimizer.param_groups):
                param_group['lr'] = param_group['lr'] * decay_factor
        # one-shot per-module lr decays scheduled at specific steps
        decay_step_module = getattr(cfg_train, 'decay_step_module', dict())
        if global_step_ in decay_step_module:
            for i_opt_g, param_group in enumerate(optimizer.param_groups):
                if param_group['name'] in decay_step_module[global_step_]:
                    decay_factor = decay_step_module[global_step_][param_group['name']]
                    param_group['lr'] = param_group['lr'] * decay_factor
                    logger.info('- '*10 + '[Decay lrate] for {} by {}'.format(param_group['name'], decay_factor) + ' -'*10)
        # update tv terms
        tv_updates = getattr(cfg_train, 'tv_updates', dict())
        if global_step_ in tv_updates:
            for tv_term, value in tv_updates[global_step_].items():
                setattr(cfg_train.tv_terms, tv_term, value)
            logger.info('- '*10 + '[Update tv]: ' + str(tv_updates[global_step_]) + ' -'*10)
        # update s_val func
        s_updates = getattr(cfg_model, 's_updates', dict())
        if global_step_ in s_updates:
            for s_term, value in s_updates[global_step_].items():
                setattr(model, s_term, value)
            logger.info('- '*10 + '[Update s]: ' + str(s_updates[global_step_]) + ' -'*10)
        # update smooth kernel
        smooth_updates = getattr(cfg_model, 'smooth_updates', dict())
        if global_step_ in smooth_updates:
            model.init_smooth_conv(**smooth_updates[global_step_])
            logger.info('- '*10 + '[Update smooth conv]: ' + str(smooth_updates[global_step_]) + ' -'*10)
        # check log & save
        if global_step%args.i_print==0:
            eps_time = time.time() - time0
            eps_time_str = f'{eps_time//3600:02.0f}:{eps_time//60%60:02.0f}:{eps_time%60:02.0f}'
            bg_mask_mean = 0. if len(bg_mask_lst) == 0 else np.mean(bg_mask_lst)
            logger.info(f'scene_rep_reconstruction ({stage}): iter {global_step:6d} / '
                        f'Loss: {loss.item():.9f} / PSNR: {np.mean(psnr_lst):5.2f} / '
                        f'Wmax: {np.mean(weight_lst):5.2f} / Wsum: {np.mean(weight_sum_lst):5.2f} / W>0: {np.mean(weight_nonzero_lst):5.2f}'
                        f' / s_val: {np.mean(s_val_lst):5.2g} / mask\%: {100*np.mean(mask_lst):1.2f} / bg_mask\%: {100*bg_mask_mean:1.2f} '
                        f'Eps: {eps_time_str}')
            psnr_lst, weight_lst, weight_sum_lst, weight_nonzero_lst, mask_lst, bg_mask_lst, s_val_lst = [], [], [], [], [], [], []
        # validate image
        if global_step%args.i_validate==0 and global_step != cfg_train.N_iters and stage == 'surf' and 'fine' in args.sdf_mode:
            render_viewpoints_kwargs = {
                'model': model,
                'ndc': cfg.data.ndc,
                'render_kwargs': {
                    'near': data_dict['near'],
                    'far': data_dict['far'],
                    'bg': 1 if cfg.data.white_bkgd else 0,
                    'stepsize': cfg_model.stepsize,
                    'inverse_y': cfg.data.inverse_y,
                    'flip_x': cfg.data.flip_x,
                    'flip_y': cfg.data.flip_y,
                    'render_grad': True,
                    'render_depth': True,
                    'render_in_out': True,
                },
            }
            validate_image(cfg, stage, global_step, data_dict, render_viewpoints_kwargs, eval_all=cfg_train.N_iters==global_step)
        # validate mesh
        prefix = args.prefix + '_' if args.prefix else ''
        prefix += args.suffix + '_' if args.suffix else ''
        if 'eval_iters' in cfg_train and stage == 'surf':
            if global_step - start in cfg_train.eval_iters and stage == 'surf':
                gt_eval = 'dtu' in cfg.basedir
                cd = validate_mesh(model, resolution=256,
                                   prefix="{}{}_fine".format(prefix, global_step),
                                   gt_eval=gt_eval,
                                   world_space=True,
                                   scale_mats_np=data_dict['scale_mats_np'],
                                   scene=args.scene)
        # save checkpoints
        if global_step == cfg_train.N_iters:
            torch.save({
                'global_step': global_step,
                'model_kwargs': model.get_kwargs(),
                'MaskCache_kwargs': model.get_MaskCache_kwargs(),
                'model_state_dict': model.state_dict(),
                'optimizer_state_dict': optimizer.state_dict(),
            }, last_ckpt_path)
            logger.info(f'scene_rep_reconstruction ({stage}): saved checkpoints at '+ last_ckpt_path)
        # final mesh validation
        if global_step == cfg_train.N_iters and stage == 'surf' and 'fine' in args.sdf_mode:
            validate_mesh(model, 512, threshold=0.0, prefix="{}final".format(prefix), world_space=True,
                          scale_mats_np=data_dict['scale_mats_np'], gt_eval='dtu' in cfg.basedir, runtime=False, scene=args.scene)
def train(args, cfg, data_dict):
    """Run the full Voxurf pipeline: optional DVGO coarse stage, then the surf stage.

    Writes the parsed CLI arguments ('args.txt') and the resolved config
    ('config.py') into the experiment directory for reproducibility, then
    dispatches the per-stage optimization via scene_rep_reconstruction.

    Args:
        args: parsed command-line arguments.
        cfg: mmcv config for this experiment.
        data_dict: dataset dict produced by load_everything().
    """
    # init
    logger.info('train: start')
    eps_time = time.time()
    # Record the exact CLI arguments used for this run.
    with open(os.path.join(cfg.basedir, cfg.expname, 'args.txt'), 'w') as file:
        for arg in sorted(vars(args)):
            attr = getattr(args, arg)
            file.write('{} = {}\n'.format(arg, attr))
    cfg.dump(os.path.join(cfg.basedir, cfg.expname, 'config.py'))
    if args.run_dvgo_init:
        # coarse geometry searching
        eps_coarse = time.time()
        xyz_min_coarse, xyz_max_coarse = compute_bbox_by_cam_frustrm(args=args, cfg=cfg, **data_dict)
        scene_rep_reconstruction(
            args=args, cfg=cfg,
            cfg_model=cfg.coarse_model_and_render, cfg_train=cfg.coarse_train,
            xyz_min=xyz_min_coarse, xyz_max=xyz_max_coarse,
            data_dict=data_dict, stage='coarse', use_dvgo=True)
        eps_coarse = time.time() - eps_coarse
        eps_time_str = f'{eps_coarse//3600:02.0f}:{eps_coarse//60%60:02.0f}:{eps_coarse%60:02.0f}'
        logger.info("+ "*10 + 'train: coarse geometry searching in' + eps_time_str + " +"*10 )
    # The coarse checkpoint path is needed even when the coarse stage was run
    # previously (args.run_dvgo_init False): the fine bbox is derived from it.
    coarse_expname = cfg.expname0 + '/coarse'
    coarse_ckpt_path = os.path.join(cfg.basedir, coarse_expname, f'coarse_last.tar')
    logger.info("+ "*10 + 'coarse_expname' + coarse_expname + " +"*10)
    if args.no_dvgo_init:
        # for the w\o mask setting
        box_size_ = cfg.surf_train.get('box_size', 1.5)
        print(">>> box_size: ", box_size_)
        xyz_min_fine, xyz_max_fine = torch.tensor([-box_size_,-box_size_,-box_size_]).cuda(), torch.tensor([box_size_, box_size_, box_size_]).cuda()
    else:
        # Derive a tight fine-stage bbox from the coarse density grid.
        xyz_min_fine, xyz_max_fine = compute_bbox_by_coarse_geo(
            model_class=dvgo_ori.DirectVoxGO, model_path=coarse_ckpt_path,
            thres=cfg.fine_model_and_render.bbox_thres)
    if hasattr(cfg, 'surf_train'):
        eps_surf = time.time()
        scene_rep_reconstruction(
            args=args, cfg=cfg,
            cfg_model=cfg.surf_model_and_render, cfg_train=cfg.surf_train,
            xyz_min=xyz_min_fine, xyz_max=xyz_max_fine,
            data_dict=data_dict, stage='surf',
            coarse_ckpt_path=coarse_ckpt_path)
        eps_surf = time.time() - eps_surf
        eps_time_str = f'{eps_surf//3600:02.0f}:{eps_surf//60%60:02.0f}:{eps_surf%60:02.0f}'
        logger.info("+ "*10 + 'train: fine detail reconstruction in' + eps_time_str + " +"*10 )
    eps_time = time.time() - eps_time
    eps_time_str = f'{eps_time//3600:02.0f}:{eps_time//60%60:02.0f}:{eps_time%60:02.0f}'
    logger.info('train: finish (eps time' + eps_time_str + ')')
def validate_image(cfg, stage, step, data_dict, render_viewpoints_kwargs, eval_all=True):
    """Render one randomly-chosen test view and log its image metrics.

    Args:
        cfg: experiment config (provides the output directory layout).
        stage: training stage name; used to name the output directory.
        step: current global step, forwarded to render_viewpoints for naming.
        data_dict: loaded dataset dict (poses, HW, Ks, images, masks, splits).
        render_viewpoints_kwargs: model/render kwargs forwarded verbatim.
        eval_all: when True, also compute the expensive LPIPS metrics.
    """
    testsavedir = os.path.join(cfg.basedir, cfg.expname, f'render_test_{stage}')
    os.makedirs(testsavedir, exist_ok=True)
    rand_idx = random.randint(0, len(data_dict['poses'][data_dict['i_test']])-1)
    logger.info("validating test set idx: {}".format(rand_idx))
    eval_lpips_alex = args.eval_lpips_alex and eval_all
    # BUGFIX: this was gated on args.eval_lpips_alex (copy-paste), which
    # silently ignored the --eval_lpips_vgg flag during validation.
    eval_lpips_vgg = args.eval_lpips_vgg and eval_all
    rgbs, disps = render_viewpoints(
        render_poses=data_dict['poses'][data_dict['i_test']][rand_idx][None],
        HW=data_dict['HW'][data_dict['i_test']][rand_idx][None],
        Ks=data_dict['Ks'][data_dict['i_test']][rand_idx][None],
        gt_imgs=[data_dict['images'][i].cpu().numpy() for i in data_dict['i_test']][rand_idx][None],
        masks=[data_dict['masks'][i].cpu().numpy() for i in data_dict['i_test']][rand_idx][None],
        savedir=testsavedir,
        eval_ssim=args.eval_ssim, eval_lpips_alex=eval_lpips_alex, eval_lpips_vgg=eval_lpips_vgg, idx=rand_idx, step=step,
        **render_viewpoints_kwargs)
def validate_mesh(model, resolution=128, threshold=0.0, prefix="", world_space=False,
                  scale_mats_np=None, gt_eval=False, runtime=True, scene=122, smooth=True,
                  extract_color=False):
    """Extract a mesh from the model's SDF, export it, and optionally evaluate it.

    Args:
        model: trained model exposing extract_geometry() over its xyz bbox.
        resolution: marching-cubes grid resolution.
        threshold: SDF level-set value to extract.
        prefix: filename prefix for the exported mesh.
        world_space: if True, de-normalize vertices with scale_mats_np.
        scale_mats_np: 4x4 normalization matrix from the DTU-style loader
            (uniform scale at [0, 0], translation in column 3).
        gt_eval: if True, run Chamfer evaluation against the DTU ground truth.
        runtime: if True, use the fast down-sampled (non-standard) evaluation.
        scene: scene id; used for output naming and locating the GT point cloud.
        smooth: forwarded to extract_geometry (mesh smoothing).
        extract_color: if True, query per-vertex colors from the model.

    Returns:
        Overall Chamfer distance when gt_eval is set, otherwise 0.
    """
    os.makedirs(os.path.join(cfg.basedir, cfg.expname, 'meshes'), exist_ok=True)
    bound_min = model.xyz_min.clone().detach().float()
    bound_max = model.xyz_max.clone().detach().float()
    gt_path = os.path.join(cfg.data.datadir, "stl_total.ply") if gt_eval else ''
    vertices0, triangles = model.extract_geometry(bound_min, bound_max, resolution=resolution,
                                                  threshold=threshold, scale_mats_np=scale_mats_np,
                                                  gt_path=gt_path, smooth=smooth,
                                                  )
    if world_space and scale_mats_np is not None:
        # De-normalize: uniform scale plus translation from the scale matrix.
        vertices = vertices0 * scale_mats_np[0, 0] + scale_mats_np[:3, 3][None]
    else:
        vertices = vertices0
    if extract_color:
        # use normal direction as the viewdir
        # Colors are queried in normalized space (vertices0), in chunks.
        ray_pts = torch.from_numpy(vertices0).cuda().float().split(8192 * 32, 0)
        vertex_colors = [model.mesh_color_forward(pts) for pts in ray_pts]
        vertex_colors = (torch.concat(vertex_colors).cpu().detach().numpy() * 255.).astype( np.uint8)
        mesh = trimesh.Trimesh(vertices, triangles, vertex_colors=vertex_colors)
    else:
        mesh = trimesh.Trimesh(vertices, triangles)
    mesh_path = os.path.join(cfg.basedir, cfg.expname, 'meshes', "{}_".format(scene)+prefix+'.ply')
    mesh.export(mesh_path)
    logger.info("mesh saved at " + mesh_path)
    if gt_eval:
        mean_d2s, mean_s2d, over_all = dtu_eval.eval(mesh_path, scene=scene, eval_dir=os.path.join(cfg.basedir, cfg.expname, 'meshes'),
                                                     dataset_dir='data/DTU', suffix=prefix+'eval', use_o3d=False, runtime=runtime)
        res = "standard point cloud sampling" if not runtime else "down sampled point cloud for fast eval (NOT standard!):"
        logger.info("mesh evaluation with {}".format(res))
        logger.info(" [ d2s: {:.3f} | s2d: {:.3f} | mean: {:.3f} ]".format(mean_d2s, mean_s2d, over_all))
        return over_all
    return 0.
if __name__=='__main__':
    # load setup
    parser = config_parser()
    args = parser.parse_args()
    cfg = mmcv.Config.fromfile(args.config)
    # reset the root by the scene id
    if args.scene:
        cfg.expname += "{}".format(args.scene)
        cfg.data.datadir += "{}".format(args.scene)
    cfg.expname0 = cfg.expname
    cfg.expname = cfg.expname + '/' + cfg.exp_stage
    if args.suffix:
        cfg.expname += "_" + args.suffix
    cfg.load_expname = args.load_expname if args.load_expname else cfg.expname
    # set up tensorboard
    writer_dir = os.path.join(cfg.basedir, cfg.expname0, 'logs_all', cfg.expname)
    # writer = SummaryWriter(log_dir=writer_dir)
    # set up the logger and tensorboard
    cfg.basedir0 = cfg.basedir
    if args.prefix:
        cfg.basedir = os.path.join(cfg.basedir, args.prefix)
    log_dir = os.path.join(cfg.basedir, cfg.expname, 'log')
    os.makedirs(log_dir, exist_ok=True)
    now = datetime.now()
    time_str = now.strftime('%Y-%m-%d_%H-%M-%S')
    logger = get_root_logger(logging.INFO, handlers=[
        logging.FileHandler(os.path.join(log_dir, '{}_train.log').format(time_str))])
    logger.info("+ "*10 + cfg.expname + " +"*10)
    logger.info("+ "*10 + log_dir + " +"*10)
    # set white or black color
    if cfg.get('use_sp_color', False):
        assert 'white_list' in cfg and 'black_list' in cfg
        if int(args.scene) in cfg['white_list']:
            # NOTE(review): membership is tested with int(args.scene) but the
            # assert checks the raw string — confirm the list element types.
            assert args.scene not in cfg['black_list']
            cfg.data.white_bkgd = True
            logger.info("+ "*10 + str(args.scene) + ' white bg ' + " +"*10)
        if int(args.scene) in cfg['black_list']:
            assert args.scene not in cfg['white_list']
            cfg.data.white_bkgd = False
            logger.info("+ "*10 + str(args.scene) + ' black bg ' + " +"*10)
    # init enviroment
    if torch.cuda.is_available():
        torch.set_default_tensor_type('torch.cuda.FloatTensor')
        device = torch.device('cuda')
    else:
        device = torch.device('cpu')
    seed_everything()
    if getattr(cfg, 'load_expname', None) is None:
        cfg.load_expname = args.load_expname if args.load_expname else cfg.expname
    logger.info(cfg.load_expname)
    os.makedirs(os.path.join(cfg.basedir, cfg.expname, 'recording'), exist_ok=True)
    if not args.render_only or args.mesh_from_sdf:
        # Archive the entry script and config next to the outputs.
        copyfile('run.py', os.path.join(cfg.basedir, cfg.expname, 'recording', 'run.py'))
        copyfile(args.config, os.path.join(cfg.basedir, cfg.expname, 'recording', args.config.split('/')[-1]))
    import lib.dvgo_ori as dvgo_ori
    # Select the SDF model implementation and archive its source file.
    if args.sdf_mode == "voxurf_coarse":
        import lib.voxurf_coarse as Model
        copyfile('lib/voxurf_coarse.py', os.path.join(cfg.basedir, cfg.expname, 'recording','voxurf_coarse.py'))
    elif args.sdf_mode == "voxurf_fine":
        import lib.voxurf_fine as Model
        copyfile('lib/voxurf_fine.py', os.path.join(cfg.basedir, cfg.expname, 'recording','voxurf_fine.py'))
    elif args.sdf_mode == "voxurf_womask_coarse":
        import lib.voxurf_womask_coarse as Model
        copyfile('lib/voxurf_womask_coarse.py', os.path.join(cfg.basedir, cfg.expname, 'recording','voxurf_womask_coarse.py'))
    elif args.sdf_mode == "voxurf_womask_fine":
        import lib.voxurf_womask_fine as Model
        copyfile('lib/voxurf_womask_fine.py', os.path.join(cfg.basedir, cfg.expname, 'recording','voxurf_womask_fine.py'))
    else:
        raise NameError
    # load images / poses / camera settings / data split
    data_dict = load_everything(args=args, cfg=cfg)
    # export scene bbox and camera poses in 3d for debugging and visualization
    if args.export_bbox_and_cams_only:
        logger.info('Export bbox and cameras...')
        xyz_min, xyz_max = compute_bbox_by_cam_frustrm(args=args, cfg=cfg, **data_dict)
        poses, HW, Ks, i_train = data_dict['poses'], data_dict['HW'], data_dict['Ks'], data_dict['i_train']
        near, far = data_dict['near'], data_dict['far']
        cam_lst = []
        for c2w, (H, W), K in zip(poses[i_train], HW[i_train], Ks[i_train]):
            rays_o, rays_d, viewdirs = Model.get_rays_of_a_view(
                H, W, K, c2w, cfg.data.ndc, inverse_y=cfg.data.inverse_y,
                flip_x=cfg.data.flip_x, flip_y=cfg.data.flip_y,)
            # Camera origin plus the four frustum corner directions.
            cam_o = rays_o[0,0].cpu().numpy()
            cam_d = rays_d[[0,0,-1,-1],[0,-1,0,-1]].cpu().numpy()
            cam_lst.append(np.array([cam_o, *(cam_o+cam_d*max(near, far*0.05))]))
        np.savez_compressed(args.export_bbox_and_cams_only,
                            xyz_min=xyz_min.cpu().numpy(), xyz_max=xyz_max.cpu().numpy(),
                            cam_lst=np.array(cam_lst))
        logger.info('done')
        sys.exit()
    if args.mesh_from_sdf:
        logger.info('Extracting mesh from sdf...')
        with torch.no_grad():
            # Prefer the surf-stage checkpoint; fall back to the fine stage.
            ckpt_path = os.path.join(cfg.basedir, cfg.expname, 'surf_last.tar')
            if os.path.exists(ckpt_path):
                new_kwargs = cfg.surf_model_and_render
            else:
                ckpt_path = os.path.join(cfg.basedir, cfg.expname, 'fine_last.tar')
                new_kwargs = cfg.fine_model_and_render
            model = utils.load_model(Model.Voxurf, ckpt_path, new_kwargs).to(device)
            prefix = args.prefix + '_' if args.prefix else ''
            prefix += args.suffix + '_' if args.suffix else ''
            gt_eval = 'dtu' in cfg.basedir
            validate_mesh(model, 512, threshold=0.0, prefix="{}final_mesh".format(prefix), world_space=True,
                          scale_mats_np=data_dict['scale_mats_np'], gt_eval=gt_eval, runtime=False, scene=args.scene, extract_color=args.extract_color)
        logger.info('done')
        sys.exit()
    # train
    if not args.render_only:
        train(args, cfg, data_dict)
    # load model for rendring
    if args.render_test or args.render_train or args.render_video or args.interpolate:
        if args.ft_path:
            ckpt_path = args.ft_path
            new_kwargs = cfg.fine_model_and_render
        elif hasattr(cfg, 'surf_train'):
            ckpt_path = os.path.join(cfg.basedir, cfg.expname, 'surf_last.tar')
            new_kwargs = cfg.surf_model_and_render
        else:
            ckpt_path = os.path.join(cfg.basedir, cfg.expname, 'fine_last.tar')
            new_kwargs = cfg.fine_model_and_render
        ckpt_name = ckpt_path.split('/')[-1][:-4]
        print(">>> Loading from {}".format(ckpt_path))
        model = utils.load_model(Model.Voxurf, ckpt_path, new_kwargs).to(device)
        stepsize = cfg.fine_model_and_render.stepsize
        render_viewpoints_kwargs = {
            'model': model,
            'ndc': cfg.data.ndc,
            'render_kwargs': {
                'near': data_dict['near'],
                'far': data_dict['far'],
                'bg': 1 if cfg.data.white_bkgd else 0,
                'stepsize': stepsize,
                'inverse_y': cfg.data.inverse_y,
                'flip_x': cfg.data.flip_x,
                'flip_y': cfg.data.flip_y,
                'render_grad': True,
                'render_depth': True,
                'render_in_out': True,
            },
        }
        if args.interpolate:
            # Render a camera path interpolated between two test views.
            img_idx_0, img_idx_1 = args.interpolate.split('_')
            img_idx_0 = int(img_idx_0)
            img_idx_1 = int(img_idx_1)
            savedir = os.path.join(cfg.basedir, cfg.expname, f'interpolate_{img_idx_0}_{img_idx_1}')
            interpolate_view(savedir, img_idx_0, img_idx_1,
                             render_poses=data_dict['poses'],
                             HW=data_dict['HW'][data_dict['i_test']][[0]].repeat(len(data_dict['render_poses']), 0),
                             Ks=data_dict['Ks'][data_dict['i_test']][[0]].repeat(len(data_dict['render_poses']), 0),
                             render_factor=args.render_video_factor,
                             **render_viewpoints_kwargs
                             )
    # render trainset and eval
    if args.render_train:
        testsavedir = os.path.join(cfg.basedir, cfg.expname, f'render_train_{ckpt_name}')
        os.makedirs(testsavedir, exist_ok=True)
        rgbs, disps = render_viewpoints(
            render_poses=data_dict['poses'][data_dict['i_train']],
            HW=data_dict['HW'][data_dict['i_train']],
            Ks=data_dict['Ks'][data_dict['i_train']],
            gt_imgs=[data_dict['images'][i].cpu().numpy() for i in data_dict['i_train']],
            # NOTE(review): unlike the test branch below, the full (unsplit)
            # masks tensor is passed here — confirm render_viewpoints accepts it.
            masks=data_dict['masks'],
            savedir=testsavedir,
            eval_ssim=args.eval_ssim, eval_lpips_alex=args.eval_lpips_alex, eval_lpips_vgg=args.eval_lpips_vgg,
            **render_viewpoints_kwargs)
        imageio.mimwrite(os.path.join(testsavedir, 'video.rgb.mp4'), utils.to8b(rgbs), fps=30, quality=8)
        imageio.mimwrite(os.path.join(testsavedir, 'video.disp.mp4'), utils.to8b(disps / np.max(disps)), fps=30, quality=8)
    # render testset and eval
    if args.render_test:
        testsavedir = os.path.join(cfg.basedir, cfg.expname, f'render_test_{ckpt_name}')
        os.makedirs(testsavedir, exist_ok=True)
        rgbs, disps = render_viewpoints(
            render_poses=data_dict['poses'][data_dict['i_test']],
            HW=data_dict['HW'][data_dict['i_test']],
            Ks=data_dict['Ks'][data_dict['i_test']],
            gt_imgs=[data_dict['images'][i].cpu().numpy() for i in data_dict['i_test']],
            masks=[data_dict['masks'][i].cpu().numpy() for i in data_dict['i_test']],
            savedir=testsavedir,
            eval_ssim=args.eval_ssim, eval_lpips_alex=args.eval_lpips_alex, eval_lpips_vgg=args.eval_lpips_vgg,
            **render_viewpoints_kwargs)
        imageio.mimwrite(os.path.join(testsavedir, 'video.rgb.mp4'), utils.to8b(rgbs), fps=30, quality=8)
        imageio.mimwrite(os.path.join(testsavedir, 'video.disp.mp4'), utils.to8b(disps / np.max(disps)), fps=30, quality=8)
    # render video
    if args.render_video:
        assert 'dtu' not in cfg.basedir, 'please try --interpolate for the DTU dataset.'
        testsavedir = os.path.join(cfg.basedir, cfg.expname, f'render_video_{ckpt_name}')
        os.makedirs(testsavedir, exist_ok=True)
        rgbs, disps = render_viewpoints(
            render_poses=torch.from_numpy(data_dict['render_poses']).cuda(),
            HW=data_dict['HW'][data_dict['i_test']][[0]].repeat(len(data_dict['render_poses']), 0),
            Ks=data_dict['Ks'][data_dict['i_test']][[0]].repeat(len(data_dict['render_poses']), 0),
            render_factor=args.render_video_factor,
            savedir=testsavedir,
            **render_viewpoints_kwargs)
        imageio.mimwrite(os.path.join(testsavedir, 'video.rgb.mp4'), utils.to8b(rgbs), fps=30, quality=8)
        imageio.mimwrite(os.path.join(testsavedir, 'video.disp.mp4'), utils.to8b(disps / np.max(disps)), fps=30, quality=8)
    logger.info('Done')
| 56,095 | 49.130474 | 210 | py |
Voxurf | Voxurf-main/tools/preprocess/convert_cameras.py | import numpy as np
# import matplotlib.image as mpimg
# import matplotlib.pyplot as plt
# import cv2
# import argparse
# from glob import glob
import torch
import os
import argparse
import glob
import imageio
def _load_colmap(basedir, convert=True, suffix=''):
    """Convert an LLFF/COLMAP 'poses_bounds{suffix}.npy' into a NeuS-style cameras.npz.

    Each output 'world_mat_i' is the 4x4 projection [K @ [R|t]; 0 0 0 1]
    mapping world coordinates to image coordinates.

    Args:
        basedir: directory holding poses_bounds{suffix}.npy; cameras.npz is
            written into the same directory.
        convert: if True, re-order pose axes from the LLFF to the OpenCV convention.
        suffix: optional suffix of the poses_bounds file name.
    """
    poses_arr = np.load(os.path.join(basedir, 'poses_bounds{}.npy'.format(suffix)))
    poses_arr = poses_arr[:, :15].reshape([-1, 3, 5]) # N x 3 x 5
    if convert:
        poses = poses_arr.transpose(1,2,0)
        # from llff to opencv
        poses = np.concatenate([poses[:, 1:2, :],
                                poses[:, 0:1, :],
                                -poses[:, 2:3, :],
                                poses[:, 3:4, :],
                                poses[:, 4:5, :]], 1)
        poses_arr = poses.transpose(2,0,1)
    # Camera-to-world [R|t]; the fifth column of the first pose holds (H, W, focal).
    poses = poses_arr[:,:,:4]
    hwf = poses_arr[0,:3,-1]
    H, W, focal = hwf
    # Pinhole intrinsics with the principal point at the image centre.
    K = np.array([
        [focal, 0, 0.5*W],
        [0, focal, 0.5*H],
        [0, 0, 1]
    ])
    # Invert cam2world -> world2cam: R_w2c = R_c2w^T, t_w2c = -R_w2c @ t_c2w.
    R = poses[:, :3, :3].transpose(0, 2, 1) # (B, 3, 3)
    t = -torch.bmm(torch.from_numpy(R), torch.from_numpy(poses[:, :3, 3:])).numpy() # (B, 3, 1)
    bottom = np.repeat(np.array([0,0,0,1.]).reshape([1, 1, 4]), R.shape[0], axis=0)
    w2c0 = np.concatenate([R, t], -1)
    # w2c = np.concatenate([w2c0, bottom], 1)
    # P = np.matmul(K, w2c)
    Ks = np.repeat(K[None,...], R.shape[0], axis=0)
    P0 = torch.bmm(torch.from_numpy(Ks), torch.from_numpy(w2c0)).numpy()
    P = np.concatenate([P0, bottom], 1)
    # from opencv to opengl
    # aa = np.linalg.inv(w2c) # the same as poses
    camera_dict = {'world_mat_%d' % idx: P[idx] for idx in range(len(P))}
    np.savez(os.path.join(basedir, 'cameras.npz'), **camera_dict)
def blendedmvs_to_NeuS(basedir):
    """Convert a BlendedMVS scene (pose/*.txt + rgb/*.png + intrinsics.txt) to cameras.npz.

    Pose files hold camera-to-world matrices; they are inverted and composed
    with the shared intrinsics so each output 'world_mat_i' is the 4x4
    projection [K @ [R|t]; 0 0 0 1] expected by the NeuS/IDR loaders.

    BUGFIX: removed a leftover `import ipdb; ipdb.set_trace()` breakpoint
    (and its debug print) that unconditionally halted the conversion.
    """
    pose_paths = sorted(glob.glob(os.path.join(basedir, 'pose', '*txt')))
    rgb_paths = sorted(glob.glob(os.path.join(basedir, 'rgb', '*png')))
    all_poses = []
    all_imgs = []
    i_split = [[], []]
    for i, (pose_path, rgb_path) in enumerate(zip(pose_paths, rgb_paths)):
        # The first character of the file name encodes the train(0)/test(1) split.
        i_set = int(os.path.split(rgb_path)[-1][0])
        all_imgs.append((imageio.imread(rgb_path) / 255.).astype(np.float32))
        all_poses.append(np.loadtxt(pose_path).astype(np.float32))
        i_split[i_set].append(i)
    imgs = np.stack(all_imgs, 0)
    poses = np.stack(all_poses, 0)
    i_split.append(i_split[-1])
    path_intrinsics = os.path.join(basedir, 'intrinsics.txt')
    H, W = imgs[0].shape[:2]
    K = np.loadtxt(path_intrinsics)
    focal = float(K[0,0])
    # Invert cam2world -> world2cam: R_w2c = R_c2w^T, t_w2c = -R_w2c @ t_c2w.
    R = poses[:, :3, :3].transpose(0, 2, 1) # (B, 3, 3)
    t = -torch.bmm(torch.from_numpy(R), torch.from_numpy(poses[:, :3, 3:])).numpy() # (B, 3, 1)
    bottom = np.repeat(np.array([0,0,0,1.]).reshape([1, 1, 4]), R.shape[0], axis=0)
    w2c0 = np.concatenate([R, t], -1)
    Ks = np.repeat(K[None,:3,:3], R.shape[0], axis=0)
    P0 = torch.bmm(torch.from_numpy(Ks).float(), torch.from_numpy(w2c0).float()).numpy()
    P = np.concatenate([P0, bottom], 1)
    camera_dict = {'world_mat_%d' % idx: P[idx] for idx in range(len(P))}
    np.savez(os.path.join(basedir, 'cameras.npz'), **camera_dict)
    print("done")
def MVS_to_NeuS(basedir, cam_dir='cams'):
    """Convert MVSNet-style per-view cam files into a single NeuS cameras.npz.

    MVS extrinsics are already world-to-camera, so no inversion is needed;
    each output 'world_mat_i' is the 4x4 matrix [K @ [R|t]; 0 0 0 1].
    """
    num_views = len(os.listdir(os.path.join(basedir, cam_dir)))
    extrinsics_list = []
    intrinsics = None
    for view_idx in range(num_views):
        cam_path = os.path.join(basedir, cam_dir, "{:>08d}_cam.txt".format(view_idx))
        intrinsics, extrinsics, _ = read_cam_file(cam_path)
        extrinsics_list.append(extrinsics[None, ...])
    all_w2c = np.vstack(extrinsics_list)
    # MVS extrinsic is world2cam already, so [R|t] is taken directly.
    rot = all_w2c[:, :3, :3]
    trans = all_w2c[:, :3, 3:]
    w2c_3x4 = np.concatenate([rot, trans], -1)
    Ks = np.repeat(intrinsics[None, ...], rot.shape[0], axis=0)
    proj_top = torch.bmm(torch.from_numpy(Ks), torch.from_numpy(w2c_3x4)).numpy()
    last_row = np.repeat(np.array([0, 0, 0, 1.]).reshape([1, 1, 4]), rot.shape[0], axis=0)
    proj = np.concatenate([proj_top, last_row], 1)
    camera_dict = {'world_mat_%d' % idx: proj[idx] for idx in range(len(proj))}
    np.savez(os.path.join(basedir, 'cameras.npz'), **camera_dict)
    print('done, saved at', os.path.join(basedir, 'cameras.npz'))
def TAT0_to_NeuS(basedir, cam_dir='pose'):
    """Convert Tanks-and-Temples-style data (pose txts + intrinsics.txt) to cameras.npz.

    Pose files store camera-to-world matrices; they are inverted to
    world-to-camera and composed with the shared intrinsics so each output
    'world_mat_i' is the 4x4 projection [K @ [R|t]; 0 0 0 1].

    BUGFIX: the cam_dir argument was previously ignored ('pose' was
    hard-coded in the glob); it is now honored, with the same default.
    """
    poses = []
    pose_paths = sorted(glob.glob(os.path.join(basedir, cam_dir, '*txt')))
    for i, file in enumerate(pose_paths):
        pose = np.loadtxt(file).astype(np.float32)
        # pose is cam2world; invert to obtain world2cam.
        poses.append(np.linalg.inv(pose)[None,...])
    poses = np.vstack(poses).astype(np.float32)
    path_intrinsics = os.path.join(basedir, 'intrinsics.txt')
    K = np.loadtxt(path_intrinsics)[:3,:3].astype(np.float32)
    # Extrinsics are world2cam already after the inversion above.
    R = poses[:,:3,:3]
    t = poses[:,:3,3:]
    bottom = np.repeat(np.array([0,0,0,1.]).reshape([1, 1, 4]), R.shape[0], axis=0)
    w2c0 = np.concatenate([R, t], -1)
    Ks = np.repeat(K[None,...], R.shape[0], axis=0)
    P0 = torch.bmm(torch.from_numpy(Ks), torch.from_numpy(w2c0)).numpy()
    P = np.concatenate([P0, bottom], 1)
    camera_dict = {'world_mat_%d' % idx: P[idx] for idx in range(len(P))}
    np.savez(os.path.join(basedir, 'cameras.npz'), **camera_dict)
    print('done, saved at', os.path.join(basedir, 'cameras.npz'))
def NeuS_to_MVS(basedir):
    """Export LLFF 'poses_bounds.npy' cameras as per-view MVSNet cam text files.

    Writes one '{i:08d}_cam.txt' per view into basedir/cams_1, each holding
    the 4x4 world-to-camera extrinsic, the 3x3 intrinsic, and a shared
    near/far depth range derived from the per-view bounds.
    """
    poses_arr = np.load(os.path.join(basedir, 'poses_bounds.npy'))
    bds = poses_arr[:, -2:].transpose([1,0])
    # Widen the global depth range slightly, ignoring non-positive bounds.
    near, far = bds[bds[:,0] > 0, 0].min() * 0.8, bds[bds[:,1] > 0, 1].max() * 1.2
    poses_arr = poses_arr[:, :15].reshape([-1, 3, 5]) # N x 3 x 5
    poses = poses_arr.transpose(1,2,0)
    # from llff to opencv
    poses = np.concatenate([poses[:, 1:2, :],
                            poses[:, 0:1, :],
                            -poses[:, 2:3, :],
                            poses[:, 3:4, :],
                            poses[:, 4:5, :]], 1)
    poses_arr = poses.transpose(2,0,1)
    # camera to world
    poses = poses_arr[:,:,:4]
    hwf = poses_arr[0,:3,-1]
    H, W, focal = hwf
    # Pinhole intrinsics with the principal point at the image centre.
    K = np.array([
        [focal, 0, 0.5*W],
        [0, focal, 0.5*H],
        [0, 0, 1]
    ])
    # Invert cam2world -> world2cam: R_w2c = R_c2w^T, t_w2c = -R_w2c @ t_c2w.
    R = poses[:, :3, :3].transpose(0, 2, 1) # (B, 3, 3)
    t = -torch.bmm(torch.from_numpy(R), torch.from_numpy(poses[:, :3, 3:])).numpy() # (B, 3, 1)
    bottom = np.repeat(np.array([0,0,0,1.]).reshape([1, 1, 4]), R.shape[0], axis=0)
    w2c0 = np.concatenate([R, t], -1)
    P = np.concatenate([w2c0, bottom], 1)
    intrinsics, extrinsics = K, P
    if not os.path.exists(os.path.join(basedir, 'cams_1')):
        os.mkdir(os.path.join(basedir, 'cams_1'))
    for i in range(poses.shape[0]):
        file = "{:>08d}_cam.txt".format(i)
        camera_file = os.path.join(basedir, 'cams_1', file)
        with open(camera_file, "w") as f:
            f.write("extrinsic\n")
            for l in extrinsics[i]:
                seq = ["{:.6f} ".format(e) for e in l] + ['\n']
                f.writelines( seq )
            f.write("\nintrinsic\n")
            for l in intrinsics:
                seq = ["{:.6f} ".format(e) for e in l] + ['\n']
                f.writelines(seq)
            f.write("\n{:.2f} {:.2f}\n".format(near, far))
def read_cam_file(filename):
    """Read camera intrinsics, extrinsics, and depth values (min, max) from text file

    Args:
        filename: cam text file path string

    Returns:
        Tuple with intrinsics matrix (3x3), extrinsics matrix (4x4), and depth params vector (min and max) if exists
    """
    with open(filename) as fh:
        content = [row.rstrip() for row in fh]
    # extrinsics: lines [1, 5) hold the 4x4 world-to-camera matrix
    extrinsics = np.array(' '.join(content[1:5]).split(), dtype=np.float32).reshape(4, 4)
    # intrinsics: lines [7, 10) hold the 3x3 matrix
    intrinsics = np.array(' '.join(content[7:10]).split(), dtype=np.float32).reshape(3, 3)
    # depth min and max live on line 11 when present
    if len(content) >= 12:
        depth_params = np.array(content[11].split(), dtype=np.float32)
    else:
        depth_params = np.empty(0)
    return intrinsics, extrinsics, depth_params
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--source_dir', type=str, default='', help='data source folder for preprocess')
    parser.add_argument('--mode', type=str, default='colmap', help='what kind of source format to convert')
    opt = parser.parse_args()
    # Dispatch to the converter matching the requested source format.
    if opt.mode == 'colmap':
        _load_colmap(opt.source_dir, True)
    elif opt.mode == 'mvs2neus':
        MVS_to_NeuS(opt.source_dir)
    elif opt.mode == 'tat02neus':
        TAT0_to_NeuS(opt.source_dir)
    elif opt.mode == 'neus2mvs':
        NeuS_to_MVS(opt.source_dir)
    elif opt.mode == 'blendedmvs2neus':
        blendedmvs_to_NeuS(opt.source_dir)
    else:
        raise NotImplementedError
| 9,091 | 38.021459 | 116 | py |
Voxurf | Voxurf-main/lib/load_dtu.py | import torch
import torch.nn.functional as F
import cv2 as cv
import numpy as np
import os
from glob import glob
from icecream import ic
from scipy.spatial.transform import Rotation as Rot
from scipy.spatial.transform import Slerp
import imageio
# This function is borrowed from IDR: https://github.com/lioryariv/idr
def load_K_Rt_from_P(filename, P=None):
    """Decompose a 3x4 projection matrix into 4x4 intrinsics and a cam2world pose.

    If P is None, the projection matrix is parsed from `filename` (space
    separated, one row per line; a single header line is skipped when present).
    Borrowed from IDR: https://github.com/lioryariv/idr
    """
    if P is None:
        raw_lines = open(filename).read().splitlines()
        # A 4-line file starts with a header line; drop it.
        if len(raw_lines) == 4:
            raw_lines = raw_lines[1:]
        rows = []
        for line in raw_lines:
            cells = line.split(" ")
            rows.append([cells[0], cells[1], cells[2], cells[3]])
        P = np.asarray(rows).astype(np.float32).squeeze()

    # OpenCV returns K, R and the homogeneous camera centre t (plus extras).
    K, R, t = cv.decomposeProjectionMatrix(P)[:3]

    intrinsics = np.eye(4)
    intrinsics[:3, :3] = K / K[2, 2]  # normalize so K[2, 2] == 1

    pose = np.eye(4, dtype=np.float32)
    pose[:3, :3] = R.transpose()
    pose[:3, 3] = (t[:3] / t[3])[:, 0]  # de-homogenize the camera centre

    return intrinsics, pose
def load_dtu_data(basedir, normalize=True, reso_level=2, mask=True, white_bg=True):
    """Load a DTU-style scene (images, optional masks, cameras) for training.

    Args:
        basedir: scene directory with image/ (or rgb/), optional mask/, and
            a cameras npz file.
        normalize: use 'cameras_sphere.npz' (poses normalized into the unit
            sphere) instead of 'cameras_large.npz'.
        reso_level: integer down-sampling factor for images and intrinsics.
        mask: if True, composite images against the background using the masks
            (requires mask files to be present).
        white_bg: background color used when mask is True (1=white, 0=black).

    Returns:
        imgs, poses, render_poses, [H, W, focal], K, i_split,
        scale_mat (first normalization matrix, or None when normalize=False),
        masks (or None when no mask files exist).

    BUGFIX: previously `np.stack(all_masks, 0)` raised on scenes without mask
    files (the later `if masks is not None` check shows None was intended),
    and `scale_mats_np[0]` in the return crashed when normalize=False.
    """
    rgb_paths = sorted(glob(os.path.join(basedir, 'image', '*png')))
    if len(rgb_paths) == 0:
        rgb_paths = sorted(glob(os.path.join(basedir, 'image', '*jpg')))
    if len(rgb_paths) == 0:
        rgb_paths = sorted(glob(os.path.join(basedir, 'rgb', '*png')))
    mask_paths = sorted(glob(os.path.join(basedir, 'mask', '*png')))
    if len(mask_paths) == 0:
        mask_paths = sorted(glob(os.path.join(basedir, 'mask', '*jpg')))

    render_cameras_name = 'cameras_sphere.npz' if normalize else 'cameras_large.npz'
    camera_dict = np.load(os.path.join(basedir, render_cameras_name))
    world_mats_np = [camera_dict['world_mat_%d' % idx].astype(np.float32) for idx in range(len(rgb_paths))]
    if normalize:
        scale_mats_np = [camera_dict['scale_mat_%d' % idx].astype(np.float32) for idx in range(len(rgb_paths))]
    else:
        scale_mats_np = None

    all_intrinsics = []
    all_poses = []
    all_imgs = []
    all_masks = []
    for i, (world_mat, im_name) in enumerate(zip(world_mats_np, rgb_paths)):
        # Apply the normalization matrix before decomposing the projection.
        if normalize:
            P = world_mat @ scale_mats_np[i]
        else:
            P = world_mat
        P = P[:3, :4]
        intrinsics, pose = load_K_Rt_from_P(None, P)
        all_intrinsics.append(intrinsics)
        all_poses.append(pose)
        if len(mask_paths) > 0:
            mask_ = (imageio.imread(mask_paths[i]) / 255.).astype(np.float32)
            if mask_.ndim == 3:
                all_masks.append(mask_[...,:3])
            else:
                all_masks.append(mask_[...,None])
        all_imgs.append((imageio.imread(im_name) / 255.).astype(np.float32))
    imgs = np.stack(all_imgs, 0)
    poses = np.stack(all_poses, 0)
    H, W = imgs[0].shape[:2]
    K = all_intrinsics[0]
    focal = all_intrinsics[0][0,0]
    print("Date original shape: ", H, W)
    # Keep masks as None when the scene ships no mask files.
    masks = np.stack(all_masks, 0) if len(all_masks) > 0 else None
    if mask:
        assert len(mask_paths) > 0
        bg = 1. if white_bg else 0.
        imgs = imgs * masks + bg * (1 - masks)

    if reso_level > 1:
        H, W = int(H / reso_level), int(W / reso_level)
        imgs = F.interpolate(torch.from_numpy(imgs).permute(0,3,1,2), size=(H, W)).permute(0,2,3,1).numpy()
        if masks is not None:
            masks = F.interpolate(torch.from_numpy(masks).permute(0,3,1,2), size=(H, W)).permute(0,2,3,1).numpy()
        K[:2] /= reso_level
        focal /= reso_level

    # this is to randomly fetch images.
    i_test = [8, 13, 16, 21, 26, 31, 34]
    if len(imgs) * 0.1 >= 8:
        print("add 56 to test set")
        i_test.append(56)
    i_test = [i for i in i_test if i < len(imgs)]
    i_val = i_test
    i_train = list(set(np.arange(len(imgs))) - set(i_test))
    i_split = [np.array(i_train), np.array(i_val), np.array(i_test)]
    render_poses = poses[i_split[-1]]
    scale_mat = scale_mats_np[0] if scale_mats_np is not None else None
    return imgs, poses, render_poses, [H, W, focal], K, i_split, scale_mat, masks
class Dataset:
    def __init__(self, conf):
        """Load a NeuS-style DTU scene described by `conf`.

        Expects conf keys: data_dir, render_cameras_name, object_cameras_name,
        and optionally camera_outside_sphere / scale_mat_scale.
        """
        super(Dataset, self).__init__()
        print('Load data: Begin')
        self.device = torch.device('cuda')
        self.conf = conf

        self.data_dir = conf.get_string('data_dir')
        self.render_cameras_name = conf.get_string('render_cameras_name')
        self.object_cameras_name = conf.get_string('object_cameras_name')

        self.camera_outside_sphere = conf.get_bool('camera_outside_sphere', default=True)
        self.scale_mat_scale = conf.get_float('scale_mat_scale', default=1.1)

        camera_dict = np.load(os.path.join(self.data_dir, self.render_cameras_name))
        self.camera_dict = camera_dict
        self.images_lis = sorted(glob(os.path.join(self.data_dir, 'image/*.png')))
        self.n_images = len(self.images_lis)
        # NOTE(review): cv.imread returns BGR channel order and pixels are
        # divided by 256 (not 255) — confirm downstream consumers expect this.
        self.images_np = np.stack([cv.imread(im_name) for im_name in self.images_lis]) / 256.0
        self.masks_lis = sorted(glob(os.path.join(self.data_dir, 'mask/*.png')))
        self.masks_np = np.stack([cv.imread(im_name) for im_name in self.masks_lis]) / 256.0

        # world_mat is a projection matrix from world to image
        self.world_mats_np = [camera_dict['world_mat_%d' % idx].astype(np.float32) for idx in range(self.n_images)]

        self.scale_mats_np = []

        # scale_mat: used for coordinate normalization, we assume the scene to render is inside a unit sphere at origin.
        self.scale_mats_np = [camera_dict['scale_mat_%d' % idx].astype(np.float32) for idx in range(self.n_images)]

        self.intrinsics_all = []
        self.pose_all = []

        for scale_mat, world_mat in zip(self.scale_mats_np, self.world_mats_np):
            # Normalize, then decompose K @ [R|t] into intrinsics and cam2world pose.
            P = world_mat @ scale_mat
            P = P[:3, :4]
            intrinsics, pose = load_K_Rt_from_P(None, P)
            self.intrinsics_all.append(torch.from_numpy(intrinsics).float())
            self.pose_all.append(torch.from_numpy(pose).float())

        self.images = torch.from_numpy(self.images_np.astype(np.float32)).cpu()  # [n_images, H, W, 3]
        self.masks = torch.from_numpy(self.masks_np.astype(np.float32)).cpu()  # [n_images, H, W, 3]
        self.intrinsics_all = torch.stack(self.intrinsics_all).to(self.device)  # [n_images, 4, 4]
        self.intrinsics_all_inv = torch.inverse(self.intrinsics_all)  # [n_images, 4, 4]
        self.focal = self.intrinsics_all[0][0, 0]
        self.pose_all = torch.stack(self.pose_all).to(self.device)  # [n_images, 4, 4]
        self.H, self.W = self.images.shape[1], self.images.shape[2]
        self.image_pixels = self.H * self.W

        object_bbox_min = np.array([-1.01, -1.01, -1.01, 1.0])
        object_bbox_max = np.array([ 1.01, 1.01, 1.01, 1.0])
        # Object scale mat: region of interest to **extract mesh**
        object_scale_mat = np.load(os.path.join(self.data_dir, self.object_cameras_name))['scale_mat_0']
        # Map the bbox corners from object space into normalized scene space.
        object_bbox_min = np.linalg.inv(self.scale_mats_np[0]) @ object_scale_mat @ object_bbox_min[:, None]
        object_bbox_max = np.linalg.inv(self.scale_mats_np[0]) @ object_scale_mat @ object_bbox_max[:, None]
        self.object_bbox_min = object_bbox_min[:3, 0]
        self.object_bbox_max = object_bbox_max[:3, 0]

        print('Load data: End')
def near_far_from_sphere(self, rays_o, rays_d):
a = torch.sum(rays_d**2, dim=-1, keepdim=True)
b = 2.0 * torch.sum(rays_o * rays_d, dim=-1, keepdim=True)
mid = 0.5 * (-b) / a
near = mid - 1.0
far = mid + 1.0
return near, far
def image_at(self, idx, resolution_level):
img = cv.imread(self.images_lis[idx])
return (cv.resize(img, (self.W // resolution_level, self.H // resolution_level))).clip(0, 255) | 7,606 | 41.497207 | 120 | py |
Voxurf | Voxurf-main/lib/dvgo_ori.py | import os
import time
import functools
import numpy as np
import cv2
import mcubes
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.nn.functional as F
'''Model'''
class DirectVoxGO(torch.nn.Module):
    def __init__(self, xyz_min, xyz_max,
                 num_voxels=0, num_voxels_base=0,
                 alpha_init=None,
                 nearest=False, pre_act_density=False, in_act_density=False,
                 mask_cache_path=None, mask_cache_thres=1e-3,
                 fast_color_thres=0,
                 rgbnet_dim=0, rgbnet_direct=False, rgbnet_full_implicit=False,
                 rgbnet_depth=3, rgbnet_width=128,
                 posbase_pe=5, viewbase_pe=4,
                 **kwargs):
        """Direct voxel-grid radiance field.

        Args:
            xyz_min, xyz_max: world-space bounding box of the modeled scene.
            num_voxels: target voxel count of the current grid.
            num_voxels_base: reference voxel count defining the base voxel
                size (used to compute voxel_size_ratio).
            alpha_init: desired initial per-step alpha; converted to a density
                bias shift via the inverse-sigmoid below.
            nearest / pre_act_density / in_act_density: alternative density
                interpolation modes (the latter two print quality warnings).
            mask_cache_path / mask_cache_thres: optional coarse-geometry cache
                used to mark known free space.
            fast_color_thres: alpha threshold below which color is skipped.
            rgbnet_dim: <= 0 uses a plain RGB voxel grid (coarse stage);
                otherwise a feature voxel grid plus a shallow MLP (fine stage).
            rgbnet_direct / rgbnet_full_implicit / rgbnet_depth / rgbnet_width:
                MLP configuration for the fine-stage color head.
            posbase_pe / viewbase_pe: positional-encoding frequency counts for
                position and view direction.
            **kwargs: ignored extras (tolerates stale checkpoint kwargs).
        """
        super(DirectVoxGO, self).__init__()
        self.register_buffer('xyz_min', torch.Tensor(xyz_min))
        self.register_buffer('xyz_max', torch.Tensor(xyz_max))
        self.fast_color_thres = fast_color_thres
        self.nearest = nearest
        self.pre_act_density = pre_act_density
        self.in_act_density = in_act_density
        if self.pre_act_density:
            print('dvgo: using pre_act_density may results in worse quality !!')
        if self.in_act_density:
            print('dvgo: using in_act_density may results in worse quality !!')

        # determine based grid resolution
        self.num_voxels_base = num_voxels_base
        self.voxel_size_base = ((self.xyz_max - self.xyz_min).prod() / self.num_voxels_base).pow(1/3)

        # determine the density bias shift
        # act_shift is the inverse sigmoid of alpha_init: log(a / (1 - a)).
        self.alpha_init = alpha_init
        self.act_shift = np.log(1/(1-alpha_init) - 1)
        print('dvgo: set density bias shift to', self.act_shift)

        # determine init grid resolution
        self._set_grid_resolution(num_voxels)

        # init density voxel grid
        self.density = torch.nn.Parameter(torch.zeros([1, 1, *self.world_size]))

        # init color representation
        self.rgbnet_kwargs = {
            'rgbnet_dim': rgbnet_dim, 'rgbnet_direct': rgbnet_direct,
            'rgbnet_full_implicit': rgbnet_full_implicit,
            'rgbnet_depth': rgbnet_depth, 'rgbnet_width': rgbnet_width,
            'posbase_pe': posbase_pe, 'viewbase_pe': viewbase_pe,
        }
        self.rgbnet_full_implicit = rgbnet_full_implicit
        if rgbnet_dim <= 0:
            # color voxel grid (coarse stage)
            self.k0_dim = 3
            self.k0 = torch.nn.Parameter(torch.zeros([1, self.k0_dim, *self.world_size]))
            self.rgbnet = None
        else:
            # feature voxel grid + shallow MLP  (fine stage)
            if self.rgbnet_full_implicit:
                self.k0_dim = 0
            else:
                self.k0_dim = rgbnet_dim
            self.k0 = torch.nn.Parameter(torch.zeros([1, self.k0_dim, *self.world_size]))
            self.rgbnet_direct = rgbnet_direct
            # Precomputed PE frequency bands for position and view direction.
            self.register_buffer('posfreq', torch.FloatTensor([(2**i) for i in range(posbase_pe)]))
            self.register_buffer('viewfreq', torch.FloatTensor([(2**i) for i in range(viewbase_pe)]))
            # MLP input: encoded position + encoded viewdir (+ grid features).
            dim0 = (3+3*posbase_pe*2) + (3+3*viewbase_pe*2)
            if self.rgbnet_full_implicit:
                pass
            elif rgbnet_direct:
                dim0 += self.k0_dim
            else:
                # first 3 feature channels are reserved as a direct RGB base
                dim0 += self.k0_dim-3
            self.rgbnet = nn.Sequential(
                nn.Linear(dim0, rgbnet_width), nn.ReLU(inplace=True),
                *[
                    nn.Sequential(nn.Linear(rgbnet_width, rgbnet_width), nn.ReLU(inplace=True))
                    for _ in range(rgbnet_depth-2)
                ],
                nn.Linear(rgbnet_width, 3),
            )
            nn.init.constant_(self.rgbnet[-1].bias, 0)
            print('dvgo: feature voxel grid', self.k0.shape)
            print('dvgo: mlp', self.rgbnet)

        # Using the coarse geometry if provided (used to determine known free space and unknown space)
        self.mask_cache_path = mask_cache_path
        self.mask_cache_thres = mask_cache_thres
        if mask_cache_path is not None and mask_cache_path:
            self.mask_cache = MaskCache(
                path=mask_cache_path,
                mask_cache_thres=mask_cache_thres).to(self.xyz_min.device)
            self._set_nonempty_mask()
        else:
            self.mask_cache = None
            self.nonempty_mask = None
def inside_sphere(self):
self_grid_xyz = torch.stack(torch.meshgrid(
torch.linspace(self.xyz_min[0], self.xyz_max[0], self.density.shape[2]),
torch.linspace(self.xyz_min[1], self.xyz_max[1], self.density.shape[3]),
torch.linspace(self.xyz_min[2], self.xyz_max[2], self.density.shape[4]),
), -1)
sphere_mask = (torch.linalg.norm(self_grid_xyz, ord=2, dim=-1, keepdim=True) < 1.0).reshape(*self.density.shape)
self.density[~sphere_mask] = -100
def _set_grid_resolution(self, num_voxels):
# Determine grid resolution
self.num_voxels = num_voxels
self.voxel_size = ((self.xyz_max - self.xyz_min).prod() / num_voxels).pow(1/3)
self.world_size = ((self.xyz_max - self.xyz_min) / self.voxel_size).long()
self.voxel_size_ratio = self.voxel_size / self.voxel_size_base
print('dvgo: voxel_size ', self.voxel_size)
print('dvgo: world_size ', self.world_size)
print('dvgo: voxel_size_base ', self.voxel_size_base)
print('dvgo: voxel_size_ratio', self.voxel_size_ratio)
def get_kwargs(self):
return {
'xyz_min': self.xyz_min.cpu().numpy(),
'xyz_max': self.xyz_max.cpu().numpy(),
'num_voxels': self.num_voxels,
'num_voxels_base': self.num_voxels_base,
'alpha_init': self.alpha_init,
'nearest': self.nearest,
'pre_act_density': self.pre_act_density,
'in_act_density': self.in_act_density,
'mask_cache_path': self.mask_cache_path,
'mask_cache_thres': self.mask_cache_thres,
'fast_color_thres': self.fast_color_thres,
**self.rgbnet_kwargs,
}
def get_MaskCache_kwargs(self):
return {
'xyz_min': self.xyz_min.cpu().numpy(),
'xyz_max': self.xyz_max.cpu().numpy(),
'act_shift': self.act_shift,
'voxel_size_ratio': self.voxel_size_ratio,
'nearest': self.nearest,
'pre_act_density': self.pre_act_density,
'in_act_density': self.in_act_density,
}
    @torch.no_grad()
    def _set_nonempty_mask(self):
        # Find grid points that is inside nonempty (occupied) space.
        # Queries the coarse-stage MaskCache at every voxel coordinate, caches
        # the boolean result, and forces known-free voxels to density -100
        # (alpha ~ 0) so they never contribute to rendering again.
        self_grid_xyz = torch.stack(torch.meshgrid(
            torch.linspace(self.xyz_min[0], self.xyz_max[0], self.density.shape[2]),
            torch.linspace(self.xyz_min[1], self.xyz_max[1], self.density.shape[3]),
            torch.linspace(self.xyz_min[2], self.xyz_max[2], self.density.shape[4]),
        ), -1)
        nonempty_mask = self.mask_cache(self_grid_xyz)[None,None].contiguous()
        # register_buffer only on the first call; afterwards assign in place so
        # the buffer name is not registered twice.
        if hasattr(self, 'nonempty_mask'):
            self.nonempty_mask = nonempty_mask
        else:
            self.register_buffer('nonempty_mask', nonempty_mask)
        self.density[~self.nonempty_mask] = -100
    @torch.no_grad()
    def maskout_near_cam_vox(self, cam_o, near):
        # Force voxels closer than `near` to ANY camera origin to density -100
        # (alpha ~ 0); geometry that close to a camera is floater noise.
        self_grid_xyz = torch.stack(torch.meshgrid(
            torch.linspace(self.xyz_min[0], self.xyz_max[0], self.density.shape[2]),
            torch.linspace(self.xyz_min[1], self.xyz_max[1], self.density.shape[3]),
            torch.linspace(self.xyz_min[2], self.xyz_max[2], self.density.shape[4]),
        ), -1)
        # Distance from each grid point to its nearest camera origin; cameras
        # are processed 100 at a time to bound peak memory.
        nearest_dist = torch.stack([
            (self_grid_xyz.unsqueeze(-2) - co).pow(2).sum(-1).sqrt().amin(-1)
            for co in cam_o.split(100)  # for memory saving
        ]).amin(0)
        self.density[nearest_dist[None,None] <= near] = -100
    @torch.no_grad()
    def scale_volume_grid(self, num_voxels):
        """Trilinearly resample the density and feature grids to a new voxel budget."""
        print('dvgo: scale_volume_grid start')
        ori_world_size = self.world_size
        self._set_grid_resolution(num_voxels)
        print('dvgo: scale_volume_grid scale world_size from', ori_world_size, 'to', self.world_size)
        self.density = torch.nn.Parameter(
            F.interpolate(self.density.data, size=tuple(self.world_size), mode='trilinear', align_corners=True))
        if self.k0_dim > 0:
            self.k0 = torch.nn.Parameter(
                F.interpolate(self.k0.data, size=tuple(self.world_size), mode='trilinear', align_corners=True))
        else:
            # A zero-channel grid cannot be interpolated; allocate a fresh one.
            self.k0 = torch.nn.Parameter(torch.zeros([1, self.k0_dim, *self.world_size]))
        if self.mask_cache is not None:
            # Resampling invalidates the cached mask; recompute at the new resolution.
            self._set_nonempty_mask()
        print('dvgo: scale_volume_grid finish')
    def voxel_count_views(self, rays_o_tr, rays_d_tr, imsz, near, far, stepsize, downrate=1, irregular_shape=False):
        """Count, per voxel, how many training views' rays pass through it.

        Uses an autograd trick: sampling an all-ones grid along each ray and
        backprop-ing the sum leaves nonzero gradient (accumulated trilinear
        interpolation weights) exactly in the voxels the rays touched.
        """
        print('dvgo: voxel_count_views start')
        eps_time = time.time()
        N_samples = int(np.linalg.norm(np.array(self.density.shape[2:])+1) / stepsize) + 1
        rng = torch.arange(N_samples)[None].float()
        count = torch.zeros_like(self.density.detach())
        device = rng.device
        for rays_o_, rays_d_ in zip(rays_o_tr.split(imsz), rays_d_tr.split(imsz)):
            # Fresh ones-grid per view; its .grad accumulates this view's hits.
            ones = torch.ones_like(self.density).requires_grad_()
            if irregular_shape:
                rays_o_ = rays_o_.split(10000)
                rays_d_ = rays_d_.split(10000)
            else:
                # Optionally subsample the pixel grid before flattening.
                rays_o_ = rays_o_[::downrate, ::downrate].to(device).flatten(0,-2).split(10000)
                rays_d_ = rays_d_[::downrate, ::downrate].to(device).flatten(0,-2).split(10000)
            for rays_o, rays_d in zip(rays_o_, rays_d_):
                # Ray/AABB intersection; 1e-6 substitution avoids div-by-zero
                # on axis-parallel rays.
                vec = torch.where(rays_d==0, torch.full_like(rays_d, 1e-6), rays_d)
                rate_a = (self.xyz_max - rays_o) / vec
                rate_b = (self.xyz_min - rays_o) / vec
                t_min = torch.minimum(rate_a, rate_b).amax(-1).clamp(min=near, max=far)
                t_max = torch.maximum(rate_a, rate_b).amin(-1).clamp(min=near, max=far)
                step = stepsize * self.voxel_size * rng
                interpx = (t_min[...,None] + step/rays_d.norm(dim=-1,keepdim=True))
                rays_pts = rays_o[...,None,:] + rays_d[...,None,:] * interpx[...,None]
                self.grid_sampler(rays_pts, ones).sum().backward()
            with torch.no_grad():
                # Threshold 1 on the accumulated interpolation weights filters
                # voxels that were only barely grazed.
                count += (ones.grad > 1)
        eps_time = time.time() - eps_time
        print('dvgo: voxel_count_views finish (eps time:', eps_time, 'sec)')
        return count
def density_total_variation(self):
tv = total_variation(self.activate_density(self.density, 1), self.nonempty_mask)
return tv
def k0_total_variation(self):
if self.rgbnet is not None:
v = self.k0
else:
v = torch.sigmoid(self.k0)
return total_variation(v, self.nonempty_mask)
def activate_density(self, density, interval=None):
interval = interval if interval is not None else self.voxel_size_ratio
return 1 - torch.exp(-F.softplus(density + self.act_shift) * interval)
    def grid_sampler(self, xyz, *grids, mode=None, align_corners=True):
        '''Wrapper for the interp operation.

        Interpolates each of `grids` (shape [1, C, X, Y, Z]) at world-space
        points `xyz` ([..., 3]); returns a tensor per grid ([..., C], squeezed),
        unwrapped from the list when only one grid is queried.
        '''
        if mode is None:
            # bilinear is actually trilinear if 5D input is given to grid_sample
            mode = 'nearest' if self.nearest else 'bilinear'
        shape = xyz.shape[:-1]
        xyz = xyz.reshape(1,1,1,-1,3)
        # Normalize to [-1, 1] and flip xyz -> zyx, as F.grid_sample expects.
        ind_norm = ((xyz - self.xyz_min) / (self.xyz_max - self.xyz_min)).flip((-1,)) * 2 - 1
        ret_lst = [
            # TODO: use `rearrange' to make it readable
            F.grid_sample(grid, ind_norm, mode=mode, align_corners=align_corners).reshape(grid.shape[1],-1).T.reshape(*shape,grid.shape[1]).squeeze()
            for grid in grids
        ]
        if len(ret_lst) == 1:
            return ret_lst[0]
        return ret_lst
    def sample_ray(self, rays_o, rays_d, near, far, stepsize, is_train=False, near_far=False, **render_kwargs):
        '''Sample query points on rays.

        Returns (rays_pts [..., N_samples, 3], mask_outbbox) where the mask
        marks samples that fall outside the scene bbox (or whole rays that
        miss it).
        '''
        # 1. determine the maximum number of query points to cover all possible rays
        N_samples = int(np.linalg.norm(np.array(self.density.shape[2:])+1) / stepsize) + 1
        # 2. determine the two end-points of ray bbox intersection
        # (1e-6 substitution avoids division by zero on axis-parallel rays)
        vec = torch.where(rays_d==0, torch.full_like(rays_d, 1e-6), rays_d)
        rate_a = (self.xyz_max - rays_o) / vec
        rate_b = (self.xyz_min - rays_o) / vec
        t_min = torch.minimum(rate_a, rate_b).amax(-1).clamp(min=near, max=far)
        t_max = torch.maximum(rate_a, rate_b).amin(-1).clamp(min=near, max=far)
        # 3. check wheter a raw intersect the bbox or not
        mask_outbbox = (t_max <= t_min)
        # 4. sample points on each ray
        rng = torch.arange(N_samples)[None].float()
        if is_train:
            # per-ray random jitter of the sample positions during training
            rng = rng.repeat(rays_d.shape[-2],1)
            rng += torch.rand_like(rng[:,[0]])
        step = stepsize * self.voxel_size * rng
        interpx = (t_min[...,None] + step/rays_d.norm(dim=-1,keepdim=True))
        rays_pts = rays_o[...,None,:] + rays_d[...,None,:] * interpx[...,None]
        # 5. update mask for query points outside bbox
        mask_outbbox = mask_outbbox[...,None] | ((self.xyz_min>rays_pts) | (rays_pts>self.xyz_max)).any(dim=-1)
        return rays_pts, mask_outbbox
    def forward(self, rays_o, rays_d, viewdirs, global_step=None, **render_kwargs):
        '''Volume rendering.

        Samples points on the given rays, converts interpolated densities to
        alphas (pre-/in-/post-activation variants), decodes colors (optionally
        view-dependent via the rgbnet MLP), and alpha-composites.
        Training mode is signalled by passing a non-None global_step.
        '''
        ret_dict = {}
        # sample points on rays
        rays_pts, mask_outbbox = self.sample_ray(
            rays_o=rays_o, rays_d=rays_d, is_train=global_step is not None, **render_kwargs)
        interval = render_kwargs['stepsize'] * self.voxel_size_ratio
        # update mask for query points in known free space
        if self.mask_cache is not None:
            mask_outbbox[~mask_outbbox] |= (~self.mask_cache(rays_pts[~mask_outbbox]))
        # query for alpha
        alpha = torch.zeros_like(rays_pts[...,0])
        vis_density = torch.zeros_like(rays_pts[...,0])
        if self.pre_act_density:
            # pre-activation: activate the whole grid first, then interpolate alpha
            density = None
            alpha[~mask_outbbox] = self.grid_sampler(
                rays_pts[~mask_outbbox], self.activate_density(self.density, interval))
        elif self.in_act_density:
            # in-activation: interpolate softplus(density), then convert to alpha
            density = self.grid_sampler(rays_pts[~mask_outbbox], F.softplus(self.density + self.act_shift))
            alpha[~mask_outbbox] = 1 - torch.exp(-density * interval)
        else:
            # post-activation: interpolate raw density, then activate
            density = self.grid_sampler(rays_pts[~mask_outbbox], self.density)
            alpha[~mask_outbbox] = self.activate_density(density, interval)
        # compute accumulated transmittance
        weights, alphainv_cum = get_ray_marching_ray(alpha)
        # NOTE(review): when self.pre_act_density is True, `density` is None here
        # and this assignment would raise — the pre-act path appears unused.
        vis_density[~mask_outbbox] = density
        # query for color (only where the sample weight is non-negligible)
        mask = (weights > self.fast_color_thres)
        k0 = torch.zeros(*weights.shape, self.k0_dim).to(weights)
        if not self.rgbnet_full_implicit:
            k0[mask] = self.grid_sampler(rays_pts[mask], self.k0)
        if self.rgbnet is None:
            # no view-depend effect
            rgb = torch.sigmoid(k0)
        else:
            # view-dependent color emission
            if self.rgbnet_direct:
                k0_view = k0
            else:
                # first 3 channels act as a view-independent diffuse base
                k0_view = k0[..., 3:]
                k0_diffuse = k0[..., :3]
            # positional encodings for view directions and (normalized) positions
            viewdirs_emb = (viewdirs.unsqueeze(-1) * self.viewfreq).flatten(-2)
            viewdirs_emb = torch.cat([viewdirs, viewdirs_emb.sin(), viewdirs_emb.cos()], -1)
            rays_xyz = (rays_pts[mask] - self.xyz_min) / (self.xyz_max - self.xyz_min)
            xyz_emb = (rays_xyz.unsqueeze(-1) * self.posfreq).flatten(-2)
            xyz_emb = torch.cat([rays_xyz, xyz_emb.sin(), xyz_emb.cos()], -1)
            rgb_feat = torch.cat([
                k0_view[mask],
                xyz_emb,
                # TODO: use `rearrange' to make it readable
                viewdirs_emb.flatten(0,-2).unsqueeze(-2).repeat(1,weights.shape[-1],1)[mask.flatten(0,-2)]
            ], -1)
            rgb_logit = torch.zeros(*weights.shape, 3).to(weights)
            rgb_logit[mask] = self.rgbnet(rgb_feat)
            if self.rgbnet_direct:
                rgb = torch.sigmoid(rgb_logit)
            else:
                rgb_logit[mask] = rgb_logit[mask] + k0_diffuse[mask]
                rgb = torch.sigmoid(rgb_logit)
        # Ray marching: composite colors; leftover transmittance gets the bg color
        rgb_marched = (weights[...,None] * rgb).sum(-2) + alphainv_cum[...,[-1]] * render_kwargs['bg']
        rgb_marched = rgb_marched.clamp(0, 1)
        depth = (rays_o[...,None,:] - rays_pts).norm(dim=-1)
        depth = (weights * depth).sum(-1) + alphainv_cum[...,-1] * render_kwargs['far']
        disp = 1 / depth
        ret_dict.update({
            'alphainv_cum': alphainv_cum,
            'weights': weights,
            'rgb_marched': rgb_marched,
            'raw_alpha': alpha,
            'raw_rgb': rgb,
            'depth': depth,
            'disp': disp,
            'mask': mask,
            'mask_outbbox':mask_outbbox,
        })
        return ret_dict
def extract_geometry(self, bound_min, bound_max, resolution=128, threshold=0.0, mode="density"):
if mode == "density":
query_func = lambda pts: self.activate_density(self.grid_sampler(pts, self.density))
threshold = 0.001
elif mode == "neus":
query_func = lambda pts: self.grid_sampler(pts, - self.sdf)
threshold = 0.0
else:
raise NameError
if resolution is None:
resolution = self.world_size[0]
return extract_geometry(bound_min,
bound_max,
resolution=resolution,
threshold=threshold,
query_func=query_func)
    def visualize_density_sdf(self, root='', iter=0, idxs=None):
        """Debug helper: save alpha slice image(s) under <root>/debug_figs/.

        `idxs` selects which slices along the first spatial axis to dump;
        defaults to the middle slice (or slice 60 for large grids).
        """
        if idxs is None:
            if self.density.shape[2] < 100:
                idxs = [self.density.shape[2] // 2]
            else:
                idxs = [60]
        os.makedirs(os.path.join(root, "debug_figs"), exist_ok=True)
        for i in idxs:
            # Alpha here ignores the sampling interval (interval = 1).
            alpha = 1 - torch.exp(-F.softplus(self.density + self.act_shift)).cpu().detach().numpy()
            alpha_img = alpha[0,0,i] * 255
            cv2.imwrite(os.path.join(root, "debug_figs/density_alpha_{}_{}.png".format(iter, i)), alpha_img)
def visualize_weight(self, density, alpha, weight, root='', step=0, thrd=0.001):
idxs = weight.sum(-1).sort()[-1][-100:]
idxs = [idxs[i] for i in [0, 20, 40, 60, 80]]
density[density<-5] = -5
plt.figure(figsize=(20,4))
for n, i in enumerate(idxs):
# vis = (weight[i] > thrd).cpu().numpy()
vis = np.arange(weight.shape[1])
# import ipdb; ipdb.set_trace()
ax1 = plt.subplot(1, 5, n+1)
ax1.plot(density.detach().cpu().numpy()[i][vis], label='density')
ax2 = ax1.twinx()
ax2.plot(alpha.detach().cpu().numpy()[i][vis], color='green', label='alpha')
ax2.plot(weight.detach().cpu().numpy()[i][vis], color='red', label='weight')
plt.legend()
plt.savefig(os.path.join(root, "debug_figs/weight_{}.png".format(step)))
''' Module for the searched coarse geometry
It supports query for the known free space and unknown space.
'''
class MaskCache(nn.Module):
    """Query helper over a saved coarse-stage density grid.

    Loads the density grid from a checkpoint, max-pools it (dilating occupied
    space by `ks`), and answers point queries with "is this location possibly
    occupied", i.e. alpha >= mask_cache_thres.
    """
    def __init__(self, path, mask_cache_thres, ks=3):
        super().__init__()
        st = torch.load(path)
        self.mask_cache_thres = mask_cache_thres
        self.register_buffer('xyz_min', torch.FloatTensor(st['MaskCache_kwargs']['xyz_min']))
        self.register_buffer('xyz_max', torch.FloatTensor(st['MaskCache_kwargs']['xyz_max']))
        # Max-pooling dilates occupied regions so borderline voxels are kept.
        self.register_buffer('density', F.max_pool3d(
            st['model_state_dict']['density'], kernel_size=ks, padding=ks//2, stride=1))
        self.act_shift = st['MaskCache_kwargs']['act_shift']
        self.voxel_size_ratio = st['MaskCache_kwargs']['voxel_size_ratio']
        self.nearest = st['MaskCache_kwargs'].get('nearest', False)
        self.pre_act_density = st['MaskCache_kwargs'].get('pre_act_density', False)
        self.in_act_density = st['MaskCache_kwargs'].get('in_act_density', False)

    @torch.no_grad()
    def forward(self, xyz):
        """Return a boolean mask: True where `xyz` may lie in occupied space."""
        shape = xyz.shape[:-1]
        xyz = xyz.reshape(1,1,1,-1,3)
        # grid_sample expects zyx order in [-1, 1], hence the flip + rescale.
        ind_norm = ((xyz - self.xyz_min) / (self.xyz_max - self.xyz_min)).flip((-1,)) * 2 - 1
        if self.nearest:
            density = F.grid_sample(self.density, ind_norm, align_corners=True, mode='nearest')
            alpha = 1 - torch.exp(-F.softplus(density + self.act_shift) * self.voxel_size_ratio)
        elif self.pre_act_density:
            # Pre-activation: convert the whole grid to alpha first, then
            # interpolate the alpha values. (Bug fix: the original sampled the
            # raw density grid here, discarding the alpha it had just computed.)
            alpha = 1 - torch.exp(-F.softplus(self.density + self.act_shift) * self.voxel_size_ratio)
            alpha = F.grid_sample(alpha, ind_norm, align_corners=True)
        elif self.in_act_density:
            density = F.grid_sample(F.softplus(self.density + self.act_shift), ind_norm, align_corners=True)
            alpha = 1 - torch.exp(-density * self.voxel_size_ratio)
        else:
            density = F.grid_sample(self.density, ind_norm, align_corners=True)
            alpha = 1 - torch.exp(-F.softplus(density + self.act_shift) * self.voxel_size_ratio)
        alpha = alpha.reshape(*shape)
        return (alpha >= self.mask_cache_thres)
''' Misc
'''
def cumprod_exclusive(p):
    """Exclusive cumulative product along the last dim, prefixed with ones.

    Clamping at 1e-10 keeps the backward pass stable late in training
    (as observed by the original author).
    """
    leading_ones = torch.ones_like(p[..., [0]])
    running = p.clamp_min(1e-10).cumprod(-1)
    return torch.cat([leading_ones, running], -1)
def get_ray_marching_ray(alpha):
    """Convert per-sample alphas into compositing weights and transmittance.

    weight_i = alpha_i * prod_{j<i} (1 - alpha_j); alphainv_cum has one extra
    trailing entry (the transmittance left after the last sample).
    """
    alphainv_cum = cumprod_exclusive(1 - alpha)
    weights = alpha * alphainv_cum[..., :-1]
    return weights, alphainv_cum
def total_variation(v, mask=None):
    """Mean absolute difference between neighbouring voxels along x/y/z.

    `v` has shape [1, C, X, Y, Z]. When `mask` is given, only neighbour pairs
    whose BOTH endpoints lie inside the mask contribute.
    """
    per_axis = []
    for dim in (2, 3, 4):
        d = v.diff(dim=dim).abs()
        if mask is not None:
            lo = tuple([slice(None)] * dim + [slice(None, -1)])
            hi = tuple([slice(None)] * dim + [slice(1, None)])
            d = d[mask[lo] & mask[hi]]
        per_axis.append(d.mean())
    return sum(per_axis) / 3
''' Ray and batch
'''
def get_rays(H, W, K, c2w, inverse_y, flip_x, flip_y, mode='center'):
    """Generate per-pixel ray origins and directions for an HxW camera.

    mode: 'lefttop' samples pixel corners, 'center' pixel centers (+0.5),
    'random' jitters within each pixel. `inverse_y` selects the +z camera
    convention instead of the OpenGL-style -z/-y one; `flip_x`/`flip_y`
    mirror the pixel grid.
    """
    i, j = torch.meshgrid(
        torch.linspace(0, W-1, W, device=c2w.device),
        torch.linspace(0, H-1, H, device=c2w.device))  # pytorch's meshgrid has indexing='ij'
    # transpose from 'ij' to image (H, W) layout
    i = i.t().float()
    j = j.t().float()
    if mode == 'lefttop':
        pass
    elif mode == 'center':
        i, j = i+0.5, j+0.5
    elif mode == 'random':
        i = i+torch.rand_like(i)
        j = j+torch.rand_like(j)
    else:
        raise NotImplementedError
    if flip_x:
        i = i.flip((1,))
    if flip_y:
        j = j.flip((0,))
    if inverse_y:
        dirs = torch.stack([(i-K[0][2])/K[0][0], (j-K[1][2])/K[1][1], torch.ones_like(i)], -1)
    else:
        dirs = torch.stack([(i-K[0][2])/K[0][0], -(j-K[1][2])/K[1][1], -torch.ones_like(i)], -1)
    # Rotate ray directions from camera frame to the world frame
    rays_d = torch.sum(dirs[..., np.newaxis, :] * c2w[:3,:3], -1)  # dot product, equals to: [c2w.dot(dir) for dir in dirs]
    # Translate camera frame's origin to the world frame. It is the origin of all rays.
    rays_o = c2w[:3,3].expand(rays_d.shape)
    return rays_o, rays_d
def get_rays_np(H, W, K, c2w):
    """NumPy version of get_rays: per-pixel origins/directions in world space."""
    i, j = np.meshgrid(np.arange(W, dtype=np.float32), np.arange(H, dtype=np.float32), indexing='xy')
    # Pixel -> camera-space direction (OpenGL convention: -z forward).
    dirs = np.stack([(i - K[0][2]) / K[0][0], -(j - K[1][2]) / K[1][1], -np.ones_like(i)], -1)
    # Rotate into world space: rays_d[h, w, r] = sum_c dirs[h, w, c] * c2w[r, c].
    rays_d = np.einsum('hwc,rc->hwr', dirs, c2w[:3, :3])
    # The camera center is the shared origin of all rays.
    rays_o = np.broadcast_to(c2w[:3, 3], rays_d.shape)
    return rays_o, rays_d
def ndc_rays(H, W, focal, near, rays_o, rays_d):
    """Map rays into normalized device coordinates (as in the original NeRF)."""
    # Slide each origin along its ray onto the near plane first.
    t = -(near + rays_o[..., 2]) / rays_d[..., 2]
    rays_o = rays_o + t[..., None] * rays_d
    ox, oy, oz = rays_o[..., 0], rays_o[..., 1], rays_o[..., 2]
    dx, dy, dz = rays_d[..., 0], rays_d[..., 1], rays_d[..., 2]
    # Perspective projection scale factors.
    sx = -1. / (W / (2. * focal))
    sy = -1. / (H / (2. * focal))
    o0 = sx * ox / oz
    o1 = sy * oy / oz
    o2 = 1. + 2. * near / oz
    d0 = sx * (dx / dz - ox / oz)
    d1 = sy * (dy / dz - oy / oz)
    d2 = -2. * near / oz
    return torch.stack([o0, o1, o2], -1), torch.stack([d0, d1, d2], -1)
def get_rays_of_a_view(H, W, K, c2w, ndc, inverse_y, flip_x, flip_y, mode='center'):
    """Generate (origins, directions, unit view dirs) for one camera view."""
    rays_o, rays_d = get_rays(
        H, W, K, c2w, inverse_y=inverse_y, flip_x=flip_x, flip_y=flip_y, mode=mode)
    # Normalize BEFORE the NDC warp so viewdirs stay in world space.
    viewdirs = rays_d / rays_d.norm(dim=-1, keepdim=True)
    if ndc:
        rays_o, rays_d = ndc_rays(H, W, K[0][0], 1., rays_o, rays_d)
    return rays_o, rays_d, viewdirs
@torch.no_grad()
def get_training_rays(rgb_tr, train_poses, HW, Ks, ndc, inverse_y, flip_x, flip_y):
    """Precompute per-pixel rays for every training view (shared H/W/K).

    Returns (rgb_tr, rays_o, rays_d, viewdirs, imsz) with ray tensors of
    shape [n_views, H, W, 3] on rgb_tr's device.
    """
    print('get_training_rays: start')
    # All views must share a single resolution and a single intrinsic matrix.
    assert len(np.unique(HW, axis=0)) == 1
    assert len(np.unique(Ks.reshape(len(Ks),-1), axis=0)) == 1
    assert len(rgb_tr) == len(train_poses) and len(rgb_tr) == len(Ks) and len(rgb_tr) == len(HW)
    H, W = HW[0]
    K = Ks[0]
    tic = time.time()
    n_views = len(rgb_tr)
    device = rgb_tr.device
    rays_o_tr = torch.zeros([n_views, H, W, 3], device=device)
    rays_d_tr = torch.zeros([n_views, H, W, 3], device=device)
    viewdirs_tr = torch.zeros([n_views, H, W, 3], device=device)
    imsz = [1] * n_views
    for view_i, c2w in enumerate(train_poses):
        o, d, vd = get_rays_of_a_view(
            H=H, W=W, K=K, c2w=c2w, ndc=ndc, inverse_y=inverse_y, flip_x=flip_x, flip_y=flip_y)
        rays_o_tr[view_i].copy_(o.to(device))
        rays_d_tr[view_i].copy_(d.to(device))
        viewdirs_tr[view_i].copy_(vd.to(device))
        del o, d, vd
    print('get_training_rays: finish (eps time:', time.time() - tic, 'sec)')
    return rgb_tr, rays_o_tr, rays_d_tr, viewdirs_tr, imsz
@torch.no_grad()
def get_training_rays_flatten(rgb_tr_ori, train_poses, HW, Ks, ndc, inverse_y, flip_x, flip_y):
    """Like get_training_rays, but flattens all pixels of all views into [N, 3].

    Supports per-view resolutions/intrinsics; rays are concatenated in view order.
    """
    print('get_training_rays_flatten: start')
    assert len(rgb_tr_ori) == len(train_poses) and len(rgb_tr_ori) == len(Ks) and len(rgb_tr_ori) == len(HW)
    tic = time.time()
    device = rgb_tr_ori[0].device
    total = sum(im.shape[0] * im.shape[1] for im in rgb_tr_ori)
    rgb_tr = torch.zeros([total, 3], device=device)
    rays_o_tr = torch.zeros_like(rgb_tr)
    rays_d_tr = torch.zeros_like(rgb_tr)
    viewdirs_tr = torch.zeros_like(rgb_tr)
    imsz = []
    offset = 0
    for c2w, img, (H, W), K in zip(train_poses, rgb_tr_ori, HW, Ks):
        assert img.shape[:2] == (H, W)
        o, d, vd = get_rays_of_a_view(
            H=H, W=W, K=K, c2w=c2w, ndc=ndc,
            inverse_y=inverse_y, flip_x=flip_x, flip_y=flip_y)
        n = H * W
        sl = slice(offset, offset + n)
        rgb_tr[sl].copy_(img.flatten(0,1))
        rays_o_tr[sl].copy_(o.flatten(0,1).to(device))
        rays_d_tr[sl].copy_(d.flatten(0,1).to(device))
        viewdirs_tr[sl].copy_(vd.flatten(0,1).to(device))
        imsz.append(n)
        offset += n
    assert offset == total
    print('get_training_rays_flatten: finish (eps time:', time.time() - tic, 'sec)')
    return rgb_tr, rays_o_tr, rays_d_tr, viewdirs_tr, imsz
@torch.no_grad()
def get_training_rays_in_maskcache_sampling(rgb_tr_ori, train_poses, HW, Ks, ndc, inverse_y, flip_x, flip_y, model, render_kwargs):
    """Precompute flattened training rays, keeping only pixels whose ray passes
    through space the model's MaskCache considers possibly occupied.

    Pixels are tested in CHUNK-row batches; surviving rays are packed densely
    at the front of preallocated buffers, which are truncated at the end.
    """
    print('get_training_rays_in_maskcache_sampling: start')
    assert len(rgb_tr_ori) == len(train_poses) and len(rgb_tr_ori) == len(Ks) and len(rgb_tr_ori) == len(HW)
    CHUNK = 64
    DEVICE = rgb_tr_ori[0].device
    eps_time = time.time()
    N = sum(im.shape[0] * im.shape[1] for im in rgb_tr_ori)
    rgb_tr = torch.zeros([N,3], device=DEVICE)
    rays_o_tr = torch.zeros_like(rgb_tr)
    rays_d_tr = torch.zeros_like(rgb_tr)
    viewdirs_tr = torch.zeros_like(rgb_tr)
    imsz = []
    top = 0
    for c2w, img, (H, W), K in zip(train_poses, rgb_tr_ori, HW, Ks):
        assert img.shape[:2] == (H, W)
        rays_o, rays_d, viewdirs = get_rays_of_a_view(
            H=H, W=W, K=K, c2w=c2w, ndc=ndc,
            inverse_y=inverse_y, flip_x=flip_x, flip_y=flip_y)
        mask = torch.ones(img.shape[:2], device=DEVICE, dtype=torch.bool)
        for i in range(0, img.shape[0], CHUNK):
            rays_pts, mask_outbbox = model.sample_ray(
                rays_o=rays_o[i:i+CHUNK], rays_d=rays_d[i:i+CHUNK], **render_kwargs)
            # Additionally rule out samples the coarse-stage cache marks as free.
            mask_outbbox[~mask_outbbox] |= (~model.mask_cache(rays_pts[~mask_outbbox]))
            # Keep a pixel if ANY of its samples may hit occupied space.
            mask[i:i+CHUNK] &= (~mask_outbbox).any(-1).to(DEVICE)
        n = mask.sum()
        rgb_tr[top:top+n].copy_(img[mask])
        rays_o_tr[top:top+n].copy_(rays_o[mask].to(DEVICE))
        rays_d_tr[top:top+n].copy_(rays_d[mask].to(DEVICE))
        viewdirs_tr[top:top+n].copy_(viewdirs[mask].to(DEVICE))
        imsz.append(n)
        top += n
    print('get_training_rays_in_maskcache_sampling: ratio', top / N)
    # Trim the unused tail of the preallocated buffers.
    rgb_tr = rgb_tr[:top]
    rays_o_tr = rays_o_tr[:top]
    rays_d_tr = rays_d_tr[:top]
    viewdirs_tr = viewdirs_tr[:top]
    eps_time = time.time() - eps_time
    print('get_training_rays_in_maskcache_sampling: finish (eps time:', eps_time, 'sec)')
    return rgb_tr, rays_o_tr, rays_d_tr, viewdirs_tr, imsz
def batch_indices_generator(N, BS):
    """Endlessly yield shuffled LongTensor index batches of size BS over range(N).

    Reshuffles (dropping the incomplete tail batch) whenever fewer than BS
    indices remain. Uses numpy's permutation: the original author observed
    torch.randperm producing incorrect results on their CUDA setup.
    """
    order = torch.LongTensor(np.random.permutation(N))
    cursor = 0
    while True:
        if cursor + BS > N:
            order = torch.LongTensor(np.random.permutation(N))
            cursor = 0
        yield order[cursor:cursor + BS]
        cursor += BS
def extract_fields(bound_min, bound_max, resolution, query_func, N=64):
    """Evaluate query_func on a dense resolution^3 grid, in N-sized axis chunks.

    Returns a float32 numpy array of shape [resolution]*3; chunking keeps the
    per-call point count bounded at N^3.
    """
    axes = [torch.linspace(bound_min[k], bound_max[k], resolution).split(N) for k in range(3)]
    u = np.zeros([resolution] * 3, dtype=np.float32)
    with torch.no_grad():
        for xi, xs in enumerate(axes[0]):
            for yi, ys in enumerate(axes[1]):
                for zi, zs in enumerate(axes[2]):
                    xx, yy, zz = torch.meshgrid(xs, ys, zs)
                    pts = torch.cat([xx.reshape(-1, 1), yy.reshape(-1, 1), zz.reshape(-1, 1)], dim=-1)
                    vals = query_func(pts).reshape(len(xs), len(ys), len(zs)).detach().cpu().numpy()
                    u[xi * N: xi * N + len(xs), yi * N: yi * N + len(ys), zi * N: zi * N + len(zs)] = vals
    return u
def extract_geometry(bound_min, bound_max, resolution, threshold, query_func, N=64):
    """Marching-cubes a scalar field into (vertices, triangles) in world coords."""
    print('threshold: {}'.format(threshold))
    field = extract_fields(bound_min, bound_max, resolution, query_func, N)
    vertices, triangles = mcubes.marching_cubes(field, threshold)
    lo = bound_min.detach().cpu().numpy()
    hi = bound_max.detach().cpu().numpy()
    # Rescale grid-index coordinates into world coordinates.
    vertices = vertices / (resolution - 1.0) * (hi - lo)[None, :] + lo[None, :]
    return vertices, triangles
| 32,546 | 45.231534 | 149 | py |
Voxurf | Voxurf-main/lib/load_nsvf.py | import os
import glob
import torch
import numpy as np
import imageio
import json
import torch.nn.functional as F
import cv2
# Homogeneous 4x4 camera-pose building blocks (float torch tensors).
def trans_t(t):
    """Translation by `t` along the z axis."""
    return torch.Tensor([
        [1, 0, 0, 0],
        [0, 1, 0, 0],
        [0, 0, 1, t],
        [0, 0, 0, 1]]).float()

def rot_phi(phi):
    """Rotation by `phi` radians about the x axis."""
    return torch.Tensor([
        [1, 0, 0, 0],
        [0, np.cos(phi), -np.sin(phi), 0],
        [0, np.sin(phi), np.cos(phi), 0],
        [0, 0, 0, 1]]).float()

def rot_theta(th):
    """Rotation by `th` radians about the y axis."""
    return torch.Tensor([
        [np.cos(th), 0, -np.sin(th), 0],
        [0, 1, 0, 0],
        [np.sin(th), 0, np.cos(th), 0],
        [0, 0, 0, 1]]).float()
def pose_spherical(theta, phi, radius):
    """Compose a camera-to-world pose on a sphere around the origin.

    Translate out to `radius`, tilt by `phi` degrees, spin by `theta` degrees,
    then swap into the dataset's world-axis convention.
    """
    c2w = trans_t(radius)
    c2w = rot_phi(phi / 180. * np.pi) @ c2w
    c2w = rot_theta(theta / 180. * np.pi) @ c2w
    axis_swap = torch.Tensor(np.array([[-1, 0, 0, 0], [0, 0, 1, 0], [0, 1, 0, 0], [0, 0, 0, 1]]))
    return axis_swap @ c2w
def load_nsvf_data(basedir):
    """Load an NSVF-format dataset (pose/*.txt + rgb/*.png + intrinsics.txt).

    The first character of each image filename encodes its split
    (index 0/1/2 into i_split). Returns
    (imgs, poses, render_poses, [H, W, focal], i_split).
    """
    pose_paths = sorted(glob.glob(os.path.join(basedir, 'pose', '*txt')))
    rgb_paths = sorted(glob.glob(os.path.join(basedir, 'rgb', '*png')))
    all_poses = []
    all_imgs = []
    i_split = [[], [], []]
    for i, (pose_path, rgb_path) in enumerate(zip(pose_paths, rgb_paths)):
        i_set = int(os.path.split(rgb_path)[-1][0])  # split id from filename's first char
        all_imgs.append((imageio.imread(rgb_path) / 255.).astype(np.float32))
        all_poses.append(np.loadtxt(pose_path).astype(np.float32))
        i_split[i_set].append(i)
    imgs = np.stack(all_imgs, 0)
    poses = np.stack(all_poses, 0)
    H, W = imgs[0].shape[:2]
    # Only the first value of intrinsics.txt (the focal length) is used.
    with open(os.path.join(basedir, 'intrinsics.txt')) as f:
        focal = float(f.readline().split()[0])
    # 40 render poses on a circle at -30 degrees elevation, radius 4.
    render_poses = torch.stack([pose_spherical(angle, -30.0, 4.0) for angle in np.linspace(-180,180,40+1)[:-1]], 0)
    return imgs, poses, render_poses, [H, W, focal], i_split
| 1,712 | 26.629032 | 115 | py |
Voxurf | Voxurf-main/lib/voxurf_womask_coarse.py | import os
import time
import functools
import numpy as np
import cv2
import math
import random
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.nn.functional as F
from lib.dvgo_ori import extract_geometry
import copy
# import MinkowskiEngine as Me
from . import grid
from torch_scatter import segment_coo
from torch.utils.cpp_extension import load
parent_dir = os.path.dirname(os.path.abspath(__file__))
ub360_utils_cuda = load(
name='ub360_utils_cuda',
sources=[
os.path.join(parent_dir, path)
for path in ['cuda/ub360_utils.cpp', 'cuda/ub360_utils_kernel.cu']],
verbose=True)
render_utils_cuda = load(
name='render_utils_cuda',
sources=[
os.path.join(parent_dir, path)
for path in ['cuda/render_utils.cpp', 'cuda/render_utils_kernel.cu']],
verbose=True)
'''Model'''
class Voxurf(torch.nn.Module):
"""
This module is modified from DirectVoxGO https://github.com/sunset1995/DirectVoxGO/blob/main/lib/dvgo.py
"""
    def __init__(self, xyz_min, xyz_max,
                 num_voxels=0, num_voxels_bg=0, num_voxels_base=0,
                 alpha_init=None,
                 nearest=False,
                 mask_cache_path=None, mask_cache_thres=1e-3,
                 fast_color_thres=0, bg_fast_color_thres=0,
                 rgbnet_dim=0, bg_rgbnet_dim=0, rgbnet_direct=False, rgbnet_full_implicit=False,
                 rgbnet_depth=3, rgbnet_width=128,
                 posbase_pe=5, viewbase_pe=4, bg_posbase_pe=5, bg_viewbase_pe=4,
                 geo_rgb_dim=3,
                 grad_mode='interpolate',
                 s_ratio=2000, s_start=0.2, s_learn=False, step_start=0,
                 smooth_ksize=0, smooth_sigma=1, smooth_scale=False,
                 bg_rgbnet_width=128, bg_rgbnet_depth=4,
                 sdf_thr=1.0, tv_in_sphere=True,
                 init_ball_scale=0.5, use_layer_norm=False, bg_use_layer_norm=False, set_sphere_freq=10000,
                 **kwargs):
        """Build the SDF voxel grid, background density grid, feature grids,
        NeuS-style sharpness value s, and foreground/background color MLPs
        over the axis-aligned bbox [xyz_min, xyz_max].
        """
        super(Voxurf, self).__init__()
        self.register_buffer('xyz_min', torch.Tensor(xyz_min))
        self.register_buffer('xyz_max', torch.Tensor(xyz_max))
        self.fast_color_thres = fast_color_thres
        self.bg_fast_color_thres = bg_fast_color_thres
        self.nearest = nearest
        self.set_sphere_freq = set_sphere_freq
        self.sdf_thr = sdf_thr
        self.tv_in_sphere = tv_in_sphere
        self.init_ball_scale = init_ball_scale
        self.use_layer_norm = use_layer_norm
        self.bg_use_layer_norm = bg_use_layer_norm
        # NeuS-style sharpness schedule parameters: s follows a schedule driven
        # by (s_ratio, s_start, step_start) unless it is learned (s_learn).
        self.s_ratio = s_ratio
        self.s_start = s_start
        self.s_learn = s_learn
        self.step_start = step_start
        # NOTE(review): .cuda() on an nn.Parameter returns a plain tensor copy,
        # so s_val here is not registered as a module parameter — confirm this
        # is intended when s_learn=True.
        self.s_val = nn.Parameter(torch.ones(1), requires_grad=s_learn).cuda()
        self.s_val.data *= s_start
        self.sdf_init_mode = "ball_init"
        # determine based grid resolution
        self.num_voxels_base = num_voxels_base
        self.voxel_size_base = ((self.xyz_max - self.xyz_min).prod() / self.num_voxels_base).pow(1/3)
        # determine the density bias shift
        self.alpha_init = alpha_init
        self.act_shift = np.log(1/(1-alpha_init) - 1)
        print('dvgo: set density bias shift to', self.act_shift)
        # determine init grid resolution
        self._set_grid_resolution(num_voxels, num_voxels_bg)
        # init density voxel grid
        # self.density = torch.nn.Parameter(torch.zeros([1, 1, *self.world_size]))
        self.bg_density = grid.create_grid(
            'DenseGrid', channels=1, world_size=self.world_size_bg,
            xyz_min=self.xyz_min, xyz_max=self.xyz_max)
        if self.sdf_init_mode == "ball_init":
            # Initialize the SDF to a sphere of radius init_ball_scale centered
            # in the bbox (signed distance of each grid point to that sphere).
            self.sdf = grid.create_grid(
                'DenseGrid', channels=1, world_size=self.world_size,
                xyz_min=self.xyz_min, xyz_max=self.xyz_max)
            x_min, y_min, z_min = self.xyz_min.cpu().numpy()
            x_max, y_max, z_max = self.xyz_max.cpu().numpy()
            x, y, z = np.mgrid[x_min:x_max:self.world_size[0].item() * 1j, y_min:y_max:self.world_size[1].item() * 1j, z_min:z_max:self.world_size[2].item() * 1j]
            self.sdf.grid.data = torch.from_numpy((x ** 2 + y ** 2 + z ** 2) ** 0.5 - self.init_ball_scale).float()[None, None, ...]
        elif self.sdf_init_mode == "random":
            self.sdf = torch.nn.Parameter(torch.rand([1, 1, *self.world_size]) * 0.05)  # random initialization
            torch.nn.init.normal_(self.sdf, 0.0, 0.5)
        else:
            raise NotImplementedError
        self.init_smooth_conv(smooth_ksize, smooth_sigma)
        self.smooth_scale = smooth_scale
        # init color representation
        self.rgbnet_kwargs = {
            'rgbnet_dim': rgbnet_dim, 'rgbnet_direct': rgbnet_direct,
            'rgbnet_full_implicit': rgbnet_full_implicit,
            'rgbnet_depth': rgbnet_depth, 'rgbnet_width': rgbnet_width,
            'posbase_pe': posbase_pe, 'viewbase_pe': viewbase_pe,
        }
        self.rgbnet_full_implicit = rgbnet_full_implicit
        if self.rgbnet_full_implicit:
            self.k0_dim = 0
        else:
            self.k0_dim = rgbnet_dim
        self.bg_k0_dim = bg_rgbnet_dim
        # feature voxel grids for foreground and background
        self.k0 = grid.create_grid(
            'DenseGrid', channels=self.k0_dim, world_size=self.world_size,
            xyz_min=self.xyz_min, xyz_max=self.xyz_max)
        self.bg_k0 = grid.create_grid(
            'DenseGrid', channels=self.bg_k0_dim, world_size=self.world_size_bg,
            xyz_min=self.xyz_min, xyz_max=self.xyz_max)
        self.rgbnet_direct = rgbnet_direct
        # positional-encoding frequency banks
        # NOTE(review): 'posfreq' is built from bg_posbase_pe and 'bg_posfreq'
        # from posbase_pe — looks swapped; confirm against usage.
        self.register_buffer('posfreq', torch.FloatTensor([(2**i) for i in range(bg_posbase_pe)]))
        self.register_buffer('bg_posfreq', torch.FloatTensor([(2**i) for i in range(posbase_pe)]))
        self.register_buffer('viewfreq', torch.FloatTensor([(2**i) for i in range(viewbase_pe)]))
        self.register_buffer('bg_viewfreq', torch.FloatTensor([(2**i) for i in range(bg_viewbase_pe)]))
        self.use_xyz = posbase_pe >= 0
        self.bg_use_xyz = bg_posbase_pe >= 0
        self.use_view = viewbase_pe >= 0
        self.bg_use_view = bg_viewbase_pe >= 0
        # input dimension of the foreground color MLP
        dim0 = 0
        if self.use_xyz:
            dim0 += (3 + 3 * posbase_pe * 2)
        if self.use_view >= 0:
            dim0 += (3 + 3 * viewbase_pe * 2)
        if self.rgbnet_full_implicit:
            pass
        elif rgbnet_direct:
            dim0 += self.k0_dim
        else:
            dim0 += self.k0_dim-3
        self.geo_rgb_dim = geo_rgb_dim
        if self.geo_rgb_dim:
            dim0 += self.geo_rgb_dim
        if not self.use_layer_norm:
            self.rgbnet = nn.Sequential(
                nn.Linear(dim0, rgbnet_width), nn.ReLU(inplace=True),
                *[
                    nn.Sequential(nn.Linear(rgbnet_width, rgbnet_width), nn.ReLU(inplace=True))
                    for _ in range(rgbnet_depth-2)
                ],
                nn.Linear(rgbnet_width, 3),
            )
        else:
            self.rgbnet = nn.Sequential(
                nn.Linear(dim0, rgbnet_width), nn.LayerNorm(rgbnet_width), nn.ReLU(inplace=True),
                *[
                    nn.Sequential(nn.Linear(rgbnet_width, rgbnet_width), nn.LayerNorm(rgbnet_width), nn.ReLU(inplace=True))
                    for _ in range(rgbnet_depth-2)
                ],
                nn.Linear(rgbnet_width, 3),
            )
        nn.init.constant_(self.rgbnet[-1].bias, 0)
        print('feature voxel grid', self.k0.grid.shape)
        print('mlp', self.rgbnet)
        # input dimension of the background color MLP
        dim0 = 0
        if self.bg_use_xyz:
            dim0 += (3 + 3 * bg_posbase_pe * 2)
        if self.bg_use_view:
            dim0 += (3 + 3 * bg_viewbase_pe * 2)
        if self.rgbnet_full_implicit:
            pass
        elif rgbnet_direct:
            dim0 += self.bg_k0_dim
        else:
            dim0 += self.bg_k0_dim-3
        if not self.bg_use_layer_norm:
            self.bg_rgbnet = nn.Sequential(
                nn.Linear(dim0, bg_rgbnet_width), nn.ReLU(inplace=True),
                *[
                    nn.Sequential(nn.Linear(bg_rgbnet_width, bg_rgbnet_width),
                                  nn.ReLU(inplace=True))
                    for _ in range(bg_rgbnet_depth - 2)
                ],
                nn.Linear(bg_rgbnet_width, 3),
            )
        else:
            self.bg_rgbnet = nn.Sequential(
                nn.Linear(dim0, bg_rgbnet_width), nn.LayerNorm(bg_rgbnet_width), nn.ReLU(inplace=True),
                *[
                    nn.Sequential(nn.Linear(bg_rgbnet_width, bg_rgbnet_width), nn.LayerNorm(bg_rgbnet_width),
                                  nn.ReLU(inplace=True))
                    for _ in range(bg_rgbnet_depth - 2)
                ],
                nn.Linear(bg_rgbnet_width, 3),
            )
        nn.init.constant_(self.bg_rgbnet[-1].bias, 0)
        if self.bg_k0 is not None:
            print('background feature voxel grid', self.bg_k0.grid.shape)
        print('background mlp', self.bg_rgbnet)
        # do not use mask cache
        self.mask_cache_path = None
        self.mask_cache_thres = mask_cache_thres
        self.mask_cache = None
        # grad conv to calculate gradient
        self.init_gradient_conv()
        self.grad_mode = grad_mode
        self.nonempty_mask = None
        self._set_sphere_nonempty_mask()
    def init_gradient_conv(self, sigma = 0):
        """Build two frozen 3D convolutions: a 3-channel Sobel-like gradient
        operator over the SDF grid (`grad_conv`) and a smoothing kernel used
        by the TV regularizer (`tv_smooth_conv`).

        sigma: interpolates between a naive central difference (sigma=0 keeps
        the full smoothing kernel) and a sharper operator — see note below.
        """
        self.grad_conv = nn.Conv3d(1,3,(3,3,3),stride=(1,1,1), padding=(1, 1, 1), padding_mode='replicate')
        # fixme: a better operator?
        # Base 3x3x3 smoothing weights (separable binomial [1,2,1]).
        kernel = np.asarray([
            [[1,2,1],[2,4,2],[1,2,1]],
            [[2,4,2],[4,8,4],[2,4,2]],
            [[1,2,1],[2,4,2],[1,2,1]],
        ])
        # sigma controls the difference between naive [-1,1] and sobel kernel
        distance = np.zeros((3,3,3))
        for i in range(3):
            for j in range(3):
                for k in range(3):
                    # Squared distance of each tap from the kernel center, minus 1.
                    distance[i,j,k] = ((i-1)**2 + (j-1)**2 + (k-1)**2 - 1)
        kernel0 = kernel * np.exp(-distance * sigma)
        # Normalize so the response reads as a derivative per world-space unit
        # (central difference spans 2 * voxel_size).
        kernel1 = kernel0 / ( kernel0[0].sum() * 2 * self.voxel_size.item())
        weight = torch.from_numpy(np.concatenate([kernel1[None] for _ in range(3)])).float()
        # Antisymmetrize each output channel along its own axis: zero the
        # center plane, negate the near plane -> central difference in x, y, z.
        weight[0,1,:,:] *= 0
        weight[0,0,:,:] *= -1
        weight[1,:,1,:] *= 0
        weight[1,:,0,:] *= -1
        weight[2,:,:,1] *= 0
        weight[2,:,:,0] *= -1
        # print("- "*10 + "init gradient conv done" + " -"*10)
        self.grad_conv.weight.data = weight.unsqueeze(1).float()
        self.grad_conv.bias.data = torch.zeros(3)
        # Fixed operator — never trained.
        for param in self.grad_conv.parameters():
            param.requires_grad = False
        # smooth conv for TV
        self.tv_smooth_conv = nn.Conv3d(1, 1, (3, 3, 3), stride=1, padding=1, padding_mode='replicate')
        weight = torch.from_numpy(kernel0 / kernel0.sum()).float()
        self.tv_smooth_conv.weight.data = weight.unsqueeze(0).unsqueeze(0).float()
        self.tv_smooth_conv.bias.data = torch.zeros(1)
        for param in self.tv_smooth_conv.parameters():
            param.requires_grad = False
def _gaussian_3dconv(self, ksize=3, sigma=1):
x = np.arange(-(ksize//2),ksize//2 + 1,1)
y = np.arange(-(ksize//2),ksize//2 + 1,1)
z = np.arange(-(ksize//2),ksize//2 + 1,1)
xx, yy, zz = np.meshgrid(x,y,z)
kernel = np.exp(-(xx**2 + yy**2 + zz**2)/(2*sigma**2))
kernel = torch.from_numpy(kernel).to(self.sdf.grid)
m = nn.Conv3d(1,1,ksize,stride=1,padding=ksize//2, padding_mode='replicate')
m.weight.data = kernel[None, None, ...] / kernel.sum()
m.bias.data = torch.zeros(1)
for param in m.parameters():
param.requires_grad = False
# print(kernel)
return m
def init_smooth_conv(self, ksize=3, sigma=1):
self.smooth_sdf = ksize > 0
if self.smooth_sdf:
self.smooth_conv = self._gaussian_3dconv(ksize, sigma)
print("- "*10 + "init smooth conv with ksize={} and sigma={}".format(ksize, sigma) + " -"*10)
def _set_grid_resolution(self, num_voxels, num_voxels_bg=0):
# Determine grid resolution
if num_voxels_bg == 0:
num_voxels_bg = num_voxels
self.num_voxels = num_voxels
self.num_voxels_bg = num_voxels_bg
self.voxel_size = ((self.xyz_max - self.xyz_min).prod() / num_voxels).pow(1/3)
self.voxel_size_bg = ((self.xyz_max - self.xyz_min).prod() / num_voxels_bg).pow(1/3)
self.world_size = ((self.xyz_max - self.xyz_min) / self.voxel_size).long()
self.world_size_bg = ((self.xyz_max - self.xyz_min) / self.voxel_size_bg).long()
self.voxel_size_ratio = self.voxel_size / self.voxel_size_base
print('voxel_size ', self.voxel_size)
print('voxel_size_bg ', self.voxel_size_bg)
print('world_size ', self.world_size)
print('world_size_bg ', self.world_size_bg)
print('voxel_size_base ', self.voxel_size_base)
print('voxel_size_ratio', self.voxel_size_ratio)
def get_kwargs(self):
return {
'xyz_min': self.xyz_min.cpu().numpy(),
'xyz_max': self.xyz_max.cpu().numpy(),
'num_voxels': self.num_voxels,
'num_voxels_base': self.num_voxels_base,
'alpha_init': self.alpha_init,
'nearest': self.nearest,
'mask_cache_path': self.mask_cache_path,
'mask_cache_thres': self.mask_cache_thres,
'fast_color_thres': self.fast_color_thres,
'geo_rgb_dim':self.geo_rgb_dim,
**self.rgbnet_kwargs,
}
def get_MaskCache_kwargs(self):
return {
'xyz_min': self.xyz_min.cpu().numpy(),
'xyz_max': self.xyz_max.cpu().numpy(),
'act_shift': self.act_shift,
'voxel_size_ratio': self.voxel_size_ratio,
'nearest': self.nearest,
}
    @torch.no_grad()
    def _set_nonempty_mask(self):
        """Evaluate the mask cache on the current grid coordinates and blank
        out SDF voxels it marks empty (writes SDF=1 there)."""
        # World-space coordinates of every voxel center of the SDF grid.
        self_grid_xyz = torch.stack(torch.meshgrid(
            torch.linspace(self.xyz_min[0], self.xyz_max[0], self.sdf.grid.shape[2]),
            torch.linspace(self.xyz_min[1], self.xyz_max[1], self.sdf.grid.shape[3]),
            torch.linspace(self.xyz_min[2], self.xyz_max[2], self.sdf.grid.shape[4]),
        ), -1)
        nonempty_mask = self.mask_cache(self_grid_xyz)[None,None].contiguous()
        # First call registers a buffer; later calls just overwrite it.
        if hasattr(self, 'nonempty_mask'):
            self.nonempty_mask = nonempty_mask
        else:
            self.register_buffer('nonempty_mask', nonempty_mask)
        # self.bg_density.grid[~self.nonempty_mask] = -100
        # SDF=1 (far outside the surface) marks the voxel as empty space.
        self.sdf.grid[~self.nonempty_mask] = 1
        print('- '*10, 'setting mask cache!', ' -'*10)
@torch.no_grad()
def _set_sphere_nonempty_mask(self):
self_grid_xyz = torch.stack(torch.meshgrid(
torch.linspace(self.xyz_min[0], self.xyz_max[0], self.sdf.grid.shape[2]),
torch.linspace(self.xyz_min[1], self.xyz_max[1], self.sdf.grid.shape[3]),
torch.linspace(self.xyz_min[2], self.xyz_max[2], self.sdf.grid.shape[4]),
), -1)
nonempty_mask = (self_grid_xyz[...,0] ** 2 + self_grid_xyz[...,1] ** 2 + self_grid_xyz[...,2] ** 2) < 1.
nonempty_mask = nonempty_mask[None, None]
self.sphere_mask = nonempty_mask
self.sdf.grid[~self.sphere_mask] = 1
@torch.no_grad()
def maskout_near_cam_vox(self, cam_o, near):
self_grid_xyz = torch.stack(torch.meshgrid(
torch.linspace(self.xyz_min[0], self.xyz_max[0], self.sdf.grid.shape[2]),
torch.linspace(self.xyz_min[1], self.xyz_max[1], self.sdf.grid.shape[3]),
torch.linspace(self.xyz_min[2], self.xyz_max[2], self.sdf.grid.shape[4]),
), -1)
nearest_dist = torch.stack([
(self_grid_xyz.unsqueeze(-2) - co).pow(2).sum(-1).sqrt().amin(-1)
for co in cam_o.split(100) # for memory saving
]).amin(0)
# self.sdf.grid[nearest_dist[None,None] <= near] = -100
self.sdf.grid[nearest_dist[None,None] <= near] = 1
    @torch.no_grad()
    def scale_volume_grid(self, num_voxels, num_voxels_bg=0):
        """Resample all voxel grids to a new resolution (progressive scaling).

        num_voxels / num_voxels_bg: new voxel budgets; resolutions are derived
        by `_set_grid_resolution`. Optionally Gaussian-smooths the upsampled
        SDF and refreshes the non-empty mask.
        """
        print('scale_volume_grid start')
        ori_world_size = self.world_size
        self._set_grid_resolution(num_voxels, num_voxels_bg)
        print('scale_volume_grid scale world_size from', ori_world_size, 'to', self.world_size)
        if num_voxels_bg > 0:
            ori_world_size_bg = self.world_size_bg
            print('scale_volume_grid scale [background] world_size from', ori_world_size_bg, 'to', self.world_size_bg)
        self.sdf.scale_volume_grid(self.world_size)
        # NOTE(review): the background grids below are resized with the
        # foreground `world_size`, not `world_size_bg` — confirm intended.
        self.bg_density.scale_volume_grid(self.world_size)
        if self.k0_dim > 0:
            self.k0.scale_volume_grid(self.world_size)
        if self.bg_k0_dim > 0:
            self.bg_k0.scale_volume_grid(self.world_size)
        if self.mask_cache is not None:
            self._set_nonempty_mask()
        if self.smooth_scale:
            # Smooth the upsampled SDF to suppress interpolation artifacts.
            m = self._gaussian_3dconv(ksize=5, sigma=1)
            with torch.no_grad():
                self.sdf.grid = torch.nn.Parameter(m(self.sdf.grid.data)).cuda()
        print('scale_volume_grid finish')
    def density_total_variation(self, sdf_tv=0, smooth_grad_tv=0, bg_density_tv=0.):
        """Total-variation style regularizers on SDF, gradient, and bg density.

        Each weight <= 0 disables its term. Returns the scalar sum of the
        enabled terms.
        """
        nonempty_mask = self.sphere_mask if self.nonempty_mask is None else self.nonempty_mask
        if not self.tv_in_sphere:
            # NOTE(review): this writes 1 into the cached mask tensor in
            # place, permanently disabling it for later calls — confirm intended.
            nonempty_mask[...] = 1
        tv = 0
        if sdf_tv > 0:
            tv += total_variation(self.sdf.grid, nonempty_mask) / 2 / self.voxel_size * sdf_tv
        if smooth_grad_tv > 0:
            # Penalize deviation of the gradient grid from its smoothed copy.
            smooth_tv_error = (self.tv_smooth_conv(self.gradient.permute(1,0,2,3,4)).detach() - self.gradient.permute(1,0,2,3,4))
            smooth_tv_error = smooth_tv_error[nonempty_mask.repeat(3,1,1,1,1)] ** 2
            tv += smooth_tv_error.mean() * smooth_grad_tv
        if bg_density_tv > 0:
            # NOTE(review): passes the grid module itself, not `.grid` as the
            # other TV calls do — verify this branch is exercised/correct.
            tv += total_variation(self.bg_density) / 2 / self.voxel_size * bg_density_tv
        return tv
def k0_total_variation(self, k0_tv=1., k0_grad_tv=0.):
nonempty_mask = self.sphere_mask if self.nonempty_mask is None else self.nonempty_mask
if not self.tv_in_sphere:
nonempty_mask[...] = 1
if self.rgbnet is not None:
v = self.k0.grid
else:
v = torch.sigmoid(self.k0.grid)
tv = 0
if k0_tv > 0:
tv += total_variation(v, nonempty_mask.repeat(1,v.shape[1],1,1,1))
if k0_grad_tv > 0:
raise NotImplementedError
return tv
def bg_k0_total_variation(self, bg_k0_tv=1., bg_k0_grad_tv=0.):
nonempty_mask = self.sphere_mask if self.nonempty_mask is None else self.nonempty_mask
if not self.tv_in_sphere:
nonempty_mask[...] = 1
if self.rgbnet is not None:
v = self.bg_k0
else:
v = torch.sigmoid(self.bg_k0.grid)
tv = 0
if bg_k0_tv > 0:
tv += total_variation(v, nonempty_mask.repeat(1,v.shape[1],1,1,1))
if bg_k0_grad_tv > 0:
raise NotImplementedError
return tv
def activate_density(self, density, interval=None, s=1):
interval = interval if interval is not None else self.voxel_size_ratio
return 1 - torch.exp(- s * F.softplus(density + self.act_shift) * interval)
    def neus_sdf_gradient(self, mode=None, sdf=None):
        # the gradient grid from the sdf grid
        """Compute a (1, 3, X, Y, Z) gradient grid from the SDF grid.

        mode: 'interpolate' (central difference), 'grad_conv' (fixed Sobel-like
        conv from init_gradient_conv), or 'raw' (forward difference); defaults
        to self.grad_mode. sdf defaults to self.sdf.grid. Boundary entries not
        covered by the difference stencil stay zero.
        """
        if sdf is None:
            sdf = self.sdf.grid
        if mode is None:
            mode = self.grad_mode
        if mode == 'interpolate':
            gradient = torch.zeros([1, 3] + [*self.sdf.grid.shape[-3:]]).to(self.sdf.grid.device)
            gradient[:,0,1:-1,:,:] = (sdf[:,0,2:,:,:] - sdf[:,0,:-2,:,:]) / 2 / self.voxel_size
            gradient[:,1,:,1:-1,:] = (sdf[:,0,:,2:,:] - sdf[:,0,:,:-2,:]) / 2 / self.voxel_size
            gradient[:,2,:,:,1:-1] = (sdf[:,0,:,:,2:] - sdf[:,0,:,:,:-2]) / 2 / self.voxel_size
        elif mode == 'grad_conv':
            """"""
            # use sobel operator for gradient seems basically the same as the naive solution
            for param in self.grad_conv.parameters():
                assert not param.requires_grad
                pass
            gradient = self.grad_conv(sdf)
        elif mode == 'raw':
            gradient = torch.zeros([1, 3] + [*self.sdf.grid.shape[-3:]]).to(self.sdf.grid.device)
            gradient[:,0,:-1,:,:] = (sdf[:,0,1:,:,:] - sdf[:,0,:-1,:,:]) / self.voxel_size
            gradient[:,1,:,:-1,:] = (sdf[:,0,:,1:,:] - sdf[:,0,:,:-1,:]) / self.voxel_size
            gradient[:,2,:,:,:-1] = (sdf[:,0,:,:,1:] - sdf[:,0,:,:,:-1]) / self.voxel_size
        else:
            raise NotImplementedError
        return gradient
    def neus_alpha_from_sdf_scatter(self, viewdirs, ray_id, dist, sdf, gradients, global_step,
                                    is_train, use_mid=True):
        """NeuS-style alpha from SDF samples in scatter (flat, per-sample) form.

        viewdirs: (N_rays, 3); ray_id: (M,) ray index per sample; dist: scalar
        step distance; sdf: (M,) sampled SDF; gradients: (M, 3) sampled SDF
        gradients. Returns (s_val, alpha) with alpha of shape (M,).
        """
        if is_train:
            if not self.s_learn:
                # Scheduled sharpness: inv. proportional to training progress.
                s_val = 1. / (global_step + self.s_ratio / self.s_start - self.step_start) * self.s_ratio
                self.s_val.data = torch.ones_like(self.s_val) * s_val
            else:
                s_val = self.s_val.item()
        else:
            # Eval: report 0; the learned/scheduled self.s_val is still used below.
            s_val = 0
        dirs = viewdirs[ray_id]
        inv_s = torch.ones(1).cuda() / self.s_val
        assert use_mid
        if use_mid:
            # Cosine between ray direction and surface normal (unnormalized).
            true_cos = (dirs * gradients).sum(-1, keepdim=True)
            cos_anneal_ratio = 1.0
            iter_cos = -(F.relu(-true_cos * 0.5 + 0.5) * (1.0 - cos_anneal_ratio) +
                         F.relu(-true_cos) * cos_anneal_ratio)  # always non-positive (M, 1)
            sdf = sdf.unsqueeze(-1) # (M, 1)
            # dist is a constant in this impelmentation
            # Estimate signed distances at section points
            estimated_next_sdf = sdf + iter_cos * dist.reshape(-1, 1) * 0.5 # (M, 1)
            estimated_prev_sdf = sdf - iter_cos * dist.reshape(-1, 1) * 0.5 # (M, 1)
        else:
            estimated_next_sdf = torch.cat([sdf[..., 1:], sdf[..., -1:]], -1).reshape(-1, 1)
            estimated_prev_sdf = torch.cat([sdf[..., :1], sdf[..., :-1]], -1).reshape(-1, 1)
        # Alpha is the normalized CDF drop across the section (NeuS eq. for
        # opaque density), clipped to a valid opacity.
        prev_cdf = torch.sigmoid(estimated_prev_sdf * inv_s.reshape(-1, 1))
        next_cdf = torch.sigmoid(estimated_next_sdf * inv_s.reshape(-1, 1))
        p = prev_cdf - next_cdf
        c = prev_cdf
        alpha = ((p + 1e-5) / (c + 1e-5)).clip(0.0, 1.0).squeeze()
        return s_val, alpha
    def grid_sampler(self, xyz, *grids, mode=None, align_corners=True, smooth=False, displace=0.):
        '''Wrapper for the interp operation'''
        # xyz: (..., 3) world-space query points; grids[0]: (1, C, X, Y, Z)
        # voxel grid. Returns per-point features of shape (..., C) (squeezed).
        if mode is None:
            # bilinear is actually trilinear if 5D input is given to grid_sample
            mode = 'nearest' if self.nearest else 'bilinear'
        shape = xyz.shape[:-1]
        xyz = xyz.reshape(1,1,1,-1,3)
        # Normalize to grid_sample's [-1, 1] convention; flip because
        # grid_sample indexes (z, y, x)-last.
        ind_norm = ((xyz - self.xyz_min) / (self.xyz_max - self.xyz_min)).flip((-1,)) * 2 - 1
        if displace !=0:
            # NOTE(review): adds the same voxel-size-scaled offset to all three
            # normalized axes — confirm the intended units/direction.
            ind_norm[...,:] += displace * self.voxel_size
        # TODO: use `rearrange' to make it readable
        if smooth:
            grid = self.smooth_conv(grids[0])
        else:
            grid = grids[0]
        ret_lst = F.grid_sample(grid, ind_norm, mode=mode, align_corners=align_corners
                                ).reshape(grid.shape[1],-1).T.reshape(*shape,grid.shape[1]).squeeze()
        return ret_lst
    def sample_ray_cuda(self, rays_o, rays_d, near, far, stepsize, maskout=True, use_bg=False, **render_kwargs):
        '''Sample query points on rays.
        All the output points are sorted from near to far.
        Input:
            rays_o, rayd_d:   both in [N, 3] indicating ray configurations.
            near, far:        the near and far distance of the rays.
            stepsize:         the number of voxels of each sample step.
        Output:
            ray_pts:          [M, 3] storing all the sampled points.
            ray_id:           [M]    the index of the ray of each point.
            step_id:          [M]    the i'th step on a ray of each point.
        '''
        # The given far can be too small: rays should march until they exit the
        # scene bbox, so override it with a huge value.
        far = 1e9  # the given far can be too small while rays stop when hitting scene bbox
        rays_o = rays_o.contiguous()
        rays_d = rays_d.contiguous()
        if not use_bg:
            stepdist = stepsize * self.voxel_size
        else:
            stepdist = stepsize * self.voxel_size_bg
        ray_pts, mask_outbbox, ray_id, step_id, N_steps, t_min, t_max = render_utils_cuda.sample_pts_on_rays(
            rays_o, rays_d, self.xyz_min, self.xyz_max, near, far, stepdist)
        # correct the cuda output N_steps, which could have a bias of 1 randomly
        N_steps = ray_id.unique(return_counts=True)[1]
        if maskout:
            # Foreground keeps in-bbox samples; background keeps the complement.
            if not use_bg:
                mask_inbbox = ~mask_outbbox
            else:
                mask_inbbox = mask_outbbox
            ray_pts = ray_pts[mask_inbbox]
            ray_id = ray_id[mask_inbbox]
            step_id = step_id[mask_inbbox]
        return ray_pts, ray_id, step_id, mask_outbbox, N_steps
    def sample_ray_ori(self, rays_o, rays_d, near, far, stepsize, is_train=False, use_bg=False, **render_kwargs):
        '''Sample query points on rays'''
        # Dense (N_rays, N_samples, 3) sampler used for the background model.
        # Returns (rays_pts, mask_outbbox, step) where mask_outbbox flags
        # points that fall outside the scene bbox.
        # 1. determine the maximum number of query points to cover all possible rays
        if use_bg:
            N_samples = int(np.linalg.norm(np.array(self.bg_density.grid.shape[2:])+1) / stepsize) + 1
        else:
            N_samples = int(np.linalg.norm(np.array(self.sdf.grid.shape[2:])+1) / stepsize) + 1
        # 2. determine the two end-points of ray bbox intersection
        vec = torch.where(rays_d==0, torch.full_like(rays_d, 1e-6), rays_d)
        rate_a = (self.xyz_max - rays_o) / vec
        rate_b = (self.xyz_min - rays_o) / vec
        t_min = torch.minimum(rate_a, rate_b).amax(-1).clamp(min=near, max=far)
        t_max = torch.maximum(rate_a, rate_b).amin(-1).clamp(min=near, max=far)
        # 3. check wheter a raw intersect the bbox or not
        mask_outbbox = (t_max <= t_min)
        # 4. sample points on each ray
        rng = torch.arange(N_samples)[None].float()
        if is_train:
            # Jitter sample positions by a shared per-ray random offset.
            rng = rng.repeat(rays_d.shape[-2],1)
            rng += torch.rand_like(rng[:,[0]])
        if use_bg:
            step = stepsize * self.voxel_size_bg * rng
        else:
            step = stepsize * self.voxel_size * rng
        interpx = (t_min[...,None] + step/rays_d.norm(dim=-1,keepdim=True))
        rays_pts = rays_o[...,None,:] + rays_d[...,None,:] * interpx[...,None]
        # 5. update mask for query points outside bbox
        mask_outbbox = mask_outbbox[...,None] | ((self.xyz_min>rays_pts) | (rays_pts>self.xyz_max)).any(dim=-1)
        return rays_pts, mask_outbbox, step
def outside_sphere_trans(self, pts, pts_norm=None, filtered=False):
# r^2 = x^2 + y^2 + z^2; x = x / r^2
out_pts = pts.clone()
if filtered:
out_pts = out_pts / pts_norm ** 2
return out_pts
if pts_norm is None:
pts_norm = torch.linalg.norm(pts, ord=2, dim=-1, keepdim=True)
inside_sphere = (pts_norm < 1.0)
out_pts[~inside_sphere[...,0]] = out_pts[~inside_sphere[...,0]] / pts_norm[~inside_sphere[...,0]] ** 2
out_pts[inside_sphere[...,0]] = -10
return out_pts, ~inside_sphere
    def forward(self, rays_o, rays_d, viewdirs, global_step=None, **render_kwargs):
        '''Volume rendering'''
        # rays_o/rays_d/viewdirs: (N, 3). global_step is None at eval time;
        # during training it drives the s-value schedule and periodic masking.
        if global_step is not None:
            # Periodically re-apply the unit-sphere mask to the SDF grid.
            if global_step in [1, 100, 500, 1000, 2000, 3000, 5000, 10000, 15000, 16000, 17000, 18000, 19000, 20000] or global_step % self.set_sphere_freq == 0:
                self._set_sphere_nonempty_mask()
        ret_dict = {}
        N = len(rays_o)
        # sample points on rays
        inner_pts, inner_ray_id, inner_step_id, mask_outbbox, N_steps = self.sample_ray_cuda(
            rays_o=rays_o, rays_d=rays_d, is_train=global_step is not None, **render_kwargs)
        # Foreground: keep only samples strictly inside the unit sphere.
        pts_norm = torch.linalg.norm(inner_pts, ord=2, dim=-1, keepdim=True)
        inside_sphere = (pts_norm < 1.0)[:,0]
        inner_pts, inner_ray_id, inner_step_id = \
            inner_pts[inside_sphere], inner_ray_id[inside_sphere], inner_step_id[inside_sphere]
        bg_render_kwargs = copy.deepcopy(render_kwargs)
        # old sample ray
        outer_pts_org, bg_mask_outbbox, bg_step = self.sample_ray_ori(
            rays_o=rays_o, rays_d=rays_d, is_train=global_step is not None, use_bg=True, **bg_render_kwargs)
        outer_ray_id, outer_step_id = create_full_step_id(outer_pts_org.shape[:2])
        # Background: warp outside-sphere points into inverted-sphere coords.
        bg_pts_norm = torch.linalg.norm(outer_pts_org, ord=2, dim=-1, keepdim=True)
        bg_inside_sphere = (bg_pts_norm < 1.0)[...,0]
        outer_pts = self.outside_sphere_trans(outer_pts_org, bg_pts_norm, filtered=True)
        bg_mask = ~bg_inside_sphere & bg_mask_outbbox
        # Drop warped background samples whose cumulative spacing falls below
        # a fraction of the step distance (they collapse after inversion).
        dist_thres = self.voxel_size * render_kwargs['stepsize'] * 0.95
        dist = (outer_pts[:, 1:] - outer_pts[:, :-1]).norm(dim=-1)
        dist_mask = ub360_utils_cuda.cumdist_thres(dist, dist_thres)
        bg_mask[:,1:] &= dist_mask
        outer_pts, outer_ray_id, outer_step_id = \
            outer_pts[bg_mask], outer_ray_id[bg_mask.view(-1)], outer_step_id[bg_mask.view(-1)]
        outer_pts_org = outer_pts_org[bg_mask]
        # Query the (optionally smoothed) SDF and its gradient grid.
        if self.smooth_sdf:
            sdf_grid = self.smooth_conv(self.sdf.grid)
        else:
            sdf_grid = self.sdf.grid
        sdf = self.grid_sampler(inner_pts, sdf_grid)
        self.gradient = self.neus_sdf_gradient(sdf=sdf_grid)
        gradient = self.grid_sampler(inner_pts, self.gradient)
        dist = render_kwargs['stepsize'] * self.voxel_size
        # NeuS-style alpha from SDF for the foreground samples.
        s_val, in_alpha = self.neus_alpha_from_sdf_scatter(viewdirs, inner_ray_id, dist, sdf, gradient, global_step=global_step,
                                                           is_train=global_step is not None, use_mid=True)
        in_weights, in_alphainv_last = Alphas2Weights.apply(in_alpha, inner_ray_id, N)
        if self.fast_color_thres > 0:
            # Prune low-weight foreground samples, then recompute weights.
            mask = in_weights > self.fast_color_thres
            inner_pts = inner_pts[mask]
            inner_ray_id = inner_ray_id[mask]
            inner_step_id = inner_step_id[mask]
            in_alpha = in_alpha[mask]
            gradient = gradient[mask]
            in_weights, in_alphainv_last = Alphas2Weights.apply(in_alpha, inner_ray_id, N)
        # Background: density -> alpha -> weights.
        bg_interval = bg_render_kwargs['stepsize'] * self.voxel_size_ratio
        bg_density = self.bg_density(outer_pts)
        bg_alpha = self.activate_density(bg_density, bg_interval)
        if self.bg_fast_color_thres > 0:
            # NOTE(review): gate checks bg_fast_color_thres but thresholds with
            # fast_color_thres — possibly intentional, worth confirming.
            mask = bg_alpha > self.fast_color_thres
            outer_pts = outer_pts[mask]
            outer_ray_id = outer_ray_id[mask]
            bg_alpha = bg_alpha[mask]
            outer_pts_org = outer_pts_org[mask]
        bg_weights, bg_alphainv_last = Alphas2Weights.apply(bg_alpha, outer_ray_id, N)
        # Foreground color features: voxel features + positional/view encodings.
        rgb_feat = []
        if not self.rgbnet_full_implicit:
            k0 = self.k0(inner_pts)
            rgb_feat.append(k0)
        if self.use_xyz:
            rays_xyz = (inner_pts - self.xyz_min) / (self.xyz_max - self.xyz_min)
            xyz_emb = (rays_xyz.unsqueeze(-1) * self.posfreq).flatten(-2)
            xyz_emb = torch.cat([rays_xyz, xyz_emb.sin(), xyz_emb.cos()], -1)
            rgb_feat.append(xyz_emb)
        if self.use_view:
            viewdirs_emb = (viewdirs.unsqueeze(-1) * self.viewfreq).flatten(-2)
            viewdirs_emb = torch.cat(
                [viewdirs, viewdirs_emb.sin(), viewdirs_emb.cos()], -1)
            rgb_feat.append(viewdirs_emb.flatten(0, -2)[inner_ray_id])
        rgb_feat = torch.cat(rgb_feat, -1)
        if self.geo_rgb_dim == 3:
            # Feed the normalized SDF gradient (surface normal) to the MLP.
            normal = gradient / (gradient.norm(dim=-1, keepdim=True) + 1e-5)
            rgb_feat = torch.cat([rgb_feat, normal], -1)
        rgb_logit = self.rgbnet(rgb_feat)
        rgb = torch.sigmoid(rgb_logit)
        # outside
        bg_rgb_feat = []
        if self.bg_k0_dim > 0:
            bg_k0 = self.bg_k0(outer_pts)
            bg_rgb_feat.append(bg_k0)
        if self.bg_use_xyz:
            # Positional encoding uses the original (un-warped) coordinates.
            bg_rays_xyz = (outer_pts_org - self.xyz_min) / (self.xyz_max - self.xyz_min)
            bg_xyz_emb = (bg_rays_xyz.unsqueeze(-1) * self.bg_posfreq).flatten(-2)
            bg_xyz_emb = torch.cat(
                [bg_rays_xyz, bg_xyz_emb.sin(), bg_xyz_emb.cos()], -1)
            bg_rgb_feat.append(bg_xyz_emb)
        if self.bg_use_view:
            bg_viewdirs_emb = (viewdirs.unsqueeze(-1) * self.bg_viewfreq).flatten(-2)
            bg_viewdirs_emb = torch.cat(
                [viewdirs, bg_viewdirs_emb.sin(), bg_viewdirs_emb.cos()], -1)
            bg_rgb_feat.append(bg_viewdirs_emb.flatten(0, -2)[outer_ray_id])
        bg_rgb_feat = torch.cat(bg_rgb_feat, -1)
        bg_rgb_logit = self.bg_rgbnet(bg_rgb_feat)
        bg_rgb = torch.sigmoid(bg_rgb_logit)
        # Composite per-sample colors into per-ray colors; background fills
        # the leftover transmittance of the foreground.
        in_marched = segment_coo(
            src=(in_weights.unsqueeze(-1) * rgb),
            index=inner_ray_id, out=torch.zeros([N, 3]), reduce='sum')
        bg_marched = segment_coo(
            src=(bg_weights.unsqueeze(-1) * bg_rgb),
            index=outer_ray_id, out=torch.zeros([N, 3]), reduce='sum')
        cum_in_weights = segment_coo(
            src=(in_weights.unsqueeze(-1)),
            index=inner_ray_id, out=torch.zeros([N, 1]), reduce='sum')
        rgb_marched = in_marched + (1 - cum_in_weights) * bg_marched
        rgb_marched = rgb_marched.clamp(0, 1)
        if gradient is not None and render_kwargs.get('render_grad', False):
            normal = gradient / (gradient.norm(2, -1, keepdim=True) + 1e-6)
            normal_marched = segment_coo(
                src=(in_weights.unsqueeze(-1) * normal),
                index=inner_ray_id, out=torch.zeros([N, 3]), reduce='sum')
        else:
            normal_marched = None
        if render_kwargs.get('render_depth', False):
            with torch.no_grad():
                depth = segment_coo(
                    src=(in_weights * inner_step_id * dist),
                    index=inner_ray_id, out=torch.zeros([N]), reduce='sum')
            disp = 1 / depth
        else:
            depth = None
            disp = None
        # NOTE(review): 'mask' below is whichever pruning mask was assigned
        # last (fg or bg branch, or undefined if both thresholds are 0).
        ret_dict.update({
            'alphainv_cum': in_alphainv_last,
            'weights': in_weights,
            'bg_weights': bg_weights,
            'pts_norm': pts_norm,
            'rgb_marched': rgb_marched,
            'in_marched': in_marched,
            'out_marched': bg_marched,
            'normal_marched': normal_marched,
            'raw_alpha': in_alpha,
            'raw_rgb': rgb,
            'depth': depth,
            'disp': disp,
            'mask': mask,
            'mask_outbbox':mask_outbbox,
            'gradient': gradient,
            "s_val": s_val,
        })
        return ret_dict
    def mesh_color_forward(self, ray_pts, **kwargs):
        ### coarse-stage geometry and texture are low in resolution
        """Predict vertex colors for extracted mesh points.

        ray_pts: (M, 3) world-space points (typically mesh vertices). View
        direction is synthesized as the inward surface normal (-normal), so
        colors are view-consistent. Returns (M, 3) RGB in [0, 1].
        """
        sdf_grid = self.smooth_conv(self.sdf.grid) if self.smooth_sdf else self.sdf.grid
        self.gradient = self.neus_sdf_gradient(sdf=sdf_grid)
        gradient = self.grid_sampler(ray_pts, self.gradient).reshape(-1, 3)
        normal = gradient / (gradient.norm(dim=-1, keepdim=True) + 1e-5)
        # Fake a view direction looking along the inward normal.
        viewdirs = -normal
        rgb_feat = []
        if not self.rgbnet_full_implicit:
            k0 = self.k0(ray_pts)
            rgb_feat.append(k0)
        if self.use_xyz:
            rays_xyz = (ray_pts - self.xyz_min) / (
                    self.xyz_max - self.xyz_min)
            xyz_emb = (rays_xyz.unsqueeze(-1) * self.posfreq).flatten(-2)
            xyz_emb = torch.cat([rays_xyz, xyz_emb.sin(), xyz_emb.cos()], -1)
            rgb_feat.append(xyz_emb)
        if self.use_view:
            viewdirs_emb = (viewdirs.unsqueeze(-1) * self.viewfreq).flatten(-2)
            viewdirs_emb = torch.cat(
                [viewdirs, viewdirs_emb.sin(), viewdirs_emb.cos()], -1)
            rgb_feat.append(viewdirs_emb.flatten(0, -2))
        rgb_feat = torch.cat(rgb_feat, -1)
        if self.geo_rgb_dim == 3:
            normal = gradient / (gradient.norm(dim=-1, keepdim=True) + 1e-5)
            rgb_feat = torch.cat([rgb_feat, normal], -1)
        rgb_logit = self.rgbnet(rgb_feat)
        rgb = torch.sigmoid(rgb_logit)
        return rgb
    def extract_geometry(self, bound_min, bound_max, resolution=128, threshold=0.0, **kwargs):
        """Extract a mesh from the SDF grid via the module-level marching-cubes
        helper. The SDF is negated so 'inside' corresponds to values above
        `threshold`, matching the density-style convention of extract_geometry."""
        self._set_sphere_nonempty_mask()
        sdf_grid = self.sdf.grid.clone()
        if self.smooth_sdf:
            sdf_grid = self.smooth_conv(sdf_grid)
        else:
            sdf_grid = sdf_grid
        # Query negated SDF: surface at -sdf == threshold.
        query_func = lambda pts: self.grid_sampler(pts, - sdf_grid)
        if resolution is None:
            resolution = self.world_size[0]
        return extract_geometry(bound_min,
                                bound_max,
                                resolution=resolution,
                                threshold=threshold,
                                query_func=query_func)
def visualize_density_sdf(self, root='', iter=0, idxs=None):
if idxs is None:
if self.bg_density.grid.shape[2] < 100:
idxs = [self.bg_density.grid.shape[2] // 2]
else:
idxs = [60]
os.makedirs(os.path.join(root, "debug_figs"), exist_ok=True)
for i in idxs:
sdf_img = self.sdf.grid[0,0,i].cpu().detach().numpy()
sdf_img = (sdf_img + 1 / 2).clip(0,1) * 255
cv2.imwrite(os.path.join(root, "debug_figs/sdf_{}_{}.png".format(iter, i)), sdf_img)
    def visualize_weight(self, weight1, weight2, thrd=0.001):
        """Plot two per-ray weight profiles for the 100 rays with the largest
        total weight in `weight1`, showing only samples above `thrd`.

        Saves one matplotlib figure per ray as weight_<i>.png in the CWD.
        """
        # sort() returns (values, indices); [-1] selects the indices, so this
        # picks the rays with the 100 largest weight sums.
        idxs = weight1.sum(-1).sort()[-1][-100:]
        for i in idxs:
            plt.figure()
            vis = weight1[i] > thrd
            plt.plot(weight1.detach().cpu().numpy()[i][vis])
            plt.plot(weight2.detach().cpu().numpy()[i][vis])
            plt.savefig("weight_{}.png".format(i))
''' Misc
'''
def total_variation(v, mask=None):
    """Mean absolute difference of a 5D grid along its three spatial dims.

    v: (N, C, X, Y, Z) tensor. mask: optional boolean tensor broadcastable
    to v; when given, a difference only counts if both adjacent voxels are
    kept. Returns the average of the three per-axis mean TVs.
    """
    # Bug fix: the original compared torch.__version__ to the exact string
    # '1.10.0', silently taking the slow manual path on every other release
    # (including 1.10.1+). Feature-detect Tensor.diff (torch >= 1.8) instead.
    if hasattr(v, 'diff'):
        tv2 = v.diff(dim=2).abs()
        tv3 = v.diff(dim=3).abs()
        tv4 = v.diff(dim=4).abs()
    else:
        tv2 = (v[:,:,1:,:,:] - v[:,:,:-1,:,:]).abs()
        tv3 = (v[:,:,:,1:,:] - v[:,:,:,:-1,:]).abs()
        tv4 = (v[:,:,:,:,1:] - v[:,:,:,:,:-1]).abs()
    if mask is not None:
        tv2 = tv2[mask[:,:,:-1] & mask[:,:,1:]]
        tv3 = tv3[mask[:,:,:,:-1] & mask[:,:,:,1:]]
        tv4 = tv4[mask[:,:,:,:,:-1] & mask[:,:,:,:,1:]]
    return (tv2.mean() + tv3.mean() + tv4.mean()) / 3
class Alphas2Weights(torch.autograd.Function):
    """Custom autograd op wrapping the CUDA alpha-compositing kernel.

    Converts per-sample alphas (flat, with a ray index per sample) into
    volume-rendering weights and the leftover per-ray transmittance.
    """
    @staticmethod
    def forward(ctx, alpha, ray_id, N):
        # alpha: (M,) per-sample opacities; ray_id: (M,) ray index per sample;
        # N: number of rays. T is the intermediate transmittance buffer,
        # i_start/i_end the per-ray segment bounds used by backward.
        weights, T, alphainv_last, i_start, i_end = render_utils_cuda.alpha2weight(alpha, ray_id, N)
        if alpha.requires_grad:
            ctx.save_for_backward(alpha, weights, T, alphainv_last, i_start, i_end)
            ctx.n_rays = N
        return weights, alphainv_last
    @staticmethod
    @torch.autograd.function.once_differentiable
    def backward(ctx, grad_weights, grad_last):
        alpha, weights, T, alphainv_last, i_start, i_end = ctx.saved_tensors
        grad = render_utils_cuda.alpha2weight_backward(
                alpha, weights, T, alphainv_last,
                i_start, i_end, ctx.n_rays, grad_weights, grad_last)
        # Only `alpha` receives a gradient; ray_id and N are non-differentiable.
        return grad, None, None
''' Ray and batch
'''
def get_rays(H, W, K, c2w, inverse_y, flip_x, flip_y, mode='center'):
    """Generate per-pixel ray origins and directions in world space.

    H, W: image size. K: 3x3 intrinsics (indexable as K[r][c]). c2w: 4x4 (or
    3x4) camera-to-world transform. mode selects sub-pixel sampling:
    'lefttop', 'center' (+0.5), or 'random' jitter. Returns (rays_o, rays_d),
    each (H, W, 3).
    """
    # Pixel coordinate grids; meshgrid here behaves like indexing='ij',
    # so transpose to get (H, W) layout.
    i, j = torch.meshgrid(
        torch.linspace(0, W-1, W, device=c2w.device),
        torch.linspace(0, H-1, H, device=c2w.device))
    i = i.t().float()
    j = j.t().float()
    if mode == 'center':
        i = i + 0.5
        j = j + 0.5
    elif mode == 'random':
        i = i + torch.rand_like(i)
        j = j + torch.rand_like(j)
    elif mode != 'lefttop':
        raise NotImplementedError
    if flip_x:
        i = i.flip((1,))
    if flip_y:
        j = j.flip((0,))
    x = (i - K[0][2]) / K[0][0]
    y = (j - K[1][2]) / K[1][1]
    ones = torch.ones_like(i)
    # inverse_y selects between +y/+z and -y/-z camera conventions.
    if inverse_y:
        dirs = torch.stack([x, y, ones], -1)
    else:
        dirs = torch.stack([x, -y, -ones], -1)
    # Rotate camera-frame directions into the world frame
    # (dot product with the rows of the rotation part of c2w).
    rays_d = torch.sum(dirs[..., np.newaxis, :] * c2w[:3, :3], -1)
    # Every ray starts at the camera center.
    rays_o = c2w[:3, 3].expand(rays_d.shape)
    return rays_o, rays_d
def ndc_rays(H, W, focal, near, rays_o, rays_d):
    """Shift rays onto the near plane and map them into NDC space
    (standard NeRF forward-facing convention)."""
    # Move each origin along its ray so it lies on the z = -near plane.
    t = -(near + rays_o[..., 2]) / rays_d[..., 2]
    rays_o = rays_o + t[..., None] * rays_d
    ox, oy, oz = rays_o[..., 0], rays_o[..., 1], rays_o[..., 2]
    dx, dy, dz = rays_d[..., 0], rays_d[..., 1], rays_d[..., 2]
    sx = -1. / (W / (2. * focal))
    sy = -1. / (H / (2. * focal))
    o0 = sx * ox / oz
    o1 = sy * oy / oz
    o2 = 1. + 2. * near / oz
    d0 = sx * (dx / dz - ox / oz)
    d1 = sy * (dy / dz - oy / oz)
    d2 = -2. * near / oz
    return torch.stack([o0, o1, o2], -1), torch.stack([d0, d1, d2], -1)
def get_rays_of_a_view(H, W, K, c2w, ndc, inverse_y, flip_x, flip_y, mode='center'):
    """Build rays for one camera view plus unit view directions; optionally
    warp origins/directions into NDC space afterwards."""
    rays_o, rays_d = get_rays(
        H, W, K, c2w, inverse_y=inverse_y, flip_x=flip_x, flip_y=flip_y, mode=mode)
    # Normalize BEFORE the NDC warp so viewdirs stay world-space unit vectors.
    viewdirs = rays_d / rays_d.norm(dim=-1, keepdim=True)
    if ndc:
        rays_o, rays_d = ndc_rays(H, W, K[0][0], 1., rays_o, rays_d)
    return rays_o, rays_d, viewdirs
@torch.no_grad()
def get_training_rays(rgb_tr, train_poses, HW, Ks, ndc, inverse_y, flip_x, flip_y):
    """Precompute per-image ray grids for training.

    Requires all images to share one resolution and one intrinsic matrix.
    Returns (rgb_tr, rays_o_tr, rays_d_tr, viewdirs_tr, imsz), each ray
    tensor shaped (N_imgs, H, W, 3).
    """
    print('get_training_rays: start')
    assert len(np.unique(HW, axis=0)) == 1
    assert len(np.unique(Ks.reshape(len(Ks),-1), axis=0)) == 1
    assert len(rgb_tr) == len(train_poses) and len(rgb_tr) == len(Ks) and len(rgb_tr) == len(HW)
    H, W = HW[0]
    K = Ks[0]
    eps_time = time.time()
    rays_o_tr = torch.zeros([len(rgb_tr), H, W, 3], device=rgb_tr.device)
    rays_d_tr = torch.zeros([len(rgb_tr), H, W, 3], device=rgb_tr.device)
    viewdirs_tr = torch.zeros([len(rgb_tr), H, W, 3], device=rgb_tr.device)
    # NOTE(review): imsz is filled with 1s here, unlike the flatten variant
    # which stores pixel counts — confirm downstream usage expects this.
    imsz = [1] * len(rgb_tr)
    for i, c2w in enumerate(train_poses):
        rays_o, rays_d, viewdirs = get_rays_of_a_view(
            H=H, W=W, K=K, c2w=c2w, ndc=ndc, inverse_y=inverse_y, flip_x=flip_x, flip_y=flip_y)
        rays_o_tr[i].copy_(rays_o.to(rgb_tr.device))
        rays_d_tr[i].copy_(rays_d.to(rgb_tr.device))
        viewdirs_tr[i].copy_(viewdirs.to(rgb_tr.device))
        del rays_o, rays_d, viewdirs
    eps_time = time.time() - eps_time
    print('get_training_rays: finish (eps time:', eps_time, 'sec)')
    return rgb_tr, rays_o_tr, rays_d_tr, viewdirs_tr, imsz
@torch.no_grad()
def get_training_rays_flatten(rgb_tr_ori, train_poses, HW, Ks, ndc, inverse_y, flip_x, flip_y):
    """Precompute training rays flattened across all images.

    Unlike get_training_rays, images may differ in resolution/intrinsics.
    Returns (rgb_tr, rays_o_tr, rays_d_tr, viewdirs_tr, imsz) where the ray
    tensors are (total_pixels, 3) and imsz lists pixels per image.
    """
    print('get_training_rays_flatten: start')
    assert len(rgb_tr_ori) == len(train_poses) and len(rgb_tr_ori) == len(Ks) and len(rgb_tr_ori) == len(HW)
    eps_time = time.time()
    DEVICE = rgb_tr_ori[0].device
    N = sum(im.shape[0] * im.shape[1] for im in rgb_tr_ori)
    rgb_tr = torch.zeros([N,3], device=DEVICE)
    rays_o_tr = torch.zeros_like(rgb_tr)
    rays_d_tr = torch.zeros_like(rgb_tr)
    viewdirs_tr = torch.zeros_like(rgb_tr)
    imsz = []
    top = 0
    for c2w, img, (H, W), K in zip(train_poses, rgb_tr_ori, HW, Ks):
        assert img.shape[:2] == (H, W)
        rays_o, rays_d, viewdirs = get_rays_of_a_view(
            H=H, W=W, K=K, c2w=c2w, ndc=ndc,
            inverse_y=inverse_y, flip_x=flip_x, flip_y=flip_y)
        # Write this image's pixels/rays into the flat buffers at [top, top+n).
        n = H * W
        rgb_tr[top:top+n].copy_(img.flatten(0,1))
        rays_o_tr[top:top+n].copy_(rays_o.flatten(0,1).to(DEVICE))
        rays_d_tr[top:top+n].copy_(rays_d.flatten(0,1).to(DEVICE))
        viewdirs_tr[top:top+n].copy_(viewdirs.flatten(0,1).to(DEVICE))
        imsz.append(n)
        top += n
    assert top == N
    eps_time = time.time() - eps_time
    print('get_training_rays_flatten: finish (eps time:', eps_time, 'sec)')
    return rgb_tr, rays_o_tr, rays_d_tr, viewdirs_tr, imsz
def batch_indices_generator(N, BS):
    """Endlessly yield shuffled index batches of size BS over range(N),
    reshuffling at the start of every pass (epoch)."""
    # torch.randperm on cuda produce incorrect results in my machine
    perm = torch.LongTensor(np.random.permutation(N))
    cursor = 0
    while True:
        if cursor + BS > N:
            # Not enough indices left for a full batch: start a new epoch.
            perm = torch.LongTensor(np.random.permutation(N))
            cursor = 0
        yield perm[cursor:cursor + BS]
        cursor += BS
@functools.lru_cache(maxsize=128)
def create_full_step_id(shape):
    """Return flattened (ray_id, step_id) index tensors for a dense
    (n_rays, n_steps) sample grid; results are cached per shape."""
    n_rays, n_steps = shape
    ray_id = torch.arange(n_rays).view(-1, 1).expand(n_rays, n_steps).flatten()
    step_id = torch.arange(n_steps).view(1, -1).expand(n_rays, n_steps).flatten()
    return ray_id, step_id
| 44,856 | 42.720273 | 162 | py |
Voxurf | Voxurf-main/lib/load_blendedmvs.py | import os
import glob
import torch
import numpy as np
import imageio
import json
import torch.nn.functional as F
import cv2
def load_blendedmvs_data(basedir):
    """Load a BlendedMVS-style scene directory.

    Expects basedir to contain pose/*.txt (4x4 c2w matrices), rgb/*.png
    (filenames starting with the split digit 0/1), intrinsics.txt, and
    test_traj.txt. Returns (imgs, poses, render_poses, [H, W, focal], K,
    i_split) with images normalized to float32 in [0, 1].
    """
    pose_paths = sorted(glob.glob(os.path.join(basedir, 'pose', '*txt')))
    rgb_paths = sorted(glob.glob(os.path.join(basedir, 'rgb', '*png')))
    all_poses = []
    all_imgs = []
    i_split = [[], []]
    for i, (pose_path, rgb_path) in enumerate(zip(pose_paths, rgb_paths)):
        # Leading digit of the filename encodes the split (0=train, 1=test).
        i_set = int(os.path.split(rgb_path)[-1][0])
        all_imgs.append((imageio.imread(rgb_path) / 255.).astype(np.float32))
        all_poses.append(np.loadtxt(pose_path).astype(np.float32))
        i_split[i_set].append(i)
    imgs = np.stack(all_imgs, 0)
    poses = np.stack(all_poses, 0)
    # Third split (validation/render) reuses the test indices.
    i_split.append(i_split[-1])
    """ delete single side """
    # ref_pos = poses[0][:,-1]
    # dist = ((poses[:,:,-1] - ref_pos[None]) ** 2).sum(-1)
    # i_select = np.argsort(dist)[:20]
    # i_split[0] = i_select.tolist()
    path_intrinsics = os.path.join(basedir, 'intrinsics.txt')
    H, W = imgs[0].shape[:2]
    K = np.loadtxt(path_intrinsics)
    focal = float(K[0,0])
    render_poses = torch.Tensor(np.loadtxt(os.path.join(basedir, 'test_traj.txt')).reshape(-1,4,4).astype(np.float32))
    return imgs, poses, render_poses, [H, W, focal], K, i_split
| 1,312 | 30.261905 | 118 | py |
Voxurf | Voxurf-main/lib/voxurf_womask_fine.py | import os
import time
import functools
import numpy as np
from copy import deepcopy
import cv2
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.nn.functional as F
from lib.dvgo_ori import extract_geometry
import math
import random
import copy
from . import grid
from torch_scatter import segment_coo
from torch.utils.cpp_extension import load
parent_dir = os.path.dirname(os.path.abspath(__file__))
# JIT-compile the CUDA extensions at import time. ub360_utils provides the
# cumulative-distance thresholding used for unbounded-360 background samples.
ub360_utils_cuda = load(
        name='ub360_utils_cuda',
        sources=[
            os.path.join(parent_dir, path)
            for path in ['cuda/ub360_utils.cpp', 'cuda/ub360_utils_kernel.cu']],
        verbose=True)
# render_utils provides ray point sampling and alpha-to-weight compositing.
render_utils_cuda = load(
        name='render_utils_cuda',
        sources=[
            os.path.join(parent_dir, path)
            for path in ['cuda/render_utils.cpp', 'cuda/render_utils_kernel.cu']],
        verbose=True)
'''Model'''
class Voxurf(torch.nn.Module):
"""
This module is modified from DirectVoxGO https://github.com/sunset1995/DirectVoxGO/blob/main/lib/dvgo.py
"""
def __init__(self, xyz_min, xyz_max,
num_voxels=0, num_voxels_bg=0, num_voxels_base=0,
alpha_init=None,
nearest=False,
mask_cache_path=None, mask_cache_thres=1e-3,
fast_color_thres=0, bg_fast_color_thres=0,
rgbnet_dim=0, rgbnet_direct=False, rgbnet_full_implicit=False,
rgbnet_depth=3, rgbnet_width=128,
posbase_pe=5, viewbase_pe=4,
center_sdf=False, grad_feat=(1.0,), sdf_feat=(),
use_layer_norm=False,
grad_mode='interpolate',
s_ratio=2000, s_start=0.2, s_learn=False, step_start=0,
smooth_sdf=False,
smooth_ksize=0, smooth_sigma=1,
k_rgbnet_depth=3, k_res=False, k_posbase_pe=5, k_viewbase_pe=4,
k_center_sdf=False, k_grad_feat=(1.0,), k_sdf_feat=(),
smooth_scale=True, use_grad_norm=True,
use_rgb_k=True, k_detach_1=True, k_detach_2=True,
use_rgbnet_k0=False,
bg_rgbnet_dim=12, bg_posbase_pe=5, bg_viewbase_pe=4,
bg_rgbnet_width=128, bg_rgbnet_depth=3, tv_in_sphere=False,
init_ball_scale=0.5, init_bg_density_fix=False, set_sphere_freq=20000,
**kwargs):
super(Voxurf, self).__init__()
self.register_buffer('xyz_min', torch.Tensor(xyz_min))
self.register_buffer('xyz_max', torch.Tensor(xyz_max))
self.fast_color_thres = fast_color_thres
self.bg_fast_color_thres = bg_fast_color_thres
self.nearest = nearest
self.set_sphere_freq = set_sphere_freq
self.init_bg_density_fix = init_bg_density_fix
self.tv_in_sphere = tv_in_sphere
self.init_ball_scale = init_ball_scale
self.s_ratio = s_ratio
self.s_start = s_start
self.s_learn = s_learn
self.step_start = step_start
self.s_val = nn.Parameter(torch.ones(1), requires_grad=s_learn).cuda()
self.s_val.data *= s_start
self.smooth_sdf = smooth_sdf
self.sdf_init_mode = "ball_init"
# determine based grid resolution
self.num_voxels_base = num_voxels_base
self.voxel_size_base = ((self.xyz_max - self.xyz_min).prod() / self.num_voxels_base).pow(1/3)
# determine the density bias shift
self.alpha_init = alpha_init
self.act_shift = np.log(1/(1-alpha_init) - 1)
print('dvgo: set density bias shift to', self.act_shift)
# determine init grid resolution
self._set_grid_resolution(num_voxels, num_voxels_bg)
self.density = torch.nn.Parameter(torch.zeros([1, 1, *self.world_size]))
self.bg_density = grid.create_grid(
'DenseGrid', channels=1, world_size=self.world_size_bg,
xyz_min=self.xyz_min, xyz_max=self.xyz_max)
if self.sdf_init_mode == "ball_init":
self.sdf = grid.create_grid(
'DenseGrid', channels=1, world_size=self.world_size,
xyz_min=self.xyz_min, xyz_max=self.xyz_max)
x_min, y_min, z_min = self.xyz_min.cpu().numpy()
x_max, y_max, z_max = self.xyz_max.cpu().numpy()
x, y, z = np.mgrid[x_min:x_max:self.world_size[0].item() * 1j, y_min:y_max:self.world_size[1].item() * 1j, z_min:z_max:self.world_size[2].item() * 1j]
self.sdf.grid.data = torch.from_numpy((x ** 2 + y ** 2 + z ** 2) ** 0.5 - self.init_ball_scale).float()[None, None, ...]
elif self.sdf_init_mode == "random":
self.sdf = torch.nn.Parameter(torch.rand([1, 1, *self.world_size]) * 0.05) # random initialization
torch.nn.init.normal_(self.sdf, 0.0, 0.5)
else:
raise NotImplementedError
self.init_smooth_conv(smooth_ksize, smooth_sigma)
self.smooth_scale = smooth_scale
# init color representation
self.rgbnet_kwargs = {
'rgbnet_dim': rgbnet_dim, 'rgbnet_direct': rgbnet_direct,
'rgbnet_full_implicit': rgbnet_full_implicit,
'rgbnet_depth': rgbnet_depth, 'rgbnet_width': rgbnet_width,
'posbase_pe': posbase_pe, 'viewbase_pe': viewbase_pe,
}
self.k0_dim = rgbnet_dim
self.bg_k0_dim = bg_rgbnet_dim
self.k0 = grid.create_grid(
'DenseGrid', channels=self.k0_dim, world_size=self.world_size,
xyz_min=self.xyz_min, xyz_max=self.xyz_max)
self.bg_k0 = grid.create_grid(
'DenseGrid', channels=self.bg_k0_dim, world_size=self.world_size_bg,
xyz_min=self.xyz_min, xyz_max=self.xyz_max)
self.rgbnet_direct = rgbnet_direct
self.register_buffer('posfreq', torch.FloatTensor([(2**i) for i in range(posbase_pe)]))
self.register_buffer('viewfreq', torch.FloatTensor([(2**i) for i in range(viewbase_pe)]))
self.use_xyz = posbase_pe >= 0
self.use_view = viewbase_pe >= 0
dim0 = 0
if self.use_xyz:
dim0 += (3 + 3 * posbase_pe * 2)
if self.use_view:
dim0 += (3 + 3 * viewbase_pe * 2)
self.use_grad_norm = use_grad_norm
self.center_sdf = center_sdf
self.grad_feat = grad_feat
self.sdf_feat = sdf_feat
self.use_rgb_k = use_rgb_k
self.k_detach_1 = k_detach_1
self.k_detach_2 = k_detach_2
self.use_rgbnet_k0 = use_rgbnet_k0
self.use_layer_norm = use_layer_norm
dim0 += len(self.grad_feat) * 3
dim0 += len(self.sdf_feat) * 6
if self.use_rgbnet_k0:
dim0 += self.k0_dim
if self.center_sdf:
dim0 += 1
if not self.use_layer_norm:
self.rgbnet = nn.Sequential(
nn.Linear(dim0, rgbnet_width), nn.ReLU(inplace=True),
*[
nn.Sequential(nn.Linear(rgbnet_width, rgbnet_width), nn.ReLU(inplace=True))
for _ in range(rgbnet_depth-2)
],
nn.Linear(rgbnet_width, 3),
)
else:
self.rgbnet = nn.Sequential(
nn.Linear(dim0, rgbnet_width), nn.LayerNorm(rgbnet_width), nn.ReLU(inplace=True),
*[
nn.Sequential(nn.Linear(rgbnet_width, rgbnet_width), nn.LayerNorm(rgbnet_width), nn.ReLU(inplace=True))
for _ in range(rgbnet_depth-2)
],
nn.Linear(rgbnet_width, 3),
)
nn.init.constant_(self.rgbnet[-1].bias, 0)
print('feature voxel grid', self.k0.grid.shape)
print('mlp', self.rgbnet)
# the second rgb net
self.k_res = k_res
self.k_center_sdf = k_center_sdf
self.k_grad_feat = k_grad_feat
self.k_sdf_feat = k_sdf_feat
self.register_buffer('k_posfreq', torch.FloatTensor([(2**i) for i in range(k_posbase_pe)]))
self.register_buffer('k_viewfreq', torch.FloatTensor([(2**i) for i in range(k_viewbase_pe)]))
self.k_use_xyz = k_posbase_pe >= 0
self.k_use_view = k_viewbase_pe >= 0
k_dim0 = (3+3*k_posbase_pe*2) + (3+3*k_viewbase_pe*2) + self.k0_dim
if self.k_res:
k_dim0 += 3
if self.k_center_sdf:
k_dim0 += 1
k_dim0 += len(self.k_grad_feat) * 3
k_dim0 += len(self.k_sdf_feat) * 6
if not self.use_layer_norm:
self.k_rgbnet = nn.Sequential(
nn.Linear(k_dim0, rgbnet_width), nn.ReLU(inplace=True),
*[
nn.Sequential(nn.Linear(rgbnet_width, rgbnet_width), nn.ReLU(inplace=True))
for _ in range(k_rgbnet_depth-2)
],
nn.Linear(rgbnet_width, 3),
)
else:
self.k_rgbnet = nn.Sequential(
nn.Linear(k_dim0, rgbnet_width), nn.LayerNorm(rgbnet_width), nn.ReLU(inplace=True),
*[
nn.Sequential(nn.Linear(rgbnet_width, rgbnet_width), nn.LayerNorm(rgbnet_width), nn.ReLU(inplace=True))
for _ in range(k_rgbnet_depth-2)
],
nn.Linear(rgbnet_width, 3),
)
nn.init.constant_(self.rgbnet[-1].bias, 0)
print('feature voxel grid', self.bg_k0.grid.shape)
print('k_rgbnet mlp', self.k_rgbnet)
self.bg_rgbnet_width=bg_rgbnet_width
self.bg_rgbnet_depth=bg_rgbnet_depth
self.register_buffer('bg_posfreq', torch.FloatTensor([(2**i) for i in range(bg_posbase_pe)]))
self.register_buffer('bg_viewfreq', torch.FloatTensor([(2**i) for i in range(bg_viewbase_pe)]))
self.bg_use_xyz = bg_posbase_pe >= 0
self.bg_use_view = bg_viewbase_pe >= 0
bg_dim0 = (3+3*bg_posbase_pe*2) + (3+3*bg_viewbase_pe*2) + self.bg_k0_dim
if not self.use_layer_norm:
self.bg_rgbnet = nn.Sequential(
nn.Linear(bg_dim0, bg_rgbnet_width), nn.ReLU(inplace=True),
*[
nn.Sequential(nn.Linear(bg_rgbnet_width, bg_rgbnet_width),
nn.ReLU(inplace=True))
for _ in range(bg_rgbnet_depth - 2)
],
nn.Linear(bg_rgbnet_width, 3),
)
else:
self.bg_rgbnet = nn.Sequential(
nn.Linear(bg_dim0, bg_rgbnet_width), nn.ReLU(inplace=True),
*[
nn.Sequential(nn.Linear(bg_rgbnet_width, bg_rgbnet_width),
nn.ReLU(inplace=True))
for _ in range(bg_rgbnet_depth - 2)
],
nn.Linear(bg_rgbnet_width, 3),
)
nn.init.constant_(self.bg_rgbnet[-1].bias, 0)
print('bg mlp', self.bg_rgbnet)
# Using the coarse geometry if provided (used to determine known free space and unknown space)
mask_cache_path = None
mask_cache_thres = None
self.mask_cache_path = mask_cache_path
self.mask_cache_thres = mask_cache_thres
if mask_cache_path is not None and mask_cache_path:
self.mask_cache = MaskCache(
path=mask_cache_path,
mask_cache_thres=mask_cache_thres).to(self.xyz_min.device)
self._set_nonempty_mask()
else:
self.mask_cache = None
self.nonempty_mask = None
self._set_sphere_nonempty_mask()
# grad conv to calculate gradient
self.init_gradient_conv()
print(" ",self.xyz_min)
self.grad_mode = grad_mode
self.global_step = 0
    def init_gradient_conv(self, sigma = 0):
        """Build two frozen 3D convolutions:

        * ``self.grad_conv`` — a 3-channel Sobel-like finite-difference
          operator over the SDF grid (one output channel per axis), scaled by
          the voxel size so outputs are spatial derivatives.
        * ``self.tv_smooth_conv`` — a normalized smoothing kernel used by the
          smooth-gradient TV regularizer.

        Both have all parameters frozen (requires_grad=False).
        """
        self.grad_conv = nn.Conv3d(1,3,(3,3,3),stride=(1,1,1), padding=(1, 1, 1), padding_mode='replicate')
        # fixme: a better operator?
        # Separable 1-2-1 smoothing weights in each plane (Sobel-style base).
        kernel = np.asarray([
            [[1,2,1],[2,4,2],[1,2,1]],
            [[2,4,2],[4,8,4],[2,4,2]],
            [[1,2,1],[2,4,2],[1,2,1]],
        ])
        # sigma controls the difference between naive [-1,1] and sobel kernel
        distance = np.zeros((3,3,3))
        for i in range(3):
            for j in range(3):
                for k in range(3):
                    distance[i,j,k] = ((i-1)**2 + (j-1)**2 + (k-1)**2 - 1)
        kernel0 = kernel * np.exp(-distance * sigma)
        # Normalize so a unit step across one voxel yields a unit derivative.
        kernel1 = kernel0 / ( kernel0[0].sum() * 2 * self.voxel_size.item())
        weight = torch.from_numpy(np.concatenate([kernel1[None] for _ in range(3)])).float()
        # Turn the smoothing kernel into a central difference along each axis:
        # zero the middle slice, negate the "minus" side.
        weight[0,1,:,:] *= 0
        weight[0,0,:,:] *= -1
        weight[1,:,1,:] *= 0
        weight[1,:,0,:] *= -1
        weight[2,:,:,1] *= 0
        weight[2,:,:,0] *= -1
        # print("- "*10 + "init gradient conv done" + " -"*10)
        self.grad_conv.weight.data = weight.unsqueeze(1).float()
        self.grad_conv.bias.data = torch.zeros(3)
        for param in self.grad_conv.parameters():
            param.requires_grad = False
        # smooth conv for TV
        self.tv_smooth_conv = nn.Conv3d(1, 1, (3, 3, 3), stride=1, padding=1, padding_mode='replicate')
        weight = torch.from_numpy(kernel0 / kernel0.sum()).float()
        self.tv_smooth_conv.weight.data = weight.unsqueeze(0).unsqueeze(0).float()
        self.tv_smooth_conv.bias.data = torch.zeros(1)
        for param in self.tv_smooth_conv.parameters():
            param.requires_grad = False
def _gaussian_3dconv(self, ksize=3, sigma=1):
x = np.arange(-(ksize//2),ksize//2 + 1,1)
y = np.arange(-(ksize//2),ksize//2 + 1,1)
z = np.arange(-(ksize//2),ksize//2 + 1,1)
xx, yy, zz = np.meshgrid(x,y,z)
kernel = np.exp(-(xx**2 + yy**2 + zz**2)/(2*sigma**2))
kernel = torch.from_numpy(kernel).to(self.sdf.grid)
m = nn.Conv3d(1,1,ksize,stride=1,padding=ksize//2, padding_mode='replicate')
m.weight.data = kernel[None, None, ...] / kernel.sum()
m.bias.data = torch.zeros(1)
for param in m.parameters():
param.requires_grad = False
# print(kernel)
return m
def init_smooth_conv_test_k3(self, ksize=3, sigma=0.4):
self.smooth_conv_test_k3 = self._gaussian_3dconv(ksize, sigma)
print("- "*10 + "init smooth conv test with ksize={} and sigma={}".format(ksize, sigma) + " -"*10)
def init_smooth_conv_test_k5(self, ksize=5, sigma=0.4):
self.smooth_conv_test_k5 = self._gaussian_3dconv(ksize, sigma)
print("- "*10 + "init smooth conv test with ksize={} and sigma={}".format(ksize, sigma) + " -"*10)
def init_smooth_conv(self, ksize=3, sigma=1):
self.smooth_sdf = ksize > 0
if self.smooth_sdf:
self.smooth_conv = self._gaussian_3dconv(ksize, sigma)
print("- "*10 + "init smooth conv with ksize={} and sigma={}".format(ksize, sigma) + " -"*10)
    def init_sdf_from_sdf(self, sdf0=None, smooth=False, reduce=1., ksize=3, sigma=1., zero2neg=True):
        """Initialize this model's SDF grid from a given SDF volume ``sdf0``
        (e.g. from a coarser training stage), then refresh the cached gradient.

        ``reduce`` rescales the source values; ``smooth`` applies a Gaussian
        blur first. ``zero2neg`` is currently unused in this body.
        """
        print("\n", "- "*3 + "initing sdf from sdf" + " -"*3, "\n")
        if sdf0.shape != self.sdf.grid.shape:
            # Resample the source volume to this model's grid resolution.
            sdf0 = F.interpolate(sdf0, size=tuple(self.world_size), mode='trilinear', align_corners=True)
        if smooth:
            m = self._gaussian_3dconv(ksize, sigma)
            sdf_data = m(sdf0 / reduce)
            # NOTE(review): `reduce` is applied twice on this branch (above and
            # below), and dividing a Parameter yields a plain tensor assigned to
            # .grid — looks unintended; confirm against the coarse-stage caller.
            self.sdf.grid = torch.nn.Parameter(sdf_data).to(self.sdf.grid) / reduce
        else:
            self.sdf.grid.data = sdf0.to(self.sdf.grid) / reduce # + self.act_shift
        if self.mask_cache is not None:
            self._set_nonempty_mask()
        if self.smooth_scale:
            # Extra fixed 5x5x5 Gaussian smoothing pass after initialization.
            m = self._gaussian_3dconv(ksize=5, sigma=1)
            with torch.no_grad():
                self.sdf.grid = torch.nn.Parameter(m(self.sdf.grid.data)).cuda()
        # Cache the finite-difference gradient field for later use.
        self.gradient = self.neus_sdf_gradient()
    def init_bg_density_from_bg_density(self, bg_density):
        """Initialize the background density grid from a given density volume
        (e.g. from a coarser stage), resampling if resolutions differ."""
        print("\n", "- "*3 + "initing bg_density from bg_density" + " -"*3, "\n")
        if bg_density.shape != self.bg_density.grid.shape:
            if self.init_bg_density_fix:
                # Resample to the background grid resolution.
                bg_density = F.interpolate(bg_density, size=tuple(self.world_size_bg), mode='trilinear', align_corners=True)
            else:
                # Legacy path: resamples to the foreground resolution instead —
                # presumably kept for backward compatibility; gated by the
                # `init_bg_density_fix` config flag.
                bg_density = F.interpolate(bg_density, size=tuple(self.world_size), mode='trilinear', align_corners=True)
        self.bg_density.grid.data = bg_density.to(self.bg_density.grid)
        if self.mask_cache is not None:
            self._set_nonempty_mask()
def _set_grid_resolution(self, num_voxels, num_voxels_bg=0):
# Determine grid resolution
if num_voxels_bg == 0:
num_voxels_bg = num_voxels
self.num_voxels = num_voxels
self.num_voxels_bg = num_voxels_bg
self.voxel_size = ((self.xyz_max - self.xyz_min).prod() / num_voxels).pow(1/3)
self.voxel_size_bg = ((self.xyz_max - self.xyz_min).prod() / num_voxels_bg).pow(1/3)
self.world_size = ((self.xyz_max - self.xyz_min) / self.voxel_size).long()
self.world_size_bg = ((self.xyz_max - self.xyz_min) / self.voxel_size_bg).long()
self.voxel_size_ratio = self.voxel_size / self.voxel_size_base
print('voxel_size ', self.voxel_size)
print('voxel_size_bg ', self.voxel_size_bg)
print('world_size ', self.world_size)
print('world_size_bg ', self.world_size_bg)
print('voxel_size_base ', self.voxel_size_base)
print('voxel_size_ratio', self.voxel_size_ratio)
def get_kwargs(self):
return {
'xyz_min': self.xyz_min.cpu().numpy(),
'xyz_max': self.xyz_max.cpu().numpy(),
'num_voxels': self.num_voxels,
'num_voxels_base': self.num_voxels_base,
'alpha_init': self.alpha_init,
'nearest': self.nearest,
'mask_cache_path': self.mask_cache_path,
'mask_cache_thres': self.mask_cache_thres,
'fast_color_thres': self.fast_color_thres,
'grad_feat': self.grad_feat,
'sdf_feat': self.sdf_feat,
'k_grad_feat': self.k_grad_feat,
'k_sdf_feat': self.k_sdf_feat,
**self.rgbnet_kwargs,
}
def get_MaskCache_kwargs(self):
return {
'xyz_min': self.xyz_min.cpu().numpy(),
'xyz_max': self.xyz_max.cpu().numpy(),
'act_shift': self.act_shift,
'voxel_size_ratio': self.voxel_size_ratio,
'nearest': self.nearest,
}
    @torch.no_grad()
    def _set_nonempty_mask(self):
        """Query the coarse MaskCache at every SDF-grid vertex, cache the
        resulting occupancy mask, and force the SDF to +1 (empty) outside it."""
        # Find grid points that is inside nonempty (occupied) space
        self_grid_xyz = torch.stack(torch.meshgrid(
            torch.linspace(self.xyz_min[0], self.xyz_max[0], self.sdf.grid.shape[2]),
            torch.linspace(self.xyz_min[1], self.xyz_max[1], self.sdf.grid.shape[3]),
            torch.linspace(self.xyz_min[2], self.xyz_max[2], self.sdf.grid.shape[4]),
        ), -1)
        nonempty_mask = self.mask_cache(self_grid_xyz)[None,None].contiguous()
        # Re-assign on refresh; register as a buffer only the first time.
        if hasattr(self, 'nonempty_mask'):
            self.nonempty_mask = nonempty_mask
        else:
            self.register_buffer('nonempty_mask', nonempty_mask)
        # self.bg_density.grid[~self.nonempty_mask] = -100
        self.sdf.grid[~self.nonempty_mask] = 1
        print('- '*10, 'setting mask cache!', ' -'*10)
@torch.no_grad()
def _set_sphere_nonempty_mask(self):
self_grid_xyz = torch.stack(torch.meshgrid(
torch.linspace(self.xyz_min[0], self.xyz_max[0], self.sdf.grid.shape[2]),
torch.linspace(self.xyz_min[1], self.xyz_max[1], self.sdf.grid.shape[3]),
torch.linspace(self.xyz_min[2], self.xyz_max[2], self.sdf.grid.shape[4]),
), -1)
nonempty_mask = (self_grid_xyz[...,0] ** 2 + self_grid_xyz[...,1] ** 2 + self_grid_xyz[...,2] ** 2) < 1.
nonempty_mask = nonempty_mask[None, None]
self.sphere_mask = nonempty_mask
self.sdf.grid[~self.sphere_mask] = 1
@torch.no_grad()
def maskout_near_cam_vox(self, cam_o, near):
self_grid_xyz = torch.stack(torch.meshgrid(
torch.linspace(self.xyz_min[0], self.xyz_max[0], self.sdf.grid.shape[2]),
torch.linspace(self.xyz_min[1], self.xyz_max[1], self.sdf.grid.shape[3]),
torch.linspace(self.xyz_min[2], self.xyz_max[2], self.sdf.grid.shape[4]),
), -1)
nearest_dist = torch.stack([
(self_grid_xyz.unsqueeze(-2) - co).pow(2).sum(-1).sqrt().amin(-1)
for co in cam_o.split(100) # for memory saving
]).amin(0)
# self.bg_density.grid[nearest_dist[None,None] <= near] = -100
self.sdf.grid[nearest_dist[None,None] <= near] = 1
    @torch.no_grad()
    def scale_volume_grid(self, num_voxels, num_voxels_bg=0):
        """Resample every voxel grid to a new resolution derived from the
        given voxel budgets (progressive scaling during training)."""
        print('scale_volume_grid start')
        ori_world_size = self.world_size
        self._set_grid_resolution(num_voxels, num_voxels_bg)
        print('scale_volume_grid scale world_size from', ori_world_size, 'to', self.world_size)
        if num_voxels_bg > 0:
            ori_world_size_bg = self.world_size_bg
            print('scale_volume_grid scale [background] world_size from', ori_world_size_bg, 'to', self.world_size_bg)
        # Grid objects resample themselves; the raw density Parameter is
        # resampled explicitly with trilinear interpolation below.
        self.sdf.scale_volume_grid(self.world_size)
        self.bg_density.scale_volume_grid(self.world_size_bg)
        if self.k0_dim > 0:
            self.k0.scale_volume_grid(self.world_size)
        if self.bg_k0_dim > 0:
            self.bg_k0.scale_volume_grid(self.world_size_bg)
        self.density = torch.nn.Parameter(
            F.interpolate(self.density.data, size=tuple(self.world_size), mode='trilinear', align_corners=True))
        # Masks are resolution-dependent, so refresh them.
        if self.mask_cache is not None:
            self._set_nonempty_mask()
        self._set_sphere_nonempty_mask()
        print('scale_volume_grid finish')
def bg_density_total_variation_add_grad(self, weight, dense_mode):
w = weight * self.world_size.max() / 128
self.bg_density.total_variation_add_grad(w, w, w, dense_mode)
def sdf_total_variation_add_grad(self, weight, dense_mode):
w = weight * self.world_size.max() / 128
self.sdf.total_variation_add_grad(w, w, w, dense_mode)
def k0_total_variation_add_grad(self, weight, dense_mode):
w = weight * self.world_size.max() / 128
self.k0.total_variation_add_grad(w, w, w, dense_mode)
def bg_k0_total_variation_add_grad(self, weight, dense_mode):
w = weight * self.world_size.max() / 128
self.bg_k0.total_variation_add_grad(w, w, w, dense_mode)
    def density_total_variation(self, sdf_tv=0, smooth_grad_tv=0, bg_density_tv=0.):
        """Total-variation style losses on the geometry grids.

        ``total_variation`` is a module-level helper defined elsewhere in this
        file. ``smooth_grad_tv`` penalizes deviation of the cached SDF gradient
        (``self.gradient``, set by neus_sdf_gradient) from its smoothed version.
        """
        nonempty_mask = self.sphere_mask if self.nonempty_mask is None else self.nonempty_mask
        if not self.tv_in_sphere:
            # NOTE(review): this writes 1 into the cached mask tensor in place,
            # permanently disabling the mask after the first call — confirm
            # this is intended (the sphere mask is periodically recomputed).
            nonempty_mask[...] = 1
        tv = 0
        if sdf_tv > 0:
            tv += total_variation(self.sdf.grid, nonempty_mask) / 2 / self.voxel_size * sdf_tv
        if smooth_grad_tv > 0:
            smooth_tv_error = (self.tv_smooth_conv(self.gradient.permute(1,0,2,3,4)).detach() - self.gradient.permute(1,0,2,3,4))
            smooth_tv_error = smooth_tv_error[nonempty_mask.repeat(3,1,1,1,1)] ** 2
            tv += smooth_tv_error.mean() * smooth_grad_tv
        if bg_density_tv > 0:
            tv += total_variation(self.bg_density.grid) / 2 / self.voxel_size * bg_density_tv
        return tv
    def k0_total_variation(self, k0_tv=1., k0_grad_tv=0.):
        """Total-variation loss on the foreground color-feature grid.

        When no color MLP is used the grid stores raw colors, so TV is applied
        after a sigmoid. ``k0_grad_tv`` is not implemented.
        """
        nonempty_mask = self.sphere_mask if self.nonempty_mask is None else self.nonempty_mask
        if not self.tv_in_sphere:
            # NOTE(review): in-place mutation of the cached mask (see
            # density_total_variation) — confirm intended.
            nonempty_mask[...] = 1
        if self.rgbnet is not None:
            v = self.k0.grid
        else:
            v = torch.sigmoid(self.k0.grid)
        tv = 0
        if k0_tv > 0:
            tv += total_variation(v, nonempty_mask.repeat(1,v.shape[1],1,1,1))
        if k0_grad_tv > 0:
            raise NotImplementedError
        return tv
    def bg_k0_total_variation(self, bg_k0_tv=1., bg_k0_grad_tv=0.):
        """Total-variation loss on the background color-feature grid.

        Mirrors ``k0_total_variation``; ``bg_k0_grad_tv`` is not implemented.
        NOTE(review): the mask used here has the foreground grid's resolution
        while bg_k0 may be sized differently — the `.repeat` below presumably
        relies on matching shapes; verify when world_size_bg != world_size.
        """
        nonempty_mask = self.sphere_mask if self.nonempty_mask is None else self.nonempty_mask
        if not self.tv_in_sphere:
            nonempty_mask[...] = 1
        if self.rgbnet is not None:
            v = self.bg_k0.grid
        else:
            v = torch.sigmoid(self.bg_k0.grid)
        tv = 0
        if bg_k0_tv > 0:
            tv += total_variation(v, nonempty_mask.repeat(1,v.shape[1],1,1,1))
        if bg_k0_grad_tv > 0:
            raise NotImplementedError
        return tv
def activate_density(self, density, interval=None):
interval = interval if interval is not None else self.voxel_size_ratio
return 1 - torch.exp(-F.softplus(density + self.act_shift) * interval)
def activate_sdf(self, sdf, s=10):
return sdf
def neus_sdf_gradient(self, mode=None, sdf=None):
if sdf is None:
sdf = self.sdf.grid
if mode is None:
mode = self.grad_mode
if mode == 'interpolate':
gradient = torch.zeros([1, 3] + [*self.sdf.grid.shape[-3:]]).to(self.sdf.grid.device)
gradient[:,0,1:-1,:,:] = (sdf[:,0,2:,:,:] - sdf[:,0,:-2,:,:]) / 2 / self.voxel_size
gradient[:,1,:,1:-1,:] = (sdf[:,0,:,2:,:] - sdf[:,0,:,:-2,:]) / 2 / self.voxel_size
gradient[:,2,:,:,1:-1] = (sdf[:,0,:,:,2:] - sdf[:,0,:,:,:-2]) / 2 / self.voxel_size
elif mode == 'grad_conv':
""""""
# use sobel operator for gradient seems basically the same as the naive solution
for param in self.grad_conv.parameters():
assert not param.requires_grad
pass
gradient = self.grad_conv(sdf)
elif mode == 'raw':
gradient = torch.zeros([1, 3] + [*self.sdf.grid.shape[-3:]]).to(self.sdf.grid.device)
gradient[:,0,:-1,:,:] = (sdf[:,0,1:,:,:] - sdf[:,0,:-1,:,:]) / self.voxel_size
gradient[:,1,:,:-1,:] = (sdf[:,0,:,1:,:] - sdf[:,0,:,:-1,:]) / self.voxel_size
gradient[:,2,:,:,:-1] = (sdf[:,0,:,:,1:] - sdf[:,0,:,:,:-1]) / self.voxel_size
else:
raise NotImplementedError
return gradient
    def neus_alpha_from_sdf_scatter(self, viewdirs, ray_id, dist, sdf, gradients, global_step,
                    is_train, use_mid=True):
        """NeuS-style alpha from SDF for flattened (scattered) samples.

        ``sdf``/``gradients`` are per-sample (M,)/(M, 3); ``ray_id`` maps each
        sample back to its ray so view directions can be gathered. During
        training the sharpness 1/s follows the schedule
        ``s = s_ratio / (global_step + s_ratio / s_start - step_start)`` unless
        ``self.s_learn`` is set. Returns ``(s_val, alpha)``.
        """
        if is_train:
            if not self.s_learn:
                s_val = 1. / (global_step + self.s_ratio / self.s_start - self.step_start) * self.s_ratio
                self.s_val.data = torch.ones_like(self.s_val) * s_val
            else:
                s_val = self.s_val.item()
        else:
            # Returned s_val is only informational at eval time; the stored
            # self.s_val is still what drives the sigmoid below.
            s_val = 0
        dirs = viewdirs[ray_id]
        inv_s = torch.ones(1).cuda() / self.s_val
        assert use_mid
        if use_mid:
            # Signed cosine between ray direction and surface normal.
            true_cos = (dirs * gradients).sum(-1, keepdim=True)
            cos_anneal_ratio = 1.0
            iter_cos = -(F.relu(-true_cos * 0.5 + 0.5) * (1.0 - cos_anneal_ratio) +
                         F.relu(-true_cos) * cos_anneal_ratio)  # always non-positive (M, 1)
            sdf = sdf.unsqueeze(-1)  # (M, 1)
            # dist is a constant in this impelmentation
            # Estimate signed distances at section points
            estimated_next_sdf = sdf + iter_cos * dist.reshape(-1, 1) * 0.5  # (M, 1)
            estimated_prev_sdf = sdf - iter_cos * dist.reshape(-1, 1) * 0.5  # (M, 1)
        else:
            estimated_next_sdf = torch.cat([sdf[..., 1:], sdf[..., -1:]], -1).reshape(-1, 1)
            estimated_prev_sdf = torch.cat([sdf[..., :1], sdf[..., :-1]], -1).reshape(-1, 1)
        # Alpha is the normalized CDF difference of the logistic density.
        prev_cdf = torch.sigmoid(estimated_prev_sdf * inv_s.reshape(-1, 1))
        next_cdf = torch.sigmoid(estimated_next_sdf * inv_s.reshape(-1, 1))
        p = prev_cdf - next_cdf
        c = prev_cdf
        alpha = ((p + 1e-5) / (c + 1e-5)).clip(0.0, 1.0).squeeze()
        return s_val, alpha
    def neus_alpha_from_sdf(self, viewdirs, steps, sdf, gradients, global_step, is_train, use_mid=True):
        """NeuS-style alpha from SDF for batched (rays x samples) layout.

        ``steps`` are per-sample depths used to derive section lengths; see
        ``neus_alpha_from_sdf_scatter`` for the flattened variant and the
        sharpness schedule. Returns ``(s_val, alpha)`` where alpha is
        (batch, n_samples), reshaped back to the input ray layout at eval time.
        """
        ori_shape = viewdirs.shape
        n_samples = steps.shape[-1]
        # force s_val value to change with global step
        if is_train:
            batch_size = steps.shape[0]
            if not self.s_learn:
                s_val = 1. / (global_step + self.s_ratio / self.s_start - self.step_start) * self.s_ratio
                self.s_val.data = torch.ones_like(self.s_val) * s_val
            else:
                s_val = self.s_val.item()
        else:
            # Eval path expects image-shaped inputs (H, W, 3) and flattens them.
            assert len(viewdirs.size()) > 2
            dirs = viewdirs.reshape(-1, 3)
            steps = steps.reshape(-1, n_samples)
            batch_size = dirs.shape[0]
            s_val = 0
        if steps.shape[0] == 1:
            steps = steps.repeat(batch_size,1)
            dirs = viewdirs.unsqueeze(-2)
        inv_s = torch.ones(1).cuda() / self.s_val  # * torch.exp(-inv_s)
        inv_s = inv_s.expand(batch_size * n_samples, 1)
        if use_mid:
            true_cos = (dirs * gradients).sum(-1, keepdim=True)
            cos_anneal_ratio = 1.0
            iter_cos = -(F.relu(-true_cos * 0.5 + 0.5) * (1.0 - cos_anneal_ratio) +
                         F.relu(-true_cos) * cos_anneal_ratio)  # always non-positive
            iter_cos = iter_cos.reshape(-1,1)
            sdf = sdf.reshape(-1, 1)
            # calculate dist from steps / z_vals
            dists = steps[..., 1:] - steps[..., :-1]
            dists = torch.cat([dists, torch.Tensor([dists.mean()]).expand(dists[..., :1].shape)], -1)
            # Estimate signed distances at section points
            estimated_next_sdf = sdf + iter_cos * dists.reshape(-1, 1) * 0.5
            estimated_prev_sdf = sdf - iter_cos * dists.reshape(-1, 1) * 0.5
        else:
            estimated_next_sdf = torch.cat([sdf[...,1:], sdf[...,-1:]], -1).reshape(-1, 1)
            estimated_prev_sdf = torch.cat([sdf[...,:1], sdf[...,:-1]], -1).reshape(-1, 1)
        # when the interval is not the same, the inv_s should not be the same? or it should be
        prev_cdf = torch.sigmoid(estimated_prev_sdf * inv_s)
        next_cdf = torch.sigmoid(estimated_next_sdf * inv_s)
        p = prev_cdf - next_cdf
        c = prev_cdf
        alpha = ((p + 1e-5) / (c + 1e-5)).reshape(batch_size, n_samples).clip(0.0, 1.0)
        if not is_train:
            alpha = alpha.reshape((ori_shape[0], ori_shape[1], n_samples))
        return s_val, alpha #, weights
def grid_sampler(self, xyz, *grids, mode=None, align_corners=True, sample_ret=True, sample_grad=False, displace=0.1, smooth=False):
'''Wrapper for the interp operation'''
if mode is None:
# bilinear is actually trilinear if 5D input is given to grid_sample
mode = 'nearest' if self.nearest else 'bilinear'
shape = xyz.shape[:-1]
xyz = xyz.reshape(1,1,1,-1,3)
if smooth:
grid = self.smooth_conv(grids[0])
grids[0] = grid
outs = []
if sample_ret:
ind_norm = ((xyz - self.xyz_min) / (self.xyz_max - self.xyz_min)).flip((-1,)) * 2 - 1
grid = grids[0]
ret = F.grid_sample(grid, ind_norm, mode=mode, align_corners=align_corners).reshape(
grid.shape[1],-1).T.reshape(*shape,grid.shape[1]).squeeze(-1)
outs.append(ret)
if sample_grad:
grid = grids[0]
feat, grad = self.sample_sdfs(xyz, grid, displace_list=[1.0], use_grad_norm=False)
feat = torch.cat([feat[:, 4:6], feat[:, 2:4], feat[:, 0:2]], dim=-1)
grad = torch.cat([grad[:, [2]], grad[:, [1]], grad[:, [0]]], dim=-1)
outs.append(grad)
outs.append(feat)
if len(outs) == 1:
return outs[0]
else:
return outs
    def sample_sdfs(self, xyz, *grids, displace_list, mode='bilinear', align_corners=True, use_grad_norm=False):
        """Sample ``grids[0]`` at the 6 axis-neighbors of each query point for
        every displacement in ``displace_list`` (in voxel units), and derive
        central-difference gradients from the +/- pairs.

        Returns ``(feat, grad)`` flattened to (N, 6*len(displace_list)) and
        (N, 3*len(displace_list)); gradients are unit-normalized when
        ``use_grad_norm``. Channel order follows the zyx index convention.
        Requires CUDA (index tensors are created with .cuda()).
        """
        shape = xyz.shape[:-1]
        xyz = xyz.reshape(1,1,1,-1,3)
        grid = grids[0]
        # ind from xyz to zyx !!!!!
        ind_norm = ((xyz - self.xyz_min) / (self.xyz_max - self.xyz_min)).flip((-1,)) * 2 - 1
        grid_size = grid.size()[-3:]
        size_factor_zyx = torch.tensor([grid_size[2], grid_size[1], grid_size[0]]).cuda()
        # Continuous voxel-space index of each query point.
        ind = ((ind_norm + 1) / 2) * (size_factor_zyx - 1)
        # Six axis-aligned unit offsets, scaled by each displacement.
        offset = torch.tensor([[-1, 0, 0], [1, 0, 0], [0, -1, 0], [0, 1, 0], [0, 0, -1], [0, 0, 1]]).cuda()
        displace = torch.tensor(displace_list).cuda()
        offset = offset[:, None, :] * displace[None, :, None]
        all_ind = ind.unsqueeze(-2) + offset.view(-1, 3)
        all_ind = all_ind.view(1, 1, 1, -1, 3)
        # Clamp neighbors to the grid so border queries stay valid.
        all_ind[..., 0] = all_ind[..., 0].clamp(min=0, max=size_factor_zyx[0] - 1)
        all_ind[..., 1] = all_ind[..., 1].clamp(min=0, max=size_factor_zyx[1] - 1)
        all_ind[..., 2] = all_ind[..., 2].clamp(min=0, max=size_factor_zyx[2] - 1)
        all_ind_norm = (all_ind / (size_factor_zyx-1)) * 2 - 1
        feat = F.grid_sample(grid, all_ind_norm, mode=mode, align_corners=align_corners)
        # Actual index separation per +/- pair (may be < 2*displace at borders
        # due to clamping); used as the finite-difference denominator.
        all_ind = all_ind.view(1, 1, 1, -1, 6, len(displace_list), 3)
        diff = all_ind[:, :, :, :, 1::2, :, :] - all_ind[:, :, :, :, 0::2, :, :]
        diff, _ = diff.max(dim=-1)
        feat_ = feat.view(1, 1, 1, -1, 6, len(displace_list))
        feat_diff = feat_[:, :, :, :, 1::2, :] - feat_[:, :, :, :, 0::2, :]
        grad = feat_diff / diff / self.voxel_size
        feat = feat.view(shape[-1], 6, len(displace_list))
        grad = grad.view(shape[-1], 3, len(displace_list))
        if use_grad_norm:
            grad = grad / (grad.norm(dim=1, keepdim=True) + 1e-5)
        feat = feat.view(shape[-1], 6 * len(displace_list))
        grad = grad.view(shape[-1], 3 * len(displace_list))
        return feat, grad
    def sample_ray_cuda(self, rays_o, rays_d, near, far, stepsize, maskout=True, use_bg=False, **render_kwargs):
        '''Sample query points on rays.
        All the output points are sorted from near to far.
        Input:
            rays_o, rayd_d: both in [N, 3] indicating ray configurations.
            near, far: the near and far distance of the rays.
            stepsize: the number of voxels of each sample step.
        Output:
            ray_pts: [M, 3] storing all the sampled points.
            ray_id: [M] the index of the ray of each point.
            step_id: [M] the i'th step on a ray of each point.
        '''
        # The passed `far` is deliberately ignored: rays stop at the scene
        # bbox anyway, and the given far can be too small.
        far = 1e9  # the given far can be too small while rays stop when hitting scene bbox
        rays_o = rays_o.contiguous()
        rays_d = rays_d.contiguous()
        if not use_bg:
            stepdist = stepsize * self.voxel_size
        else:
            stepdist = stepsize * self.voxel_size_bg
        ray_pts, mask_outbbox, ray_id, step_id, N_steps, t_min, t_max = render_utils_cuda.sample_pts_on_rays(
            rays_o, rays_d, self.xyz_min, self.xyz_max, near, far, stepdist)
        # correct the cuda output N_steps, which could have a bias of 1 randomly
        N_steps = ray_id.unique(return_counts=True)[1]
        if maskout:
            # Foreground sampling keeps in-bbox points; background sampling
            # keeps the complement.
            if not use_bg:
                mask_inbbox = ~mask_outbbox
            else:
                mask_inbbox = mask_outbbox
            ray_pts = ray_pts[mask_inbbox]
            ray_id = ray_id[mask_inbbox]
            step_id = step_id[mask_inbbox]
        return ray_pts, ray_id, step_id, mask_outbbox, N_steps
    def sample_ray_ori(self, rays_o, rays_d, near, far, stepsize, is_train=False, use_bg=False, **render_kwargs):
        '''Sample query points on rays (pure-PyTorch fallback).

        Returns (rays_pts, mask_outbbox, step): sample positions of shape
        [..., N_samples, 3], a boolean mask of samples outside the scene bbox,
        and the per-sample distances along the ray. During training the start
        of each ray's sampling is jittered by a random sub-step offset.
        '''
        # 1. determine the maximum number of query points to cover all possible rays
        if use_bg:
            N_samples = int(np.linalg.norm(np.array(self.bg_density.grid.shape[2:])+1) / stepsize) + 1
        else:
            N_samples = int(np.linalg.norm(np.array(self.sdf.grid.shape[2:])+1) / stepsize) + 1
        # 2. determine the two end-points of ray bbox intersection
        vec = torch.where(rays_d==0, torch.full_like(rays_d, 1e-6), rays_d)
        rate_a = (self.xyz_max - rays_o) / vec
        rate_b = (self.xyz_min - rays_o) / vec
        t_min = torch.minimum(rate_a, rate_b).amax(-1).clamp(min=near, max=far)
        t_max = torch.maximum(rate_a, rate_b).amin(-1).clamp(min=near, max=far)
        # 3. check wheter a raw intersect the bbox or not
        mask_outbbox = (t_max <= t_min)
        # 4. sample points on each ray
        rng = torch.arange(N_samples)[None].float()
        if is_train:
            # Per-ray random jitter of the first sample (stratified sampling).
            rng = rng.repeat(rays_d.shape[-2],1)
            rng += torch.rand_like(rng[:,[0]])
        if use_bg:
            step = stepsize * self.voxel_size_bg * rng
        else:
            step = stepsize * self.voxel_size * rng
        interpx = (t_min[...,None] + step/rays_d.norm(dim=-1,keepdim=True))
        rays_pts = rays_o[...,None,:] + rays_d[...,None,:] * interpx[...,None]
        # 5. update mask for query points outside bbox
        mask_outbbox = mask_outbbox[...,None] | ((self.xyz_min>rays_pts) | (rays_pts>self.xyz_max)).any(dim=-1)
        return rays_pts, mask_outbbox, step
def outside_sphere_trans(self, pts, pts_norm=None, filtered=False):
# r^2 = x^2 + y^2 + z^2; x = x / r^2
out_pts = pts.clone()
if filtered:
out_pts = out_pts / pts_norm ** 2
return out_pts
if pts_norm is None:
pts_norm = torch.linalg.norm(pts, ord=2, dim=-1, keepdim=True)
inside_sphere = (pts_norm < 1.0)
out_pts[~inside_sphere[...,0]] = out_pts[~inside_sphere[...,0]] / pts_norm[~inside_sphere[...,0]] ** 2
out_pts[inside_sphere[...,0]] = -10
return out_pts, ~inside_sphere
    def forward(self, rays_o, rays_d, viewdirs, global_step=None, **render_kwargs):
        '''Volume rendering: inner SDF-based model inside the unit sphere plus a
        separate background density model for the inverted outer space.

        Args:
            rays_o, rays_d, viewdirs: per-ray origin, direction, unit direction.
            global_step: training iteration; None means evaluation mode.
        Returns:
            dict of rendered quantities (rgb_marched, weights, depth, ...).
        '''
        if global_step is not None:
            # periodically refresh the sphere non-empty mask during training
            if global_step in [1, 100, 500, 1000, 2000, 3000, 5000, 10000, 15000, 16000, 17000, 18000, 19000, 20000] or global_step % self.set_sphere_freq == 0:
                self._set_sphere_nonempty_mask()
        ret_dict = {}
        N = len(rays_o)
        # sample points on rays
        # inner_pts, inner_ray_id, inner_step_id, mask_outbbox, N_steps = self.sample_ray_cuda(
        #     rays_o=rays_o, rays_d=rays_d, is_train=global_step is not None, **render_kwargs)
        inner_pts, mask_outbbox, step = self.sample_ray_ori(
            rays_o=rays_o, rays_d=rays_d, is_train=global_step is not None, use_bg=False, **render_kwargs)
        inner_ray_id, inner_step_id = create_full_step_id(inner_pts.shape[:2])
        # keep only samples inside the unit sphere for the inner (SDF) model
        pts_norm = torch.linalg.norm(inner_pts, ord=2, dim=-1, keepdim=True)
        inside_sphere = (pts_norm < 1.0).view(-1)
        inner_pts, inner_ray_id, inner_step_id = \
            inner_pts.view(-1, 3)[inside_sphere], inner_ray_id[inside_sphere], inner_step_id[inside_sphere]
        bg_render_kwargs = copy.deepcopy(render_kwargs)
        ''' old sample ray '''
        # background samples: re-sample the same rays, keep the outside-sphere part
        outer_pts_org, bg_mask_outbbox, bg_step = self.sample_ray_ori(
            rays_o=rays_o, rays_d=rays_d, is_train=global_step is not None, use_bg=False, **bg_render_kwargs)
        outer_ray_id, outer_step_id = create_full_step_id(outer_pts_org.shape[:2])
        bg_pts_norm = torch.linalg.norm(outer_pts_org, ord=2, dim=-1, keepdim=True)
        bg_inside_sphere = (bg_pts_norm < 1.0)[...,0]
        # sphere inversion: outside points are mapped to x / r^2
        outer_pts = self.outside_sphere_trans(outer_pts_org, bg_pts_norm, filtered=True)
        bg_mask = ~bg_inside_sphere
        # drop consecutive inverted samples that are closer than half a voxel step
        # (ub360_utils_cuda.cumdist_thres is an external CUDA op)
        dist_thres = self.voxel_size * render_kwargs['stepsize'] * 0.5
        dist = (outer_pts[:, 1:] - outer_pts[:, :-1]).norm(dim=-1)
        dist_mask = ub360_utils_cuda.cumdist_thres(dist, dist_thres)
        bg_mask[:,1:] &= dist_mask
        outer_pts, outer_ray_id, outer_step_id = \
            outer_pts[bg_mask], outer_ray_id[bg_mask.view(-1)], outer_step_id[bg_mask.view(-1)]
        outer_pts_org = outer_pts_org[bg_mask]
        # optionally pre-smooth the SDF grid before sampling
        if self.smooth_sdf:
            sdf_grid = self.smooth_conv(self.sdf.grid)
        else:
            sdf_grid = self.sdf.grid
        sdf, gradient, feat = self.grid_sampler(inner_pts, sdf_grid, sample_ret=True, sample_grad=True, displace=1.0)
        self.gradient = self.neus_sdf_gradient(sdf=sdf_grid)
        dist = render_kwargs['stepsize'] * self.voxel_size
        # NeuS-style conversion from SDF values to per-sample alpha
        s_val, in_alpha = self.neus_alpha_from_sdf_scatter(viewdirs, inner_ray_id, dist, sdf, gradient, global_step=global_step,
                                                           is_train=global_step is not None, use_mid=True)
        in_weights, in_alphainv_last = Alphas2Weights.apply(in_alpha, inner_ray_id, N)
        # prune low-weight inner samples for speed
        if self.fast_color_thres > 0:
            mask = in_weights > self.fast_color_thres
            in_weights = in_weights[mask]
            inner_pts = inner_pts[mask]
            inner_ray_id = inner_ray_id[mask]
            inner_step_id = inner_step_id[mask]
            in_alpha = in_alpha[mask]
            gradient = gradient[mask]
            sdf = sdf[mask]
        # assert self.separate_modelling
        # background branch uses a plain density field, not the SDF
        bg_interval = bg_render_kwargs['stepsize'] * self.voxel_size_ratio
        bg_density = self.bg_density(outer_pts)
        bg_alpha = self.activate_density(bg_density, bg_interval)
        bg_weights, bg_alphainv_last = Alphas2Weights.apply(bg_alpha, outer_ray_id, N)
        if self.bg_fast_color_thres > 0:
            # NOTE(review): threshold compares against self.fast_color_thres, not
            # self.bg_fast_color_thres, despite the guard above — confirm intended.
            # Also: `mask` returned in ret_dict below is whichever of the two
            # pruning masks was assigned last (undefined if both thresholds are 0).
            mask = bg_weights > self.fast_color_thres
            outer_pts = outer_pts[mask]
            outer_ray_id = outer_ray_id[mask]
            outer_step_id = outer_step_id[mask]
            bg_alpha = bg_alpha[mask]
            outer_pts_org = outer_pts_org[mask]
            bg_weights = bg_weights[mask]
        # ---- inner color feature assembly ----
        rgb_feat = []
        k_rgb_feat = []
        if self.k0_dim > 0:
            k0 = self.k0(inner_pts)
            if self.use_rgbnet_k0:
                rgb_feat.append(k0)
            k_rgb_feat.append(k0)
        all_grad_inds = list(set(self.grad_feat + self.k_grad_feat))
        all_sdf_inds = list(set(self.sdf_feat + self.k_sdf_feat))
        assert all_grad_inds == all_sdf_inds
        if len(all_grad_inds) > 0:
            all_grad_inds = sorted(all_grad_inds)
            all_grad_inds_ = deepcopy(all_grad_inds)
            # multi-displacement SDF/gradient features around each sample
            all_feat, all_grad = self.sample_sdfs(inner_pts, sdf_grid, displace_list=all_grad_inds_, use_grad_norm=self.use_grad_norm)
            rgb_feat.append(all_feat)
            rgb_feat.append(all_grad)
        else:
            all_feat, all_grad = None, None
        if self.use_xyz:
            # sinusoidal positional encoding of normalized coordinates
            rays_xyz = (inner_pts - self.xyz_min) / (self.xyz_max - self.xyz_min)
            xyz_emb = (rays_xyz.unsqueeze(-1) * self.posfreq).flatten(-2)
            xyz_emb = torch.cat([rays_xyz, xyz_emb.sin(), xyz_emb.cos()], -1)
            rgb_feat.append(xyz_emb)
        if self.use_view:
            viewdirs_emb = (viewdirs.unsqueeze(-1) * self.viewfreq).flatten(-2)
            viewdirs_emb = torch.cat(
                [viewdirs, viewdirs_emb.sin(), viewdirs_emb.cos()], -1)
            rgb_feat.append(viewdirs_emb.flatten(0, -2)[inner_ray_id])
        if self.center_sdf:
            rgb_feat.append(sdf[:, None])
        rgb_feat = torch.cat(rgb_feat, -1)
        rgb_logit = self.rgbnet(rgb_feat)
        rgb = torch.sigmoid(rgb_logit)
        # outside
        bg_rgb_feat = []
        if self.bg_k0_dim > 0:
            bg_k0 = self.bg_k0(outer_pts)
            bg_rgb_feat.append(bg_k0)
        if self.bg_use_xyz:
            # background positional encoding uses the ORIGINAL (uninverted) points
            bg_rays_xyz = (outer_pts_org - self.xyz_min) / (self.xyz_max - self.xyz_min)
            bg_xyz_emb = (bg_rays_xyz.unsqueeze(-1) * self.bg_posfreq).flatten(-2)
            bg_xyz_emb = torch.cat(
                [bg_rays_xyz, bg_xyz_emb.sin(), bg_xyz_emb.cos()], -1)
            bg_rgb_feat.append(bg_xyz_emb)
        if self.bg_use_view:
            bg_viewdirs_emb = (viewdirs.unsqueeze(-1) * self.bg_viewfreq).flatten(-2)
            bg_viewdirs_emb = torch.cat(
                [viewdirs, bg_viewdirs_emb.sin(), bg_viewdirs_emb.cos()], -1)
            bg_rgb_feat.append(bg_viewdirs_emb.flatten(0, -2)[outer_ray_id])
        bg_rgb_feat = torch.cat(bg_rgb_feat, -1)
        bg_rgb_logit = self.bg_rgbnet(bg_rgb_feat)
        bg_rgb = torch.sigmoid(bg_rgb_logit)
        # ---- accumulate per-sample colors into per-ray colors ----
        in_marched = segment_coo(
            src=(in_weights.unsqueeze(-1) * rgb),
            index=inner_ray_id, out=torch.zeros([N, 3]), reduce='sum')
        bg_marched = segment_coo(
            src=(bg_weights.unsqueeze(-1) * bg_rgb),
            index=outer_ray_id, out=torch.zeros([N, 3]), reduce='sum')
        cum_in_weights = segment_coo(
            src=(in_weights.unsqueeze(-1)),
            index=inner_ray_id, out=torch.zeros([N, 1]), reduce='sum')
        bg_marched = bg_marched.clamp(0, 1)
        in_marched = in_marched.clamp(0, 1)
        # composite: background is attenuated by the inner model's opacity
        rgb_marched = in_marched + (1 - cum_in_weights) * bg_marched
        rgb_marched = rgb_marched.clamp(0, 1)
        in_marched = in_marched
        # ---- optional refinement color head (k_rgbnet) ----
        if self.use_rgb_k:
            if self.k_use_xyz:
                k_rays_xyz = (inner_pts - self.xyz_min) / (self.xyz_max - self.xyz_min)
                k_xyz_emb = (k_rays_xyz.unsqueeze(-1) * self.k_posfreq).flatten(-2)
                k_xyz_emb = torch.cat([k_rays_xyz, k_xyz_emb.sin(), k_xyz_emb.cos()], -1)
                k_rgb_feat.append(k_xyz_emb)
            if self.k_use_view:
                k_viewdirs_emb = (viewdirs.unsqueeze(-1) * self.k_viewfreq).flatten(-2)
                k_viewdirs_emb = torch.cat(
                    [viewdirs, k_viewdirs_emb.sin(), k_viewdirs_emb.cos()], -1)
                k_rgb_feat.append(k_viewdirs_emb.flatten(0, -2)[inner_ray_id])
            if self.k_center_sdf:
                k_rgb_feat.append(sdf[:, None])
                k_rgb_feat.append(gradient)
            if self.k_res:
                color_feat = rgb_logit
                if self.k_detach_1:
                    k_rgb_feat.append(color_feat.detach())
                else:
                    k_rgb_feat.append(color_feat)
            k_rgb_feat = torch.cat(k_rgb_feat, -1)
            k_rgb_logit = self.k_rgbnet(k_rgb_feat)
            # residual on top of the base color logits (optionally detached)
            if self.k_detach_2:
                k_rgb_logit = rgb_logit.detach() + k_rgb_logit
            else:
                k_rgb_logit = rgb_logit + k_rgb_logit
            k_rgb = torch.sigmoid(k_rgb_logit)
            k_in_marched = segment_coo(
                src=(in_weights.unsqueeze(-1) * k_rgb),
                index=inner_ray_id, out=torch.zeros([N, 3]), reduce='sum')
            k_in_marched = k_in_marched.clamp(0, 1)
            k_rgb_marched = k_in_marched + (1 - cum_in_weights) * bg_marched
            k_rgb_marched = k_rgb_marched.clamp(0, 1)
            in_marched = k_in_marched
        else:
            k_rgb_marched = None
        # Ray marching
        if gradient is not None and render_kwargs.get('render_grad', False):
            normal = gradient / (gradient.norm(2, -1, keepdim=True) + 1e-6)
            normal_marched = segment_coo(
                src=(in_weights.unsqueeze(-1) * normal),
                index=inner_ray_id, out=torch.zeros([N, 3]), reduce='sum')
        else:
            normal_marched = None
        out_marched = bg_marched
        if render_kwargs.get('render_depth', False):
            with torch.no_grad():
                depth = segment_coo(
                    src=(in_weights * inner_step_id * dist),
                    index=inner_ray_id, out=torch.zeros([N]), reduce='sum')
            disp = 1 / depth
        else:
            depth = None
            disp = 0
        ret_dict.update({
            'alphainv_cum': (1 - cum_in_weights)[..., 0],
            'weights': in_weights,
            'bg_weights': bg_weights,
            'pts_norm': pts_norm,
            'rgb_marched': rgb_marched,
            # 'k_rgb_marched': k_rgb_marched,
            'in_marched': in_marched,
            'out_marched': out_marched,
            'normal_marched': normal_marched,
            'raw_alpha': in_alpha,
            'raw_rgb': rgb,
            'depth': depth,
            'disp': disp,
            'mask': mask,
            'mask_outbbox':mask_outbbox,
            'gradient': gradient,
            "gradient_error": None,
            "s_val": s_val,
        })
        if self.use_rgb_k:
            # the refined color becomes the primary output; base color kept as rgb_marched0
            ret_dict.update({
                'rgb_marched': k_rgb_marched,
                'rgb_marched0': rgb_marched,
            })
        return ret_dict
    def mesh_color_forward(self, ray_pts, **kwargs):
        """Predict vertex colors for extracted mesh points.

        Uses the negated surface normal as a pseudo view direction so the color
        query does not depend on any camera. Mirrors the feature assembly of
        `forward` (k0 grid, SDF/gradient features, positional and view encodings,
        optional k_rgbnet refinement head).

        Args:
            ray_pts: (M, 3) query points (typically mesh vertices).
        Returns:
            (M, 3) RGB colors in [0, 1].
        """
        sdf_grid = self.smooth_conv(self.sdf.grid) if self.smooth_sdf else self.sdf.grid
        # self.gradient = self.neus_sdf_gradient(sdf=sdf_grid)
        # gradient = self.grid_sampler(ray_pts, self.gradient).reshape(-1, 3)
        sdf, gradient, feat = self.grid_sampler(ray_pts, sdf_grid, sample_ret=True, sample_grad=True, displace=1.0)
        normal = gradient / (gradient.norm(dim=-1, keepdim=True) + 1e-5)
        # view direction is taken as the inward-facing normal
        viewdirs = -normal
        rgb_feat = []
        k_rgb_feat = []
        if self.k0_dim > 0:
            k0 = self.k0(ray_pts)
            if self.use_rgbnet_k0:
                rgb_feat.append(k0)
            k_rgb_feat.append(k0)
        all_grad_inds = list(set(self.grad_feat + self.k_grad_feat))
        all_sdf_inds = list(set(self.sdf_feat + self.k_sdf_feat))
        assert all_grad_inds == all_sdf_inds
        if len(all_grad_inds) > 0:
            all_grad_inds = sorted(all_grad_inds)
            all_grad_inds_ = deepcopy(all_grad_inds)
            all_feat, all_grad = self.sample_sdfs(ray_pts, sdf_grid,
                                                  displace_list=all_grad_inds_,
                                                  use_grad_norm=self.use_grad_norm)
            rgb_feat.append(all_feat)
            rgb_feat.append(all_grad)
        if self.use_xyz:
            # sinusoidal positional encoding of normalized coordinates
            rays_xyz = (ray_pts - self.xyz_min) / (self.xyz_max - self.xyz_min)
            xyz_emb = (rays_xyz.unsqueeze(-1) * self.posfreq).flatten(-2)
            xyz_emb = torch.cat([rays_xyz, xyz_emb.sin(), xyz_emb.cos()], -1)
            rgb_feat.append(xyz_emb)
        if self.use_view:
            viewdirs_emb = (viewdirs.unsqueeze(-1) * self.viewfreq).flatten(-2)
            viewdirs_emb = torch.cat(
                [viewdirs, viewdirs_emb.sin(), viewdirs_emb.cos()], -1)
            rgb_feat.append(viewdirs_emb.flatten(0, -2))
        if self.center_sdf:
            rgb_feat.append(sdf[:, None])
        rgb_feat = torch.cat(rgb_feat, -1)
        rgb_logit = self.rgbnet(rgb_feat)
        rgb = torch.sigmoid(rgb_logit)
        # optional refinement head; its output replaces the base color
        if self.use_rgb_k:
            if self.k_use_xyz:
                k_rays_xyz = (ray_pts - self.xyz_min) / (
                        self.xyz_max - self.xyz_min)
                k_xyz_emb = (k_rays_xyz.unsqueeze(-1) * self.k_posfreq).flatten(
                    -2)
                k_xyz_emb = torch.cat(
                    [k_rays_xyz, k_xyz_emb.sin(), k_xyz_emb.cos()], -1)
                k_rgb_feat.append(k_xyz_emb)
            if self.k_use_view:
                k_viewdirs_emb = (
                        viewdirs.unsqueeze(-1) * self.k_viewfreq).flatten(
                    -2)
                k_viewdirs_emb = torch.cat(
                    [viewdirs, k_viewdirs_emb.sin(), k_viewdirs_emb.cos()], -1)
                k_rgb_feat.append(k_viewdirs_emb.flatten(0, -2))
            if self.k_center_sdf:
                k_rgb_feat.append(sdf[:, None])
                k_rgb_feat.append(gradient)
            if self.k_res:
                color_feat = rgb_logit
                if self.k_detach_1:
                    k_rgb_feat.append(color_feat.detach())
                else:
                    k_rgb_feat.append(color_feat)
            k_rgb_feat = torch.cat(k_rgb_feat, -1)
            k_rgb_logit = self.k_rgbnet(k_rgb_feat)
            if self.k_detach_2:
                k_rgb_logit = rgb_logit.detach() + k_rgb_logit
            else:
                k_rgb_logit = rgb_logit + k_rgb_logit
            rgb = torch.sigmoid(k_rgb_logit)
        return rgb
    def extract_geometry(self, bound_min, bound_max, resolution=128, threshold=0.0, smooth=True, sigma=0.5, **kwargs):
        """Extract a mesh from the SDF grid via marching cubes.

        Args:
            bound_min, bound_max: extraction bounds.
            resolution: marching-cubes grid resolution; None falls back to the
                model's own world resolution.
            threshold: iso level passed to the extractor.
            smooth, sigma: apply a k=3 Gaussian smoothing pass to the SDF grid
                (only when the model does not already smooth its SDF).
        Returns:
            whatever lib.dvgo_ori.extract_geometry returns (vertices/triangles).
        """
        self._set_sphere_nonempty_mask()
        if self.smooth_sdf:
            sdf_grid = self.smooth_conv(self.sdf.grid)
        else:
            if smooth:
                self.init_smooth_conv_test_k3(sigma=sigma)
                sdf_grid = self.smooth_conv_test_k3(self.sdf.grid)
            else:
                sdf_grid = self.sdf.grid
        # negate the SDF so the extractor sees a density-like field
        query_func = lambda pts: self.grid_sampler(pts, - sdf_grid)
        if resolution is None:
            resolution = self.world_size[0]
        return extract_geometry(bound_min,
                                bound_max,
                                resolution=resolution,
                                threshold=threshold,
                                query_func=query_func)
def visualize_density_sdf(self, root='', iter=0, idxs=None):
if idxs is None:
if self.density.grid.shape[2] < 100:
idxs = [self.density.grid.hape[2] // 2]
else:
idxs = [60]
os.makedirs(os.path.join(root, "debug_figs"), exist_ok=True)
for i in idxs:
sdf_img = self.sdf.grid[0,0,i].cpu().detach().numpy()
sdf_img = (sdf_img + 1 / 2).clip(0,1) * 255
cv2.imwrite(os.path.join(root, "debug_figs/sdf_{}_{}.png".format(iter, i)), sdf_img)
    def visualize_weight(self, weight1, weight2, thrd=0.001):
        """Plot two per-ray weight profiles for the 100 most-opaque rays.

        Saves one matplotlib figure per selected ray as weight_<i>.png in the
        working directory. Debug helper only.
        """
        # sort() returns (values, indices); [-1] picks the indices tuple entry,
        # so this selects the 100 rays with the largest accumulated weight.
        idxs = weight1.sum(-1).sort()[-1][-100:]
        for i in idxs:
            plt.figure()
            # only plot samples where weight1 is above the threshold
            vis = weight1[i] > thrd
            plt.plot(weight1.detach().cpu().numpy()[i][vis])
            plt.plot(weight2.detach().cpu().numpy()[i][vis])
            plt.savefig("weight_{}.png".format(i))
''' Module for the searched coarse geometry
It supports query for the known free space and unknown space.
'''
''' Module for the searched coarse geometry
It supports query for the known free space and unknown space.
'''
class MaskCache(nn.Module):
    """Occupancy oracle built from a cached coarse-stage density grid.

    Loads a saved checkpoint, max-pools its density grid (dilating occupied
    regions so the mask errs on the safe side), and answers point queries with
    a boolean "alpha above threshold" mask.
    """
    def __init__(self, path, mask_cache_thres, ks=3):
        super().__init__()
        st = torch.load(path)
        self.mask_cache_thres = mask_cache_thres
        cache_kwargs = st['MaskCache_kwargs']
        self.register_buffer('xyz_min', torch.FloatTensor(cache_kwargs['xyz_min']))
        self.register_buffer('xyz_max', torch.FloatTensor(cache_kwargs['xyz_max']))
        pooled = F.max_pool3d(
            st['model_state_dict']['density'], kernel_size=ks, padding=ks // 2, stride=1)
        self.register_buffer('density', pooled)
        self.act_shift = cache_kwargs['act_shift']
        self.voxel_size_ratio = cache_kwargs['voxel_size_ratio']
        self.nearest = cache_kwargs.get('nearest', False)

    @torch.no_grad()
    def forward(self, xyz):
        """Return a boolean occupancy mask with the same leading shape as xyz."""
        leading_shape = xyz.shape[:-1]
        flat = xyz.reshape(1, 1, 1, -1, 3)
        # normalize to [-1, 1] and flip xyz -> zyx for grid_sample convention
        ind_norm = ((flat - self.xyz_min) / (self.xyz_max - self.xyz_min)).flip((-1,)) * 2 - 1
        sample_mode = 'nearest' if self.nearest else 'bilinear'
        density = F.grid_sample(self.density, ind_norm, align_corners=True, mode=sample_mode)
        alpha = 1 - torch.exp(-F.softplus(density + self.act_shift) * self.voxel_size_ratio)
        return (alpha.reshape(*leading_shape) >= self.mask_cache_thres)
''' Misc
'''
def cumprod_exclusive(p):
    """Exclusive cumulative product along the last dim, prefixed with ones.

    The clamp at 1e-10 keeps the product away from exact zero; without it
    training was observed to slow down near the end.
    """
    leading_ones = torch.ones_like(p[..., [0]])
    running = p.clamp_min(1e-10).cumprod(-1)
    return torch.cat((leading_ones, running), dim=-1)
def get_ray_marching_ray(alpha):
    """Turn per-step alphas into ray-marching weights plus cumulative transmittance."""
    transmittance = cumprod_exclusive(1 - alpha)
    weights = alpha * transmittance[..., :-1]
    return weights, transmittance
def total_variation(v, mask=None):
    """Mean absolute one-step difference of a 5D grid over its three spatial dims.

    Args:
        v: (1, C, X, Y, Z) grid.
        mask: optional boolean grid; only differences between two kept cells count.
    """
    if torch.__version__ == '1.10.0':
        dx = v.diff(dim=2).abs()
        dy = v.diff(dim=3).abs()
        dz = v.diff(dim=4).abs()
    else:
        dx = (v[:, :, 1:] - v[:, :, :-1]).abs()
        dy = (v[:, :, :, 1:] - v[:, :, :, :-1]).abs()
        dz = (v[:, :, :, :, 1:] - v[:, :, :, :, :-1]).abs()
    if mask is not None:
        dx = dx[mask[:, :, :-1] & mask[:, :, 1:]]
        dy = dy[mask[:, :, :, :-1] & mask[:, :, :, 1:]]
        dz = dz[mask[:, :, :, :, :-1] & mask[:, :, :, :, 1:]]
    return (dx.mean() + dy.mean() + dz.mean()) / 3
def total_variation_step2(v, mask=None):
    """Like `total_variation` but with stride-2 central differences (halved)."""
    dx = (v[:, :, 2:] - v[:, :, :-2]).abs() / 2
    dy = (v[:, :, :, 2:] - v[:, :, :, :-2]).abs() / 2
    dz = (v[:, :, :, :, 2:] - v[:, :, :, :, :-2]).abs() / 2
    if mask is not None:
        dx = dx[mask[:, :, :-2] & mask[:, :, 2:]]
        dy = dy[mask[:, :, :, :-2] & mask[:, :, :, 2:]]
        dz = dz[mask[:, :, :, :, :-2] & mask[:, :, :, :, 2:]]
    return (dx.mean() + dy.mean() + dz.mean()) / 3
class Alphas2Weights(torch.autograd.Function):
    """Custom autograd op converting per-sample alphas (grouped by ray_id)
    into ray-marching weights via the fused CUDA kernel in render_utils_cuda.

    apply(alpha, ray_id, N) -> (weights, alphainv_last) where alphainv_last is
    the leftover transmittance per ray.
    """
    @staticmethod
    def forward(ctx, alpha, ray_id, N):
        weights, T, alphainv_last, i_start, i_end = render_utils_cuda.alpha2weight(alpha, ray_id, N)
        # only stash backward state when gradients are actually needed
        if alpha.requires_grad:
            ctx.save_for_backward(alpha, weights, T, alphainv_last, i_start, i_end)
            ctx.n_rays = N
        return weights, alphainv_last
    @staticmethod
    @torch.autograd.function.once_differentiable
    def backward(ctx, grad_weights, grad_last):
        alpha, weights, T, alphainv_last, i_start, i_end = ctx.saved_tensors
        grad = render_utils_cuda.alpha2weight_backward(
            alpha, weights, T, alphainv_last,
            i_start, i_end, ctx.n_rays, grad_weights, grad_last)
        # gradients only flow to alpha; ray_id and N are non-differentiable
        return grad, None, None
''' Ray and batch
'''
def get_rays(H, W, K, c2w, inverse_y, flip_x, flip_y, mode='center'):
    """Build per-pixel ray origins and directions in world space.

    Args:
        H, W: image size.  K: 3x3 intrinsics (indexable as K[r][c]).
        c2w: camera-to-world matrix (at least 3x4 tensor).
        inverse_y: camera looks down +z instead of -z.
        flip_x, flip_y: mirror the pixel grid along the respective axis.
        mode: 'lefttop' | 'center' | 'random' sub-pixel sample position.
    Returns:
        (rays_o, rays_d), each of shape (H, W, 3).
    """
    xs, ys = torch.meshgrid(
        torch.linspace(0, W-1, W, device=c2w.device),
        torch.linspace(0, H-1, H, device=c2w.device))  # pytorch's meshgrid has indexing='ij'
    xs = xs.t().float()
    ys = ys.t().float()
    if mode == 'center':
        xs, ys = xs + 0.5, ys + 0.5
    elif mode == 'random':
        xs = xs + torch.rand_like(xs)
        ys = ys + torch.rand_like(ys)
    elif mode != 'lefttop':
        raise NotImplementedError
    if flip_x:
        xs = xs.flip((1,))
    if flip_y:
        ys = ys.flip((0,))
    x_cam = (xs - K[0][2]) / K[0][0]
    y_cam = (ys - K[1][2]) / K[1][1]
    if inverse_y:
        dirs = torch.stack([x_cam, y_cam, torch.ones_like(xs)], -1)
    else:
        dirs = torch.stack([x_cam, -y_cam, -torch.ones_like(xs)], -1)
    # Rotate ray directions from camera frame to the world frame
    rays_d = torch.sum(dirs[..., np.newaxis, :] * c2w[:3, :3], -1)
    # Translate camera frame's origin to the world frame. It is the origin of all rays.
    rays_o = c2w[:3, 3].expand(rays_d.shape)
    return rays_o, rays_d
def ndc_rays(H, W, focal, near, rays_o, rays_d):
    """Warp rays into normalized device coordinates (forward-facing scenes).

    Origins are first shifted onto the near plane, then origins and directions
    are projected with the standard NeRF NDC formulas.
    """
    # Shift ray origins to near plane
    t = -(near + rays_o[..., 2]) / rays_d[..., 2]
    shifted = rays_o + t[..., None] * rays_d
    ox, oy, oz = shifted[..., 0], shifted[..., 1], shifted[..., 2]
    dx, dy, dz = rays_d[..., 0], rays_d[..., 1], rays_d[..., 2]
    sx = -1. / (W / (2. * focal))
    sy = -1. / (H / (2. * focal))
    # Projection
    o0 = sx * ox / oz
    o1 = sy * oy / oz
    o2 = 1. + 2. * near / oz
    d0 = sx * (dx / dz - ox / oz)
    d1 = sy * (dy / dz - oy / oz)
    d2 = -2. * near / oz
    return torch.stack([o0, o1, o2], -1), torch.stack([d0, d1, d2], -1)
def get_rays_of_a_view(H, W, K, c2w, ndc, inverse_y, flip_x, flip_y, mode='center'):
    """Rays and unit view directions for one camera; optionally NDC-warped.

    Note: viewdirs are normalized from the pre-NDC directions.
    """
    rays_o, rays_d = get_rays(
        H, W, K, c2w, inverse_y=inverse_y, flip_x=flip_x, flip_y=flip_y, mode=mode)
    viewdirs = rays_d / rays_d.norm(dim=-1, keepdim=True)
    if ndc:
        rays_o, rays_d = ndc_rays(H, W, K[0][0], 1., rays_o, rays_d)
    return rays_o, rays_d, viewdirs
@torch.no_grad()
def get_training_rays(rgb_tr, train_poses, HW, Ks, ndc, inverse_y, flip_x, flip_y):
    """Precompute per-pixel rays for every training image (shared H, W, K).

    Args:
        rgb_tr: (n_img, H, W, 3) training images; rays are allocated on its device.
        train_poses: per-image camera-to-world matrices.
        HW, Ks: per-image sizes/intrinsics; all must be identical here.
    Returns:
        (rgb_tr, rays_o_tr, rays_d_tr, viewdirs_tr, imsz).
    """
    print('get_training_rays: start')
    # this fast path requires every view to share the same resolution and intrinsics
    assert len(np.unique(HW, axis=0)) == 1
    assert len(np.unique(Ks.reshape(len(Ks),-1), axis=0)) == 1
    assert len(rgb_tr) == len(train_poses) and len(rgb_tr) == len(Ks) and len(rgb_tr) == len(HW)
    H, W = HW[0]
    K = Ks[0]
    eps_time = time.time()
    rays_o_tr = torch.zeros([len(rgb_tr), H, W, 3], device=rgb_tr.device)
    rays_d_tr = torch.zeros([len(rgb_tr), H, W, 3], device=rgb_tr.device)
    viewdirs_tr = torch.zeros([len(rgb_tr), H, W, 3], device=rgb_tr.device)
    # NOTE(review): imsz entries are placeholder 1s here (the flatten variant
    # records true pixel counts) — confirm callers don't rely on real sizes.
    imsz = [1] * len(rgb_tr)
    for i, c2w in enumerate(train_poses):
        rays_o, rays_d, viewdirs = get_rays_of_a_view(
            H=H, W=W, K=K, c2w=c2w, ndc=ndc, inverse_y=inverse_y, flip_x=flip_x, flip_y=flip_y)
        rays_o_tr[i].copy_(rays_o.to(rgb_tr.device))
        rays_d_tr[i].copy_(rays_d.to(rgb_tr.device))
        viewdirs_tr[i].copy_(viewdirs.to(rgb_tr.device))
        del rays_o, rays_d, viewdirs
    eps_time = time.time() - eps_time
    print('get_training_rays: finish (eps time:', eps_time, 'sec)')
    return rgb_tr, rays_o_tr, rays_d_tr, viewdirs_tr, imsz
@torch.no_grad()
def get_training_rays_flatten(rgb_tr_ori, train_poses, HW, Ks, ndc, inverse_y, flip_x, flip_y):
    """Precompute training rays for possibly differently-sized views, flattened
    into single (N, 3) tensors with per-image pixel counts in imsz.

    Returns:
        (rgb_tr, rays_o_tr, rays_d_tr, viewdirs_tr, imsz).
    """
    print('get_training_rays_flatten: start')
    assert len(rgb_tr_ori) == len(train_poses) and len(rgb_tr_ori) == len(Ks) and len(rgb_tr_ori) == len(HW)
    eps_time = time.time()
    DEVICE = rgb_tr_ori[0].device
    # total pixel count over all views; buffers are preallocated once
    N = sum(im.shape[0] * im.shape[1] for im in rgb_tr_ori)
    rgb_tr = torch.zeros([N,3], device=DEVICE)
    rays_o_tr = torch.zeros_like(rgb_tr)
    rays_d_tr = torch.zeros_like(rgb_tr)
    viewdirs_tr = torch.zeros_like(rgb_tr)
    imsz = []
    top = 0
    for c2w, img, (H, W), K in zip(train_poses, rgb_tr_ori, HW, Ks):
        assert img.shape[:2] == (H, W)
        rays_o, rays_d, viewdirs = get_rays_of_a_view(
            H=H, W=W, K=K, c2w=c2w, ndc=ndc,
            inverse_y=inverse_y, flip_x=flip_x, flip_y=flip_y)
        n = H * W
        # write this view's pixels/rays into the flat buffers
        rgb_tr[top:top+n].copy_(img.flatten(0,1))
        rays_o_tr[top:top+n].copy_(rays_o.flatten(0,1).to(DEVICE))
        rays_d_tr[top:top+n].copy_(rays_d.flatten(0,1).to(DEVICE))
        viewdirs_tr[top:top+n].copy_(viewdirs.flatten(0,1).to(DEVICE))
        imsz.append(n)
        top += n
    assert top == N
    eps_time = time.time() - eps_time
    print('get_training_rays_flatten: finish (eps time:', eps_time, 'sec)')
    return rgb_tr, rays_o_tr, rays_d_tr, viewdirs_tr, imsz
@torch.no_grad()
def get_training_rays_in_maskcache_sampling(rgb_tr_ori, train_poses, HW, Ks, ndc, inverse_y, flip_x, flip_y, model, render_kwargs, rgbnet_sup_reduce=1):
    """Like `get_training_rays_flatten`, but keep only rays that intersect the
    model's cached occupied space (model.mask_cache) — reduces the ray pool.

    Returns:
        (rgb_tr, rays_o_tr, rays_d_tr, viewdirs_tr, imsz), trimmed to the
        surviving ray count.
    """
    print('get_training_rays_in_maskcache_sampling: start')
    assert len(rgb_tr_ori) == len(train_poses) and len(rgb_tr_ori) == len(Ks) and len(rgb_tr_ori) == len(HW)
    CHUNK = 64  # rows of pixels processed per mask-cache query (memory bound)
    DEVICE = rgb_tr_ori[0].device
    eps_time = time.time()
    N = sum(im.shape[0] * im.shape[1] for im in rgb_tr_ori)
    rgb_tr = torch.zeros([N,3], device=DEVICE)
    # rgb_tr_new_res = torch.zeros([N,3], device=DEVICE)
    rays_o_tr = torch.zeros_like(rgb_tr)
    rays_d_tr = torch.zeros_like(rgb_tr)
    viewdirs_tr = torch.zeros_like(rgb_tr)
    imsz = []
    top = 0
    for c2w, img, (H, W), K in zip(train_poses, rgb_tr_ori, HW, Ks):
        assert img.shape[:2] == (H, W)
        rays_o, rays_d, viewdirs = get_rays_of_a_view(
            H=H, W=W, K=K, c2w=c2w, ndc=ndc,
            inverse_y=inverse_y, flip_x=flip_x, flip_y=flip_y)
        mask = torch.ones(img.shape[:2], device=DEVICE, dtype=torch.bool)
        for i in range(0, img.shape[0], CHUNK):
            rays_pts, mask_outbbox, _ = model.sample_ray_ori(
                rays_o=rays_o[i:i+CHUNK], rays_d=rays_d[i:i+CHUNK], **render_kwargs)
            # extend the out-of-bbox mask with "known free" points from the cache
            mask_outbbox[~mask_outbbox] |= (~model.mask_cache(rays_pts[~mask_outbbox]))
            # keep a pixel only if at least one of its samples is potentially occupied
            mask[i:i+CHUNK] &= (~mask_outbbox).any(-1).to(DEVICE)
        n = mask.sum()
        rgb_tr[top:top+n].copy_(img[mask])
        # rgb_tr_new_res[top:top+n].copy_(img_new_res[mask])
        rays_o_tr[top:top+n].copy_(rays_o[mask].to(DEVICE))
        rays_d_tr[top:top+n].copy_(rays_d[mask].to(DEVICE))
        viewdirs_tr[top:top+n].copy_(viewdirs[mask].to(DEVICE))
        imsz.append(n)
        top += n
    print('get_training_rays_in_maskcache_sampling: ratio', top / N)
    # trim the preallocated buffers to the surviving count
    rgb_tr = rgb_tr[:top]
    rays_o_tr = rays_o_tr[:top]
    rays_d_tr = rays_d_tr[:top]
    viewdirs_tr = viewdirs_tr[:top]
    eps_time = time.time() - eps_time
    print('get_training_rays_in_maskcache_sampling: finish (eps time:', eps_time, 'sec)')
    return rgb_tr, rays_o_tr, rays_d_tr, viewdirs_tr, imsz
def batch_indices_generator(N, BS):
    """Yield random LongTensor index batches of size BS forever, reshuffling
    whenever fewer than BS indices remain in the current permutation."""
    # torch.randperm on cuda produce incorrect results in my machine
    order, cursor = torch.LongTensor(np.random.permutation(N)), 0
    while True:
        if cursor + BS > N:
            order, cursor = torch.LongTensor(np.random.permutation(N)), 0
        yield order[cursor:cursor + BS]
        cursor += BS
@functools.lru_cache(maxsize=128)
def create_full_step_id(shape):
    """Flattened (ray_id, step_id) index tensors for a dense (n_rays, n_steps)
    sampling grid; cached since the shape rarely changes between iterations."""
    n_rays, n_steps = shape
    ray_id = torch.arange(n_rays).view(-1, 1).expand(shape).flatten()
    step_id = torch.arange(n_steps).view(1, -1).expand(shape).flatten()
    return ray_id, step_id
| 65,267 | 43.61244 | 162 | py |
Voxurf | Voxurf-main/lib/voxurf_coarse.py | import os
import time
import numpy as np
import cv2
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch_scatter import segment_coo
from torch.utils.cpp_extension import load
from . import grid
from lib.dvgo_ori import extract_geometry
parent_dir = os.path.dirname(os.path.abspath(__file__))
# JIT-compile the CUDA ray-marching helpers relative to this file on first
# import (torch.utils.cpp_extension caches the build afterwards).
render_utils_cuda = load(
        name='render_utils_cuda',
        sources=[
            os.path.join(parent_dir, path)
            for path in ['cuda/render_utils.cpp', 'cuda/render_utils_kernel.cu']],
        verbose=True)
'''Model'''
class Voxurf(torch.nn.Module):
"""
This module is modified from DirectVoxGO https://github.com/sunset1995/DirectVoxGO/blob/main/lib/dvgo.py
"""
    def __init__(self, xyz_min, xyz_max,
                 num_voxels=0, num_voxels_base=0,
                 alpha_init=None,
                 nearest=False,
                 mask_cache_path=None, mask_cache_thres=1e-3,
                 fast_color_thres=0,
                 rgbnet_dim=0, rgbnet_direct=False, rgbnet_full_implicit=False,
                 rgbnet_depth=3, rgbnet_width=128,
                 posbase_pe=5, viewbase_pe=4, geo_rgb_dim=3,
                 grad_mode='interpolate',
                 s_ratio=2000, s_start=0.2, s_learn=False, step_start=0,
                 smooth_ksize=0, smooth_sigma=1,
                 **kwargs):
        """Coarse-stage Voxurf model: SDF voxel grid + feature grid + color MLP.

        Args:
            xyz_min, xyz_max: scene bounding box corners.
            num_voxels / num_voxels_base: current / base voxel budgets.
            alpha_init: initial per-voxel alpha used to derive the density bias.
            mask_cache_path / mask_cache_thres: optional coarse-geometry cache.
            rgbnet_*: color MLP configuration (dim<=0 means plain color grid).
            posbase_pe / viewbase_pe: positional-encoding frequency counts.
            s_ratio, s_start, s_learn, step_start: NeuS sharpness schedule.
            smooth_ksize, smooth_sigma: optional Gaussian smoothing of the SDF.
        """
        super(Voxurf, self).__init__()
        self.register_buffer('xyz_min', torch.Tensor(xyz_min))
        self.register_buffer('xyz_max', torch.Tensor(xyz_max))
        self.fast_color_thres = fast_color_thres
        self.nearest = nearest
        self.s_ratio = s_ratio
        self.s_start = s_start
        self.s_learn = s_learn
        self.step_start = step_start
        # NOTE(review): .cuda() on a Parameter returns a non-leaf copy, so
        # s_val is not registered in parameters() — confirm this is intended
        # (s_learn defaults to False, so it is usually not optimized anyway).
        self.s_val = nn.Parameter(torch.ones(1), requires_grad=s_learn).cuda()
        self.s_val.data *= s_start
        self.sdf_init_mode = "ball_init"
        # determine based grid resolution
        self.num_voxels_base = num_voxels_base
        self.voxel_size_base = ((self.xyz_max - self.xyz_min).prod() / self.num_voxels_base).pow(1/3)
        # determine the density bias shift so a zero grid yields alpha_init
        self.alpha_init = alpha_init
        self.act_shift = np.log(1/(1-alpha_init) - 1)
        print('set density bias shift to', self.act_shift)
        # determine init grid resolution
        self._set_grid_resolution(num_voxels)
        # init density voxel grid
        self.density = torch.nn.Parameter(torch.zeros([1, 1, *self.world_size]))
        self.sdf = grid.create_grid(
            'DenseGrid', channels=1, world_size=self.world_size,
            xyz_min=self.xyz_min, xyz_max=self.xyz_max)
        if self.sdf_init_mode == "ball_init":
            # initialize the SDF as a unit sphere centered in the grid
            x, y, z = np.mgrid[-1.0:1.0:self.world_size[0].item() * 1j, -1.0:1.0:self.world_size[1].item() * 1j, -1.0:1.0:self.world_size[2].item() * 1j]
            self.sdf.grid.data = torch.from_numpy((x ** 2 + y ** 2 + z ** 2) ** 0.5 -1).float()[None, None, ...]
        elif self.sdf_init_mode == "random":
            self.sdf.grid = torch.nn.Parameter(torch.rand([1, 1, *self.world_size]) * 0.05) # random initialization
            torch.nn.init.normal_(self.sdf, 0.0, 0.5)
        else:
            raise NotImplementedError
        self.init_smooth_conv(smooth_ksize, smooth_sigma)
        # init color representation
        self.rgbnet_kwargs = {
            'rgbnet_dim': rgbnet_dim, 'rgbnet_direct': rgbnet_direct,
            'rgbnet_full_implicit': rgbnet_full_implicit,
            'rgbnet_depth': rgbnet_depth, 'rgbnet_width': rgbnet_width,
            'posbase_pe': posbase_pe, 'viewbase_pe': viewbase_pe,
        }
        self.rgbnet_full_implicit = rgbnet_full_implicit
        if rgbnet_dim <= 0:
            # color voxel grid (dvgo coarse stage)
            self.k0_dim = 3
            self.k0 = torch.nn.Parameter(torch.zeros([1, self.k0_dim, *self.world_size]))
            self.rgbnet = None
        else:
            if self.rgbnet_full_implicit:
                self.k0_dim = 0
            else:
                self.k0_dim = rgbnet_dim
            self.k0 = grid.create_grid(
                'DenseGrid', channels=self.k0_dim, world_size=self.world_size,
                xyz_min=self.xyz_min, xyz_max=self.xyz_max)
            self.rgbnet_direct = rgbnet_direct
            # fixed sinusoidal frequency bands for positional/view encodings
            self.register_buffer('posfreq', torch.FloatTensor([(2**i) for i in range(posbase_pe)]))
            self.register_buffer('viewfreq', torch.FloatTensor([(2**i) for i in range(viewbase_pe)]))
            dim0 = (3+3*posbase_pe*2) + (3+3*viewbase_pe*2)
            if rgbnet_direct:
                dim0 += self.k0_dim
            else:
                dim0 += self.k0_dim-3
            self.geo_rgb_dim = geo_rgb_dim
            if self.geo_rgb_dim:
                dim0 += self.geo_rgb_dim
            self.rgbnet = nn.Sequential(
                nn.Linear(dim0, rgbnet_width), nn.ReLU(inplace=True),
                *[
                    nn.Sequential(nn.Linear(rgbnet_width, rgbnet_width), nn.ReLU(inplace=True))
                    for _ in range(rgbnet_depth-2)
                ],
                nn.Linear(rgbnet_width, 3),
            )
            nn.init.constant_(self.rgbnet[-1].bias, 0)
            print('feature voxel grid', self.k0.grid.shape)
            print('mlp', self.rgbnet)
        # Using the coarse geometry if provided (used to determine known free space and unknown space)
        self.mask_cache_path = mask_cache_path
        self.mask_cache_thres = mask_cache_thres
        if mask_cache_path is not None and mask_cache_path:
            self.mask_cache = MaskCache(
                path=mask_cache_path,
                mask_cache_thres=mask_cache_thres).to(self.xyz_min.device)
            self._set_nonempty_mask()
        else:
            self.mask_cache = None
            self.nonempty_mask = None
        # grad conv to calculate gradient
        self.init_gradient_conv()
        self.grad_mode = grad_mode
    def init_gradient_conv(self, sigma = 0):
        """Build the frozen 3x3x3 Sobel-like conv that estimates the SDF gradient,
        plus a companion smoothing conv used by the TV regularizer.

        Args:
            sigma: blends between a naive [-1, 0, 1] stencil and a Sobel-style
                weighted kernel (larger sigma downweights far neighbors).
        """
        self.grad_conv = nn.Conv3d(1,3,(3,3,3),stride=(1,1,1), padding=(1, 1, 1), padding_mode='replicate')
        # classic 3D Sobel smoothing weights
        kernel = np.asarray([
            [[1,2,1],[2,4,2],[1,2,1]],
            [[2,4,2],[4,8,4],[2,4,2]],
            [[1,2,1],[2,4,2],[1,2,1]],
        ])
        # sigma controls the difference between naive [-1,1] and sobel kernel
        distance = np.zeros((3,3,3))
        for i in range(3):
            for j in range(3):
                for k in range(3):
                    distance[i,j,k] = ((i-1)**2 + (j-1)**2 + (k-1)**2 - 1)
        kernel0 = kernel * np.exp(-distance * sigma)
        # normalize so a unit SDF step yields a unit gradient in world units
        kernel1 = kernel0 / ( kernel0[0].sum() * 2 * self.voxel_size.item())
        weight = torch.from_numpy(np.concatenate([kernel1[None] for _ in range(3)])).float()
        # turn the smoothing kernel into a central-difference stencil per axis
        weight[0,1,:,:] *= 0
        weight[0,0,:,:] *= -1
        weight[1,:,1,:] *= 0
        weight[1,:,0,:] *= -1
        weight[2,:,:,1] *= 0
        weight[2,:,:,0] *= -1
        self.grad_conv.weight.data = weight.unsqueeze(1).float()
        self.grad_conv.bias.data = torch.zeros(3)
        for param in self.grad_conv.parameters():
            param.requires_grad = False
        # smooth conv for TV
        self.tv_smooth_conv = nn.Conv3d(1, 1, (3, 3, 3), stride=1, padding=1, padding_mode='replicate')
        weight = torch.from_numpy(kernel0 / kernel0.sum()).float()
        self.tv_smooth_conv.weight.data = weight.unsqueeze(0).unsqueeze(0).float()
        self.tv_smooth_conv.bias.data = torch.zeros(1)
        for param in self.tv_smooth_conv.parameters():
            param.requires_grad = False
def _gaussian_3dconv(self, ksize=3, sigma=1):
x = np.arange(-(ksize//2),ksize//2 + 1,1)
y = np.arange(-(ksize//2),ksize//2 + 1,1)
z = np.arange(-(ksize//2),ksize//2 + 1,1)
xx, yy, zz = np.meshgrid(x,y,z)
kernel = np.exp(-(xx**2 + yy**2 + zz**2)/(2*sigma**2))
kernel = torch.from_numpy(kernel).to(self.sdf.grid)
m = nn.Conv3d(1,1,ksize,stride=1,padding=ksize//2, padding_mode='replicate')
m.weight.data = kernel[None, None, ...] / kernel.sum()
m.bias.data = torch.zeros(1)
for param in m.parameters():
param.requires_grad = False
return m
def init_smooth_conv(self, ksize=3, sigma=1):
self.smooth_sdf = ksize > 0
if self.smooth_sdf:
self.smooth_conv = self._gaussian_3dconv(ksize, sigma)
print("- "*10 + "init smooth conv with ksize={} and sigma={}".format(ksize, sigma) + " -"*10)
    def init_sdf_from_density(self, smooth=False, reduce=1., ksize=3, sigma=1., zero2neg=True):
        """Initialize the SDF grid from the (coarse) density grid via -tanh(density).

        Args:
            smooth: additionally blur the converted SDF with a Gaussian conv.
            reduce: divides the converted SDF to shrink its magnitude.
            ksize, sigma: Gaussian kernel parameters when smoothing.
            zero2neg: treat exactly-zero density as strongly empty (-100) so it
                maps to positive SDF (outside).
        """
        print("\n", "- "*3 + "initing sdf from density" + " -"*3, "\n")
        # NOTE(review): multiplying a Parameter by 10 yields a plain (non-leaf)
        # tensor, so self.s is not a registered parameter — confirm intended.
        self.s = torch.nn.Parameter(torch.ones(1)) * 10
        if zero2neg:
            self.density.data[self.density.data==0] = -100
        # resample the density if its resolution differs from the SDF grid
        if self.density.shape != self.sdf.grid.shape:
            self.density.data = F.interpolate(self.density.data, size=tuple(self.world_size), mode='trilinear', align_corners=True)
        if smooth:
            # first build the smoothing kernel
            m = self._gaussian_3dconv(ksize, sigma)
            sdf_data = m(-torch.tanh(self.density.data) / reduce)
            self.sdf.grid = torch.nn.Parameter(sdf_data)
        else:
            self.sdf.grid.data = -torch.tanh(self.density.data) / reduce # + self.act_shift
        self.gradient = self.neus_sdf_gradient()
def _set_grid_resolution(self, num_voxels):
# Determine grid resolution
self.num_voxels = num_voxels
self.voxel_size = ((self.xyz_max - self.xyz_min).prod() / num_voxels).pow(1/3)
self.world_size = ((self.xyz_max - self.xyz_min) / self.voxel_size).long()
self.voxel_size_ratio = self.voxel_size / self.voxel_size_base
print('voxel_size ', self.voxel_size)
print('world_size ', self.world_size)
print('voxel_size_base ', self.voxel_size_base)
print('voxel_size_ratio', self.voxel_size_ratio)
def get_kwargs(self):
return {
'xyz_min': self.xyz_min.cpu().numpy(),
'xyz_max': self.xyz_max.cpu().numpy(),
'num_voxels': self.num_voxels,
'num_voxels_base': self.num_voxels_base,
'alpha_init': self.alpha_init,
'nearest': self.nearest,
'mask_cache_path': self.mask_cache_path,
'mask_cache_thres': self.mask_cache_thres,
'fast_color_thres': self.fast_color_thres,
'geo_rgb_dim':self.geo_rgb_dim,
# 's_start': self.s_start,
# 's_ratio': self.s_ratio,
**self.rgbnet_kwargs,
}
def get_MaskCache_kwargs(self):
return {
'xyz_min': self.xyz_min.cpu().numpy(),
'xyz_max': self.xyz_max.cpu().numpy(),
'act_shift': self.act_shift,
'voxel_size_ratio': self.voxel_size_ratio,
'nearest': self.nearest
}
    @torch.no_grad()
    def _set_nonempty_mask(self):
        """Mark grid points inside the cached occupied space and force the rest
        of the density/SDF grids to "empty" values (-100 density, +1 SDF)."""
        # Find grid points that is inside nonempty (occupied) space
        self_grid_xyz = torch.stack(torch.meshgrid(
            torch.linspace(self.xyz_min[0], self.xyz_max[0], self.sdf.grid.shape[2]),
            torch.linspace(self.xyz_min[1], self.xyz_max[1], self.sdf.grid.shape[3]),
            torch.linspace(self.xyz_min[2], self.xyz_max[2], self.sdf.grid.shape[4]),
        ), -1)
        nonempty_mask = self.mask_cache(self_grid_xyz)[None,None].contiguous()
        # keep the buffer registered only once; afterwards update in place
        if hasattr(self, 'nonempty_mask'):
            self.nonempty_mask = nonempty_mask
        else:
            self.register_buffer('nonempty_mask', nonempty_mask)
        self.density[~self.nonempty_mask] = -100
        self.sdf.grid[~self.nonempty_mask] = 1
    @torch.no_grad()
    def maskout_near_cam_vox(self, cam_o, near):
        """Force voxels within `near` distance of any camera origin to empty
        (density -100, SDF +1), preventing floaters right in front of cameras."""
        self_grid_xyz = torch.stack(torch.meshgrid(
            torch.linspace(self.xyz_min[0], self.xyz_max[0], self.sdf.grid.shape[2]),
            torch.linspace(self.xyz_min[1], self.xyz_max[1], self.sdf.grid.shape[3]),
            torch.linspace(self.xyz_min[2], self.xyz_max[2], self.sdf.grid.shape[4]),
        ), -1)
        # distance from every grid point to its closest camera origin
        nearest_dist = torch.stack([
            (self_grid_xyz.unsqueeze(-2) - co).pow(2).sum(-1).sqrt().amin(-1)
            for co in cam_o.split(100)  # for memory saving
        ]).amin(0)
        self.density[nearest_dist[None,None] <= near] = -100
        self.sdf.grid[nearest_dist[None,None] <= near] = 1
    @torch.no_grad()
    def scale_volume_grid(self, num_voxels):
        """Resample the sdf (and color-feature) grids to a new voxel budget,
        then refresh the nonempty mask at the new resolution."""
        print('scale_volume_grid start')
        ori_world_size = self.world_size
        self._set_grid_resolution(num_voxels)
        print('scale_volume_grid scale world_size from', ori_world_size, 'to', self.world_size)
        self.sdf.scale_volume_grid(self.world_size)
        # k0 exists only when the model carries explicit color features.
        if self.k0_dim > 0:
            self.k0.scale_volume_grid(self.world_size)
        if self.mask_cache is not None:
            self._set_nonempty_mask()
        print('scale_volume_grid finish')
    def density_total_variation(self, sdf_tv=0, smooth_grad_tv=0, sdf_thrd=0.999):
        """Total-variation regularizers on the sdf grid.

        Args:
            sdf_tv: weight of plain TV on sdf values (scaled by voxel size).
            smooth_grad_tv: weight of the smoothed-vs-raw gradient consistency term.
            sdf_thrd: unused here; kept for interface compatibility.
        """
        tv = 0
        if sdf_tv > 0:
            tv += total_variation(self.sdf.grid, self.nonempty_mask) / 2 / self.voxel_size * sdf_tv
        if smooth_grad_tv > 0:
            # Pull the raw gradient toward its smoothed version; the smoothed
            # target is detached so gradients only flow through the raw field.
            smooth_tv_error = (self.tv_smooth_conv(self.gradient.permute(1,0,2,3,4)).detach() - self.gradient.permute(1,0,2,3,4))
            smooth_tv_error = smooth_tv_error[self.nonempty_mask.repeat(3,1,1,1,1)] ** 2
            tv += smooth_tv_error.mean() * smooth_grad_tv
        return tv
def k0_total_variation(self, k0_tv=1., k0_grad_tv=0.):
if self.rgbnet is not None:
v = self.k0.grid
else:
v = torch.sigmoid(self.k0.grid)
tv = 0
if k0_tv > 0:
tv += total_variation(v, self.nonempty_mask.repeat(1,v.shape[1],1,1,1))
if k0_grad_tv > 0:
raise NotImplementedError
return tv
def neus_sdf_gradient(self, mode=None, sdf=None):
if sdf is None:
sdf = self.sdf.grid
if mode is None:
mode = self.grad_mode
if mode == 'interpolate':
gradient = torch.zeros([1, 3] + [*self.sdf.grid.shape[-3:]]).to(self.sdf.grid.device)
gradient[:,0,1:-1,:,:] = (sdf[:,0,2:,:,:] - sdf[:,0,:-2,:,:]) / 2 / self.voxel_size
gradient[:,1,:,1:-1,:] = (sdf[:,0,:,2:,:] - sdf[:,0,:,:-2,:]) / 2 / self.voxel_size
gradient[:,2,:,:,1:-1] = (sdf[:,0,:,:,2:] - sdf[:,0,:,:,:-2]) / 2 / self.voxel_size
elif mode == 'grad_conv':
# use sobel operator for gradient seems basically the same as the naive solution
for param in self.grad_conv.parameters():
assert not param.requires_grad
pass
gradient = self.grad_conv(sdf)
elif mode == 'raw':
gradient = torch.zeros([1, 3] + [*self.sdf.grid.shape[-3:]]).to(self.sdf.grid.device)
gradient[:,0,:-1,:,:] = (sdf[:,0,1:,:,:] - sdf[:,0,:-1,:,:]) / self.voxel_size
gradient[:,1,:,:-1,:] = (sdf[:,0,:,1:,:] - sdf[:,0,:,:-1,:]) / self.voxel_size
gradient[:,2,:,:,:-1] = (sdf[:,0,:,:,1:] - sdf[:,0,:,:,:-1]) / self.voxel_size
else:
raise NotImplementedError
return gradient
    def neus_alpha_from_sdf_scatter(self, viewdirs, ray_id, dist, sdf, gradients, global_step,
                                    is_train, use_mid=True):
        """NeuS-style conversion from SDF samples to per-sample alpha (scatter layout).

        Args:
            viewdirs: (N_rays, 3) per-ray view directions.
            ray_id: (M,) maps each flat sample to its ray.
            dist: step length between samples (assumed reshapable to (-1, 1)).
            sdf, gradients: per-sample SDF values / gradients.
            global_step: training iteration, used to anneal the sharpness s.
        Returns:
            (s_val, alpha): current sigmoid sharpness and (M,) alphas.
        """
        if is_train:
            if not self.s_learn:
                # Fixed schedule: sharpen the sigmoid as training progresses.
                s_val = 1. / (global_step + self.s_ratio / self.s_start - self.step_start) * self.s_ratio
                self.s_val.data = torch.ones_like(self.s_val) * s_val
            else:
                s_val = self.s_val.item()
        else:
            # At eval time s_val is only reported, not updated.
            s_val = 0
        dirs = viewdirs[ray_id]
        inv_s = torch.ones(1).cuda() / self.s_val
        assert use_mid
        if use_mid:
            # cos between view direction and SDF gradient (surface normal dir).
            true_cos = (dirs * gradients).sum(-1, keepdim=True)
            cos_anneal_ratio = 1.0
            iter_cos = -(F.relu(-true_cos * 0.5 + 0.5) * (1.0 - cos_anneal_ratio) +
                         F.relu(-true_cos) * cos_anneal_ratio)  # always non-positive (M, 1)
            sdf = sdf.unsqueeze(-1)  # (M, 1)
            # Estimate signed distances at section points
            estimated_next_sdf = sdf + iter_cos * dist.reshape(-1, 1) * 0.5  # (M, 1)
            estimated_prev_sdf = sdf - iter_cos * dist.reshape(-1, 1) * 0.5  # (M, 1)
        else:
            estimated_next_sdf = torch.cat([sdf[..., 1:], sdf[..., -1:]], -1).reshape(-1, 1)
            estimated_prev_sdf = torch.cat([sdf[..., :1], sdf[..., :-1]], -1).reshape(-1, 1)
        # Alpha is the normalized CDF drop of the sigmoid across the interval.
        prev_cdf = torch.sigmoid(estimated_prev_sdf * inv_s.reshape(-1, 1))
        next_cdf = torch.sigmoid(estimated_next_sdf * inv_s.reshape(-1, 1))
        p = prev_cdf - next_cdf
        c = prev_cdf
        alpha = ((p + 1e-5) / (c + 1e-5)).clip(0.0, 1.0).squeeze()
        return s_val, alpha
    def neus_alpha_from_sdf(self, viewdirs, steps, sdf, gradients, global_step, is_train, use_mid=True):
        """NeuS-style alpha computation for the dense (batch, n_samples) layout.

        Args:
            viewdirs: (batch, 3) view directions; steps: per-sample depths.
        Returns:
            (s_val, alpha) with alpha shaped (batch, n_samples).
        """
        ori_shape = viewdirs.shape
        n_samples = steps.shape[-1]
        # force s_val value to change with global step
        if is_train:
            batch_size = steps.shape[0]
            if not self.s_learn:
                s_val = 1. / (global_step + self.s_ratio / self.s_start - self.step_start) * self.s_ratio
                self.s_val.data = torch.ones_like(self.s_val) * s_val
            else:
                s_val = self.s_val.item()
        else:
            dirs = viewdirs.reshape(-1, 3)
            steps = steps.reshape(-1, n_samples)
            batch_size = dirs.shape[0]
            s_val = 0
        # Broadcast a single shared depth vector across the batch if needed.
        if steps.shape[0] == 1:
            steps = steps.repeat(batch_size,1)
        dirs = viewdirs.unsqueeze(-2)
        inv_s = torch.ones(1).cuda() / self.s_val  # * torch.exp(-inv_s)
        inv_s = inv_s.expand(batch_size * n_samples, 1)
        if use_mid:
            true_cos = (dirs * gradients).sum(-1, keepdim=True)
            cos_anneal_ratio = 1.0
            iter_cos = -(F.relu(-true_cos * 0.5 + 0.5) * (1.0 - cos_anneal_ratio) +
                         F.relu(-true_cos) * cos_anneal_ratio)  # always non-positive
            iter_cos = iter_cos.reshape(-1,1)
            sdf = sdf.reshape(-1, 1)
            # calculate dist from steps / z_vals
            dists = steps[..., 1:] - steps[..., :-1]
            # Pad the last interval with the mean spacing.
            dists = torch.cat([dists, torch.Tensor([dists.mean()]).expand(dists[..., :1].shape)], -1)
            # Estimate signed distances at section points
            estimated_next_sdf = sdf + iter_cos * dists.reshape(-1, 1) * 0.5
            estimated_prev_sdf = sdf - iter_cos * dists.reshape(-1, 1) * 0.5
        else:
            estimated_next_sdf = torch.cat([sdf[...,1:], sdf[...,-1:]], -1).reshape(-1, 1)
            estimated_prev_sdf = torch.cat([sdf[...,:1], sdf[...,:-1]], -1).reshape(-1, 1)
        # Alpha is the normalized CDF drop of the sigmoid across each interval.
        prev_cdf = torch.sigmoid(estimated_prev_sdf * inv_s)
        next_cdf = torch.sigmoid(estimated_next_sdf * inv_s)
        p = prev_cdf - next_cdf
        c = prev_cdf
        alpha = ((p + 1e-5) / (c + 1e-5)).reshape(batch_size, n_samples).clip(0.0, 1.0)
        return s_val, alpha
    def grid_sampler(self, xyz, *grids, mode=None, align_corners=True, smooth=False, displace=0.):
        '''Wrapper for the interp operation.

        Samples world-space points `xyz` from the first grid in `grids` via
        F.grid_sample; `smooth` pre-filters the grid with self.smooth_conv.
        Only grids[0] is ever sampled despite the varargs signature.
        '''
        if mode is None:
            # bilinear is actually trilinear if 5D input is given to grid_sample
            mode = 'nearest' if self.nearest else 'bilinear'
        shape = xyz.shape[:-1]
        xyz = xyz.reshape(1,1,1,-1,3)
        # Normalize to [-1, 1] and flip xyz -> zyx (grid_sample's index order).
        ind_norm = ((xyz - self.xyz_min) / (self.xyz_max - self.xyz_min)).flip((-1,)) * 2 - 1
        if displace !=0:
            # NOTE(review): the offset is added uniformly to all three
            # *normalized* coordinates but scaled by the world-space voxel
            # size -- confirm intended units before relying on displace != 0.
            ind_norm[...,:] += displace * self.voxel_size
        # TODO: use `rearrange' to make it readable
        if smooth:
            grid = self.smooth_conv(grids[0])
        else:
            grid = grids[0]
        ret_lst = F.grid_sample(grid, ind_norm, mode=mode, align_corners=align_corners
                                ).reshape(grid.shape[1],-1).T.reshape(*shape,grid.shape[1]).squeeze()
        return ret_lst
    def sample_ray_cuda(self, rays_o, rays_d, near, far, stepsize, maskout=True, use_bg=False, **render_kwargs):
        '''Sample query points on rays.
        All the output points are sorted from near to far.
        Input:
            rays_o, rayd_d: both in [N, 3] indicating ray configurations.
            near, far: the near and far distance of the rays.
            stepsize: the number of voxels of each sample step.
        Output:
            ray_pts: [M, 3] storing all the sampled points.
            ray_id: [M] the index of the ray of each point.
            step_id: [M] the i'th step on a ray of each point.
        '''
        far = 1e9  # the given far can be too small while rays stop when hitting scene bbox
        rays_o = rays_o.contiguous()
        rays_d = rays_d.contiguous()
        # Background model uses its own (coarser) voxel size for the step.
        if not use_bg:
            stepdist = stepsize * self.voxel_size
        else:
            stepdist = stepsize * self.voxel_size_bg
        ray_pts, mask_outbbox, ray_id, step_id, N_steps, t_min, t_max = render_utils_cuda.sample_pts_on_rays(
            rays_o, rays_d, self.xyz_min, self.xyz_max, near, far, stepdist)
        # correct the cuda output N_steps, which could have a bias of 1 randomly
        N_steps = ray_id.unique(return_counts=True)[1]
        if maskout:
            # Foreground keeps in-bbox samples; background keeps the complement.
            if not use_bg:
                mask_inbbox = ~mask_outbbox
            else:
                mask_inbbox = mask_outbbox
            ray_pts = ray_pts[mask_inbbox]
            ray_id = ray_id[mask_inbbox]
            step_id = step_id[mask_inbbox]
        return ray_pts, ray_id, step_id, mask_outbbox, N_steps
    def sample_ray_ori(self, rays_o, rays_d, near, far, stepsize, is_train=False, **render_kwargs):
        '''Sample query points on rays (pure-PyTorch fallback sampler).

        Returns:
            rays_pts: [..., N_samples, 3] sampled points per ray.
            mask_outbbox: boolean mask of samples outside the scene bbox.
            step: marched distances (before normalization by |rays_d|).
        '''
        # 1. determine the maximum number of query points to cover all possible rays
        N_samples = int(np.linalg.norm(np.array(self.sdf.grid.shape[2:])+1) / stepsize) + 1
        # 2. determine the two end-points of ray bbox intersection
        vec = torch.where(rays_d==0, torch.full_like(rays_d, 1e-6), rays_d)
        rate_a = (self.xyz_max - rays_o) / vec
        rate_b = (self.xyz_min - rays_o) / vec
        t_min = torch.minimum(rate_a, rate_b).amax(-1).clamp(min=near, max=far)
        t_max = torch.maximum(rate_a, rate_b).amin(-1).clamp(min=near, max=far)
        # 3. check wheter a raw intersect the bbox or not
        mask_outbbox = (t_max <= t_min)
        # 4. sample points on each ray
        rng = torch.arange(N_samples)[None].float()
        if is_train:
            # Jitter sample positions per ray during training.
            rng = rng.repeat(rays_d.shape[-2],1)
            rng += torch.rand_like(rng[:,[0]])
        step = stepsize * self.voxel_size * rng
        interpx = (t_min[...,None] + step/rays_d.norm(dim=-1,keepdim=True))
        rays_pts = rays_o[...,None,:] + rays_d[...,None,:] * interpx[...,None]
        # 5. update mask for query points outside bbox
        mask_outbbox = mask_outbbox[...,None] | ((self.xyz_min>rays_pts) | (rays_pts>self.xyz_max)).any(dim=-1)
        return rays_pts, mask_outbbox, step
def forward(self, rays_o, rays_d, viewdirs, global_step=None, **render_kwargs):
'''Volume rendering'''
ret_dict = {}
N = len(rays_o)
ray_pts, ray_id, step_id, mask_outbbox, N_steps = self.sample_ray_cuda(
rays_o=rays_o, rays_d=rays_d, is_train=global_step is not None, **render_kwargs)
# interval = render_kwargs['stepsize'] * self.voxel_size_ratio
# skip known free space
if self.mask_cache is not None:
mask = self.mask_cache(ray_pts)
ray_pts = ray_pts[mask]
ray_id = ray_id[mask]
step_id = step_id[mask]
mask_outbbox[~mask_outbbox] |= ~mask
sdf_grid = self.smooth_conv(self.sdf.grid) if self.smooth_sdf else self.sdf.grid
sdf = self.grid_sampler(ray_pts, sdf_grid)
self.gradient = self.neus_sdf_gradient(sdf=self.sdf.grid)
gradient = self.grid_sampler(ray_pts, self.gradient)
dist = render_kwargs['stepsize'] * self.voxel_size
s_val, alpha = self.neus_alpha_from_sdf_scatter(viewdirs, ray_id, dist, sdf, gradient, global_step=global_step,
is_train=global_step is not None, use_mid=True)
weights, alphainv_last = Alphas2Weights.apply(alpha, ray_id, N)
if self.fast_color_thres > 0:
mask = weights > self.fast_color_thres
ray_pts = ray_pts[mask]
ray_id = ray_id[mask]
step_id = step_id[mask]
alpha = alpha[mask]
gradient = gradient[mask]
weights, alphainv_last = Alphas2Weights.apply(alpha, ray_id, N)
rgb_feat = []
if not self.rgbnet_full_implicit:
k0 = self.k0(ray_pts)
rgb_feat.append(k0)
rays_xyz = (ray_pts - self.xyz_min) / (self.xyz_max - self.xyz_min)
xyz_emb = (rays_xyz.unsqueeze(-1) * self.posfreq).flatten(-2)
xyz_emb = torch.cat([rays_xyz, xyz_emb.sin(), xyz_emb.cos()], -1)
rgb_feat.append(xyz_emb)
viewdirs_emb = (viewdirs.unsqueeze(-1) * self.viewfreq).flatten(-2)
viewdirs_emb = torch.cat(
[viewdirs, viewdirs_emb.sin(), viewdirs_emb.cos()], -1)
rgb_feat.append(viewdirs_emb.flatten(0, -2)[ray_id])
rgb_feat = torch.cat(rgb_feat, -1)
if self.geo_rgb_dim == 3:
normal = gradient / (gradient.norm(dim=-1, keepdim=True) + 1e-5)
rgb_feat = torch.cat([rgb_feat, normal], -1)
rgb_logit = self.rgbnet(rgb_feat)
rgb = torch.sigmoid(rgb_logit)
rgb_marched = segment_coo(
src=(weights.unsqueeze(-1) * rgb),
index=ray_id, out=torch.zeros([N, 3]), reduce='sum')
cum_weights = segment_coo(
src=(weights.unsqueeze(-1)),
index=ray_id, out=torch.zeros([N, 1]), reduce='sum')
# Ray marching
rgb_marched = rgb_marched + (1 - cum_weights) * render_kwargs['bg']
# rgb_marched = rgb_marched + alphainv_last.unsqueeze(-1) * render_kwargs['bg']
rgb_marched = rgb_marched.clamp(0, 1)
if gradient is not None and render_kwargs.get('render_grad', False):
normal = gradient / (gradient.norm(2, -1, keepdim=True) + 1e-6)
normal_marched = segment_coo(
src=(weights.unsqueeze(-1) * normal),
index=ray_id, out=torch.zeros([N, 3]), reduce='sum')
else:
normal_marched = None
if getattr(render_kwargs, 'render_depth', False):
with torch.no_grad():
depth = segment_coo(
src=(weights * step_id * dist),
index=ray_id, out=torch.zeros([N]), reduce='sum')
disp = 1 / depth
else:
depth = None
disp = None
ret_dict.update({
'alphainv_cum': alphainv_last,
'weights': weights,
'rgb_marched': rgb_marched,
'normal_marched': normal_marched,
'raw_alpha': alpha,
'raw_rgb': rgb,
'depth': depth,
'disp': disp,
'mask': mask,
'mask_outbbox':mask_outbbox,
'gradient': gradient,
"gradient_error": None,
"s_val": s_val
})
return ret_dict
    def mesh_color_forward(self, ray_pts, **kwargs):
        """Query colors at mesh vertices for textured-mesh export.

        The view direction is taken as the inward surface normal (-n) so the
        color is evaluated as if looking straight at the surface.
        """
        ### coarse-stage geometry and texture are low in resolution
        sdf_grid = self.smooth_conv(self.sdf.grid) if self.smooth_sdf else self.sdf.grid
        self.gradient = self.neus_sdf_gradient(sdf=sdf_grid)
        gradient = self.grid_sampler(ray_pts, self.gradient).reshape(-1, 3)
        normal = gradient / (gradient.norm(dim=-1, keepdim=True) + 1e-5)
        viewdirs = -normal
        rgb_feat = []
        k0 = self.k0(ray_pts)
        rgb_feat.append(k0)
        # Positional encoding of normalized point coordinates.
        rays_xyz = (ray_pts - self.xyz_min) / (self.xyz_max - self.xyz_min)
        xyz_emb = (rays_xyz.unsqueeze(-1) * self.posfreq).flatten(-2)
        xyz_emb = torch.cat([rays_xyz, xyz_emb.sin(), xyz_emb.cos()], -1)
        rgb_feat.append(xyz_emb)
        # Positional encoding of the synthetic view directions.
        viewdirs_emb = (viewdirs.unsqueeze(-1) * self.viewfreq).flatten(-2)
        viewdirs_emb = torch.cat(
            [viewdirs, viewdirs_emb.sin(), viewdirs_emb.cos()], -1)
        rgb_feat.append(viewdirs_emb.flatten(0, -2))
        rgb_feat = torch.cat(rgb_feat, -1)
        if self.geo_rgb_dim == 3:
            rgb_feat = torch.cat([rgb_feat, normal], -1)
        rgb_logit = self.rgbnet(rgb_feat)
        rgb = torch.sigmoid(rgb_logit)
        return rgb
    def extract_geometry(self, bound_min, bound_max, resolution=128, threshold=0.0, **kwargs):
        """Extract a surface mesh from the (optionally smoothed) SDF grid.

        The sdf is negated so the module-level extract_geometry helper's
        "value >= threshold" convention selects the interior of the surface.
        """
        if self.smooth_sdf:
            sdf_grid = self.smooth_conv(self.sdf.grid)
        else:
            sdf_grid = self.sdf.grid
        # self._set_nonempty_mask()
        query_func = lambda pts: self.grid_sampler(pts, -sdf_grid)
        if resolution is None:
            resolution = self.world_size[0]
        # Delegates to the module-level extract_geometry(...) utility (same name).
        return extract_geometry(bound_min,
                                bound_max,
                                resolution=resolution,
                                threshold=threshold,
                                query_func=query_func)
''' Module for the searched coarse geometry
It supports query for the known free space and unknown space.
'''
class MaskCache(nn.Module):
    """Query the cached coarse-stage density grid for known free space.

    Loads a density grid from a checkpoint at `path`, max-pools it with
    kernel size `ks` (dilating occupied regions), and answers point queries
    with a boolean "possibly occupied" mask: alpha >= mask_cache_thres.
    """
    def __init__(self, path, mask_cache_thres, ks=3):
        super().__init__()
        st = torch.load(path)
        self.mask_cache_thres = mask_cache_thres
        cache_kwargs = st['MaskCache_kwargs']
        self.register_buffer('xyz_min', torch.FloatTensor(cache_kwargs['xyz_min']))
        self.register_buffer('xyz_max', torch.FloatTensor(cache_kwargs['xyz_max']))
        # Max-pooling dilates the density field so borderline voxels survive.
        pooled = F.max_pool3d(
            st['model_state_dict']['density'], kernel_size=ks, padding=ks//2, stride=1)
        self.register_buffer('density', pooled)
        self.act_shift = cache_kwargs['act_shift']
        self.voxel_size_ratio = cache_kwargs['voxel_size_ratio']
        self.nearest = cache_kwargs.get('nearest', False)

    @torch.no_grad()
    def forward(self, xyz):
        """Return a boolean occupancy mask with the same leading shape as xyz."""
        shape = xyz.shape[:-1]
        pts = xyz.reshape(1, 1, 1, -1, 3)
        # Normalize to [-1, 1] and flip xyz -> zyx (grid_sample's index order).
        ind_norm = ((pts - self.xyz_min) / (self.xyz_max - self.xyz_min)).flip((-1,)) * 2 - 1
        sample_mode = 'nearest' if self.nearest else 'bilinear'
        density = F.grid_sample(self.density, ind_norm, align_corners=True, mode=sample_mode)
        alpha = 1 - torch.exp(-F.softplus(density + self.act_shift) * self.voxel_size_ratio)
        return (alpha.reshape(*shape) >= self.mask_cache_thres)
''' Misc
'''
def total_variation(v, mask=None):
    """Total variation of a 5D voxel grid, averaged over the three spatial axes.

    Args:
        v: (N, C, X, Y, Z) grid.
        mask: optional boolean grid matching v's shape; a difference is kept
            only when both voxels it spans are masked-in, and the sum is
            normalized by the number of masked voxels. When omitted, the
            plain mean absolute difference per axis is returned.
    Returns:
        Scalar TV value.
    """
    # Tensor.diff exists since torch 1.8; the old exact-version check
    # (torch.__version__ == '1.10.0') missed every other modern release.
    if hasattr(v, 'diff'):
        tv2 = v.diff(dim=2).abs()
        tv3 = v.diff(dim=3).abs()
        tv4 = v.diff(dim=4).abs()
    else:
        tv2 = (v[:,:,1:,:,:] - v[:,:,:-1,:,:]).abs()
        tv3 = (v[:,:,:,1:,:] - v[:,:,:,:-1,:]).abs()
        tv4 = (v[:,:,:,:,1:] - v[:,:,:,:,:-1]).abs()
    if mask is None:
        # Bug fix: the old code unconditionally divided by mask.sum() and
        # crashed with AttributeError when mask was omitted.
        return (tv2.mean() + tv3.mean() + tv4.mean()) / 3
    # Keep only differences whose both endpoints are inside the mask.
    tv2 = tv2[mask[:,:,:-1] & mask[:,:,1:]]
    tv3 = tv3[mask[:,:,:,:-1] & mask[:,:,:,1:]]
    tv4 = tv4[mask[:,:,:,:,:-1] & mask[:,:,:,:,1:]]
    return (tv2.sum() + tv3.sum() + tv4.sum()) / 3 / mask.sum()
class Alphas2Weights(torch.autograd.Function):
    """Autograd wrapper around the CUDA alpha-compositing kernels.

    Converts per-sample alphas (flat, grouped by ray_id over N rays) into
    compositing weights plus the leftover transmittance per ray; see
    render_utils_cuda.alpha2weight for the kernel semantics.
    """
    @staticmethod
    def forward(ctx, alpha, ray_id, N):
        weights, T, alphainv_last, i_start, i_end = render_utils_cuda.alpha2weight(alpha, ray_id, N)
        # Save intermediates only when a backward pass can actually happen.
        if alpha.requires_grad:
            ctx.save_for_backward(alpha, weights, T, alphainv_last, i_start, i_end)
            ctx.n_rays = N
        return weights, alphainv_last
    @staticmethod
    @torch.autograd.function.once_differentiable
    def backward(ctx, grad_weights, grad_last):
        alpha, weights, T, alphainv_last, i_start, i_end = ctx.saved_tensors
        grad = render_utils_cuda.alpha2weight_backward(
            alpha, weights, T, alphainv_last,
            i_start, i_end, ctx.n_rays, grad_weights, grad_last)
        # ray_id and N are non-differentiable inputs -> None gradients.
        return grad, None, None
''' Ray and batch
'''
def get_rays(H, W, K, c2w, inverse_y, flip_x, flip_y, mode='center'):
    """Generate per-pixel ray origins and directions for an H x W image.

    Args:
        K: 3x3 intrinsics (indexable as K[r][c]); c2w: camera-to-world matrix.
        inverse_y: if True use a +y/+z camera frame, else the OpenGL -y/-z one.
        flip_x / flip_y: mirror the pixel grid horizontally / vertically.
        mode: pixel-center convention -- 'lefttop', 'center', or 'random' jitter.
    Returns:
        rays_o, rays_d: (H, W, 3) world-space origins and (unnormalized) directions.
    """
    xs = torch.linspace(0, W - 1, W, device=c2w.device)
    ys = torch.linspace(0, H - 1, H, device=c2w.device)
    i, j = torch.meshgrid(xs, ys)  # pytorch's meshgrid has indexing='ij'
    i = i.t().float()
    j = j.t().float()
    if mode == 'center':
        i = i + 0.5
        j = j + 0.5
    elif mode == 'random':
        i = i + torch.rand_like(i)
        j = j + torch.rand_like(j)
    elif mode != 'lefttop':
        raise NotImplementedError
    if flip_x:
        i = i.flip((1,))
    if flip_y:
        j = j.flip((0,))
    x_cam = (i - K[0][2]) / K[0][0]
    y_cam = (j - K[1][2]) / K[1][1]
    if inverse_y:
        dirs = torch.stack([x_cam, y_cam, torch.ones_like(i)], -1)
    else:
        dirs = torch.stack([x_cam, -y_cam, -torch.ones_like(i)], -1)
    # Rotate camera-frame directions into world frame: dot each dir with the
    # rows of the c2w rotation (equivalent to c2w[:3,:3] @ dir per pixel).
    rays_d = torch.sum(dirs[..., np.newaxis, :] * c2w[:3, :3], -1)
    # All rays share the camera center as origin.
    rays_o = c2w[:3, 3].expand(rays_d.shape)
    return rays_o, rays_d
def get_rays_np(H, W, K, c2w):
    """NumPy twin of get_rays (left-top pixel convention, OpenGL-style -y/-z camera).

    Returns (rays_o, rays_d), both (H, W, 3); rays_o is a broadcast view of the
    camera center c2w[:3, 3].
    """
    cols, rows = np.meshgrid(np.arange(W, dtype=np.float32),
                             np.arange(H, dtype=np.float32), indexing='xy')
    cam_dirs = np.stack(
        [(cols - K[0][2]) / K[0][0], -(rows - K[1][2]) / K[1][1], -np.ones_like(cols)], -1)
    # Rotate camera-frame directions into world frame (per-pixel c2w R @ dir).
    rays_d = np.sum(cam_dirs[..., np.newaxis, :] * c2w[:3, :3], -1)
    rays_o = np.broadcast_to(c2w[:3, 3], np.shape(rays_d))
    return rays_o, rays_d
def ndc_rays(H, W, focal, near, rays_o, rays_d):
    """Shift ray origins to the near plane and project rays into NDC space
    (standard NeRF forward-facing convention; assumes rays look down -z)."""
    # Move each origin along its ray onto the z = -near plane.
    t = -(near + rays_o[..., 2]) / rays_d[..., 2]
    rays_o = rays_o + t[..., None] * rays_d
    ox, oy, oz = rays_o[..., 0], rays_o[..., 1], rays_o[..., 2]
    dx, dy, dz = rays_d[..., 0], rays_d[..., 1], rays_d[..., 2]
    # Per-axis projection scales derived from the image plane extents.
    sx = -1. / (W / (2. * focal))
    sy = -1. / (H / (2. * focal))
    o0 = sx * ox / oz
    o1 = sy * oy / oz
    o2 = 1. + 2. * near / oz
    d0 = sx * (dx / dz - ox / oz)
    d1 = sy * (dy / dz - oy / oz)
    d2 = -2. * near / oz
    return torch.stack([o0, o1, o2], -1), torch.stack([d0, d1, d2], -1)
def get_rays_of_a_view(H, W, K, c2w, ndc, inverse_y, flip_x, flip_y, mode='center'):
    """Build rays for one camera view; viewdirs are unit-norm ray directions.

    Directions are normalized BEFORE the optional NDC warp so viewdirs stay
    in world space even when rays_o/rays_d are remapped.
    """
    rays_o, rays_d = get_rays(
        H, W, K, c2w, inverse_y=inverse_y, flip_x=flip_x, flip_y=flip_y, mode=mode)
    viewdirs = rays_d / rays_d.norm(dim=-1, keepdim=True)
    if ndc:
        rays_o, rays_d = ndc_rays(H, W, K[0][0], 1., rays_o, rays_d)
    return rays_o, rays_d, viewdirs
@torch.no_grad()
def get_training_rays(rgb_tr, train_poses, HW, Ks, ndc, inverse_y, flip_x, flip_y):
    """Precompute per-pixel rays for every training view.

    Requires all views to share the same (H, W) and intrinsics K.
    Returns (rgb_tr, rays_o_tr, rays_d_tr, viewdirs_tr, imsz) where the ray
    tensors are (n_views, H, W, 3) on rgb_tr's device.
    """
    print('get_training_rays: start')
    assert len(np.unique(HW, axis=0)) == 1
    assert len(np.unique(Ks.reshape(len(Ks), -1), axis=0)) == 1
    assert len(rgb_tr) == len(train_poses) and len(rgb_tr) == len(Ks) and len(rgb_tr) == len(HW)
    H, W = HW[0]
    K = Ks[0]
    eps_time = time.time()
    n_views = len(rgb_tr)
    device = rgb_tr.device
    rays_o_tr = torch.zeros([n_views, H, W, 3], device=device)
    rays_d_tr = torch.zeros([n_views, H, W, 3], device=device)
    viewdirs_tr = torch.zeros([n_views, H, W, 3], device=device)
    imsz = [1] * n_views
    for view_idx, c2w in enumerate(train_poses):
        rays_o, rays_d, viewdirs = get_rays_of_a_view(
            H=H, W=W, K=K, c2w=c2w, ndc=ndc, inverse_y=inverse_y, flip_x=flip_x, flip_y=flip_y)
        rays_o_tr[view_idx].copy_(rays_o.to(device))
        rays_d_tr[view_idx].copy_(rays_d.to(device))
        viewdirs_tr[view_idx].copy_(viewdirs.to(device))
        del rays_o, rays_d, viewdirs
    eps_time = time.time() - eps_time
    print('get_training_rays: finish (eps time:', eps_time, 'sec)')
    return rgb_tr, rays_o_tr, rays_d_tr, viewdirs_tr, imsz
@torch.no_grad()
def get_training_rays_flatten(rgb_tr_ori, train_poses, HW, Ks, ndc, inverse_y, flip_x, flip_y):
    """Precompute training rays for views of possibly different sizes.

    Unlike get_training_rays, per-view H/W/K may differ: all pixels are packed
    into flat (N, 3) tensors; imsz records how many pixels each view contributed.
    """
    print('get_training_rays_flatten: start')
    assert len(rgb_tr_ori) == len(train_poses) and len(rgb_tr_ori) == len(Ks) and len(rgb_tr_ori) == len(HW)
    eps_time = time.time()
    DEVICE = rgb_tr_ori[0].device
    N = sum(im.shape[0] * im.shape[1] for im in rgb_tr_ori)
    rgb_tr = torch.zeros([N,3], device=DEVICE)
    rays_o_tr = torch.zeros_like(rgb_tr)
    rays_d_tr = torch.zeros_like(rgb_tr)
    viewdirs_tr = torch.zeros_like(rgb_tr)
    imsz = []
    top = 0
    for c2w, img, (H, W), K in zip(train_poses, rgb_tr_ori, HW, Ks):
        assert img.shape[:2] == (H, W)
        rays_o, rays_d, viewdirs = get_rays_of_a_view(
            H=H, W=W, K=K, c2w=c2w, ndc=ndc,
            inverse_y=inverse_y, flip_x=flip_x, flip_y=flip_y)
        # Copy this view's pixels into the next free slice of the flat buffers.
        n = H * W
        rgb_tr[top:top+n].copy_(img.flatten(0,1))
        rays_o_tr[top:top+n].copy_(rays_o.flatten(0,1).to(DEVICE))
        rays_d_tr[top:top+n].copy_(rays_d.flatten(0,1).to(DEVICE))
        viewdirs_tr[top:top+n].copy_(viewdirs.flatten(0,1).to(DEVICE))
        imsz.append(n)
        top += n
    assert top == N
    eps_time = time.time() - eps_time
    print('get_training_rays_flatten: finish (eps time:', eps_time, 'sec)')
    return rgb_tr, rays_o_tr, rays_d_tr, viewdirs_tr, imsz
@torch.no_grad()
def get_training_rays_in_maskcache_sampling(rgb_tr_ori, train_poses, HW, Ks, ndc, inverse_y, flip_x, flip_y, model, render_kwargs):
    """Precompute training rays, keeping only pixels whose rays hit occupied space.

    A pixel survives when at least one of its samples lies inside the scene
    bbox AND inside model.mask_cache's occupancy. Rays are tested in row
    chunks of CHUNK to bound memory; the flat outputs are truncated to the
    surviving pixel count.
    """
    print('get_training_rays_in_maskcache_sampling: start')
    assert len(rgb_tr_ori) == len(train_poses) and len(rgb_tr_ori) == len(Ks) and len(rgb_tr_ori) == len(HW)
    CHUNK = 64
    DEVICE = rgb_tr_ori[0].device
    eps_time = time.time()
    # Allocate for the worst case (every pixel kept); truncated at the end.
    N = sum(im.shape[0] * im.shape[1] for im in rgb_tr_ori)
    rgb_tr = torch.zeros([N,3], device=DEVICE)
    rays_o_tr = torch.zeros_like(rgb_tr)
    rays_d_tr = torch.zeros_like(rgb_tr)
    viewdirs_tr = torch.zeros_like(rgb_tr)
    imsz = []
    top = 0
    for c2w, img, (H, W), K in zip(train_poses, rgb_tr_ori, HW, Ks):
        assert img.shape[:2] == (H, W)
        rays_o, rays_d, viewdirs = get_rays_of_a_view(
            H=H, W=W, K=K, c2w=c2w, ndc=ndc,
            inverse_y=inverse_y, flip_x=flip_x, flip_y=flip_y)
        mask = torch.ones(img.shape[:2], device=DEVICE, dtype=torch.bool)
        for i in range(0, img.shape[0], CHUNK):
            rays_pts, mask_outbbox, _ = model.sample_ray_ori(
                rays_o=rays_o[i:i+CHUNK], rays_d=rays_d[i:i+CHUNK], **render_kwargs)
            # Also mark samples in known-free space (per the coarse mask cache).
            mask_outbbox[~mask_outbbox] |= (~model.mask_cache(rays_pts[~mask_outbbox]))
            # Keep a pixel if any of its samples is valid.
            mask[i:i+CHUNK] &= (~mask_outbbox).any(-1).to(DEVICE)
        n = mask.sum()
        rgb_tr[top:top+n].copy_(img[mask])
        rays_o_tr[top:top+n].copy_(rays_o[mask].to(DEVICE))
        rays_d_tr[top:top+n].copy_(rays_d[mask].to(DEVICE))
        viewdirs_tr[top:top+n].copy_(viewdirs[mask].to(DEVICE))
        imsz.append(n)
        top += n
    print('get_training_rays_in_maskcache_sampling: ratio', top / N)
    rgb_tr = rgb_tr[:top]
    rays_o_tr = rays_o_tr[:top]
    rays_d_tr = rays_d_tr[:top]
    viewdirs_tr = viewdirs_tr[:top]
    eps_time = time.time() - eps_time
    print('get_training_rays_in_maskcache_sampling: finish (eps time:', eps_time, 'sec)')
    return rgb_tr, rays_o_tr, rays_d_tr, viewdirs_tr, imsz
def batch_indices_generator(N, BS):
    """Endlessly yield random index batches of size BS drawn from range(N).

    Reshuffles (dropping any tail shorter than BS) whenever the current
    permutation is exhausted.
    """
    # torch.randperm on cuda produce incorrect results in my machine
    idx = torch.LongTensor(np.random.permutation(N))
    top = 0
    while True:
        if top + BS > N:
            idx = torch.LongTensor(np.random.permutation(N))
            top = 0
        yield idx[top:top + BS]
        top += BS
| 40,171 | 42.760349 | 153 | py |
Voxurf | Voxurf-main/lib/load_mobilebrick.py | import torch
import torch.nn.functional as F
import cv2 as cv
import numpy as np
import os
from glob import glob
from icecream import ic
from scipy.spatial.transform import Rotation as Rot
from scipy.spatial.transform import Slerp
import imageio
# This function is borrowed from IDR: https://github.com/lioryariv/idr
def load_K_Rt_from_P(filename, P=None):
    """Decompose a 3x4 projection matrix into intrinsics and a camera pose.

    (Borrowed from IDR.) When P is None it is parsed from `filename`, which
    may carry one extra header line. Returns (4x4 intrinsics, 4x4
    camera-to-world pose).
    """
    if P is None:
        lines = open(filename).read().splitlines()
        if len(lines) == 4:
            lines = lines[1:]
        lines = [[x[0], x[1], x[2], x[3]] for x in (x.split(" ") for x in lines)]
        P = np.asarray(lines).astype(np.float32).squeeze()

    decomposed = cv.decomposeProjectionMatrix(P)
    K, R, t = decomposed[0], decomposed[1], decomposed[2]
    K = K / K[2, 2]

    intrinsics = np.eye(4)
    intrinsics[:3, :3] = K

    # decomposeProjectionMatrix yields a world-to-camera rotation and a
    # homogeneous camera center; transpose R and dehomogenize t for c2w.
    pose = np.eye(4, dtype=np.float32)
    pose[:3, :3] = R.transpose()
    pose[:3, 3] = (t[:3] / t[3])[:, 0]
    return intrinsics, pose
def load_mobilebrick_data(basedir, normalize=True, reso_level=2, mask=False, white_bg=False):
    """Load a MobileBrick scene: images, camera poses, intrinsics and masks.

    Args:
        basedir: scene directory containing image/, mask/ and cameras.npz.
        normalize: apply the per-view scale matrices to normalize the scene.
        reso_level: integer downsampling factor applied to images/intrinsics.
        mask: composite images against a constant background using the masks.
        white_bg: background color (white vs black) used when `mask` is set.
    Returns:
        imgs, poses, render_poses, [H, W, focal], K, i_split, scale_mat, masks
        where scale_mat is None when normalize=False and masks is None when
        the scene ships no mask files.
    """
    rgb_paths = sorted(glob(os.path.join(basedir, 'image', '*jpg')))
    mask_paths = sorted(glob(os.path.join(basedir, 'mask', '*png')))

    render_cameras_name = 'cameras.npz'
    camera_dict = np.load(os.path.join(basedir, render_cameras_name))
    world_mats_np = [camera_dict['world_mat_%d' % idx].astype(np.float32) for idx in range(len(rgb_paths))]
    if normalize:
        scale_mats_np = [camera_dict['scale_mat_%d' % idx].astype(np.float32) for idx in range(len(rgb_paths))]
    else:
        scale_mats_np = None

    all_intrinsics = []
    all_poses = []
    all_imgs = []
    all_masks = []
    for i, (world_mat, im_name) in enumerate(zip(world_mats_np, rgb_paths)):
        # Fold the normalization into the projection before decomposition.
        P = world_mat @ scale_mats_np[i] if normalize else world_mat
        P = P[:3, :4]
        intrinsics, pose = load_K_Rt_from_P(None, P)
        all_intrinsics.append(intrinsics)
        all_poses.append(pose)
        if len(mask_paths) > 0:
            mask_ = (imageio.imread(mask_paths[i]) / 255.).astype(np.float32)
            if mask_.ndim == 3:
                all_masks.append(mask_[..., :3])
            else:
                all_masks.append(mask_[..., None])
        all_imgs.append((imageio.imread(im_name) / 255.).astype(np.float32))

    imgs = np.stack(all_imgs, 0)
    poses = np.stack(all_poses, 0)
    H, W = imgs[0].shape[:2]
    K = all_intrinsics[0]
    focal = all_intrinsics[0][0, 0]
    print("Date original shape: ", H, W)
    # Bug fix: np.stack crashed on an empty list when the scene has no masks.
    masks = np.stack(all_masks, 0) if all_masks else None
    if mask:
        assert masks is not None
        bg = 1. if white_bg else 0.
        imgs = imgs * masks + bg * (1 - masks)

    if reso_level > 1:
        H, W = int(H / reso_level), int(W / reso_level)
        imgs = F.interpolate(torch.from_numpy(imgs).permute(0, 3, 1, 2), size=(H, W)).permute(0, 2, 3, 1).numpy()
        if masks is not None:
            masks = F.interpolate(torch.from_numpy(masks).permute(0, 3, 1, 2), size=(H, W)).permute(0, 2, 3, 1).numpy()
        K[:2] /= reso_level
        focal /= reso_level

    # Held-out split: every 8th image (offset 3) goes to val/test.
    i_test = [i for i in range(len(imgs)) if (i - 3) % 8 == 0]
    i_val = i_test
    i_train = list(set(np.arange(len(imgs))) - set(i_test))
    i_split = [np.array(i_train), np.array(i_val), np.array(i_test)]
    render_poses = poses[i_split[-1]]

    # Bug fix: the old code indexed scale_mats_np[0] even when normalize=False
    # (scale_mats_np is None) and crashed with a TypeError.
    scale_mat = scale_mats_np[0] if scale_mats_np is not None else None
    return imgs, poses, render_poses, [H, W, focal], K, i_split, scale_mat, masks
| 3,429 | 34.360825 | 114 | py |
Voxurf | Voxurf-main/lib/utils.py | import os, math
import numpy as np
import scipy.signal
from typing import List, Optional
from torch import Tensor
import torch
import torch.nn as nn
import torch.nn.functional as F
import cv2
import matplotlib.pyplot as plt
from plyfile import PlyData, PlyElement
import matplotlib.cm as cm
import matplotlib as matplotlib
import imageio
import logging
from torch.jit._builtins import math
from skimage import measure
import trimesh
from . import grid
def get_root_logger(log_level=logging.INFO, handlers=()):
    """Return the process-wide root logger, configuring it on first use.

    basicConfig is applied only when the root logger has no handlers yet;
    the extra `handlers` are attached and the level is (re)set on every call.
    """
    root = logging.getLogger()
    if not root.hasHandlers():
        logging.basicConfig(
            format='%(asctime)s - %(levelname)s - %(message)s',
            level=log_level)
    for extra in handlers:
        root.addHandler(extra)
    root.setLevel(log_level)
    return root
def file_backup(backup_dir):
    """Copy the project's .py sources and config into a 'recording' directory.

    NOTE(review): this function is broken as written -- it is a module-level
    function but references ``self.conf`` / ``self.base_exp_dir`` /
    ``self.conf_path`` (undefined here) and ``copyfile`` (not imported in
    this module), and the ``backup_dir`` argument is never used. It looks
    like a method pasted out of a trainer class; calling it raises NameError.
    """
    dir_lis = self.conf['general.recording']
    os.makedirs(os.path.join(self.base_exp_dir, 'recording'), exist_ok=True)
    for dir_name in dir_lis:
        cur_dir = os.path.join(self.base_exp_dir, 'recording', dir_name)
        os.makedirs(cur_dir, exist_ok=True)
        files = os.listdir(dir_name)
        for f_name in files:
            if f_name[-3:] == '.py':
                copyfile(os.path.join(dir_name, f_name), os.path.join(cur_dir, f_name))
    copyfile(self.conf_path, os.path.join(self.base_exp_dir, 'recording', 'config.conf'))
''' Misc
'''
def mse2psnr(x):
    """Convert a mean-squared-error torch scalar to PSNR in dB."""
    return -10. * torch.log10(x)


def to8b(x):
    """Clip a float array to [0, 1] and quantize to uint8 in [0, 255]."""
    return (255 * np.clip(x, 0, 1)).astype(np.uint8)
''' Extend Adam to support per-voxel learning rate
'''
class Adam(torch.optim.Optimizer):
    """Adam optimizer extended with an optional per-voxel learning rate.

    Identical to torch.optim.Adam except that set_pervoxel_lr() installs a
    per-element multiplier (visit count / max count) which is applied to the
    first-moment update of any parameter whose shape matches the count grid.
    """
    def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8,
                 weight_decay=0, amsgrad=False):
        if not 0.0 <= lr:
            raise ValueError("Invalid learning rate: {}".format(lr))
        if not 0.0 <= eps:
            raise ValueError("Invalid epsilon value: {}".format(eps))
        if not 0.0 <= betas[0] < 1.0:
            raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
        if not 0.0 <= betas[1] < 1.0:
            raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
        if not 0.0 <= weight_decay:
            raise ValueError("Invalid weight_decay value: {}".format(weight_decay))
        defaults = dict(lr=lr, betas=betas, eps=eps,
                        weight_decay=weight_decay, amsgrad=amsgrad)
        # Per-element lr multiplier; None means plain Adam behavior.
        self.per_lr = None
        super(Adam, self).__init__(params, defaults)

    def __setstate__(self, state):
        super(Adam, self).__setstate__(state)
        for group in self.param_groups:
            group.setdefault('amsgrad', False)

    def set_pervoxel_lr(self, count):
        # The count grid must match the (first) optimized parameter's shape.
        assert self.param_groups[0]['params'][0].shape == count.shape
        self.per_lr = count.float() / count.max()

    @torch.no_grad()
    def step(self, closure=None):
        """Performs a single optimization step.
        Args:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            with torch.enable_grad():
                loss = closure()

        for group in self.param_groups:
            params_with_grad = []
            grads = []
            exp_avgs = []
            exp_avg_sqs = []
            max_exp_avg_sqs = []
            state_steps = []
            per_lrs = []
            beta1, beta2 = group['betas']

            for p in group['params']:
                if p.grad is not None:
                    params_with_grad.append(p)
                    if p.grad.is_sparse:
                        raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')
                    grads.append(p.grad)
                    # Attach the per-voxel lr only to shape-matching params.
                    if self.per_lr is not None and p.shape == self.per_lr.shape:
                        per_lrs.append(self.per_lr)
                    else:
                        per_lrs.append(None)

                    state = self.state[p]
                    # Lazy state initialization
                    if len(state) == 0:
                        state['step'] = 0
                        # Exponential moving average of gradient values
                        state['exp_avg'] = torch.zeros_like(p, memory_format=torch.preserve_format)
                        # Exponential moving average of squared gradient values
                        state['exp_avg_sq'] = torch.zeros_like(p, memory_format=torch.preserve_format)
                        if group['amsgrad']:
                            # Maintains max of all exp. moving avg. of sq. grad. values
                            state['max_exp_avg_sq'] = torch.zeros_like(p, memory_format=torch.preserve_format)

                    exp_avgs.append(state['exp_avg'])
                    exp_avg_sqs.append(state['exp_avg_sq'])

                    if group['amsgrad']:
                        max_exp_avg_sqs.append(state['max_exp_avg_sq'])

                    # update the steps for each param group update
                    state['step'] += 1
                    # record the step after step update
                    state_steps.append(state['step'])

            # Delegate the math to the functional `adam` defined below.
            adam(params_with_grad,
                 grads,
                 exp_avgs,
                 exp_avg_sqs,
                 max_exp_avg_sqs,
                 state_steps,
                 amsgrad=group['amsgrad'],
                 beta1=beta1,
                 beta2=beta2,
                 lr=group['lr'],
                 weight_decay=group['weight_decay'],
                 eps=group['eps'],
                 per_lrs=per_lrs)
        return loss
def adam(params: List[Tensor],
         grads: List[Tensor],
         exp_avgs: List[Tensor],
         exp_avg_sqs: List[Tensor],
         max_exp_avg_sqs: List[Tensor],
         state_steps: List[int],
         *,
         amsgrad: bool,
         beta1: float,
         beta2: float,
         lr: float,
         weight_decay: float,
         eps: float,
         per_lrs) -> None:
    """Functional Adam update, applied in place on `params`.

    Mirrors torch's functional Adam except that, when per_lrs[i] is not None,
    the first moment is scaled elementwise by per_lrs[i] in the final
    parameter update (the per-voxel learning-rate extension).
    """
    for i, param in enumerate(params):
        grad = grads[i]
        exp_avg = exp_avgs[i]
        exp_avg_sq = exp_avg_sqs[i]
        step = state_steps[i]
        per_lr = per_lrs[i]

        bias_correction1 = 1 - beta1 ** step
        bias_correction2 = 1 - beta2 ** step

        if weight_decay != 0:
            grad = grad.add(param, alpha=weight_decay)

        # Decay the first and second moment running average coefficient
        exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
        exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
        if amsgrad:
            # Maintains the maximum of all 2nd moment running avg. till now
            torch.maximum(max_exp_avg_sqs[i], exp_avg_sq, out=max_exp_avg_sqs[i])
            # Use the max. for normalizing running avg. of gradient
            denom = (max_exp_avg_sqs[i].sqrt() / math.sqrt(bias_correction2)).add_(eps)
        else:
            denom = (exp_avg_sq.sqrt() / math.sqrt(bias_correction2)).add_(eps)

        step_size = lr / bias_correction1

        if per_lr is not None:
            # Per-voxel extension: scale the update elementwise by per_lr.
            param.addcdiv_(exp_avg * per_lr, denom, value=-step_size)
        else:
            param.addcdiv_(exp_avg, denom, value=-step_size)
def create_optimizer_or_freeze_model(model, cfg_train, global_step):
    """Build an Adam optimizer over the model attributes named by cfg_train.

    Every config key ``lrate_<attr>`` controls ``model.<attr>``: a positive
    (step-decayed) rate adds it as a param group; a non-positive rate freezes
    it in place. The rate decays by 0.1 every ``lrate_decay * 1000`` steps.
    """
    decay_steps = cfg_train.lrate_decay * 1000
    decay_factor = 0.1 ** (global_step / decay_steps)

    prefix = 'lrate_'
    param_group = []
    for key in cfg_train.keys():
        if not key.startswith(prefix):
            continue
        name = key[len(prefix):]
        if not hasattr(model, name):
            continue
        param = getattr(model, name)
        if param is None:
            print(f'create_optimizer_or_freeze_model: param {name} not exist')
            continue
        lr = getattr(cfg_train, f'lrate_{name}') * decay_factor
        if lr > 0:
            print(f'create_optimizer_or_freeze_model: param {name} lr {lr}')
            if isinstance(param, nn.Module):
                param = param.parameters()
            param_group.append({'params': param, 'lr': lr, 'name': name})
        else:
            print(f'create_optimizer_or_freeze_model: param {name} freeze')
            param.requires_grad = False
    return Adam(param_group, betas=(0.9, 0.99))
''' Checkpoint utils
'''
def load_checkpoint(model, optimizer, ckpt_path, no_reload_optimizer, strict=True):
    """Load model (and optionally optimizer) state from a checkpoint file.

    Handles a width mismatch in the first rgbnet layer by zero-padding the
    checkpoint weight up to the model's shape (the extra input columns stay 0).

    Args:
        model: module with an `rgbnet` Sequential whose layer 0 has `.weight`.
        optimizer: optimizer to restore unless `no_reload_optimizer`.
        ckpt_path: path of the torch checkpoint.
        no_reload_optimizer: skip restoring the optimizer state entirely.
        strict: forwarded to `load_state_dict`; also controls whether an
            optimizer-state failure raises (ValueError) or is skipped.

    Returns:
        (model, optimizer, start_step)
    """
    ckpt = torch.load(ckpt_path)
    start = ckpt['global_step']
    if model.rgbnet[0].weight.shape != ckpt['model_state_dict']['rgbnet.0.weight'].shape:
        # Zero-pad the saved first-layer weight to the (wider) current model.
        tmp_weight = torch.zeros(model.rgbnet[0].weight.shape)
        h = ckpt['model_state_dict']['rgbnet.0.weight'].shape[-1]
        tmp_weight[:, :h] = ckpt['model_state_dict']['rgbnet.0.weight']
        ckpt['model_state_dict']['rgbnet.0.weight'] = tmp_weight
    model.load_state_dict(ckpt['model_state_dict'], strict=strict)
    if not no_reload_optimizer:
        try:
            optimizer.load_state_dict(ckpt['optimizer_state_dict'])
        # Was a bare `except:`, which also swallowed KeyboardInterrupt/SystemExit.
        except Exception as e:
            print("Failed to load optimizer state dict")
            if strict:
                raise ValueError from e
            else:
                print("Skip!")
    return model, optimizer, start
def load_grid_data(model, ckpt_path, deduce=1, name='density', return_raw=False):
    """Copy one voxel-grid tensor (e.g. 'density') from a checkpoint into the
    model attribute of the same name, falling back to the '<name>.grid' key.

    With return_raw=True the raw checkpoint tensor is returned instead of
    being written into the model. `deduce` is unused (kept for API parity).
    """
    ckpt = torch.load(ckpt_path)
    target = getattr(model, name)
    print(">>> {} loaded from ".format(name), ckpt_path)
    key = name if name in ckpt['model_state_dict'] else name + '.grid'
    data = ckpt['model_state_dict'][key]
    if return_raw:
        return data
    # DenseGrid wraps the tensor in a `.grid` parameter; bare tensors get
    # their `.data` replaced directly.
    if isinstance(target, grid.DenseGrid):
        target.grid.data = data
    else:
        target.data = data
    return model
def load_weight_by_name(model, ckpt_path, deduce=1, name='density', return_raw=False):
    """Copy every checkpoint tensor whose parameter name contains `name`
    into the corresponding model parameter. `deduce`/`return_raw` are unused
    (kept for API parity with load_grid_data).
    """
    ckpt = torch.load(ckpt_path)
    state = ckpt['model_state_dict']
    for param_name, param in model.named_parameters():
        if name not in param_name:
            continue
        if param_name in state:
            param.data = state[param_name]
            print('load {} to model'.format(param_name))
    print(">>> data with name {} are loaded from ".format(name), ckpt_path)
    return model
def load_model(model_class, ckpt_path, new_kwargs=None, strict=False):
    """Instantiate `model_class` from a checkpoint's saved constructor kwargs
    (optionally overridden by `new_kwargs`) and load its weights.

    A strict state-dict load is attempted first; on failure, `strict=True`
    re-raises the original error while `strict=False` falls back to a
    non-strict load.
    """
    ckpt = torch.load(ckpt_path)
    if new_kwargs is not None:
        for k, v in new_kwargs.items():
            if k in ckpt['model_kwargs'] and ckpt['model_kwargs'][k] != v:
                print('updating {} from {} to {}'.format(k, ckpt['model_kwargs'][k], v))
        ckpt['model_kwargs'].update(new_kwargs)
    model = model_class(**ckpt['model_kwargs'])
    try:
        model.load_state_dict(ckpt['model_state_dict'], strict=True)
        print(">>> Checkpoint loaded successfully from {}".format(ckpt_path))
    except Exception as e:
        print(e)
        if strict:
            print(">>> Failed to load checkpoint correctly.")
            # Was: a redundant second strict load_state_dict call whose only
            # effect was to raise the same error again; `raise` keeps the
            # original traceback without repeating the work.
            raise
        model.load_state_dict(ckpt['model_state_dict'], strict=False)
        print(">>> Checkpoint loaded without strict matching from {}".format(ckpt_path))
    return model
def color_map_color(value, cmap_name='coolwarm', vmin=0, vmax=1):
    """Map |value| through a matplotlib colormap and return RGB only.

    value: array-like of scalars; returns an (N, 3) array (alpha dropped).
    """
    normalize = matplotlib.colors.Normalize(vmin=vmin, vmax=vmax)
    colormap = cm.get_cmap(cmap_name)  # PiYG
    rgba = colormap(normalize(abs(value)))
    # cmap returns RGBA; keep only the first three channels.
    return rgba[:, :3]
def analyze_point_cloud(filename=None, log_num=18, rand_offset=False,
                        query_func=None, scale_mats_np=None, save_root=''):
    """Subsample ~2**log_num points from a PLY file and, if `query_func` is
    given, color them by SDF value and save to <save_root>/gt_pcd_eval.ply.

    Args:
        filename: input .ply path (read via plyfile.PlyData).
        log_num: log2 of the target number of points to keep.
        rand_offset: jitter the strided indices by one shared random offset
            (last index excluded so it stays in range).
        query_func: callable mapping (N, 3) CUDA points to SDF-like values;
            when None the subsampled points are returned directly.
        scale_mats_np: optional 4x4 normalization matrix; points are mapped
            into its frame (uniform scale assumed in [0, 0]) before querying.
        save_root: output directory for the colored PLY.
    """
    plydata = PlyData.read(filename)
    num_points = 2 ** log_num
    skip = len(plydata['vertex']) // num_points
    idx = np.arange(len(plydata['vertex']))[::skip]
    if rand_offset:
        rand = np.random.randint(skip)
        idx[:-1] += rand
    points = np.vstack([[v[0],v[1],v[2]] for v in plydata['vertex'][idx]])
    if query_func is None:
        return points
    # Map world-space points into the normalized SDF frame.
    if scale_mats_np is not None:
        point_ = (points - scale_mats_np[:3,3]) / scale_mats_np[0,0]
    else:
        point_ = points
    # Query the SDF in chunks to bound GPU memory.
    batch_size = 8192
    sdfs = []
    for i in range(int(np.ceil(len(points) / batch_size))):
        pts = torch.from_numpy(point_[i*batch_size : (i+1)*batch_size]).cuda()
        sdf = -query_func(pts)  # negated: query_func presumably returns -sdf -- confirm
        sdfs.append(sdf.cpu().numpy())
    sdfs = np.hstack(sdfs)
    # Map sdf roughly in [-1, 1] to [0, 1] for the colormap.
    colors = (color_map_color(sdfs * 0.5 + 0.5) * 255).astype(np.uint8)
    # Build a structured array interleaving position and color fields.
    vertexs = np.array([tuple(v) for v in points], dtype=[('x', 'f4'), ('y', 'f4'), ('z', 'f4')])
    vertex_colors = np.array([tuple(v) for v in colors], dtype=[('red', 'u1'), ('green', 'u1'), ('blue', 'u1')])
    vertex_all = np.empty(len(vertexs), vertexs.dtype.descr + vertex_colors.dtype.descr)
    for prop in vertexs.dtype.names:
        vertex_all[prop] = vertexs[prop]
    for prop in vertex_colors.dtype.names:
        vertex_all[prop] = vertex_colors[prop]
    el = PlyElement.describe(vertex_all, 'vertex')
    PlyData([el]).write(os.path.join(save_root, "gt_pcd_eval.ply"))
    print(">>> Points saved at {}".format(os.path.join(save_root, "gt_pcd_eval.ply")))
    return
def load_point_cloud(filename=None, log_num=17, rand_offset=False, load_normal=False, save_root=''):
    """Subsample ~2**log_num vertices from a PLY file, color them by
    (estimated or pseudo) normals, and write <save_root>/est_normal.ply.

    NOTE(review): this calls exit() after writing, terminating the whole
    process -- it reads as a one-off debugging/visualization utility.
    """
    plydata = PlyData.read(filename)
    num_points = 2 ** log_num
    # log_num <= 0 disables subsampling (keep every vertex).
    if log_num > 0:
        skip = len(plydata['vertex']) // num_points
    else:
        skip = 1
    idx = np.arange(len(plydata['vertex']))[::skip]
    if rand_offset:
        # Jitter the strided indices by one shared random offset; the last
        # index is excluded so it stays in range.
        rand = np.random.randint(skip)
        idx[:-1] += rand
    points = np.vstack([[v[0],v[1],v[2]] for v in plydata['vertex'][idx]])
    if load_normal:
        import open3d as o3d
        pcd = o3d.geometry.PointCloud()
        pcd.points = o3d.utility.Vector3dVector(points)
        radius = 3
        # import ipdb; ipdb.set_trace()
        pcd.estimate_normals(
            search_param=o3d.geometry.KDTreeSearchParamHybrid(radius=radius,
                                                              max_nn=30))
        normals = np.asarray(pcd.normals)
        # NOTE(review): both assignment targets below are normals[:,2], so this
        # line is a no-op (column 2 ends up unchanged). A y/z swap such as
        # `normals[:,1], normals[:,2] = normals[:,2], normals[:,1]` was probably
        # intended -- confirm before changing.
        normals[:,2], normals[:,2] = normals[:,1], normals[:,2]
    else:
        # Fallback: use the normalized position as a pseudo-normal.
        normals = points / np.linalg.norm(points, 2, -1, True)
    # Encode normals in [-1, 1] as RGB in [0, 255].
    colors = ((normals * 0.5 + 0.5) * 255).astype(np.uint8)
    vertexs = np.array([tuple(v) for v in points], dtype=[('x', 'f4'), ('y', 'f4'), ('z', 'f4')])
    vertex_colors = np.array([tuple(v) for v in colors], dtype=[('red', 'u1'), ('green', 'u1'), ('blue', 'u1')])
    vertex_all = np.empty(len(vertexs), vertexs.dtype.descr + vertex_colors.dtype.descr)
    for prop in vertexs.dtype.names:
        vertex_all[prop] = vertexs[prop]
    for prop in vertex_colors.dtype.names:
        vertex_all[prop] = vertex_colors[prop]
    el = PlyElement.describe(vertex_all, 'vertex')
    PlyData([el]).write(os.path.join(save_root, "est_normal.ply"))
    print(">>> Points saved at {}".format(os.path.join(save_root, "est_normal.ply")))
    exit()  # NOTE(review): hard process exit -- the return below never runs.
    return
# def write_ply(points, normals=None, colors=None, save_root=''):
# # from plyfile import PlyData, PlyElement
# vertexs = np.array([tuple(v) for v in points], dtype=[('x', 'f4'), ('y', 'f4'), ('z', 'f4')])
# if colors is not None:
# vertex_colors = np.array([tuple(v) for v in colors], dtype=[('red', 'u1'), ('green', 'u1'), ('blue', 'u1')])
# vertex_all = np.empty(len(vertexs), vertexs.dtype.descr + vertex_colors.dtype.descr)
# for prop in vertex_colors.dtype.names:
# vertex_all[prop] = vertex_colors[prop]
# else:
# vertex_all = np.empty(len(vertexs), vertexs.dtype.descr)
# for prop in vertexs.dtype.names:
# vertex_all[prop] = vertexs[prop]
# el = PlyElement.describe(vertex_all, 'vertex')
# PlyData([el]).write(os.path.join(save_root, "tmp.ply"))
def write_ply(points, filename, colors=None, normals=None):
    """Write a point cloud to a binary PLY file.

    points: iterable of (x, y, z); normals: optional (nx, ny, nz) per point;
    colors: optional per-point RGB in [0, 1] (stored as uint8).
    """
    xyz = np.array([tuple(p) for p in points], dtype=[('x', 'f4'), ('y', 'f4'), ('z', 'f4')])
    count = len(xyz)
    dtype_descr = xyz.dtype.descr
    nrm = None
    if normals is not None:
        nrm = np.array([tuple(v) for v in normals], dtype=[('nx', 'f4'), ('ny', 'f4'), ('nz', 'f4')])
        assert len(nrm) == count
        dtype_descr = dtype_descr + nrm.dtype.descr
    col = None
    if colors is not None:
        # Scale [0, 1] colors into uint8 range.
        col = np.array([tuple(c * 255) for c in colors],
                       dtype=[('red', 'u1'), ('green', 'u1'), ('blue', 'u1')])
        assert len(col) == count
        dtype_descr = dtype_descr + col.dtype.descr
    # Merge all fields into a single structured array.
    merged = np.empty(count, dtype=dtype_descr)
    for field in xyz.dtype.names:
        merged[field] = xyz[field]
    if nrm is not None:
        for field in nrm.dtype.names:
            merged[field] = nrm[field]
    if col is not None:
        for field in col.dtype.names:
            merged[field] = col[field]
    PlyData([PlyElement.describe(merged, 'vertex')], text=False).write(filename)
def point_cloud_from_rays(ray_pts, weights, normals):
    # Unfinished debugging stub: drops straight into an interactive ipdb
    # session so ray samples can be inspected by hand -- never call this in
    # production code paths.
    import ipdb; ipdb.set_trace()
''' color space process methods
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
class GradLayer(nn.Module):
    """Fixed image-gradient filter bank.

    Produces a 2-channel (vertical, horizontal) gradient map from an NCHW
    input; 3-channel inputs are first converted to grayscale. All kernels are
    frozen (requires_grad=False). ksize selects the scheme:
      0 -> plain forward differences (computed by slicing in forward()),
      1 -> 3x3 central differences, 3 -> 3x3 Sobel, 5 -> 5x5 Sobel-like.
    """
    def __init__(self, ksize=3):
        super(GradLayer, self).__init__()
        self.ksize = ksize
        if ksize == 0:
            # These 2x2 kernels are defined but unused: forward() special-cases
            # ksize == 0 and uses slicing differences instead.
            kernel_v = np.asarray(
                [[-1, 0],
                 [ 1, 0]])
            kernel_h = np.asarray(
                [[-1, 1],
                 [ 0, 0]])
        elif ksize == 1:
            # 3x3 central differences; self.ksize is bumped to 3 so the conv
            # padding below matches the actual kernel size.
            kernel_v = np.asarray(
                [[0, -1, 0],
                 [0, 0, 0],
                 [0, 1, 0]])
            kernel_h = np.asarray(
                [[0, 0, 0],
                 [-1, 0, 1],
                 [0, 0, 0]])
            self.ksize = 3
        elif ksize == 3:
            # kernel_v = np.asarray(
            #     [[0, -1, 0],
            #      [0, 0, 0],
            #      [0, 1, 0]])
            # kernel_h = np.asarray(
            #     [[0, 0, 0],
            #      [-1, 0, 1],
            #      [0, 0, 0]])
            # sobel
            kernel_v = np.asarray(
                [[-1,-2,-1],
                 [0, 0, 0],
                 [1, 2, 1]])
            kernel_h = np.asarray(
                [[-1, 0, 1],
                 [-2, 0, 2],
                 [-1, 0, 1]])
        elif ksize == 5:
            # 5x5 Sobel-like kernel; the horizontal kernel is its transpose.
            kernel_v = np.asarray(
                [[-1, -4, -6, -4, -1],
                 [-2, -8, -12, -8, -2],
                 [ 0, 0, 0, 0, 0],
                 [ 2, 8, 12, 8, 2],
                 [ 1, 4, 6, 4, 1],
                 ])
            kernel_h = kernel_v.T
        else:
            raise NotImplementedError
        # Normalize by the L1 mass so responses are comparable across sizes;
        # reshape to conv2d weight layout (out_c=1, in_c=1, kh, kw).
        kernel_v = torch.FloatTensor(kernel_v/np.abs(kernel_v).sum()).unsqueeze(0).unsqueeze(0)
        kernel_h = torch.FloatTensor(kernel_h/np.abs(kernel_h).sum()).unsqueeze(0).unsqueeze(0)
        self.weight_v = nn.Parameter(data=kernel_v, requires_grad=False)
        self.weight_h = nn.Parameter(data=kernel_h, requires_grad=False)
    def get_gray(self,x):
        '''
        Convert an NCHW RGB image to grayscale (single channel), using
        BT.601-style luma weights scaled by 1/256.
        '''
        gray_coeffs = [65.738, 129.057, 25.064]
        convert = x.new_tensor(gray_coeffs).view(1, 3, 1, 1) / 256
        x_gray = x.mul(convert).sum(dim=1)
        return x_gray.unsqueeze(1)
    def forward(self, x):
        # Collapse RGB to grayscale before filtering.
        if x.shape[1] == 3:
            x = self.get_gray(x)
        if self.ksize == 0:
            # Forward differences via slicing; the first row/column stays zero.
            x_v = torch.zeros_like(x)
            x_h = torch.zeros_like(x)
            x_v[...,1:,:] = (x[...,1:,:] - x[...,:-1,:]) / 2
            x_h[...,1:] = (x[...,1:] - x[...,:-1]) / 2
        else:
            x_v = F.conv2d(x, self.weight_v, padding=self.ksize//2)
            x_h = F.conv2d(x, self.weight_h, padding=self.ksize//2)
        # x = torch.sqrt(torch.pow(x_v, 2) + torch.pow(x_h, 2) + 1e-6)
        # Return both components stacked on the channel axis (not a magnitude).
        x = torch.cat([x_v, x_h],1)
        return x
class GaussianLayer(nn.Module):
    """Fixed (non-trainable) Gaussian blur for single-channel NCHW maps.

    Supports ksize 3 or 5; the kernel is L1-normalized so the filter
    preserves the mean of its input.
    """
    def __init__(self, ksize=3):
        super(GaussianLayer, self).__init__()
        self.ksize = ksize
        if ksize == 3:
            taps = np.asarray(
                [[1, 2, 1],
                 [2, 4, 2],
                 [1, 2, 1]])
        elif ksize == 5:
            taps = np.asarray(
                [[1, 4, 7, 4, 1],
                 [4, 16, 26, 16, 4],
                 [7, 26, 41, 26, 7],
                 [4, 16, 26, 16, 4],
                 [1, 4, 7, 4, 1],
                 ])
        else:
            raise NotImplementedError
        # Normalize to unit mass and shape as (out_c=1, in_c=1, kh, kw).
        normalized = torch.FloatTensor(taps / np.abs(taps).sum())[None, None]
        self.weight = nn.Parameter(data=normalized, requires_grad=False)
    def forward(self, x):
        return F.conv2d(x, self.weight, padding=self.ksize // 2)
def _gaussian_3dconv(ksize=3, sigma=1):
    """Build a frozen 3D Gaussian-smoothing Conv3d (ksize**3 kernel).

    The kernel is normalized to unit mass so the filter preserves the mean;
    padding is 'replicate' so borders are smoothed without zero bleed.
    NOTE: requires CUDA (the kernel is moved to the GPU).
    """
    x = np.arange(-(ksize//2),ksize//2 + 1,1)
    y = np.arange(-(ksize//2),ksize//2 + 1,1)
    z = np.arange(-(ksize//2),ksize//2 + 1,1)
    xx, yy, zz = np.meshgrid(x,y,z)
    kernel = np.exp(-(xx**2 + yy**2 + zz**2)/(2*sigma**2))
    kernel = torch.from_numpy(kernel).cuda()
    m = nn.Conv3d(1,1,ksize,stride=1,padding=ksize//2, padding_mode='replicate')
    m.weight.data = kernel[None, None, ...] / kernel.sum()
    # NOTE(review): the weight above is float64 (from numpy) while the bias
    # below is float32 -- callers appear to handle the dtype; confirm.
    m.bias.data = torch.zeros(1)
    # Freeze: this is a fixed smoothing operator, not a learnable layer.
    for param in m.parameters():
        param.requires_grad = False
    # print(kernel)
    return m
class GradLoss(nn.Module):
    """MSE between image gradients of a prediction and the ground truth.

    The ground truth is optionally Gaussian-blurred before differentiation;
    when `savedir` is given, forward() also dumps a side-by-side debug figure
    (image | 5x v-gradient | 5x h-gradient, GT on top, prediction below).
    """
    def __init__(self, ksize=3, gaussian=True):
        # ksize: gradient-kernel size forwarded to GradLayer.
        # gaussian: blur the GT (only) with a fixed 3x3 Gaussian first.
        super(GradLoss, self).__init__()
        self.loss = nn.MSELoss()
        self.grad_layer = GradLayer(ksize=ksize)
        self.gaussian = gaussian
        if self.gaussian:
            self.gaussian_layer = GaussianLayer(ksize=3)
    def forward(self, output, gt_img, savedir=''):
        if self.gaussian:
            # Only the GT is smoothed; the prediction line is intentionally
            # left commented out.
            # output = self.gaussian_layer(output)
            gt_img = self.gaussian_layer(gt_img)
        output_grad = self.grad_layer(output)
        gt_grad = self.grad_layer(gt_img)
        loss = self.loss(output_grad, gt_grad)
        if savedir:
            # `to8b` is defined elsewhere in this module (not shown here);
            # gradients are scaled by 5 purely for visibility.
            img1 = np.concatenate([to8b(gt_img.detach().cpu().numpy())[0,0][...,None],
                                   to8b(5*gt_grad.detach().cpu().numpy())[0,0][...,None],
                                   to8b(5*gt_grad.detach().cpu().numpy())[0,1][...,None]], axis=1)
            img2 = np.concatenate([to8b(output.detach().cpu().numpy())[0,0][...,None],
                                   to8b(5*output_grad.detach().cpu().numpy())[0,0][...,None],
                                   to8b(5*output_grad.detach().cpu().numpy())[0,1][...,None]], axis=1)
            img8 = np.concatenate([img1, img2], axis=0)
            if not os.path.exists(os.path.join(savedir, "debug_figs")):
                os.mkdir(os.path.join(savedir, "debug_figs"))
            imageio.imwrite(os.path.join(savedir, "debug_figs", "grad_module_{}.png".format(loss)), img8)
        return loss
def rgb_to_luminance(rgb, return_chromaticity=False, gamma_correction=False, lum_avg=1):
    """Rec.601 luma of an (..., 3) RGB array (numpy or torch), divided by
    `lum_avg`. Optionally also returns the chromaticity rgb / (luma + 1e-5).
    `gamma_correction` is currently unused (see the original todo).
    """
    # todo: gamma correction?
    r, g, b = rgb[..., 0], rgb[..., 1], rgb[..., 2]
    lum = (0.299 * r + 0.587 * g + 0.114 * b) / lum_avg
    if return_chromaticity:
        chroma = rgb / (lum[..., None] + 1e-5)
        return lum[..., None], chroma
    return lum[..., None]
def get_sobel(img, ksize=3, thrd=0.1, g_ksize=0, d_ksize=0, suffix='', vis=False):
    """Sobel edge magnitude of an image, optionally Gaussian-smoothed.

    Multi-channel inputs are color-converted first; with vis=True a debug
    figure is saved under debug_figs/. `thrd` and `d_ksize` are unused
    (kept for API compatibility).
    """
    if img.shape[-1] > 1:
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    dx = cv2.Sobel(img, -1, 1, 0, ksize=ksize)
    dy = cv2.Sobel(img, -1, 0, 1, ksize=ksize)
    # img_sobel_xy = cv2.Sobel(img, -1, 1, 1, ksize=ksize)
    # Average the absolute responses of both directions.
    sobel = cv2.addWeighted(cv2.convertScaleAbs(dx), 0.5, cv2.convertScaleAbs(dy), 0.5, 0)
    gaussian = cv2.GaussianBlur(sobel, (g_ksize, g_ksize), 0) if g_ksize > 0 else sobel
    if vis:
        titles = ['Original', 'Sobel','gaussian']
        images = [img, sobel, gaussian]
        for idx in range(len(titles)):
            plt.subplot(1, len(titles), idx + 1), plt.imshow(images[idx], 'gray')
            plt.title(titles[idx])
            plt.xticks([]), plt.yticks([])
        plt.savefig("debug_figs/test_sobel_k{}_{}.jpg".format(ksize, suffix), dpi=200)
    return gaussian
def calc_grad(img, delta=1, padding=None, kernel=None):
    """Finite-difference gradients of an (H, W, C) image tensor.

    Returns the x- and y-gradients (step `delta`, scaled by 1/delta), each
    cropped to a common (H-delta, W-delta, C) window and concatenated along
    the channel axis -> (H-delta, W-delta, 2*C).

    `padding` other than None is unimplemented; `kernel` is reserved.
    (Removed the dead local `mid`, which was computed but never used.)
    """
    if padding is not None:
        raise NotImplementedError
    grad_x = (img[delta:] - img[:-delta]) / delta
    grad_y = (img[:, delta:] - img[:, :-delta]) / delta
    # Crop both maps to the overlapping region before stacking channels.
    return torch.cat([grad_x[:, delta:], grad_y[delta:, :]], -1)
''' Evaluation metrics (ssim, lpips)
'''
def rgb_ssim(img0, img1, max_val,
             filter_size=11,
             filter_sigma=1.5,
             k1=0.01,
             k2=0.03,
             return_map=False):
    """SSIM between two (H, W, 3) images with values in [0, max_val].

    Modified from https://github.com/google/mipnerf/blob/16e73dfdb52044dcceb47cda5243a686391a6e0f/internal/math.py#L58
    Returns the mean SSIM, or the per-pixel SSIM map if return_map=True.
    """
    assert len(img0.shape) == 3
    assert img0.shape[-1] == 3
    assert img0.shape == img1.shape
    # Separable 1D Gaussian blur kernel.
    hw = filter_size // 2
    shift = (2 * hw - filter_size + 1) / 2
    taps = np.exp(-0.5 * (((np.arange(filter_size) - hw + shift) / filter_sigma) ** 2))
    taps /= np.sum(taps)
    def blur(z):
        # Two 1D passes per channel ('valid' -> output shrinks by filter_size-1).
        def conv(a, f):
            return scipy.signal.convolve2d(a, f, mode='valid')
        return np.stack([conv(conv(z[..., c], taps[:, None]), taps[None, :])
                         for c in range(z.shape[-1])], -1)
    mu0 = blur(img0)
    mu1 = blur(img1)
    mu00 = mu0 * mu0
    mu11 = mu1 * mu1
    mu01 = mu0 * mu1
    # Clip variances to be non-negative and covariance to a valid magnitude.
    sigma00 = np.maximum(0., blur(img0 ** 2) - mu00)
    sigma11 = np.maximum(0., blur(img1 ** 2) - mu11)
    sigma01 = blur(img0 * img1) - mu01
    sigma01 = np.sign(sigma01) * np.minimum(np.sqrt(sigma00 * sigma11), np.abs(sigma01))
    c1 = (k1 * max_val) ** 2
    c2 = (k2 * max_val) ** 2
    ssim_map = (((2 * mu01 + c1) * (2 * sigma01 + c2)) /
                ((mu00 + mu11 + c1) * (sigma00 + sigma11 + c2)))
    return ssim_map if return_map else np.mean(ssim_map)
# Cache of LPIPS networks keyed by backbone name ('alex' / 'vgg').
__LPIPS__ = {}
def init_lpips(net_name, device):
    """Construct an LPIPS network for the given backbone on `device`."""
    assert net_name in ['alex', 'vgg']
    import lpips
    print(f'init_lpips: lpips_{net_name}')
    return lpips.LPIPS(net=net_name, version='0.1').eval().to(device)
def rgb_lpips(np_gt, np_im, net_name, device):
    """LPIPS distance between two (H, W, 3) numpy images in [0, 1]."""
    if net_name not in __LPIPS__:
        __LPIPS__[net_name] = init_lpips(net_name, device)
    # HWC -> CHW tensors on the target device; normalize=True expects [0, 1].
    gt_t = torch.from_numpy(np_gt).permute([2, 0, 1]).contiguous().to(device)
    im_t = torch.from_numpy(np_im).permute([2, 0, 1]).contiguous().to(device)
    return __LPIPS__[net_name](gt_t, im_t, normalize=True).item()
"""
Sampling strategies
"""
def up_sample(rays_o, rays_d, z_vals, sdf, n_importance, inv_s):
    """
    Up sampling give a fixed inv_s
    copied from neus

    Importance-samples `n_importance` additional depths per ray near the SDF
    zero crossing, using a fixed sigmoid sharpness `inv_s`.
    rays_o, rays_d: (n_rays, 3); z_vals: (n_rays, n_samples);
    sdf: per-sample SDF values (reshaped to (n_rays, n_samples) below).
    Returns: (n_rays, n_importance) new z values, detached.
    """
    batch_size, n_samples = z_vals.shape
    pts = rays_o[:, None, :] + rays_d[:, None, :] * z_vals[..., :, None]  # n_rays, n_samples, 3
    radius = torch.linalg.norm(pts, ord=2, dim=-1, keepdim=False)
    # Only keep segments with at least one endpoint inside the unit sphere.
    inside_sphere = (radius[:, :-1] < 1.0) | (radius[:, 1:] < 1.0)
    sdf = sdf.reshape(batch_size, n_samples)
    prev_sdf, next_sdf = sdf[:, :-1], sdf[:, 1:]
    prev_z_vals, next_z_vals = z_vals[:, :-1], z_vals[:, 1:]
    mid_sdf = (prev_sdf + next_sdf) * 0.5
    # Estimated directional derivative of the SDF along each ray segment.
    cos_val = (next_sdf - prev_sdf) / (next_z_vals - prev_z_vals + 1e-5)
    # ----------------------------------------------------------------------------------------------------------
    # Use min value of [ cos, prev_cos ]
    # Though it makes the sampling (not rendering) a little bit biased, this strategy can make the sampling more
    # robust when meeting situations like below:
    #
    # SDF
    # ^
    # |\          -----x----...
    # | \        /
    # |  x      x
    # |---\----/-------------> 0 level
    # |    \  /
    # |     \/
    # |
    # ----------------------------------------------------------------------------------------------------------
    prev_cos_val = torch.cat([torch.zeros([batch_size, 1]), cos_val[:, :-1]], dim=-1)
    cos_val = torch.stack([prev_cos_val, cos_val], dim=-1)
    cos_val, _ = torch.min(cos_val, dim=-1, keepdim=False)
    # Keep only descending (surface-approaching) segments inside the sphere.
    cos_val = cos_val.clip(-1e3, 0.0) * inside_sphere
    dist = (next_z_vals - prev_z_vals)
    # Linearly extrapolate the SDF to the segment endpoints, then turn the
    # endpoint CDF ratio into a NeuS-style alpha.
    prev_esti_sdf = mid_sdf - cos_val * dist * 0.5
    next_esti_sdf = mid_sdf + cos_val * dist * 0.5
    prev_cdf = torch.sigmoid(prev_esti_sdf * inv_s)
    next_cdf = torch.sigmoid(next_esti_sdf * inv_s)
    alpha = (prev_cdf - next_cdf + 1e-5) / (prev_cdf + 1e-5)
    weights = alpha * torch.cumprod(
        torch.cat([torch.ones([batch_size, 1]), 1. - alpha + 1e-7], -1), -1)[:, :-1]
    z_samples = sample_pdf(z_vals, weights, n_importance, det=True).detach()
    return z_samples
def sample_pdf(bins, weights, n_samples, det=False):
    """Inverse-transform sampling from a piecewise-constant PDF (from NeRF).

    bins: (B, M) bin edges; weights: (B, M-1) unnormalized bin weights.
    det=True uses stratified midpoints instead of uniform random draws.
    Returns (B, n_samples) sampled positions.
    """
    # Build the CDF (epsilon keeps all-zero weight rows finite).
    weights = weights + 1e-5
    pdf = weights / torch.sum(weights, -1, keepdim=True)
    cdf = torch.cumsum(pdf, -1)
    cdf = torch.cat([torch.zeros_like(cdf[..., :1]), cdf], -1)
    # Draw uniform samples in [0, 1).
    if det:
        u = torch.linspace(0. + 0.5 / n_samples, 1. - 0.5 / n_samples, steps=n_samples)
        u = u.expand(list(cdf.shape[:-1]) + [n_samples])
    else:
        u = torch.rand(list(cdf.shape[:-1]) + [n_samples])
    # Invert the CDF: locate the bracketing CDF entries for each u.
    u = u.contiguous()
    idx = torch.searchsorted(cdf, u, right=True)
    lo = torch.clamp(idx - 1, min=0)
    hi = torch.clamp(idx, max=cdf.shape[-1] - 1)
    inds_g = torch.stack([lo, hi], -1)  # (batch, N_samples, 2)
    matched_shape = [inds_g.shape[0], inds_g.shape[1], cdf.shape[-1]]
    cdf_g = torch.gather(cdf.unsqueeze(1).expand(matched_shape), 2, inds_g)
    bins_g = torch.gather(bins.unsqueeze(1).expand(matched_shape), 2, inds_g)
    # Linear interpolation inside the bracketing bin (guard zero-width spans).
    span = cdf_g[..., 1] - cdf_g[..., 0]
    span = torch.where(span < 1e-5, torch.ones_like(span), span)
    t = (u - cdf_g[..., 0]) / span
    return bins_g[..., 0] + t * (bins_g[..., 1] - bins_g[..., 0])
"""
Ref-NeRF utils
"""
def pos_enc(x, min_deg, max_deg, append_identity=True):
    """The positional encoding used by the original NeRF paper.

    x: (..., D) coordinates. For each frequency 2**deg in [min_deg, max_deg)
    emits sin and cos (as sin(v + pi/2)) features; with append_identity the
    raw input is prepended. Output: (..., D + 2*D*(max_deg-min_deg)) or
    (..., 2*D*(max_deg-min_deg)).
    """
    scales = 2**torch.arange(min_deg, max_deg)
    # Fixed: use the last axis size (was x.shape[1]) so inputs with extra
    # leading batch dims work; identical for the usual (N, D) case.
    shape = x.shape[:-1] + (int(x.shape[-1] * scales.shape[0]),)
    scaled_x = (x[..., None, :] * scales[:, None]).reshape(shape)
    # sin(v + pi/2) == cos(v), so this yields interleaved sin/cos features.
    four_feat = torch.sin(
        torch.cat([scaled_x, scaled_x + 0.5 * torch.pi], axis=-1))
    if append_identity:
        return torch.cat([x] + [four_feat], axis=-1)
    else:
        return four_feat
def generalized_binomial_coeff(a, k):
    """Compute generalized binomial coefficients (real a choose integer k)."""
    # math.factorial instead of np.math.factorial: the np.math alias was
    # removed in NumPy 1.25, so the original call breaks on modern NumPy.
    return np.prod(a - np.arange(k)) / math.factorial(k)
def assoc_legendre_coeff(l, m, k):
    """Compute associated Legendre polynomial coefficients.
    Returns the coefficient of the cos^k(theta)*sin^m(theta) term in the
    (l, m)th associated Legendre polynomial, P_l^m(cos(theta)).
    Args:
      l: associated Legendre polynomial degree.
      m: associated Legendre polynomial order.
      k: power of cos(theta).
    Returns:
      A float, the coefficient of the term corresponding to the inputs.
    """
    return ((-1)**m * 2**l * math.factorial(l) / math.factorial(k) /
            math.factorial(l - k - m) *
            generalized_binomial_coeff(0.5 * (l + k + m - 1.0), l))
def sph_harm_coeff(l, m, k):
    """Compute spherical harmonic coefficients (real SH normalization)."""
    return (np.sqrt(
        (2.0 * l + 1.0) * math.factorial(l - m) /
        (4.0 * np.pi * math.factorial(l + m))) * assoc_legendre_coeff(l, m, k))
def get_ml_array(deg_view):
    """Create a (2, K) array of all (m, l) pairs used in the encoding:
    for each band i < deg_view, l = 2**i and m ranges over 0..l (only
    nonnegative m; real/imaginary parts are split later)."""
    pairs = []
    for band in range(deg_view):
        degree = 2 ** band
        pairs.extend((order, degree) for order in range(degree + 1))
    return np.array(pairs).T
def generate_ide_fn(deg_view):
    """Generate integrated directional encoding (IDE) function.
    This function returns a function that computes the integrated directional
    encoding from Equations 6-8 of arxiv.org/abs/2112.03907.
    Args:
      deg_view: number of spherical harmonics degrees to use.
    Returns:
      A function for evaluating integrated directional encoding.
    Raises:
      ValueError: if deg_view is larger than 5.
    """
    if deg_view > 5:
        raise ValueError('Only deg_view of at most 5 is numerically stable.')
    ml_array = get_ml_array(deg_view)
    l_max = 2**(deg_view - 1)
    # Create a matrix corresponding to ml_array holding all coefficients, which,
    # when multiplied (from the right) by the z coordinate Vandermonde matrix,
    # results in the z component of the encoding.
    mat = torch.zeros((l_max + 1, ml_array.shape[1]))
    for i, (m, l) in enumerate(ml_array.T):
        for k in range(l - m + 1):
            mat[k, i] = sph_harm_coeff(l, m, k)
    # NOTE(review): ml_array is moved to CUDA while `mat` stays on CPU;
    # torch.matmul(vmz, mat) below requires everything on xyz's device, so
    # confirm how callers place these tensors.
    ml_array = torch.from_numpy(ml_array).cuda()
    def integrated_dir_enc_fn(xyz, kappa_inv):
        """Function returning integrated directional encoding (IDE).
        Args:
          xyz: [..., 3] array of Cartesian coordinates of directions to evaluate at.
          kappa_inv: [..., 1] reciprocal of the concentration parameter of the von
            Mises-Fisher distribution.
        Returns:
          An array with the resulting IDE.
        """
        x = xyz[..., 0:1]
        y = xyz[..., 1:2]
        z = xyz[..., 2:3]
        # Compute z Vandermonde matrix.
        vmz = torch.cat([z**i for i in range(mat.shape[0])], axis=-1)
        # Compute x+iy Vandermonde matrix (complex-valued powers of x+iy).
        vmxy = torch.cat([(x + 1j * y)**m for m in ml_array[0, :]], axis=-1)
        # Get spherical harmonics.
        sph_harms = vmxy * torch.matmul(vmz, mat)
        # Apply attenuation function using the von Mises-Fisher distribution
        # concentration parameter, kappa.
        sigma = 0.5 * ml_array[1, :] * (ml_array[1, :] + 1)
        ide = sph_harms * torch.exp(-sigma * kappa_inv)
        # Split into real and imaginary parts and return
        return torch.cat([torch.real(ide), torch.imag(ide)], axis=-1)
    return integrated_dir_enc_fn
def generate_enc_fn(mode, deg_view):
    """Return (encoder_fn, output_dim) for a direction encoding.

    mode 'pos_enc': NeRF positional encoding (the second encoder argument,
    kappa_inv, is ignored). mode 'ide': integrated directional encoding.
    Raises NameError for any other mode (kept for caller compatibility).
    """
    if mode == 'pos_enc':
        def dir_enc_fn(direction, _):
            return pos_enc(
                direction, min_deg=0, max_deg=deg_view, append_identity=True)
        return dir_enc_fn, 3 + 3 * deg_view * 2
    if mode == 'ide':
        # Output dims for deg_view 1..4.
        ide_dims = [4, 10, 20, 38]
        return generate_ide_fn(deg_view), ide_dims[deg_view - 1]
    raise NameError
# def generate_dir_enc_fn(deg_view):
# """Generate directional encoding (DE) function.
# Args:
# deg_view: number of spherical harmonics degrees to use.
# Returns:
# A function for evaluating directional encoding.
# """
# integrated_dir_enc_fn = generate_ide_fn(deg_view)
#
# def dir_enc_fn(xyz):
# """Function returning directional encoding (DE)."""
# return integrated_dir_enc_fn(xyz, torch.zeros_like(xyz[..., :1]))
#
# return dir_enc_fn
@torch.no_grad()
def get_surface_sliding(sdf, resolution=512, grid_boundary=[-1.1, 1.1], level=0):
    """Extract a mesh from an SDF via marching cubes over 512^3 sliding crops.

    Evaluating crop-by-crop -- with a coarse-to-fine point pyramid and an
    |sdf| < threshold mask so only points near the surface are re-queried --
    keeps peak memory bounded at large resolutions.

    Args:
        sdf: callable mapping an (N, 3) CUDA point tensor to (N,) SDF values.
        resolution: total grid resolution; must be a multiple of 512.
        grid_boundary: [min, max] extent shared by all three axes.
        level: iso-level -- NOTE(review): immediately overwritten with 0
            below, so this argument currently has no effect.

    Returns:
        A single trimesh.Trimesh concatenating all per-crop meshes.
        (`measure` is presumably skimage.measure, imported elsewhere in the
        file -- not visible in this chunk.)
    """
    avg_pool_3d = torch.nn.AvgPool3d(2, stride=2)
    upsample = torch.nn.Upsample(scale_factor=2, mode='nearest')
    assert resolution % 512 == 0
    resN = resolution
    cropN = 512
    level = 0
    N = resN // cropN
    grid_min = [grid_boundary[0], grid_boundary[0], grid_boundary[0]]
    grid_max = [grid_boundary[1], grid_boundary[1], grid_boundary[1]]
    # Crop boundaries along each axis.
    xs = np.linspace(grid_min[0], grid_max[0], N+1)
    ys = np.linspace(grid_min[1], grid_max[1], N+1)
    zs = np.linspace(grid_min[2], grid_max[2], N+1)
    print(xs)
    print(ys)
    print(zs)
    meshes = []
    for i in range(N):
        for j in range(N):
            for k in range(N):
                print(i, j, k)
                x_min, x_max = xs[i], xs[i+1]
                y_min, y_max = ys[j], ys[j+1]
                z_min, z_max = zs[k], zs[k+1]
                x = np.linspace(x_min, x_max, cropN)
                y = np.linspace(y_min, y_max, cropN)
                z = np.linspace(z_min, z_max, cropN)
                xx, yy, zz = np.meshgrid(x, y, z, indexing='ij')
                points = torch.tensor(np.vstack([xx.ravel(), yy.ravel(), zz.ravel()]).T, dtype=torch.float).cuda()
                # Batched SDF evaluation to bound per-call GPU memory.
                def evaluate(points):
                    z = []
                    for _, pnts in enumerate(torch.split(points, 100000, dim=0)):
                        z.append(sdf(pnts))
                    z = torch.cat(z, axis=0)
                    return z
                # construct point pyramids
                points = points.reshape(cropN, cropN, cropN, 3).permute(3, 0, 1, 2)
                points_pyramid = [points]
                for _ in range(3):
                    points = avg_pool_3d(points[None])[0]
                    points_pyramid.append(points)
                points_pyramid = points_pyramid[::-1]
                # evalute pyramid with mask
                mask = None
                # Half-width of the "near surface" band; halved at each finer level.
                threshold = 2 * (x_max - x_min)/cropN * 8
                for pid, pts in enumerate(points_pyramid):
                    coarse_N = pts.shape[-1]
                    pts = pts.reshape(3, -1).permute(1, 0).contiguous()
                    if mask is None:
                        # Coarsest level: evaluate everywhere.
                        pts_sdf = evaluate(pts)
                    else:
                        # Finer levels: only re-evaluate points near the surface;
                        # pts_sdf carries upsampled values from the previous level.
                        mask = mask.reshape(-1)
                        pts_to_eval = pts[mask]
                        #import pdb; pdb.set_trace()
                        if pts_to_eval.shape[0] > 0:
                            pts_sdf_eval = evaluate(pts_to_eval.contiguous())
                            pts_sdf[mask] = pts_sdf_eval
                        print("ratio", pts_to_eval.shape[0] / pts.shape[0])
                    if pid < 3:
                        # update mask
                        mask = torch.abs(pts_sdf) < threshold
                        mask = mask.reshape(coarse_N, coarse_N, coarse_N)[None, None]
                        mask = upsample(mask.float()).bool()
                        pts_sdf = pts_sdf.reshape(coarse_N, coarse_N, coarse_N)[None, None]
                        pts_sdf = upsample(pts_sdf)
                        pts_sdf = pts_sdf.reshape(-1)
                        threshold /= 2.
                z = pts_sdf.detach().cpu().numpy()
                # Skip crops whose SDF never crosses the iso-level.
                if (not (np.min(z) > level or np.max(z) < level)):
                    z = z.astype(np.float32)
                    verts, faces, normals, values = measure.marching_cubes(
                        volume=z.reshape(cropN, cropN, cropN), #.transpose([1, 0, 2]),
                        level=level,
                        spacing=(
                            (x_max - x_min)/(cropN-1),
                            (y_max - y_min)/(cropN-1),
                            (z_max - z_min)/(cropN-1) ))
                    print(np.array([x_min, y_min, z_min]))
                    print(verts.min(), verts.max())
                    # Shift the crop-local vertices into world coordinates.
                    verts = verts + np.array([x_min, y_min, z_min])
                    print(verts.min(), verts.max())
                    meshcrop = trimesh.Trimesh(verts, faces, normals)
                    #meshcrop.export(f"{i}_{j}_{k}.ply")
                    meshes.append(meshcrop)
    combined = trimesh.util.concatenate(meshes)
    return combined
# copy from MiDaS
def compute_scale_and_shift(prediction, target, mask):
    """Closed-form least-squares scale/shift aligning prediction to target
    (copied from MiDaS). Inputs are (B, H, W); returns per-batch (scale,
    shift) vectors, zero where the normal equations are singular."""
    # Normal equations A x = b with symmetric A = [[a00, a01], [a01, a11]].
    a_00 = torch.sum(mask * prediction * prediction, (1, 2))
    a_01 = torch.sum(mask * prediction, (1, 2))
    a_11 = torch.sum(mask, (1, 2))
    b_0 = torch.sum(mask * prediction * target, (1, 2))
    b_1 = torch.sum(mask * target, (1, 2))
    scale = torch.zeros_like(b_0)
    shift = torch.zeros_like(b_1)
    # Solve via the explicit 2x2 inverse, only where det != 0.
    det = a_00 * a_11 - a_01 * a_01
    valid = det.nonzero()
    scale[valid] = (a_11[valid] * b_0[valid] - a_01[valid] * b_1[valid]) / det[valid]
    shift[valid] = (-a_01[valid] * b_0[valid] + a_00[valid] * b_1[valid]) / det[valid]
    return scale, shift
def reduction_batch_based(image_loss, M):
    """Average per-image losses over the total valid-pixel count of the batch.

    Returns 0 when no pixel is valid (sum(M) == 0 implies sum(image_loss) == 0).
    """
    divisor = torch.sum(M)
    return torch.sum(image_loss) / divisor if divisor != 0 else 0
def reduction_image_based(image_loss, M):
    """Mean over images of (per-image loss / its valid count).

    Images with M == 0 contribute their raw (zero) loss unchanged.
    Note: normalizes `image_loss` in place for the valid entries.
    """
    valid = M.nonzero()
    image_loss[valid] = image_loss[valid] / M[valid]
    return torch.mean(image_loss)
def mse_loss(prediction, target, mask, reduction=reduction_batch_based):
    """Masked squared error over (B, H, W) maps, normalized by 2x the
    per-image valid-pixel count via the chosen reduction."""
    valid_counts = torch.sum(mask, (1, 2))
    residual = prediction - target
    per_image = torch.sum(mask * residual * residual, (1, 2))
    return reduction(per_image, 2 * valid_counts)
def gradient_loss(prediction, target, mask, reduction=reduction_batch_based):
    """Masked gradient-matching loss (from MiDaS): L1 of the finite-difference
    gradients of the masked residual, valid only where both neighboring
    pixels are masked in."""
    valid_counts = torch.sum(mask, (1, 2))
    residual = torch.mul(mask, prediction - target)
    gx = torch.abs(residual[:, :, 1:] - residual[:, :, :-1])
    gx = torch.mul(torch.mul(mask[:, :, 1:], mask[:, :, :-1]), gx)
    gy = torch.abs(residual[:, 1:, :] - residual[:, :-1, :])
    gy = torch.mul(torch.mul(mask[:, 1:, :], mask[:, :-1, :]), gy)
    per_image = torch.sum(gx, (1, 2)) + torch.sum(gy, (1, 2))
    return reduction(per_image, valid_counts)
class MSELoss(nn.Module):
    """Masked MSE with a selectable reduction ('batch-based' or image-based)."""
    def __init__(self, reduction='batch-based'):
        super().__init__()
        # Anything other than 'batch-based' falls back to image-based.
        self.__reduction = (reduction_batch_based if reduction == 'batch-based'
                            else reduction_image_based)
    def forward(self, prediction, target, mask):
        return mse_loss(prediction, target, mask, reduction=self.__reduction)
class GradientLoss(nn.Module):
    """Multi-scale masked gradient-matching loss: sums gradient_loss over
    `scales` dyadically subsampled resolutions."""
    def __init__(self, scales=4, reduction='batch-based'):
        super().__init__()
        self.__reduction = (reduction_batch_based if reduction == 'batch-based'
                            else reduction_image_based)
        self.__scales = scales
    def forward(self, prediction, target, mask):
        total = 0
        for level in range(self.__scales):
            stride = 2 ** level
            total += gradient_loss(prediction[:, ::stride, ::stride],
                                   target[:, ::stride, ::stride],
                                   mask[:, ::stride, ::stride],
                                   reduction=self.__reduction)
        return total
class ScaleAndShiftInvariantLoss(nn.Module):
    """Scale/shift-invariant depth loss (MiDaS-style): aligns the prediction
    to the target with a closed-form scale and shift before applying the
    masked MSE plus an optional multi-scale gradient regularizer.
    """
    def __init__(self, alpha=0.5, scales=4, reduction='batch-based', ema_scale_shift=False, momentum=0.9, detach_scale_shift=False):
        # alpha: weight of the gradient regularizer (0 disables it).
        # ema_scale_shift: smooth the shared scale/shift with EMA buffers
        #     (only used on the share_scale_shift=True path).
        # detach_scale_shift: stop gradients through the alignment parameters.
        super().__init__()
        self.__data_loss = MSELoss(reduction=reduction)
        self.__regularization_loss = GradientLoss(scales=scales, reduction=reduction)
        self.__alpha = alpha
        self.__prediction_ssi = None
        self.ema_scale_shift = ema_scale_shift
        self.detach_scale_shift = detach_scale_shift
        self.momentum = momentum
        if self.ema_scale_shift:
            # Buffers so the EMA state is saved/loaded with the module.
            self.register_buffer('scale', torch.tensor([0]).float())
            self.register_buffer('shift', torch.tensor([0]).float())
    def forward(self, prediction, target, mask, share_scale_shift=False):
        if share_scale_shift:
            # Estimate ONE scale/shift over the flattened batch, then
            # broadcast it to every image.
            prediction_ = prediction.view(1, -1, prediction.size(-1))
            target_ = target.view(1, -1, target.size(-1))
            mask_ = mask.view(1, -1, mask.size(-1))
            scale_, shift_ = compute_scale_and_shift(prediction_, target_, mask_)
            if self.detach_scale_shift:
                scale_ = scale_.detach()
                shift_ = shift_.detach()
            if self.ema_scale_shift:
                # Seed the EMA buffers on first use (0 acts as the "unset"
                # sentinel); the blend below is a no-op on that first step
                # since both terms then equal the fresh estimate.
                if self.scale.item() == 0:
                    self.scale.data = scale_
                if self.shift.item() == 0:
                    self.shift.data = shift_
                self.scale.data = self.momentum * self.scale.data + (1 - self.momentum) * scale_
                self.shift.data = self.momentum * self.shift.data + (1 - self.momentum) * shift_
                scale = self.scale.expand(prediction.size(0))
                shift = self.shift.expand(prediction.size(0))
            else:
                scale = scale_.expand(prediction.size(0))
                shift = shift_.expand(prediction.size(0))
        else:
            # Independent per-image alignment.
            scale, shift = compute_scale_and_shift(prediction, target, mask)
        self.__prediction_ssi = scale.view(-1, 1, 1) * prediction + shift.view(-1, 1, 1)
        total = self.__data_loss(self.__prediction_ssi, target, mask)
        if self.__alpha > 0:
            total += self.__alpha * self.__regularization_loss(self.__prediction_ssi, target, mask)
        return total
    def __get_prediction_ssi(self):
        # Last aligned prediction, exposed via the read-only property below.
        return self.__prediction_ssi
    prediction_ssi = property(__get_prediction_ssi)
# end copy

# ===== Voxurf-main/lib/ref_utils.py =====
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions for reflection directions and directional encodings."""
# from . import math
# import jax.numpy as jnp
import math

import numpy as np
import torch
def reflect(viewdirs, normals):
    """Reflect view directions about normals.
    The reflection of a vector v about a unit vector n is a vector u such that
    dot(v, n) = dot(u, n), and dot(u, u) = dot(v, v). The solution to these two
    equations is u = 2 dot(n, v) n - v.
    Args:
      viewdirs: [..., 3] tensor of view directions.
      normals: [..., 3] tensor of normal directions (assumed to be unit vectors).
    Returns:
      [..., 3] tensor of reflection directions.
    """
    # Ported from jnp to torch: this file only imports numpy/torch (the jax
    # import above is commented out), so the original jnp call raised NameError.
    return 2.0 * torch.sum(
        normals * viewdirs, dim=-1, keepdim=True) * normals - viewdirs
def l2_normalize(x, eps=torch.finfo(torch.float32).eps):
    """Normalize x to unit length along last axis (eps guards zero vectors)."""
    # Ported from jnp to torch: the original default argument referenced jnp
    # at import time, which raised NameError since jax is not imported here.
    return x / torch.sqrt(torch.clamp(torch.sum(x**2, dim=-1, keepdim=True), min=eps))
def compute_weighted_mae(weights, normals, normals_gt):
"""Compute weighted mean angular error, assuming normals are unit length."""
one_eps = 1 - jnp.finfo(jnp.float32).eps
return (weights * jnp.arccos(
jnp.clip((normals * normals_gt).sum(-1), -one_eps,
one_eps))).sum() / weights.sum() * 180.0 / jnp.pi
def generalized_binomial_coeff(a, k):
  """Compute generalized binomial coefficients C(a, k) for real-valued a.

  Args:
    a: real-valued "numerator" of the coefficient.
    k: non-negative integer.

  Returns:
    prod_{i=0}^{k-1} (a - i) / k!  (float; 1.0 when k == 0).
  """
  # Fix: `np.math` was removed in NumPy 1.25 -- use the stdlib math module.
  return np.prod(a - np.arange(k)) / math.factorial(k)
def assoc_legendre_coeff(l, m, k):
  """Compute associated Legendre polynomial coefficients.

  Returns the coefficient of the cos^k(theta)*sin^m(theta) term in the
  (l, m)th associated Legendre polynomial, P_l^m(cos(theta)).

  Args:
    l: associated Legendre polynomial degree.
    m: associated Legendre polynomial order.
    k: power of cos(theta).

  Returns:
    A float, the coefficient of the term corresponding to the inputs.
  """
  # Fix: `np.math` was removed in NumPy 1.25 -- use the stdlib math module.
  return ((-1)**m * 2**l * math.factorial(l) / math.factorial(k) /
          math.factorial(l - k - m) *
          generalized_binomial_coeff(0.5 * (l + k + m - 1.0), l))
def sph_harm_coeff(l, m, k):
  """Compute spherical harmonic coefficients.

  Combines the (l, m) normalization constant with the associated Legendre
  coefficient for the cos^k term.
  """
  # Fix: `np.math` was removed in NumPy 1.25 -- use the stdlib math module.
  return (np.sqrt(
      (2.0 * l + 1.0) * math.factorial(l - m) /
      (4.0 * np.pi * math.factorial(l + m))) * assoc_legendre_coeff(l, m, k))
def get_ml_array(deg_view):
  """Create a list with all pairs of (l, m) values to use in the encoding."""
  # Degrees are powers of two 1, 2, ..., 2**(deg_view-1); for each degree we
  # keep only the nonnegative orders m (real/imaginary parts are split later).
  pairs = [(m, 2**i) for i in range(deg_view) for m in range(2**i + 1)]
  # Shape (2, N): row 0 holds the m values, row 1 the l values.
  return np.array(pairs).T
def generate_ide_fn(deg_view):
  """Generate integrated directional encoding (IDE) function.

  This function returns a function that computes the integrated directional
  encoding from Equations 6-8 of arxiv.org/abs/2112.03907.

  Args:
    deg_view: number of spherical harmonics degrees to use.

  Returns:
    A function for evaluating integrated directional encoding.

  Raises:
    ValueError: if deg_view is larger than 5.
  """
  # Fix: ported from jax (`jnp` / `math.matmul` were undefined because the jax
  # imports are commented out at the top of this file) to torch.
  if deg_view > 5:
    raise ValueError('Only deg_view of at most 5 is numerically stable.')
  ml_array = get_ml_array(deg_view)
  l_max = 2**(deg_view - 1)
  # Create a matrix corresponding to ml_array holding all coefficients, which,
  # when multiplied (from the right) by the z coordinate Vandermonde matrix,
  # results in the z component of the encoding.
  mat = np.zeros((l_max + 1, ml_array.shape[1]))
  for i, (m, l) in enumerate(ml_array.T):
    for k in range(l - m + 1):
      mat[k, i] = sph_harm_coeff(l, m, k)

  def integrated_dir_enc_fn(xyz, kappa_inv):
    """Function returning integrated directional encoding (IDE).

    Args:
      xyz: [..., 3] tensor of Cartesian coordinates of directions to evaluate at.
      kappa_inv: [..., 1] reciprocal of the concentration parameter of the von
        Mises-Fisher distribution.

    Returns:
      A tensor with the resulting IDE.
    """
    x = xyz[..., 0:1]
    y = xyz[..., 1:2]
    z = xyz[..., 2:3]
    # Compute z Vandermonde matrix.
    vmz = torch.cat([z**i for i in range(mat.shape[0])], dim=-1)
    # Compute x+iy Vandermonde matrix (complex dtype).
    vmxy = torch.cat([(x + 1j * y)**m for m in ml_array[0, :]], dim=-1)
    # Get spherical harmonics.
    basis = torch.as_tensor(mat, dtype=vmz.dtype, device=vmz.device)
    sph_harms = vmxy * torch.matmul(vmz, basis)
    # Apply attenuation function using the von Mises-Fisher distribution
    # concentration parameter, kappa.
    sigma = torch.as_tensor(0.5 * ml_array[1, :] * (ml_array[1, :] + 1),
                            dtype=xyz.dtype, device=xyz.device)
    ide = sph_harms * torch.exp(-sigma * kappa_inv)
    # Split into real and imaginary parts and return.
    return torch.cat([torch.real(ide), torch.imag(ide)], dim=-1)

  return integrated_dir_enc_fn
def generate_dir_enc_fn(deg_view):
  """Generate directional encoding (DE) function.

  Args:
    deg_view: number of spherical harmonics degrees to use.

  Returns:
    A function for evaluating directional encoding.
  """
  # A plain DE is an IDE evaluated with zero kappa_inv (no attenuation).
  ide_fn = generate_ide_fn(deg_view)

  def dir_enc_fn(xyz):
    """Function returning directional encoding (DE)."""
    return ide_fn(xyz, torch.zeros_like(xyz[..., :1]))

  return dir_enc_fn
Voxurf | Voxurf-main/lib/load_co3d.py | import os
import json
import gzip
import glob
import torch
import numpy as np
import imageio
import torch.nn.functional as F
import cv2
def load_co3d_data(cfg):
    """Load one CO3D sequence (images, masks, poses, intrinsics, splits).

    cfg must provide: annot_path (gzipped frame-annotation json), split_path
    (json mapping split names to [sequence, ..., image_path] entries),
    sequence_name and datadir. Frames whose mask is empty are dropped.
    Returns (imgs, masks, poses, render_poses, [H, W, focal], Ks, i_split).
    """
    # load meta
    with gzip.open(cfg.annot_path, 'rt', encoding='utf8') as zipfile:
        annot = [v for v in json.load(zipfile) if v['sequence_name'] == cfg.sequence_name]
    with open(cfg.split_path) as f:
        split = json.load(f)
    # Split names containing 'known' are training views; the rest are test.
    train_im_path = set()
    test_im_path = set()
    for k, lst in split.items():
        for v in lst:
            if v[0] == cfg.sequence_name:
                if 'known' in k:
                    train_im_path.add(v[-1])
                else:
                    test_im_path.add(v[-1])
    assert len(annot) == len(train_im_path) + len(test_im_path), 'Mismatch: '\
        f'{len(annot)} == {len(train_im_path) + len(test_im_path)}'
    # load datas
    imgs = []
    masks = []
    poses = []
    Ks = []
    i_split = [[], []]
    remove_empty_masks_cnt = [0, 0]
    for i, meta in enumerate(annot):
        im_fname = meta['image']['path']
        assert im_fname in train_im_path or im_fname in test_im_path
        sid = 0 if im_fname in train_im_path else 1  # 0 = train, 1 = test
        # Skip frames whose annotated mask has zero mass ...
        if meta['mask']['mass'] == 0:
            remove_empty_masks_cnt[sid] += 1
            continue
        im_path = os.path.join(cfg.datadir, im_fname)
        mask_path = os.path.join(cfg.datadir, meta['mask']['path'])
        mask = imageio.imread(mask_path) / 255.
        # ... or whose loaded mask never reaches foreground level.
        if mask.max() < 0.5:
            remove_empty_masks_cnt[sid] += 1
            continue
        # World-to-camera [R|T] -> invert to get camera-to-world pose.
        Rt = np.concatenate([meta['viewpoint']['R'], np.array(meta['viewpoint']['T'])[:,None]], 1)
        pose = np.linalg.inv(np.concatenate([Rt, [[0,0,0,1]]]))
        imgs.append(imageio.imread(im_path) / 255.)
        masks.append(mask)
        poses.append(pose)
        assert imgs[-1].shape[:2] == tuple(meta['image']['size'])
        # CO3D stores principal point / focal length in NDC; convert to pixels.
        half_image_size_wh = np.float32(meta['image']['size'][::-1]) * 0.5
        principal_point = np.float32(meta['viewpoint']['principal_point'])
        focal_length = np.float32(meta['viewpoint']['focal_length'])
        principal_point_px = -1.0 * (principal_point - 1.0) * half_image_size_wh
        focal_length_px = focal_length * half_image_size_wh
        Ks.append(np.array([
            [focal_length_px[0], 0, principal_point_px[0]],
            [0, focal_length_px[1], principal_point_px[1]],
            [0, 0, 1],
        ]))
        i_split[sid].append(len(imgs)-1)
    if sum(remove_empty_masks_cnt) > 0:
        print('load_co3d_data: removed %d train / %d test due to empty mask' % tuple(remove_empty_masks_cnt))
    print(f'load_co3d_data: num images {len(i_split[0])} train / {len(i_split[1])} test')
    imgs = np.array(imgs)
    masks = np.array(masks)
    poses = np.stack(poses, 0)
    Ks = np.stack(Ks, 0)
    # Render poses = test poses; i_split gains a third entry mirroring test.
    render_poses = poses[i_split[-1]]
    i_split.append(i_split[-1])
    # visyalization hwf: average H/W/focal across (possibly varied) frames.
    H, W = np.array([im.shape[:2] for im in imgs]).mean(0).astype(int)
    focal = Ks[:,[0,1],[0,1]].mean()
    return imgs, masks, poses, render_poses, [H, W, focal], Ks, i_split
| 3,135 | 35.465116 | 109 | py |
Voxurf | Voxurf-main/lib/load_tankstemple.py | import os
import glob
import torch
import numpy as np
import imageio
import json
import torch.nn.functional as F
import cv2
def load_tankstemple_data(basedir):
    """Load a Tanks&Temples scene from basedir (pose/*.txt, rgb/*.png).

    The first character of each rgb filename encodes its split (0=train,
    1=test). Returns (imgs, poses, render_poses, [H, W, focal], K, i_split).
    """
    pose_paths = sorted(glob.glob(os.path.join(basedir, 'pose', '*txt')))
    rgb_paths = sorted(glob.glob(os.path.join(basedir, 'rgb', '*png')))
    all_poses = []
    all_imgs = []
    i_split = [[], []]
    for i, (pose_path, rgb_path) in enumerate(zip(pose_paths, rgb_paths)):
        # Leading digit of the filename selects the split bucket.
        i_set = int(os.path.split(rgb_path)[-1][0])
        all_poses.append(np.loadtxt(pose_path).astype(np.float32))
        all_imgs.append((imageio.imread(rgb_path) / 255.).astype(np.float32))
        i_split[i_set].append(i)
    imgs = np.stack(all_imgs, 0)
    poses = np.stack(all_poses, 0)
    # Third split entry (used for rendering) mirrors the test split.
    i_split.append(i_split[-1])
    path_intrinsics = os.path.join(basedir, 'intrinsics.txt')
    H, W = imgs[0].shape[:2]
    K = np.loadtxt(path_intrinsics)
    focal = float(K[0,0])
    """ delete single side """
    # Keep only the 50 training cameras closest to the first camera position.
    ref_pos = poses[0][:,-1]
    dist = ((poses[:,:,-1] - ref_pos[None]) ** 2).sum(-1)
    i_select = np.argsort(dist)[:50]
    i_split[0] = i_select.tolist()
    # Use a precomputed fly-through trajectory when available.
    path_traj = os.path.join(basedir, 'test_traj.txt')
    if os.path.isfile(path_traj):
        render_poses = torch.Tensor(np.loadtxt(path_traj).reshape(-1,4,4).astype(np.float32))
    else:
        render_poses = poses[i_split[-1]]
    return imgs, poses, render_poses, [H, W, focal], K, i_split
def normalize(x):
    """Return ``x`` scaled to unit Euclidean norm."""
    norm = np.linalg.norm(x)
    return x / norm
def load_tankstemple_data_bound(basedir, movie_render_kwargs={}):
    """Load a Tanks&Temples scene and build a circular fly-through trajectory.

    Same on-disk layout as load_tankstemple_data. movie_render_kwargs may
    contain scale_r, shift_x/y/z, pitch_deg and flip_up_vec to shape the
    generated render path. Returns
    (imgs, poses, render_poses, [H, W, focal], K, i_split).
    """
    pose_paths = sorted(glob.glob(os.path.join(basedir, 'pose', '*txt')))
    rgb_paths = sorted(glob.glob(os.path.join(basedir, 'rgb', '*png')))
    all_poses = []
    all_imgs = []
    i_split = [[], []]
    for i, (pose_path, rgb_path) in enumerate(zip(pose_paths, rgb_paths)):
        # Leading digit of the filename selects the split bucket (0=train, 1=test).
        i_set = int(os.path.split(rgb_path)[-1][0])
        all_poses.append(np.loadtxt(pose_path).astype(np.float32))
        all_imgs.append((imageio.imread(rgb_path) / 255.).astype(np.float32))
        i_split[i_set].append(i)
    imgs = np.stack(all_imgs, 0)
    poses = np.stack(all_poses, 0)
    i_split.append(i_split[-1])
    path_intrinsics = os.path.join(basedir, 'intrinsics.txt')
    H, W = imgs[0].shape[:2]
    K = np.loadtxt(path_intrinsics)
    focal = float(K[0,0])
    ### generate spiral poses for rendering fly-through movie
    # Circle radius = mean camera distance from the centroid, optionally scaled.
    centroid = poses[:,:3,3].mean(0)
    radcircle = movie_render_kwargs.get('scale_r', 1.0) * np.linalg.norm(poses[:,:3,3] - centroid, axis=-1).mean()
    centroid[0] += movie_render_kwargs.get('shift_x', 0)
    centroid[1] += movie_render_kwargs.get('shift_y', 0)
    centroid[2] += movie_render_kwargs.get('shift_z', 0)
    # Convert the requested pitch into a look-at height on the circle.
    new_up_rad = movie_render_kwargs.get('pitch_deg', 0) * np.pi / 180
    target_y = radcircle * np.tan(new_up_rad)
    render_poses = []
    for th in np.linspace(0., 2.*np.pi, 200):
        camorigin = np.array([radcircle * np.cos(th), 0, radcircle * np.sin(th)])
        if movie_render_kwargs.get('flip_up_vec', False):
            up = np.array([0,-1.,0])
        else:
            up = np.array([0,1.,0])
        # Build an orthonormal camera frame (right, up, backward).
        vec2 = normalize(camorigin)
        vec0 = normalize(np.cross(vec2, up))
        vec1 = normalize(np.cross(vec2, vec0))
        pos = camorigin + centroid
        # rotate to align with new pitch rotation
        lookat = -vec2
        lookat[1] = target_y
        lookat = normalize(lookat)
        lookat *= -1
        vec2 = -lookat
        vec1 = normalize(np.cross(vec2, vec0))
        p = np.stack([vec0, vec1, vec2, pos], 1)
        render_poses.append(p)
    render_poses = np.stack(render_poses, 0)
    # Append the (constant) last column of the first pose to make 3x5 matrices.
    render_poses = np.concatenate([render_poses, np.broadcast_to(poses[0,:3,-1:], render_poses[:,:3,-1:].shape)], -1)
    return imgs, poses, render_poses, [H, W, focal], K, i_split
| 3,813 | 32.752212 | 117 | py |
Voxurf | Voxurf-main/lib/load_scannet.py | import os
import torch
import torch.nn.functional as F
import numpy as np
from glob import glob
import cv2
import random
import imageio
import skimage
def load_rgb(path, normalize_rgb = False):
    """Read the image at ``path`` and return it as a float32 array.

    ``normalize_rgb`` is accepted for API compatibility but currently has no
    effect: the [0,1] -> [-1,1] remapping it once triggered is disabled.
    """
    return skimage.img_as_float32(imageio.imread(path))
def load_K_Rt_from_P(filename, P=None):
    """Decompose a 3x4 projection matrix into 4x4 intrinsics and pose.

    If ``P`` is None it is parsed from ``filename`` (space-separated rows of
    four values, with an optional single header line when the file has four
    lines). Returns (intrinsics, pose) where intrinsics is 4x4 with K in the
    top-left block and pose is the 4x4 camera-to-world matrix.
    """
    if P is None:
        # Fix: the original leaked the file handle (open() without close).
        with open(filename) as f:
            lines = f.read().splitlines()
        if len(lines) == 4:  # skip a header line if present
            lines = lines[1:]
        lines = [[x[0], x[1], x[2], x[3]] for x in (x.split(" ") for x in lines)]
        P = np.asarray(lines).astype(np.float32).squeeze()

    out = cv2.decomposeProjectionMatrix(P)
    K = out[0]
    R = out[1]
    t = out[2]

    K = K/K[2,2]  # normalize so K[2,2] == 1
    intrinsics = np.eye(4)
    intrinsics[:3, :3] = K

    pose = np.eye(4, dtype=np.float32)
    pose[:3, :3] = R.transpose()
    pose[:3,3] = (t[:3] / t[3])[:,0]  # de-homogenize the camera center

    return intrinsics, pose
def glob_imgs(path):
    """Return all png/jpg image files (either case) directly under ``path``."""
    # Extensions are tried in a fixed order, matching the original behavior.
    return [p
            for ext in ('*.png', '*.jpg', '*.JPEG', '*.JPG')
            for p in glob(os.path.join(path, ext))]
def glob_data(data_dir):
    """Return the files matching the glob pattern ``data_dir``, sorted."""
    return sorted(glob(data_dir))
def load_scannet_data(data_dir, img_res=[384, 384], center_crop_type='no_crop', use_mask=False, num_views=-1):
    """Load a MonoSDF-style ScanNet scene: RGB, monocular depth/normal cues,
    optional masks, and cameras from cameras.npz.

    center_crop_type adjusts intrinsics for the resize/crop applied when the
    omnidata cues were generated. Returns (imgs, poses, render_poses,
    [H, W, focal], K, i_split, scale_mat0, masks, depths, normals).
    """
    # instance_dir = os.path.join(data_dir, 'scan{0}'.format(scan_id))
    instance_dir = data_dir
    total_pixels = img_res[0] * img_res[1]
    img_res = img_res
    num_views = num_views
    assert num_views in [-1, 3, 6, 9]
    assert os.path.exists(instance_dir), "Data directory is empty"
    image_paths = glob_data(os.path.join('{0}'.format(instance_dir), "*_rgb.png"))
    depth_paths = glob_data(os.path.join('{0}'.format(instance_dir), "*_depth.npy"))
    normal_paths = glob_data(os.path.join('{0}'.format(instance_dir), "*_normal.npy"))
    # mask is only used in the replica dataset as some monocular depth predictions have very large error and we ignore it
    if use_mask:
        mask_paths = glob_data(os.path.join('{0}'.format(instance_dir), "*_mask.npy"))
    else:
        mask_paths = None
    n_images = len(image_paths)
    cam_file = '{0}/cameras.npz'.format(instance_dir)
    camera_dict = np.load(cam_file)
    scale_mats = [camera_dict['scale_mat_%d' % idx].astype(np.float32) for idx in range(n_images)]
    world_mats = [camera_dict['world_mat_%d' % idx].astype(np.float32) for idx in range(n_images)]
    # cam_file_2 = '{0}/cameras_sphere.npz'.format(instance_dir)
    # camera_dict_2 = np.load(cam_file_2)
    # scale_mats_2 = [camera_dict_2['scale_mat_%d' % idx].astype(np.float32) for idx in range(n_images)]
    # world_mats_2 = [camera_dict_2['world_mat_%d' % idx].astype(np.float32) for idx in range(n_images)]
    # for i in range(n_images):
    #     assert np.sum(np.abs(scale_mats[i] - scale_mats_2[i])) == 0
    #     assert np.sum(np.abs(world_mats[i] - world_mats_2[i])) == 0
    intrinsics_all = []
    pose_all = []
    for scale_mat, world_mat in zip(scale_mats, world_mats):
        # Full projection (with normalization) -> decompose into K and pose.
        P = world_mat @ scale_mat
        P = P[:3, :4]
        intrinsics, pose = load_K_Rt_from_P(None, P)
        # because we do resize and center crop 384x384 when using omnidata model, we need to adjust the camera intrinsic accordingly
        if center_crop_type == 'center_crop_for_replica':
            scale = 384 / 680
            offset = (1200 - 680 ) * 0.5
            intrinsics[0, 2] -= offset
            intrinsics[:2, :] *= scale
        elif center_crop_type == 'center_crop_for_tnt':
            scale = 384 / 540
            offset = (960 - 540) * 0.5
            intrinsics[0, 2] -= offset
            intrinsics[:2, :] *= scale
        elif center_crop_type == 'center_crop_for_dtu':
            scale = 384 / 1200
            offset = (1600 - 1200) * 0.5
            intrinsics[0, 2] -= offset
            intrinsics[:2, :] *= scale
        elif center_crop_type == 'padded_for_dtu':
            scale = 384 / 1200
            offset = 0
            intrinsics[0, 2] -= offset
            intrinsics[:2, :] *= scale
        elif center_crop_type == 'no_crop':  # for scannet dataset, we already adjust the camera intrinsic duing preprocessing so nothing to be done here
            pass
        else:
            raise NotImplementedError
        intrinsics_all.append(intrinsics)
        pose_all.append(pose)
    rgb_images = []
    for path in image_paths:
        rgb = load_rgb(path)
        rgb_images.append(rgb)
    imgs = np.stack(rgb_images, 0)
    poses = np.stack(pose_all, 0)
    K = np.stack(intrinsics_all, 0)
    # NOTE(review): the stacked K above is immediately overwritten with the
    # first camera's intrinsics -- a single shared K is assumed; confirm.
    K = intrinsics_all[0]
    H, W = imgs[0].shape[:2]
    focal = intrinsics_all[0][0,0]
    depth_images = []
    normal_images = []
    for dpath, npath in zip(depth_paths, normal_paths):
        depth = np.load(dpath)
        depth_images.append(depth)
        normal = np.load(npath)
        # important as the output of omnidata is normalized
        normal = normal * 2. - 1.
        normal = np.transpose(normal, (1,2,0))
        normal_images.append(normal)
    depth_images = np.stack(depth_images, 0)
    normal_images = np.stack(normal_images, 0)
    # load mask
    mask_images = []
    if mask_paths is None:
        # No masks on disk: every pixel counts (all-ones masks).
        for rgb in rgb_images:
            mask = np.ones_like(rgb[:, :, :1])
            mask_images.append(mask)
    else:
        for path in mask_paths:
            mask = np.load(path)
            mask_images.append(mask)
    masks = np.stack(mask_images, 0)
    # Train on all frames; validate/render on every 10th frame.
    i_split = [np.array(np.arange(len(imgs))), np.array(np.arange(0, len(imgs), 10)), np.array(np.arange(0, len(imgs), 10))]
    render_poses = poses[i_split[-1]]
    return imgs, poses, render_poses, [H, W, focal], K, i_split, scale_mats[0], masks, depth_images, normal_images
# if __name__ == "__main__":
# load_scannet_data('/mnt/petrelfs/wangjiaqi/VoxurF-new/data/scannet/scan1/')
| 6,003 | 31.106952 | 153 | py |
Voxurf | Voxurf-main/lib/voxurf_fine.py | import os
import time
import numpy as np
from copy import deepcopy
import cv2
import math
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.nn.functional as F
from lib.dvgo_ori import extract_geometry
from torch_scatter import segment_coo
from . import grid
from torch.utils.cpp_extension import load
# JIT-compile the CUDA rendering extension that lives next to this file.
parent_dir = os.path.dirname(os.path.abspath(__file__))
render_utils_cuda = load(
        name='render_utils_cuda',
        sources=[
            os.path.join(parent_dir, path)
            for path in [os.path.join('cuda', 'render_utils.cpp'), os.path.join('cuda', 'render_utils_kernel.cu')]],
        verbose=True)
'''Model'''
class Voxurf(torch.nn.Module):
"""
This module is modified from DirectVoxGO https://github.com/sunset1995/DirectVoxGO/blob/main/lib/dvgo.py
"""
def __init__(self, xyz_min, xyz_max,
num_voxels=0, num_voxels_base=0,
alpha_init=None,
nearest=False,
mask_cache_path=None, mask_cache_thres=1e-3,
fast_color_thres=0,
rgbnet_dim=0, rgbnet_direct=False, rgbnet_full_implicit=False,
rgbnet_depth=3, rgbnet_width=128,
posbase_pe=5, viewbase_pe=4,
center_sdf=False, grad_feat=(1.0,), sdf_feat=(),
use_layer_norm=False,
grad_mode='interpolate',
s_ratio=2000, s_start=0.2, s_learn=False, step_start=0,
smooth_sdf=False,
smooth_ksize=0, smooth_sigma=1,
k_rgbnet_depth=3, k_res=False, k_posbase_pe=5, k_viewbase_pe=4,
k_center_sdf=False, k_grad_feat=(1.0,), k_sdf_feat=(),
smooth_scale=True, use_grad_norm=True,
use_rgb_k=True, k_detach_1=True, k_detach_2=True,
use_rgbnet_k0=False,
**kwargs):
super(Voxurf, self).__init__()
self.register_buffer('xyz_min', torch.Tensor(xyz_min))
self.register_buffer('xyz_max', torch.Tensor(xyz_max))
self.fast_color_thres = fast_color_thres
self.nearest = nearest
self.smooth_scale = smooth_scale
self.s_ratio = s_ratio
self.s_start = s_start
self.s_learn = s_learn
self.step_start = step_start
self.s_val = nn.Parameter(torch.ones(1), requires_grad=s_learn).cuda()
self.s_val.data *= s_start
self.smooth_sdf = smooth_sdf
self.sdf_init_mode = "ball_init"
# determine based grid resolution
self.num_voxels_base = num_voxels_base
self.voxel_size_base = ((self.xyz_max - self.xyz_min).prod() / self.num_voxels_base).pow(1/3)
# determine the density bias shift
self.alpha_init = alpha_init
self.act_shift = np.log(1/(1-alpha_init) - 1)
print('dvgo: set density bias shift to', self.act_shift)
# determine init grid resolution
self._set_grid_resolution(num_voxels)
# init density voxel grid
self.density = torch.nn.Parameter(torch.zeros([1, 1, *self.world_size]))
if self.sdf_init_mode == "ball_init":
self.sdf = grid.create_grid(
'DenseGrid', channels=1, world_size=self.world_size,
xyz_min=self.xyz_min, xyz_max=self.xyz_max)
x, y, z = np.mgrid[-1.0:1.0:self.world_size[0].item() * 1j, -1.0:1.0:self.world_size[1].item() * 1j, -1.0:1.0:self.world_size[2].item() * 1j]
self.sdf.grid.data = torch.from_numpy((x ** 2 + y ** 2 + z ** 2) ** 0.5 - 1).float()[None, None, ...]
elif self.sdf_init_mode == "random":
self.sdf = torch.nn.Parameter(torch.rand([1, 1, *self.world_size]) * 0.05) # random initialization
torch.nn.init.normal_(self.sdf, 0.0, 0.5)
else:
raise NotImplementedError
self.init_smooth_conv(smooth_ksize, smooth_sigma)
# init color representation
self.rgbnet_kwargs = {
'rgbnet_dim': rgbnet_dim, 'rgbnet_direct': rgbnet_direct,
'rgbnet_full_implicit': rgbnet_full_implicit,
'rgbnet_depth': rgbnet_depth, 'rgbnet_width': rgbnet_width,
'posbase_pe': posbase_pe, 'viewbase_pe': viewbase_pe,
}
if rgbnet_dim <= 0:
# color voxel grid (dvgo coarse stage)
self.k0_dim = 3
self.rgbnet = None
else:
self.k0_dim = rgbnet_dim
self.k0 = grid.create_grid(
'DenseGrid', channels=self.k0_dim, world_size=self.world_size,
xyz_min=self.xyz_min, xyz_max=self.xyz_max)
self.rgbnet_direct = rgbnet_direct
self.register_buffer('posfreq', torch.FloatTensor([(2**i) for i in range(posbase_pe)]))
self.register_buffer('viewfreq', torch.FloatTensor([(2**i) for i in range(viewbase_pe)]))
dim0 = (3+3*posbase_pe*2) + (3+3*viewbase_pe*2)
self.use_grad_norm = use_grad_norm
self.center_sdf = center_sdf
self.grad_feat = grad_feat
self.sdf_feat = sdf_feat
self.use_rgb_k = use_rgb_k
self.k_detach_1 = k_detach_1
self.k_detach_2 = k_detach_2
self.use_rgbnet_k0 = use_rgbnet_k0
self.use_layer_norm = use_layer_norm
dim0 += len(self.grad_feat) * 3
dim0 += len(self.sdf_feat) * 6
if self.use_rgbnet_k0:
dim0 += self.k0_dim
if self.center_sdf:
dim0 += 1
if not self.use_layer_norm:
self.rgbnet = nn.Sequential(
nn.Linear(dim0, rgbnet_width), nn.ReLU(inplace=True),
*[
nn.Sequential(nn.Linear(rgbnet_width, rgbnet_width), nn.ReLU(inplace=True))
for _ in range(rgbnet_depth-2)
],
nn.Linear(rgbnet_width, 3),
)
else:
self.rgbnet = nn.Sequential(
nn.Linear(dim0, rgbnet_width), nn.LayerNorm(rgbnet_width), nn.ReLU(inplace=True),
*[
nn.Sequential(nn.Linear(rgbnet_width, rgbnet_width), nn.LayerNorm(rgbnet_width), nn.ReLU(inplace=True))
for _ in range(rgbnet_depth-2)
],
nn.Linear(rgbnet_width, 3),
)
nn.init.constant_(self.rgbnet[-1].bias, 0)
print('feature voxel grid', self.k0.grid.shape)
print('mlp', self.rgbnet)
# the second rgb net
self.k_res = k_res
self.k_center_sdf = k_center_sdf
self.k_grad_feat = k_grad_feat
self.k_sdf_feat = k_sdf_feat
self.register_buffer('k_posfreq', torch.FloatTensor([(2**i) for i in range(k_posbase_pe)]))
self.register_buffer('k_viewfreq', torch.FloatTensor([(2**i) for i in range(k_viewbase_pe)]))
k_dim0 = (3+3*k_posbase_pe*2) + (3+3*k_viewbase_pe*2) + self.k0_dim
if self.k_res:
k_dim0 += 3
if self.k_center_sdf:
k_dim0 += 1
k_dim0 += len(self.k_grad_feat) * 3
k_dim0 += len(self.k_sdf_feat) * 6
if not self.use_layer_norm:
self.k_rgbnet = nn.Sequential(
nn.Linear(k_dim0, rgbnet_width), nn.ReLU(inplace=True),
*[
nn.Sequential(nn.Linear(rgbnet_width, rgbnet_width), nn.ReLU(inplace=True))
for _ in range(k_rgbnet_depth-2)
],
nn.Linear(rgbnet_width, 3),
)
else:
self.k_rgbnet = nn.Sequential(
nn.Linear(k_dim0, rgbnet_width), nn.LayerNorm(rgbnet_width), nn.ReLU(inplace=True),
*[
nn.Sequential(nn.Linear(rgbnet_width, rgbnet_width), nn.LayerNorm(rgbnet_width), nn.ReLU(inplace=True))
for _ in range(k_rgbnet_depth-2)
],
nn.Linear(rgbnet_width, 3),
)
nn.init.constant_(self.rgbnet[-1].bias, 0)
print('feature voxel grid', self.k0.grid.shape)
print('k_rgbnet mlp', self.k_rgbnet)
# Using the coarse geometry if provided (used to determine known free space and unknown space)
self.mask_cache_path = mask_cache_path
self.mask_cache_thres = mask_cache_thres
if mask_cache_path is not None and mask_cache_path:
self.mask_cache = MaskCache(
path=mask_cache_path,
mask_cache_thres=mask_cache_thres).to(self.xyz_min.device)
self._set_nonempty_mask()
else:
self.mask_cache = None
self.nonempty_mask = None
# grad conv to calculate gradient
self.init_gradient_conv()
self.grad_mode = grad_mode
    def init_gradient_conv(self, sigma = 0):
        """Build fixed (non-trainable) 3D conv kernels: a Sobel-like gradient
        operator (grad_conv, one output channel per axis) and a smoothing
        kernel (tv_smooth_conv) used by the smoothed-gradient TV term."""
        self.grad_conv = nn.Conv3d(1,3,(3,3,3),stride=(1,1,1), padding=(1, 1, 1), padding_mode='replicate')
        # fixme: a better operator?
        kernel = np.asarray([
            [[1,2,1],[2,4,2],[1,2,1]],
            [[2,4,2],[4,8,4],[2,4,2]],
            [[1,2,1],[2,4,2],[1,2,1]],
        ])
        # sigma controls the difference between naive [-1,1] and sobel kernel
        distance = np.zeros((3,3,3))
        for i in range(3):
            for j in range(3):
                for k in range(3):
                    distance[i,j,k] = ((i-1)**2 + (j-1)**2 + (k-1)**2 - 1)
        kernel0 = kernel * np.exp(-distance * sigma)
        # Normalize so the finite difference is expressed in world units.
        kernel1 = kernel0 / ( kernel0[0].sum() * 2 * self.voxel_size.item())
        weight = torch.from_numpy(np.concatenate([kernel1[None] for _ in range(3)])).float()
        # Turn each copy into a central-difference stencil along one axis:
        # zero out the middle slice and negate the lower slice.
        weight[0,1,:,:] *= 0
        weight[0,0,:,:] *= -1
        weight[1,:,1,:] *= 0
        weight[1,:,0,:] *= -1
        weight[2,:,:,1] *= 0
        weight[2,:,:,0] *= -1
        self.grad_conv.weight.data = weight.unsqueeze(1).float()
        self.grad_conv.bias.data = torch.zeros(3)
        for param in self.grad_conv.parameters():
            param.requires_grad = False
        # smooth conv for TV
        self.tv_smooth_conv = nn.Conv3d(1, 1, (3, 3, 3), stride=1, padding=1, padding_mode='replicate')
        weight = torch.from_numpy(kernel0 / kernel0.sum()).float()
        self.tv_smooth_conv.weight.data = weight.unsqueeze(0).unsqueeze(0).float()
        self.tv_smooth_conv.bias.data = torch.zeros(1)
        for param in self.tv_smooth_conv.parameters():
            param.requires_grad = False
        self.mask_kernel = weight.view(1, -1).float().cuda()
def _gaussian_3dconv(self, ksize=3, sigma=1):
x = np.arange(-(ksize//2),ksize//2 + 1,1)
y = np.arange(-(ksize//2),ksize//2 + 1,1)
z = np.arange(-(ksize//2),ksize//2 + 1,1)
xx, yy, zz = np.meshgrid(x,y,z)
kernel = np.exp(-(xx**2 + yy**2 + zz**2)/(2*sigma**2))
kernel = torch.from_numpy(kernel).to(self.sdf.grid)
m = nn.Conv3d(1,1,ksize,stride=1,padding=ksize//2, padding_mode='replicate')
m.weight.data = kernel[None, None, ...] / kernel.sum()
m.bias.data = torch.zeros(1)
for param in m.parameters():
param.requires_grad = False
return m
def init_smooth_conv_test_k3(self, ksize=3, sigma=0.4):
self.smooth_conv_test_k3 = self._gaussian_3dconv(ksize, sigma)
print("- "*10 + "init smooth conv test with ksize={} and sigma={}".format(ksize, sigma) + " -"*10)
def init_smooth_conv_test_k5(self, ksize=5, sigma=0.4):
self.smooth_conv_test_k5 = self._gaussian_3dconv(ksize, sigma)
print("- "*10 + "init smooth conv test with ksize={} and sigma={}".format(ksize, sigma) + " -"*10)
def init_smooth_conv(self, ksize=3, sigma=1):
self.smooth_sdf = ksize > 0
if self.smooth_sdf:
self.smooth_conv = self._gaussian_3dconv(ksize, sigma)
print("- "*10 + "init smooth conv with ksize={} and sigma={}".format(ksize, sigma) + " -"*10)
def init_feature_smooth_conv(self, ksize=3, sigma=1):
self.smooth_feature = ksize > 0
if self.smooth_feature:
self.feature_smooth_conv = self._gaussian_3dconv(ksize, sigma)
print("- "*10 + "init feature smooth conv with ksize={} and sigma={}".format(ksize, sigma) + " -"*10)
def init_sdf_from_sdf(self, sdf0=None, smooth=False, reduce=1., ksize=3, sigma=1., zero2neg=True):
print("\n", "- "*3 + "initing sdf from sdf" + " -"*3, "\n")
if sdf0.shape != self.sdf.grid.shape:
sdf0 = F.interpolate(sdf0, size=tuple(self.world_size), mode='trilinear', align_corners=True)
if smooth:
m = self._gaussian_3dconv(ksize, sigma)
sdf_data = m(sdf0 / reduce)
self.sdf.grid = torch.nn.Parameter(sdf_data).to(self.sdf.grid) / reduce
else:
self.sdf.grid.data = sdf0.to(self.sdf.grid) / reduce # + self.act_shift
if self.mask_cache is not None:
self._set_nonempty_mask()
if self.smooth_scale:
m = self._gaussian_3dconv(ksize=5, sigma=1)
with torch.no_grad():
self.sdf.grid = torch.nn.Parameter(m(self.sdf.grid.data)).cuda()
self.gradient = self.neus_sdf_gradient()
    def init_sdf_from_density(self, smooth=False, reduce=1., ksize=3, sigma=1., zero2neg=True):
        """Initialize the SDF grid from the density grid via -tanh(density).

        zero2neg maps exactly-zero density (empty space) to a strongly negative
        value before the tanh so it becomes positive (outside) SDF.
        """
        print("\n", "- "*3 + "initing sdf from density" + " -"*3, "\n")
        # NOTE(review): Parameter(...) * 10 yields a plain Tensor, so self.s is
        # not a registered parameter -- confirm self.s is actually used/trained.
        self.s = torch.nn.Parameter(torch.ones(1)) * 10
        if zero2neg:
            self.density.data[self.density.data==0] = -100
        if self.density.shape != self.sdf.grid.shape:
            self.density.data = F.interpolate(self.density.data, size=tuple(self.world_size), mode='trilinear', align_corners=True)
        if smooth:
            m = self._gaussian_3dconv(ksize, sigma)
            sdf_data = m(-torch.tanh(self.density.data) / reduce)
            self.sdf.grid = torch.nn.Parameter(sdf_data)
        else:
            self.sdf.grid.data = -torch.tanh(self.density.data) / reduce # + self.act_shift
        # Cache the finite-difference gradient of the freshly initialized SDF.
        self.gradient = self.neus_sdf_gradient()
def _set_grid_resolution(self, num_voxels):
# Determine grid resolution
self.num_voxels = num_voxels
self.voxel_size = ((self.xyz_max - self.xyz_min).prod() / num_voxels).pow(1/3)
self.world_size = ((self.xyz_max - self.xyz_min) / self.voxel_size).long()
self.voxel_size_ratio = self.voxel_size / self.voxel_size_base
print('dvgo: voxel_size ', self.voxel_size)
print('dvgo: world_size ', self.world_size)
print('dvgo: voxel_size_base ', self.voxel_size_base)
print('dvgo: voxel_size_ratio', self.voxel_size_ratio)
def get_kwargs(self):
return {
'xyz_min': self.xyz_min.cpu().numpy(),
'xyz_max': self.xyz_max.cpu().numpy(),
'num_voxels': self.num_voxels,
'num_voxels_base': self.num_voxels_base,
'alpha_init': self.alpha_init,
'nearest': self.nearest,
'mask_cache_path': self.mask_cache_path,
'mask_cache_thres': self.mask_cache_thres,
'fast_color_thres': self.fast_color_thres,
'grad_feat': self.grad_feat,
'sdf_feat': self.sdf_feat,
'k_grad_feat': self.k_grad_feat,
'k_sdf_feat': self.k_sdf_feat,
**self.rgbnet_kwargs,
}
def get_MaskCache_kwargs(self):
return {
'xyz_min': self.xyz_min.cpu().numpy(),
'xyz_max': self.xyz_max.cpu().numpy(),
'act_shift': self.act_shift,
'voxel_size_ratio': self.voxel_size_ratio,
'nearest': self.nearest
}
    @torch.no_grad()
    def _set_nonempty_mask(self):
        """Mark grid points outside the coarse-stage occupancy as empty by
        forcing density very negative and SDF positive (outside)."""
        # Find grid points that is inside nonempty (occupied) space
        self_grid_xyz = torch.stack(torch.meshgrid(
            torch.linspace(self.xyz_min[0], self.xyz_max[0], self.density.shape[2]),
            torch.linspace(self.xyz_min[1], self.xyz_max[1], self.density.shape[3]),
            torch.linspace(self.xyz_min[2], self.xyz_max[2], self.density.shape[4]),
        ), -1)
        nonempty_mask = self.mask_cache(self_grid_xyz)[None,None].contiguous()
        # Re-assign rather than re-register when the buffer already exists.
        if hasattr(self, 'nonempty_mask'):
            self.nonempty_mask = nonempty_mask
        else:
            self.register_buffer('nonempty_mask', nonempty_mask)
        self.density[~self.nonempty_mask] = -100
        self.sdf.grid[~self.nonempty_mask] = 1
    @torch.no_grad()
    def maskout_near_cam_vox(self, cam_o, near):
        """Force voxels within `near` of any camera origin to be empty
        (density -> -100, SDF -> +1), since geometry cannot sit on a camera."""
        self_grid_xyz = torch.stack(torch.meshgrid(
            torch.linspace(self.xyz_min[0], self.xyz_max[0], self.density.shape[2]),
            torch.linspace(self.xyz_min[1], self.xyz_max[1], self.density.shape[3]),
            torch.linspace(self.xyz_min[2], self.xyz_max[2], self.density.shape[4]),
        ), -1)
        # Distance from every grid point to its nearest camera origin.
        nearest_dist = torch.stack([
            (self_grid_xyz.unsqueeze(-2) - co).pow(2).sum(-1).sqrt().amin(-1)
            for co in cam_o.split(100)  # for memory saving
        ]).amin(0)
        self.density[nearest_dist[None,None] <= near] = -100
        self.sdf.grid[nearest_dist[None,None] <= near] = 1
    @torch.no_grad()
    def scale_volume_grid(self, num_voxels):
        """Resample density/SDF/feature grids to a new voxel budget
        (trilinear upsampling/downsampling), then refresh the empty mask."""
        print('dvgo: scale_volume_grid start')
        ori_world_size = self.world_size
        self._set_grid_resolution(num_voxels)
        print('dvgo: scale_volume_grid scale world_size from', ori_world_size, 'to', self.world_size)
        self.density = torch.nn.Parameter(
            F.interpolate(self.density.data, size=tuple(self.world_size), mode='trilinear', align_corners=True))
        self.sdf.scale_volume_grid(self.world_size)
        self.k0.scale_volume_grid(self.world_size)
        if self.mask_cache is not None:
            self._set_nonempty_mask()
        print('dvgo: scale_volume_grid finish')
    def density_total_variation_add_grad(self, weight, dense_mode):
        # Accumulate TV gradients on the density grid; weight is scaled by
        # grid resolution so the strength is resolution-independent.
        # NOTE(review): self.density is a bare nn.Parameter, which has no
        # total_variation_add_grad method -- this looks unused/dead; confirm.
        w = weight * self.world_size.max() / 128
        self.density.total_variation_add_grad(w, w, w, dense_mode)
    def sdf_total_variation_add_grad(self, weight, dense_mode):
        # Accumulate TV gradients directly on the SDF grid (in-place, via the
        # grid module's CUDA op); weight is normalized by grid resolution.
        w = weight * self.world_size.max() / 128
        self.sdf.total_variation_add_grad(w, w, w, dense_mode)
    def k0_total_variation_add_grad(self, weight, dense_mode):
        # Accumulate TV gradients on the color-feature grid k0, with the same
        # resolution-normalized weight as the SDF variant above.
        w = weight * self.world_size.max() / 128
        self.k0.total_variation_add_grad(w, w, w, dense_mode)
def density_total_variation(self, sdf_tv=0, smooth_grad_tv=0, grad_tv=0, smooth_sdf_tv=0):
t1 = time.time()
tv = 0
if sdf_tv > 0:
tv += total_variation(self.sdf.grid, self.nonempty_mask) / 2 / self.voxel_size * sdf_tv
if smooth_grad_tv > 0:
smooth_tv_error = (self.tv_smooth_conv(self.gradient.permute(1,0,2,3,4)).detach() - self.gradient.permute(1,0,2,3,4))
smooth_tv_error = smooth_tv_error[self.nonempty_mask.repeat(3,1,1,1,1)] ** 2
tv += smooth_tv_error.mean() * smooth_grad_tv
return tv
    def k0_total_variation(self, k0_tv=1., k0_grad_tv=0.):
        """Total-variation regularizer on the color-feature grid k0."""
        if self.rgbnet is not None:
            v = self.k0
        else:
            # NOTE(review): with rgbnet None, self.k0 is the grid module itself,
            # so sigmoid on it looks suspect -- confirm this branch is reachable.
            v = torch.sigmoid(self.k0)
        tv = 0
        if k0_tv > 0:
            tv += total_variation(v, self.nonempty_mask.repeat(1,v.shape[1],1,1,1))
        if k0_grad_tv > 0:
            raise NotImplementedError
        return tv
def activate_density(self, density, interval=None):
interval = interval if interval is not None else self.voxel_size_ratio
return 1 - torch.exp(-F.softplus(density + self.act_shift) * interval)
    def neus_sdf_gradient(self, mode=None, sdf=None):
        """Compute a [1, 3, D, H, W] gradient field from the SDF grid.

        mode 'interpolate' uses central differences (border left at zero),
        'grad_conv' applies the fixed Sobel-like conv, and 'raw' uses one-sided
        forward differences.
        """
        # the gradient grid from the sdf grid
        if sdf is None:
            sdf = self.sdf.grid
        if mode is None:
            mode = self.grad_mode
        if mode == 'interpolate':
            gradient = torch.zeros([1, 3] + [*self.sdf.grid.shape[-3:]]).to(self.sdf.grid.device)
            gradient[:,0,1:-1,:,:] = (sdf[:,0,2:,:,:] - sdf[:,0,:-2,:,:]) / 2 / self.voxel_size
            gradient[:,1,:,1:-1,:] = (sdf[:,0,:,2:,:] - sdf[:,0,:,:-2,:]) / 2 / self.voxel_size
            gradient[:,2,:,:,1:-1] = (sdf[:,0,:,:,2:] - sdf[:,0,:,:,:-2]) / 2 / self.voxel_size
        elif mode == 'grad_conv':
            gradient = self.grad_conv(sdf)
        elif mode == 'raw':
            gradient = torch.zeros([1, 3] + [*self.sdf.grid.shape[-3:]]).to(self.sdf.grid.device)
            gradient[:,0,:-1,:,:] = (sdf[:,0,1:,:,:] - sdf[:,0,:-1,:,:]) / self.voxel_size
            gradient[:,1,:,:-1,:] = (sdf[:,0,:,1:,:] - sdf[:,0,:,:-1,:]) / self.voxel_size
            gradient[:,2,:,:,:-1] = (sdf[:,0,:,:,1:] - sdf[:,0,:,:,:-1]) / self.voxel_size
        else:
            raise NotImplementedError
        return gradient
def neus_alpha_from_sdf_scatter(self, viewdirs, ray_id, dist, sdf, gradients, global_step,
                                is_train, use_mid=True):
    """Convert per-sample SDF values into NeuS-style alphas (flattened sample layout).

    Args:
        viewdirs: (N_rays, 3) view directions.
        ray_id: (M,) ray index of each flattened sample.
        dist: step size along the ray; reshaped to (-1, 1) below, so a tensor
            is expected — per the original comment it is constant here.
        sdf: (M,) sampled SDF values.
        gradients: (M, 3) SDF gradients at the samples.
        global_step: training iteration driving the s_val schedule.
        is_train: if True (and s is not learned), updates self.s_val in place.
        use_mid: must be True (asserted); the other branch is dead code.

    Returns:
        (s_val, alpha): the reported sharpness value and (M,) alphas.
    """
    # force s_val value to change with global step
    if is_train:
        if not self.s_learn:
            # scheduled sharpening: s_val decays roughly as s_ratio / step;
            # written in-place so the registered buffer tracks the schedule
            s_val = 1. / (global_step + self.s_ratio / self.s_start - self.step_start) * self.s_ratio
            self.s_val.data = torch.ones_like(self.s_val) * s_val
        else:
            s_val = self.s_val.item()
    else:
        # eval: the returned s_val is 0, but self.s_val still drives inv_s below
        s_val = 0
    dirs = viewdirs[ray_id]
    inv_s = torch.ones(1).cuda() / self.s_val
    assert use_mid
    if use_mid:
        # cosine between ray direction and the SDF gradient (surface normal)
        true_cos = (dirs * gradients).sum(-1, keepdim=True)
        cos_anneal_ratio = 1.0
        # with cos_anneal_ratio fixed at 1.0 this reduces to -relu(-true_cos)
        iter_cos = -(F.relu(-true_cos * 0.5 + 0.5) * (1.0 - cos_anneal_ratio) +
                     F.relu(-true_cos) * cos_anneal_ratio)  # always non-positive (M, 1)
        sdf = sdf.unsqueeze(-1)  # (M, 1)
        # dist is a constant in this implementation
        # Estimate signed distances at section points (half a step to each side)
        estimated_next_sdf = sdf + iter_cos * dist.reshape(-1, 1) * 0.5  # (M, 1)
        estimated_prev_sdf = sdf - iter_cos * dist.reshape(-1, 1) * 0.5  # (M, 1)
    else:
        # unreachable due to the assert above; kept from the original
        estimated_next_sdf = torch.cat([sdf[..., 1:], sdf[..., -1:]], -1).reshape(-1, 1)
        estimated_prev_sdf = torch.cat([sdf[..., :1], sdf[..., :-1]], -1).reshape(-1, 1)
    prev_cdf = torch.sigmoid(estimated_prev_sdf * inv_s.reshape(-1, 1))
    next_cdf = torch.sigmoid(estimated_next_sdf * inv_s.reshape(-1, 1))
    p = prev_cdf - next_cdf
    c = prev_cdf
    # NeuS alpha: fraction of sigmoid-CDF mass crossed in the section, clipped to [0, 1]
    alpha = ((p + 1e-5) / (c + 1e-5)).clip(0.0, 1.0).squeeze()
    return s_val, alpha
def grid_sampler(self, xyz, *grids, mode=None, align_corners=True, sample_ret=True, sample_grad=False, displace=0.1, smooth=False):
    '''Wrapper for the interp operation.

    Args:
        xyz: (..., 3) query points in world coordinates.
        grids: one or more (1, C, X, Y, Z) grids; only grids[0] is sampled.
        mode: grid_sample mode; defaults to nearest/trilinear per self.nearest.
        sample_ret: if True, return interpolated values at xyz.
        sample_grad: if True, also return finite-difference (grad, feat) via sample_sdfs.
        displace: kept for interface compatibility (unused; sample_sdfs uses [1.0]).
        smooth: pre-filter grids[0] with self.smooth_conv before sampling.

    Returns:
        sampled values, or [values, grad, feat] when both flags are set.
    '''
    if mode is None:
        # bilinear is actually trilinear if 5D input is given to grid_sample
        mode = 'nearest' if self.nearest else 'bilinear'
    shape = xyz.shape[:-1]
    xyz = xyz.reshape(1, 1, 1, -1, 3)
    if smooth:
        # BUG FIX: `*grids` collects into a tuple, so the original
        # `grids[0] = grid` raised TypeError whenever smooth=True.
        # Rebuild as a list before swapping in the smoothed grid.
        grids = list(grids)
        grids[0] = self.smooth_conv(grids[0])
    outs = []
    if sample_ret:
        # normalize to [-1, 1] and flip xyz->zyx for grid_sample's convention
        ind_norm = ((xyz - self.xyz_min) / (self.xyz_max - self.xyz_min)).flip((-1,)) * 2 - 1
        grid = grids[0]
        ret = F.grid_sample(grid, ind_norm, mode=mode, align_corners=align_corners).reshape(
            grid.shape[1], -1).T.reshape(*shape, grid.shape[1]).squeeze(-1)
        outs.append(ret)
    if sample_grad:
        grid = grids[0]
        feat, grad = self.sample_sdfs(xyz, grid, displace_list=[1.0], use_grad_norm=False)
        # reorder paired neighbor features / gradient channels from zyx back to xyz
        feat = torch.cat([feat[:, 4:6], feat[:, 2:4], feat[:, 0:2]], dim=-1)
        grad = torch.cat([grad[:, [2]], grad[:, [1]], grad[:, [0]]], dim=-1)
        outs.append(grad)
        outs.append(feat)
    if len(outs) == 1:
        return outs[0]
    return outs
def sample_sdfs(self, xyz, *grids, displace_list, mode='bilinear', align_corners=True, use_grad_norm=False):
    """Sample the SDF at +/- displaced axis neighbors of each point and form
    finite-difference gradients.

    Args:
        xyz: (..., 3) query points in world coordinates (effectively M points).
        grids: grids[0] is the (1, 1, X, Y, Z) SDF grid to sample.
        displace_list: voxel-unit offsets for the neighbor pairs (one
            feature/gradient set per entry).
        use_grad_norm: if True, normalize each 3-vector gradient to unit length.

    Returns:
        feat: (M, 6 * len(displace_list)) neighbor SDF samples, zyx-paired order.
        grad: (M, 3 * len(displace_list)) finite-difference gradients.
    """
    shape = xyz.shape[:-1]
    xyz = xyz.reshape(1,1,1,-1,3)
    grid = grids[0]
    # ind from xyz to zyx !!!!!
    ind_norm = ((xyz - self.xyz_min) / (self.xyz_max - self.xyz_min)).flip((-1,)) * 2 - 1
    grid_size = grid.size()[-3:]
    size_factor_zyx = torch.tensor([grid_size[2], grid_size[1], grid_size[0]]).cuda()
    # continuous voxel-space index of each query point
    ind = ((ind_norm + 1) / 2) * (size_factor_zyx - 1)
    # six axis-aligned neighbor directions, scaled by every displace value
    offset = torch.tensor([[-1, 0, 0], [1, 0, 0], [0, -1, 0], [0, 1, 0], [0, 0, -1], [0, 0, 1]]).cuda()
    displace = torch.tensor(displace_list).cuda()
    offset = offset[:, None, :] * displace[None, :, None]
    all_ind = ind.unsqueeze(-2) + offset.view(-1, 3)
    all_ind = all_ind.view(1, 1, 1, -1, 3)
    # clamp neighbor indices into the grid so border queries remain valid
    all_ind[..., 0] = all_ind[..., 0].clamp(min=0, max=size_factor_zyx[0] - 1)
    all_ind[..., 1] = all_ind[..., 1].clamp(min=0, max=size_factor_zyx[1] - 1)
    all_ind[..., 2] = all_ind[..., 2].clamp(min=0, max=size_factor_zyx[2] - 1)
    all_ind_norm = (all_ind / (size_factor_zyx-1)) * 2 - 1
    feat = F.grid_sample(grid, all_ind_norm, mode=mode, align_corners=align_corners)
    all_ind = all_ind.view(1, 1, 1, -1, 6, len(displace_list), 3)
    # index distance between each +/- neighbor pair; max over coords picks the
    # (single) nonzero axis component, shrunk where clamping was applied
    diff = all_ind[:, :, :, :, 1::2, :, :] - all_ind[:, :, :, :, 0::2, :, :]
    diff, _ = diff.max(dim=-1)
    feat_ = feat.view(1, 1, 1, -1, 6, len(displace_list))
    feat_diff = feat_[:, :, :, :, 1::2, :] - feat_[:, :, :, :, 0::2, :]
    # central difference, converted to world units via voxel_size
    grad = feat_diff / diff / self.voxel_size
    feat = feat.view(shape[-1], 6, len(displace_list))
    grad = grad.view(shape[-1], 3, len(displace_list))
    if use_grad_norm:
        grad = grad / (grad.norm(dim=1, keepdim=True) + 1e-5)
    feat = feat.view(shape[-1], 6 * len(displace_list))
    grad = grad.view(shape[-1], 3 * len(displace_list))
    return feat, grad
def hit_coarse_geo(self, rays_o, rays_d, near, far, stepsize, **render_kwargs):
    '''Check whether the rays hit the solved coarse geometry or not.'''
    # the given far can be too small while rays stop when hitting scene bbox
    far = 1e9
    shape = rays_o.shape[:-1]
    origins = rays_o.reshape(-1, 3).contiguous()
    directions = rays_d.reshape(-1, 3).contiguous()
    step = stepsize * self.voxel_size
    pts, mask_outbbox, ray_id = render_utils_cuda.sample_pts_on_rays(
        origins, directions, self.xyz_min, self.xyz_max, near, far, step)[:3]
    inside = ~mask_outbbox
    hit = torch.zeros([len(origins)], dtype=torch.bool)
    # a ray counts as a hit if any of its in-bbox samples lands in the mask cache
    hit[ray_id[inside][self.mask_cache(pts[inside])]] = 1
    return hit.reshape(shape)
def sample_ray(self, rays_o, rays_d, near, far, stepsize, **render_kwargs):
    '''Sample query points on rays.
    All the output points are sorted from near to far.
    Input:
        rays_o, rays_d: both in [N, 3] indicating ray configurations.
        near, far: the near and far distance of the rays.
        stepsize: the number of voxels of each sample step.
    Output:
        ray_pts: [M, 3] storing all the sampled points.
        ray_id: [M] the index of the ray of each point.
        step_id: [M] the i'th step on a ray of each point.
    '''
    far = 1e9  # the given far can be too small while rays stop when hitting scene bbox
    rays_o = rays_o.contiguous()
    rays_d = rays_d.contiguous()
    stepdist = stepsize * self.voxel_size
    ray_pts, mask_outbbox, ray_id, step_id, N_steps, t_min, t_max = render_utils_cuda.sample_pts_on_rays(
        rays_o, rays_d, self.xyz_min, self.xyz_max, near, far, stepdist)
    # correct the cuda output N_steps, which could have a bias of 1 randomly
    # (recomputed from ray_id occurrence counts instead of trusting the kernel)
    N_steps = ray_id.unique(return_counts=True)[1]
    # keep only samples inside the scene bbox
    mask_inbbox = ~mask_outbbox
    ray_pts = ray_pts[mask_inbbox]
    ray_id = ray_id[mask_inbbox]
    step_id = step_id[mask_inbbox]
    return ray_pts, ray_id, step_id, mask_outbbox, N_steps
def forward(self, rays_o, rays_d, viewdirs, global_step=None, **render_kwargs):
    '''Volume rendering.

    Args:
        rays_o, rays_d: (N, 3) ray origins and directions.
        viewdirs: (N, 3) unit view directions.
        global_step: current iteration; None signals eval mode.
        render_kwargs: must include 'stepsize' and 'bg'; optional flags
            'render_grad' and 'render_depth' enable extra outputs.

    Returns:
        dict with rgb_marched, weights, depth/disp, normals, s_val, etc.
        When use_rgb_k is set, 'rgb_marched' is the refined color and the
        first-stage color is exposed as 'rgb_marched0'.
    '''
    ret_dict = {}
    N = len(rays_o)

    # --- sample points along the rays inside the scene bbox ---
    ray_pts, ray_id, step_id, mask_outbbox, N_steps = self.sample_ray(
        rays_o=rays_o, rays_d=rays_d, is_train=global_step is not None, **render_kwargs)
    # interval = render_kwargs['stepsize'] * self.voxel_size_ratio
    gradient, gradient_error = None, None

    # drop samples in known free space according to the coarse mask cache
    if self.mask_cache is not None:
        mask = self.mask_cache(ray_pts)
        ray_pts = ray_pts[mask]
        ray_id = ray_id[mask]
        step_id = step_id[mask]
        # fold the newly-masked samples into mask_outbbox for the caller
        mask_outbbox[~mask_outbbox] |= ~mask

    # --- query SDF + gradient and convert to alphas (NeuS) ---
    sdf_grid = self.smooth_conv(self.sdf.grid) if self.smooth_sdf else self.sdf.grid
    sdf, gradient, feat = self.grid_sampler(ray_pts, sdf_grid, sample_ret=True, sample_grad=True, displace=1.0)
    dist = render_kwargs['stepsize'] * self.voxel_size
    s_val, alpha = self.neus_alpha_from_sdf_scatter(viewdirs, ray_id, dist, sdf, gradient, global_step=global_step,
                                                    is_train=global_step is not None, use_mid=True)
    mask = None
    # prune low-alpha samples before the expensive color pass
    if self.fast_color_thres > 0:
        mask = (alpha > self.fast_color_thres)
        alpha = alpha[mask]
        ray_id = ray_id[mask]
        ray_pts = ray_pts[mask]
        step_id = step_id[mask]
        gradient = gradient[mask]  # merge to sample once
        sdf = sdf[mask]
    # compute accumulated transmittance
    # NOTE(review): defensive squeeze with a diagnostic print; looks like a
    # workaround for a rare 2-D ray_id from upstream — confirm if still needed.
    if ray_id.ndim == 2:
        print(mask, alpha, ray_id)
        mask = mask.squeeze()
        alpha = alpha.squeeze()
        ray_id = ray_id.squeeze()
        ray_pts = ray_pts.squeeze()
        step_id = step_id.squeeze()
        gradient = gradient.squeeze()
        sdf = sdf.squeeze()
    weights, alphainv_last = Alphas2Weights.apply(alpha, ray_id, N)
    # prune again on the final ray-marching weights
    if self.fast_color_thres > 0:
        mask = (weights > self.fast_color_thres)
        weights = weights[mask]
        alpha = alpha[mask]
        ray_pts = ray_pts[mask]
        ray_id = ray_id[mask]
        step_id = step_id[mask]
        gradient = gradient[mask]
        sdf = sdf[mask]

    # --- gather hierarchical SDF features for the color MLP ---
    k0 = self.k0(ray_pts)
    # NOTE(review): comparing list(set(...)) relies on identical iteration
    # order of both sets — fragile, though it holds for equal contents here.
    all_grad_inds = list(set(self.grad_feat + self.k_grad_feat))
    all_sdf_inds = list(set(self.sdf_feat + self.k_sdf_feat))
    assert all_grad_inds == all_sdf_inds
    if len(all_grad_inds) > 0:
        all_grad_inds = sorted(all_grad_inds)
        all_grad_inds_ = deepcopy(all_grad_inds)
        all_feat, all_grad = self.sample_sdfs(ray_pts, sdf_grid, displace_list=all_grad_inds_, use_grad_norm=self.use_grad_norm)
    else:
        all_feat, all_grad = None, None
    # refresh the cached dense gradient grid (used by regularizers elsewhere)
    self.gradient = self.neus_sdf_gradient()

    # --- positional / directional encodings ---
    viewdirs_emb = (viewdirs.unsqueeze(-1) * self.viewfreq).flatten(-2)
    viewdirs_emb = torch.cat([viewdirs, viewdirs_emb.sin(), viewdirs_emb.cos()], -1)
    rays_xyz = (ray_pts - self.xyz_min) / (self.xyz_max - self.xyz_min)
    xyz_emb = (rays_xyz.unsqueeze(-1) * self.posfreq).flatten(-2)
    xyz_emb = torch.cat([rays_xyz, xyz_emb.sin(), xyz_emb.cos()], -1)

    # --- first-stage color MLP ---
    if self.use_rgbnet_k0:
        rgb_feat = torch.cat([
            k0, xyz_emb, viewdirs_emb.flatten(0, -2)[ray_id]
        ], -1)
    else:
        rgb_feat = torch.cat([
            xyz_emb, viewdirs_emb.flatten(0, -2)[ray_id]
        ], -1)
    hierarchical_feats = []
    if self.center_sdf:
        hierarchical_feats.append(sdf[:, None])
    if len(all_grad_inds) > 0:
        hierarchical_feats.append(all_feat)
        hierarchical_feats.append(all_grad)
    if len(hierarchical_feats) > 0:
        rgb_feat = torch.cat([rgb_feat, *hierarchical_feats], dim=-1)
    rgb_logit = self.rgbnet(rgb_feat)
    rgb = torch.sigmoid(rgb_logit)

    # --- optional second-stage ("k") refinement MLP ---
    if self.use_rgb_k:
        k_xyz_emb = (rays_xyz.unsqueeze(-1) * self.k_posfreq).flatten(-2)
        k_xyz_emb = torch.cat([rays_xyz, k_xyz_emb.sin(), k_xyz_emb.cos()], -1)
        k_viewdirs_emb = (viewdirs.unsqueeze(-1) * self.k_viewfreq).flatten(-2)
        k_viewdirs_emb = torch.cat([viewdirs, k_viewdirs_emb.sin(), k_viewdirs_emb.cos()], -1)
        k_rgb_feat = torch.cat([
            k0, k_xyz_emb, k_viewdirs_emb.flatten(0, -2)[ray_id]
        ], -1)
        # the refinement stage only supports the plain unit-displace gradient
        assert len(self.k_grad_feat) == 1 and self.k_grad_feat[0] == 1.0
        assert len(self.k_sdf_feat) == 0
        all_feats_ = [gradient]
        if self.k_center_sdf:
            all_feats_.append(sdf[:, None])
        if len(all_feats_) > 0:
            all_feats_ = torch.cat(all_feats_, dim=-1)
            k_rgb_feat = torch.cat([k_rgb_feat, all_feats_], dim=-1)
        if self.k_res:
            # optionally feed (and/or detach) the stage-1 logits as input
            color_feat = rgb_logit
            if self.k_detach_1:
                k_rgb_feat = torch.cat([k_rgb_feat, color_feat.detach()], dim=-1)
            else:
                k_rgb_feat = torch.cat([k_rgb_feat, color_feat], dim=-1)
        # residual prediction on top of the stage-1 logits
        if self.k_detach_2:
            k_rgb_logit = rgb_logit.detach() + self.k_rgbnet(k_rgb_feat)
        else:
            k_rgb_logit = rgb_logit + self.k_rgbnet(k_rgb_feat)
        k_rgb = torch.sigmoid(k_rgb_logit)
        k_rgb_marched = segment_coo(
            src=(weights.unsqueeze(-1) * k_rgb),
            index=ray_id, out=torch.zeros([N, 3]), reduce='sum') + alphainv_last[..., None] * render_kwargs['bg']
        k_rgb_marched = k_rgb_marched.clamp(0, 1)
    else:
        k_rgb_marched = None

    # --- ray marching: alpha-composite per-sample colors, add background ---
    rgb_marched = segment_coo(
        src=(weights.unsqueeze(-1) * rgb),
        index=ray_id, out=torch.zeros([N, 3]), reduce='sum') + alphainv_last[..., None] * render_kwargs['bg']

    # optional normal map from normalized SDF gradients
    if gradient is not None and render_kwargs.get('render_grad', False):
        normal = gradient / (gradient.norm(2, -1, keepdim=True) + 1e-6)
        normal_marched = segment_coo(
            src=(weights.unsqueeze(-1) * normal),
            index=ray_id, out=torch.zeros([N, 3]), reduce='sum')
    else:
        normal_marched = None

    # optional expected depth (step index times step length)
    if render_kwargs.get('render_depth', False):
        with torch.no_grad():
            depth = segment_coo(
                src=(weights * step_id * dist), index=ray_id, out=torch.zeros([N]), reduce='sum')
        disp = 1 / depth
    else:
        depth = None
        disp = 0

    ret_dict.update({
        'alphainv_cum': alphainv_last,
        'weights': weights,
        'rgb_marched': rgb_marched,
        # 'k_rgb_marched': k_rgb_marched,
        'normal_marched': normal_marched,
        'raw_alpha': alpha,
        'raw_rgb': rgb,
        'depth': depth,
        'disp': disp,
        'mask': mask,
        'mask_outbbox': mask_outbbox,
        'gradient': gradient,
        "gradient_error": gradient_error,
        "s_val": s_val,
    })
    # when the refinement stage is active, expose it as the main color
    if self.use_rgb_k:
        ret_dict.update({
            'rgb_marched': k_rgb_marched,
            'rgb_marched0': rgb_marched,
        })
    return ret_dict
def mesh_color_forward(self, ray_pts, **kwargs):
    """Predict colors at given 3-D points using the inward surface normal
    (-gradient) as the view direction.

    Mirrors the color pipeline of `forward` but without ray marching;
    presumably used to paint an extracted mesh — confirm against callers.

    Args:
        ray_pts: (M, 3) world-space points.
    Returns:
        (M, 3) sigmoid colors (refined by the "k" stage when enabled).
    """
    sdf_grid = self.smooth_conv(
        self.sdf.grid) if self.smooth_sdf else self.sdf.grid
    # self.gradient = self.neus_sdf_gradient()
    sdf, gradient, feat = self.grid_sampler(ray_pts, sdf_grid, sample_ret=True, sample_grad=True, displace=1.0)
    # view direction := inward normal at each point
    normal = gradient / (gradient.norm(dim=-1, keepdim=True) + 1e-5)
    viewdirs = -normal
    k0 = self.k0(ray_pts)
    # same hierarchical feature selection as in forward()
    all_grad_inds = list(set(self.grad_feat + self.k_grad_feat))
    all_sdf_inds = list(set(self.sdf_feat + self.k_sdf_feat))
    assert all_grad_inds == all_sdf_inds
    if len(all_grad_inds) > 0:
        all_grad_inds = sorted(all_grad_inds)
        all_grad_inds_ = deepcopy(all_grad_inds)
        all_feat, all_grad = self.sample_sdfs(ray_pts, sdf_grid,
                                              displace_list=all_grad_inds_,
                                              use_grad_norm=self.use_grad_norm)
    else:
        all_feat, all_grad = None, None
    # positional / directional encodings (note: no [ray_id] gather here,
    # one view direction per point)
    viewdirs_emb = (viewdirs.unsqueeze(-1) * self.viewfreq).flatten(-2)
    viewdirs_emb = torch.cat(
        [viewdirs, viewdirs_emb.sin(), viewdirs_emb.cos()], -1)
    rays_xyz = (ray_pts - self.xyz_min) / (self.xyz_max - self.xyz_min)
    xyz_emb = (rays_xyz.unsqueeze(-1) * self.posfreq).flatten(-2)
    xyz_emb = torch.cat([rays_xyz, xyz_emb.sin(), xyz_emb.cos()], -1)
    if self.use_rgbnet_k0:
        rgb_feat = torch.cat([
            k0, xyz_emb, viewdirs_emb.flatten(0, -2)
        ], -1)
    else:
        rgb_feat = torch.cat([
            xyz_emb, viewdirs_emb.flatten(0, -2)
        ], -1)
    hierarchical_feats = []
    if self.center_sdf:
        hierarchical_feats.append(sdf[:, None])
    if len(all_grad_inds) > 0:
        hierarchical_feats.append(all_feat)
        hierarchical_feats.append(all_grad)
    if len(hierarchical_feats) > 0:
        rgb_feat = torch.cat([rgb_feat, *hierarchical_feats], dim=-1)
    rgb_logit = self.rgbnet(rgb_feat)
    rgb = torch.sigmoid(rgb_logit)
    # optional "k" refinement stage, residual on the stage-1 logits
    if self.use_rgb_k:
        k_xyz_emb = (rays_xyz.unsqueeze(-1) * self.k_posfreq).flatten(-2)
        k_xyz_emb = torch.cat([rays_xyz, k_xyz_emb.sin(), k_xyz_emb.cos()],
                              -1)
        k_viewdirs_emb = (viewdirs.unsqueeze(-1) * self.k_viewfreq).flatten(
            -2)
        k_viewdirs_emb = torch.cat(
            [viewdirs, k_viewdirs_emb.sin(), k_viewdirs_emb.cos()], -1)
        k_rgb_feat = torch.cat([
            k0, k_xyz_emb, k_viewdirs_emb.flatten(0, -2)
        ], -1)
        assert len(self.k_grad_feat) == 1 and self.k_grad_feat[0] == 1.0
        assert len(self.k_sdf_feat) == 0
        all_feats_ = [gradient]
        if self.k_center_sdf:
            all_feats_.append(sdf[:, None])
        if len(all_feats_) > 0:
            all_feats_ = torch.cat(all_feats_, dim=-1)
            k_rgb_feat = torch.cat([k_rgb_feat, all_feats_], dim=-1)
        if self.k_res:
            color_feat = rgb_logit
            if self.k_detach_1:
                k_rgb_feat = torch.cat([k_rgb_feat, color_feat.detach()], dim=-1)
            else:
                k_rgb_feat = torch.cat([k_rgb_feat, color_feat], dim=-1)
        if self.k_detach_2:
            k_rgb_logit = rgb_logit.detach() + self.k_rgbnet(k_rgb_feat)
        else:
            k_rgb_logit = rgb_logit + self.k_rgbnet(k_rgb_feat)
        rgb = torch.sigmoid(k_rgb_logit)
    return rgb
def extract_geometry(self, bound_min, bound_max, resolution=128, threshold=0.0, smooth=True, sigma=0.5, **kwargs):
    """Extract a mesh from the (optionally smoothed) SDF grid.

    Delegates to the module-level extract_geometry with a query function that
    samples the sign-flipped SDF.
    """
    if self.smooth_sdf:
        sdf_grid = self.smooth_conv(self.sdf.grid)
    elif smooth:
        self.init_smooth_conv_test_k3(sigma=sigma)
        sdf_grid = self.smooth_conv_test_k3(self.sdf.grid)
    else:
        sdf_grid = self.sdf.grid
    if resolution is None:
        resolution = self.world_size[0]
    # note the sign flip on the SDF when querying
    query = lambda pts: self.grid_sampler(pts, -sdf_grid)
    return extract_geometry(bound_min,
                            bound_max,
                            resolution=resolution,
                            threshold=threshold,
                            query_func=query)
''' Module for the searched coarse geometry
It supports query for the known free space and unknown space.
'''
class MaskCache(nn.Module):
    """Query known free/occupied space from a cached coarse density grid.

    Loads a coarse-stage checkpoint and keeps a max-pooled copy of its density
    grid; `forward` reports which query points have alpha >= the threshold.
    """
    def __init__(self, path, mask_cache_thres, ks=3):
        super().__init__()
        st = torch.load(path)
        self.mask_cache_thres = mask_cache_thres
        self.register_buffer('xyz_min', torch.FloatTensor(st['MaskCache_kwargs']['xyz_min']))
        self.register_buffer('xyz_max', torch.FloatTensor(st['MaskCache_kwargs']['xyz_max']))
        # max-pool dilates the occupied region so borderline voxels survive
        self.register_buffer('density', F.max_pool3d(
            st['model_state_dict']['density'], kernel_size=ks, padding=ks//2, stride=1))
        self.act_shift = st['MaskCache_kwargs']['act_shift']
        self.voxel_size_ratio = st['MaskCache_kwargs']['voxel_size_ratio']
        self.nearest = st['MaskCache_kwargs'].get('nearest', False)

    @torch.no_grad()
    def forward(self, xyz):
        """Return a boolean mask of which world-space points `xyz` are occupied."""
        shape = xyz.shape[:-1]
        xyz = xyz.reshape(1, 1, 1, -1, 3)
        # normalize to [-1, 1], flip xyz->zyx for grid_sample's convention
        ind_norm = ((xyz - self.xyz_min) / (self.xyz_max - self.xyz_min)).flip((-1,)) * 2 - 1
        # The two original branches differed only in the interpolation mode
        # ('nearest' vs grid_sample's default 'bilinear'); fold into one call.
        mode = 'nearest' if self.nearest else 'bilinear'
        density = F.grid_sample(self.density, ind_norm, align_corners=True, mode=mode)
        alpha = 1 - torch.exp(-F.softplus(density + self.act_shift) * self.voxel_size_ratio)
        alpha = alpha.reshape(*shape)
        return (alpha >= self.mask_cache_thres)
''' Misc
'''
def cumprod_exclusive(p):
    """Exclusive cumulative product along the last dim, with a leading column of ones.

    Clamping at 1e-10 keeps things stable late in training (original note:
    unclear why, but it gets slow otherwise).
    """
    leading_ones = torch.ones_like(p[..., [0]])
    running = p.clamp_min(1e-10).cumprod(-1)
    return torch.cat([leading_ones, running], -1)
def get_ray_marching_ray(alpha):
    """Turn per-step alphas into ray-marching weights and cumulative transmittance."""
    transmittance = cumprod_exclusive(1 - alpha)
    weights = alpha * transmittance[..., :-1]
    return weights, transmittance
def total_variation(v, mask=None):
    """Mean absolute difference between neighboring voxels along each spatial axis.

    Args:
        v: (1, C, X, Y, Z) grid.
        mask: optional boolean grid; a difference is kept only when both of
            its endpoint voxels are masked in.

    Returns:
        Scalar tensor: average of the three per-axis TV means.
    """
    # Tensor.diff (available since torch 1.8) is the fused fast path. The
    # original gated it on the exact string '1.10.0', which silently fell back
    # to manual slicing on every other torch version; use a feature check.
    if hasattr(torch.Tensor, 'diff'):
        tv2 = v.diff(dim=2).abs()
        tv3 = v.diff(dim=3).abs()
        tv4 = v.diff(dim=4).abs()
    else:
        tv2 = (v[:,:,1:,:,:] - v[:,:,:-1,:,:]).abs()
        tv3 = (v[:,:,:,1:,:] - v[:,:,:,:-1,:]).abs()
        tv4 = (v[:,:,:,:,1:] - v[:,:,:,:,:-1]).abs()
    if mask is not None:
        tv2 = tv2[mask[:,:,:-1] & mask[:,:,1:]]
        tv3 = tv3[mask[:,:,:,:-1] & mask[:,:,:,1:]]
        tv4 = tv4[mask[:,:,:,:,:-1] & mask[:,:,:,:,1:]]
    return (tv2.mean() + tv3.mean() + tv4.mean()) / 3
def total_variation_step2(v, mask=None):
    """TV over 2-voxel strides (central-difference style), averaged across axes."""
    diffs = [
        (v[:, :, 2:, :, :] - v[:, :, :-2, :, :]).abs() / 2,
        (v[:, :, :, 2:, :] - v[:, :, :, :-2, :]).abs() / 2,
        (v[:, :, :, :, 2:] - v[:, :, :, :, :-2]).abs() / 2,
    ]
    if mask is not None:
        keeps = [
            mask[:, :, :-2] & mask[:, :, 2:],
            mask[:, :, :, :-2] & mask[:, :, :, 2:],
            mask[:, :, :, :, :-2] & mask[:, :, :, :, 2:],
        ]
        diffs = [d[k] for d, k in zip(diffs, keeps)]
    return sum(d.mean() for d in diffs) / 3
class Alphas2Weights(torch.autograd.Function):
    """Custom autograd op: per-sample alphas -> ray-marching weights.

    Wraps the CUDA kernels `alpha2weight` / `alpha2weight_backward`, which
    handle the flattened (alpha, ray_id) sample layout and return per-ray
    final transmittance alongside the weights.
    """
    @staticmethod
    def forward(ctx, alpha, ray_id, N):
        weights, T, alphainv_last, i_start, i_end = render_utils_cuda.alpha2weight(alpha, ray_id, N)
        if alpha.requires_grad:
            # stash everything the CUDA backward needs, incl. per-ray segment bounds
            ctx.save_for_backward(alpha, weights, T, alphainv_last, i_start, i_end)
            ctx.n_rays = N
        return weights, alphainv_last

    @staticmethod
    @torch.autograd.function.once_differentiable
    def backward(ctx, grad_weights, grad_last):
        alpha, weights, T, alphainv_last, i_start, i_end = ctx.saved_tensors
        grad = render_utils_cuda.alpha2weight_backward(
            alpha, weights, T, alphainv_last,
            i_start, i_end, ctx.n_rays, grad_weights, grad_last)
        # gradient only w.r.t. alpha; ray_id and N are non-differentiable
        return grad, None, None
''' Ray and batch
'''
def get_rays(H, W, K, c2w, inverse_y, flip_x, flip_y, mode='center'):
    """Generate world-space ray origins and directions for an HxW pinhole camera.

    mode selects the sub-pixel sample location: 'lefttop', 'center', or
    'random' (uniform jitter within each pixel).
    """
    xs, ys = torch.meshgrid(
        torch.linspace(0, W-1, W, device=c2w.device),
        torch.linspace(0, H-1, H, device=c2w.device))  # pytorch's meshgrid has indexing='ij'
    xs = xs.t().float()
    ys = ys.t().float()
    if mode == 'center':
        xs, ys = xs + 0.5, ys + 0.5
    elif mode == 'random':
        xs = xs + torch.rand_like(xs)
        ys = ys + torch.rand_like(ys)
    elif mode != 'lefttop':
        raise NotImplementedError
    if flip_x:
        xs = xs.flip((1,))
    if flip_y:
        ys = ys.flip((0,))
    if inverse_y:
        dirs = torch.stack([(xs-K[0][2])/K[0][0], (ys-K[1][2])/K[1][1], torch.ones_like(xs)], -1)
    else:
        dirs = torch.stack([(xs-K[0][2])/K[0][0], -(ys-K[1][2])/K[1][1], -torch.ones_like(xs)], -1)
    # Rotate ray directions from camera frame to the world frame
    rays_d = torch.sum(dirs[..., np.newaxis, :] * c2w[:3,:3], -1)  # dot product, equals to: [c2w.dot(dir) for dir in dirs]
    # Every ray starts at the camera center
    rays_o = c2w[:3,3].expand(rays_d.shape)
    return rays_o, rays_d
def get_rays_np(H, W, K, c2w):
    """NumPy variant of get_rays (y negated, z fixed at -1 in camera frame)."""
    xs, ys = np.meshgrid(np.arange(W, dtype=np.float32), np.arange(H, dtype=np.float32), indexing='xy')
    dirs = np.stack([(xs - K[0][2]) / K[0][0], -(ys - K[1][2]) / K[1][1], -np.ones_like(xs)], -1)
    # camera->world rotation, then broadcast the camera center as every origin
    rays_d = np.sum(dirs[..., np.newaxis, :] * c2w[:3, :3], -1)
    rays_o = np.broadcast_to(c2w[:3, 3], np.shape(rays_d))
    return rays_o, rays_d
def ndc_rays(H, W, focal, near, rays_o, rays_d):
    """Shift ray origins to the near plane and re-express rays in NDC space."""
    # Move each origin onto the plane z = -near
    t = -(near + rays_o[..., 2]) / rays_d[..., 2]
    rays_o = rays_o + t[..., None] * rays_d
    ox, oy, oz = rays_o[..., 0], rays_o[..., 1], rays_o[..., 2]
    dx, dy, dz = rays_d[..., 0], rays_d[..., 1], rays_d[..., 2]
    # projection scale factors
    sx = -1. / (W / (2. * focal))
    sy = -1. / (H / (2. * focal))
    o0 = sx * ox / oz
    o1 = sy * oy / oz
    o2 = 1. + 2. * near / oz
    d0 = sx * (dx / dz - ox / oz)
    d1 = sy * (dy / dz - oy / oz)
    d2 = -2. * near / oz
    return torch.stack([o0, o1, o2], -1), torch.stack([d0, d1, d2], -1)
def get_rays_of_a_view(H, W, K, c2w, ndc, inverse_y, flip_x, flip_y, mode='center'):
    """Build rays for one camera view, plus unit-norm view directions.

    Note: viewdirs are computed from the pre-NDC directions.
    """
    rays_o, rays_d = get_rays(H, W, K, c2w, inverse_y=inverse_y, flip_x=flip_x, flip_y=flip_y, mode=mode)
    viewdirs = rays_d / rays_d.norm(dim=-1, keepdim=True)
    if ndc:
        rays_o, rays_d = ndc_rays(H, W, K[0][0], 1., rays_o, rays_d)
    return rays_o, rays_d, viewdirs
@torch.no_grad()
def get_training_rays(rgb_tr, train_poses, HW, Ks, ndc, inverse_y, flip_x, flip_y):
    """Precompute per-image ray tensors for training (all images same size/intrinsics).

    Returns:
        (rgb_tr, rays_o_tr, rays_d_tr, viewdirs_tr, imsz) where the ray
        tensors are [n_img, H, W, 3] on rgb_tr's device.
        NOTE(review): imsz is [1] per image here, while the other loaders
        return per-image pixel counts — confirm what consumers expect.
    """
    print('get_training_rays: start')
    # this fast path requires identical resolution and intrinsics everywhere
    assert len(np.unique(HW, axis=0)) == 1
    assert len(np.unique(Ks.reshape(len(Ks),-1), axis=0)) == 1
    assert len(rgb_tr) == len(train_poses) and len(rgb_tr) == len(Ks) and len(rgb_tr) == len(HW)
    H, W = HW[0]
    K = Ks[0]
    eps_time = time.time()
    rays_o_tr = torch.zeros([len(rgb_tr), H, W, 3], device=rgb_tr.device)
    rays_d_tr = torch.zeros([len(rgb_tr), H, W, 3], device=rgb_tr.device)
    viewdirs_tr = torch.zeros([len(rgb_tr), H, W, 3], device=rgb_tr.device)
    imsz = [1] * len(rgb_tr)
    for i, c2w in enumerate(train_poses):
        rays_o, rays_d, viewdirs = get_rays_of_a_view(
            H=H, W=W, K=K, c2w=c2w, ndc=ndc, inverse_y=inverse_y, flip_x=flip_x, flip_y=flip_y)
        # copy into the preallocated buffers and free the temporaries
        rays_o_tr[i].copy_(rays_o.to(rgb_tr.device))
        rays_d_tr[i].copy_(rays_d.to(rgb_tr.device))
        viewdirs_tr[i].copy_(viewdirs.to(rgb_tr.device))
        del rays_o, rays_d, viewdirs
    eps_time = time.time() - eps_time
    print('get_training_rays: finish (eps time:', eps_time, 'sec)')
    return rgb_tr, rays_o_tr, rays_d_tr, viewdirs_tr, imsz
@torch.no_grad()
def get_training_rays_flatten(rgb_tr_ori, train_poses, HW, Ks, ndc, inverse_y, flip_x, flip_y):
    """Flatten per-image colors and rays into single [N, 3] training tensors.

    Supports images of different resolutions; returns (rgb, rays_o, rays_d,
    viewdirs, imsz) with imsz holding per-image pixel counts.
    """
    print('get_training_rays_flatten: start')
    assert len(rgb_tr_ori) == len(train_poses) and len(rgb_tr_ori) == len(Ks) and len(rgb_tr_ori) == len(HW)
    t0 = time.time()
    device = rgb_tr_ori[0].device
    total = sum(im.shape[0] * im.shape[1] for im in rgb_tr_ori)
    rgb_tr = torch.zeros([total, 3], device=device)
    rays_o_tr = torch.zeros_like(rgb_tr)
    rays_d_tr = torch.zeros_like(rgb_tr)
    viewdirs_tr = torch.zeros_like(rgb_tr)
    imsz = []
    offset = 0
    for c2w, img, (H, W), K in zip(train_poses, rgb_tr_ori, HW, Ks):
        assert img.shape[:2] == (H, W)
        rays_o, rays_d, viewdirs = get_rays_of_a_view(
            H=H, W=W, K=K, c2w=c2w, ndc=ndc,
            inverse_y=inverse_y, flip_x=flip_x, flip_y=flip_y)
        count = H * W
        sl = slice(offset, offset + count)
        rgb_tr[sl].copy_(img.flatten(0, 1))
        rays_o_tr[sl].copy_(rays_o.flatten(0, 1).to(device))
        rays_d_tr[sl].copy_(rays_d.flatten(0, 1).to(device))
        viewdirs_tr[sl].copy_(viewdirs.flatten(0, 1).to(device))
        imsz.append(count)
        offset += count
    assert offset == total
    print('get_training_rays_flatten: finish (eps time:', time.time() - t0, 'sec)')
    return rgb_tr, rays_o_tr, rays_d_tr, viewdirs_tr, imsz
def get_training_rays_in_maskcache_sampling(rgb_tr_ori, train_poses, HW, Ks, ndc, inverse_y, flip_x, flip_y, model, render_kwargs, rgbnet_sup_reduce=1):
    """Flatten training rays, keeping only pixels whose rays hit the coarse geometry.

    Uses model.hit_coarse_geo to discard rays that never intersect the cached
    occupied space, shrinking the training set. `rgbnet_sup_reduce` is unused
    here (kept for interface compatibility).

    Returns:
        (rgb_tr, rays_o_tr, rays_d_tr, viewdirs_tr, imsz), all trimmed to the
        surviving rays; imsz holds per-image surviving counts.
    """
    print('get_training_rays_in_maskcache_sampling: start')
    assert len(rgb_tr_ori) == len(train_poses) and len(rgb_tr_ori) == len(Ks) and len(rgb_tr_ori) == len(HW)
    CHUNK = 64  # rows of pixels tested per hit_coarse_geo call
    DEVICE = rgb_tr_ori[0].device
    eps_time = time.time()
    # allocate for the worst case (all pixels kept), trim at the end
    N = sum(im.shape[0] * im.shape[1] for im in rgb_tr_ori)
    rgb_tr = torch.zeros([N,3], device=DEVICE)
    rays_o_tr = torch.zeros_like(rgb_tr)
    rays_d_tr = torch.zeros_like(rgb_tr)
    viewdirs_tr = torch.zeros_like(rgb_tr)
    imsz = []
    top = 0
    for c2w, img, (H, W), K in zip(train_poses, rgb_tr_ori, HW, Ks):
        assert img.shape[:2] == (H, W)
        rays_o, rays_d, viewdirs = get_rays_of_a_view(
            H=H, W=W, K=K, c2w=c2w, ndc=ndc,
            inverse_y=inverse_y, flip_x=flip_x, flip_y=flip_y)
        # per-pixel keep mask: does the ray hit the coarse geometry?
        mask = torch.ones(img.shape[:2], device=DEVICE, dtype=torch.bool)
        for i in range(0, img.shape[0], CHUNK):
            mask[i:i+CHUNK] = model.hit_coarse_geo(
                rays_o=rays_o[i:i+CHUNK], rays_d=rays_d[i:i+CHUNK], **render_kwargs).to(DEVICE)
        n = mask.sum()
        rgb_tr[top:top+n].copy_(img[mask])
        rays_o_tr[top:top+n].copy_(rays_o[mask].to(DEVICE))
        rays_d_tr[top:top+n].copy_(rays_d[mask].to(DEVICE))
        viewdirs_tr[top:top+n].copy_(viewdirs[mask].to(DEVICE))
        imsz.append(n)
        top += n
    print('get_training_rays_in_maskcache_sampling: ratio', top / N)
    # trim the over-allocated buffers to the surviving ray count
    rgb_tr = rgb_tr[:top]
    rays_o_tr = rays_o_tr[:top]
    rays_d_tr = rays_d_tr[:top]
    viewdirs_tr = viewdirs_tr[:top]
    eps_time = time.time() - eps_time
    print('get_training_rays_in_maskcache_sampling: finish (eps time:', eps_time, 'sec)')
    return rgb_tr, rays_o_tr, rays_d_tr, viewdirs_tr, imsz
def batch_indices_generator(N, BS):
    """Infinite generator of random index batches of size BS over range(N).

    Reshuffles whenever fewer than BS indices remain. Uses numpy's
    permutation because, per the original note, torch.randperm on CUDA
    produced incorrect results on the author's machine.
    """
    perm = torch.LongTensor(np.random.permutation(N))
    cursor = 0
    while True:
        if cursor + BS > N:
            perm = torch.LongTensor(np.random.permutation(N))
            cursor = 0
        yield perm[cursor:cursor + BS]
        cursor += BS
| 51,680 | 42.871817 | 153 | py |
Voxurf | Voxurf-main/lib/load_nerfpp.py | '''
Modify from
https://github.com/Kai-46/nerfplusplus/blob/master/data_loader_split.py
'''
import os
import glob
import scipy
import imageio
import numpy as np
import torch
########################################################################################################################
# camera coordinate system: x-->right, y-->down, z-->scene (opencv/colmap convention)
# poses is camera-to-world
########################################################################################################################
def find_files(dir, exts):
    """Return the sorted files in `dir` matching any glob pattern in `exts`.

    Returns [] when `dir` is not a directory or nothing matches.
    """
    if not os.path.isdir(dir):
        return []
    matches = []
    for pattern in exts:
        matches.extend(glob.glob(os.path.join(dir, pattern)))
    return sorted(matches) if matches else []
def load_data_split(split_dir, skip=1, try_load_min_depth=True, only_img_files=False):
    """Collect per-camera file paths (intrinsics/pose/rgb/mask/min_depth) for a split.

    Missing optional file groups (rgb, mask, min_depth) come back as lists of
    None with one entry per camera; `skip` subsamples every group identically.
    """
    def parse_txt(filename):
        # 4x4 matrix stored as 16 whitespace-separated numbers
        assert os.path.isfile(filename)
        nums = open(filename).read().split()
        return np.array([float(x) for x in nums]).reshape([4, 4]).astype(np.float32)

    if only_img_files:
        return find_files('{}/rgb'.format(split_dir), exts=['*.png', '*.jpg'])

    # camera parameter files (one txt per view)
    intrinsics_files = find_files('{}/intrinsics'.format(split_dir), exts=['*.txt'])[::skip]
    pose_files = find_files('{}/pose'.format(split_dir), exts=['*.txt'])[::skip]
    cam_cnt = len(pose_files)

    def optional_group(subdir, enabled=True):
        # per-camera paths for an optional image group, or Nones when absent
        files = find_files('{}/{}'.format(split_dir, subdir), exts=['*.png', '*.jpg'])
        if enabled and len(files) > 0:
            files = files[::skip]
            assert len(files) == cam_cnt
            return files
        return [None, ] * cam_cnt

    img_files = optional_group('rgb')
    mask_files = optional_group('mask')
    mindepth_files = optional_group('min_depth', enabled=try_load_min_depth)
    return intrinsics_files, pose_files, img_files, mask_files, mindepth_files
def rerotate_poses(poses, render_poses):
    """Rotate all poses so the cameras' common 'up' axis maps onto [0, -1, 0].

    The up axis is estimated as the PCA direction of minimum variance of the
    camera centers; both pose sets are rotated about the centroid of `poses`.

    Args:
        poses: (N, 4, 4) camera-to-world matrices.
        render_poses: (M, 4, 4) trajectory poses rotated the same way.

    Returns:
        (poses, render_poses) as rotated copies; the inputs are not modified.
    """
    # BUG FIX: the module only does `import scipy`, which does not import the
    # scipy.spatial.transform submodule, so `scipy.spatial.transform.Rotation`
    # could raise AttributeError. Import the submodule explicitly.
    from scipy.spatial.transform import Rotation

    poses = np.copy(poses)
    centroid = poses[:, :3, 3].mean(0)
    poses[:, :3, 3] = poses[:, :3, 3] - centroid
    # Find the PCA direction with the minimum eigenvalue (the cameras' up axis)
    x = poses[:, :3, 3]
    mu = x.mean(0)
    cov = np.cov((x - mu).T)
    ev, eig = np.linalg.eig(cov)
    cams_up = eig[:, np.argmin(ev)]
    if cams_up[1] < 0:
        cams_up = -cams_up
    # Rotation matrix that aligns cams_up with [0, -1, 0]
    R = Rotation.align_vectors([[0, -1, 0]], cams_up[None])[0].as_matrix()
    # Apply rotation and add back the centroid position
    poses[:, :3, :3] = R @ poses[:, :3, :3]
    poses[:, :3, [3]] = R @ poses[:, :3, [3]]
    poses[:, :3, 3] = poses[:, :3, 3] + centroid
    render_poses = np.copy(render_poses)
    render_poses[:, :3, 3] = render_poses[:, :3, 3] - centroid
    render_poses[:, :3, :3] = R @ render_poses[:, :3, :3]
    render_poses[:, :3, [3]] = R @ render_poses[:, :3, [3]]
    render_poses[:, :3, 3] = render_poses[:, :3, 3] + centroid
    return poses, render_poses
def load_nerfpp_data(basedir, rerotate=True):
    """Load a NeRF++-style dataset: train/test splits plus a camera_path trajectory.

    Args:
        basedir: dataset root containing 'train', 'test' and 'camera_path' dirs.
        rerotate: if True, re-align all poses via rerotate_poses.

    Returns:
        (imgs, poses, render_poses, [H, W, focal], K, i_split) where i_split
        is [train_ids, test_ids, test_ids].
    """
    tr_K, tr_c2w, tr_im_path = load_data_split(os.path.join(basedir, 'train'))[:3]
    te_K, te_c2w, te_im_path = load_data_split(os.path.join(basedir, 'test'))[:3]
    assert len(tr_K) == len(tr_c2w) and len(tr_K) == len(tr_im_path)
    assert len(te_K) == len(te_c2w) and len(te_K) == len(te_im_path)
    # Determine split id list (train first, then test, over the stacked arrays)
    i_split = [[], []]
    i = 0
    for _ in tr_c2w:
        i_split[0].append(i)
        i += 1
    for _ in te_c2w:
        i_split[1].append(i)
        i += 1
    # Load camera intrinsics. Assume all images share a intrinsic.
    K_flatten = np.loadtxt(tr_K[0])
    for path in tr_K:
        assert np.allclose(np.loadtxt(path), K_flatten)
    for path in te_K:
        assert np.allclose(np.loadtxt(path), K_flatten)
    K = K_flatten.reshape(4,4)[:3,:3]
    # Load camera poses (4x4 text matrices, train then test)
    poses = []
    for path in tr_c2w:
        poses.append(np.loadtxt(path).reshape(4,4))
    for path in te_c2w:
        poses.append(np.loadtxt(path).reshape(4,4))
    # Load images, normalized to [0, 1]
    imgs = []
    for path in tr_im_path:
        imgs.append(imageio.imread(path) / 255.)
    for path in te_im_path:
        imgs.append(imageio.imread(path) / 255.)
    # Bundle all data
    imgs = np.stack(imgs, 0)
    poses = np.stack(poses, 0)
    # third split reuses the test ids — presumably as the val split; confirm
    # against how callers index i_split
    i_split.append(i_split[1])
    H, W = imgs.shape[1:3]
    focal = K[[0,1], [0,1]].mean()
    # Generate movie trajectory from the camera_path poses
    render_poses_path = sorted(glob.glob(os.path.join(basedir, 'camera_path', 'pose', '*txt')))
    render_poses = []
    for path in render_poses_path:
        render_poses.append(np.loadtxt(path).reshape(4,4))
    render_poses = np.array(render_poses)
    render_K = np.loadtxt(glob.glob(os.path.join(basedir, 'camera_path', 'intrinsics', '*txt'))[0]).reshape(4,4)[:3,:3]
    # rescale the first two pose columns to compensate for the trajectory's
    # different focal lengths — NOTE(review): scales whole 4x4 columns; verify
    render_poses[:,:,0] *= K[0,0] / render_K[0,0]
    render_poses[:,:,1] *= K[1,1] / render_K[1,1]
    if rerotate:
        poses, render_poses = rerotate_poses(poses, render_poses)
    render_poses = torch.Tensor(render_poses)
    return imgs, poses, render_poses, [H, W, focal], K, i_split
| 5,638 | 33.175758 | 120 | py |
Voxurf | Voxurf-main/lib/grid.py | import os
import time
import functools
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.cpp_extension import load
# Directory of this file; the CUDA sources below are resolved relative to it.
parent_dir = os.path.dirname(os.path.abspath(__file__))

# JIT-compile (cached by torch.utils.cpp_extension) the ray-sampling /
# rendering CUDA ops on first import.
render_utils_cuda = load(
        name='render_utils_cuda',
        sources=[
            os.path.join(parent_dir, path)
            for path in [os.path.join('cuda', 'render_utils.cpp'), os.path.join('cuda', 'render_utils_kernel.cu')]],
        verbose=True)

# JIT-compile the total-variation gradient ops used by DenseGrid.
total_variation_cuda = load(
        name='total_variation_cuda',
        sources=[
            os.path.join(parent_dir, path)
            for path in [os.path.join('cuda', 'total_variation.cpp'), os.path.join('cuda', 'total_variation_kernel.cu')]],
        verbose=True)
def create_grid(type, **kwargs):
    """Factory for voxel-grid backends: 'DenseGrid' or 'TensoRFGrid'."""
    if type == 'DenseGrid':
        return DenseGrid(**kwargs)
    if type == 'TensoRFGrid':
        return TensoRFGrid(**kwargs)
    raise NotImplementedError
''' Dense 3D grid
'''
class DenseGrid(nn.Module):
    """Dense voxel grid storing `channels` values per voxel over an axis-aligned bbox."""

    def __init__(self, channels, world_size, xyz_min, xyz_max, **kwargs):
        super(DenseGrid, self).__init__()
        self.channels = channels
        self.world_size = world_size
        # bbox corners as buffers so they follow .to(device) and state_dict
        self.register_buffer('xyz_min', torch.Tensor(xyz_min))
        self.register_buffer('xyz_max', torch.Tensor(xyz_max))
        self.grid = nn.Parameter(torch.zeros([1, channels, *world_size]))

    def forward(self, xyz):
        '''
        xyz: global coordinates to query
        '''
        shape = xyz.shape[:-1]
        xyz = xyz.reshape(1,1,1,-1,3)
        # normalize to [-1, 1] and flip xyz->zyx for grid_sample's convention
        ind_norm = ((xyz - self.xyz_min) / (self.xyz_max - self.xyz_min)).flip((-1,)) * 2 - 1
        out = F.grid_sample(self.grid, ind_norm, mode='bilinear', align_corners=True)
        out = out.reshape(self.channels,-1).T.reshape(*shape,self.channels)
        if self.channels == 1:
            out = out.squeeze(-1)
        return out

    def scale_volume_grid(self, new_world_size):
        # Resample the stored grid to a new resolution (trilinear); a
        # zero-channel grid is simply re-allocated.
        if self.channels == 0:
            self.grid = nn.Parameter(torch.zeros([1, self.channels, *new_world_size]))
        else:
            self.grid = nn.Parameter(
                F.interpolate(self.grid.data, size=tuple(new_world_size), mode='trilinear', align_corners=True))

    def total_variation_add_grad(self, wx, wy, wz, dense_mode, mask=None):
        '''Add gradients by total variation loss in-place'''
        # Accumulates TV gradients directly into self.grid.grad via the CUDA
        # kernels (no autograd graph is built).
        if mask is None:
            total_variation_cuda.total_variation_add_grad(
                self.grid, self.grid.grad, wx, wy, wz, dense_mode)
        else:
            mask = mask.detach()
            # broadcast a single-channel mask across all channels if needed
            if self.grid.size(1) > 1 and mask.size() != self.grid.size():
                mask = mask.repeat(1, self.grid.size(1), 1, 1, 1).contiguous()
            assert mask.size() == self.grid.size()
            total_variation_cuda.total_variation_add_grad_new(
                self.grid, self.grid.grad, mask.float(), wx, wy, wz, dense_mode)

    def get_dense_grid(self):
        # Raw (1, C, X, Y, Z) parameter tensor.
        return self.grid

    @torch.no_grad()
    def __isub__(self, val):
        # In-place `grid -= val` without autograd tracking.
        self.grid.data -= val
        return self

    def extra_repr(self):
        return f'channels={self.channels}, world_size={self.world_size.tolist()}'
''' Vector-Matrix decomposited grid
See TensoRF: Tensorial Radiance Fields (https://arxiv.org/abs/2203.09517)
'''
class TensoRFGrid(nn.Module):
    """Vector-Matrix decomposed feature grid (TensoRF, arXiv:2203.09517).

    The 3D volume is factorized into three plane/vector pairs
    (xy_plane, z_vec), (xz_plane, y_vec), (yz_plane, x_vec) of rank
    config['n_comp'] (the xy/z pair may use a separate rank 'n_comp_xy').
    Queries are resolved by the module-level compute_tensorf_feat /
    compute_tensorf_val helpers.
    """
    def __init__(self, channels, world_size, xyz_min, xyz_max, config):
        super(TensoRFGrid, self).__init__()
        self.channels = channels
        self.world_size = world_size
        self.config = config
        self.register_buffer('xyz_min', torch.Tensor(xyz_min))
        self.register_buffer('xyz_max', torch.Tensor(xyz_max))
        X, Y, Z = world_size
        R = config['n_comp']
        Rxy = config.get('n_comp_xy', R)  # optional separate rank for the xy/z pair
        self.xy_plane = nn.Parameter(torch.randn([1, Rxy, X, Y]) * 0.1)
        self.xz_plane = nn.Parameter(torch.randn([1, R, X, Z]) * 0.1)
        self.yz_plane = nn.Parameter(torch.randn([1, R, Y, Z]) * 0.1)
        self.x_vec = nn.Parameter(torch.randn([1, R, X, 1]) * 0.1)
        self.y_vec = nn.Parameter(torch.randn([1, R, Y, 1]) * 0.1)
        self.z_vec = nn.Parameter(torch.randn([1, Rxy, Z, 1]) * 0.1)
        if self.channels > 1:
            # Projection from the concatenated per-component products
            # (R + R + Rxy values) down to the output channels.
            self.f_vec = nn.Parameter(torch.ones([R+R+Rxy, channels]))
            nn.init.kaiming_uniform_(self.f_vec, a=np.sqrt(5))
    def forward(self, xyz):
        '''
        xyz: global coordinates to query, shape [..., 3].
        Returns [..., channels] features (or [...] values when channels <= 1).
        '''
        shape = xyz.shape[:-1]
        xyz = xyz.reshape(1,1,-1,3)
        ind_norm = (xyz - self.xyz_min) / (self.xyz_max - self.xyz_min) * 2 - 1
        # Append a dummy 4th coordinate (always 0) used when sampling the 1D vectors.
        ind_norm = torch.cat([ind_norm, torch.zeros_like(ind_norm[...,[0]])], dim=-1)
        if self.channels > 1:
            out = compute_tensorf_feat(
                self.xy_plane, self.xz_plane, self.yz_plane,
                self.x_vec, self.y_vec, self.z_vec, self.f_vec, ind_norm)
            out = out.reshape(*shape,self.channels)
        else:
            out = compute_tensorf_val(
                self.xy_plane, self.xz_plane, self.yz_plane,
                self.x_vec, self.y_vec, self.z_vec, ind_norm)
            out = out.reshape(*shape)
        return out
    def scale_volume_grid(self, new_world_size):
        """Bilinearly resample every plane/vector factor to a new resolution."""
        if self.channels == 0:
            return
        X, Y, Z = new_world_size
        self.xy_plane = nn.Parameter(F.interpolate(self.xy_plane.data, size=[X,Y], mode='bilinear', align_corners=True))
        self.xz_plane = nn.Parameter(F.interpolate(self.xz_plane.data, size=[X,Z], mode='bilinear', align_corners=True))
        self.yz_plane = nn.Parameter(F.interpolate(self.yz_plane.data, size=[Y,Z], mode='bilinear', align_corners=True))
        self.x_vec = nn.Parameter(F.interpolate(self.x_vec.data, size=[X,1], mode='bilinear', align_corners=True))
        self.y_vec = nn.Parameter(F.interpolate(self.y_vec.data, size=[Y,1], mode='bilinear', align_corners=True))
        self.z_vec = nn.Parameter(F.interpolate(self.z_vec.data, size=[Z,1], mode='bilinear', align_corners=True))
    def total_variation_add_grad(self, wx, wy, wz, dense_mode):
        '''Add gradients by total variation loss in-place.

        Smooth-L1 TV over each factor; the sum is averaged over the 6
        plane terms before backprop. `dense_mode` is unused here (kept
        for interface parity with DenseGrid).
        '''
        loss = wx * F.smooth_l1_loss(self.xy_plane[:,:,1:], self.xy_plane[:,:,:-1], reduction='sum') +\
               wy * F.smooth_l1_loss(self.xy_plane[:,:,:,1:], self.xy_plane[:,:,:,:-1], reduction='sum') +\
               wx * F.smooth_l1_loss(self.xz_plane[:,:,1:], self.xz_plane[:,:,:-1], reduction='sum') +\
               wz * F.smooth_l1_loss(self.xz_plane[:,:,:,1:], self.xz_plane[:,:,:,:-1], reduction='sum') +\
               wy * F.smooth_l1_loss(self.yz_plane[:,:,1:], self.yz_plane[:,:,:-1], reduction='sum') +\
               wz * F.smooth_l1_loss(self.yz_plane[:,:,:,1:], self.yz_plane[:,:,:,:-1], reduction='sum') +\
               wx * F.smooth_l1_loss(self.x_vec[:,:,1:], self.x_vec[:,:,:-1], reduction='sum') +\
               wy * F.smooth_l1_loss(self.y_vec[:,:,1:], self.y_vec[:,:,:-1], reduction='sum') +\
               wz * F.smooth_l1_loss(self.z_vec[:,:,1:], self.z_vec[:,:,:-1], reduction='sum')
        loss /= 6
        loss.backward()
    def get_dense_grid(self):
        """Expand the factorized representation back to a dense [1, C, X, Y, Z] grid."""
        if self.channels > 1:
            feat = torch.cat([
                torch.einsum('rxy,rz->rxyz', self.xy_plane[0], self.z_vec[0,:,:,0]),
                torch.einsum('rxz,ry->rxyz', self.xz_plane[0], self.y_vec[0,:,:,0]),
                torch.einsum('ryz,rx->rxyz', self.yz_plane[0], self.x_vec[0,:,:,0]),
            ])
            grid = torch.einsum('rxyz,rc->cxyz', feat, self.f_vec)[None]
        else:
            grid = torch.einsum('rxy,rz->xyz', self.xy_plane[0], self.z_vec[0,:,:,0]) + \
                   torch.einsum('rxz,ry->xyz', self.xz_plane[0], self.y_vec[0,:,:,0]) + \
                   torch.einsum('ryz,rx->xyz', self.yz_plane[0], self.x_vec[0,:,:,0])
            grid = grid[None,None]
        return grid
    def extra_repr(self):
        return f'channels={self.channels}, world_size={self.world_size.tolist()}, n_comp={self.config["n_comp"]}'
def compute_tensorf_feat(xy_plane, xz_plane, yz_plane, x_vec, y_vec, z_vec, f_vec, ind_norm):
    """Evaluate the VM-decomposition features at normalized coordinates.

    ind_norm: [1, 1, N, 4] where the first three entries are (x, y, z)
    in [-1, 1] and the 4th is a dummy 0 used to index the 1D vectors.
    Returns [N, channels] after projecting the per-component products
    through f_vec.
    """
    def _sample(factor, idx):
        # Bilinear lookup, reshaped to [n_pts, n_comp].
        return F.grid_sample(factor, ind_norm[:, :, :, idx],
                             mode='bilinear', align_corners=True).flatten(0, 2).T
    xy_feat = _sample(xy_plane, [1, 0])
    xz_feat = _sample(xz_plane, [2, 0])
    yz_feat = _sample(yz_plane, [2, 1])
    x_feat = _sample(x_vec, [3, 0])
    y_feat = _sample(y_vec, [3, 1])
    z_feat = _sample(z_vec, [3, 2])
    # Each plane is paired with its complementary axis vector.
    combined = torch.cat([
        xy_feat * z_feat,
        xz_feat * y_feat,
        yz_feat * x_feat,
    ], dim=-1)
    return torch.mm(combined, f_vec)
def compute_tensorf_val(xy_plane, xz_plane, yz_plane, x_vec, y_vec, z_vec, ind_norm):
    """Evaluate the scalar VM-decomposition value at normalized coordinates.

    Same sampling scheme as compute_tensorf_feat, but the per-component
    products are summed directly instead of projected, yielding [N].
    """
    def _sample(factor, idx):
        # Bilinear lookup, reshaped to [n_pts, n_comp].
        return F.grid_sample(factor, ind_norm[:, :, :, idx],
                             mode='bilinear', align_corners=True).flatten(0, 2).T
    xy_feat = _sample(xy_plane, [1, 0])
    xz_feat = _sample(xz_plane, [2, 0])
    yz_feat = _sample(yz_plane, [2, 1])
    x_feat = _sample(x_vec, [3, 0])
    y_feat = _sample(y_vec, [3, 1])
    z_feat = _sample(z_vec, [3, 2])
    return (xy_feat * z_feat).sum(-1) + (xz_feat * y_feat).sum(-1) + (yz_feat * x_feat).sum(-1)
''' Mask grid
It supports query for the known free space and unknown space.
'''
class MaskGrid(nn.Module):
    """Binary occupancy cache over a bounding box, for skipping known free space.

    Built either from a saved coarse-model checkpoint (`path`), by thresholding
    the alpha derived from its density grid, or directly from a boolean `mask`
    volume with an explicit bounding box.
    """
    def __init__(self, path=None, mask_cache_thres=None, mask=None, xyz_min=None, xyz_max=None):
        super(MaskGrid, self).__init__()
        if path is not None:
            # Derive the mask from a checkpoint: max-pool (dilate) the density
            # grid, convert density to alpha, then threshold.
            st = torch.load(path)
            self.mask_cache_thres = mask_cache_thres
            density = F.max_pool3d(st['model_state_dict']['density.grid'], kernel_size=3, padding=1, stride=1)
            alpha = 1 - torch.exp(-F.softplus(density + st['model_state_dict']['act_shift']) * st['model_kwargs']['voxel_size_ratio'])
            mask = (alpha >= self.mask_cache_thres).squeeze(0).squeeze(0)
            xyz_min = torch.Tensor(st['model_kwargs']['xyz_min'])
            xyz_max = torch.Tensor(st['model_kwargs']['xyz_max'])
        else:
            mask = mask.bool()
            xyz_min = torch.Tensor(xyz_min)
            xyz_max = torch.Tensor(xyz_max)
        self.register_buffer('mask', mask)
        xyz_len = xyz_max - xyz_min
        # Precompute the affine map from world xyz to integer grid indices.
        self.register_buffer('xyz2ijk_scale', (torch.Tensor(list(mask.shape)) - 1) / xyz_len)
        self.register_buffer('xyz2ijk_shift', -xyz_min * self.xyz2ijk_scale)
    @torch.no_grad()
    def forward(self, xyz):
        '''Skip known free space.
        @xyz: [..., 3] the xyz in global coordinate.
        '''
        shape = xyz.shape[:-1]
        xyz = xyz.reshape(-1, 3)
        mask = render_utils_cuda.maskcache_lookup(self.mask, xyz, self.xyz2ijk_scale, self.xyz2ijk_shift)
        mask = mask.reshape(shape)
        return mask
    def extra_repr(self):
        # BUG FIX: the expression was embedded in a plain string ('f' prefix
        # present but no braces), so repr printed the literal source text
        # instead of the actual mask shape.
        return f'mask.shape={list(self.mask.shape)}'
| 11,636 | 46.11336 | 134 | py |
Voxurf | Voxurf-main/lib/load_blender.py | import os
import torch
import numpy as np
import imageio
import json
import torch.nn.functional as F
import cv2
def trans_t(t):
    """4x4 camera-to-world translation by t along +z."""
    return torch.Tensor([
        [1, 0, 0, 0],
        [0, 1, 0, 0],
        [0, 0, 1, t],
        [0, 0, 0, 1]]).float()


def rot_phi(phi):
    """4x4 rotation about the x-axis by phi radians."""
    c, s = np.cos(phi), np.sin(phi)
    return torch.Tensor([
        [1, 0, 0, 0],
        [0, c, -s, 0],
        [0, s, c, 0],
        [0, 0, 0, 1]]).float()


def rot_theta(th):
    """4x4 rotation about the y-axis by th radians (note the sign convention)."""
    c, s = np.cos(th), np.sin(th)
    return torch.Tensor([
        [c, 0, -s, 0],
        [0, 1, 0, 0],
        [s, 0, c, 0],
        [0, 0, 0, 1]]).float()


def pose_spherical(theta, phi, radius):
    """Camera-to-world pose on a sphere: radius out, tilt `phi` deg, orbit `theta` deg."""
    c2w = trans_t(radius)
    c2w = rot_phi(phi / 180. * np.pi) @ c2w
    c2w = rot_theta(theta / 180. * np.pi) @ c2w
    # Final axis swap into the blender/world convention.
    c2w = torch.Tensor(np.array([[-1, 0, 0, 0], [0, 0, 1, 0], [0, 1, 0, 0], [0, 0, 0, 1]])) @ c2w
    return c2w
def load_blender_data(basedir, half_res=False, testskip=1):
    """Load a Blender-style synthetic NeRF dataset.

    Args:
        basedir: dataset root containing transforms_{train,val,test}.json.
        half_res: if True, downsample images (and focal) by a factor of 2.
        testskip: keep every `testskip`-th frame of val/test (0 -> keep all).

    Returns:
        imgs: [N, H, W, 4] float RGBA in [0, 1].
        poses: [N, 4, 4] camera-to-world matrices.
        render_poses: [40, 4, 4] circular poses for free-view rendering.
        [H, W, focal]: intrinsics.
        i_split: three index arrays (train/val/test) into the stacked frames.
    """
    splits = ['train', 'val', 'test']
    metas = {}
    for s in splits:
        with open(os.path.join(basedir, 'transforms_{}.json'.format(s)), 'r') as fp:
            metas[s] = json.load(fp)
    all_imgs = []
    all_poses = []
    counts = [0]
    for s in splits:
        meta = metas[s]
        imgs = []
        poses = []
        if s=='train' or testskip==0:
            skip = 1
        else:
            skip = testskip
        for frame in meta['frames'][::skip]:
            fname = os.path.join(basedir, frame['file_path'] + '.png')
            imgs.append(imageio.imread(fname))
            poses.append(np.array(frame['transform_matrix']))
        imgs = (np.array(imgs) / 255.).astype(np.float32) # keep all 4 channels (RGBA)
        poses = np.array(poses).astype(np.float32)
        counts.append(counts[-1] + imgs.shape[0])
        all_imgs.append(imgs)
        all_poses.append(poses)
    i_split = [np.arange(counts[i], counts[i+1]) for i in range(3)]
    imgs = np.concatenate(all_imgs, 0)
    poses = np.concatenate(all_poses, 0)
    H, W = imgs[0].shape[:2]
    # NOTE: `meta` here is the last split's metadata; camera_angle_x is
    # assumed identical across splits.
    camera_angle_x = float(meta['camera_angle_x'])
    focal = .5 * W / np.tan(.5 * camera_angle_x)
    # 40 render poses on a circle at -30 deg elevation, radius 4.
    render_poses = torch.stack([pose_spherical(angle, -30.0, 4.0) for angle in np.linspace(-180,180,40+1)[:-1]], 0)
    if half_res:
        H = H//2
        W = W//2
        focal = focal/2.
        imgs_half_res = np.zeros((imgs.shape[0], H, W, 4))
        for i, img in enumerate(imgs):
            imgs_half_res[i] = cv2.resize(img, (W, H), interpolation=cv2.INTER_AREA)
        imgs = imgs_half_res
        # imgs = tf.image.resize_area(imgs, [400, 400]).numpy()
    return imgs, poses, render_poses, [H, W, focal], i_split
| 2,553 | 27.065934 | 115 | py |
Voxurf | Voxurf-main/lib/load_volsdf_bmvs.py | import torch
import torch.nn.functional as F
import cv2 as cv
import numpy as np
import os
from glob import glob
from icecream import ic
from scipy.spatial.transform import Rotation as Rot
from scipy.spatial.transform import Slerp
import imageio
# This function is borrowed from IDR: https://github.com/lioryariv/idr
def load_K_Rt_from_P(filename, P=None):
    """Decompose a 3x4 projection matrix into intrinsics and a pose.

    If P is None it is parsed from `filename` (whitespace-separated rows,
    tolerating one optional header line). Returns (intrinsics, pose) as
    4x4 numpy arrays: intrinsics holds K normalized so K[2, 2] == 1; pose
    holds R^T and the dehomogenized camera center.
    """
    if P is None:
        lines = open(filename).read().splitlines()
        if len(lines) == 4:
            # Drop the header line.
            lines = lines[1:]
        lines = [[x[0], x[1], x[2], x[3]] for x in (x.split(" ") for x in lines)]
        P = np.asarray(lines).astype(np.float32).squeeze()
    decomposed = cv.decomposeProjectionMatrix(P)
    K, R, t = decomposed[0], decomposed[1], decomposed[2]
    K = K / K[2, 2]  # normalize intrinsics
    intrinsics = np.eye(4)
    intrinsics[:3, :3] = K
    pose = np.eye(4, dtype=np.float32)
    pose[:3, :3] = R.transpose()
    pose[:3, 3] = (t[:3] / t[3])[:, 0]
    return intrinsics, pose
def load_vbmvs_data(basedir, normallize=False, reso_level=1, mask=False):
    """Load a VolSDF-style BMVS scene (images + cameras.npz projection matrices).

    Args:
        basedir: scene root containing image/*.jpg, mask/*.png and cameras.npz.
        normallize: if True, apply each view's scale_mat before decomposing the
            projection matrix (parameter name kept as-is for compatibility).
        reso_level: integer downsampling factor applied to the images.
        mask: if True, multiply images by their foreground masks (requires mask/).

    Returns:
        imgs, poses, render_poses, [H, W, focal], K, i_split
        where i_split keeps every view for train and every 6th view for val/test.
    """
    rgb_paths = sorted(glob(os.path.join(basedir, 'image', '*jpg')))
    mask_paths = sorted(glob(os.path.join(basedir, 'mask', '*png')))
    render_cameras_name = 'cameras.npz'
    camera_dict = np.load(os.path.join(basedir, render_cameras_name))
    world_mats_np = [camera_dict['world_mat_%d' % idx].astype(np.float32) for idx in range(len(rgb_paths))]
    if normallize:
        scale_mats_np = [camera_dict['scale_mat_%d' % idx].astype(np.float32) for idx in range(len(rgb_paths))]
    else:
        scale_mats_np = None
    all_intrinsics = []
    all_poses = []
    all_imgs = []
    all_masks = []
    for i, (world_mat, im_name) in enumerate(zip(world_mats_np, rgb_paths)):
        if normallize:
            P = world_mat @ scale_mats_np[i]
        else:
            P = world_mat
        P = P[:3, :4]
        intrinsics, pose = load_K_Rt_from_P(None, P)
        all_intrinsics.append(intrinsics)
        all_poses.append(pose)
        # all_poses.append(P)
        if len(mask_paths) > 0:
            all_masks.append((imageio.imread(mask_paths[i]) / 255.).astype(np.float32))
        # all_imgs.append(cv.imread(im_name)/255)
        all_imgs.append((imageio.imread(im_name) / 255.).astype(np.float32))
    imgs = np.stack(all_imgs, 0)
    poses = np.stack(all_poses, 0)
    if mask:
        assert len(mask_paths) > 0
        masks = np.stack(all_masks, 0)
        imgs = imgs * masks
    H, W = imgs[0].shape[:2]
    if reso_level > 1:
        H, W = H//reso_level, W//reso_level
        imgs = F.interpolate(torch.from_numpy(imgs).permute(0,3,1,2), size=(H, W)).permute(0,2,3,1).numpy()
    K = all_intrinsics[0]
    # Focal length from the first view's intrinsics, rescaled with the images.
    focal = all_intrinsics[0][0,0] / reso_level
    i_split = [np.arange(len(imgs)), np.arange(len(imgs))[::6], np.arange(len(imgs))[::6]]
    render_poses = poses[i_split[-1]]
    return imgs, poses, render_poses, [H, W, focal], K, i_split
Dialogue-to-Video-Retrieval | Dialogue-to-Video-Retrieval-main/modeling.py | from collections import OrderedDict
from typing import Tuple, Union
import numpy as np
import torch
import torch.nn.functional as F
from torch import nn
from torch.nn import CrossEntropyLoss
from transformers import CLIPProcessor, CLIPModel
from torch import Tensor
from dataclasses import dataclass
from typing import Dict
from typing import Iterable, Optional
import numpy as np
import torch
import torch.nn.functional as F
from torch import Tensor
from torch import nn
# from .transcribe import transcribe as transcribe_function
# from .decoding import detect_language as detect_language_function, decode as decode_function
class Linear(nn.Linear):
    """nn.Linear that casts its weight/bias to the input's dtype before the matmul."""
    def forward(self, x: Tensor) -> Tensor:
        weight = self.weight.to(x.dtype)
        bias = None if self.bias is None else self.bias.to(x.dtype)
        return F.linear(x, weight, bias)
class AADV(nn.Module):
    """Dialogue-to-video retrieval model built around a HuggingFace CLIPModel.

    Frames are encoded with CLIP's image tower plus learned temporal position
    embeddings and temporal self-attention; the query side is either a plain
    CLIP text encoding (script/summary) or a dialogue encoding fused with a
    text description. `forward` returns normalized video/text embeddings and
    the learned logit scale (contrastive loss computation is left to the
    caller — see the commented-out loss code below).
    """
    def __init__(self, config, CLIPConfig):
        super().__init__()
        self.config = config
        self.temporal_position_embeddings = nn.Embedding(config.n_frames, config.transformer_width)
        self.clip = CLIPModel(CLIPConfig)
        attn_dropout = 0.1
        is_add_bias_kv = True
        is_add_zero_attn = True
        self.temporal_self_attention = nn.MultiheadAttention(config.transformer_width, config.attention_heads,
                                                             dropout=attn_dropout,
                                                             add_bias_kv=is_add_bias_kv,
                                                             add_zero_attn=is_add_zero_attn)
        self.query_multi_attention = nn.MultiheadAttention(config.transformer_width, config.attention_heads,
                                                           dropout=attn_dropout,
                                                           add_bias_kv=is_add_bias_kv,
                                                           add_zero_attn=is_add_zero_attn)
        self.dialogue_multi_attention = nn.MultiheadAttention(config.transformer_width, config.attention_heads,
                                                              dropout=attn_dropout,
                                                              add_bias_kv=is_add_bias_kv,
                                                              add_zero_attn=is_add_zero_attn)
        self.text_to_dialogue_attention = nn.MultiheadAttention(config.transformer_width, config.attention_heads,
                                                                dropout=attn_dropout,
                                                                add_bias_kv=is_add_bias_kv,
                                                                add_zero_attn=is_add_zero_attn)
        self.gru = nn.GRU(config.transformer_width, config.transformer_width, num_layers=1)
        self.combine_video_and_all_frame = nn.Linear(in_features=config.transformer_width * 2,
                                                     out_features=config.transformer_width)
        self.combine_init = nn.Linear(in_features=config.transformer_width, out_features=config.transformer_width)
        self.transform_all_frame_to_hidden = nn.Linear(in_features=512, out_features=config.transformer_width)
        self.fuse_image= nn.Linear(in_features=config.transformer_width * 2,
                                   out_features=config.transformer_width)
        self.video_to_multimodal = nn.Linear(in_features=config.transformer_width,
                                             out_features=config.transformer_width)
        self.text_to_multimodal = nn.Linear(in_features=config.transformer_width, out_features=config.transformer_width)
        # CLIP-style learnable temperature, initialized to log(1/0.07).
        self.logit_scale = nn.Parameter(torch.ones([]) * np.log(1 / 0.07))
        self.softmax = nn.Softmax(dim=-1)
        self.relu = nn.ReLU()
        self.sigmoid = nn.Sigmoid()
        self.loss_fct = CrossEntropyLoss()
    def forward(self, inputs):
        """
        :param inputs:
            image_frames: (B x 20) x N
            audio: B x 2 x 10000
            summary: B x 77
            script: B x 77
            dialog: B x 10 x 77
            all_ans: B x 10 x 77
            all_frames: B x 768/512
        :return: (image_features, n_video_features, n_text_features, logit_scale);
            the embeddings are L2-normalized along the last dim.
        """
        # pre-injection of all_frame feature
        image_features = self.encode_image(inputs['image_frames'])
        if self.config.search_key in ['script', 'summary']:
            # Plain CLIP text encoding; mask out zero-padded token positions.
            attn_mask = 1 - (inputs[self.config.search_key] == 0).long()
            text_features = self.clip.get_text_features(inputs[self.config.search_key], attention_mask=attn_mask)
        else:
            # encoding dialogue query
            text_features = self.encode_dialogue_query(inputs[self.config.search_key],
                                                       inputs[self.config.dialog_feature_key])
        # r_text_features = text_features.unsqueeze(0).repeat(image_features.size(1), 1, 1) # added repeat
        # text weighted representation of image representation
        # query_to_image_attn = self.query_multi_attention(r_text_features.transpose(0, 1).contiguous(),
        #                                                  image_features, image_features)[0].transpose(0, 1).contiguous()
        # video_features = query_to_image_attn
        image_features = image_features.transpose(0, 1).contiguous()
        # Mean-pool the per-frame features into one video embedding.
        video_features = torch.sum(image_features, dim=1) / image_features.size(1)
        n_video_features = torch.nn.functional.normalize(self.video_to_multimodal(video_features), p=2, dim=-1)
        n_text_features = torch.nn.functional.normalize(self.text_to_multimodal(text_features), p=2, dim=-1)
        logit_scale = self.logit_scale.exp()
        # # original multiply
        # logits = torch.mm(logit_scale * n_video_features, n_text_features.t())
        # # text weighted multiply
        # n_text_features = n_text_features.unsqueeze(1)
        # logits = torch.bmm(logit_scale * n_video_features.transpose(0, 1).contiguous(),
        #                    n_text_features.transpose(1, 2).contiguous()).squeeze(-1)
        # labels = torch.tensor([i for i in range(text_features.size(0))], dtype=torch.long,
        #                       device=self.config.device)
        # loss_i = self.loss_fct(logits, labels)
        # loss_e = self.loss_fct(logits.t(), labels)
        # loss = (loss_i + loss_e) / 2
        return image_features, n_video_features, n_text_features, logit_scale
    def encode_image(self, images):
        """Encode (B*n_frames) images, add temporal positions, self-attend over time.

        Returns frame features shaped (n_frames, B, width) — time-major for
        nn.MultiheadAttention.
        """
        image_features = self.clip.get_image_features(images)
        temporal_pos = torch.tensor(
            [[i for i in range(self.config.n_frames)] for j in range(images.size(0) // self.config.n_frames)],
            dtype=torch.int, device=self.config.device).view(-1)
        frame_temporal_pos_embed = self.temporal_position_embeddings(temporal_pos)
        image_features = (image_features + frame_temporal_pos_embed).view(images.size(0) // self.config.n_frames,
                                                                          self.config.n_frames, -1)
        image_features = image_features.transpose(0, 1).contiguous()
        self_attn_image_features = self.temporal_self_attention(image_features, image_features, image_features)[0]
        return self_attn_image_features
    def encode_dialogue_query(self, dialogues, text_desc):
        """Fuse per-turn dialogue embeddings with a text description.

        dialogues: B x 10 x 77 token ids (truncated to config.dialog_runs turns);
        text_desc: B x 77 token ids.
        Returns B x transformer_width query features.
        """
        dialogues = dialogues[:, :self.config.dialog_runs, :]
        d_input = dialogues.reshape(dialogues.size(0) * dialogues.size(1), -1).contiguous()
        d_attn_mask = 1 - (d_input == 0).long()
        dialogue_features = self.clip.get_text_features(d_input, attention_mask=d_attn_mask). \
            view(dialogues.size(0), dialogues.size(1), -1)
        dialogue_features = dialogue_features.transpose(0, 1).contiguous()
        dialogue_features = self.dialogue_multi_attention(dialogue_features, dialogue_features, dialogue_features)[0]
        # GRU over dialogue turns to capture conversational order.
        dialogue_features, _ = self.gru(dialogue_features)
        text_features = self.clip.get_text_features(text_desc, attention_mask=(1 - (text_desc == 0).long()))
        # The description queries the dialogue turns; result is added residually.
        dialogue_features = self.text_to_dialogue_attention(text_features.unsqueeze(1).transpose(0, 1).contiguous(),
                                                            dialogue_features, dialogue_features)[0].squeeze(0)
        text_features = self.combine_init(dialogue_features + text_features)
        # text_features = self.combine_init(torch.cat([text_features, dialogue_features], dim=-1))
        return text_features
@dataclass
class ModelDimensions:
    """Hyper-parameters of a Whisper-style audio/text model checkpoint."""
    # audio encoder
    n_mels: int
    n_audio_ctx: int
    n_audio_state: int
    n_audio_head: int
    n_audio_layer: int
    # text decoder
    n_vocab: int
    n_text_ctx: int
    n_text_state: int
    n_text_head: int
    n_text_layer: int
class LayerNorm(nn.LayerNorm):
    """LayerNorm computed in float32, with the result cast back to the input dtype."""
    def forward(self, x: Tensor) -> Tensor:
        normalized = super().forward(x.float())
        return normalized.type(x.dtype)
class Linear(nn.Linear):
    """nn.Linear that casts weight/bias to the input dtype (mixed-precision safe).

    NOTE(review): this duplicates the `Linear` class defined earlier in this
    module; being later, this definition is the one bound at import time.
    """
    def forward(self, x: Tensor) -> Tensor:
        return F.linear(
            x, self.weight.to(x.dtype), None if self.bias is None else self.bias.to(x.dtype)
        )
class Conv1d(nn.Conv1d):
    """nn.Conv1d that casts weight/bias to the input's dtype before convolving."""
    def _conv_forward(self, x: Tensor, weight: Tensor, bias: Optional[Tensor]) -> Tensor:
        cast_weight = weight.to(x.dtype)
        cast_bias = None if bias is None else bias.to(x.dtype)
        return super()._conv_forward(x, cast_weight, cast_bias)
def sinusoids(length, channels, max_timescale=10000):
    """Returns sinusoidal positional embeddings of shape [length, channels].

    The first channels//2 columns are sines, the rest cosines, over
    geometrically spaced timescales up to max_timescale.
    """
    assert channels % 2 == 0
    half = channels // 2
    log_timescale_increment = np.log(max_timescale) / (half - 1)
    inv_timescales = torch.exp(-log_timescale_increment * torch.arange(half))
    scaled_time = torch.arange(length)[:, np.newaxis] * inv_timescales[np.newaxis, :]
    return torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)], dim=1)
class MultiHeadAttention(nn.Module):
    """Multi-head attention with an optional external key/value cache.

    `kv_cache` maps this module's `key`/`value` submodules (used as dict
    keys by identity) to precomputed tensors; per the inline comments it is
    populated by externally installed forward hooks, so this class only
    reads from it.
    """
    def __init__(self, n_state: int, n_head: int):
        super().__init__()
        self.n_head = n_head
        self.query = Linear(n_state, n_state)
        self.key = Linear(n_state, n_state, bias=False)  # no bias on keys
        self.value = Linear(n_state, n_state)
        self.out = Linear(n_state, n_state)
    def forward(
        self,
        x: Tensor,
        xa: Optional[Tensor] = None,
        mask: Optional[Tensor] = None,
        kv_cache: Optional[dict] = None,
    ):
        # x: queries; xa (if given): cross-attention source for keys/values.
        q = self.query(x)
        if kv_cache is None or xa is None or self.key not in kv_cache:
            # hooks, if installed (i.e. kv_cache is not None), will prepend the cached kv tensors;
            # otherwise, perform key/value projections for self- or cross-attention as usual.
            k = self.key(x if xa is None else xa)
            v = self.value(x if xa is None else xa)
        else:
            # for cross-attention, calculate keys and values once and reuse in subsequent calls.
            k = kv_cache[self.key]
            v = kv_cache[self.value]
        wv = self.qkv_attention(q, k, v, mask)
        return self.out(wv)
    def qkv_attention(self, q: Tensor, k: Tensor, v: Tensor, mask: Optional[Tensor] = None):
        """Scaled dot-product attention over n_head heads.

        The 1/sqrt(d_head) scaling is split as d_head**-0.25 applied to both
        q and k. `mask` is additive and cropped to the current context length.
        """
        n_batch, n_ctx, n_state = q.shape
        scale = (n_state // self.n_head) ** -0.25
        q = q.view(*q.shape[:2], self.n_head, -1).permute(0, 2, 1, 3) * scale
        k = k.view(*k.shape[:2], self.n_head, -1).permute(0, 2, 3, 1) * scale
        v = v.view(*v.shape[:2], self.n_head, -1).permute(0, 2, 1, 3)
        qk = q @ k
        if mask is not None:
            qk = qk + mask[:n_ctx, :n_ctx]
        # Softmax in float32 for numerical stability, then cast back.
        w = F.softmax(qk.float(), dim=-1).to(q.dtype)
        return (w @ v).permute(0, 2, 1, 3).flatten(start_dim=2)
class ResidualAttentionBlock(nn.Module):
    """Pre-norm transformer block: self-attention, optional cross-attention, MLP.

    NOTE(review): elsewhere in this file, CLIP's `Transformer` passes an
    attention-mask tensor as the third positional argument here; a non-None
    tensor is truthy and would be interpreted as cross_attention=True rather
    than applied as a mask — confirm which block implementation was intended.
    """
    def __init__(self, n_state: int, n_head: int, cross_attention: bool = False):
        super().__init__()
        self.attn = MultiHeadAttention(n_state, n_head)
        self.attn_ln = LayerNorm(n_state)
        self.cross_attn = MultiHeadAttention(n_state, n_head) if cross_attention else None
        self.cross_attn_ln = LayerNorm(n_state) if cross_attention else None
        hidden = n_state * 4
        self.mlp = nn.Sequential(Linear(n_state, hidden), nn.GELU(), Linear(hidden, n_state))
        self.mlp_ln = LayerNorm(n_state)

    def forward(
        self,
        x: Tensor,
        xa: Optional[Tensor] = None,
        mask: Optional[Tensor] = None,
        kv_cache: Optional[dict] = None,
    ):
        # Residual self-attention on the normalized input.
        x = x + self.attn(self.attn_ln(x), mask=mask, kv_cache=kv_cache)
        if self.cross_attn is not None:
            # Residual cross-attention against the encoder output xa.
            x = x + self.cross_attn(self.cross_attn_ln(x), xa, kv_cache=kv_cache)
        return x + self.mlp(self.mlp_ln(x))
class AudioEncoder(nn.Module):
    """Whisper-style audio encoder: two convs, fixed sinusoidal positions,
    a stack of residual attention blocks, and a final LayerNorm."""
    def __init__(self, n_mels: int, n_ctx: int, n_state: int, n_head: int, n_layer: int):
        super().__init__()
        self.conv1 = Conv1d(n_mels, n_state, kernel_size=3, padding=1)
        # Stride-2 conv halves the time resolution down to n_ctx frames.
        self.conv2 = Conv1d(n_state, n_state, kernel_size=3, stride=2, padding=1)
        self.register_buffer("positional_embedding", sinusoids(n_ctx, n_state))
        self.blocks: Iterable[ResidualAttentionBlock] = nn.ModuleList(
            [ResidualAttentionBlock(n_state, n_head) for _ in range(n_layer)]
        )
        self.ln_post = LayerNorm(n_state)
    def forward(self, x: Tensor):
        """
        x : torch.Tensor, shape = (batch_size, n_mels, n_ctx)
            the mel spectrogram of the audio
        """
        x = F.gelu(self.conv1(x))
        x = F.gelu(self.conv2(x))
        x = x.permute(0, 2, 1)  # (batch, time, state) for attention
        assert x.shape[1:] == self.positional_embedding.shape, "incorrect audio shape"
        x = (x + self.positional_embedding).to(x.dtype)
        for block in self.blocks:
            x = block(x)
        x = self.ln_post(x)
        return x
class Bottleneck(nn.Module):
    """Anti-aliased ResNet bottleneck: 1x1 -> 3x3 -> (avgpool) -> 1x1, 4x expansion."""
    expansion = 4

    def __init__(self, inplanes, planes, stride=1):
        super().__init__()
        # All convolutions run at stride 1; spatial downsampling (stride > 1)
        # happens via an AvgPool2d after the 3x3 conv (anti-aliasing trick).
        self.conv1 = nn.Conv2d(inplanes, planes, 1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.relu1 = nn.ReLU(inplace=True)
        self.conv2 = nn.Conv2d(planes, planes, 3, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.relu2 = nn.ReLU(inplace=True)
        self.avgpool = nn.AvgPool2d(stride) if stride > 1 else nn.Identity()
        self.conv3 = nn.Conv2d(planes, planes * self.expansion, 1, bias=False)
        self.bn3 = nn.BatchNorm2d(planes * self.expansion)
        self.relu3 = nn.ReLU(inplace=True)
        self.downsample = None
        self.stride = stride
        if stride > 1 or inplanes != planes * Bottleneck.expansion:
            # Shortcut path: avgpool first, then a stride-1 1x1 conv.
            self.downsample = nn.Sequential(OrderedDict([
                ("-1", nn.AvgPool2d(stride)),
                ("0", nn.Conv2d(inplanes, planes * self.expansion, 1, stride=1, bias=False)),
                ("1", nn.BatchNorm2d(planes * self.expansion))
            ]))

    def forward(self, x: torch.Tensor):
        shortcut = x if self.downsample is None else self.downsample(x)
        h = self.relu1(self.bn1(self.conv1(x)))
        h = self.relu2(self.bn2(self.conv2(h)))
        h = self.avgpool(h)
        h = self.bn3(self.conv3(h))
        h = h + shortcut
        return self.relu3(h)
class AttentionPool2d(nn.Module):
    """Attention pooling over a 2D feature map (CLIP ResNet head).

    Flattens NCHW features into a token sequence, prepends the spatial mean
    as a query token, and returns the attended embedding of that token.
    """
    def __init__(self, spacial_dim: int, embed_dim: int, num_heads: int, output_dim: int = None):
        super().__init__()
        # spacial_dim**2 spatial tokens plus the prepended mean token.
        self.positional_embedding = nn.Parameter(torch.randn(spacial_dim ** 2 + 1, embed_dim) / embed_dim ** 0.5)
        self.k_proj = nn.Linear(embed_dim, embed_dim)
        self.q_proj = nn.Linear(embed_dim, embed_dim)
        self.v_proj = nn.Linear(embed_dim, embed_dim)
        self.c_proj = nn.Linear(embed_dim, output_dim or embed_dim)
        self.num_heads = num_heads
    def forward(self, x):
        x = x.flatten(start_dim=2).permute(2, 0, 1)  # NCHW -> (HW)NC
        x = torch.cat([x.mean(dim=0, keepdim=True), x], dim=0)  # (HW+1)NC
        x = x + self.positional_embedding[:, None, :].to(x.dtype)  # (HW+1)NC
        # Only the mean token (x[:1]) queries; keys/values span all tokens.
        x, _ = F.multi_head_attention_forward(
            query=x[:1], key=x, value=x,
            embed_dim_to_check=x.shape[-1],
            num_heads=self.num_heads,
            q_proj_weight=self.q_proj.weight,
            k_proj_weight=self.k_proj.weight,
            v_proj_weight=self.v_proj.weight,
            in_proj_weight=None,
            in_proj_bias=torch.cat([self.q_proj.bias, self.k_proj.bias, self.v_proj.bias]),
            bias_k=None,
            bias_v=None,
            add_zero_attn=False,
            dropout_p=0,
            out_proj_weight=self.c_proj.weight,
            out_proj_bias=self.c_proj.bias,
            use_separate_proj_weight=True,
            training=self.training,
            need_weights=False
        )
        return x.squeeze(0)
class ModifiedResNet(nn.Module):
    """
    A ResNet class that is similar to torchvision's but contains the following changes:
    - There are now 3 "stem" convolutions as opposed to 1, with an average pool instead of a max pool.
    - Performs anti-aliasing strided convolutions, where an avgpool is prepended to convolutions with stride > 1
    - The final pooling layer is a QKV attention instead of an average pool
    """
    def __init__(self, layers, output_dim, heads, input_resolution=224, width=64):
        super().__init__()
        self.output_dim = output_dim
        self.input_resolution = input_resolution
        # the 3-layer stem
        self.conv1 = nn.Conv2d(3, width // 2, kernel_size=3, stride=2, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(width // 2)
        self.relu1 = nn.ReLU(inplace=True)
        self.conv2 = nn.Conv2d(width // 2, width // 2, kernel_size=3, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(width // 2)
        self.relu2 = nn.ReLU(inplace=True)
        self.conv3 = nn.Conv2d(width // 2, width, kernel_size=3, padding=1, bias=False)
        self.bn3 = nn.BatchNorm2d(width)
        self.relu3 = nn.ReLU(inplace=True)
        self.avgpool = nn.AvgPool2d(2)
        # residual layers
        self._inplanes = width  # this is a *mutable* variable used during construction
        self.layer1 = self._make_layer(width, layers[0])
        self.layer2 = self._make_layer(width * 2, layers[1], stride=2)
        self.layer3 = self._make_layer(width * 4, layers[2], stride=2)
        self.layer4 = self._make_layer(width * 8, layers[3], stride=2)
        embed_dim = width * 32  # the ResNet feature dimension
        self.attnpool = AttentionPool2d(input_resolution // 32, embed_dim, heads, output_dim)
    def _make_layer(self, planes, blocks, stride=1):
        # First block may downsample; the remaining blocks keep stride 1.
        layers = [Bottleneck(self._inplanes, planes, stride)]
        self._inplanes = planes * Bottleneck.expansion
        for _ in range(1, blocks):
            layers.append(Bottleneck(self._inplanes, planes))
        return nn.Sequential(*layers)
    def forward(self, x):
        def stem(x):
            x = self.relu1(self.bn1(self.conv1(x)))
            x = self.relu2(self.bn2(self.conv2(x)))
            x = self.relu3(self.bn3(self.conv3(x)))
            x = self.avgpool(x)
            return x
        # Match the stem's parameter dtype (supports half-precision weights).
        x = x.type(self.conv1.weight.dtype)
        x = stem(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.attnpool(x)
        return x
class QuickGELU(nn.Module):
    """Sigmoid-based GELU approximation: x * sigmoid(1.702 * x)."""
    def forward(self, x: torch.Tensor):
        gate = torch.sigmoid(1.702 * x)
        return x * gate
class Transformer(nn.Module):
    """A stack of `layers` residual attention blocks of width `width`.

    NOTE(review): `attn_mask` is forwarded as the third positional argument
    of ResidualAttentionBlock; in this file that parameter is
    `cross_attention: bool`, so a tensor mask is treated as a truthy flag
    rather than applied as an attention mask — confirm the intended block.
    """
    def __init__(self, width: int, layers: int, heads: int, attn_mask: torch.Tensor = None):
        super().__init__()
        self.width = width
        self.layers = layers
        blocks = [ResidualAttentionBlock(width, heads, attn_mask) for _ in range(layers)]
        self.resblocks = nn.Sequential(*blocks)

    def forward(self, x: torch.Tensor):
        return self.resblocks(x)
class VisionTransformer(nn.Module):
    """CLIP ViT image encoder: patchify, prepend class token, transformer, project."""
    def __init__(self, input_resolution: int, patch_size: int, width: int, layers: int, heads: int, output_dim: int):
        super().__init__()
        self.input_resolution = input_resolution
        self.output_dim = output_dim
        # Non-overlapping patch embedding (stride == kernel == patch_size).
        self.conv1 = nn.Conv2d(in_channels=3, out_channels=width, kernel_size=patch_size, stride=patch_size, bias=False)
        scale = width ** -0.5
        self.class_embedding = nn.Parameter(scale * torch.randn(width))
        self.positional_embedding = nn.Parameter(scale * torch.randn((input_resolution // patch_size) ** 2 + 1, width))
        self.ln_pre = LayerNorm(width)
        self.transformer = Transformer(width, layers, heads)
        self.ln_post = LayerNorm(width)
        self.proj = nn.Parameter(scale * torch.randn(width, output_dim))
    def forward(self, x: torch.Tensor):
        x = self.conv1(x)  # shape = [*, width, grid, grid]
        x = x.reshape(x.shape[0], x.shape[1], -1)  # shape = [*, width, grid ** 2]
        x = x.permute(0, 2, 1)  # shape = [*, grid ** 2, width]
        x = torch.cat(
            [self.class_embedding.to(x.dtype) + torch.zeros(x.shape[0], 1, x.shape[-1], dtype=x.dtype, device=x.device),
             x], dim=1)  # shape = [*, grid ** 2 + 1, width]
        x = x + self.positional_embedding.to(x.dtype)
        x = self.ln_pre(x)
        x = x.permute(1, 0, 2)  # NLD -> LND
        x = self.transformer(x)
        x = x.permute(1, 0, 2)  # LND -> NLD
        # Only the class token's output is used for the image embedding.
        x = self.ln_post(x[:, 0, :])
        if self.proj is not None:
            x = x @ self.proj
        return x
class CLIP(nn.Module):
    def __init__(self,
                 embed_dim: int,
                 # vision
                 image_resolution: int,
                 vision_layers: Union[Tuple[int, int, int, int], int],
                 vision_width: int,
                 vision_patch_size: int,
                 # text
                 context_length: int,
                 vocab_size: int,
                 transformer_width: int,
                 transformer_heads: int,
                 transformer_layers: int
                 ):
        """Build the joint image/text model.

        A tuple/list `vision_layers` selects the ModifiedResNet tower;
        an int selects the VisionTransformer tower with `vision_patch_size`.
        """
        super().__init__()
        self.context_length = context_length
        if isinstance(vision_layers, (tuple, list)):
            vision_heads = vision_width * 32 // 64
            self.visual = ModifiedResNet(
                layers=vision_layers,
                output_dim=embed_dim,
                heads=vision_heads,
                input_resolution=image_resolution,
                width=vision_width
            )
        else:
            vision_heads = vision_width // 64
            self.visual = VisionTransformer(
                input_resolution=image_resolution,
                patch_size=vision_patch_size,
                width=vision_width,
                layers=vision_layers,
                heads=vision_heads,
                output_dim=embed_dim
            )
        self.transformer = Transformer(
            width=transformer_width,
            layers=transformer_layers,
            heads=transformer_heads,
            attn_mask=self.build_attention_mask()
        )
        self.vocab_size = vocab_size
        self.token_embedding = nn.Embedding(vocab_size, transformer_width)
        self.positional_embedding = nn.Parameter(torch.empty(self.context_length, transformer_width))
        self.ln_final = LayerNorm(transformer_width)
        self.text_projection = nn.Parameter(torch.empty(transformer_width, embed_dim))
        # CLIP-style learnable temperature, initialized to log(1/0.07).
        self.logit_scale = nn.Parameter(torch.ones([]) * np.log(1 / 0.07))
        self.initialize_parameters()
def initialize_parameters(self):
nn.init.normal_(self.token_embedding.weight, std=0.02)
nn.init.normal_(self.positional_embedding, std=0.01)
if isinstance(self.visual, ModifiedResNet):
if self.visual.attnpool is not None:
std = self.visual.attnpool.c_proj.in_features ** -0.5
nn.init.normal_(self.visual.attnpool.q_proj.weight, std=std)
nn.init.normal_(self.visual.attnpool.k_proj.weight, std=std)
nn.init.normal_(self.visual.attnpool.v_proj.weight, std=std)
nn.init.normal_(self.visual.attnpool.c_proj.weight, std=std)
for resnet_block in [self.visual.layer1, self.visual.layer2, self.visual.layer3, self.visual.layer4]:
for name, param in resnet_block.named_parameters():
if name.endswith("bn3.weight"):
nn.init.zeros_(param)
proj_std = (self.transformer.width ** -0.5) * ((2 * self.transformer.layers) ** -0.5)
attn_std = self.transformer.width ** -0.5
fc_std = (2 * self.transformer.width) ** -0.5
for block in self.transformer.resblocks:
nn.init.normal_(block.attn.in_proj_weight, std=attn_std)
nn.init.normal_(block.attn.out_proj.weight, std=proj_std)
nn.init.normal_(block.mlp.c_fc.weight, std=fc_std)
nn.init.normal_(block.mlp.c_proj.weight, std=proj_std)
if self.text_projection is not None:
nn.init.normal_(self.text_projection, std=self.transformer.width ** -0.5)
def build_attention_mask(self):
# lazily create causal attention mask, with full attention between the vision tokens
# pytorch uses additive attention mask; fill with -inf
mask = torch.empty(self.context_length, self.context_length)
mask.fill_(float("-inf"))
mask.triu_(1) # zero out the lower diagonal
return mask
@property
def dtype(self):
return self.visual.conv1.weight.dtype
def encode_image(self, image):
return self.visual(image.type(self.dtype))
def encode_text(self, text):
x = self.token_embedding(text).type(self.dtype) # [batch_size, n_ctx, d_model]
x = x + self.positional_embedding.type(self.dtype)
x = x.permute(1, 0, 2) # NLD -> LND
x = self.transformer(x)
x = x.permute(1, 0, 2) # LND -> NLD
x = self.ln_final(x).type(self.dtype)
# x.shape = [batch_size, n_ctx, transformer.width]
# take features from the eot embedding (eot_token is the highest number in each sequence)
x = x[torch.arange(x.shape[0]), text.argmax(dim=-1)] @ self.text_projection
return x
def forward(self, image, text):
image_features = self.encode_image(image)
text_features = self.encode_text(text)
# normalized features
image_features = image_features / image_features.norm(dim=1, keepdim=True)
text_features = text_features / text_features.norm(dim=1, keepdim=True)
# cosine similarity as logits
logit_scale = self.logit_scale.exp()
logits_per_image = logit_scale * image_features @ text_features.t()
logits_per_text = logits_per_image.t()
# shape = [global_batch_size, global_batch_size]
return logits_per_image, logits_per_text
def convert_weights(model: nn.Module):
    """Convert applicable model parameters to fp16, in place."""

    def _to_half(module):
        # Purpose: cast one submodule's relevant tensors to half precision.
        # Dense / conv layers: weight plus optional bias.
        if isinstance(module, (nn.Conv1d, nn.Conv2d, nn.Linear)):
            module.weight.data = module.weight.data.half()
            if module.bias is not None:
                module.bias.data = module.bias.data.half()

        # Multi-head attention keeps several projection tensors as plain
        # attributes rather than Parameters of child modules.
        if isinstance(module, nn.MultiheadAttention):
            attn_attrs = ["in_proj_weight", "q_proj_weight", "k_proj_weight",
                          "v_proj_weight", "in_proj_bias", "bias_k", "bias_v"]
            for attr in attn_attrs:
                tensor = getattr(module, attr)
                if tensor is not None:
                    tensor.data = tensor.data.half()

        # CLIP-specific projection parameters attached directly to modules.
        for name in ("text_projection", "proj"):
            if hasattr(module, name):
                param = getattr(module, name)
                if param is not None:
                    param.data = param.data.half()

    model.apply(_to_half)
def build_model(state_dict: dict):
    """Instantiate a CLIP model from an OpenAI checkpoint state dict.

    Every hyper-parameter is inferred from tensor shapes and key names, so
    both ViT and ModifiedResNet vision backbones are supported. Weights are
    converted to fp16 and the model is returned in eval mode.
    """

    def _vit_geometry(sd):
        # Infer ViT config from the stem conv and flattened positional embedding.
        width = sd["visual.conv1.weight"].shape[0]
        layers = len([k for k in sd.keys()
                      if k.startswith("visual.") and k.endswith(".attn.in_proj_weight")])
        patch = sd["visual.conv1.weight"].shape[-1]
        grid = round((sd["visual.positional_embedding"].shape[0] - 1) ** 0.5)
        return layers, width, patch, patch * grid

    def _resnet_geometry(sd):
        # Infer ModifiedResNet config: block count per stage + attnpool grid.
        layers = tuple(
            len(set(k.split(".")[2] for k in sd if k.startswith(f"visual.layer{b}")))
            for b in [1, 2, 3, 4])
        width = sd["visual.layer1.0.conv1.weight"].shape[0]
        out_w = round((sd["visual.attnpool.positional_embedding"].shape[0] - 1) ** 0.5)
        assert out_w ** 2 + 1 == sd["visual.attnpool.positional_embedding"].shape[0]
        # Total downsampling factor of the ResNet tower is 32.
        return layers, width, None, out_w * 32

    # "visual.proj" only exists in ViT checkpoints.
    geometry = _vit_geometry if "visual.proj" in state_dict else _resnet_geometry
    vision_layers, vision_width, vision_patch_size, image_resolution = geometry(state_dict)

    embed_dim = state_dict["text_projection"].shape[1]
    context_length = state_dict["positional_embedding"].shape[0]
    vocab_size = state_dict["token_embedding.weight"].shape[0]
    transformer_width = state_dict["ln_final.weight"].shape[0]
    transformer_heads = transformer_width // 64
    transformer_layers = len(set(
        k.split(".")[2] for k in state_dict if k.startswith("transformer.resblocks")))

    model = CLIP(
        embed_dim,
        image_resolution, vision_layers, vision_width, vision_patch_size,
        context_length, vocab_size, transformer_width, transformer_heads, transformer_layers
    )
    # Bookkeeping entries are not parameters; drop them before loading.
    for key in ["input_resolution", "context_length", "vocab_size"]:
        if key in state_dict:
            del state_dict[key]
    convert_weights(model)
    model.load_state_dict(state_dict)
    return model.eval()
| 29,954 | 40.146978 | 122 | py |
Dialogue-to-Video-Retrieval | Dialogue-to-Video-Retrieval-main/data_preprocess.py | from tqdm import tqdm
import json
import codecs
import requests
import pandas as pd
from transformers import BertTokenizer, AutoTokenizer
from os import listdir
from os.path import isfile, join
import torch
import numpy as np
import random
# One-line JSON helpers: read / pretty-write UTF-8 JSON files.
json_load = lambda x: json.load(codecs.open(x, 'r', encoding='utf-8'))
json_dump = lambda d, p: json.dump(d, codecs.open(p, 'w', 'utf-8'), indent=2, ensure_ascii=False)
def draw_samples(lis, ratio):
    """Randomly sample elements from a list.

    ``ratio`` > 1 is treated as an absolute sample count; otherwise it is a
    fraction of ``len(lis)``. Sampling is done with replacement only when
    more samples are requested than there are elements.
    """
    n_samples = ratio if ratio > 1 else int(ratio * len(lis))
    with_replacement = n_samples > len(lis)
    chosen = np.random.choice(len(lis), n_samples, replace=with_replacement)
    return [lis[i] for i in chosen]
def inspect_avsd():
    """Inspect AVSD validation data: average BERT-token length of the
    'summary' and 'script' fields, printing how many entries exceed 60 tokens."""
    dir = 'data/avsd/avsd_val.json'
    js = json_load(dir)
    tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
    def measure_avg_len(examples, key):
        # Sum token lengths of examples[e][key]; entries with missing/empty
        # text are skipped in the sum but still counted in the denominator.
        lens = 0
        overlong = 0
        for e in examples:
            e = examples[e]
            if e[key] is None or len(e[key]) == 0:
                continue
            te = tokenizer.tokenize(e[key])
            if len(te) >= 60:
                overlong += 1
            lens += len(te)
        # Number of entries whose tokenized text is 60 tokens or longer.
        print(overlong)
        return lens / len(examples)
    avg_len_sum = measure_avg_len(js, 'summary')
    avg_len_script = measure_avg_len(js, 'script')
    return
def extract_audio_from_video():
    """Extract the audio track of every video under data/avsd/videos/ and
    write it as data/avsd/audios/<basename>.wav."""
    import moviepy.editor as mp
    path = 'data/avsd/videos/'
    onlyfiles = [f for f in listdir(path) if isfile(join(path, f))]
    for f in tqdm(onlyfiles):
        dir = path + f
        clip = mp.VideoFileClip(dir)
        clip.audio.write_audiofile('data/avsd/audios/{}.wav'.format(f.split('.')[0]))
    return
def sample_frames_from_video():
    """Sample 60 (roughly evenly spaced) frames from every video under
    data/avsd/videos/ and write them to data/avsd/frames/ as
    <video>_<frame_idx>.jpg."""
    # Importing all necessary libraries
    import cv2
    path = 'data/avsd/videos/'
    onlyfiles = [f for f in listdir(path) if isfile(join(path, f))]
    frames_per_video = 60
    for f in tqdm(onlyfiles):
        # Read the video from specified path
        cam = cv2.VideoCapture(path + f)
        # frame
        currentframe = 0
        all_frames = []
        while (True):
            # reading from frame
            ret, frame = cam.read()
            if ret:
                all_frames.append(frame)
                currentframe += 1
            else:
                break
        lens = len(all_frames)
        if lens >= frames_per_video:
            # Evenly spaced indices; clamp out-of-range ones and force the
            # final sample to be the video's last frame.
            interval = lens // frames_per_video
            frame_ind = [i * interval for i in range(frames_per_video)]
            for i in range(len(frame_ind)):
                if frame_ind[i] >= lens:
                    frame_ind[i] = lens - 1
            frame_ind[-1] = lens - 1
            sampled_frames = [all_frames[i] for i in frame_ind]
        else:
            # Too few frames: draw (with replacement) up to the target count.
            sampled_frames = sorted(draw_samples([i for i in range(len(all_frames))], frames_per_video))
            sampled_frames = [all_frames[i] for i in sampled_frames]
        for ind, frame in enumerate(sampled_frames):
            cv2.imwrite('data/avsd/frames/{}_{}.jpg'.format(f.split('.')[0], str(ind)), frame)
        # Release all space and windows once done
        cam.release()
        cv2.destroyAllWindows()
def preprocess_avsd_to_tensor_dataset():
    """Tokenize AVSD metadata (summary/script/dialog turns) with CLIP's
    tokenizer and pickle one cache per split, plus the ordered video-name
    list each cache is aligned with.

    Frames/audio are represented only by the per-video index; the raw images
    are loaded lazily at train/eval time (loading them here used too much
    memory — see the commented-out code below)."""
    import clip
    import torch
    from transformers import AutoTokenizer, AutoFeatureExtractor
    import pickle
    image_dir = 'data/avsd/frames/'
    audio_dir = 'data/avsd/audios/'
    train_metadata_dir = 'data/avsd/avsd_train.json'
    val_metadata_dir = 'data/avsd/avsd_val.json'
    test_metadata_dir = 'data/avsd/avsd_test.json'
    device = "cuda" if torch.cuda.is_available() else "cpu"
    model, preprocess = clip.load("ViT-L/14@336px", device=device)
    torch.random.manual_seed(0)
    def read_image_and_audio(metadata_dir, split='train'):
        # Tokenize one split and dump six aligned lists:
        # [images, audios, summaries, scripts, dialogs, answers].
        metadata = json_load(metadata_dir)
        all_video_names = []
        all_images, all_audios, all_summaries, all_scripts, all_dialogs, all_ans_in_dialog = [], [], [], [], [], []
        for ind, key in enumerate(tqdm(metadata)):
            md = metadata[key]
            all_video_names.append(key)
            '''
            Abandoned due to significant use of memory
            '''
            # all_frames = []
            # frame_index = sorted(draw_samples([i for i in range(20)], 10))
            # for ind in frame_index:
            # frame = preprocess(Image.open('{}{}_{}.jpg'.format(image_dir, key, str(ind))))
            # all_frames.append(frame)
            # all_frames = torch.cat(all_frames, dim=0)
            # Placeholder: store only the video's position in the name list.
            all_frames = torch.tensor([ind], dtype=torch.int)
            # Fall back to the script when no summary is available.
            summary = md['summary'] if md['summary'] is not None else md['script']
            script = md['script']
            t_summary = clip.tokenize(summary, context_length=77, truncate=True)
            t_script = clip.tokenize(script, context_length=77, truncate=True)
            all_t_q = []
            for dialog in md['data']:
                # Each dialogue turn is tokenized as "question answer".
                q = dialog['question'] + ' ' + dialog['answer']
                t_q = clip.tokenize(q, context_length=77, truncate=True)
                all_t_q.append(t_q)
            all_t_q = torch.cat(all_t_q, dim=0)
            all_t_ans = []
            for dialog in md['data']:
                ans = dialog['answer']
                t_ans = clip.tokenize(ans, context_length=77, truncate=True)
                all_t_ans.append(t_ans)
            all_t_ans = torch.cat(all_t_ans, dim=0)
            all_images.append(all_frames)
            # Audio is also represented by the placeholder index for now.
            all_audios.append(all_frames)
            all_summaries.append(t_summary)
            all_scripts.append(t_script)
            all_dialogs.append(all_t_q)
            all_ans_in_dialog.append(all_t_ans)
        pickle.dump(
            [all_images, all_audios, all_summaries, all_scripts, all_dialogs, all_ans_in_dialog],
            open('data/avsd/{}.cache'.format(split), "wb"), protocol=4)
        video_names = {'split': split, 'data': all_video_names}
        json_dump(video_names, 'data/avsd/{}_video_names.json'.format(split))
    read_image_and_audio(train_metadata_dir, split='train')
    read_image_and_audio(val_metadata_dir, split='val')
    read_image_and_audio(test_metadata_dir, split='test')
def process_dialogs():
    """Split the dialog CSVs into per-task (NLU/DST/NLG) .source/.target
    seq2seq files, stripping special tokens from both sides."""
    special_tokens = json_load('../dialog/additional_special_tokens.json')
    def filtering(line):
        # Remove the first ten special tokens from a line.
        for sp in special_tokens['additional_special_tokens'][:10]:
            line = line.replace(sp, '')
        return line
    def output_dialogs_by_task(task_key, split):
        # Write <split>.source / <split>.target for one task.
        data_dir = '../dialog/{}.csv'.format(split)
        # Input CSVs use 'dev'; the output naming convention uses 'val'.
        if split == 'dev':
            split = 'val'
        df = pd.read_csv(data_dir)
        hist = list(df['history'])
        inputs = list(df['input'])
        target = list(df['target'])
        tasks = list(df['task'])
        source_lines, target_lines = [], []
        for h, inp, targ, task in zip(hist, inputs, target, tasks):
            if task == task_key:
                # pandas reads empty history cells as NaN; treat as empty.
                if str(h) == 'nan':
                    h = ''
                line = filtering(str(h) + ' ' + str(inp))
                targf = filtering(targ)
                # Drop pairs where either side is whitespace-only after filtering.
                if line.replace(' ', '') == '' or targf.replace(' ', '') == '':
                    continue
                source_lines.append(line)
                target_lines.append(str(targf))
        with open('../dialog/dialog-task/{}/{}.source'.format(task_key, split), 'w') as f:
            for line in source_lines:
                f.writelines(line.replace('\n', '').strip() + '\n')
        with open('../dialog/dialog-task/{}/{}.target'.format(task_key, split), 'w') as f:
            for line in target_lines:
                f.writelines(line.replace('\n', '').strip() + '\n')
    task_keys = ['NLU', 'DST', 'NLG']
    splits = ['train', 'dev', 'test']
    for tk in task_keys:
        for sp in splits:
            output_dialogs_by_task(tk, sp)
def sample_frames_from_video_for_val_set():
    """Sample 60 frames per video from the raw Charades source directory,
    skipping validation videos (already processed elsewhere), and write them
    to data/avsd/videos/frames/."""
    # Importing all necessary libraries
    import cv2
    path = 'data/avsd/videos/Charades_v1/'
    onlyfiles = [f for f in listdir(path) if isfile(join(path, f))]
    video_names = json_load("data/avsd/val_video_names.json")['data']
    # Dict for O(1) membership tests against validation video names.
    video_names = {vn: 0 for vn in video_names}
    frames_per_video = 60
    for ind, f in tqdm(enumerate(onlyfiles)):
        # NOTE(review): hard-coded resume offset from a previous partial run —
        # presumably needs updating/removal for a fresh run; confirm.
        if ind <= 9722:
            continue
        vn = f.split('.')[0]
        if vn in video_names:
            continue
        # Read the video from specified path
        cam = cv2.VideoCapture(path + f)
        # frame
        currentframe = 0
        all_frames = []
        while True:
            # reading from frame
            ret, frame = cam.read()
            if ret:
                all_frames.append(frame)
                currentframe += 1
            else:
                break
        lens = len(all_frames)
        if lens >= frames_per_video:
            interval = lens // frames_per_video
            frame_ind = [i * interval for i in range(frames_per_video)]
            for i in range(len(frame_ind)):
                if frame_ind[i] >= lens:
                    frame_ind[i] = lens - 1
            # NOTE(review): unlike sample_frames_from_video, the last index is
            # not forced to lens - 1 here — confirm whether that is intended.
            sampled_frames = [all_frames[i] for i in frame_ind]
        else:
            sampled_frames = sorted(draw_samples([i for i in range(len(all_frames))], frames_per_video))
            sampled_frames = [all_frames[i] for i in sampled_frames]
        # This loop shadows the outer `ind`; harmless because the outer loop
        # reassigns it from enumerate on the next iteration.
        for ind, frame in enumerate(sampled_frames):
            cv2.imwrite('data/avsd/videos/frames/{}_{}.jpg'.format(f.split('.')[0], str(ind)), frame)
        # Release all space and windows once done
        cam.release()
        cv2.destroyAllWindows()
def retokenize_avsd_to_tensor_dataset():
    """Re-tokenize AVSD metadata into cached tensor datasets.

    Mirrors preprocess_avsd_to_tensor_dataset(), but iterates the videos in
    the order recorded by the previously dumped *_video_names.json files so
    the new caches stay aligned with those name lists.
    """
    import clip
    import torch
    from transformers import AutoTokenizer
    import pickle
    image_dir = 'data/avsd/videos/frames/'
    audio_dir = 'data/avsd/videos/audios/'
    train_metadata_dir = 'data/avsd/avsd_train.json'
    val_metadata_dir = 'data/avsd/avsd_val.json'
    device = "cuda" if torch.cuda.is_available() else "cpu"
    model, preprocess = clip.load("ViT-L/14@336px", device=device)
    tokenizer = AutoTokenizer.from_pretrained('openai/clip-vit-base-patch16')
    def read_image_and_audio(metadata_dir, split='train'):
        # Tokenize one split and dump six aligned lists:
        # [images, audios, summaries, scripts, dialogs, answers].
        metadata = json_load(metadata_dir)
        # BUG FIX: the dumped JSON has shape {'split': ..., 'data': [names]};
        # iterating the dict itself yields only the keys 'split'/'data' (and
        # then a KeyError on metadata lookup) instead of the video names.
        video_names = json_load('data/avsd/{}_video_names.json'.format(split))['data']
        all_images, all_audios, all_summaries, all_scripts, all_dialogs, all_ans_in_dialog = [], [], [], [], [], []
        for ind, key in enumerate(tqdm(video_names)):
            md = metadata[key]
            # Frames are loaded lazily at train/eval time; only the video's
            # position in the name list is stored here (loading raw frames
            # used too much memory).
            all_frames = torch.tensor([ind], dtype=torch.int)
            # Fall back to the script when no summary is available.
            summary = md['summary'] if md['summary'] is not None else md['script']
            script = md['script']
            t_summary = clip.tokenize(summary, context_length=77, truncate=True)
            t_script = clip.tokenize(script, context_length=77, truncate=True)
            all_t_q = []
            for dialog in md['data']:
                # Each dialogue turn is tokenized as "question answer".
                q = dialog['question'] + ' ' + dialog['answer']
                t_q = clip.tokenize(q, context_length=77, truncate=True)
                all_t_q.append(t_q)
            all_t_q = torch.cat(all_t_q, dim=0)
            all_t_ans = []
            for dialog in md['data']:
                ans = dialog['answer']
                t_ans = clip.tokenize(ans, context_length=77, truncate=True)
                all_t_ans.append(t_ans)
            all_t_ans = torch.cat(all_t_ans, dim=0)
            all_images.append(all_frames)
            # BUG FIX: all_audios was previously left empty, so the dumped
            # cache was misaligned with the other lists; use the same
            # placeholder index as preprocess_avsd_to_tensor_dataset().
            all_audios.append(all_frames)
            all_summaries.append(t_summary)
            all_scripts.append(t_script)
            all_dialogs.append(all_t_q)
            all_ans_in_dialog.append(all_t_ans)
        pickle.dump(
            [all_images, all_audios, all_summaries, all_scripts, all_dialogs, all_ans_in_dialog],
            open('data/avsd/{}.cache'.format(split), "wb"), protocol=4)
    read_image_and_audio(train_metadata_dir, split='train')
    read_image_and_audio(val_metadata_dir, split='val')
def positionalencoding1d(d_model, length):
    """Build a sinusoidal positional-encoding table.

    :param d_model: dimension of the model (must be even)
    :param length: number of positions
    :return: (length, d_model) tensor; even columns are sin, odd columns cos
    :raises ValueError: if d_model is odd
    """
    import math
    if d_model % 2 != 0:
        raise ValueError("Cannot use sin/cos positional encoding with "
                         "odd dim (got dim={:d})".format(d_model))
    positions = torch.arange(0, length).unsqueeze(1).float()
    # Geometric frequency schedule: 10000^(-2i/d_model) for i = 0..d_model/2-1.
    freqs = torch.exp(torch.arange(0, d_model, 2, dtype=torch.float)
                      * -(math.log(10000.0) / d_model))
    table = torch.zeros(length, d_model)
    table[:, 0::2] = torch.sin(positions * freqs)
    table[:, 1::2] = torch.cos(positions * freqs)
    return table
def resize_images():
    """Downscale the frames whose indices are actually used at train/eval
    time to fit within 336x336 and save them to the sibling
    frames_resize directory."""
    from PIL import Image
    path = 'data/avsd/videos/frames/'
    onlyfiles = [f for f in listdir(path) if isfile(join(path, f))]
    t = 0
    # Frame indices consumed at train (indices_t) and eval (indices_e) time;
    # only these need resized copies.
    indices_t = [0, 5, 10, 15, 20, 25, 30, 35, 40, 45, 51, 59]
    indices_e = [0, 18, 36, 54, 72, 90, 108, 126, 144, 162, 170, 199]
    indices = set(indices_e + indices_t)
    for f in tqdm(onlyfiles):
        # Filenames follow the <video>_<frame_idx>.jpg convention.
        ind = int(f.replace('.jpg', '').split('_')[1])
        if ind not in indices:
            continue
        image = Image.open(path + f)
        # thumbnail() resizes in place and preserves the aspect ratio.
        image.thumbnail((336, 336))
        image.save(path.replace('frames', 'frames_resize') + f)
# Script entry point: build the tokenized tensor caches, then sample frames
# and extract audio tracks from the raw AVSD videos.
if __name__ == '__main__':
    preprocess_avsd_to_tensor_dataset()
    sample_frames_from_video()
    extract_audio_from_video()
| 13,897 | 32.570048 | 115 | py |
Dialogue-to-Video-Retrieval | Dialogue-to-Video-Retrieval-main/run_dialogue_to_video_retrieval.py | """ running training and evaluation code for dialogue-to-video retrieval
Created by Chenyang Lyu
"""
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from torch.utils.data.distributed import DistributedSampler
from torch.utils.data import TensorDataset
from transformers import CLIPProcessor, CLIPModel, CLIPConfig
import torch.distributed as dist
from torch.nn import CrossEntropyLoss
import argparse
import sklearn.metrics as metric
import glob
import logging
import os
import random
import numpy as np
import json
import pickle
import codecs
from PIL import Image
from tqdm import tqdm, trange
from sklearn.metrics import top_k_accuracy_score
from transformers import (
WEIGHTS_NAME,
AdamW,
get_linear_schedule_with_warmup,
)
from modeling import AADV
import clip
try:
from torch.utils.tensorboard import SummaryWriter
except ImportError:
from tensorboardX import SummaryWriter
# Module-level logger plus one-line JSON helpers (read / pretty-write UTF-8).
logger = logging.getLogger(__name__)
json_load = lambda x: json.load(codecs.open(x, 'r', encoding='utf-8'))
json_dump = lambda d, p: json.dump(d, codecs.open(p, 'w', 'utf-8'), indent=2, ensure_ascii=False)
def set_seed(args):
    """Seed the Python, NumPy and torch RNGs (and all CUDA devices when
    GPUs are in use) for reproducible runs."""
    for seeder in (random.seed, np.random.seed, torch.manual_seed):
        seeder(args.seed)
    if args.n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)
def draw_samples(lis, ratio):
    """Sample from ``lis``; ``ratio`` is an absolute count when > 1,
    otherwise a fraction of the list length. Replacement is used only when
    the requested count exceeds the list length."""
    k = int(ratio * len(lis)) if ratio <= 1 else ratio
    if k > len(lis):
        idx = np.random.choice(len(lis), k, replace=True)
    else:
        idx = np.random.choice(len(lis), k, replace=False)
    return [lis[j] for j in idx]
def train(args, model, train_dataset, preprocess, val_set=None):
    """Train the retrieval model with a symmetric in-batch contrastive loss.

    :param args: parsed command-line namespace (devices, lr, batch sizes, paths)
    :param model: model returning (image_features, video_feats, text_feats, logit_scale)
    :param train_dataset: tuple (TensorDataset, video-name json dict); the
        dataset stores only per-video indices — frames are loaded lazily here
    :param preprocess: CLIP image preprocessing transform
    :param val_set: optional (dataset, video-name dict) evaluated periodically
    :return: (global_step, mean training loss per optimizer step)
    """
    if args.local_rank in [-1, 0]:
        tb_writer = SummaryWriter()
    # Unpack the bundle: tensors + ordered video names (used for frame paths).
    train_dataset, train_video_names = train_dataset
    args.train_batch_size = args.train_batch_size * max(1, args.n_gpu)
    train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset)
    train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size)
    t_total = (len(train_dataloader) * args.num_train_epochs) // args.gradient_accumulation_steps
    # Prepare optimizer for training
    no_decay = ["bias", "LayerNorm.weight"]
    optimizer_group_parameters = [
        {
            "params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
            "weight_decay": args.weight_decay
        },
        {
            # Biases and LayerNorm weights are exempt from weight decay.
            "params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
            "weight_decay": 0.0
        }
    ]
    optimizer = AdamW(optimizer_group_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
    scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=int(args.warmup_steps * t_total),
                                                num_training_steps=t_total)
    loss_fct = CrossEntropyLoss()
    # multi-gpu training (should be after apex fp16 initialization)
    if args.n_gpu > 1:
        model = torch.nn.DataParallel(model)
    # Distributed training (should be after apex fp16 initialization)
    if args.local_rank != -1:
        model = torch.nn.parallel.DistributedDataParallel(
            model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True,
        )
    # Train
    logger.info("***** Running training *****")
    logger.info(" Num examples = %d", len(train_dataset))
    logger.info(" Num Epochs = %d", args.num_train_epochs)
    logger.info(" Instantaneous batch size per GPU = %d", args.train_batch_size)
    logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps)
    logger.info(" Total optimization steps = %d", t_total)
    global_step = 0
    steps_trained_in_current_epoch = 0
    tr_loss, logging_loss = 0.0, 0.0
    model.zero_grad()
    train_iterator = trange(int(args.num_train_epochs), desc="Epoch", disable=args.local_rank not in [-1, 0])
    for _ in train_iterator:
        epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])
        for step, batch in enumerate(epoch_iterator):
            model.train()
            # Skip past any already trained steps
            # NOTE(review): the counter is decremented but there is no
            # `continue`, so the batch is still processed — confirm whether
            # resume-skipping was meant to skip the step entirely.
            if steps_trained_in_current_epoch > 0:
                steps_trained_in_current_epoch -= 1
            batch = tuple(t.to(args.device) for t in batch)
            # batch[0] holds video indices; resolve each to its sampled frame
            # files on disk and stack the preprocessed frames on the fly.
            train_video_ind = list(batch[0].cpu().numpy())
            all_image_frames = []
            for vid in train_video_ind:
                _all_image_frames = []
                for vfi in args.train_frame_ind:
                    frame = preprocess(
                        Image.open('{}{}_{}.jpg'.format(args.image_dir, train_video_names['data'][vid], str(vfi))))
                    _all_image_frames.append(frame.unsqueeze(0))
                _all_image_frames = torch.cat(_all_image_frames, dim=0).unsqueeze(0)
                all_image_frames.append(_all_image_frames)
            all_image_frames = torch.cat(all_image_frames, dim=0).to(args.device)
            inputs = {'image_frames': all_image_frames,
                      'audio': batch[1],
                      'summary': batch[2],
                      'script': batch[3],
                      'dialog': batch[4],
                      'ans_in_dialog': batch[5],
                      }
            image_features, n_video_features, n_text_features, logit_scale = model(inputs)
            # DataParallel gathers one logit_scale per replica; keep the first.
            logit_scale = logit_scale[0] if args.n_gpu > 1 else logit_scale
            logits = torch.mm(logit_scale * n_video_features, n_text_features.t())
            # In-batch contrastive labels: matching pairs lie on the diagonal.
            labels = torch.tensor([i for i in range(n_text_features.size(0))], dtype=torch.long,
                                  device=args.device)
            loss_i = loss_fct(logits, labels)
            loss_e = loss_fct(logits.t(), labels)
            # Symmetric CLIP-style loss over both retrieval directions.
            loss = (loss_i + loss_e) / 2
            if args.n_gpu > 1:
                loss = loss.mean()  # mean() to average on multi-gpu parallel training
            if args.gradient_accumulation_steps > 1:
                loss = loss / args.gradient_accumulation_steps
            loss.backward()
            tr_loss += loss.item()
            if (step + 1) % args.gradient_accumulation_steps == 0:
                torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
                optimizer.step()
                scheduler.step()  # Update learning rate schedule
                model.zero_grad()
                global_step += 1
                if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0:
                    logs = {}
                    if args.local_rank == -1 and global_step % args.eval_steps == 0 and val_set is not None:  # Only evaluate when single GPU otherwise metrics may not average well
                        evaluate(args, model, preprocess, val_set)
                    # The checkpoint is written at every logging step.
                    model_to_save = model.module if hasattr(model,
                                                            'module') else model  # Take care of distributed/parallel training
                    logger.info("Saving model checkpoint to %s", args.output_dir)
                    torch.save(model_to_save.state_dict(), args.output_dir + 'model.pt')
                    loss_scalar = (tr_loss - logging_loss) / args.logging_steps
                    learning_rate_scalar = scheduler.get_last_lr()[0]
                    logs['learning_rate'] = learning_rate_scalar
                    logs['loss'] = loss_scalar
                    logging_loss = tr_loss
                    for key, value in logs.items():
                        tb_writer.add_scalar(key, value, global_step)
                    print(json.dumps({**logs, **{'step': global_step}}))
            # if args.save_steps > 0 and global_step % args.save_steps == 0:
            # # Save model checkpoint
            # output_dir = os.path.join(args.output_dir, 'checkpoint-{}'.format(global_step))
            # if not os.path.exists(output_dir):
            # os.makedirs(output_dir)
            # model_to_save = model if hasattr(model, 'module') else model # Take care of distributed/parallel training
            # model_to_save.save_pretrained(output_dir)
            # torch.save(args, os.path.join(output_dir, 'training_args.bin'))
            # logger.info("Saving model checkpoint to %s", output_dir)
    if args.local_rank in [-1, 0]:
        tb_writer.close()
    # Guard against division by zero if no optimizer step was ever taken.
    global_step = 1 if global_step == 0 else global_step
    return global_step, tr_loss / global_step
def evaluate(args, model, preprocess, eval_dataset, prefix=""):
    """Evaluate text->video retrieval: encode all videos and all queries,
    score every pair, and report top-k accuracy plus median/mean rank.

    :param args: command-line namespace; `search_key` selects the query field
        ('script'/'summary' use the plain CLIP text encoder; anything else is
        encoded as a dialogue query together with `dialog_feature_key`)
    :param model: trained retrieval model (optionally DataParallel-wrapped)
    :param preprocess: CLIP image preprocessing transform
    :param eval_dataset: tuple (TensorDataset, video-name json dict)
    :param prefix: tag used only in the log banner
    """
    eval_dataset, eval_video_names = eval_dataset
    args._eval_batch_size = args.eval_batch_size * max(1, args.n_gpu)
    # Note that DistributedSampler samples randomly
    eval_sampler = SequentialSampler(eval_dataset)
    eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args._eval_batch_size)
    if args.n_gpu > 1:
        model = torch.nn.DataParallel(model)
        model_module = model.module
    else:
        model_module = model
    # Eval!
    logger.info("***** Running evaluation {} *****".format(prefix))
    logger.info(" Num examples = %d", len(eval_dataset))
    logger.info(" Batch size = %d", args.eval_batch_size)
    all_image_features = []
    all_text_features = []
    with torch.no_grad():
        for batch in tqdm(eval_dataloader, desc="Evaluating"):
            model_module.eval()
            batch = tuple(t.to(args.device) for t in batch)
            # Resolve video indices to frame files and preprocess on the fly.
            eval_video_ind = list(batch[0].cpu().numpy())
            all_image_frames = []
            for vid in eval_video_ind:
                _all_image_frames = []
                for vfi in args.eval_frame_ind:
                    frame = preprocess(
                        Image.open('{}{}_{}.jpg'.format(args.image_dir, eval_video_names['data'][vid], str(vfi))))
                    _all_image_frames.append(frame.unsqueeze(0))
                _all_image_frames = torch.cat(_all_image_frames, dim=0).unsqueeze(0)
                all_image_frames.append(_all_image_frames)
            all_image_frames = torch.cat(all_image_frames, dim=0).to(args.device)
            inputs = {'image_frames': all_image_frames,
                      'audio': batch[1],
                      'summary': batch[2],
                      'script': batch[3],
                      'dialog': batch[4],
                      'ans_in_dialog': batch[5],
                      }
            # image encoding without self-attention
            all_image_features.append(model_module.encode_image(inputs['image_frames']).transpose(0, 1))
            if args.search_key in ['script', 'summary']:
                all_text_features.append(model_module.clip.get_text_features(inputs[args.search_key]))
            else:
                all_text_features.append(model_module.encode_dialogue_query(inputs[args.search_key],
                                                                            inputs[args.dialog_feature_key]))
            # print(all_image_features[-1].size(), all_text_features[-1].size())
    all_image_features = torch.cat(all_image_features, dim=0)
    # Mean-pool the per-frame features into a single embedding per video.
    all_video_features = torch.sum(all_image_features, dim=1) / all_image_features.size(1)
    all_text_features = torch.cat(all_text_features, dim=0)
    # r_text_features = all_text_features.unsqueeze(0).repeat(all_text_features.size(0), 1, 1) # added repeat
    # r_text_features = r_text_features.transpose(0, 1)
    # all_image_features = all_image_features.transpose(0, 1)
    # all_image_features = model_module.query_multi_attention(r_text_features,
    #                                                         all_image_features,
    #                                                         all_image_features)[0].transpose(0, 1).to('cuda')
    # model.to('cuda')
    # # with l2 norm
    t_video_features = torch.nn.functional.normalize(model_module.video_to_multimodal(all_video_features), p=2,
                                                     dim=-1)
    t_text_features = torch.nn.functional.normalize(model_module.text_to_multimodal(all_text_features), p=2, dim=-1)
    # # without l2 norm
    # t_video_features = model.video_to_multimodal(all_image_features)
    # t_text_features = model.text_to_multimodal(all_text_features)
    logit_scale = model_module.logit_scale.exp()
    # original multiply
    logits = torch.mm(logit_scale * t_video_features, t_text_features.t())
    # text weighted multiply
    # t_text_features = t_text_features.unsqueeze(1)
    # logits = torch.bmm(logit_scale * t_video_features.transpose(0, 1),
    #                    t_text_features.transpose(1, 2)).squeeze(-1)
    logits = logits.cpu().numpy()
    # Ground truth: the i-th query matches the i-th video (diagonal).
    labels = [i for i in range(t_video_features.size(0))]
    top_1 = top_k_accuracy_score(labels, logits, k=1)
    top_5 = top_k_accuracy_score(labels, logits, k=5)
    top_10 = top_k_accuracy_score(labels, logits, k=10)
    print('Metrics: top-1: {}, top-5: {}, top-10: {}'.format(str(round(100 * top_1, 2)),
                                                             str(round(100 * top_5, 2)),
                                                             str(round(100 * top_10, 2))))
    evaluate_rank(logits, labels)
    return
def evaluate_rank(sim_matrix, labels):
    """Report the median and mean retrieval rank of the ground-truth items.

    :param sim_matrix: iterable of per-query similarity rows
    :param labels: index of the correct candidate for each row
    :return: (median_rank, mean_rank); previously the metrics were only
        printed (return None) — returning them makes the function usable
        programmatically while remaining backward-compatible.
    """
    ranks = []
    for logits, label in zip(sim_matrix, labels):
        # Rank candidates by similarity, descending; rank is 1-based. The
        # stable sort preserves index order on ties, matching the original
        # dict-based implementation.
        order = sorted(range(len(logits)), key=lambda i: logits[i], reverse=True)
        ranks.append(order.index(label) + 1)
    median_rank, mean_rank = np.median(ranks), np.mean(ranks)
    print('Metrics: median rank: {}, mean rank: {}'.format(str(median_rank), str(mean_rank)))
    return median_rank, mean_rank
def main():
    """Entry point: parse CLI args, build AVSD datasets, train and/or
    evaluate the dialog-to-video retrieval model (AADV on top of CLIP)."""
    # ---- command-line arguments -------------------------------------------
    parser = argparse.ArgumentParser()
    parser.add_argument("--task_name", type=str, default="avsd",
                        help="the name of the training task (the dataset name)")
    parser.add_argument("--model_size", type=str, default="16",
                        help="the size of pre-trained CLIP model (ViT-16 or ViT-32)")
    parser.add_argument("--num_train_epochs", type=int, default=10,
                        help="the numebr of training epochs")
    parser.add_argument("--do_train", action="store_true",
                        help="whether to train the model or not")
    parser.add_argument("--do_eval", action="store_true",
                        help="whether to evaluate the model or not")
    parser.add_argument("--weight_decay", type=float, default=0.0,
                        help="the weight decay rate")
    parser.add_argument("--learning_rate", type=float, default=1e-5,
                        help="the learning rate used to train the model")
    parser.add_argument("--warmup_steps", type=float, default=0.0,
                        help="the warm_up step rate")
    parser.add_argument("--seed", type=int, default=0,
                        help="the random seed used in model initialization and dataloader")
    parser.add_argument("--train_batch_size", type=int, default=16,
                        help="the batch size used in training")
    parser.add_argument("--eval_batch_size", type=int, default=16,
                        help="the batch size used in evaluation")
    parser.add_argument("--logging_steps", type=int, default=50,
                        help="the logging steps")
    parser.add_argument("--eval_steps", type=int, default=500,
                        help="conduct evaluation every eval_steps")
    parser.add_argument("--device", type=int, default=0,
                        help="the device id used for training and evaluation")
    parser.add_argument("--n_gpu", type=int, default=1,
                        help="number of gpus being used")
    parser.add_argument("--attention_heads", type=int, default=8,
                        help="the attention heads used in multi head attention function")
    parser.add_argument("--gradient_accumulation_steps", type=int, default=1,
                        help="number of updates steps to accumulate before performing a backward/update pass")
    parser.add_argument("--search_key", type=str, default="dialog",
                        help="the key used in retrieval")
    parser.add_argument("--dialog_feature_key", type=str, default="summary",
                        help="the key used in dialog feature fusion")
    parser.add_argument("--n_frames", type=int, default=6,
                        help="the frames sampled from each video in training")
    parser.add_argument("--eval_n_frames", type=int, default=6,
                        help="the frames sampled from each video in evaluation")
    parser.add_argument("--all_frame_feature_ratio", type=float, default=1.0,
                        help="the coefficient used to multiply with all frame features in training")
    parser.add_argument("--eval_all_frame_feature_ratio", type=float, default=1.0,
                        help="the coefficient used to multiply with all frame features in evaluation")
    parser.add_argument("--dialog_runs", type=int, default=10,
                        help="the runs of dialog query used in training and evaluation")
    parser.add_argument("--local_rank", type=int, default=-1,
                        help="For distributed training: local_rank")
    parser.add_argument("--image_dir", type=str, default="data/avsd/frames/",
                        help="the directory used to store video frames.")
    parser.add_argument("--clip_model_name", type=str, default="openai/clip-vit-base-patch16",
                        help="the name for the CLIP model used in training.")
    parser.add_argument("--clip_processor_name", type=str, default="ViT-B/16",
                        help="the name for the CLIP processor used in training.")
    args, _ = parser.parse_known_args()
    # Setup CUDA, GPU & distributed training
    if args.local_rank == -1:
        # Single-process mode: pick GPU if available, otherwise CPU.
        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        # args.n_gpu = torch.cuda.device_count()
    else: # Initializes the distributed backend which will take care of sychronizing nodes/GPUs
        torch.cuda.set_device(args.local_rank)
        device = torch.device("cuda", args.local_rank)
        torch.distributed.init_process_group(backend="nccl")
        args.n_gpu = 1
    args.device = device
    # Optimizer hyper-parameters fixed here rather than exposed on the CLI.
    args.adam_epsilon = 1e-8
    args.max_grad_norm = 5.0
    set_seed(args) # Added here for reproductibility (even between python 2 and 3)
    # Load pretrained model and tokenizer
    if args.local_rank not in [-1, 0]:
        torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab
    clip_model_name = args.clip_model_name # openai/clip-vit-large-patch14-336
    clip_processor_name = args.clip_processor_name # "ViT-L/14@336px"
    clip_config = CLIPConfig.from_pretrained(clip_model_name)
    args.transformer_width = clip_config.projection_dim
    args.audio_feature_dim = 20000
    # t_frames/e_frames: total frames cached per video for train/eval.
    args.t_frames = 60
    args.e_frames = 60
    # Evenly-spaced frame indices for training, clamped to the valid range;
    # the last index is pinned to the final frame.
    interval = args.t_frames // args.n_frames
    frame_ind = [i * interval for i in range(args.n_frames)]
    for i in range(len(frame_ind)):
        if frame_ind[i] >= args.t_frames:
            frame_ind[i] = args.t_frames - 1
    frame_ind[-1] = args.t_frames - 1
    args.train_frame_ind = frame_ind
    # randomly sampled index
    # args.frame_ind = draw_samples([i for i in range(args.t_frames)], args.n_frames)
    # args.eval_n_frames = 30
    # NOTE(review): the eval interval divides by n_frames, not eval_n_frames;
    # when eval_n_frames != n_frames the spacing looks unintended — confirm.
    interval = args.e_frames // args.n_frames
    frame_ind = [i * interval for i in range(args.eval_n_frames)]
    for i in range(len(frame_ind)):
        if frame_ind[i] >= args.e_frames:
            frame_ind[i] = args.e_frames - 1
    frame_ind[-1] = args.e_frames - 1
    args.eval_frame_ind = frame_ind
    # args.eval_frame_ind = args.train_frame_ind
    if 'large' in args.image_dir:
        # "large" frame dumps store exactly n_frames frames, so indices are 0..n-1.
        args.train_frame_ind = [i for i in range(args.n_frames)]
        args.eval_frame_ind = [i for i in range(args.n_frames)]
    model_prefix = 'video_retrieval'
    args.output_dir = 'trained_models/dialog_to_video_retrieval/' \
                      '{}_{}_epochs-{}_lr-{}'.format(model_prefix,
                                                     args.search_key,
                                                     str(args.num_train_epochs),
                                                     str(args.learning_rate))
    if args.local_rank in [-1, 0]:
        print(args.output_dir)
    # ---- load cached feature tensors for train / val / test ---------------
    data_dirs = ["data/avsd/train.cache", "data/avsd/val.cache", "data/avsd/test.cache"]
    video_names = ["data/avsd/train_video_names.json", "data/avsd/val_video_names.json", "data/avsd/test_video_names.json"]
    all_images, all_audios, all_summaries, all_scripts, all_dialogs, all_ans_in_dialog = pickle.load(
        open(data_dirs[0], 'rb'))
    train_dataset = TensorDataset(torch.cat(all_images, dim=0),
                                  torch.cat([audio.unsqueeze(0) for audio in all_audios], dim=0),
                                  torch.cat(all_summaries, dim=0), torch.cat(all_scripts, dim=0),
                                  torch.cat([dialog.unsqueeze(0) for dialog in all_dialogs], dim=0),
                                  torch.cat([ans.unsqueeze(0) for ans in all_ans_in_dialog], dim=0))
    # Each split is carried around as (TensorDataset, video-name mapping).
    train_dataset = (train_dataset, json_load(video_names[0]))
    all_images, all_audios, all_summaries, all_scripts, all_dialogs, all_ans_in_dialog = pickle.load(
        open(data_dirs[1], 'rb'))
    val_video_names = json_load(video_names[1])
    val_dataset = TensorDataset(torch.cat(all_images, dim=0),
                                torch.cat([audio.unsqueeze(0) for audio in all_audios], dim=0),
                                torch.cat(all_summaries, dim=0),
                                torch.cat(all_scripts, dim=0),
                                torch.cat([dialog.unsqueeze(0) for dialog in all_dialogs], dim=0),
                                torch.cat([ans.unsqueeze(0) for ans in all_ans_in_dialog], dim=0))
    val_dataset = (val_dataset, {'data': val_video_names['data']})
    all_images, all_audios, all_summaries, all_scripts, all_dialogs, all_ans_in_dialog = pickle.load(
        open(data_dirs[2], 'rb'))
    test_video_names = json_load(video_names[2])
    test_dataset = TensorDataset(torch.cat(all_images, dim=0),
                                 torch.cat([audio.unsqueeze(0)
                                            for audio in all_audios], dim=0),
                                 torch.cat(all_summaries, dim=0),
                                 torch.cat(all_scripts, dim=0),
                                 torch.cat([dialog.unsqueeze(0)
                                            for dialog in all_dialogs], dim=0),
                                 torch.cat([ans.unsqueeze(0)
                                            for ans in all_ans_in_dialog], dim=0))
    test_dataset = (test_dataset, {'data': test_video_names['data']})
    if args.local_rank == 0:
        torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab
    device = "cuda" if torch.cuda.is_available() else "cpu"
    # Only the preprocessing transform is needed; the CLIP weights come from
    # huggingface below, so the OpenAI-clip model object is discarded.
    clip_model, preprocess = clip.load(clip_processor_name, device=device)
    del clip_model
    logger.info("Training/evaluation parameters %s", args)
    # Training
    if args.do_train:
        model = AADV(args, clip_config)
        model.clip = CLIPModel.from_pretrained(clip_model_name)
        model.to(args.device)
        global_step, tr_loss = train(args, model, train_dataset, preprocess, val_set=val_dataset)
        logger.info(" global_step = %s, average loss = %s", global_step, tr_loss)
    # Saving best-practices: if you use defaults names for the model, you can reload it using from_pretrained()
    if args.do_train and (args.local_rank == -1 or torch.distributed.get_rank() == 0):
        # Create output directory if needed
        if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]:
            os.makedirs(args.output_dir)
        model_to_save = model.module if hasattr(model,
                                                'module') else model # Take care of distributed/parallel training
        logger.info("Saving model checkpoint to %s", args.output_dir)
        # NOTE(review): plain string concatenation — output_dir has no trailing
        # slash, so the checkpoint lands beside the directory as
        # "...lr-<x>model.pt". Loading below uses the same concatenation, so
        # save/load stay consistent; confirm whether this is intended.
        torch.save(model_to_save.state_dict(), args.output_dir + 'model.pt')
        # Good practice: save your training arguments together with the trained model
        torch.save(args, os.path.join(args.output_dir, 'training_args.bin'))
    # Evaluation
    if args.do_eval and args.local_rank in [-1, 0]:
        checkpoints = [args.output_dir]
        logger.info("Evaluate the following checkpoints: %s", checkpoints)
        for checkpoint in checkpoints:
            model = AADV(args, clip_config)
            model.load_state_dict(torch.load(checkpoint + 'model.pt'))
            model.to(args.device)
            evaluate(args, model, preprocess, val_dataset)
            evaluate(args, model, preprocess, test_dataset)
    return
# Script entry point.
if __name__ == '__main__':
    main()
| 25,440 | 46.025878 | 180 | py |
PLRDiff | PLRDiff-main/test_single.py | import argparse
import os
import numpy as np
import torch as th
import torch.nn.functional as nF
from pathlib import Path
from guided_diffusion import utils
from guided_diffusion.create import create_model_and_diffusion_RS
import scipy.io as sio
from collections import OrderedDict
from os.path import join
from skimage.metrics import peak_signal_noise_ratio as PSNR
from skimage.metrics import structural_similarity as SSIM
from guided_diffusion.core import imresize
from math import sqrt, log
def blur_kernel(shape, var):
    """Build a normalized 2-D isotropic Gaussian blur kernel.

    :param shape: side length of the square kernel; must be odd so the
        Gaussian peak sits exactly on the center pixel.
    :param var: Gaussian width; used as the standard deviation (the
        exponent divides by ``2 * var**2``).
    :return: ``(shape, shape)`` float32 array that sums to 1.
    :raises ValueError: if ``shape`` is even.
    """
    # A bare ``assert`` vanishes under ``python -O``; validate explicitly.
    if shape % 2 != 1:
        raise ValueError('kernel size must be odd, got {}'.format(shape))
    mu = (shape - 1) // 2  # index of the center pixel
    XX, YY = np.meshgrid(np.arange(shape), np.arange(shape))
    out = np.exp((-(XX - mu) ** 2 - (YY - mu) ** 2) / (2 * var ** 2))
    return np.float32(out / out.sum())
# Script entry: run PLRDiff pan-sharpening inference on a single .mat scene.
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('-c', '--baseconfig', type=str, default='base.json',
                        help='JSON file for creating model and diffusion')
    parser.add_argument('-gpu', '--gpu_ids', type=str, default="0")
    parser.add_argument('-dr', '--dataroot', type=str, default='') # dataroot with
    parser.add_argument('-bs', '--batch_size', type=int, default=1)
    parser.add_argument('-sr', '--savedir', type=str, default='./results')
    parser.add_argument('-eta1', '--eta1', type=float, default=1) # trade off parameters 1
    parser.add_argument('-eta2', '--eta2', type=float, default=2) # trade off parameters 2
    parser.add_argument('-seed', '--seed', type=int, default=0)
    parser.add_argument('-dn', '--dataname', type=str, default='') # dataname: used for save output
    parser.add_argument('-step', '--step', type=int, default=1000) # diffusion steps
    parser.add_argument('-scale', '--scale', type=int, default=4) # downsample scale
    parser.add_argument('-ks', '--kernelsize', type=int, default=9) # kernel size
    parser.add_argument('-sig', '--sig', type=float, default=None) # kernel sigma (std dev)
    parser.add_argument('-sn', '--samplenum', type=int, default=1) # sample number
    parser.add_argument('-rs', '--resume_state', type=str, default='') # path: pretrained model
    ## parse configs
    args = parser.parse_args()
    opt = utils.parse(args)
    opt = utils.dict_to_nonedict(opt)
    opt['diffusion']['diffusion_steps'] = opt['step']
    device = th.device("cuda")
    ## create model and diffusion process
    model, diffusion = create_model_and_diffusion_RS(opt)
    ## seed — fix numpy and torch RNGs for reproducible sampling
    seeed = opt['seed']
    print(seeed)
    np.random.seed(seeed)
    th.manual_seed(seeed)
    th.cuda.manual_seed(seeed)
    ## load model — strip the 'denoise_fn.' prefix from checkpoint keys so
    ## they match this model's state dict
    load_path = opt['resume_state']
    gen_path = '{}_gen.pth'.format(load_path)
    cks = th.load(gen_path)
    new_cks = OrderedDict()
    for k, v in cks.items():
        newkey = k[11:] if k.startswith('denoise_fn.') else k
        new_cks[newkey] = v
    model.load_state_dict(new_cks, strict=False)
    model.to(device)
    model.eval()
    ## params passed through to the guided sampling loop
    param = dict()
    param['scale'] = opt['scale']
    param['eta1'] = opt['eta1']
    param['eta2'] = opt['eta2']
    k_s = opt['kernelsize']
    if opt['sig'] is None:
        # Default sigma of a Gaussian whose FWHM is 4 (FWHM = 2*sqrt(2*ln2)*sigma).
        sig = sqrt(4**2/(8*log(2)))
    else:
        sig = opt['sig']
    ## img — load ground truth HRMS, spectral response H, and PAN from the .mat
    data = sio.loadmat(opt['dataroot'])
    HRMS = th.from_numpy(np.float32(data['HRMS']))
    ms, Ch = HRMS.shape[0], HRMS.shape[-1]
    HRMS = HRMS.permute(2,0,1).unsqueeze(0)
    param['k_s'] = k_s
    kernel = blur_kernel(k_s, sig)
    # One copy of the blur kernel per band, for a depthwise (groups=Ch) conv.
    kernel = th.from_numpy(kernel).repeat(Ch,1,1,1)
    param['kernel'] = kernel.to(device)
    # Dataset-specific band triplets (presumably pseudo-RGB bands — confirm
    # against p_sample_loop's use of param['Band']).
    if opt['dataname'] == 'Chikusei':
        param['Band'] = th.Tensor([31,63,95]).type(th.int).to(device)
    elif opt['dataname'] == 'Houston':
        param['Band'] = th.Tensor([35,71,107]).type(th.int).to(device)
    elif opt['dataname'] == 'Pavia':
        param['Band'] = th.Tensor([25,51,77]).type(th.int).to(device)
    PH = th.from_numpy(np.float32(data['H'])).unsqueeze(0).unsqueeze(-1)
    param['PH'] = PH.to(device)
    PAN = th.from_numpy(np.float32(data['PAN'])).unsqueeze(0).unsqueeze(0)
    # Simulate the LRMS observation: per-band blur then bicubic downsample.
    LRMS = nF.conv2d(HRMS, kernel.to(HRMS.device), padding=int((k_s - 1)/2), groups=Ch)
    LRMS = imresize(LRMS, 1/opt['scale'])
    model_condition = {'LRMS': LRMS.to(device), 'PAN': PAN.to(device)}
    out_path = Path(join(opt['savedir'], str(opt['scale'])+"_"+str(opt['kernelsize'])+"_"+str(sig)))
    out_path.mkdir(parents=True, exist_ok=True)
    Rr = 3 # spectral dimensionality of the subspace
    dname = opt['dataname']
    # NOTE(review): the tensors below are converted to numpy inside the loop,
    # so a second iteration (samplenum > 1) would fail on HRMS/LRMS/PAN.
    for j in range(opt['samplenum']):
        # Sample the low-rank coefficient image A and the basis E.
        sample,E = diffusion.p_sample_loop(
                model,
                (1, Ch, ms, ms),
                Rr = Rr,
                clip_denoised=True,
                model_condition=model_condition,
                param=param,
                save_root=out_path,
                progress=True,
            )
        # Map samples from [-1, 1] back to [0, 1].
        sample = (sample + 1)/2
        # Reconstruct the full-band image: X = E @ A.
        im_out = th.matmul(E, sample.reshape(opt['batch_size'], Rr, -1)).reshape(opt['batch_size'], Ch, ms, ms)
        im_out = im_out.cpu().squeeze(0).permute(1,2,0).numpy()
        HRMS = HRMS.squeeze(0).permute(1,2,0).numpy()
        LRMS = LRMS.squeeze(0).permute(1,2,0).numpy()
        PAN = PAN.squeeze(0).permute(1,2,0).numpy()
        A = sample.cpu().squeeze(0).permute(1,2,0).numpy()
        E = E.cpu().squeeze(0).numpy()
        # NOTE(review): this loop adds the SAME whole-image PSNR Ch times and
        # divides by Ch, so the net result equals a single PSNR(HRMS, im_out);
        # presumably per-band PSNR(HRMS[..., i], im_out[..., i]) was intended.
        psnr = 0
        for i in range(Ch):
            psnr += PSNR(HRMS, im_out)
        psnr /= Ch
        ssim = SSIM(HRMS, im_out)
        ## save output
        sio.savemat(join(out_path, opt['dataname']+str(opt['step'])+".mat"), {'Rec': im_out, 'HRMS': HRMS, 'LRMS': LRMS, 'PAN':PAN, 'E':E, 'A':A})
        print(f"{dname:s} \t PSNR: \t {psnr:.2f} \t SSIM: \t {ssim:.4f} \n")
| 5,697 | 35.292994 | 146 | py |
PLRDiff | PLRDiff-main/guided_diffusion/core.py | '''
copied from
https://github.com/sanghyun-son/bicubic_pytorch
A standalone PyTorch implementation for fast and efficient bicubic resampling.
The resulting values are the same to MATLAB function imresize('bicubic').
## Author: Sanghyun Son
## Email: sonsang35@gmail.com (primary), thstkdgus35@snu.ac.kr (secondary)
## Version: 1.2.0
## Last update: July 9th, 2020 (KST)
Depencency: torch
Example::
>>> import torch
>>> import core
>>> x = torch.arange(16).float().view(1, 1, 4, 4)
>>> y = core.imresize(x, sizes=(3, 3))
>>> print(y)
tensor([[[[ 0.7506, 2.1004, 3.4503],
[ 6.1505, 7.5000, 8.8499],
[11.5497, 12.8996, 14.2494]]]])
'''
import math
import typing
import torch
from torch.nn import functional as F
__all__ = ['imresize']
_I = typing.Optional[int]
_D = typing.Optional[torch.dtype]
def nearest_contribution(x: torch.Tensor) -> torch.Tensor:
    """Box (nearest-neighbor) kernel: 1 on (-0.5, 0.5], 0 elsewhere."""
    inside = x.gt(-0.5) & x.le(0.5)
    return inside.to(dtype=x.dtype)
def linear_contribution(x: torch.Tensor) -> torch.Tensor:
    """Triangle (bilinear) kernel: 1 - |x| inside |x| <= 1, zero outside."""
    ax = x.abs()
    return (1 - ax) * ax.le(1).to(dtype=x.dtype)
def cubic_contribution(x: torch.Tensor, a: float=-0.5) -> torch.Tensor:
    """Keys cubic kernel (a = -0.5 matches MATLAB's bicubic interpolation).

    Piecewise cubic with support |x| <= 2: one polynomial on [0, 1] and
    another on (1, 2]; zero elsewhere.
    """
    ax = x.abs()
    ax2 = ax.pow(2)
    ax3 = ax.pow(3)
    near = ax.le(1)
    far = ax.gt(1) & ax.le(2)
    piece_near = ((a + 2) * ax3 - (a + 3) * ax2 + 1) * near.to(dtype=x.dtype)
    piece_far = (a * ax3 - 5 * a * ax2 + 8 * a * ax - 4 * a) * far.to(dtype=x.dtype)
    return piece_near + piece_far
def gaussian_contribution(x: torch.Tensor, sigma: float=2.0) -> torch.Tensor:
    """Unnormalized Gaussian kernel, truncated outside |x| <= 3*sigma + 1."""
    support = (x.abs() <= 3 * sigma + 1)
    # Normalization is done later (in get_weight), so no 1/(sqrt(2*pi)*sigma).
    vals = torch.exp(-x.pow(2) / (2 * sigma ** 2))
    return vals * support.to(dtype=x.dtype)
def discrete_kernel(
        kernel: str, scale: float, antialiasing: bool=True) -> torch.Tensor:
    '''
    For downsampling with integer scale only.

    Builds a fixed 2-D bicubic kernel as the outer product of 1-D cubic
    taps, normalized to sum to 1, suitable for strided convolution.
    '''
    downsampling_factor = int(1 / scale)
    if kernel == 'cubic':
        kernel_size_orig = 4  # the cubic kernel has support width 4
    else:
        raise ValueError('Pass!')
    if antialiasing:
        # Antialiasing widens the kernel by the downsampling factor.
        kernel_size = kernel_size_orig * downsampling_factor
    else:
        kernel_size = kernel_size_orig
    # Choose the half-extent so taps are sampled symmetrically around 0;
    # odd factors drop one tap to keep the kernel center-aligned.
    if downsampling_factor % 2 == 0:
        a = kernel_size_orig * (0.5 - 1 / (2 * kernel_size))
    else:
        kernel_size -= 1
        a = kernel_size_orig * (0.5 - 1 / (kernel_size + 1))
    with torch.no_grad():
        r = torch.linspace(-a, a, steps=kernel_size)
        k = cubic_contribution(r).view(-1, 1)
        # Separable kernel: outer product of the 1-D taps with themselves.
        k = torch.matmul(k, k.t())
        k /= k.sum()
    return k
def reflect_padding(
        x: torch.Tensor,
        dim: int,
        pad_pre: int,
        pad_post: int) -> torch.Tensor:
    '''
    Apply reflect padding to the given Tensor.
    Note that it is slightly different from the PyTorch functional.pad,
    where boundary elements are used only once.
    Instead, we follow the MATLAB implementation
    which uses boundary elements twice.
    For example,
    [a, b, c, d] would become [b, a, b, c, d, c] with the PyTorch implementation,
    while our implementation yields [a, a, b, c, d, d].
    '''
    b, c, h, w = x.size()
    if dim == 2 or dim == -2:
        # Pad along the height axis.
        padding_buffer = x.new_zeros(b, c, h + pad_pre + pad_post, w)
        # Copy the original rows into the middle of the buffer.
        padding_buffer[..., pad_pre:(h + pad_pre), :].copy_(x)
        # Mirror the leading rows: buffer row (pad_pre-1-p) gets source row p,
        # so the boundary row itself is duplicated (MATLAB-style).
        for p in range(pad_pre):
            padding_buffer[..., pad_pre - p - 1, :].copy_(x[..., p, :])
        # Mirror the trailing rows symmetrically from the bottom edge.
        for p in range(pad_post):
            padding_buffer[..., h + pad_pre + p, :].copy_(x[..., -(p + 1), :])
    else:
        # Same scheme along the width axis.
        padding_buffer = x.new_zeros(b, c, h, w + pad_pre + pad_post)
        padding_buffer[..., pad_pre:(w + pad_pre)].copy_(x)
        for p in range(pad_pre):
            padding_buffer[..., pad_pre - p - 1].copy_(x[..., p])
        for p in range(pad_post):
            padding_buffer[..., w + pad_pre + p].copy_(x[..., -(p + 1)])
    return padding_buffer
def padding(
        x: torch.Tensor,
        dim: int,
        pad_pre: int,
        pad_post: int,
        padding_type: typing.Optional[str]='reflect') -> torch.Tensor:
    """Pad ``x`` along ``dim``; supports MATLAB-style 'reflect' or None (no-op)."""
    if padding_type is None:
        return x
    if padding_type != 'reflect':
        raise ValueError('{} padding is not supported!'.format(padding_type))
    return reflect_padding(x, dim, pad_pre, pad_post)
def get_padding(
        base: torch.Tensor,
        kernel_size: int,
        x_size: int) -> typing.Tuple[int, int, torch.Tensor]:
    """Compute padding so every kernel tap indexes inside the signal.

    ``base`` holds, per output position, the leftmost source index used by
    the kernel. Indices are shifted so they stay valid after pre-padding.
    """
    base = base.long()
    r_min = base.min()
    # Rightmost index any tap will touch, before any shifting.
    r_max = base.max() + kernel_size - 1
    pad_pre = 0
    if r_min <= 0:
        pad_pre = (-r_min).item()
        base += pad_pre
    pad_post = (r_max - x_size + 1).item() if r_max >= x_size else 0
    return pad_pre, pad_post, base
def get_weight(
        dist: torch.Tensor,
        kernel_size: int,
        kernel: str='cubic',
        sigma: float=2.0,
        antialiasing_factor: float=1) -> torch.Tensor:
    """Evaluate per-tap kernel weights and normalize them per output position."""
    # Row k holds (dist - k): the signed offset of tap k from each sample point.
    taps = dist.new_zeros(kernel_size, len(dist))
    for offset, row in enumerate(taps):
        row.copy_(dist - offset)
    # Scaling offsets widens (downsampling) / shrinks (upsampling) the kernel.
    taps *= antialiasing_factor
    if kernel == 'cubic':
        weight = cubic_contribution(taps)
    elif kernel == 'gaussian':
        weight = gaussian_contribution(taps, sigma=sigma)
    else:
        raise ValueError('{} kernel is not supported!'.format(kernel))
    # Each column (output position) must sum to one.
    weight /= weight.sum(dim=0, keepdim=True)
    return weight
def reshape_tensor(x: torch.Tensor, dim: int, kernel_size: int) -> torch.Tensor:
    """Unfold ``x`` along one spatial dim into (B, kernel_size, h_out, w_out)."""
    if dim == 2 or dim == -2:
        # Vertical resize: the kernel spans rows.
        k = (kernel_size, 1)
        h_out = x.size(-2) - kernel_size + 1
        w_out = x.size(-1)
    else:
        # Horizontal resize: the kernel spans columns.
        k = (1, kernel_size)
        h_out = x.size(-2)
        w_out = x.size(-1) - kernel_size + 1
    patches = F.unfold(x, k)
    return patches.view(patches.size(0), -1, h_out, w_out)
def reshape_input(
        x: torch.Tensor) -> typing.Tuple[torch.Tensor, _I, _I, _I, _I]:
    """Normalize 2/3/4-D input to (B*C, 1, H, W); report original B and C.

    Missing dimensions are reported as ``None`` so reshape_output can
    restore the exact input rank afterwards.
    """
    if x.dim() == 4:
        b, c, h, w = x.size()
    elif x.dim() == 3:
        b = None
        c, h, w = x.size()
    elif x.dim() == 2:
        b = c = None
        h, w = x.size()
    else:
        raise ValueError('{}-dim Tensor is not supported!'.format(x.dim()))
    return x.view(-1, 1, h, w), b, c, h, w
def reshape_output(
        x: torch.Tensor, b: _I, c: _I) -> torch.Tensor:
    """Undo reshape_input: restore the original batch/channel dimensions."""
    rh, rw = x.size(-2), x.size(-1)
    if b is not None:
        return x.view(b, c, rh, rw)  # 4-dim input
    if c is not None:
        return x.view(c, rh, rw)  # 3-dim input
    return x.view(rh, rw)  # 2-dim input
def cast_input(x: torch.Tensor) -> typing.Tuple[torch.Tensor, typing.Optional[torch.dtype]]:
    """Promote non-float input to float32 for filtering.

    Returns the (possibly converted) tensor and the original dtype when a
    conversion happened (so cast_output can restore it); float32/float64
    input passes through untouched and yields ``None``.
    """
    # BUG FIX: the original condition was
    #     x.dtype != torch.float32 or x.dtype != torch.float64
    # which is always true, so float64 input was silently downcast to
    # float32 (losing precision) and converted back afterwards.
    if x.dtype not in (torch.float32, torch.float64):
        dtype = x.dtype
        x = x.float()
    else:
        dtype = None
    return x, dtype
def cast_output(x: torch.Tensor, dtype: _D) -> torch.Tensor:
    """Cast the result back to the dtype recorded by cast_input (None = keep)."""
    if dtype is None:
        return x
    if not dtype.is_floating_point:
        x = x.round()
    if dtype is torch.uint8:
        # Prevent wrap-around when narrowing to 8 bits.
        x = x.clamp(0, 255)
    return x.to(dtype=dtype)
def resize_1d(
        x: torch.Tensor,
        dim: int,
        size: typing.Optional[int],
        scale: typing.Optional[float],
        kernel: str='cubic',
        sigma: float=2.0,
        padding_type: str='reflect',
        antialiasing: bool=True) -> torch.Tensor:
    '''
    Resample ``x`` along a single spatial dimension.

    Args:
        x (torch.Tensor): A torch.Tensor of dimension (B x C, 1, H, W).
        dim (int): which spatial dim to resize (-2/2 = height, else width).
        size (int): output length along ``dim``.
        scale (float): output/input length ratio along ``dim``.
        kernel (str): 'cubic' or 'gaussian'.
        sigma (float): Gaussian width (used only for kernel='gaussian').
        padding_type (str): boundary handling, see ``padding``.
        antialiasing (bool): widen the kernel when downsampling.
    Return:
        torch.Tensor resized along ``dim``.
    '''
    # Identity case
    if scale == 1:
        return x
    # Default bicubic kernel with antialiasing (only when downsampling)
    if kernel == 'cubic':
        kernel_size = 4
    else:
        kernel_size = math.floor(6 * sigma)
    if antialiasing and (scale < 1):
        # Widen the kernel footprint by 1/scale when shrinking.
        antialiasing_factor = scale
        kernel_size = math.ceil(kernel_size / antialiasing_factor)
    else:
        antialiasing_factor = 1
    # We allow margin to both sizes
    kernel_size += 2
    # Weights only depend on the shape of input and output,
    # so we do not calculate gradients here.
    with torch.no_grad():
        pos = torch.linspace(
            0, size - 1, steps=size, dtype=x.dtype, device=x.device,
        )
        # Map output pixel centers into input coordinates (MATLAB convention).
        pos = (pos + 0.5) / scale - 0.5
        # base: leftmost tap index per output position; dist: fractional offset.
        base = pos.floor() - (kernel_size // 2) + 1
        dist = pos - base
        weight = get_weight(
            dist,
            kernel_size,
            kernel=kernel,
            sigma=sigma,
            antialiasing_factor=antialiasing_factor,
        )
        pad_pre, pad_post, base = get_padding(base, kernel_size, x.size(dim))
    # To backpropagate through x
    x_pad = padding(x, dim, pad_pre, pad_post, padding_type=padding_type)
    unfold = reshape_tensor(x_pad, dim, kernel_size)
    # Subsampling first
    if dim == 2 or dim == -2:
        sample = unfold[..., base, :]
        weight = weight.view(1, kernel_size, sample.size(2), 1)
    else:
        sample = unfold[..., base]
        weight = weight.view(1, kernel_size, 1, sample.size(3))
    # Apply the kernel
    x = sample * weight
    x = x.sum(dim=1, keepdim=True)
    return x
def downsampling_2d(
        x: torch.Tensor,
        k: torch.Tensor,
        scale: int,
        padding_type: str='reflect') -> torch.Tensor:
    """Downsample by an integer ``scale`` via a strided 2-D convolution with
    a user-supplied kernel ``k``, applied independently to each channel."""
    c = x.size(1)
    k_h = k.size(-2)
    k_w = k.size(-1)
    k = k.to(dtype=x.dtype, device=x.device)
    k = k.view(1, 1, k_h, k_w)
    k = k.repeat(c, c, 1, 1)
    # Multiplying by the identity zeroes every cross-channel tap, turning the
    # (c, c, kh, kw) weight into a per-channel (depthwise-like) filter.
    e = torch.eye(c, dtype=k.dtype, device=k.device, requires_grad=False)
    e = e.view(c, c, 1, 1)
    k = k * e
    # Pad so the strided conv stays centered on the sampling grid.
    pad_h = (k_h - scale) // 2
    pad_w = (k_w - scale) // 2
    x = padding(x, -2, pad_h, pad_h, padding_type=padding_type)
    x = padding(x, -1, pad_w, pad_w, padding_type=padding_type)
    y = F.conv2d(x, k, padding=0, stride=scale)
    return y
def imresize(
        input: torch.Tensor,
        scale: typing.Optional[float]=None,
        sizes: typing.Optional[typing.Tuple[int, int]]=None,
        kernel: typing.Union[str, torch.Tensor]='cubic',
        sigma: float=2,
        rotation_degree: float=0,
        padding_type: str='reflect',
        antialiasing: bool=True) -> torch.Tensor:
    '''
    Resize a 2/3/4-D tensor, reproducing MATLAB's imresize('bicubic').

    Args:
        x (torch.Tensor): 2-D (H, W), 3-D (C, H, W) or 4-D (B, C, H, W).
        scale (float): output/input ratio (exactly one of scale/sizes).
        sizes (tuple(int, int)): target (H, W) (exactly one of scale/sizes).
        kernel (str, default='cubic'): 'cubic', 'gaussian', or a tensor
            kernel for integer-factor downsampling.
        sigma (float, default=2): Gaussian width when kernel='gaussian'.
        rotation_degree (float, default=0): unused here.
        padding_type (str, default='reflect'): boundary handling.
        antialiasing (bool, default=True): widen the kernel when shrinking.
    Return:
        torch.Tensor: resized tensor with the input's rank and dtype.
    '''
    if scale is None and sizes is None:
        raise ValueError('One of scale or sizes must be specified!')
    if scale is not None and sizes is not None:
        raise ValueError('Please specify scale or sizes to avoid conflict!')
    # Flatten to (B*C, 1, H, W) so both spatial passes see one layout.
    x, b, c, h, w = reshape_input(input)
    if sizes is None:
        '''
        # Check if we can apply the convolution algorithm
        scale_inv = 1 / scale
        if isinstance(kernel, str) and scale_inv.is_integer():
            kernel = discrete_kernel(kernel, scale, antialiasing=antialiasing)
        elif isinstance(kernel, torch.Tensor) and not scale_inv.is_integer():
            raise ValueError(
                'An integer downsampling factor '
                'should be used with a predefined kernel!'
            )
        '''
        # Determine output size
        sizes = (math.ceil(h * scale), math.ceil(w * scale))
        scales = (scale, scale)
    if scale is None:
        scales = (sizes[0] / h, sizes[1] / w)
    # Work in float32/float64; the original dtype is restored at the end.
    x, dtype = cast_input(x)
    if isinstance(kernel, str):
        # Shared keyword arguments across dimensions
        kwargs = {
            'kernel': kernel,
            'sigma': sigma,
            'padding_type': padding_type,
            'antialiasing': antialiasing,
        }
        # Core resizing module: two separable 1-D passes (height, then width).
        x = resize_1d(x, -2, size=sizes[0], scale=scales[0], **kwargs)
        x = resize_1d(x, -1, size=sizes[1], scale=scales[1], **kwargs)
    elif isinstance(kernel, torch.Tensor):
        # Tensor kernels take the strided-convolution path (integer factors).
        x = downsampling_2d(x, kernel, scale=int(1 / scale))
    x = reshape_output(x, b, c)
    x = cast_output(x, dtype)
    return x
if __name__ == '__main__':
    # Just for debugging
    # Manual smoke test: downscale an 8x8 ramp by 2 and print the result;
    # the commented-out snippets below exercise other code paths by hand.
    torch.set_printoptions(precision=4, sci_mode=False, edgeitems=16, linewidth=200)
    a = torch.arange(64).float().view(1, 1, 8, 8)
    z = imresize(a, 0.5)
    print(z)
    #a = torch.arange(16).float().view(1, 1, 4, 4)
    '''
    a = torch.zeros(1, 1, 4, 4)
    a[..., 0, 0] = 100
    a[..., 1, 0] = 10
    a[..., 0, 1] = 1
    a[..., 0, -1] = 100
    a = torch.zeros(1, 1, 4, 4)
    a[..., -1, -1] = 100
    a[..., -2, -1] = 10
    a[..., -1, -2] = 1
    a[..., -1, 0] = 100
    '''
    #b = imresize(a, sizes=(3, 8), antialiasing=False)
    #c = imresize(a, sizes=(11, 13), antialiasing=True)
    #c = imresize(a, sizes=(4, 4), antialiasing=False, kernel='gaussian', sigma=1)
    #print(a)
    #print(b)
    #print(c)
    #r = discrete_kernel('cubic', 1 / 3)
    #print(r)
    '''
    a = torch.arange(225).float().view(1, 1, 15, 15)
    imresize(a, sizes=[5, 5])
    '''
| 13,613 | 27.904459 | 84 | py |
PLRDiff | PLRDiff-main/guided_diffusion/rsfac_gaussian_diffusion.py | """
This code started out as a PyTorch port of the following:
https://github.com/HJ-harry/MCG_diffusion/blob/main/guided_diffusion/gaussian_diffusion.py
The conditions are changed and coefficient matrix estimation is added.
"""
import enum
import math
import numpy as np
import torch as th
from torch.autograd import grad
import torch.nn.functional as nF
from functools import partial
import torch.nn.parameter as Para
from .core import imresize
from os.path import join as join
def _warmup_beta(linear_start, linear_end, n_timestep, warmup_frac):
betas = linear_end * np.ones(n_timestep, dtype=np.float64)
warmup_time = int(n_timestep * warmup_frac)
betas[:warmup_time] = np.linspace(
linear_start, linear_end, warmup_time, dtype=np.float64)
return betas
def make_beta_schedule(schedule, n_timestep, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3):
    """Return a length-``n_timestep`` float64 beta schedule of the given family.

    All families return a numpy array except 'cosine', which returns a
    torch tensor (as in the original implementation).
    """
    if schedule == 'quad':
        # Linear in sqrt(beta) space, then squared.
        root = np.linspace(linear_start ** 0.5, linear_end ** 0.5,
                           n_timestep, dtype=np.float64)
        betas = root ** 2
    elif schedule == 'linear':
        betas = np.linspace(linear_start, linear_end,
                            n_timestep, dtype=np.float64)
    elif schedule == 'warmup10':
        betas = _warmup_beta(linear_start, linear_end, n_timestep, 0.1)
    elif schedule == 'warmup50':
        betas = _warmup_beta(linear_start, linear_end, n_timestep, 0.5)
    elif schedule == 'const':
        betas = np.full(n_timestep, linear_end, dtype=np.float64)
    elif schedule == 'jsd':
        # 1/T, 1/(T-1), 1/(T-2), ..., 1
        betas = 1. / np.linspace(n_timestep, 1, n_timestep, dtype=np.float64)
    elif schedule == "cosine":
        steps = th.arange(n_timestep + 1, dtype=th.float64) / n_timestep + cosine_s
        alphas = th.cos(steps / (1 + cosine_s) * math.pi / 2).pow(2)
        alphas = alphas / alphas[0]
        betas = (1 - alphas[1:] / alphas[:-1]).clamp(max=0.999)
    else:
        raise NotImplementedError(schedule)
    return betas
def get_named_beta_schedule(schedule_name, num_diffusion_timesteps):
    """
    Get a pre-defined beta schedule for the given name.

    The beta schedule library consists of beta schedules which remain similar
    in the limit of num_diffusion_timesteps.
    Beta schedules may be added, but should not be removed or changed once
    they are committed to maintain backwards compatibility.
    """
    if schedule_name == "linear":
        # The linear endpoints were tuned for 1000 steps; rescale them so
        # other step counts remain comparable.
        scale = 1000 / num_diffusion_timesteps
        return np.linspace(
            scale * 0.0001, scale * 0.02, num_diffusion_timesteps, dtype=np.float64
        )
    if schedule_name == "cosine":
        return betas_for_alpha_bar(
            num_diffusion_timesteps,
            lambda t: math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2,
        )
    raise NotImplementedError(f"unknown beta schedule: {schedule_name}")
def betas_for_alpha_bar(num_diffusion_timesteps, alpha_bar, max_beta=0.999):
    """
    Create a beta schedule that discretizes the given alpha_t_bar function,
    which defines the cumulative product of (1-beta) over time from t = [0,1].

    :param num_diffusion_timesteps: the number of betas to produce.
    :param alpha_bar: a lambda that takes an argument t from 0 to 1 and
                      produces the cumulative product of (1-beta) up to that
                      part of the diffusion process.
    :param max_beta: the maximum beta to use; use values lower than 1 to
                     prevent singularities.
    """
    # beta_i = 1 - alpha_bar(t_{i+1}) / alpha_bar(t_i) on a uniform grid.
    edges = [i / num_diffusion_timesteps for i in range(num_diffusion_timesteps + 1)]
    betas = [
        min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta)
        for t1, t2 in zip(edges[:-1], edges[1:])
    ]
    return np.array(betas)
class ModelMeanType(enum.Enum):
    """
    Which type of output the model predicts.

    Determines how the raw network output is interpreted when recovering
    the denoised sample during the reverse process.
    """
    PREVIOUS_X = enum.auto()  # the model predicts x_{t-1}
    START_X = enum.auto()  # the model predicts x_0
    EPSILON = enum.auto()  # the model predicts epsilon (the added noise)
class ModelVarType(enum.Enum):
    """
    What is used as the model's output variance.

    The LEARNED_RANGE option has been added to allow the model to predict
    values between FIXED_SMALL and FIXED_LARGE, making its job easier.
    """
    LEARNED = enum.auto()  # variance predicted directly by the network
    FIXED_SMALL = enum.auto()  # fixed to the posterior variance
    FIXED_LARGE = enum.auto()  # fixed to beta_t
    LEARNED_RANGE = enum.auto()  # interpolated between the two fixed choices
class LossType(enum.Enum):
    """Which training objective the diffusion model optimizes."""
    MSE = enum.auto()  # use raw MSE loss (and KL when learning variances)
    RESCALED_MSE = (
        enum.auto()
    )  # use raw MSE loss (with RESCALED_KL when learning variances)
    KL = enum.auto()  # use the variational lower-bound
    RESCALED_KL = enum.auto()  # like KL, but rescale to estimate the full VLB
    def is_vb(self):
        # True for the variational-bound losses (KL variants).
        return self == LossType.KL or self == LossType.RESCALED_KL
class Param(th.nn.Module):
    """Minimal nn.Module wrapper exposing a single trainable tensor ``E``."""
    def __init__(self, data):
        super().__init__()
        # Registering as a Parameter makes ``data`` show up in .parameters().
        self.E = Para.Parameter(data=data)

    def forward(self):
        # Calling the module simply yields the wrapped parameter.
        return self.E
class GaussianDiffusion:
"""
Utilities for training and sampling diffusion models.
Ported directly from here, and then adapted over time to further experimentation.
https://github.com/hojonathanho/diffusion/blob/1e0dceb3b3495bbe19116a5e1b3596cd0706c543/diffusion_tf/diffusion_utils_2.py#L42
:param betas: a 1-D numpy array of betas for each diffusion timestep,
starting at T and going to 1.
:param model_mean_type: a ModelMeanType determining what the model outputs.
:param model_var_type: a ModelVarType determining how variance is output.
"""
def __init__(
self,
*,
betas
):
# Use float64 for accuracy.
betas = np.array(betas, dtype=np.float64)
self.betas = betas
assert len(betas.shape) == 1, "betas must be 1-D"
assert (betas > 0).all() and (betas <= 1).all()
self.num_timesteps = int(betas.shape[0])
alphas = 1.0 - betas
self.alphas_cumprod = np.cumprod(alphas, axis=0)
self.alphas_cumprod_prev = np.append(1.0, self.alphas_cumprod[:-1])
self.alphas_cumprod_next = np.append(self.alphas_cumprod[1:], 0.0)
self.sqrt_alphas_cumprod_prev = np.sqrt(np.append(1., self.alphas_cumprod))
assert self.alphas_cumprod_prev.shape == (self.num_timesteps,)
# calculations for diffusion q(x_t | x_{t-1}) and others
self.sqrt_alphas_cumprod = np.sqrt(self.alphas_cumprod)
self.sqrt_one_minus_alphas_cumprod = np.sqrt(1.0 - self.alphas_cumprod)
self.log_one_minus_alphas_cumprod = np.log(1.0 - self.alphas_cumprod)
self.sqrt_recip_alphas_cumprod = np.sqrt(1.0 / self.alphas_cumprod)
self.sqrt_recipm1_alphas_cumprod = np.sqrt(1.0 / self.alphas_cumprod - 1)
# calculations for posterior q(x_{t-1} | x_t, x_0)
self.posterior_variance = (
betas * (1.0 - self.alphas_cumprod_prev) / (1.0 - self.alphas_cumprod)
)
# log calculation clipped because the posterior variance is 0 at the
# beginning of the diffusion chain.
self.posterior_log_variance_clipped = np.log(
np.append(self.posterior_variance[1], self.posterior_variance[1:])
)
self.posterior_mean_coef1 = (
betas * np.sqrt(self.alphas_cumprod_prev) / (1.0 - self.alphas_cumprod)
)
self.posterior_mean_coef2 = (
(1.0 - self.alphas_cumprod_prev)
* np.sqrt(alphas)
/ (1.0 - self.alphas_cumprod)
)
def q_mean_variance(self, x_start, t):
    """
    Return the mean, variance and log-variance of q(x_t | x_0).
    """
    shape = x_start.shape
    mu = x_start * _extract_into_tensor(self.sqrt_alphas_cumprod, t, shape)
    var = _extract_into_tensor(1.0 - self.alphas_cumprod, t, shape)
    log_var = _extract_into_tensor(self.log_one_minus_alphas_cumprod, t, shape)
    return mu, var, log_var
def q_sample(self, x_start, t, noise=None):
    """
    Diffuse x_start for t steps, i.e. sample from q(x_t | x_0).
    Draws fresh Gaussian noise when `noise` is not supplied.
    """
    if noise is None:
        noise = th.randn_like(x_start)
    assert noise.shape == x_start.shape
    coef_x = _extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape)
    coef_n = _extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape)
    return coef_x * x_start + coef_n * noise
def q_posterior_mean_variance(self, x_start, x_t, t):
    """
    Mean and (clipped log-)variance of the diffusion posterior
    q(x_{t-1} | x_t, x_0).
    """
    assert x_start.shape == x_t.shape
    shape = x_t.shape
    coef1 = _extract_into_tensor(self.posterior_mean_coef1, t, shape)
    coef2 = _extract_into_tensor(self.posterior_mean_coef2, t, shape)
    posterior_mean = coef1 * x_start + coef2 * x_t
    posterior_variance = _extract_into_tensor(self.posterior_variance, t, shape)
    posterior_log_variance_clipped = _extract_into_tensor(
        self.posterior_log_variance_clipped, t, shape
    )
    batch = x_start.shape[0]
    assert (
        posterior_mean.shape[0]
        == posterior_variance.shape[0]
        == posterior_log_variance_clipped.shape[0]
        == batch
    )
    return posterior_mean, posterior_variance, posterior_log_variance_clipped
def p_mean_variance(
    self, model, x, t, clip_denoised=True, denoised_fn=None
):
    """
    Apply the model to predict x_0, then return the posterior mean and
    log-variance of p(x_{t-1} | x_t) plus the x_0 prediction.

    :param model: callable taking (x, noise_level) and returning predicted eps.
    :param clip_denoised: clamp the predicted x_0 into [-1, 1].
    :param denoised_fn: optional hook applied to the x_0 prediction before clamping.
    """
    B = x.shape[0]
    # Continuous noise level at step t (index t+1 into the length-T+1 table).
    # NOTE(review): t.item() only works for a single-element tensor, so this
    # assumes all batch items share one timestep — confirm with callers.
    noise_level = th.FloatTensor([self.sqrt_alphas_cumprod_prev[int(t.item())+1]]).repeat(B, 1).to(x.device)
    model_output = model(x, noise_level)
    def process_xstart(x):
        # Optional user hook, then clamping into the data range.
        if denoised_fn is not None:
            x = denoised_fn(x)
        if clip_denoised:
            return x.clamp(-1, 1)
        return x
    pred_xstart = process_xstart(
        self._predict_xstart_from_eps(x_t=x, t=t, eps=model_output)
    )
    model_mean, _, posterior_log_variance = self.q_posterior_mean_variance(
        x_start=pred_xstart, x_t=x, t=t
    )
    return {
        "mean": model_mean,
        "log_variance": posterior_log_variance,
        "pred_xstart": pred_xstart,
    }
def _predict_xstart_from_eps(self, x_t, t, eps):
    """Recover x_0 from x_t and the model's predicted noise eps."""
    assert x_t.shape == eps.shape
    recip = _extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape)
    recipm1 = _extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape)
    return recip * x_t - recipm1 * eps
def _predict_xstart_from_xprev(self, x_t, t, xprev):
    """Recover x_0 from x_t and a predicted x_{t-1} by inverting the posterior mean."""
    assert x_t.shape == xprev.shape
    inv_coef1 = _extract_into_tensor(1.0 / self.posterior_mean_coef1, t, x_t.shape)
    ratio = _extract_into_tensor(
        self.posterior_mean_coef2 / self.posterior_mean_coef1, t, x_t.shape
    )
    # (xprev - coef2 * x_t) / coef1
    return inv_coef1 * xprev - ratio * x_t
def _predict_eps_from_xstart(self, x_t, t, pred_xstart):
    """Noise eps implied by a given (x_t, x_0) pair — inverse of the eps prediction."""
    numer = (
        _extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t
        - pred_xstart
    )
    denom = _extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape)
    return numer / denom
def p_sample(
    self,
    model,
    x,
    t,
    clip_denoised=True,
    denoised_fn=None,
):
    """
    Sample x_{t-1} from the model at timestep t.

    :return: dict with 'sample' (the stochastic draw) and 'pred_xstart'.
    """
    out = self.p_mean_variance(
        model,
        x,
        t,
        clip_denoised=clip_denoised,
        denoised_fn=denoised_fn,
    )
    noise = th.randn_like(x)
    # Per-sample mask shaped (B, 1, ..., 1) that zeroes the noise term at t == 0,
    # making the final denoising step deterministic.
    nonzero_param = (
        (t != 0).float().view(-1, *([1] * (len(x.shape) - 1)))
    )  # no noise when t == 0
    sample = out["mean"] + nonzero_param * th.exp(0.5 * out["log_variance"]) * noise
    return {"sample": sample, "pred_xstart": out["pred_xstart"]}
def p_sample_loop(
    self,
    model,
    shape,
    Rr,
    noise=None,
    clip_denoised=True,
    denoised_fn=None,
    model_condition=None,
    param=None,
    save_root=None,
    progress=True
):
    """
    Run the full reverse diffusion chain and return the final sample plus
    the coefficient matrix E estimated by the progressive sampler.

    :param shape: (B, C, H, W) of the target image.
    :param Rr: number of latent/base bands sampled by the chain.
    :return: (final sample tensor, E).
    """
    finalX = None
    finalE = None
    dstep = 1000  # NOTE(review): appears unused in this method — confirm before removing
    # Drain the progressive generator; keep only the last yielded state.
    for (sample, E) in self.p_sample_loop_progressive(
        model,
        shape,
        Rr,
        noise=noise,
        clip_denoised=clip_denoised,
        denoised_fn=denoised_fn,
        model_condition=model_condition,
        progress=progress,
        param=param,
        save_root=save_root
    ):
        finalX = sample  # dict with 'sample' and 'pred_xstart'
        finalE = E
    return finalX["sample"], finalE
def p_sample_loop_progressive(
    self,
    model,
    shape,
    Rr,
    noise=None,
    clip_denoised=True,
    denoised_fn=None,
    cond_fn=None,
    model_condition=None,
    device=None,
    progress=False,
    param=None,
    save_root=None  # use it for output intermediate predictions
):
    """
    Generator over the guided reverse-diffusion chain for pansharpening.

    At each timestep the unconditional sample is corrected by the gradient of
    a data-fidelity term built from the low-resolution multispectral image
    (LRMS) and the panchromatic image (PAN). Yields (out_dict, E) per step.

    :param Rr: number of base bands the chain samples (latent channel count).
    :param model_condition: dict with "LRMS" and "PAN" tensors.
    :param param: dict with 'kernel', 'k_s', 'scale', 'Band', 'PH',
        'eta1', 'eta2' (guidance weights / degradation operators).
    """
    Bb, Cc, Hh, Ww = shape
    Rr = Rr
    if device is None:
        device = next(model.parameters()).device
    assert isinstance(shape, (tuple, list))
    # Start from supplied noise or a fresh Gaussian with Rr (not Cc) channels.
    if noise is not None:
        img = noise
    else:
        img = th.randn((Bb, Rr, Hh, Ww), device=device)
    indices = list(range(self.num_timesteps))[::-1]
    if progress:
        from tqdm.auto import tqdm
        indices = tqdm(indices)
    # Degradation operators: blur with the given kernel, then downsample.
    blur = partial(nF.conv2d, weight=param['kernel'], padding=int((param['k_s'] - 1)/2), groups=Cc)
    down = partial(imresize, scale=1/param['scale'])
    LRMS = model_condition["LRMS"]
    PAN = model_condition["PAN"]
    ## estimate coefficient matrix E
    Eband = param['Band']
    bimg = th.index_select(LRMS, 1, Eband).reshape(Bb, Rr, -1) # base tensor from LRMS
    # estimate coefficient matrix E by solving a (ridge-regularized) least
    # squares problem: E = argmin || LRMS - E * base ||
    t1 = th.matmul(bimg, bimg.transpose(1,2)) + 1e-4*th.eye(Rr).type(bimg.dtype).to(device)
    t2 = th.matmul(LRMS.reshape(Bb, Cc, -1), bimg.transpose(1,2))
    E = th.matmul(t2, th.inverse(t1))
    del bimg, t1, t2
    for i in indices:
        t = th.tensor([i] * shape[0], device=device)
        # re-instantiate requires_grad for backpropagation through the sampler
        img = img.requires_grad_()
        out = self.p_sample(
            model,
            img,
            t,
            clip_denoised=clip_denoised,
            denoised_fn=denoised_fn
        )
        # Map predicted x_0 from [-1, 1] to [0, 1], lift bands via E.
        xhat_1 = (out["pred_xstart"] +1)/2
        xhat_1 = th.matmul(E, xhat_1.reshape(Bb, Rr, -1)).reshape(*shape)
        # Spectral fidelity: || LRMS - down(blur(EX)) ||
        xhat_2 = blur(input=xhat_1)
        xhat_3 = down(input=xhat_2)
        norm1 = th.norm(LRMS - xhat_3)
        # Spatial fidelity: || PAN - (EX)H ||
        xhat_4 = th.matmul(xhat_1.permute(0,2,3,1), param["PH"]).permute(0,3,1,2) # HEX
        norm2 = th.norm(PAN - xhat_4) # ||P - HEX||
        # Guidance: descend the combined fidelity w.r.t. the current sample.
        norm_gradX = grad(outputs=norm1 + (param['eta2']/param['eta1'])*norm2, inputs=img)[0]
        out["sample"] = out["sample"] - param['eta1']*norm_gradX
        del norm_gradX
        yield out, E
        img = out["sample"]
        # Clears out small amount of gpu memory. If not used, memory usage will accumulate and OOM will occur.
        img.detach_()
def _extract_into_tensor(arr, timesteps, broadcast_shape):
"""
Extract values from a 1-D numpy array for a batch of indices.
:param arr: the 1-D numpy array.
:param timesteps: a tensor of indices into the array to extract.
:param broadcast_shape: a larger shape of K dimensions with the batch
dimension equal to the length of timesteps.
:return: a tensor of shape [batch_size, 1, ...] where the shape has K dims.
"""
res = th.from_numpy(arr).to(device=timesteps.device)[timesteps].float()
while len(res.shape) < len(broadcast_shape):
res = res[..., None]
return res.expand(broadcast_shape)
| 16,226 | 34.900442 | 129 | py |
PLRDiff | PLRDiff-main/guided_diffusion/sr3_modules/unet.py | import math
import torch
from torch import nn
import torch.nn.functional as F
from inspect import isfunction
def exists(x):
    # A value "exists" unless it is literally None.
    return not (x is None)
def default(val, d):
    """Return `val` when it is not None; otherwise the fallback `d`, calling it first if it is a plain function."""
    if val is not None:
        return val
    return d() if isfunction(d) else d
# PositionalEncoding Source: https://github.com/lmnt-com/wavegrad/blob/master/src/wavegrad/model.py
class PositionalEncoding(nn.Module):
    """Sinusoidal embedding of a continuous noise level (WaveGrad-style)."""

    def __init__(self, dim):
        super().__init__()
        self.dim = dim

    def forward(self, noise_level):
        half = self.dim // 2
        # Geometric frequency ladder: exp(-log(1e4) * k / half), k = 0..half-1.
        freqs = torch.arange(half, dtype=noise_level.dtype,
                             device=noise_level.device) / half
        freqs = torch.exp(-math.log(1e4) * freqs)
        args = noise_level.unsqueeze(1) * freqs.unsqueeze(0)
        return torch.cat([args.sin(), args.cos()], dim=-1)
class FeatureWiseAffine(nn.Module):
    """
    Inject a noise embedding into a feature map.

    Plain mode adds a per-channel shift; affine mode applies
    (1 + gamma) * x + beta, with gamma/beta both produced by one Linear.
    """

    def __init__(self, in_channels, out_channels, use_affine_level=False):
        super().__init__()
        self.use_affine_level = use_affine_level
        # Doubled output width in affine mode (bool arithmetic: 1 + True == 2).
        self.noise_func = nn.Sequential(
            nn.Linear(in_channels, out_channels * (1 + self.use_affine_level))
        )

    def forward(self, x, noise_embed):
        b = x.shape[0]
        projected = self.noise_func(noise_embed).view(b, -1, 1, 1)
        if not self.use_affine_level:
            return x + projected
        gamma, beta = projected.chunk(2, dim=1)
        return (1 + gamma) * x + beta
class Swish(nn.Module):
    """Swish activation: x * sigmoid(x) (a.k.a. SiLU)."""

    def forward(self, x):
        return torch.sigmoid(x) * x
class Upsample(nn.Module):
    """2x nearest-neighbour upsampling followed by a channel-preserving 3x3 conv."""

    def __init__(self, dim):
        super().__init__()
        self.up = nn.Upsample(scale_factor=2, mode="nearest")
        self.conv = nn.Conv2d(dim, dim, 3, padding=1)

    def forward(self, x):
        enlarged = self.up(x)
        return self.conv(enlarged)
class Downsample(nn.Module):
    """Halve spatial resolution with a strided 3x3 conv (stride 2, padding 1)."""

    def __init__(self, dim):
        super().__init__()
        self.conv = nn.Conv2d(dim, dim, 3, 2, 1)

    def forward(self, x):
        reduced = self.conv(x)
        return reduced
# building block modules
class Block(nn.Module):
    """GroupNorm -> Swish -> optional Dropout -> 3x3 Conv, as one sequential unit."""

    def __init__(self, dim, dim_out, groups=32, dropout=0):
        super().__init__()
        drop_layer = nn.Dropout(dropout) if dropout != 0 else nn.Identity()
        self.block = nn.Sequential(
            nn.GroupNorm(groups, dim),
            Swish(),
            drop_layer,
            nn.Conv2d(dim, dim_out, 3, padding=1),
        )

    def forward(self, x):
        out = self.block(x)
        return out
class ResnetBlock(nn.Module):
    """
    Residual block: Block -> noise-embedding affine -> Block, plus a skip
    connection (1x1 conv when the channel count changes).
    """

    def __init__(self, dim, dim_out, noise_level_emb_dim=None, dropout=0, use_affine_level=False, norm_groups=32):
        super().__init__()
        self.noise_func = FeatureWiseAffine(
            noise_level_emb_dim, dim_out, use_affine_level)
        self.block1 = Block(dim, dim_out, groups=norm_groups)
        self.block2 = Block(dim_out, dim_out, groups=norm_groups, dropout=dropout)
        # Identity skip when channels already match.
        self.res_conv = nn.Conv2d(
            dim, dim_out, 1) if dim != dim_out else nn.Identity()

    def forward(self, x, time_emb):
        # BUGFIX/cleanup: the original unpacked `b, c, h, w = x.shape` and then
        # immediately shadowed `h` with the activation below; the unused (and
        # confusing) unpack is removed.
        h = self.block1(x)
        h = self.noise_func(h, time_emb)
        h = self.block2(h)
        return h + self.res_conv(x)
class SelfAttention(nn.Module):
    """
    Full (spatial) self-attention over a 2-D feature map with a residual
    connection. Q, K, V come from a single 1x1 conv on the normalized input.
    """
    def __init__(self, in_channel, n_head=1, norm_groups=32):
        super().__init__()
        self.n_head = n_head
        self.norm = nn.GroupNorm(norm_groups, in_channel)
        # One conv produces Q, K and V stacked along channels; no bias.
        self.qkv = nn.Conv2d(in_channel, in_channel * 3, 1, bias=False)
        self.out = nn.Conv2d(in_channel, in_channel, 1)
    def forward(self, input):
        batch, channel, height, width = input.shape
        n_head = self.n_head
        head_dim = channel // n_head
        norm = self.norm(input)
        # (B, heads, 3*head_dim, H, W) -> three (B, heads, head_dim, H, W) chunks
        qkv = self.qkv(norm).view(batch, n_head, head_dim * 3, height, width)
        query, key, value = qkv.chunk(3, dim=2) # bhdyx
        # Pairwise dot products between all spatial positions; note the scale
        # divides by sqrt(channel) (the full channel count, not head_dim).
        attn = torch.einsum(
            "bnchw, bncyx -> bnhwyx", query, key
        ).contiguous() / math.sqrt(channel)
        # Softmax over the flattened key positions.
        attn = attn.view(batch, n_head, height, width, -1)
        attn = torch.softmax(attn, -1)
        attn = attn.view(batch, n_head, height, width, height, width)
        # Weighted sum of values, project back, residual add.
        out = torch.einsum("bnhwyx, bncyx -> bnchw", attn, value).contiguous()
        out = self.out(out.view(batch, channel, height, width))
        return out + input
class ResnetBlocWithAttn(nn.Module):
    """A ResnetBlock optionally followed by spatial self-attention."""

    def __init__(self, dim, dim_out, *, noise_level_emb_dim=None, norm_groups=32, dropout=0, with_attn=False):
        super().__init__()
        self.with_attn = with_attn
        self.res_block = ResnetBlock(
            dim, dim_out, noise_level_emb_dim, norm_groups=norm_groups, dropout=dropout)
        if with_attn:
            self.attn = SelfAttention(dim_out, norm_groups=norm_groups)

    def forward(self, x, time_emb):
        out = self.res_block(x, time_emb)
        return self.attn(out) if self.with_attn else out
def Reverse(lst):
    """Return a new list holding the elements of `lst` in reverse order."""
    # Idiom: list(reversed(...)) instead of a manual comprehension.
    return list(reversed(lst))
class UNet(nn.Module):
    """
    SR3-style UNet conditioned on a continuous noise level.

    Self-attention is inserted wherever the current resolution (tracked in
    `now_res`) is listed in `attn_res`. With `feat_need=True`, forward()
    returns the encoder/decoder feature lists instead of the output image.
    """

    def __init__(
        self,
        in_channel=6,
        out_channel=3,
        inner_channel=32,
        norm_groups=32,
        channel_mults=(1, 2, 4, 8, 8),
        # BUGFIX: the default was `(8)` — i.e. the plain int 8 — which makes
        # `now_res in attn_res` raise TypeError. A one-element tuple is intended.
        attn_res=(8,),
        res_blocks=3,
        dropout=0,
        with_noise_level_emb=True,
        image_size=128
    ):
        super().__init__()
        if with_noise_level_emb:
            noise_level_channel = inner_channel
            # Sinusoidal encoding followed by a small MLP.
            self.noise_level_mlp = nn.Sequential(
                PositionalEncoding(inner_channel),
                nn.Linear(inner_channel, inner_channel * 4),
                Swish(),
                nn.Linear(inner_channel * 4, inner_channel)
            )
        else:
            noise_level_channel = None
            self.noise_level_mlp = None
        num_mults = len(channel_mults)
        pre_channel = inner_channel
        # Channel counts of every skip connection, consumed (pop) by the decoder.
        feat_channels = [pre_channel]
        now_res = image_size
        self.init_conv = nn.Conv2d(in_channels=in_channel, out_channels=inner_channel, kernel_size=3, padding=1)
        downs = []
        for ind in range(num_mults):
            is_last = (ind == num_mults - 1)
            use_attn = (now_res in attn_res)
            channel_mult = inner_channel * channel_mults[ind]
            for _ in range(0, res_blocks):
                downs.append(ResnetBlocWithAttn(
                    pre_channel, channel_mult, noise_level_emb_dim=noise_level_channel, norm_groups=norm_groups, dropout=dropout, with_attn=use_attn))
                feat_channels.append(channel_mult)
                pre_channel = channel_mult
            if not is_last:
                downs.append(Downsample(pre_channel))
                feat_channels.append(pre_channel)
                now_res = now_res//2
        self.downs = nn.ModuleList(downs)
        self.mid = nn.ModuleList([
            ResnetBlocWithAttn(pre_channel, pre_channel, noise_level_emb_dim=noise_level_channel, norm_groups=norm_groups,
                               dropout=dropout, with_attn=True),
            ResnetBlocWithAttn(pre_channel, pre_channel, noise_level_emb_dim=noise_level_channel, norm_groups=norm_groups,
                               dropout=dropout, with_attn=False)
        ])
        ups = []
        for ind in reversed(range(num_mults)):
            is_last = (ind < 1)
            use_attn = (now_res in attn_res)
            channel_mult = inner_channel * channel_mults[ind]
            # One extra block per level to consume the Downsample skip.
            for _ in range(0, res_blocks+1):
                ups.append(ResnetBlocWithAttn(
                    pre_channel+feat_channels.pop(), channel_mult, noise_level_emb_dim=noise_level_channel, norm_groups=norm_groups,
                    dropout=dropout, with_attn=use_attn))
                pre_channel = channel_mult
            if not is_last:
                ups.append(Upsample(pre_channel))
                now_res = now_res*2
        self.ups = nn.ModuleList(ups)
        self.final_conv = Block(pre_channel, default(out_channel, in_channel), groups=norm_groups)

    def forward(self, x, time, feat_need=False):
        """
        :param x: input image batch.
        :param time: noise-level tensor fed to the embedding MLP.
        :param feat_need: when True, return (encoder feats, reversed decoder
            feats) for downstream heads; the output image is NOT returned.
        """
        t = self.noise_level_mlp(time) if exists(
            self.noise_level_mlp) else None
        # First downsampling layer
        x = self.init_conv(x)
        # Diffusion encoder
        feats = [x]
        for layer in self.downs:
            if isinstance(layer, ResnetBlocWithAttn):
                x = layer(x, t)
            else:
                x = layer(x)
            feats.append(x)
        if feat_need:
            fe = feats.copy()
        # Passing through middle layers
        for layer in self.mid:
            if isinstance(layer, ResnetBlocWithAttn):
                x = layer(x, t)
            else:
                x = layer(x)
        # Saving decoder features for the CD head
        if feat_need:
            fd = []
        # Diffusion decoder: concatenate the matching encoder skip per block.
        for layer in self.ups:
            if isinstance(layer, ResnetBlocWithAttn):
                x = layer(torch.cat((x, feats.pop()), dim=1), t)
                if feat_need:
                    fd.append(x)
            else:
                x = layer(x)
        # Final diffusion layer
        x = self.final_conv(x)
        if feat_need:
            return fe, Reverse(fd)
        else:
            return x
Defensive_Diffusion-testing | Defensive_Diffusion-testing/diffusion.py | import torch
import torchvision
from torch import nn
from torch.nn import functional as F
from torch.utils.data import DataLoader
from diffusers import DDPMScheduler, UNet2DModel
from matplotlib import pyplot as plt
from diffusers import DDIMScheduler, DDPMPipeline
from data.dataset import data_loader
import wandb
import tqdm
wandb.init(project="ml-708", entity="mbzuai-")
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
root_dir = "data/TB_data"
loader_, dataset = data_loader(root_dir=root_dir, batch_size=10)
train_dataloader = loader_['train']
def corrupt(x, amount):
    """Corrupt `x` by linear mixing with uniform noise: (1 - amount) * x + amount * noise."""
    noise = torch.rand_like(x)
    mix = amount.view(-1, 1, 1, 1)  # reshape so the per-sample amount broadcasts over CHW
    return (1 - mix) * x + mix * noise
#@markdown Trying UNet2DModel instead of BasicUNet:
# Dataloader (you can mess with batch size)
#batch_size = 70
#train_dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=True)
# How many runs through the data should we do?
n_epochs = 200
# Create the denoising network (diffusers UNet2DModel).
net = UNet2DModel(
    sample_size=224,  # the target image resolution
    in_channels=3,  # the number of input channels, 3 for RGB images
    out_channels=3,  # the number of output channels
    layers_per_block=2,
    act_fn="silu",
    add_attention=True,
    center_input_sample=False,
    downsample_padding=0,
    flip_sin_to_cos=False,
    freq_shift=1,
    mid_block_scale_factor=1,
    norm_eps=1e-06,
    norm_num_groups=32,
    time_embedding_type="positional",  # how many ResNet layers to use per UNet block
    block_out_channels=(128,
                        128,
                        256,
                        256,
                        512,
                        512),  # Roughly matching our basic unet example
    down_block_types=(
        "DownBlock2D",
        "DownBlock2D",
        "DownBlock2D",
        "DownBlock2D",
        "AttnDownBlock2D",
        "DownBlock2D"
    ),
    up_block_types=(
        "UpBlock2D",
        "AttnUpBlock2D",
        "UpBlock2D",
        "UpBlock2D",
        "UpBlock2D",
        "UpBlock2D"  # a regular ResNet upsampling block
    ),
)
net.to(device)
# Our loss function
loss_fn = nn.MSELoss()
# The optimizer
# NOTE(review): `opt` and `loss_fn` are defined here but the training loop
# below uses `optimizer` (AdamW) and F.mse_loss instead.
opt = torch.optim.Adam(net.parameters(), lr=1e-3)
lr = 1e-3
# Keeping a record of the losses for later viewing
losses = []
scheduler = DDIMScheduler(beta_end=0.02, beta_schedule="linear", beta_start=0.0001, clip_sample=True, num_train_timesteps=1000, prediction_type="epsilon")
image_pipe = DDPMPipeline(net, scheduler=scheduler)
image_pipe.to(device);
grad_accumulation_steps = 2  # @param
optimizer = torch.optim.AdamW(image_pipe.unet.parameters(), lr=lr)
# The training loop: epsilon-prediction DDPM fine-tuning with gradient accumulation.
for epoch in range(n_epochs):
    # BUGFIX: the module was imported with `import tqdm`, so the bare name is
    # the module object and `tqdm(...)` raised TypeError; use tqdm.tqdm.
    for step, batch in tqdm.tqdm(enumerate(train_dataloader), total=len(train_dataloader)):
        images, labels = batch
        clean_images = images.to(device)
        # Sample noise to add to the images
        noise = torch.randn(clean_images.shape).to(clean_images.device)
        bs = clean_images.shape[0]
        # Sample a random timestep for each image
        timesteps = torch.randint(
            0,
            image_pipe.scheduler.num_train_timesteps,
            (bs,),
            device=clean_images.device,
        ).long()
        # Add noise to the clean images according to the noise magnitude at
        # each timestep (this is the forward diffusion process)
        noisy_images = image_pipe.scheduler.add_noise(clean_images, noise, timesteps)
        # Get the model prediction for the noise
        noise_pred = image_pipe.unet(noisy_images, timesteps, return_dict=False)[0]
        # Compare the prediction with the actual noise:
        loss = F.mse_loss(
            noise_pred, noise
        )  # NB - trying to predict noise (eps) not (noisy_ims-clean_ims) or just (clean_ims)
        # Store for later plotting
        losses.append(loss.item())
        # BUGFIX: was `loss.backward(loss)`, which passes the loss value as the
        # upstream gradient and scales all gradients by it; a scalar loss needs
        # a plain backward().
        loss.backward()
        # Gradient accumulation:
        if (step + 1) % grad_accumulation_steps == 0:
            optimizer.step()
            optimizer.zero_grad()
    print(f"Epoch {epoch} average loss: {sum(losses[-len(train_dataloader):])/len(train_dataloader)}")
    image_pipe.save_pretrained(f"saved_model/my-finetuned-model_{epoch}")
Defensive_Diffusion-testing | Defensive_Diffusion-testing/test.py | import argparse
import torch
from tqdm import tqdm
import os
from models import mlp
from data.dataset import data_loader
from data.dataset import data_loader_attacks
root_dir = "./data/attack-data/0.03"
def test_vit(model, dataloader_test):
    """
    Evaluate a ViT classifier on a test loader.

    Args:
        model: ViT model (already on the GPU).
        dataloader_test: loader yielding (images, labels) batches.
    Returns:
        Mean per-batch accuracy, rounded to 2 decimals (also printed).
    """
    model.eval()
    running_acc = 0.0
    for batch_images, batch_labels in tqdm(dataloader_test):
        batch_images = batch_images.cuda()
        batch_labels = batch_labels.cuda()
        with torch.no_grad():
            logits = model(batch_images)
        preds = torch.argmax(logits, dim=-1)
        running_acc += sum(preds == batch_labels).float().item() / len(batch_labels)
    mean_acc = running_acc / len(dataloader_test)
    print(f'Testing accuracy = {(mean_acc):.4f}')
    return round(mean_acc, 2)
def test_mlps(mlps_list, dataloader_test, mlp_root_dir):
    """
    Evaluate each intermediate MLP head (attached after the first `depth`
    ViT blocks) on the test loader, printing per-head accuracy.

    NOTE(review): relies on the module-level ViT `model`. The loop variable is
    renamed so it no longer shadows the imported `mlp` module.
    """
    for depth in range(1, len(mlps_list) + 1):
        head = torch.load(os.path.join(mlp_root_dir, mlps_list[depth - 1])).cuda()
        head.eval()
        print(f'MLP of index {depth-1} has been loaded')
        acc_sum = 0.0
        for images, labels in tqdm(dataloader_test):
            images = images.cuda()
            labels = labels.cuda()
            # Run the first `depth` transformer blocks, then the MLP head.
            feats = model.patch_embed(images)
            feats = model.pos_drop(feats)
            for blk_idx in range(depth):
                feats = model.blocks[blk_idx](feats)
            with torch.no_grad():
                logits = head(feats)
            preds = torch.argmax(logits, dim=-1)
            acc_sum += torch.sum(preds == labels).item() / len(labels)
        print(f'Accuracy of block {depth-1} = {(acc_sum/len(dataloader_test)):.3f}')
# CLI: evaluate either the full ViT or the chain of intermediate MLP heads.
parser = argparse.ArgumentParser(description='Testing ViT or MLPs')
parser.add_argument('--model_name', type=str, choices=['ViT','MLPs'],
                    help='Choose between ViT or MLPs')
parser.add_argument('--vit_path', type=str,
                    help='pass the path of downloaded ViT')
parser.add_argument('--mlp_path', type=str,
                    help='pass the path for the downloaded MLPs folder')
args = parser.parse_args()

# Evaluation data: FGSM adversarial images stored under root_dir.
loader_, dataset_ = data_loader_attacks(root_dir=root_dir, attack_name='FGSM')
model = torch.load(args.vit_path).cuda()
model.eval()
if args.model_name == 'ViT':
    acc = test_vit(model=model, dataloader_test=loader_)
else:
    mlps_list = sorted(os.listdir(args.mlp_path))
    acc = test_mlps(mlps_list=mlps_list, dataloader_test=loader_, mlp_root_dir=args.mlp_path)
Defensive_Diffusion-testing | Defensive_Diffusion-testing/aabb.py | import numpy as np
import torch
import torch.nn.functional as F
import torchvision
from diffusers import DDIMScheduler, DDPMPipeline
from matplotlib import pyplot as plt
from PIL import Image
from torchvision import transforms
from tqdm.auto import tqdm
from torchvision.datasets import ImageFolder
from torch.utils.data import DataLoader
import os
from data.dataset import data_loader
device = (
"mps"
if torch.backends.mps.is_available()
else "cuda"
if torch.cuda.is_available()
else "cpu"
)
def test():
    """
    Load the fine-tuned DDPM pipeline and run 600 DDIM denoising steps from
    pure noise; returns a batch of 8 sampled 3x256x256 images.
    """
    image_pipe = DDPMPipeline.from_pretrained("saved_model/my-finetuned-model_135")
    image_pipe.to(device);
    scheduler = DDIMScheduler.from_pretrained("saved_model/my-finetuned-model_135/scheduler")
    scheduler.set_timesteps(num_inference_steps=600)
    x = torch.randn(8, 3, 256, 256).to(device)  # Batch of 8
    for i, t in tqdm(enumerate(scheduler.timesteps)):
        # Scale input per the scheduler's convention, predict noise, step back.
        model_input = scheduler.scale_model_input(x, t)
        with torch.no_grad():
            noise_pred = image_pipe.unet(model_input, t)["sample"]
        x = scheduler.step(noise_pred, t, x).prev_sample
    return x
# Sample a batch and display it as a 4-wide grid, mapping [-1, 1] -> [0, 1].
x = test()
grid = torchvision.utils.make_grid(x, nrow=4)
plt.imshow(grid.permute(1, 2, 0).cpu().clip(-1, 1) * 0.5 + 0.5);
Defensive_Diffusion-testing | Defensive_Diffusion-testing/attack.py |
import foolbox as fb
import torch
import torch.nn as nn
from autoattack import AutoAttack
class Attack():
    """
    Adversarial-example generator around a victim model.

    Construct with an epsilon budget, an attack_type in
    {'FGSM', 'CW', 'BIM', 'L2PGD', 'PGD', 'LinfBIM', 'AutoAttack'} and the
    victim model. `generate_attack` dispatches to the matching method and
    returns (adversarial images, success tensor); each success entry marks
    whether that example fooled the model (None for AutoAttack).
    """
    def __init__(self, epsilon, attack_type, model):
        self.epsilon = epsilon
        self.attack_type = attack_type
        # foolbox wrapper; inputs are assumed scaled to [0, 1].
        self.model_fool = fb.models.PyTorchModel(model, bounds=(0,1))
        # NOTE: the AutoAttack adversary is built eagerly even when another
        # attack_type is selected.
        self.adversary = AutoAttack(model, norm='Linf', eps=self.epsilon, version='standard')
    def FGSM(self, samples, labels):
        """
        Generate FGSM (single-step Linf) attacks.
        Args:
            samples -> clean images
            labels -> labels of clean images
        Returns:
            adversarial images, and a success tensor marking which examples
            fooled the model.
        """
        attack_func = fb.attacks.FGSM()
        _, adv_images, success = attack_func(self.model_fool,
                                             samples,
                                             labels,
                                             epsilons=self.epsilon)
        return adv_images, success
    def L2PGD(self, samples, labels):
        """
        Generate L2 PGD attacks.
        Args:
            samples -> clean images
            labels -> labels of clean images
        Returns:
            adversarial images and success tensor (as in FGSM).
        """
        attack_func = fb.attacks.L2PGD()
        _, adv_images, success = attack_func(self.model_fool,
                                             samples,
                                             labels,
                                             epsilons=self.epsilon)
        return adv_images, success
    def CW(self, samples, labels):
        """
        Generate Carlini & Wagner (L2) attacks.
        Args:
            samples -> clean images
            labels -> labels of clean images
        Returns:
            adversarial images and success tensor (as in FGSM).
        """
        # Positional args — presumably (binary_search_steps=6, steps=1000,
        # stepsize=0.01, confidence=0) per foolbox's signature; verify.
        attack_func = fb.attacks.L2CarliniWagnerAttack(6,1000,0.01,0)
        _, adv_images, success = attack_func(self.model_fool,
                                             samples,
                                             labels,
                                             epsilons=self.epsilon)
        print(f'Sum = {sum(success)}')
        return adv_images, success
    def BIM(self, samples, labels):
        """
        Generate L2 BIM (basic iterative) attacks.
        Args:
            samples -> clean images
            labels -> labels of clean images
        Returns:
            adversarial images and success tensor (as in FGSM).
        """
        attack_func = fb.attacks.L2BasicIterativeAttack()
        _, adv_images, success = attack_func(self.model_fool,
                                             samples,
                                             labels,
                                             epsilons=self.epsilon)
        return adv_images, success
    def PGD(self, samples, labels):
        """
        Generate Linf PGD attacks.
        Args:
            samples -> clean images
            labels -> labels of clean images
        Returns:
            adversarial images and success tensor (as in FGSM).
        """
        attack_func = fb.attacks.PGD()
        _, adv_images, success = attack_func(self.model_fool,
                                             samples,
                                             labels,
                                             epsilons=self.epsilon)
        return adv_images, success
    def LinfBIM(self, samples, labels):
        """
        Generate Linf BIM attacks.
        Args:
            samples -> clean images
            labels -> labels of clean images
        Returns:
            adversarial images and success tensor (as in FGSM).
        """
        attack_func = fb.attacks.LinfBasicIterativeAttack()
        _, adv_images, success = attack_func(self.model_fool,
                                             samples,
                                             labels,
                                             epsilons=self.epsilon)
        return adv_images, success
    def AutoAttack(self, samples, labels):
        """
        Generate AutoAttack (standard Linf suite) attacks.
        Args:
            samples -> clean images
            labels -> labels of clean images
        Returns:
            adversarial images; success is always None here (AutoAttack does
            not report a per-example success tensor through this API).
        """
        x_adv = self.adversary.run_standard_evaluation(samples, labels, bs=15)
        success = None
        return x_adv, success
    def generate_attack(self, samples, labels):
        """
        Dispatch on self.attack_type and generate the corresponding attack.
        Args:
            samples -> clean images
            labels -> labels of clean images
        Returns:
            adversarial images and success tensor; prints a message and
            returns None implicitly for unsupported attack types.
        """
        if self.attack_type == 'FGSM':
            adv_img, success = self.FGSM(samples, labels)
            return adv_img, success
        elif self.attack_type == 'CW':
            adv_img, success = self.CW(samples, labels)
            return adv_img, success
        elif self.attack_type == 'L2PGD':
            adv_img, success = self.L2PGD(samples, labels)
            return adv_img, success
        elif self.attack_type == 'BIM':
            adv_img, success = self.BIM(samples, labels)
            return adv_img, success
        elif self.attack_type == 'PGD':
            adv_img, success = self.PGD(samples, labels)
            return adv_img, success
        elif self.attack_type == 'LinfBIM':
            adv_img, success = self.LinfBIM(samples, labels)
            return adv_img, success
        elif self.attack_type == 'AutoAttack':
            adv_img, success = self.AutoAttack(samples, labels)
            return adv_img, success
        else:
            print(f'Attacks of type {self.attack_type} is not supported')
Defensive_Diffusion-testing | Defensive_Diffusion-testing/mlp.py | import torch.nn as nn
import torch
from utils import get_classifiers_list
class Classifier(nn.Module):
    """
    4-layer MLP head over flattened ViT patch tokens.

    Args:
        num_classes: number of output classes.
        in_features: input dimension of the first layer; forward() always
            flattens to 196*768, matching the default.
    Returns logits of shape (batch, num_classes).
    """

    def __init__(self, num_classes=2, in_features=768*196):
        super().__init__()
        self.linear1 = nn.Linear(in_features=in_features, out_features=4096)
        self.linear2 = nn.Linear(in_features=4096, out_features=2048)
        self.linear3 = nn.Linear(in_features=2048, out_features=128)
        self.linear4 = nn.Linear(in_features=128, out_features=num_classes)
        self.dropout = nn.Dropout(0.3)

    def forward(self, x):
        flat = x.reshape(-1, 196 * 768)
        hidden = nn.functional.relu(self.linear1(flat))
        hidden = nn.functional.relu(self.linear2(hidden))
        hidden = nn.functional.relu(self.linear3(hidden))
        return self.linear4(hidden)
class Big_model(nn.Module):
    """
    Ensemble of the ViT head and the first `num_classifiers` intermediate MLP
    heads; forward() sums their temperature-sharpened softmax outputs.
    Loads the serialized models from disk at construction time.
    """
    def __init__(self, MLP_path='models/MLP_new_chest', num_classifiers=3, vit_path='models/vit_base_patch16_224_in21k_test-accuracy_0.96_chest.pth'):
        super().__init__()
        self.MLP_path = MLP_path
        self.vit_path = vit_path
        self.num_classifiers = num_classifiers
        self.mlp_list = get_classifiers_list(self.MLP_path, num_classifiers=self.num_classifiers)
        self.model = torch.load(self.vit_path)
    def forward(self, x):
        final_prediction = []
        vit_predictions = self.model(x)
        # Softmax sharpened by a factor of 25 so each head votes near one-hot.
        y = torch.softmax(vit_predictions*25, dim=-1)
        final_prediction.append(y)
        # Re-run the patch embedding and feed each MLP head the features after
        # the i-th transformer block.
        x = self.model.patch_embed(x)
        x_0 = self.model.pos_drop(x)
        i = 0
        for mlp in self.mlp_list:
            x_0 = self.model.blocks[i](x_0)
            mlp_output = mlp(x_0)
            mlp_predictions = torch.softmax(mlp_output*25, dim=-1)
            final_prediction.append(mlp_predictions)
            i += 1
        # Sum the stacked per-head probability vectors (soft majority vote).
        stacked_tesnor = torch.stack(final_prediction, dim=1)
        preds_major = stacked_tesnor.sum(dim=1)
        return preds_major
Defensive_Diffusion-testing | Defensive_Diffusion-testing/utils.py | import os
import torch
from attack import Attack
import numpy as np
import matplotlib.pyplot as plt
from torchvision.utils import save_image
from autoattack import AutoAttack
def generate_save_attacks(attack_names, model, samples, classes, attack_image_dir, epsilon=0.03, batch_size=30):
    """
    Generate adversarial images from the test set and save them to disk.

    Creates one `Test_attacks_<name>` folder per attack under attack_image_dir,
    with one subfolder per class; images are named `<batch>-<index><name>.png`.
    Args:
        attack_names --> list of attack names (see Attack / AutoAttack).
        model --> victim model.
        samples --> test dataloader yielding (images, labels).
        classes --> list of class names (binary: classes[0]/classes[1]).
        attack_image_dir --> root directory for the saved attack images.
        epsilon --> perturbation budget.
        batch_size --> unused here; batches come from `samples`.
    Returns nothing.
    """
    # Create the output directory tree first.
    for attack in attack_names:
        attack_folder = f'Test_attacks_{attack}'
        print(attack_image_dir)
        if not os.path.exists(os.path.join(attack_image_dir, attack_folder)):
            os.makedirs(os.path.join(attack_image_dir, attack_folder))
        inter_ = os.path.join(attack_image_dir, attack_folder) + '/'
        for classe in classes:
            if not os.path.exists(os.path.join(inter_, classe)):
                os.makedirs(os.path.join(inter_, classe))
    for attack_name in attack_names:
        if attack_name != 'AUTOPGD':
            # foolbox-backed attacks via the Attack wrapper.
            batchNum = 0
            model.eval()
            attack = Attack(epsilon=epsilon, attack_type=attack_name, model=model)
            for im, lab in samples:
                im = im.cuda()
                lab = lab.cuda()
                adv_img, _ = attack.generate_attack(im, labels=lab)
                print('Batch')
                count = 0
                # Route each image into its class subfolder (binary labels).
                for image, label in zip(adv_img, lab):
                    if (lab[count]):
                        save_image(image, os.path.join(attack_image_dir, f'Test_attacks_{attack_name}/{classes[1]}/' + str(batchNum) + "-" + str(count) + attack_name + ".png"))
                    else:
                        save_image(image, os.path.join(attack_image_dir, f'Test_attacks_{attack_name}/{classes[0]}/' + str(batchNum) + "-" + str(count) + attack_name + ".png"))
                    count += 1
                batchNum += 1
        elif attack_name == 'AUTOPGD':
            # AutoAttack's APGD-CE run, driven directly (not via Attack).
            batchNum = 0
            adversary = AutoAttack(model=model, eps=epsilon, version='custom', norm='Linf', attacks_to_run=['apgd-ce'])
            for im, lab in samples:
                im = im.cuda()
                lab = lab.cuda()
                adv_img = adversary.run_standard_evaluation(im, lab, bs=lab.shape[0])
                count = 0
                for image, label in zip(adv_img, lab):
                    if (lab[count]):
                        save_image(image, os.path.join(attack_image_dir, f'Test_attacks_{attack_name}/{classes[1]}/' + str(batchNum) + "-" + str(count) + attack_name + ".png"))
                    else:
                        save_image(image, os.path.join(attack_image_dir, f'Test_attacks_{attack_name}/{classes[0]}/' + str(batchNum) + "-" + str(count) + attack_name + ".png"))
                    count += 1
                batchNum += 1
def get_classifiers_list(MLP_path, num_classifiers=5):
    """
    Load the saved intermediate MLP heads (sorted by filename) onto the GPU.

    Args:
        MLP_path: directory of serialized MLP modules.
        num_classifiers: expected number of files. The preallocated list is
            indexed directly, so extra files in the directory raise IndexError.
    Returns:
        List of eval-mode MLP modules on CUDA.
    """
    i = 0
    classifiers_list = [0]*num_classifiers
    for classif in sorted(os.listdir(MLP_path)):
        classifiers_list[i] = torch.load(os.path.join(MLP_path, classif)).eval().cuda()
        i += 1
        print(f'MLP {i} is loaded!')
    return classifiers_list
def frob_norm_kl_matrix(stacked_tesnor, num_classifiers=5):
    """
    Per-sample disagreement score across classifier heads.

    For each sample, build the (num_classifiers+1)^2 matrix of pairwise KL
    divergences between the heads' probability vectors and take its
    Frobenius norm. Returns one float per sample.
    """
    norms = []
    n_heads = num_classifiers + 1
    for sample in stacked_tesnor:
        pairwise = torch.zeros((n_heads, n_heads))
        for row in range(n_heads):
            for col in range(n_heads):
                kl = torch.nn.functional.kl_div(
                    sample[row].log(), sample[col].log(),
                    reduction='sum', log_target=True,
                ).item()
                pairwise[row, col] = kl
        norms.append(np.sqrt(torch.sum(torch.square(pairwise)).item()))
    return norms
def roc(attack_name, frob_dict, threshold):
    """
    ROC points for threshold-based attack detection on Frobenius-norm scores.

    A score >= threshold is flagged as an attack; clean scores above the
    threshold are false positives. Returns (tpr_list, fpr_list, threshold).
    """
    tprs = []
    fprs = []
    for thresh in threshold:
        false_pos = sum(frob_dict['clean'] >= thresh).item()
        true_neg = sum(frob_dict['clean'] < thresh).item()
        true_pos = sum(frob_dict[attack_name] >= thresh).item()
        false_neg = sum(frob_dict[attack_name] < thresh).item()
        fprs.append(false_pos / (false_pos + true_neg))
        tprs.append(true_pos / (true_pos + false_neg))
    return tprs, fprs, threshold
def frobenius_norm(data_loader, model, mlps_list):
    """
    Score every sample in ``data_loader`` with the Frobenius norm of the
    pairwise KL-divergence matrix between the ViT prediction and the
    predictions of the intermediate MLP heads.

    Args:
        data_loader: Loader yielding (images, labels) batches.
        model: ViT model (exposes patch_embed, pos_drop and blocks).
        mlps_list: Intermediate MLP heads, one per early ViT block.

    Returns:
        List of per-sample Frobenius-norm values over the whole loader.
    """
    scores = []
    for batch_images, _ in data_loader:
        batch_images = batch_images.cuda()
        # Probability distribution from the full ViT.
        head_probs = [torch.softmax(model(batch_images).detach().cpu(), dim=-1).detach().cpu()]
        # Re-run the embedding pipeline and tap each transformer block with
        # its MLP head.
        hidden = model.pos_drop(model.patch_embed(batch_images))
        for block_idx, mlp_head in enumerate(mlps_list):
            hidden = model.blocks[block_idx](hidden)
            head_probs.append(torch.softmax(mlp_head(hidden).detach().cpu(), dim=-1).detach().cpu())
        scores.extend(frob_norm_kl_matrix(torch.stack(head_probs, dim=1)))
    return scores
def plot_roc(tpr_list, fpr_list, attack_name):
    """
    Plot the ROC curve for one attack and save it as
    ``{attack_name}_ROC_Curve`` in the working directory.

    Args:
        tpr_list: True-positive rates, one per threshold.
        fpr_list: False-positive rates, one per threshold.
        attack_name: Attack label used for legend, title and file name.
    """
    plt.figure(figsize=(10, 6))
    plt.plot(fpr_list, tpr_list, '-', label=attack_name)
    plt.title(f'ROC_{attack_name}_Attack')
    # loc=4 puts the legend in the lower-right corner.
    plt.legend(loc=4)
    plt.xlabel('False Positive Rate')
    plt.ylabel('True Positive Rate')
    plt.grid()
    plt.savefig(f'{attack_name}_ROC_Curve', bbox_inches='tight')
    plt.show()
Defensive_Diffusion-testing | Defensive_Diffusion-testing/majority_voting.py | import os
import torch
import argparse
import numpy as np
from utils import *
from data.dataset import data_loader, data_loader_attacks
import mlp
def majority_voting(data_loader, model, mlps_list):
    """
    SEViT performance with majority voting.

    The final label for each sample is the majority vote over the ViT
    prediction and the predictions of the intermediate MLP heads.

    Args:
        data_loader: Loader of test samples for clean images, or attacks
            generated from the test samples.
        model: ViT model (exposes patch_embed, pos_drop and blocks).
        mlps_list: List of intermediate MLPs, one per early ViT block.

    Returns:
        Accuracy averaged over all batches.
    """
    running_acc = 0.0
    for batch_images, labels in data_loader:
        batch_images = batch_images.cuda()
        # Start the ballot with the full ViT's prediction.
        votes = [torch.argmax(model(batch_images).detach().cpu(), dim=-1).detach().cpu()]
        # Tap each transformer block and let its MLP head cast a vote.
        hidden = model.pos_drop(model.patch_embed(batch_images))
        for block_idx, mlp_head in enumerate(mlps_list):
            hidden = model.blocks[block_idx](hidden)
            votes.append(torch.argmax(mlp_head(hidden).detach().cpu(), dim=-1).detach().cpu())
        ballot = torch.stack(votes, dim=1)
        # One-hot each vote and pick the class with the most votes.
        preds_major = torch.argmax(torch.nn.functional.one_hot(ballot).sum(dim=1), dim=-1)
        running_acc += (preds_major == labels).sum().item() / len(labels)
    final_acc = running_acc / len(data_loader)
    print(f'Final Accuracy From Majority Voting = {(final_acc *100) :.3f}%' )
    return final_acc
# ---- Command-line interface -------------------------------------------------
parser = argparse.ArgumentParser(description='Majority Voting')
parser.add_argument('--images_type', type=str , choices=['clean', 'adversarial'],
                    help='Path to root directory of images')
parser.add_argument('--image_folder_path', type=str ,
                    help='Path to root directory of images')
parser.add_argument('--vit_path', type=str ,
                    help='Path to the downloaded ViT model')
parser.add_argument('--mlp_path', type=str ,
                    help='Path to the downloaded MLPs folder')
parser.add_argument('--attack_name', type=str,
                    help='Attack name')
args = parser.parse_args()
# Load the fine-tuned ViT checkpoint and switch it to inference mode.
model = torch.load(args.vit_path).cuda()
model.eval()
print('ViT is loaded!')
# Load the intermediate MLP heads that vote alongside the ViT.
MLPs_list = get_classifiers_list(MLP_path=args.mlp_path)
print('All MLPs are loaded!')
# Clean images come from the train/valid/test folder layout; adversarial
# images come from the per-attack folder layout.
if args.images_type == 'clean':
    loader_, dataset_ = data_loader(root_dir=args.image_folder_path, batch_size=15)
    majority_voting(data_loader=loader_['test'], model= model, mlps_list=MLPs_list)
else:
    loader_, dataset_ = data_loader_attacks(root_dir=args.image_folder_path, attack_name= args.attack_name, batch_size=15)
    majority_voting(data_loader=loader_, model= model, mlps_list=MLPs_list)
| 2,732 | 30.77907 | 122 | py |
Defensive_Diffusion-testing | Defensive_Diffusion-testing/finetuning_diffusion_model.py | import numpy as np
import torch
import torch.nn.functional as F
import torchvision
from diffusers import DDIMScheduler, DDPMPipeline
from matplotlib import pyplot as plt
from PIL import Image
from torchvision import transforms
from diffusers import DDPMScheduler, UNet2DModel
from tqdm.auto import tqdm
from torchvision.datasets import ImageFolder
from torch.utils.data import DataLoader
import os
from data.dataset import data_loader
import wandb
# Experiment tracking.
wandb.init(project="ml-708", entity="mbzuai-")
# Pick the best available accelerator: Apple MPS, then CUDA, then CPU.
device = (
    "mps"
    if torch.backends.mps.is_available()
    else "cuda"
    if torch.cuda.is_available()
    else "cpu"
)
# TB chest X-ray dataset; only the training split is used here.
root_dir = "data/TB_data"
loader_, dataset = data_loader(root_dir=root_dir, batch_size=8)
train_dataloader = loader_['train']
# UNet backbone for the diffusion model, operating on 224x224 RGB images.
net = UNet2DModel(
    sample_size=224,  # the target image resolution
    in_channels=3,  # the number of input channels, 3 for RGB images
    out_channels=3,  # the number of output channels
    layers_per_block=2,  # how many ResNet layers to use per UNet block
    act_fn="silu",
    add_attention=True,
    center_input_sample=False,
    downsample_padding=0,
    flip_sin_to_cos=False,
    freq_shift=1,
    mid_block_scale_factor=1,
    norm_eps=1e-06,
    norm_num_groups=32,
    time_embedding_type="positional",
    block_out_channels=(128,
                        128,
                        256,
                        256,
                        512,
                        512),  # Roughly matching our basic unet example
    down_block_types=(
        "DownBlock2D",
        "DownBlock2D",
        "DownBlock2D",
        "DownBlock2D",
        "AttnDownBlock2D",
        "DownBlock2D"
    ),
    up_block_types=(
        "UpBlock2D",
        "AttnUpBlock2D",
        "UpBlock2D",
        "UpBlock2D",
        "UpBlock2D",
        "UpBlock2D"  # a regular ResNet upsampling block
    ),
)
net.to(device)
def train(train_dataloader, epoch_st, epoch_end, lr=1e-4):
    """
    Train the diffusion UNet with the standard DDPM noise-prediction objective.

    Args:
        train_dataloader: Loader yielding (images, labels) batches; labels are
            ignored, images are the clean training samples.
        epoch_st: First epoch index (inclusive).
        epoch_end: Last epoch index (exclusive).
        lr: AdamW learning rate.

    Returns:
        The trained DDPMPipeline. A checkpoint is also saved after every epoch
        under ``saved_model_scratch/my-model_{epoch}``.
    """
    #image_pipe = DDPMPipeline.from_pretrained("saved_model/my-finetuned-model_66")
    scheduler = DDIMScheduler(beta_end=0.02, beta_schedule="linear", beta_start=0.0001, clip_sample=True, num_train_timesteps=1000, prediction_type="epsilon")
    image_pipe = DDPMPipeline(net, scheduler=scheduler)
    image_pipe.to(device)
    grad_accumulation_steps = 2  # @param
    optimizer = torch.optim.AdamW(image_pipe.unet.parameters(), lr=lr)
    losses = []
    for epoch in range(epoch_st, epoch_end):
        for step, batch in tqdm(enumerate(train_dataloader), total=len(train_dataloader)):
            images, labels = batch
            clean_images = images.to(device)
            # Sample noise to add to the images.
            noise = torch.randn(clean_images.shape).to(clean_images.device)
            bs = clean_images.shape[0]
            # Sample a random timestep for each image.
            timesteps = torch.randint(
                0,
                image_pipe.scheduler.num_train_timesteps,
                (bs,),
                device=clean_images.device,
            ).long()
            # Add noise to the clean images according to the noise magnitude at
            # each timestep (this is the forward diffusion process).
            noisy_images = image_pipe.scheduler.add_noise(clean_images, noise, timesteps)
            # Get the model prediction for the noise.
            noise_pred = image_pipe.unet(noisy_images, timesteps, return_dict=False)[0]
            # Compare the prediction with the actual noise. NB - trying to
            # predict noise (eps), not (noisy_ims-clean_ims) or (clean_ims).
            loss = F.mse_loss(noise_pred, noise)
            # Store for later plotting / epoch averaging.
            losses.append(loss.item())
            # BUG FIX: the original called loss.backward(loss), which seeds the
            # backward pass with the loss value itself and scales every
            # gradient by it. A plain backward() accumulates correct grads.
            loss.backward()
            # Gradient accumulation: step only every grad_accumulation_steps
            # minibatches.
            if (step + 1) % grad_accumulation_steps == 0:
                optimizer.step()
                optimizer.zero_grad()
        print(
            f"Epoch {epoch} average loss: {sum(losses[-len(train_dataloader):])/len(train_dataloader)}"
        )
        image_pipe.save_pretrained(f"saved_model_scratch/my-model_{epoch}")
    return image_pipe
model = train(train_dataloader=train_dataloader,epoch_st=0,epoch_end=200) | 4,351 | 30.766423 | 156 | py |
Defensive_Diffusion-testing | Defensive_Diffusion-testing/generate_attacks.py | import torch
import argparse
from attack import Attack
from utils import *
from data.dataset import data_loader
from mlp import Big_model
# ---- Command-line interface -------------------------------------------------
parser = argparse.ArgumentParser(description='Generate Attack from ViT')
parser.add_argument('--epsilons', type=float ,
                    help='Perturbations Size')
parser.add_argument('--attack_list', type=str , nargs='+',
                    help='Attack List to Generate')
parser.add_argument('--vit_path', type=str ,
                    help='pass the path for the downloaded MLPs folder')
parser.add_argument('--attack_images_dir', type=str ,
                    help='Directory to save the generated attacks')
args = parser.parse_args()
# Clean TB dataset; attacks are crafted from its test split.
root_dir = "./data/TB_data"
loader_, dataset_ = data_loader(root_dir=root_dir)
""" model = torch.load(args.vit_path).cuda()
model.eval() """
# Attacks are generated against the MLP ensemble wrapper (Big_model) rather
# than the raw ViT (the ViT load above is kept disabled).
device = torch.device("cuda")
model_mlp = Big_model()
model_mlp.to(device)
model_mlp.eval()
# Generate and save attacks, one folder per attack/class combination.
generate_save_attacks(
    attack_names= args.attack_list,
    model= model_mlp,
    samples= loader_['test'],
    classes= ['Normal', 'Tuberculosis'],
    attack_image_dir= args.attack_images_dir,
    epsilon=args.epsilons,
)
| 1,179 | 28.5 | 72 | py |
Defensive_Diffusion-testing | Defensive_Diffusion-testing/adversarial_detection.py | import torch
import numpy as np
from utils import *
import argparse
from data.dataset import data_loader, data_loader_attacks
# ---- Command-line interface -------------------------------------------------
parser = argparse.ArgumentParser(description='ROC For Attack')
parser.add_argument('--clean_image_folder_path', type=str ,
                    help='Path to root directory of images')
parser.add_argument('--attack_image_folder_path', type=str ,
                    help='Path to root directory of images')
parser.add_argument('--vit_path', type=str ,
                    help='Path to the downloaded ViT model')
parser.add_argument('--mlp_path', type=str ,
                    help='Path to the downloaded MLPs folder')
parser.add_argument('--attack_name', type=str,
                    help='Attack name')
args = parser.parse_args()
# Load the fine-tuned ViT and switch it to inference mode.
model = torch.load(args.vit_path).cuda()
model.eval()
print('ViT is loaded!')
# Load the intermediate MLP heads.
MLPs_list = get_classifiers_list(MLP_path=args.mlp_path)
print('All MLPs are loaded!')
# Load images (clean and attack).
batch_size = 10
clean_loader_, _= data_loader(root_dir=args.clean_image_folder_path, batch_size=batch_size)
attack_loader_, _= data_loader_attacks(root_dir=args.attack_image_folder_path, attack_name= args.attack_name, batch_size=batch_size)
print('Clean test samples and corresponding adversarial samples are loaded')
# Score clean and adversarial samples with the KL-matrix Frobenius norm;
# adversarial inputs are expected to score higher than clean ones.
frob_list_clean = frobenius_norm(data_loader=clean_loader_['test'], model=model, mlps_list= MLPs_list)
frob_list_attack = frobenius_norm(data_loader=attack_loader_, model=model, mlps_list= MLPs_list)
print('Frobenuis norm has been calculated')
frob_dict = {'clean': torch.tensor(frob_list_clean), args.attack_name:torch.tensor(frob_list_attack)}
# Sweep thresholds from 0 to 90 (step 0.1) to trace TPR/FPR.
tpr_list, fpr_list, threshold = roc(attack_name= args.attack_name, frob_dict= frob_dict, threshold= np.arange(0,90,0.1))
# Plot and save the ROC curve.
plot_roc(tpr_list= tpr_list, fpr_list= fpr_list, attack_name= args.attack_name)
print('ROC figure has been saved in the current directory!') | 1,975 | 36.283019 | 132 | py |
Defensive_Diffusion-testing | Defensive_Diffusion-testing/models/mlp.py | import torch.nn as nn
import torch
class Classifier(nn.Module):
    """
    MLP classifier head applied to flattened ViT token features.

    Args:
        num_classes: Number of output classes.
        in_features: Flattened feature dimension (tokens * embed_dim;
            196 * 768 for a 224x224 ViT-Base).

    forward() flattens its input to (batch, in_features) and returns
    unnormalized logits of shape (batch, num_classes).
    """
    def __init__(self, num_classes=2, in_features=768*196):
        super().__init__()
        # Remember the flattened width so forward() does not hard-code it.
        self.in_features = in_features
        self.linear1 = nn.Linear(in_features=in_features, out_features=4096)
        self.linear2 = nn.Linear(in_features=4096, out_features=2048)
        self.linear3 = nn.Linear(in_features=2048, out_features=128)
        self.linear4 = nn.Linear(in_features=128, out_features=num_classes)
        # NOTE(review): dropout was defined but never applied in the original
        # forward(); kept unused here to preserve trained-checkpoint behavior.
        # Wire it in before the final layer if retraining from scratch.
        self.dropout = nn.Dropout(0.3)
    def forward(self, x):
        # BUG FIX: the original flattened with the hard-coded 196*768, which
        # broke any instance constructed with a different in_features.
        x = x.reshape(-1, self.in_features)
        x = nn.functional.relu(self.linear1(x))
        x = nn.functional.relu(self.linear2(x))
        x = nn.functional.relu(self.linear3(x))
        return self.linear4(x)
Defensive_Diffusion-testing | Defensive_Diffusion-testing/data/dataset.py | import os
from torchvision.datasets import ImageFolder
from torch.utils.data import DataLoader
import torchvision.transforms as transforms
# DataLoader and Dataset (Clean Samples)
def data_loader(root_dir, image_size=(224, 224), batch_size=15, train_dir='training', test_dir='testing', vald_dir='validation'):
    """
    Create Datasets and DataLoaders from an image folder.

    Args:
        root_dir: Root directory of the dataset (downloaded dataset).
        image_size: Target (H, W) after resizing.
        batch_size: Batch size for every split.
        train_dir / test_dir / vald_dir: Split sub-directory names.

    Returns:
        data_loaders: Dict with DataLoaders for 'train', 'valid' and 'test'.
        image_dataset: Dict with the matching ImageFolder datasets.
    """
    split_dirs = {
        'train': os.path.join(root_dir, train_dir),
        'valid': os.path.join(root_dir, vald_dir),
        'test': os.path.join(root_dir, test_dir),
    }
    # Every split uses the same deterministic resize + tensor conversion
    # (train-time augmentations were left disabled in the original recipe).
    resize_to_tensor = transforms.Compose([
        transforms.Resize(image_size),
        transforms.ToTensor(),
    ])
    image_dataset = {split: ImageFolder(split_dirs[split], transform=resize_to_tensor)
                     for split in ('train', 'valid', 'test')}
    # Train shuffles; valid/test are deterministic and drop ragged batches.
    data_loaders = {'train': DataLoader(image_dataset['train'], batch_size=batch_size,
                                        shuffle=True, num_workers=12)}
    data_loaders['test'] = DataLoader(image_dataset['test'], batch_size=batch_size,
                                      shuffle=False, num_workers=12, drop_last=True)
    data_loaders['valid'] = DataLoader(image_dataset['valid'], batch_size=batch_size,
                                       shuffle=False, num_workers=12, drop_last=True)
    dataset_size = {split: len(image_dataset[split]) for split in ('train', 'valid', 'test')}
    print([f'number of {i} images is {dataset_size[i]}' for i in (dataset_size)])
    class_idx = image_dataset['test'].class_to_idx
    print(f'Classes with index are: {class_idx}')
    class_names = image_dataset['test'].classes
    print(class_names)
    return data_loaders, image_dataset
#Dataloader and Dataset (Adversarial Samples)
def data_loader_attacks(root_dir, attack_name, image_size=(224, 224), batch_size=30):
    """
    Create the Dataset and DataLoader for the generated adversarial samples
    of a single attack.

    Args:
        root_dir: Root directory of the generated adversarial samples.
        attack_name: Attack whose ``Test_attacks_{attack_name}`` folder is read.
        image_size: Target (H, W) after resizing.
        batch_size: Batch size.

    Returns:
        data_loaders: DataLoader over the attack images (no shuffling).
        image_dataset: The underlying ImageFolder dataset.
    """
    attack_dir = os.path.join(root_dir, f'Test_attacks_{attack_name}')
    to_tensor = transforms.Compose([
        transforms.Resize(image_size),
        transforms.ToTensor(),
    ])
    image_dataset = ImageFolder(attack_dir, transform=to_tensor)
    data_loaders = DataLoader(image_dataset, batch_size=batch_size,
                              shuffle=False, num_workers=8, drop_last=True)
    print(f'number of images is {len(image_dataset)}')
    class_idx = image_dataset.class_to_idx
    print(f'Classes with index are: {class_idx}')
    return data_loaders, image_dataset
stylegan-encoder | stylegan-encoder-master/train_effnet.py | """
Trains a modified EfficientNet to generate approximate dlatents using examples from a trained StyleGAN.
Props to @SimJeg on GitHub for the original code this is based on, from this thread: https://github.com/Puzer/stylegan-encoder/issues/1#issuecomment-490469454
"""
import os
import math
import numpy as np
import pickle
import cv2
import argparse
import dnnlib
import config
import dnnlib.tflib as tflib
import tensorflow
import keras.backend as K
from efficientnet import EfficientNetB0, EfficientNetB1, EfficientNetB2, EfficientNetB3, preprocess_input
from keras.layers import Input, LocallyConnected1D, Reshape, Permute, Conv2D, Add, Concatenate
from keras.models import Model, load_model
"""
Truncation method from @oneiroid
"""
def truncate_fancy(dlat, dlat_avg, model_scale=18, truncation_psi=0.7, minlayer=0, maxlayer=8, do_clip=False):
    """
    Layer-wise truncation trick (from @oneiroid): interpolate dlatents toward
    the average dlatent, but only for layers below ``maxlayer`` (and at or
    above ``minlayer``).

    Args:
        dlat: Dlatents of shape (n, model_scale, 512).
        dlat_avg: The model's average dlatent.
        model_scale: Number of dlatent layers (18 for 1024px models).
        truncation_psi: Interpolation coefficient for truncated layers.
        minlayer, maxlayer: Layer range that gets truncated.
        do_clip: Use tflib.lerp_clip and evaluate eagerly instead of lerp.
    """
    layer_idx = np.arange(model_scale)[np.newaxis, :, np.newaxis]
    # Coefficient is psi inside the truncated range and 1.0 elsewhere.
    coefs = np.ones(layer_idx.shape, dtype=np.float32)
    coefs[layer_idx < maxlayer] = truncation_psi
    if minlayer > 0:
        coefs[0, :minlayer, :] = 1.0
    if do_clip:
        return tflib.lerp_clip(dlat_avg, dlat, coefs).eval()
    return tflib.lerp(dlat_avg, dlat, coefs)
def truncate_normal(dlat, dlat_avg, truncation_psi=0.7):
    """Standard truncation trick: pull dlat toward dlat_avg by factor psi."""
    return dlat_avg + (dlat - dlat_avg) * truncation_psi
def generate_dataset_main(n=10000, save_path=None, seed=None, model_res=1024, image_size=256, minibatch_size=32, truncation=0.7, fancy_truncation=False):
    """
    Generates a dataset of 'n' images of shape ('size', 'size', 3) with random seed 'seed'
    along with their dlatent vectors W of shape ('n', model_scale, 512).
    These datasets can serve to train an inverse mapping from X to W as well as explore the latent space

    More variation added to latents; also, negative truncation added to balance these examples.
    """
    n = n // 2 # this gets doubled because of negative truncation below
    model_scale = int(2*(math.log(model_res,2)-1)) # For example, 1024 -> 18
    Gs = load_Gs()
    # mod_l chunks of mapped dlatents are tiled across the layers to add
    # variation beyond a single mapped vector per image.
    if (model_scale % 3 == 0):
        mod_l = 3
    else:
        mod_l = 2
    if seed is not None:
        b = bool(np.random.RandomState(seed).randint(2))
        Z = np.random.RandomState(seed).randn(n*mod_l, Gs.input_shape[1])
    else:
        b = bool(np.random.randint(2))
        Z = np.random.randn(n*mod_l, Gs.input_shape[1])
    if b:
        # Half the time, use a coarser split (two chunks) for extra variety.
        mod_l = model_scale // 2
    mod_r = model_scale // mod_l
    # Regenerate Z since mod_l may have changed above.
    if seed is not None:
        Z = np.random.RandomState(seed).randn(n*mod_l, Gs.input_shape[1])
    else:
        Z = np.random.randn(n*mod_l, Gs.input_shape[1])
    W = Gs.components.mapping.run(Z, None, minibatch_size=minibatch_size) # Use mapping network to get unique dlatents for more variation.
    dlatent_avg = Gs.get_var('dlatent_avg') # [component]
    # Truncation trick, applied with both +truncation and -truncation so the
    # dataset is balanced around the average dlatent.
    if fancy_truncation:
        W = np.append(truncate_fancy(W, dlatent_avg, model_scale, truncation), truncate_fancy(W, dlatent_avg, model_scale, -truncation), axis=0)
    else:
        W = np.append(truncate_normal(W, dlatent_avg, truncation), truncate_normal(W, dlatent_avg, -truncation), axis=0)
    W = W[:, :mod_r]
    W = W.reshape((n*2, model_scale, 512))
    # Synthesize the images for every dlatent and downscale to image_size.
    X = Gs.components.synthesis.run(W, randomize_noise=False, minibatch_size=minibatch_size, print_progress=True,
                                    output_transform=dict(func=tflib.convert_images_to_uint8, nchw_to_nhwc=True))
    X = np.array([cv2.resize(x, (image_size, image_size), interpolation = cv2.INTER_AREA) for x in X])
    X = preprocess_input(X)
    return W, X
def generate_dataset(n=10000, save_path=None, seed=None, model_res=1024, image_size=256, minibatch_size=16, truncation=0.7, fancy_truncation=False):
    """
    Use generate_dataset_main() as a helper function.
    Divides requests into batches to save memory.

    Args:
        n: Total number of (image, dlatent) pairs to generate.
        save_path: If given, W and X are also saved there as .npy files.
        seed: Optional RNG seed (reused for every sub-batch, as before).
        model_res, image_size, minibatch_size, truncation, fancy_truncation:
            Forwarded to generate_dataset_main().

    Returns:
        W of shape (n, model_scale, 512) and the matching image array X.
    """
    batch_size = 16
    inc = n//batch_size
    left = n-((batch_size-1)*inc)
    # PERF FIX: collect sub-batches in lists and concatenate once at the end.
    # The original np.append-per-batch pattern recopied the whole accumulated
    # array on every iteration (quadratic in n).
    W_parts = []
    X_parts = []
    for _ in range(batch_size-1):
        aW, aX = generate_dataset_main(inc, save_path, seed, model_res, image_size, minibatch_size, truncation, fancy_truncation)
        W_parts.append(aW)
        X_parts.append(aX)
    # Final batch picks up the remainder so the total is exactly n.
    aW, aX = generate_dataset_main(left, save_path, seed, model_res, image_size, minibatch_size, truncation, fancy_truncation)
    W_parts.append(aW)
    X_parts.append(aX)
    W = np.concatenate(W_parts, axis=0)
    X = np.concatenate(X_parts, axis=0)
    if save_path is not None:
        prefix = '_{}_{}'.format(seed, n)
        np.save(os.path.join(os.path.join(save_path, 'W' + prefix)), W)
        np.save(os.path.join(os.path.join(save_path, 'X' + prefix)), X)
    return W, X
def is_square(n):
    """Return True when n is a perfect square (nearest-int rounding of sqrt)."""
    root = int(math.sqrt(n) + 0.5)
    return root * root == n
def get_effnet_model(save_path, model_res=1024, image_size=256, depth=1, size=3, activation='elu', loss='logcosh', optimizer='adam'):
    """
    Load a cached model from save_path, or build a new EfficientNet-based
    regressor mapping an image of shape (image_size, image_size, 3) to the
    full dlatent tensor (model_scale, 512) of a StyleGAN at model_res.

    Args:
        save_path: Model file; loaded directly if it already exists.
        model_res: StyleGAN output resolution (determines model_scale).
        image_size: Input image resolution.
        depth: Number of TreeConnect-style block repetitions.
        size: Backbone selector: <=0 -> B0, 1 -> B1, 2 -> B2, >=3 -> B3.
        activation / loss / optimizer: Keras layer/compile settings.

    Returns:
        A compiled Keras model.
    """
    if os.path.exists(save_path):
        print('Loading model')
        return load_model(save_path)
    # Build model
    print('Building model')
    model_scale = int(2*(math.log(model_res,2)-1)) # For example, 1024 -> 18
    if (size <= 0):
        effnet = EfficientNetB0(include_top=False, weights='imagenet', input_shape=(image_size, image_size, 3))
    if (size == 1):
        effnet = EfficientNetB1(include_top=False, weights='imagenet', input_shape=(image_size, image_size, 3))
    if (size == 2):
        effnet = EfficientNetB2(include_top=False, weights='imagenet', input_shape=(image_size, image_size, 3))
    if (size >= 3):
        effnet = EfficientNetB3(include_top=False, weights='imagenet', input_shape=(image_size, image_size, 3))
    layer_size = model_scale*8*8*8
    if is_square(layer_size): # work out layer dimensions
        layer_l = int(math.sqrt(layer_size)+0.5)
        layer_r = layer_l
    else:
        # Not a perfect square: pick the nearest power-of-two split.
        layer_m = math.log(math.sqrt(layer_size),2)
        layer_l = 2**math.ceil(layer_m)
        layer_r = layer_size // layer_l
        layer_l = int(layer_l)
        layer_r = int(layer_r)
    x_init = None
    inp = Input(shape=(image_size, image_size, 3))
    x = effnet(inp)
    if (size < 1):
        x = Conv2D(model_scale*8, 1, activation=activation)(x) # scale down
        if (depth > 0):
            x = Reshape((layer_r, layer_l))(x) # See https://github.com/OliverRichter/TreeConnect/blob/master/cifar.py - TreeConnect inspired layers instead of dense layers.
    else:
        if (depth < 1):
            depth = 1
        if (size <= 2):
            x = Conv2D(model_scale*8*4, 1, activation=activation)(x) # scale down a bit
            x = Reshape((layer_r*2, layer_l*2))(x) # See https://github.com/OliverRichter/TreeConnect/blob/master/cifar.py - TreeConnect inspired layers instead of dense layers.
        else:
            x = Reshape((384,256))(x) # full size for B3
    # TreeConnect-style stack: locally connected layers alternating with
    # permutes, plus residual skip connections between repetitions.
    while (depth > 0):
        x = LocallyConnected1D(layer_r, 1, activation=activation)(x)
        x = Permute((2, 1))(x)
        x = LocallyConnected1D(layer_l, 1, activation=activation)(x)
        x = Permute((2, 1))(x)
        if x_init is not None:
            x = Add()([x, x_init]) # add skip connection
        x_init = x
        depth-=1
    if (size >= 2): # add unshared layers at end for different sections of the latent space
        x_init = x
        if layer_r % 3 == 0 and layer_l % 3 == 0:
            # Three parallel unshared branches, concatenated back together.
            a = LocallyConnected1D(layer_r, 1, activation=activation)(x)
            b = LocallyConnected1D(layer_r, 1, activation=activation)(x)
            c = LocallyConnected1D(layer_r, 1, activation=activation)(x)
            a = Permute((2, 1))(a)
            b = Permute((2, 1))(b)
            c = Permute((2, 1))(c)
            a = LocallyConnected1D(layer_l//3, 1, activation=activation)(a)
            b = LocallyConnected1D(layer_l//3, 1, activation=activation)(b)
            c = LocallyConnected1D(layer_l//3, 1, activation=activation)(c)
            x = Concatenate()([a,b,c])
        else:
            # Two parallel unshared branches when a 3-way split does not fit.
            a = LocallyConnected1D(layer_l, 1, activation=activation)(x)
            b = LocallyConnected1D(layer_l, 1, activation=activation)(x)
            a = Permute((2, 1))(a)
            b = Permute((2, 1))(b)
            a = LocallyConnected1D(layer_r//2, 1, activation=activation)(a)
            b = LocallyConnected1D(layer_r//2, 1, activation=activation)(b)
            x = Concatenate()([a,b])
        x = Add()([x, x_init]) # add skip connection
    x = Reshape((model_scale, 512))(x) # train against all dlatent values
    model = Model(inputs=inp,outputs=x)
    model.compile(loss=loss, metrics=[], optimizer=optimizer) # By default: adam optimizer, logcosh used for loss.
    return model
def finetune_effnet(model, args):
    """
    Finetunes an EfficientNet to predict W from X

    Generate batches (X, W) of size 'batch_size', iterates 'n_epochs', and repeat while 'max_patience' is reached
    on the test set. The model is saved every time a new best test loss is reached.
    """
    save_path = args.model_path
    model_res=args.model_res
    image_size=args.image_size
    batch_size=args.batch_size
    test_size=args.test_size
    max_patience=args.max_patience
    n_epochs=args.epochs
    seed=args.seed
    minibatch_size=args.minibatch_size
    truncation=args.truncation
    fancy_truncation=args.fancy_truncation
    use_ktrain=args.use_ktrain
    ktrain_max_lr=args.ktrain_max_lr
    ktrain_reduce_lr=args.ktrain_reduce_lr
    ktrain_stop_early=args.ktrain_stop_early
    # EfficientNet backbones require at least 224x224 inputs.
    assert image_size >= 224
    # Create a test set
    np.random.seed(seed)
    print('Creating test set:')
    W_test, X_test = generate_dataset(n=test_size, model_res=model_res, image_size=image_size, seed=seed, minibatch_size=minibatch_size, truncation=truncation, fancy_truncation=fancy_truncation)
    # Iterate on batches of size batch_size
    print('Generating training set:')
    patience = 0
    epoch = -1
    best_loss = np.inf
    #loss = model.evaluate(X_test, W_test)
    #print('Initial test loss : {:.5f}'.format(loss))
    # Keep generating fresh training data until the test loss stops improving
    # for max_patience consecutive rounds.
    while (patience <= max_patience):
        W_train = X_train = None
        W_train, X_train = generate_dataset(batch_size, model_res=model_res, image_size=image_size, seed=seed, minibatch_size=minibatch_size, truncation=truncation, fancy_truncation=fancy_truncation)
        if use_ktrain:
            # ktrain path: fit with autofit on a fresh validation set, then do
            # a final plain fit on that validation data before discarding it.
            print('Creating validation set:')
            W_val, X_val = generate_dataset(n=test_size, model_res=model_res, image_size=image_size, seed=seed, minibatch_size=minibatch_size, truncation=truncation, fancy_truncation=fancy_truncation)
            learner = ktrain.get_learner(model=model,
                                         train_data=(X_train, W_train), val_data=(X_val, W_val),
                                         workers=1, use_multiprocessing=False,
                                         batch_size=minibatch_size)
            #learner.lr_find() # simulate training to find good learning rate
            #learner.lr_plot() # visually identify best learning rate
            learner.autofit(ktrain_max_lr, checkpoint_folder='/tmp', reduce_on_plateau=ktrain_reduce_lr, early_stopping=ktrain_stop_early)
            learner = None
            print('Done with current validation set.')
            model.fit(X_val, W_val, epochs=n_epochs, verbose=True, batch_size=minibatch_size)
        else:
            model.fit(X_train, W_train, epochs=n_epochs, verbose=True, batch_size=minibatch_size)
        loss = model.evaluate(X_test, W_test, batch_size=minibatch_size)
        if loss < best_loss:
            print('New best test loss : {:.5f}'.format(loss))
            patience = 0
            best_loss = loss
        else:
            print('Test loss : {:.5f}'.format(loss))
            patience += 1
        if (patience > max_patience): # When done with test set, train with it and discard.
            print('Done with current test set.')
            model.fit(X_test, W_test, epochs=n_epochs, verbose=True, batch_size=minibatch_size)
        print('Saving model.')
        model.save(save_path)
# ---- Command-line interface -------------------------------------------------
parser = argparse.ArgumentParser(description='Train an EfficientNet to predict latent representations of images in a StyleGAN model from generated examples', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--model_url', default='https://drive.google.com/uc?id=1MEGjdvVpUsu1jB4zrXZN7Y4kBBOzizDQ', help='Fetch a StyleGAN model to train on from this URL')
parser.add_argument('--model_res', default=1024, help='The dimension of images in the StyleGAN model', type=int)
parser.add_argument('--data_dir', default='data', help='Directory for storing the EfficientNet model')
parser.add_argument('--model_path', default='data/finetuned_effnet.h5', help='Save / load / create the EfficientNet model with this file path')
parser.add_argument('--model_depth', default=1, help='Number of TreeConnect layers to add after EfficientNet', type=int)
parser.add_argument('--model_size', default=1, help='Model size - 0 - small, 1 - medium, 2 - large, or 3 - full size.', type=int)
parser.add_argument('--use_ktrain', default=False, help='Use ktrain for training', type=bool)
parser.add_argument('--ktrain_max_lr', default=0.001, help='Maximum learning rate for ktrain', type=float)
parser.add_argument('--ktrain_reduce_lr', default=1, help='Patience for reducing learning rate after a plateau for ktrain', type=float)
parser.add_argument('--ktrain_stop_early', default=3, help='Patience for early stopping for ktrain', type=float)
parser.add_argument('--activation', default='elu', help='Activation function to use after EfficientNet')
parser.add_argument('--optimizer', default='adam', help='Optimizer to use')
parser.add_argument('--loss', default='logcosh', help='Loss function to use')
parser.add_argument('--use_fp16', default=False, help='Use 16-bit floating point', type=bool)
parser.add_argument('--image_size', default=256, help='Size of images for EfficientNet model', type=int)
parser.add_argument('--batch_size', default=2048, help='Batch size for training the EfficientNet model', type=int)
parser.add_argument('--test_size', default=512, help='Batch size for testing the EfficientNet model', type=int)
parser.add_argument('--truncation', default=0.7, help='Generate images using truncation trick', type=float)
parser.add_argument('--fancy_truncation', default=True, help='Use fancier truncation proposed by @oneiroid', type=float)
parser.add_argument('--max_patience', default=2, help='Number of iterations to wait while test loss does not improve', type=int)
parser.add_argument('--freeze_first', default=False, help='Start training with the pre-trained network frozen, then unfreeze', type=bool)
parser.add_argument('--epochs', default=2, help='Number of training epochs to run for each batch', type=int)
parser.add_argument('--minibatch_size', default=16, help='Size of minibatches for training and generation', type=int)
parser.add_argument('--seed', default=-1, help='Pick a random seed for reproducibility (-1 for no random seed selected)', type=int)
parser.add_argument('--loop', default=-1, help='Run this many iterations (-1 for infinite, halt with CTRL-C)', type=int)
args, other_args = parser.parse_known_args()
os.makedirs(args.data_dir, exist_ok=True)
# A seed of -1 means "no fixed seed".
if args.seed == -1:
    args.seed = None
if args.use_fp16:
    K.set_floatx('float16')
    K.set_epsilon(1e-4)
if args.use_ktrain:
    import ktrain
tflib.init_tf()
# Build (or load from disk) the EfficientNet-based encoder.
model = get_effnet_model(args.model_path, model_res=args.model_res, depth=args.model_depth, size=args.model_size, activation=args.activation, optimizer=args.optimizer, loss=args.loss)
# Download the StyleGAN checkpoint; only the generator (Gs) is used.
with dnnlib.util.open_url(args.model_url, cache_dir=config.cache_dir) as f:
    generator_network, discriminator_network, Gs_network = pickle.load(f)
def load_Gs():
    # Accessor used by generate_dataset_main() to reach the loaded generator.
    return Gs_network
#K.get_session().run(tensorflow.global_variables_initializer())
if args.freeze_first:
    model.layers[1].trainable = False
    model.compile(loss=args.loss, metrics=[], optimizer=args.optimizer)
model.summary()
if args.freeze_first: # run a training iteration first while pretrained model is frozen, then unfreeze.
    finetune_effnet(model, args)
    model.layers[1].trainable = True
    model.compile(loss=args.loss, metrics=[], optimizer=args.optimizer)
    model.summary()
# Train forever when --loop is negative, else run the requested iterations.
if args.loop < 0:
    while True:
        finetune_effnet(model, args)
else:
    count = args.loop
    while count > 0:
        finetune_effnet(model, args)
        count -= 1
| 16,451 | 47.674556 | 213 | py |
stylegan-encoder | stylegan-encoder-master/train_resnet.py | """
Trains a modified Resnet to generate approximate dlatents using examples from a trained StyleGAN.
Props to @SimJeg on GitHub for the original code this is based on, from this thread: https://github.com/Puzer/stylegan-encoder/issues/1#issuecomment-490469454
"""
import os
import math
import numpy as np
import pickle
import cv2
import argparse
import dnnlib
import config
import dnnlib.tflib as tflib
import tensorflow
import keras
import keras.backend as K
from keras.applications.resnet50 import preprocess_input
from keras.layers import Input, LocallyConnected1D, Reshape, Permute, Conv2D, Add
from keras.models import Model, load_model
def generate_dataset_main(n=10000, save_path=None, seed=None, model_res=1024, image_size=256, minibatch_size=16, truncation=0.7):
    """
    Generates a dataset of 'n' images of shape ('size', 'size', 3) with random seed 'seed'
    along with their dlatent vectors W of shape ('n', model_scale, 512).
    These datasets can serve to train an inverse mapping from X to W as well as explore the latent space

    More variation added to latents; also, negative truncation added to balance these examples.
    """
    n = n // 2 # this gets doubled because of negative truncation below
    model_scale = int(2*(math.log(model_res,2)-1)) # For example, 1024 -> 18
    Gs = load_Gs()
    # mod_l chunks of mapped dlatents are tiled across the layers to add
    # variation beyond a single mapped vector per image.
    if (model_scale % 3 == 0):
        mod_l = 3
    else:
        mod_l = 2
    if seed is not None:
        b = bool(np.random.RandomState(seed).randint(2))
        Z = np.random.RandomState(seed).randn(n*mod_l, Gs.input_shape[1])
    else:
        b = bool(np.random.randint(2))
        Z = np.random.randn(n*mod_l, Gs.input_shape[1])
    if b:
        # Half the time, use a coarser split (two chunks) for extra variety.
        mod_l = model_scale // 2
    mod_r = model_scale // mod_l
    # Regenerate Z since mod_l may have changed above.
    if seed is not None:
        Z = np.random.RandomState(seed).randn(n*mod_l, Gs.input_shape[1])
    else:
        Z = np.random.randn(n*mod_l, Gs.input_shape[1])
    W = Gs.components.mapping.run(Z, None, minibatch_size=minibatch_size) # Use mapping network to get unique dlatents for more variation.
    dlatent_avg = Gs.get_var('dlatent_avg') # [component]
    W = (W[np.newaxis] - dlatent_avg) * np.reshape([truncation, -truncation], [-1, 1, 1, 1]) + dlatent_avg # truncation trick and add negative image pair
    W = np.append(W[0], W[1], axis=0)
    W = W[:, :mod_r]
    W = W.reshape((n*2, model_scale, 512))
    # Synthesize the images for every dlatent and downscale to image_size.
    X = Gs.components.synthesis.run(W, randomize_noise=False, minibatch_size=minibatch_size, print_progress=True,
                                    output_transform=dict(func=tflib.convert_images_to_uint8, nchw_to_nhwc=True))
    X = np.array([cv2.resize(x, (image_size, image_size), interpolation = cv2.INTER_AREA) for x in X])
    #X = preprocess_input(X, backend = keras.backend, layers = keras.layers, models = keras.models, utils = keras.utils)
    X = preprocess_input(X)
    return W, X
def generate_dataset(n=10000, save_path=None, seed=None, model_res=1024, image_size=256, minibatch_size=16, truncation=0.7):
    """
    Use generate_dataset_main() as a helper function.
    Divides requests into batches to save memory.
    """
    batch_size = 16
    inc = n//batch_size
    left = n-((batch_size-1)*inc)
    # (batch_size - 1) chunks of `inc` samples plus one remainder chunk of `left`.
    chunk_sizes = [inc] * (batch_size - 1) + [left]
    W, X = generate_dataset_main(chunk_sizes[0], save_path, seed, model_res, image_size, minibatch_size, truncation)
    for chunk in chunk_sizes[1:]:
        aW, aX = generate_dataset_main(chunk, save_path, seed, model_res, image_size, minibatch_size, truncation)
        W = np.append(W, aW, axis=0)
        aW = None  # release the chunk before the next big allocation
        X = np.append(X, aX, axis=0)
        aX = None
    if save_path is not None:
        prefix = '_{}_{}'.format(seed, n)
        np.save(os.path.join(os.path.join(save_path, 'W' + prefix)), W)
        np.save(os.path.join(os.path.join(save_path, 'X' + prefix)), X)
    return W, X
def is_square(n):
    """Return True if n is a perfect square.

    Uses math.isqrt so the check stays exact for arbitrarily large ints;
    the original float-based `int(math.sqrt(n) + 0.5)**2` can misclassify
    values once sqrt precision runs out (beyond ~2**52), and raised
    ValueError on negative inputs instead of returning False.
    """
    if n < 0:
        return False
    return math.isqrt(n) ** 2 == n
def get_resnet_model(save_path, model_res=1024, image_size=256, depth=2, size=0, activation='elu', loss='logcosh', optimizer='adam'):
    """Load a cached model from save_path, or build and compile a fresh
    ResNet-based predictor mapping images to (model_scale, 512) dlatents.

    size selects the backbone: <= 0 ResNet50, 1 ResNet50V2, 2 ResNet101V2,
    >= 3 ResNet152V2. depth is the number of TreeConnect blocks appended
    after the backbone (values < 0 are clamped to 1).

    NOTE(review): Input/Conv2D/Reshape/LocallyConnected1D/Permute/Add and
    `keras` itself are presumably imported earlier in the file (not visible
    in this chunk) — confirm before moving this function elsewhere.
    """
    # Build model
    if os.path.exists(save_path):
        print('Loading model')
        return load_model(save_path)
    print('Building model')
    model_scale = int(2*(math.log(model_res,2)-1)) # For example, 1024 -> 18
    if size <= 0:
        from keras.applications.resnet50 import ResNet50
        resnet = ResNet50(include_top=False, pooling=None, weights='imagenet', input_shape=(image_size, image_size, 3))
    else:
        from keras_applications.resnet_v2 import ResNet50V2, ResNet101V2, ResNet152V2
        if size == 1:
            resnet = ResNet50V2(include_top=False, pooling=None, weights='imagenet', input_shape=(image_size, image_size, 3), backend = keras.backend, layers = keras.layers, models = keras.models, utils = keras.utils)
        if size == 2:
            resnet = ResNet101V2(include_top=False, pooling=None, weights='imagenet', input_shape=(image_size, image_size, 3), backend = keras.backend, layers = keras.layers, models = keras.models, utils = keras.utils)
        if size >= 3:
            resnet = ResNet152V2(include_top=False, pooling=None, weights='imagenet', input_shape=(image_size, image_size, 3), backend = keras.backend, layers = keras.layers, models = keras.models, utils = keras.utils)
    # Factor layer_size into a (layer_r, layer_l) grid for the TreeConnect layers.
    layer_size = model_scale*8*8*8
    if is_square(layer_size): # work out layer dimensions
        layer_l = int(math.sqrt(layer_size)+0.5)
        layer_r = layer_l
    else:
        # Not a perfect square: use the next power of two for one side.
        layer_m = math.log(math.sqrt(layer_size),2)
        layer_l = 2**math.ceil(layer_m)
        layer_r = layer_size // layer_l
    layer_l = int(layer_l)
    layer_r = int(layer_r)
    x_init = None
    inp = Input(shape=(image_size, image_size, 3))
    x = resnet(inp)
    if (depth < 0):
        depth = 1
    if (size <= 1):
        if (size <= 0):
            x = Conv2D(model_scale*8, 1, activation=activation)(x) # scale down
            x = Reshape((layer_r, layer_l))(x)
        else:
            x = Conv2D(model_scale*8*4, 1, activation=activation)(x) # scale down a little
            x = Reshape((layer_r*2, layer_l*2))(x)
    else:
        if (size == 2):
            x = Conv2D(1024, 1, activation=activation)(x) # scale down a bit
            x = Reshape((256, 256))(x)
        else:
            x = Reshape((256, 512))(x) # all weights used
    while (depth > 0): # See https://github.com/OliverRichter/TreeConnect/blob/master/cifar.py - TreeConnect inspired layers instead of dense layers.
        x = LocallyConnected1D(layer_r, 1, activation=activation)(x)
        x = Permute((2, 1))(x)
        x = LocallyConnected1D(layer_l, 1, activation=activation)(x)
        x = Permute((2, 1))(x)
        if x_init is not None:
            x = Add()([x, x_init]) # add skip connection
        x_init = x
        depth-=1
    x = Reshape((model_scale, 512))(x) # train against all dlatent values
    model = Model(inputs=inp,outputs=x)
    model.compile(loss=loss, metrics=[], optimizer=optimizer) # By default: adam optimizer, logcosh used for loss.
    return model
def finetune_resnet(model, save_path, model_res=1024, image_size=256, batch_size=10000, test_size=1000, n_epochs=10, max_patience=5, seed=0, minibatch_size=32, truncation=0.7):
    """
    Finetunes a resnet to predict W from X
    Generate batches (X, W) of size 'batch_size', iterates 'n_epochs', and repeat while 'max_patience' is reached
    on the test set. The model is saved every time a new best test loss is reached.
    """
    assert image_size >= 224  # ResNet backbones require at least 224x224 inputs
    # Create a test set
    print('Creating test set:')
    np.random.seed(seed)
    W_test, X_test = generate_dataset(n=test_size, model_res=model_res, image_size=image_size, seed=seed, minibatch_size=minibatch_size, truncation=truncation)
    # Iterate on batches of size batch_size
    print('Generating training set:')
    patience = 0
    best_loss = np.inf
    #loss = model.evaluate(X_test, W_test)
    #print('Initial test loss : {:.5f}'.format(loss))
    while (patience <= max_patience):
        W_train = X_train = None  # drop the previous training batch before generating a new one
        # NOTE(review): the same `seed` is passed for every generated batch; with a
        # fixed seed the training batches look identical each iteration — confirm intended.
        W_train, X_train = generate_dataset(batch_size, model_res=model_res, image_size=image_size, seed=seed, minibatch_size=minibatch_size, truncation=truncation)
        model.fit(X_train, W_train, epochs=n_epochs, verbose=True, batch_size=minibatch_size)
        loss = model.evaluate(X_test, W_test, batch_size=minibatch_size)
        if loss < best_loss:
            print('New best test loss : {:.5f}'.format(loss))
            patience = 0
            best_loss = loss
        else:
            print('Test loss : {:.5f}'.format(loss))
            patience += 1
        if (patience > max_patience): # When done with test set, train with it and discard.
            print('Done with current test set.')
            model.fit(X_test, W_test, epochs=n_epochs, verbose=True, batch_size=minibatch_size)
        # NOTE(review): the model is saved after every batch, not only on a new
        # best loss as the docstring suggests.
        print('Saving model.')
        model.save(save_path)
def _str2bool(v):
    """Parse common true/false spellings for argparse.

    argparse's `type=bool` treats ANY non-empty string — including "False" —
    as True, so boolean flags silently ignored their value.
    """
    if isinstance(v, bool):
        return v
    if v.lower() in ('yes', 'true', 't', 'y', '1'):
        return True
    if v.lower() in ('no', 'false', 'f', 'n', '0'):
        return False
    raise argparse.ArgumentTypeError('Boolean value expected.')

parser = argparse.ArgumentParser(description='Train a ResNet to predict latent representations of images in a StyleGAN model from generated examples', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--model_url', default='https://drive.google.com/uc?id=1MEGjdvVpUsu1jB4zrXZN7Y4kBBOzizDQ', help='Fetch a StyleGAN model to train on from this URL')
parser.add_argument('--model_res', default=1024, help='The dimension of images in the StyleGAN model', type=int)
parser.add_argument('--data_dir', default='data', help='Directory for storing the ResNet model')
parser.add_argument('--model_path', default='data/finetuned_resnet.h5', help='Save / load / create the ResNet model with this file path')
parser.add_argument('--model_depth', default=1, help='Number of TreeConnect layers to add after ResNet', type=int)
parser.add_argument('--model_size', default=1, help='Model size - 0 - small, 1 - medium, 2 - large, 3 - full.', type=int)
parser.add_argument('--activation', default='elu', help='Activation function to use after ResNet')
parser.add_argument('--optimizer', default='adam', help='Optimizer to use')
parser.add_argument('--loss', default='logcosh', help='Loss function to use')
# BUG FIX: was type=bool, which parsed '--use_fp16 False' as True.
parser.add_argument('--use_fp16', default=False, help='Use 16-bit floating point', type=_str2bool)
parser.add_argument('--image_size', default=256, help='Size of images for ResNet model', type=int)
parser.add_argument('--batch_size', default=2048, help='Batch size for training the ResNet model', type=int)
parser.add_argument('--test_size', default=512, help='Batch size for testing the ResNet model', type=int)
parser.add_argument('--truncation', default=0.7, help='Generate images using truncation trick', type=float)
parser.add_argument('--max_patience', default=2, help='Number of iterations to wait while test loss does not improve', type=int)
# BUG FIX: was type=bool (same pitfall as --use_fp16).
parser.add_argument('--freeze_first', default=False, help='Start training with the pre-trained network frozen, then unfreeze', type=_str2bool)
parser.add_argument('--epochs', default=2, help='Number of training epochs to run for each batch', type=int)
parser.add_argument('--minibatch_size', default=16, help='Size of minibatches for training and generation', type=int)
parser.add_argument('--seed', default=-1, help='Pick a random seed for reproducibility (-1 for no random seed selected)', type=int)
parser.add_argument('--loop', default=-1, help='Run this many iterations (-1 for infinite, halt with CTRL-C)', type=int)
args, other_args = parser.parse_known_args()
os.makedirs(args.data_dir, exist_ok=True)
if args.seed == -1:
    args.seed = None
if args.use_fp16:
    K.set_floatx('float16')
    K.set_epsilon(1e-4)
tflib.init_tf()
# BUG FIX: image_size was not forwarded, so a non-default --image_size built a
# model whose input shape disagreed with the images fed by finetune_resnet().
model = get_resnet_model(args.model_path, model_res=args.model_res, image_size=args.image_size, depth=args.model_depth, size=args.model_size, activation=args.activation, optimizer=args.optimizer, loss=args.loss)
with dnnlib.util.open_url(args.model_url, cache_dir=config.cache_dir) as f:
    generator_network, discriminator_network, Gs_network = pickle.load(f)
def load_Gs():
    # Accessor used by generate_dataset_main(); returns the pre-trained Gs.
    return Gs_network
if args.freeze_first:
    model.layers[1].trainable = False
    model.compile(loss=args.loss, metrics=[], optimizer=args.optimizer)
model.summary()
if args.freeze_first: # run a training iteration first while pretrained model is frozen, then unfreeze.
    finetune_resnet(model, args.model_path, model_res=args.model_res, image_size=args.image_size, batch_size=args.batch_size, test_size=args.test_size, max_patience=args.max_patience, n_epochs=args.epochs, seed=args.seed, minibatch_size=args.minibatch_size, truncation=args.truncation)
    model.layers[1].trainable = True
    model.compile(loss=args.loss, metrics=[], optimizer=args.optimizer)
    model.summary()
if args.loop < 0:
    while True:
        finetune_resnet(model, args.model_path, model_res=args.model_res, image_size=args.image_size, batch_size=args.batch_size, test_size=args.test_size, max_patience=args.max_patience, n_epochs=args.epochs, seed=args.seed, minibatch_size=args.minibatch_size, truncation=args.truncation)
else:
    count = args.loop
    while count > 0:
        finetune_resnet(model, args.model_path, model_res=args.model_res, image_size=args.image_size, batch_size=args.batch_size, test_size=args.test_size, max_patience=args.max_patience, n_epochs=args.epochs, seed=args.seed, minibatch_size=args.minibatch_size, truncation=args.truncation)
        count -= 1
| 13,439 | 49.337079 | 289 | py |
stylegan-encoder | stylegan-encoder-master/align_images.py | import os
import sys
import bz2
import argparse
from keras.utils import get_file
from ffhq_dataset.face_alignment import image_align
from ffhq_dataset.landmarks_detector import LandmarksDetector
import multiprocessing
LANDMARKS_MODEL_URL = 'http://dlib.net/files/shape_predictor_68_face_landmarks.dat.bz2'
def unpack_bz2(src_path):
    """Decompress a .bz2 archive next to itself and return the new path."""
    dst_path = src_path[:-4]  # strip the ".bz2" suffix
    with open(dst_path, 'wb') as out_file:
        out_file.write(bz2.BZ2File(src_path).read())
    return dst_path
if __name__ == "__main__":
    """
    Extracts and aligns all faces from images using DLib and a function from original FFHQ dataset preparation step
    python align_images.py /raw_images /aligned_images
    """
    def _str2bool(v):
        # argparse's type=bool treats any non-empty string (even 'False') as True.
        if isinstance(v, bool):
            return v
        if v.lower() in ('yes', 'true', 't', 'y', '1'):
            return True
        if v.lower() in ('no', 'false', 'f', 'n', '0'):
            return False
        raise argparse.ArgumentTypeError('Boolean value expected.')

    parser = argparse.ArgumentParser(description='Align faces from input images', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('raw_dir', help='Directory with raw images for face alignment')
    parser.add_argument('aligned_dir', help='Directory for storing aligned images')
    parser.add_argument('--output_size', default=1024, help='The dimension of images for input to the model', type=int)
    parser.add_argument('--x_scale', default=1, help='Scaling factor for x dimension', type=float)
    parser.add_argument('--y_scale', default=1, help='Scaling factor for y dimension', type=float)
    parser.add_argument('--em_scale', default=0.1, help='Scaling factor for eye-mouth distance', type=float)
    # BUG FIX: was type=bool, which parsed '--use_alpha False' as True.
    parser.add_argument('--use_alpha', default=False, help='Add an alpha channel for masking', type=_str2bool)
    args, other_args = parser.parse_known_args()
    landmarks_model_path = unpack_bz2(get_file('shape_predictor_68_face_landmarks.dat.bz2',
                                               LANDMARKS_MODEL_URL, cache_subdir='temp'))
    RAW_IMAGES_DIR = args.raw_dir
    ALIGNED_IMAGES_DIR = args.aligned_dir
    os.makedirs(ALIGNED_IMAGES_DIR, exist_ok=True)  # ensure the output directory exists
    landmarks_detector = LandmarksDetector(landmarks_model_path)
    for img_name in os.listdir(RAW_IMAGES_DIR):
        print('Aligning %s ...' % img_name)
        try:
            raw_img_path = os.path.join(RAW_IMAGES_DIR, img_name)
            fn = face_img_name = '%s_%02d.png' % (os.path.splitext(img_name)[0], 1)
            # BUG FIX: the skip check previously tested `fn` relative to the
            # current working directory; test inside the aligned output dir.
            if os.path.isfile(os.path.join(ALIGNED_IMAGES_DIR, fn)):
                continue
            print('Getting landmarks...')
            for i, face_landmarks in enumerate(landmarks_detector.get_landmarks(raw_img_path), start=1):
                try:
                    print('Starting face alignment...')
                    face_img_name = '%s_%02d.png' % (os.path.splitext(img_name)[0], i)
                    aligned_face_path = os.path.join(ALIGNED_IMAGES_DIR, face_img_name)
                    image_align(raw_img_path, aligned_face_path, face_landmarks, output_size=args.output_size, x_scale=args.x_scale, y_scale=args.y_scale, em_scale=args.em_scale, alpha=args.use_alpha)
                    print('Wrote result %s' % aligned_face_path)
                except Exception as e:
                    # BUG FIX: bare `except:` also swallowed KeyboardInterrupt
                    # and hid the failure reason; surface the error instead.
                    print("Exception in face alignment!", e)
        except Exception as e:
            print("Exception in landmark detection!", e)
| 3,050 | 48.209677 | 200 | py |
stylegan-encoder | stylegan-encoder-master/swa.py | """
Stochastic Weight Averaging: https://arxiv.org/abs/1803.05407
See: https://github.com/kristpapadopoulos/keras-stochastic-weight-averaging
"""
import os
import glob
import pickle
import argparse
from dnnlib.tflib import init_tf
filepath = 'output.pkl'
def fetch_models_from_files(model_list):
    """Lazily yield the unpickled contents of each checkpoint file in turn."""
    for model_path in model_list:
        with open(model_path, 'rb') as checkpoint:
            yield pickle.load(checkpoint)
def apply_swa_to_checkpoints(models):
    """Fold an iterator of (G, D, Gs) checkpoint triples into one averaged triple.

    The first triple seeds the running average; each subsequent triple is
    merged in via the networks' own apply_swa(other, epoch) method. Iteration
    stops when the iterator is exhausted or a triple with gs=None appears.
    Returns (avg_G, avg_D, avg_Gs).
    """
    gen, dis, gs = next(models)
    print('Loading', end='', flush=True)
    mod_gen = gen
    mod_dis = dis
    mod_gs = gs
    epoch = 0
    try:
        while True:
            epoch += 1
            gen, dis, gs = next(models)
            if gs is None:
                print("")
                break
            mod_gen.apply_swa(gen, epoch)
            mod_dis.apply_swa(dis, epoch)
            mod_gs.apply_swa(gs, epoch)
            print('.', end='', flush=True)
    except StopIteration:
        # BUG FIX: was a bare `except:` which also silently swallowed real
        # errors raised by apply_swa(); only iterator exhaustion ends the loop.
        print("")
    return (mod_gen, mod_dis, mod_gs)
parser = argparse.ArgumentParser(description='Perform stochastic weight averaging', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('results_dir', help='Directory with network checkpoints for weight averaging')
parser.add_argument('--filespec', default='network*.pkl', help='The files to average')
parser.add_argument('--output_model', default='network_avg.pkl', help='The averaged model to output')
parser.add_argument('--count', default=6, help='Average the last n checkpoints', type=int)
args, other_args = parser.parse_known_args()
swa_epochs = args.count
filepath = args.output_model
files = glob.glob(os.path.join(args.results_dir,args.filespec))
# BUG FIX: glob() returns files in arbitrary order, so the list must be sorted
# *before* taking the trailing slice; the original sliced first and could
# average an arbitrary subset instead of the newest `count` checkpoints.
files.sort()
if (len(files)>swa_epochs):
    files = files[-swa_epochs:]
print(files)
init_tf()
models = fetch_models_from_files(files)
swa_models = apply_swa_to_checkpoints(models)
print('Final model parameters set to stochastic weight average.')
with open(filepath, 'wb') as f:
    pickle.dump(swa_models, f)
print('Final stochastic averaged weights saved to file.')
| 2,025 | 31.15873 | 139 | py |
stylegan-encoder | stylegan-encoder-master/encode_images.py | import os
import argparse
import pickle
from tqdm import tqdm
import PIL.Image
from PIL import ImageFilter
import numpy as np
import dnnlib
import dnnlib.tflib as tflib
import config
from encoder.generator_model import Generator
from encoder.perceptual_model import PerceptualModel, load_images
#from tensorflow.keras.models import load_model
from keras.models import load_model
from keras.applications.resnet50 import preprocess_input
def split_to_batches(l, n):
    """Yield successive slices of `l` containing at most `n` items each."""
    for start in range(0, len(l), n):
        yield l[start:start + n]
def str2bool(v):
    """argparse-friendly boolean parser accepting yes/no-style spellings.

    Raises argparse.ArgumentTypeError for unrecognized values so argparse
    reports a clean usage error instead of silently coercing.
    """
    if isinstance(v, bool):
        return v
    lowered = v.lower()
    if lowered in ('yes', 'true', 't', 'y', '1'):
        return True
    if lowered in ('no', 'false', 'f', 'n', '0'):
        return False
    raise argparse.ArgumentTypeError('Boolean value expected.')
def main():
    """CLI entry point: recover W-space dlatents for every image in src_dir.

    For each batch of reference images the dlatents are initialized (from a
    previous run via --load_last, or a ResNet/EfficientNet predictor when
    available), then refined by gradient descent on a weighted mix of
    perceptual losses. Outputs per image: a generated PNG, a .npy dlatent,
    and optionally an optimization video.
    """
    parser = argparse.ArgumentParser(description='Find latent representation of reference images using perceptual losses', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('src_dir', help='Directory with images for encoding')
    parser.add_argument('generated_images_dir', help='Directory for storing generated images')
    parser.add_argument('dlatent_dir', help='Directory for storing dlatent representations')
    parser.add_argument('--data_dir', default='data', help='Directory for storing optional models')
    parser.add_argument('--mask_dir', default='masks', help='Directory for storing optional masks')
    parser.add_argument('--load_last', default='', help='Start with embeddings from directory')
    parser.add_argument('--dlatent_avg', default='', help='Use dlatent from file specified here for truncation instead of dlatent_avg from Gs')
    parser.add_argument('--model_url', default='https://drive.google.com/uc?id=1MEGjdvVpUsu1jB4zrXZN7Y4kBBOzizDQ', help='Fetch a StyleGAN model to train on from this URL') # karras2019stylegan-ffhq-1024x1024.pkl
    parser.add_argument('--model_res', default=1024, help='The dimension of images in the StyleGAN model', type=int)
    parser.add_argument('--batch_size', default=1, help='Batch size for generator and perceptual model', type=int)
    parser.add_argument('--optimizer', default='ggt', help='Optimization algorithm used for optimizing dlatents')
    # Perceptual model params
    parser.add_argument('--image_size', default=256, help='Size of images for perceptual model', type=int)
    parser.add_argument('--resnet_image_size', default=256, help='Size of images for the Resnet model', type=int)
    parser.add_argument('--lr', default=0.25, help='Learning rate for perceptual model', type=float)
    parser.add_argument('--decay_rate', default=0.9, help='Decay rate for learning rate', type=float)
    parser.add_argument('--iterations', default=100, help='Number of optimization steps for each batch', type=int)
    parser.add_argument('--decay_steps', default=4, help='Decay steps for learning rate decay (as a percent of iterations)', type=float)
    parser.add_argument('--early_stopping', default=True, help='Stop early once training stabilizes', type=str2bool, nargs='?', const=True)
    parser.add_argument('--early_stopping_threshold', default=0.5, help='Stop after this threshold has been reached', type=float)
    parser.add_argument('--early_stopping_patience', default=10, help='Number of iterations to wait below threshold', type=int)
    parser.add_argument('--load_effnet', default='data/finetuned_effnet.h5', help='Model to load for EfficientNet approximation of dlatents')
    parser.add_argument('--load_resnet', default='data/finetuned_resnet.h5', help='Model to load for ResNet approximation of dlatents')
    parser.add_argument('--use_preprocess_input', default=True, help='Call process_input() first before using feed forward net', type=str2bool, nargs='?', const=True)
    parser.add_argument('--use_best_loss', default=True, help='Output the lowest loss value found as the solution', type=str2bool, nargs='?', const=True)
    parser.add_argument('--average_best_loss', default=0.25, help='Do a running weighted average with the previous best dlatents found', type=float)
    parser.add_argument('--sharpen_input', default=True, help='Sharpen the input images', type=str2bool, nargs='?', const=True)
    # Loss function options
    parser.add_argument('--use_vgg_loss', default=0.4, help='Use VGG perceptual loss; 0 to disable, > 0 to scale.', type=float)
    parser.add_argument('--use_vgg_layer', default=9, help='Pick which VGG layer to use.', type=int)
    parser.add_argument('--use_pixel_loss', default=1.5, help='Use logcosh image pixel loss; 0 to disable, > 0 to scale.', type=float)
    parser.add_argument('--use_mssim_loss', default=200, help='Use MS-SIM perceptual loss; 0 to disable, > 0 to scale.', type=float)
    parser.add_argument('--use_lpips_loss', default=100, help='Use LPIPS perceptual loss; 0 to disable, > 0 to scale.', type=float)
    parser.add_argument('--use_l1_penalty', default=0.5, help='Use L1 penalty on latents; 0 to disable, > 0 to scale.', type=float)
    parser.add_argument('--use_discriminator_loss', default=0.5, help='Use trained discriminator to evaluate realism.', type=float)
    parser.add_argument('--use_adaptive_loss', default=False, help='Use the adaptive robust loss function from Google Research for pixel and VGG feature loss.', type=str2bool, nargs='?', const=True)
    # Generator params
    parser.add_argument('--randomize_noise', default=False, help='Add noise to dlatents during optimization', type=str2bool, nargs='?', const=True)
    parser.add_argument('--tile_dlatents', default=False, help='Tile dlatents to use a single vector at each scale', type=str2bool, nargs='?', const=True)
    parser.add_argument('--clipping_threshold', default=2.0, help='Stochastic clipping of gradient values outside of this threshold', type=float)
    # Masking params
    parser.add_argument('--load_mask', default=False, help='Load segmentation masks', type=str2bool, nargs='?', const=True)
    parser.add_argument('--face_mask', default=True, help='Generate a mask for predicting only the face area', type=str2bool, nargs='?', const=True)
    parser.add_argument('--use_grabcut', default=True, help='Use grabcut algorithm on the face mask to better segment the foreground', type=str2bool, nargs='?', const=True)
    parser.add_argument('--scale_mask', default=1.4, help='Look over a wider section of foreground for grabcut', type=float)
    parser.add_argument('--composite_mask', default=True, help='Merge the unmasked area back into the generated image', type=str2bool, nargs='?', const=True)
    parser.add_argument('--composite_blur', default=8, help='Size of blur filter to smoothly composite the images', type=int)
    # Video params
    parser.add_argument('--video_dir', default='videos', help='Directory for storing training videos')
    parser.add_argument('--output_video', default=False, help='Generate videos of the optimization process', type=bool)
    parser.add_argument('--video_codec', default='MJPG', help='FOURCC-supported video codec name')
    parser.add_argument('--video_frame_rate', default=24, help='Video frames per second', type=int)
    parser.add_argument('--video_size', default=512, help='Video size in pixels', type=int)
    parser.add_argument('--video_skip', default=1, help='Only write every n frames (1 = write every frame)', type=int)
    args, other_args = parser.parse_known_args()
    args.decay_steps *= 0.01 * args.iterations # Calculate steps as a percent of total iterations
    if args.output_video:
        import cv2
    synthesis_kwargs = dict(output_transform=dict(func=tflib.convert_images_to_uint8, nchw_to_nhwc=False), minibatch_size=args.batch_size)
    # Collect the reference images (plain files only) to encode.
    ref_images = [os.path.join(args.src_dir, x) for x in os.listdir(args.src_dir)]
    ref_images = list(filter(os.path.isfile, ref_images))
    if len(ref_images) == 0:
        raise Exception('%s is empty' % args.src_dir)
    os.makedirs(args.data_dir, exist_ok=True)
    os.makedirs(args.mask_dir, exist_ok=True)
    os.makedirs(args.generated_images_dir, exist_ok=True)
    os.makedirs(args.dlatent_dir, exist_ok=True)
    os.makedirs(args.video_dir, exist_ok=True)
    # Initialize generator and perceptual model
    tflib.init_tf()
    with dnnlib.util.open_url(args.model_url, cache_dir=config.cache_dir) as f:
        generator_network, discriminator_network, Gs_network = pickle.load(f)
    generator = Generator(Gs_network, args.batch_size, clipping_threshold=args.clipping_threshold, tiled_dlatent=args.tile_dlatents, model_res=args.model_res, randomize_noise=args.randomize_noise)
    if (args.dlatent_avg != ''):
        generator.set_dlatent_avg(np.load(args.dlatent_avg))
    perc_model = None
    if (args.use_lpips_loss > 0.00000001):
        # Download the pre-trained LPIPS network only when its loss is enabled.
        with dnnlib.util.open_url('https://drive.google.com/uc?id=1N2-m9qszOeVC9Tq77WxsLnuWwOedQiD2', cache_dir=config.cache_dir) as f:
            perc_model = pickle.load(f)
    perceptual_model = PerceptualModel(args, perc_model=perc_model, batch_size=args.batch_size)
    perceptual_model.build_perceptual_model(generator, discriminator_network)
    ff_model = None  # lazily-loaded feed-forward dlatent predictor (ResNet/EfficientNet)
    # Optimize (only) dlatents by minimizing perceptual loss between reference and generated images in feature space
    for images_batch in tqdm(split_to_batches(ref_images, args.batch_size), total=len(ref_images)//args.batch_size):
        names = [os.path.splitext(os.path.basename(x))[0] for x in images_batch]
        if args.output_video:
            video_out = {}
            for name in names:
                video_out[name] = cv2.VideoWriter(os.path.join(args.video_dir, f'{name}.avi'),cv2.VideoWriter_fourcc(*args.video_codec), args.video_frame_rate, (args.video_size,args.video_size))
        perceptual_model.set_reference_images(images_batch)
        dlatents = None
        if (args.load_last != ''): # load previous dlatents for initialization
            for name in names:
                dl = np.expand_dims(np.load(os.path.join(args.load_last, f'{name}.npy')),axis=0)
                if (dlatents is None):
                    dlatents = dl
                else:
                    dlatents = np.vstack((dlatents,dl))
        else:
            if (ff_model is None):
                if os.path.exists(args.load_resnet):
                    from keras.applications.resnet50 import preprocess_input
                    print("Loading ResNet Model:")
                    ff_model = load_model(args.load_resnet)
            if (ff_model is None):
                if os.path.exists(args.load_effnet):
                    import efficientnet
                    from efficientnet import preprocess_input
                    print("Loading EfficientNet Model:")
                    ff_model = load_model(args.load_effnet)
            if (ff_model is not None): # predict initial dlatents with ResNet model
                if (args.use_preprocess_input):
                    dlatents = ff_model.predict(preprocess_input(load_images(images_batch,image_size=args.resnet_image_size)))
                else:
                    dlatents = ff_model.predict(load_images(images_batch,image_size=args.resnet_image_size))
        if dlatents is not None:
            generator.set_dlatents(dlatents)
        op = perceptual_model.optimize(generator.dlatent_variable, iterations=args.iterations, use_optimizer=args.optimizer)
        pbar = tqdm(op, leave=False, total=args.iterations)
        vid_count = 0
        best_loss = None
        best_dlatent = None
        avg_loss_count = 0
        if args.early_stopping:
            avg_loss = prev_loss = None
        for loss_dict in pbar:
            if args.early_stopping: # early stopping feature
                if prev_loss is not None:
                    if avg_loss is not None:
                        # NOTE(review): the new delta (prev_loss - loss) is not
                        # scaled by 0.5, so this is not a standard exponential
                        # moving average — confirm whether that is intended.
                        avg_loss = 0.5 * avg_loss + (prev_loss - loss_dict["loss"])
                        if avg_loss < args.early_stopping_threshold: # count while under threshold; else reset
                            avg_loss_count += 1
                        else:
                            avg_loss_count = 0
                        if avg_loss_count > args.early_stopping_patience: # stop once threshold is reached
                            print("")
                            break
                    else:
                        avg_loss = prev_loss - loss_dict["loss"]
            pbar.set_description(" ".join(names) + ": " + "; ".join(["{} {:.4f}".format(k, v) for k, v in loss_dict.items()]))
            if best_loss is None or loss_dict["loss"] < best_loss:
                if best_dlatent is None or args.average_best_loss <= 0.00000001:
                    best_dlatent = generator.get_dlatents()
                else:
                    # NOTE(review): weights are hard-coded 0.25/0.75 and only
                    # match args.average_best_loss at its default value (0.25);
                    # confirm whether the argument should be used here.
                    best_dlatent = 0.25 * best_dlatent + 0.75 * generator.get_dlatents()
                if args.use_best_loss:
                    generator.set_dlatents(best_dlatent)
                best_loss = loss_dict["loss"]
            if args.output_video and (vid_count % args.video_skip == 0):
                batch_frames = generator.generate_images()
                for i, name in enumerate(names):
                    video_frame = PIL.Image.fromarray(batch_frames[i], 'RGB').resize((args.video_size,args.video_size),PIL.Image.LANCZOS)
                    video_out[name].write(cv2.cvtColor(np.array(video_frame).astype('uint8'), cv2.COLOR_RGB2BGR))
            generator.stochastic_clip_dlatents()
            prev_loss = loss_dict["loss"]
        if not args.use_best_loss:
            best_loss = prev_loss
        print(" ".join(names), " Loss {:.4f}".format(best_loss))
        if args.output_video:
            for name in names:
                video_out[name].release()
        # Generate images from found dlatents and save them
        if args.use_best_loss:
            generator.set_dlatents(best_dlatent)
        generated_images = generator.generate_images()
        generated_dlatents = generator.get_dlatents()
        for img_array, dlatent, img_path, img_name in zip(generated_images, generated_dlatents, images_batch, names):
            mask_img = None
            if args.composite_mask and (args.load_mask or args.face_mask):
                _, im_name = os.path.split(img_path)
                mask_img = os.path.join(args.mask_dir, f'{im_name}')
            if args.composite_mask and mask_img is not None and os.path.isfile(mask_img):
                # Blend the generated face back into the original photo using
                # a blurred mask so edges composite smoothly.
                orig_img = PIL.Image.open(img_path).convert('RGB')
                width, height = orig_img.size
                imask = PIL.Image.open(mask_img).convert('L').resize((width, height))
                imask = imask.filter(ImageFilter.GaussianBlur(args.composite_blur))
                mask = np.array(imask)/255
                mask = np.expand_dims(mask,axis=-1)
                img_array = mask*np.array(img_array) + (1.0-mask)*np.array(orig_img)
                img_array = img_array.astype(np.uint8)
                #img_array = np.where(mask, np.array(img_array), orig_img)
            img = PIL.Image.fromarray(img_array, 'RGB')
            img.save(os.path.join(args.generated_images_dir, f'{img_name}.png'), 'PNG')
            np.save(os.path.join(args.dlatent_dir, f'{img_name}.npy'), dlatent)
        generator.reset_dlatents()
if __name__ == "__main__":
    # Script entry point: parse CLI args and run the encoding loop.
    main()
| 15,281 | 62.14876 | 211 | py |
stylegan-encoder | stylegan-encoder-master/encoder/perceptual_model.py | from __future__ import absolute_import, division, print_function, unicode_literals
import tensorflow as tf
#import tensorflow_probability as tfp
#tf.enable_eager_execution()
import os
import bz2
import PIL.Image
from PIL import ImageFilter
import numpy as np
from keras.models import Model
from keras.utils import get_file
from keras.applications.vgg16 import VGG16, preprocess_input
import keras.backend as K
import traceback
import dnnlib.tflib as tflib
def load_images(images_list, image_size=256, sharpen=False):
    """Load image files into a single stacked (N, H, W, 3) batch array.

    Each image is converted to RGB, optionally resized to a square of
    `image_size` pixels (skipped when image_size is None), and optionally
    sharpened with PIL's DETAIL filter.
    """
    def _prepare(img_path):
        img = PIL.Image.open(img_path).convert('RGB')
        if image_size is not None:
            img = img.resize((image_size, image_size), PIL.Image.LANCZOS)
        if sharpen:
            img = img.filter(ImageFilter.DETAIL)
        return np.expand_dims(np.array(img), 0)

    return np.vstack([_prepare(path) for path in images_list])
def tf_custom_adaptive_loss(a,b):
    """Mean adaptive robust loss (Barron) over the flattened difference b - a."""
    from adaptive import lossfun
    flat_dim = np.prod(a.get_shape().as_list()[1:])
    residual = tf.reshape(b, [-1, flat_dim]) - tf.reshape(a, [-1, flat_dim])
    loss, _, _ = lossfun(residual, var_suffix='1')
    return tf.math.reduce_mean(loss)
def tf_custom_adaptive_rgb_loss(a,b):
    """Mean adaptive robust loss applied per-pixel in RGB space on b - a."""
    from adaptive import image_lossfun
    per_pixel_loss, _, _ = image_lossfun(b-a, color_space='RGB', representation='PIXEL')
    return tf.math.reduce_mean(per_pixel_loss)
def tf_custom_l1_loss(img1,img2):
    """Mean absolute error between two image tensors."""
    abs_diff = tf.math.abs(img2 - img1)
    return tf.math.reduce_mean(abs_diff, axis=None)
def tf_custom_logcosh_loss(img1,img2):
    """Mean log-cosh error between two image tensors."""
    per_element = tf.keras.losses.logcosh(img1, img2)
    return tf.math.reduce_mean(per_element)
def create_stub(batch_size):
    """Return an empty (batch_size, 0) float32 constant used as a stub input."""
    stub_shape = (batch_size, 0)
    return tf.constant(0, dtype='float32', shape=stub_shape)
def unpack_bz2(src_path):
    """Extract a .bz2 file alongside the original and return the extracted path."""
    extracted_path = src_path[:-4]  # drop the trailing ".bz2"
    payload = bz2.BZ2File(src_path).read()
    with open(extracted_path, 'wb') as out:
        out.write(payload)
    return extracted_path
class PerceptualModel:
    def __init__(self, args, batch_size=1, perc_model=None, sess=None):
        """Configure the blend of perceptual losses from parsed CLI `args`.

        Any loss whose weight is <= self.epsilon is disabled by setting the
        corresponding attribute to None. If args.face_mask is set, a dlib
        face detector and 68-point landmark predictor are initialized
        (downloading the predictor model on first use).
        """
        self.sess = tf.get_default_session() if sess is None else sess
        K.set_session(self.sess)
        self.epsilon = 0.00000001  # weights at or below this count as "off"
        self.lr = args.lr
        self.decay_rate = args.decay_rate
        self.decay_steps = args.decay_steps
        self.img_size = args.image_size
        self.layer = args.use_vgg_layer
        self.vgg_loss = args.use_vgg_loss
        self.face_mask = args.face_mask
        self.use_grabcut = args.use_grabcut
        self.scale_mask = args.scale_mask
        self.mask_dir = args.mask_dir
        # A non-positive VGG layer index also disables the VGG feature loss.
        if (self.layer <= 0 or self.vgg_loss <= self.epsilon):
            self.vgg_loss = None
        self.pixel_loss = args.use_pixel_loss
        if (self.pixel_loss <= self.epsilon):
            self.pixel_loss = None
        self.mssim_loss = args.use_mssim_loss
        if (self.mssim_loss <= self.epsilon):
            self.mssim_loss = None
        self.lpips_loss = args.use_lpips_loss
        if (self.lpips_loss <= self.epsilon):
            self.lpips_loss = None
        self.l1_penalty = args.use_l1_penalty
        if (self.l1_penalty <= self.epsilon):
            self.l1_penalty = None
        self.adaptive_loss = args.use_adaptive_loss
        self.sharpen_input = args.sharpen_input
        self.batch_size = batch_size
        # LPIPS requires both a weight and an externally-loaded perceptual model.
        if perc_model is not None and self.lpips_loss is not None:
            self.perc_model = perc_model
        else:
            self.perc_model = None
        # Graph tensors/variables populated later in build_perceptual_model().
        self.ref_img = None
        self.ref_weight = None
        self.perceptual_model = None
        self.ref_img_features = None
        self.features_weight = None
        self.loss = None
        self.discriminator_loss = args.use_discriminator_loss
        if (self.discriminator_loss <= self.epsilon):
            self.discriminator_loss = None
        if self.discriminator_loss is not None:
            self.discriminator = None
            self.stub = create_stub(batch_size)
        if self.face_mask:
            # Deferred import: dlib is only needed when face masking is on.
            import dlib
            self.detector = dlib.get_frontal_face_detector()
            LANDMARKS_MODEL_URL = 'http://dlib.net/files/shape_predictor_68_face_landmarks.dat.bz2'
            landmarks_model_path = unpack_bz2(get_file('shape_predictor_68_face_landmarks.dat.bz2',
                                                       LANDMARKS_MODEL_URL, cache_subdir='temp'))
            self.predictor = dlib.shape_predictor(landmarks_model_path)
def add_placeholder(self, var_name):
var_val = getattr(self, var_name)
setattr(self, var_name + "_placeholder", tf.placeholder(var_val.dtype, shape=var_val.get_shape()))
setattr(self, var_name + "_op", var_val.assign(getattr(self, var_name + "_placeholder")))
def assign_placeholder(self, var_name, var_val):
self.sess.run(getattr(self, var_name + "_op"), {getattr(self, var_name + "_placeholder"): var_val})
    def build_perceptual_model(self, generator, discriminator=None):
        """Assemble the TF loss graph comparing generated images to references.

        Depending on the flags configured in __init__, the total loss combines:
        VGG16 feature loss, pixel loss, MS-SSIM loss, an external LPIPS-style
        perceptual model, an L1 penalty on dlatents, and a discriminator
        realism term.

        Args:
            generator: object exposing `generated_image`, `dlatent_variable`
                and `get_dlatent_avg()`.
            discriminator: optional discriminator network, used only when the
                discriminator loss is enabled.
        """
        # Learning rate: exponential decay driven by a resettable global step.
        global_step = tf.Variable(0, dtype=tf.int32, trainable=False, name="global_step")
        incremented_global_step = tf.assign_add(global_step, 1)
        self._reset_global_step = tf.assign(global_step, 0)
        self.learning_rate = tf.train.exponential_decay(self.lr, incremented_global_step,
                self.decay_steps, self.decay_rate, staircase=True)
        self.sess.run([self._reset_global_step])
        if self.discriminator_loss is not None:
            self.discriminator = discriminator
        # Resize generator output to the working image size used by all losses.
        generated_image_tensor = generator.generated_image
        generated_image = tf.image.resize_nearest_neighbor(generated_image_tensor,
                                                           (self.img_size, self.img_size), align_corners=True)
        # Reference image and its per-pixel weight mask, filled later via placeholders.
        self.ref_img = tf.get_variable('ref_img', shape=generated_image.shape,
                                       dtype='float32', initializer=tf.initializers.zeros())
        self.ref_weight = tf.get_variable('ref_weight', shape=generated_image.shape,
                                          dtype='float32', initializer=tf.initializers.zeros())
        self.add_placeholder("ref_img")
        self.add_placeholder("ref_weight")
        if (self.vgg_loss is not None):
            # VGG16 features truncated at the configured layer.
            vgg16 = VGG16(include_top=False, input_shape=(self.img_size, self.img_size, 3))
            self.perceptual_model = Model(vgg16.input, vgg16.layers[self.layer].output)
            generated_img_features = self.perceptual_model(preprocess_input(self.ref_weight * generated_image))
            self.ref_img_features = tf.get_variable('ref_img_features', shape=generated_img_features.shape,
                                                    dtype='float32', initializer=tf.initializers.zeros())
            self.features_weight = tf.get_variable('features_weight', shape=generated_img_features.shape,
                                                   dtype='float32', initializer=tf.initializers.zeros())
            # NOTE(review): features_weight.initializer is run twice here; the
            # second entry was likely meant to be ref_img_features.initializer
            # (ref_img_features is overwritten via assign_placeholder anyway) -- confirm.
            self.sess.run([self.features_weight.initializer, self.features_weight.initializer])
            self.add_placeholder("ref_img_features")
            self.add_placeholder("features_weight")
        if self.perc_model is not None and self.lpips_loss is not None:
            # NCHW float images fed to the external perceptual (LPIPS-style) model.
            img1 = tflib.convert_images_from_uint8(self.ref_weight * self.ref_img, nhwc_to_nchw=True)
            img2 = tflib.convert_images_from_uint8(self.ref_weight * generated_image, nhwc_to_nchw=True)
        self.loss = 0
        # L1 loss on VGG16 features
        if (self.vgg_loss is not None):
            if self.adaptive_loss:
                self.loss += self.vgg_loss * tf_custom_adaptive_loss(self.features_weight * self.ref_img_features, self.features_weight * generated_img_features)
            else:
                self.loss += self.vgg_loss * tf_custom_logcosh_loss(self.features_weight * self.ref_img_features, self.features_weight * generated_img_features)
        # + logcosh loss on image pixels
        if (self.pixel_loss is not None):
            if self.adaptive_loss:
                self.loss += self.pixel_loss * tf_custom_adaptive_rgb_loss(self.ref_weight * self.ref_img, self.ref_weight * generated_image)
            else:
                self.loss += self.pixel_loss * tf_custom_logcosh_loss(self.ref_weight * self.ref_img, self.ref_weight * generated_image)
        # + MS-SIM loss on image pixels
        if (self.mssim_loss is not None):
            self.loss += self.mssim_loss * tf.math.reduce_mean(1-tf.image.ssim_multiscale(self.ref_weight * self.ref_img, self.ref_weight * generated_image, 1))
        # + extra perceptual loss on image pixels
        if self.perc_model is not None and self.lpips_loss is not None:
            self.loss += self.lpips_loss * tf.math.reduce_mean(self.perc_model.get_output_for(img1, img2))
        # + L1 penalty on dlatent weights
        if self.l1_penalty is not None:
            self.loss += self.l1_penalty * 512 * tf.math.reduce_mean(tf.math.abs(generator.dlatent_variable-generator.get_dlatent_avg()))
        # discriminator loss (realism)
        if self.discriminator_loss is not None:
            self.loss += self.discriminator_loss * tf.math.reduce_mean(self.discriminator.get_output_for(tflib.convert_images_from_uint8(generated_image_tensor, nhwc_to_nchw=True), self.stub))
            # - discriminator_network.get_output_for(tflib.convert_images_from_uint8(ref_img, nhwc_to_nchw=True), stub)
def generate_face_mask(self, im):
from imutils import face_utils
import cv2
rects = self.detector(im, 1)
# loop over the face detections
for (j, rect) in enumerate(rects):
"""
Determine the facial landmarks for the face region, then convert the facial landmark (x, y)-coordinates to a NumPy array
"""
shape = self.predictor(im, rect)
shape = face_utils.shape_to_np(shape)
# we extract the face
vertices = cv2.convexHull(shape)
mask = np.zeros(im.shape[:2],np.uint8)
cv2.fillConvexPoly(mask, vertices, 1)
if self.use_grabcut:
bgdModel = np.zeros((1,65),np.float64)
fgdModel = np.zeros((1,65),np.float64)
rect = (0,0,im.shape[1],im.shape[2])
(x,y),radius = cv2.minEnclosingCircle(vertices)
center = (int(x),int(y))
radius = int(radius*self.scale_mask)
mask = cv2.circle(mask,center,radius,cv2.GC_PR_FGD,-1)
cv2.fillConvexPoly(mask, vertices, cv2.GC_FGD)
cv2.grabCut(im,mask,rect,bgdModel,fgdModel,5,cv2.GC_INIT_WITH_MASK)
mask = np.where((mask==2)|(mask==0),0,1)
return mask
    def set_reference_images(self, images_list):
        """Load reference images (and optional face masks) into the TF variables.

        Loads up to `batch_size` images, computes their VGG features when a
        perceptual model is active, builds per-pixel weight masks (face masks
        when `face_mask` is enabled), zero-pads a partial batch, and pushes
        everything into the graph via assign_placeholder.

        Args:
            images_list: non-empty list of image paths, at most batch_size long.
        """
        assert(len(images_list) != 0 and len(images_list) <= self.batch_size)
        loaded_image = load_images(images_list, self.img_size, sharpen=self.sharpen_input)
        image_features = None
        if self.perceptual_model is not None:
            image_features = self.perceptual_model.predict_on_batch(preprocess_input(np.array(loaded_image)))
            weight_mask = np.ones(self.features_weight.shape)
        if self.face_mask:
            image_mask = np.zeros(self.ref_weight.shape)
            for (i, im) in enumerate(loaded_image):
                try:
                    # Reuse a cached mask from mask_dir when available,
                    # otherwise compute one and cache it as a PNG.
                    _, img_name = os.path.split(images_list[i])
                    mask_img = os.path.join(self.mask_dir, f'{img_name}')
                    if (os.path.isfile(mask_img)):
                        print("Loading mask " + mask_img)
                        imask = PIL.Image.open(mask_img).convert('L')
                        mask = np.array(imask)/255
                        mask = np.expand_dims(mask,axis=-1)
                    else:
                        mask = self.generate_face_mask(im)
                        imask = (255*mask).astype('uint8')
                        imask = PIL.Image.fromarray(imask, 'L')
                        print("Saving mask " + mask_img)
                        imask.save(mask_img, 'PNG')
                        mask = np.expand_dims(mask,axis=-1)
                    # Broadcast the HxWx1 mask over the color channels.
                    mask = np.ones(im.shape,np.float32) * mask
                except Exception as e:
                    # Best-effort: on any failure fall back to a full-ones mask.
                    print("Exception in mask handling for " + mask_img)
                    traceback.print_exc()
                    mask = np.ones(im.shape[:2],np.uint8)
                    mask = np.ones(im.shape,np.float32) * np.expand_dims(mask,axis=-1)
                image_mask[i] = mask
            img = None  # NOTE(review): unused leftover; `img` is never read.
        else:
            image_mask = np.ones(self.ref_weight.shape)
        if len(images_list) != self.batch_size:
            # Partial batch: zero-pad images/features and zero out the weights
            # of the padded slots so they do not contribute to the loss.
            if image_features is not None:
                features_space = list(self.features_weight.shape[1:])
                existing_features_shape = [len(images_list)] + features_space
                empty_features_shape = [self.batch_size - len(images_list)] + features_space
                existing_examples = np.ones(shape=existing_features_shape)
                empty_examples = np.zeros(shape=empty_features_shape)
                weight_mask = np.vstack([existing_examples, empty_examples])
                image_features = np.vstack([image_features, np.zeros(empty_features_shape)])
            images_space = list(self.ref_weight.shape[1:])
            existing_images_space = [len(images_list)] + images_space
            empty_images_space = [self.batch_size - len(images_list)] + images_space
            existing_images = np.ones(shape=existing_images_space)
            empty_images = np.zeros(shape=empty_images_space)
            image_mask = image_mask * np.vstack([existing_images, empty_images])
            loaded_image = np.vstack([loaded_image, np.zeros(empty_images_space)])
        if image_features is not None:
            self.assign_placeholder("features_weight", weight_mask)
            self.assign_placeholder("ref_img_features", image_features)
        self.assign_placeholder("ref_weight", image_mask)
        self.assign_placeholder("ref_img", loaded_image)
    def optimize(self, vars_to_optimize, iterations=200, use_optimizer='adam'):
        """Generator that runs `iterations` optimization steps on self.loss.

        Args:
            vars_to_optimize: a TF variable or list of variables to optimize.
            iterations: number of optimization steps to yield.
            use_optimizer: 'adam' (default), 'ggt' or 'lbfgs'.

        Yields:
            dict with the current 'loss' (and 'lr' for the non-lbfgs path)
            after each step.
        """
        vars_to_optimize = vars_to_optimize if isinstance(vars_to_optimize, list) else [vars_to_optimize]
        if use_optimizer == 'lbfgs':
            # SciPy interface drives the whole inner minimization itself.
            optimizer = tf.contrib.opt.ScipyOptimizerInterface(self.loss, var_list=vars_to_optimize, method='L-BFGS-B', options={'maxiter': iterations})
        else:
            if use_optimizer == 'ggt':
                optimizer = tf.contrib.opt.GGTOptimizer(learning_rate=self.learning_rate)
            else:
                optimizer = tf.train.AdamOptimizer(learning_rate=self.learning_rate)
            min_op = optimizer.minimize(self.loss, var_list=[vars_to_optimize])
            self.sess.run(tf.variables_initializer(optimizer.variables()))
            fetch_ops = [min_op, self.loss, self.learning_rate]
        #min_op = optimizer.minimize(self.sess)
        #optim_results = tfp.optimizer.lbfgs_minimize(make_val_and_grad_fn(get_loss), initial_position=vars_to_optimize, num_correction_pairs=10, tolerance=1e-8)
        # Restart the decay schedule for this optimization run.
        self.sess.run(self._reset_global_step)
        #self.sess.graph.finalize() # Graph is read-only after this statement.
        for _ in range(iterations):
            if use_optimizer == 'lbfgs':
                optimizer.minimize(self.sess, fetches=[vars_to_optimize, self.loss])
                yield {"loss":self.loss.eval()}
            else:
                _, loss, lr = self.sess.run(fetch_ops)
                yield {"loss":loss,"lr":lr}
| 15,587 | 49.775244 | 192 | py |
iglu-2021-builder-baseline-rllib | iglu-2021-builder-baseline-rllib-main/model.py | from typing import Sequence
import gym
import numpy as np
from ray.rllib.models.torch.misc import SlimFC
from ray.rllib.models.torch.modules.noisy_layer import NoisyLayer
from ray.rllib.agents.dqn.dqn_torch_model import DQNTorchModel
from ray.rllib.models.torch.torch_modelv2 import TorchModelV2
from ray.rllib.utils.framework import try_import_torch
from ray.rllib.utils.typing import ModelConfigDict
from ray.rllib.utils.annotations import override
from torch import nn
from ray.rllib.models.preprocessors import DictFlatteningPreprocessor, get_preprocessor
torch, nn = try_import_torch()
class PovBaselineModel(TorchModelV2, nn.Module):
    """Baseline RLlib model for POV (image) observations.

    A small CNN encodes the RGB `pov` observation, an MLP encodes the
    concatenated `inventory` + `compass` vector, and a final MLP head maps
    the joined embeddings to `num_outputs` logits.
    """

    def __init__(self, obs_space, action_space, num_outputs, model_config,
                 name):
        nn.Module.__init__(self)
        super().__init__(obs_space, action_space, num_outputs,
                         model_config, name)
        if num_outputs is None:
            # required by rllib's lstm wrapper
            num_outputs = int(np.product(self.obs_space.shape))
        conv_channels = 256
        mlp_hidden = 256
        joint_hidden = 512
        # Three 4x4/stride-4 conv stages reduce a 64px frame to 1x1 spatially.
        self.pov_embed = nn.Sequential(
            nn.Conv2d(3, 64, 4, 4),
            nn.ReLU(),
            nn.Conv2d(64, 128, 4, 4),
            nn.ReLU(),
            nn.Conv2d(128, conv_channels, 4, 4),
            nn.ReLU(),
        )
        # Encoder for the 7-dim inventory + compass vector.
        self.inventory_compass_emb = nn.Sequential(
            nn.Linear(7, mlp_hidden),
            nn.ReLU(),
            nn.Linear(mlp_hidden, mlp_hidden),
            nn.ReLU(),
        )
        self.head = nn.Sequential(
            nn.Linear(conv_channels + mlp_hidden, joint_hidden),
            nn.ReLU(),
            nn.Linear(joint_hidden, joint_hidden),
            nn.ReLU(),
            nn.Linear(joint_hidden, num_outputs),
        )

    def forward(self, input_dict, state, seq_lens):
        obs = input_dict['obs']
        # Scale pixels to [-0.5, 0.5] and move channels first (NHWC -> NCHW).
        pov = obs['pov'] / 255. - 0.5
        pov = pov.permute(0, 3, 1, 2).contiguous()
        pov_features = self.pov_embed(pov)
        pov_features = pov_features.reshape(pov_features.shape[0], -1)
        vector_obs = torch.cat([obs['inventory'], obs['compass']], 1)
        vector_features = self.inventory_compass_emb(vector_obs)
        joint = torch.cat([pov_features, vector_features], 1)
        return self.head(joint), state
class GridBaselineModel(TorchModelV2, nn.Module):
    """Baseline RLlib model for voxel-grid observations.

    One shared MLP encodes both the current and the target 9x11x11 build
    grids; a second MLP encodes agent position + inventory. The three
    embeddings are concatenated and mapped to `num_outputs` by a final head.
    """

    def __init__(self, obs_space, action_space, num_outputs, model_config,
                 name):
        nn.Module.__init__(self)
        super().__init__(obs_space, action_space, num_outputs,
                         model_config, name)
        if num_outputs is None:
            # required by rllib's lstm wrapper
            num_outputs = int(np.product(self.obs_space.shape))
        grid_width = 300
        vec_width = 300
        head_width = 300
        # Shared encoder applied to both the current and the target grid.
        self.encode_grid = nn.Sequential(
            nn.Linear(9*11*11, grid_width),
            nn.ReLU(),
            nn.Linear(grid_width, grid_width),
            nn.ReLU(),
            nn.Linear(grid_width, grid_width),
            nn.ReLU(),
            nn.Linear(grid_width, grid_width),
        )
        # Encoder for the 11-dim agentPos + inventory vector.
        self.encode_pos_inventory = nn.Sequential(
            nn.Linear(11, vec_width),
            nn.ReLU(),
            nn.Linear(vec_width, vec_width),
            nn.ReLU(),
        )
        self.head = nn.Sequential(
            nn.Linear(2 * grid_width + vec_width, head_width),
            nn.ReLU(),
            nn.Linear(head_width, head_width),
            nn.ReLU(),
            nn.Linear(head_width, num_outputs),
        )

    def forward(self, input_dict, state, seq_lens):
        obs = input_dict['obs']
        current_grid = torch.flatten(obs['grid'], 1)
        target_grid = torch.flatten(obs['target_grid'], 1)
        vector_obs = torch.cat([obs['agentPos'], obs['inventory']], -1)
        embeddings = torch.cat(
            [self.encode_grid(current_grid),
             self.encode_grid(target_grid),
             self.encode_pos_inventory(vector_obs)], -1)
        return self.head(embeddings), state
iglu-2021-builder-baseline-rllib | iglu-2021-builder-baseline-rllib-main/custom_agent.py | import tensorflow as tf
import torch
import gym
from copy import deepcopy as copy
from gym import spaces
import ray
import numpy as np
from torch._C import Value
import yaml
from wrappers import FakeIglu
from train import build_env, register_models
from ray.rllib.agents.registry import get_trainer_class
CONFIG_FILE = './apex_c32/apex_c32.yml'
class CustomAgent:
    """Evaluation wrapper around a pretrained RLlib agent (APEX checkpoint).

    Loads the training config and a `metadata` file, validates that the
    requested action space matches the model's, restores the checkpoint, and
    exposes `act()` which maps raw env observations to (possibly stacked)
    environment actions.
    """
    def __init__(self, action_space):
        ray.init(local_mode=True)
        self.action_space = action_space
        with open(CONFIG_FILE, 'r') as f:
            config = yaml.safe_load(f)['iglu-baseline']
        with open('metadata', 'r') as f:
            meta = yaml.safe_load(f)
        # The deployed model only supports the action space it was trained with.
        if meta['action_space'] != config['config']['env_config']['action_space']:
            metadata_as = meta['action_space']
            model_as = config['config']['env_config']['action_space']
            raise ValueError(
                'requested action space in metadata file differs '
                'from the one selected by the model. '
                f'Metadata action space: {metadata_as}; Model action space {model_as}')
        register_models()
        Trainer = get_trainer_class(config['run'])
        self.config = config
        config['config']['in_evaluation'] = True
        # FakeIglu mirrors the training wrappers so observations/actions can be
        # converted without running the real environment.
        self.fake_env = build_env(
            env_config=config['config']['env_config'],
            env_factory=lambda: FakeIglu(config['config']['env_config'], wrap_actions=False)
        )
        self.visual = config['config']['env_config']['visual']
        agent = Trainer(config=config['config'], env=FakeIglu)
        agent.restore('./apex_c32/apex_c32')
        self.agent = agent
        # Iterator over pending low-level actions and recurrent state, if any.
        self.actions = iter([])
        self.state = None
    def policy(self, obs, reward, done, info, state):
        """Run one inference step; returns (action, new_recurrent_state)."""
        # Lazily initialize LSTM state with zeros when the model is recurrent.
        if self.agent.config['model'].get('use_lstm', False) and state is None:
            cell_size = self.agent.config['model'].get('lstm_cell_size')
            state = [
                torch.zeros((cell_size,)).float(),
                torch.zeros((cell_size,)).float(),
            ]
        output = self.agent.compute_single_action(
            obs, explore=False, state=state
        )
        # Recurrent models return (action, state, extras); others just action.
        if not isinstance(output, tuple):
            action = output
        else:
            action, state, _ = output
        return action, state
    def act(self, obs, reward, done, info):
        """Return the next environment action for the current observation.

        One agent action may expand into several env actions (stacked); they
        are drained from `self.actions` before the policy is queried again.
        On `done`, internal state is reset and None is returned implicitly.
        """
        if done:
            self.actions = iter([])
            self.state = None
            return
        try:
            action = next(self.actions)
        except StopIteration:
            # Queue exhausted: query the policy and refill the action iterator.
            obs = self.fake_env.wrap_observation(obs, reward, done, info)
            agent_action, self.state = self.policy(obs, reward, done, info, self.state)
            self.actions = iter(self.fake_env.stack_actions()(agent_action))
            action = next(self.actions)
        return copy(action)
| 2,851 | 35.101266 | 92 | py |
SPIGA | SPIGA-main/spiga/eval/results_gen.py | import pkg_resources
import json
import copy
import torch
import spiga.data.loaders.dl_config as dl_cfg
import spiga.data.loaders.dataloader as dl
import spiga.inference.pretreatment as pretreat
from spiga.inference.framework import SPIGAFramework
from spiga.inference.config import ModelConfig
def main():
    """Command-line entry point: build the SPIGA framework for the requested
    dataset and dump its evaluation results to JSON."""
    import argparse
    parser = argparse.ArgumentParser(description='Experiment results generator')
    parser.add_argument('database', type=str, help='Database name',
                        choices=['wflw', '300wpublic', '300wprivate', "merlrav", "cofw68"])
    parser.add_argument('-a', '--anns', type=str, default='test', help='Annotations type: test, valid or train')
    parser.add_argument('--gpus', type=int, default=0, help='GPU Id')
    args = parser.parse_args()

    # Load model framework for the selected dataset/GPU.
    cfg = ModelConfig(args.database)
    framework = SPIGAFramework(cfg, gpus=[args.gpus])

    # Generate results without tracking gradients.
    tester = Tester(framework, args.database, anns_type=args.anns)
    with torch.no_grad():
        tester.generate_results()
class Tester:
    """Runs a pretrained SPIGA framework over an evaluation dataloader and
    dumps per-image predictions (landmarks + head pose) to a JSON file."""
    def __init__(self, model_framework, database, anns_type='test'):
        # Parameters
        self.anns_type = anns_type
        self.database = database
        # Model initialization
        self.model_framework = model_framework
        # Dataloader: evaluation config with augmentations and shuffling off,
        # geometry synced with the model config.
        self.dl_eval = dl_cfg.AlignConfig(self.database, mode=self.anns_type)
        self.dl_eval.aug_names = []
        self.dl_eval.shuffle = False
        self.dl_eval.target_dist = self.model_framework.model_cfg.target_dist
        self.dl_eval.image_size = self.model_framework.model_cfg.image_size
        self.dl_eval.ftmap_size = self.model_framework.model_cfg.ftmap_size
        self.batch_size = 1
        self.test_data, _ = dl.get_dataloader(self.batch_size, self.dl_eval,
                                              pretreat=pretreat.NormalizeAndPermute(), debug=True)
        # Results: template record and output file path inside the package.
        self.data_struc = {'imgpath': str, 'bbox': None, 'headpose': None, 'ids': None, 'landmarks': None, 'visible': None}
        self.result_path = pkg_resources.resource_filename('spiga', 'eval/results')
        self.result_file = '/results_%s_%s.json' % (self.database, self.anns_type)
        self.file_out = self.result_path + self.result_file
    def generate_results(self):
        """Iterate the eval dataloader, run the network on each batch and
        write the accumulated per-image records to `self.file_out`."""
        data = []
        for step, batch in enumerate(self.test_data):
            print('Step: ', step)
            inputs = self.model_framework.select_inputs(batch)
            outputs_raw = self.model_framework.net_forward(inputs)
            # Postprocessing: map raw outputs back to image coordinates.
            outputs = self.model_framework.postreatment(outputs_raw, batch['bbox'], batch['bbox_raw'])
            # Data: one JSON-serializable record per image (batch_size == 1).
            data_dict = copy.deepcopy(self.data_struc)
            data_dict['imgpath'] = batch['imgpath_local'][0]
            data_dict['bbox'] = batch['bbox_raw'][0].numpy().tolist()
            data_dict['visible'] = batch['visible'][0].numpy().tolist()
            data_dict['ids'] = self.dl_eval.database.ldm_ids
            data_dict['landmarks'] = outputs['landmarks'][0]
            data_dict['headpose'] = outputs['headpose'][0]
            data.append(data_dict)
        # Save outputs
        with open(self.file_out, 'w') as outfile:
            json.dump(data, outfile)
# Script entry point.
if __name__ == '__main__':
    main()
| 3,346 | 37.034091 | 123 | py |
SPIGA | SPIGA-main/spiga/models/spiga.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import spiga.models.gnn.pose_proj as pproj
from spiga.models.cnn.cnn_multitask import MultitaskCNN
from spiga.models.gnn.step_regressor import StepRegressor, RelativePositionEncoder
class SPIGA(nn.Module):
    """SPIGA landmark regressor.

    A multitask CNN backbone estimates head pose and visual feature maps; a
    rigid 3D face model projected with that pose initializes the landmark
    positions, which are then refined by a cascade of `steps` GAT-based
    regressors that combine local visual crops with relative-shape encodings.
    """
    def __init__(self, num_landmarks=98, num_edges=15, steps=3, **kwargs):
        super(SPIGA, self).__init__()
        # Model parameters
        self.steps = steps # Cascaded regressors
        self.embedded_dim = 512 # GAT input channel
        self.nstack = 4 # Number of stacked GATs per step
        self.kwindow = 7 # Output cropped window dimension (kernel)
        self.swindow = 0.25 # Scale of the cropped window at first step (Dft. 25% w.r.t the input featuremap)
        # Half-width of each step's window, used to scale the regressed offsets.
        self.offset_ratio = [self.swindow/(2**step)/2 for step in range(self.steps)]
        # CNN parameters
        self.num_landmarks = num_landmarks
        self.num_edges = num_edges
        # Initialize backbone
        self.visual_cnn = MultitaskCNN(num_landmarks=self.num_landmarks, num_edges=self.num_edges)
        # Features dimensions
        self.img_res = self.visual_cnn.img_res
        self.visual_res = self.visual_cnn.out_res
        self.visual_dim = self.visual_cnn.ch_dim
        # Initialize Pose head (3 Euler angles + 3 translation components)
        self.channels_pose = 6
        self.pose_fc = nn.Linear(self.visual_cnn.ch_dim, self.channels_pose)
        # Initialize feature extractors:
        # Relative positional encoder (one per cascade step)
        shape_dim = 2 * (self.num_landmarks - 1)
        shape_encoder = []
        for step in range(self.steps):
            shape_encoder.append(RelativePositionEncoder(shape_dim, self.embedded_dim, [256, 256]))
        self.shape_encoder = nn.ModuleList(shape_encoder)
        # Diagonal mask used to compute relative positions
        diagonal_mask = (torch.ones(self.num_landmarks, self.num_landmarks) - torch.eye(self.num_landmarks)).type(torch.bool)
        self.diagonal_mask = nn.parameter.Parameter(diagonal_mask, requires_grad=False)
        # Visual feature extractor (per-step affine scales + crop convolutions)
        conv_window = []
        theta_S = []
        for step in range(self.steps):
            # S matrix per step
            WH = self.visual_res # Width/height of ftmap
            Wout = self.swindow / (2 ** step) * WH # Width/height of the window
            K = self.kwindow # Kernel or resolution of the window
            scale = K / WH * (Wout - 1) / (K - 1) # Scale of the affine transformation
            # Rescale matrix S
            theta_S_stp = torch.tensor([[scale, 0], [0, scale]])
            theta_S.append(nn.parameter.Parameter(theta_S_stp, requires_grad=False))
            # Convolutional to embedded to BxLxCx1x1
            conv_window.append(nn.Conv2d(self.visual_dim, self.embedded_dim, self.kwindow))
        self.theta_S = nn.ParameterList(theta_S)
        self.conv_window = nn.ModuleList(conv_window)
        # Initialize GAT modules
        self.gcn = nn.ModuleList([StepRegressor(self.embedded_dim, 256, self.nstack) for i in range(self.steps)])
    def forward(self, data):
        """Full inference: backbone + cascaded refinement.

        Args:
            data: sequence (images, model3d, cam_matrix) as consumed by
                backbone_forward.
        Returns:
            dict of backbone features extended with 'Landmarks' (one BxLx2
            tensor per step, normalized by the featuremap resolution) and
            'GATProb' (attention probabilities of the last step).
        """
        # Inputs: Visual features and points projections
        pts_proj, features = self.backbone_forward(data)
        # Visual field
        visual_field = features['VisualField'][-1]
        # Params compute only once
        gat_prob = []
        features['Landmarks'] = []
        for step in range(self.steps):
            # Features generation
            embedded_ft = self.extract_embedded(pts_proj, visual_field, step)
            # GAT inference
            offset, gat_prob = self.gcn[step](embedded_ft, gat_prob)
            offset = F.hardtanh(offset)
            # Update coordinates (offsets scaled to the step's window size)
            pts_proj = pts_proj + self.offset_ratio[step] * offset
            features['Landmarks'].append(pts_proj.clone())
        features['GATProb'] = gat_prob
        return features
    def backbone_forward(self, data):
        """Run the CNN backbone, regress pose and project the 3D model.

        Returns:
            (pts_proj, features): initial landmark projections normalized by
            the featuremap resolution, plus the backbone feature dict with
            'Pose' added.
        """
        # Inputs: Image and model3D
        imgs = data[0]
        model3d = data[1]
        cam_matrix = data[2]
        # HourGlass Forward
        features = self.visual_cnn(imgs)
        # Head pose estimation
        pose_raw = features['HGcore'][-1]
        B, L, _, _ = pose_raw.shape
        pose = pose_raw.reshape(B, L)
        pose = self.pose_fc(pose)
        features['Pose'] = pose.clone()
        # Project model 3D (first 3 channels: Euler angles; last 3: translation)
        euler = pose[:, 0:3]
        trl = pose[:, 3:]
        rot = pproj.euler_to_rotation_matrix(euler)
        pts_proj = pproj.projectPoints(model3d, rot, trl, cam_matrix)
        pts_proj = pts_proj / self.visual_res
        return pts_proj, features
    def extract_embedded(self, pts_proj, receptive_field, step):
        """Per-landmark embedding = visual crop features + shape encoding."""
        # Visual features
        visual_ft = self.extract_visual_embedded(pts_proj, receptive_field, step)
        # Shape features
        shape_ft = self.calculate_distances(pts_proj)
        shape_ft = self.shape_encoder[step](shape_ft)
        # Addition
        embedded_ft = visual_ft + shape_ft
        return embedded_ft
    def extract_visual_embedded(self, pts_proj, receptive_field, step):
        """Crop a KxK window around each projected landmark (via affine
        grid sampling) and flatten it to a BxLxC embedding."""
        # Affine matrix generation
        B, L, _ = pts_proj.shape # Pts_proj range:[0,1]
        centers = pts_proj + 0.5 / self.visual_res # BxLx2
        centers = centers.reshape(B * L, 2) # B*Lx2
        theta_trl = (-1 + centers * 2).unsqueeze(-1) # BxLx2x1
        theta_s = self.theta_S[step] # 2x2
        theta_s = theta_s.repeat(B * L, 1, 1) # B*Lx2x2
        theta = torch.cat((theta_s, theta_trl), -1) # B*Lx2x3
        # Generate crop grid
        B, C, _, _ = receptive_field.shape
        grid = torch.nn.functional.affine_grid(theta, (B * L, C, self.kwindow, self.kwindow))
        grid = grid.reshape(B, L, self.kwindow, self.kwindow, 2)
        grid = grid.reshape(B, L, self.kwindow * self.kwindow, 2)
        # Crop windows
        crops = torch.nn.functional.grid_sample(receptive_field, grid, padding_mode="border") # BxCxLxK*K
        crops = crops.transpose(1, 2) # BxLxCxK*K
        crops = crops.reshape(B * L, C, self.kwindow, self.kwindow)
        # Flatten features (KxK conv collapses each window to a C-dim vector)
        visual_ft = self.conv_window[step](crops)
        _, Cout, _, _ = visual_ft.shape
        visual_ft = visual_ft.reshape(B, L, Cout)
        return visual_ft
    def calculate_distances(self, pts_proj):
        """Pairwise coordinate differences between landmarks, excluding the
        self-to-self entries (BxLx2(L-1))."""
        B, L, _ = pts_proj.shape # BxLx2
        pts_a = pts_proj.unsqueeze(-2).repeat(1, 1, L, 1)
        pts_b = pts_a.transpose(1, 2)
        dist = pts_a - pts_b
        dist_wo_self = dist[:, self.diagonal_mask, :].reshape(B, L, -1)
        return dist_wo_self
| 6,704 | 37.982558 | 125 | py |
SPIGA | SPIGA-main/spiga/models/cnn/layers.py | from torch import nn
class Conv(nn.Module):
    """Convolution block: Conv2d (no bias) with optional BatchNorm and ReLU.

    Padding is (kernel_size - 1) // 2, so odd kernels with stride 1 preserve
    the spatial size. The forward pass asserts the input channel count.
    """

    def __init__(self, inp_dim, out_dim, kernel_size=3, stride=1, bn=False, relu=True):
        super(Conv, self).__init__()
        self.inp_dim = inp_dim
        padding = (kernel_size - 1) // 2
        self.conv = nn.Conv2d(inp_dim, out_dim, kernel_size, stride, padding=padding, bias=False)
        self.relu = nn.ReLU() if relu else None
        self.bn = nn.BatchNorm2d(out_dim) if bn else None

    def forward(self, x):
        assert x.size()[1] == self.inp_dim, "{} {}".format(x.size()[1], self.inp_dim)
        out = self.conv(x)
        if self.bn is not None:
            out = self.bn(out)
        if self.relu is not None:
            out = self.relu(out)
        return out
class Deconv(nn.Module):
    """Transposed-convolution block: ConvTranspose2d (no bias, no padding)
    with optional BatchNorm and ReLU; asserts the input channel count."""

    def __init__(self, inp_dim, out_dim, kernel_size=3, stride=1, bn=False, relu=True):
        super(Deconv, self).__init__()
        self.inp_dim = inp_dim
        self.deconv = nn.ConvTranspose2d(inp_dim, out_dim, kernel_size=kernel_size, stride=stride, bias=False)
        self.relu = nn.ReLU() if relu else None
        self.bn = nn.BatchNorm2d(out_dim) if bn else None

    def forward(self, x):
        assert x.size()[1] == self.inp_dim, "{} {}".format(x.size()[1], self.inp_dim)
        out = self.deconv(x)
        if self.bn is not None:
            out = self.bn(out)
        if self.relu is not None:
            out = self.relu(out)
        return out
class Residual(nn.Module):
    """Pre-activation bottleneck residual block.

    Main path: three BN-ReLU-Conv stages (1x1 down to out_dim/2, kxk, 1x1 up
    to out_dim). A 1x1 projection is applied to the skip connection only when
    the channel count changes.
    """

    def __init__(self, inp_dim, out_dim, kernel=3):
        super(Residual, self).__init__()
        mid_dim = int(out_dim / 2)
        self.relu = nn.ReLU()
        self.bn1 = nn.BatchNorm2d(inp_dim)
        self.conv1 = Conv(inp_dim, mid_dim, 1, relu=False)
        self.bn2 = nn.BatchNorm2d(mid_dim)
        self.conv2 = Conv(mid_dim, mid_dim, kernel, relu=False)
        self.bn3 = nn.BatchNorm2d(mid_dim)
        self.conv3 = Conv(mid_dim, out_dim, 1, relu=False)
        # Projection for the skip path (only used when channels differ).
        self.skip_layer = Conv(inp_dim, out_dim, 1, relu=False)
        self.need_skip = inp_dim != out_dim

    def forward(self, x):
        residual = self.skip_layer(x) if self.need_skip else x
        out = x
        for bn, conv in ((self.bn1, self.conv1),
                         (self.bn2, self.conv2),
                         (self.bn3, self.conv3)):
            out = conv(self.relu(bn(out)))
        return out + residual
| 2,619 | 31.75 | 112 | py |
SPIGA | SPIGA-main/spiga/models/cnn/coord_conv.py | import torch
import torch.nn as nn
class AddCoordsTh(nn.Module):
    """Append normalized coordinate channels (CoordConv) to an input tensor.

    Two channels in [-1, 1] are concatenated to the input: one varying along
    dim 2 and one along dim 3. With `with_r`, a radius channel
    sqrt((x - 0.5)^2 + (y - 0.5)^2) is appended as well.
    NOTE(review): the radius is measured from (0.5, 0.5) in the already
    rescaled [-1, 1] coordinates, not from the image center; kept as-is to
    preserve behavior -- confirm against the reference CoordConv code.
    """

    def __init__(self, x_dim=64, y_dim=64, with_r=False):
        super(AddCoordsTh, self).__init__()
        self.x_dim = x_dim
        self.y_dim = y_dim
        self.with_r = with_r
        xx_channel, yy_channel = self._prepare_coords()
        # Stored as frozen parameters so they move with the module's device.
        self.xx_channel = nn.parameter.Parameter(xx_channel, requires_grad=False)
        self.yy_channel = nn.parameter.Parameter(yy_channel, requires_grad=False)

    def _prepare_coords(self):
        # Integer index grids of shape (1, 1, x_dim, y_dim): `rows` holds the
        # dim-2 index at every position, `cols` the dim-3 index.
        rows = torch.arange(self.x_dim, dtype=torch.int32)
        rows = rows.reshape(1, 1, self.x_dim, 1).repeat(1, 1, 1, self.y_dim)
        cols = torch.arange(self.y_dim, dtype=torch.int32)
        cols = cols.reshape(1, 1, 1, self.y_dim).repeat(1, 1, self.x_dim, 1)
        # Normalize to [0, 1], then rescale to [-1, 1].
        xx_channel = rows.float() / (self.x_dim - 1)
        yy_channel = cols.float() / (self.y_dim - 1)
        xx_channel = xx_channel * 2 - 1
        yy_channel = yy_channel * 2 - 1
        return xx_channel, yy_channel

    def forward(self, input_tensor):
        """
        input_tensor: (batch, c, x_dim, y_dim)
        """
        batch = input_tensor.shape[0]
        xx = self.xx_channel.repeat(batch, 1, 1, 1)
        yy = self.yy_channel.repeat(batch, 1, 1, 1)
        out = torch.cat([input_tensor, xx, yy], dim=1)
        if self.with_r:
            rr = torch.sqrt(torch.pow(xx - 0.5, 2) + torch.pow(yy - 0.5, 2))
            out = torch.cat([out, rr], dim=1)
        return out
| 2,053 | 33.813559 | 92 | py |
SPIGA | SPIGA-main/spiga/models/cnn/hourglass.py | import torch.nn as nn
from spiga.models.cnn.layers import Conv, Deconv, Residual
class Hourglass(nn.Module):
    """Recursive hourglass module.

    Each level keeps a full-resolution residual branch and a pooled branch
    that recurses (depth n - 1) until the deepest level, where a plain
    residual is used; the pooled branch is then upsampled and summed with
    the full-resolution branch.
    """

    def __init__(self, n, f, bn=None, increase=0):
        super(Hourglass, self).__init__()
        nf = f + increase
        self.n = n
        # Upper (full-resolution) branch.
        self.up1 = Residual(f, f)
        # Lower branch: strided-conv downsampling, residual, recursion (or a
        # bottom residual at depth 1), residual back to f, then upsampling.
        self.pool1 = Conv(f, f, 2, 2, bn=True, relu=True)
        self.low1 = Residual(f, nf)
        if self.n > 1:
            self.low2 = Hourglass(n - 1, nf, bn=bn)
        else:
            self.low2 = Residual(nf, nf)
        self.low3 = Residual(nf, f)
        self.up2 = Deconv(f, f, 2, 2, bn=True, relu=True)

    def forward(self, x):
        skip = self.up1(x)
        down = self.low3(self.low2(self.low1(self.pool1(x))))
        return skip + self.up2(down)
class HourglassCore(Hourglass):
    """Hourglass variant that also returns the intermediate low-resolution
    ("core") feature maps gathered on the way down and back up.

    The layer layout is inherited from Hourglass; only the recursive child
    type and the forward signature change.
    """

    def __init__(self, n, f, bn=None, increase=0):
        super(HourglassCore, self).__init__(n, f, bn=bn, increase=increase)
        nf = f + increase
        if self.n > 1:
            # Recurse with the core-collecting variant instead of plain Hourglass.
            self.low2 = HourglassCore(n - 1, nf, bn=bn)

    def forward(self, x, core=None):
        """Return (output, core), where `core` accumulates the bottleneck
        features of every recursion level.

        Bugfix: the original declared a mutable default (`core=[]`), which is
        evaluated once at definition time, so calls that omitted the argument
        kept appending to the same shared list across forward passes.
        """
        if core is None:
            core = []
        up1 = self.up1(x)
        pool1 = self.pool1(x)
        low1 = self.low1(pool1)
        if self.n > 1:
            low2, core = self.low2(low1, core=core)
        else:
            # Deepest level: this is the bottleneck ("core") feature map.
            low2 = self.low2(low1)
            core.append(low2)
        low3 = self.low3(low2)
        if self.n > 1:
            core.append(low3)
        up2 = self.up2(low3)
        return up1 + up2, core
| 1,575 | 28.185185 | 75 | py |
SPIGA | SPIGA-main/spiga/models/cnn/transform_e2p.py | import torch
from torch import nn
class E2Ptransform(nn.Module):
    """Edge-to-points transformation.

    Converts per-edge heatmaps into per-landmark maps through a fixed,
    dataset-specific connectivity matrix (with a constant bias column), then
    saturates activations at 1.
    """

    def __init__(self, points, edges, out_dim=64):
        super(E2Ptransform, self).__init__()
        # Constant all-ones map appended as the bias channel in forward().
        self.ones = nn.parameter.Parameter(torch.ones((1, out_dim, out_dim)), requires_grad=False)
        connectivity = self._select_matrix(points, edges)
        # Shape: num_points x (num_edges + 1).
        self.edge2point = nn.parameter.Parameter(connectivity, requires_grad=False)

    def forward(self, edges):
        """Map edge maps (B, E, H, W) to landmark maps (B, P, H, W)."""
        batch, num_edges, height, width = edges.shape
        bias = self.ones.repeat(batch, 1, 1, 1)
        stacked = torch.cat((edges, bias), 1)
        # Per-pixel row vector of edge activations: (B, H, W, 1, E + 1).
        per_pixel = stacked.permute(0, 2, 3, 1).reshape(batch, height, width, 1, num_edges + 1)
        mixing = self.edge2point.transpose(-1, -2)
        points = torch.matmul(per_pixel, mixing)
        points = points.reshape(batch, height, width, -1).permute(0, 3, 1, 2)
        # Saturate summed activations at 1.
        points[points > 1] = 1.
        return points

    def _select_matrix(self, points, edges):
        """Return the connectivity matrix matching a known dataset layout."""
        if points == 98 and edges == 15:
            return WFLW_98x15
        if points == 68 and edges == 13:
            return W300_68x13
        if points == 29 and edges == 13:
            return COFW_29x13
        if points == 19 and edges == 6:
            return AFLW19_19x6
        raise ValueError("E2P matrix not implemented")
# Database matrixE2P
# Fixed landmark-to-edge assignment matrices, one per database.
# Each row is a landmark; each column is an edge subset, plus a final
# bias column (all-ones channel appended in E2Ptransform.forward).
# A 1 at (i, j) means landmark i belongs to edge subset j.

# WFLW: 98 landmarks x (15 edges + 1 bias column).
WFLW_98x15 = torch.Tensor([[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                           [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                           [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                           [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                           [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                           [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                           [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                           [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                           [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                           [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                           [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                           [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                           [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                           [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                           [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                           [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                           [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                           [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                           [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                           [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                           [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                           [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                           [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                           [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                           [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                           [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                           [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                           [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                           [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                           [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                           [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                           [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                           [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                           [0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                           [0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                           [0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                           [0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                           [0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                           [0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                           [0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                           [0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                           [0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                           [0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                           [0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                           [0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                           [0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                           [0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                           [0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                           [0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                           [0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                           [0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                           [0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                           [0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                           [0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                           [0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                           [0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                           [0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                           [0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                           [0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                           [0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                           [0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0],
                           [0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0],
                           [0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0],
                           [0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0],
                           [0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0],
                           [0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0],
                           [0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0],
                           [0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0],
                           [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0],
                           [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0],
                           [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0],
                           [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0],
                           [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0],
                           [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
                           [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
                           [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
                           [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0],
                           [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0],
                           [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0],
                           [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0],
                           [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0],
                           [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0],
                           [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0],
                           [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0],
                           [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0],
                           [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0],
                           [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0],
                           [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0],
                           [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0],
                           [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0],
                           [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0],
                           [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0],
                           [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0],
                           [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
                           [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
                           [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
                           [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
                           [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]])

# 300W: 68 landmarks x (13 edges + 1 bias column).
W300_68x13 = torch.Tensor([[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                           [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                           [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                           [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                           [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                           [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                           [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                           [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                           [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                           [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                           [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                           [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                           [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                           [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                           [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                           [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                           [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                           [0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                           [0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                           [0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                           [0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                           [0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                           [0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                           [0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                           [0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                           [0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                           [0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                           [0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                           [0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                           [0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                           [0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                           [0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                           [0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                           [0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                           [0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                           [0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                           [0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0],
                           [0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0],
                           [0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0],
                           [0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0],
                           [0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0],
                           [0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0],
                           [0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0],
                           [0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0],
                           [0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0],
                           [0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0],
                           [0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
                           [0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
                           [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0],
                           [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0],
                           [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0],
                           [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0],
                           [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0],
                           [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0],
                           [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0],
                           [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0],
                           [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0],
                           [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0],
                           [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0],
                           [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0],
                           [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0],
                           [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0],
                           [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0],
                           [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0],
                           [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0],
                           [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
                           [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
                           [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0]])

# AFLW: 19 landmarks x (6 edges + 1 bias column).
AFLW19_19x6 = torch.Tensor([[1, 0, 0, 0, 0, 0, 0],
                            [1, 0, 0, 0, 0, 0, 0],
                            [1, 0, 0, 0, 0, 0, 0],
                            [0, 1, 0, 0, 0, 0, 0],
                            [0, 1, 0, 0, 0, 0, 0],
                            [0, 1, 0, 0, 0, 0, 0],
                            [0, 0, 1, 0, 0, 0, 0],
                            [0, 0, 1, 0, 0, 0, 0],
                            [0, 0, 1, 0, 0, 0, 0],
                            [0, 0, 0, 1, 0, 0, 0],
                            [0, 0, 0, 1, 0, 0, 0],
                            [0, 0, 0, 1, 0, 0, 0],
                            [0, 0, 0, 0, 1, 0, 0],
                            [0, 0, 0, 0, 1, 0, 0],
                            [0, 0, 0, 0, 1, 0, 0],
                            [0, 0, 0, 0, 0, 1, 0],
                            [0, 0, 0, 0, 0, 1, 0],
                            [0, 0, 0, 0, 0, 1, 0],
                            [0, 0, 0, 0, 0, 0, 1]])

# COFW: 29 landmarks x (13 edges + 1 bias column).
COFW_29x13 = torch.Tensor([[1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                           [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                           [1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                           [0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                           [0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                           [0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                           [0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                           [0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                           [0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0],
                           [0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                           [0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0],
                           [0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0],
                           [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
                           [0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0],
                           [0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0],
                           [0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0],
                           [0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0],
                           [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
                           [0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
                           [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
                           [0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
                           [0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
                           [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0],
                           [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0],
                           [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0],
                           [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0],
                           [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
                           [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0],
                           [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]])
| 16,877 | 64.418605 | 107 | py |
SPIGA | SPIGA-main/spiga/models/cnn/cnn_multitask.py | from torch import nn
from spiga.models.cnn.layers import Conv, Residual
from spiga.models.cnn.hourglass import HourglassCore
from spiga.models.cnn.coord_conv import AddCoordsTh
from spiga.models.cnn.transform_e2p import E2Ptransform
class MultitaskCNN(nn.Module):
    """Stacked-hourglass backbone with attention-gated landmark heads.

    Produces per-stack visual feature maps ('VisualField') and, when
    `pose_req` is True, per-stack low-resolution core features ('HGcore')
    intended for a pose-estimation head.
    """

    def __init__(self, nstack=4, num_landmarks=98, num_edges=15, pose_req=True, **kwargs):
        super(MultitaskCNN, self).__init__()

        # Parameters
        self.img_res = 256                  # WxH input resolution
        self.ch_dim = 256                   # Default channel dimension
        self.out_res = 64                   # WxH output resolution
        self.nstack = nstack                # Hourglass modules stacked
        self.num_landmarks = num_landmarks  # Number of landmarks
        self.num_edges = num_edges          # Number of edges subsets (eyeR, eyeL, nose, etc)
        self.pose_required = pose_req       # Multitask flag

        # Image preprocessing: coord-conv channels + strided convs downsample
        # the 256x256 input to the 64x64 working resolution.
        self.pre = nn.Sequential(
            AddCoordsTh(x_dim=self.img_res, y_dim=self.img_res, with_r=True),
            Conv(6, 64, 7, 2, bn=True, relu=True),
            Residual(64, 128),
            Conv(128, 128, 2, 2, bn=True, relu=True),
            Residual(128, 128),
            Residual(128, self.ch_dim)
        )

        # Hourglass modules
        self.hgs = nn.ModuleList([HourglassCore(4, self.ch_dim) for i in range(self.nstack)])
        self.hgs_out = nn.ModuleList([
            nn.Sequential(
                Residual(self.ch_dim, self.ch_dim),
                Conv(self.ch_dim, self.ch_dim, 1, bn=True, relu=True)
            ) for i in range(nstack)])
        if self.pose_required:
            # Extra downsampling of the hourglass core features (x4) for the pose head.
            self.hgs_core = nn.ModuleList([
                nn.Sequential(
                    Residual(self.ch_dim, self.ch_dim),
                    Conv(self.ch_dim, self.ch_dim, 2, 2, bn=True, relu=True),
                    Residual(self.ch_dim, self.ch_dim),
                    Conv(self.ch_dim, self.ch_dim, 2, 2, bn=True, relu=True)
                ) for i in range(nstack)])

        # Attention module (ADnet style): sigmoid point/edge gates.
        self.outs_points = nn.ModuleList([nn.Sequential(Conv(self.ch_dim, self.num_landmarks, 1, relu=False, bn=False),
                                                        nn.Sigmoid()) for i in range(self.nstack - 1)])
        self.outs_edges = nn.ModuleList([nn.Sequential(Conv(self.ch_dim, self.num_edges, 1, relu=False, bn=False),
                                                       nn.Sigmoid()) for i in range(self.nstack - 1)])
        self.E2Ptransform = E2Ptransform(self.num_landmarks, self.num_edges, out_dim=self.out_res)
        self.outs_features = nn.ModuleList([Conv(self.ch_dim, self.num_landmarks, 1, relu=False, bn=False) for i in range(self.nstack - 1)])

        # Stacked Hourglass inputs (nstack > 1): re-inject predictions/features.
        self.merge_preds = nn.ModuleList([Conv(self.num_landmarks, self.ch_dim, 1, relu=False, bn=False) for i in range(self.nstack - 1)])
        self.merge_features = nn.ModuleList([Conv(self.ch_dim, self.ch_dim, 1, relu=False, bn=False) for i in range(self.nstack - 1)])

    def forward(self, imgs):
        """Return {'VisualField': [per-stack features], 'HGcore': [per-stack cores]}."""
        x = self.pre(imgs)
        outputs = {'VisualField': [],
                   'HGcore': []}
        core_raw = []
        for i in range(self.nstack):
            # Hourglass
            hg, core_raw = self.hgs[i](x, core=core_raw)
            if self.pose_required:
                # Each hourglass pass appends n entries to core_raw, so index
                # -n picks the deepest (lowest-resolution) feature of this pass.
                core = self.hgs_core[i](core_raw[-self.hgs[i].n])
                outputs['HGcore'].append(core)
            hg = self.hgs_out[i](hg)

            # Visual features
            outputs['VisualField'].append(hg)

            # Prepare next stacked input
            if i < self.nstack - 1:
                # Attentional modules: edge gates broadcast to their landmarks,
                # combined multiplicatively with the per-point gates.
                points = self.outs_points[i](hg)
                edges = self.outs_edges[i](hg)
                edges_ext = self.E2Ptransform(edges)
                point_edges = points * edges_ext
                # Landmarks
                maps = self.outs_features[i](hg)
                preds = maps * point_edges
                # Outputs
                x = x + self.merge_preds[i](preds) + self.merge_features[i](hg)
        return outputs
| 4,196 | 43.178947 | 139 | py |
SPIGA | SPIGA-main/spiga/models/gnn/step_regressor.py | import torch.nn as nn
from spiga.models.gnn.layers import MLP
from spiga.models.gnn.gat import GAT
class StepRegressor(nn.Module):
    """Cascade of GAT blocks that refines landmark embeddings into 2D offsets.

    :param input_dim: channel dimension of the input embeddings.
    :param feature_dim: channel dimension used by the stacked GAT blocks.
    :param nstack: number of stacked GAT blocks (must be > 0).
    :param decoding: hidden-layer widths of the offset decoder MLP.
    """

    def __init__(self, input_dim: int, feature_dim: int, nstack=4, decoding=(256, 128, 64, 32)):
        super(StepRegressor, self).__init__()
        assert nstack > 0
        self.nstack = nstack
        self.gat = nn.ModuleList([GAT(input_dim, feature_dim, 4)])
        for _ in range(nstack - 1):
            self.gat.append(GAT(feature_dim, feature_dim, 4))
        # The decoder expects a list; copy so a caller's list is not aliased and
        # the (immutable) tuple default works unchanged.
        self.decoder = OffsetDecoder(feature_dim, list(decoding))

    def forward(self, embedded, prob_list=None):
        """Return (offsets, prob_list) where prob_list collects attention maps.

        BUG FIX: the default used to be ``prob_list=[]`` — a mutable default
        shared across calls, so attention maps accumulated (and leaked memory)
        whenever callers relied on the default.
        """
        if prob_list is None:
            prob_list = []
        embedded = embedded.transpose(-1, -2)
        for i in range(self.nstack):
            embedded, prob = self.gat[i](embedded)
            prob_list.append(prob)
        offset = self.decoder(embedded)
        return offset.transpose(-1, -2), prob_list
class OffsetDecoder(nn.Module):
    """Decode per-landmark embeddings into 2D offset predictions via an MLP."""

    def __init__(self, feature_dim, layers):
        super().__init__()
        channels = [feature_dim] + layers + [2]
        self.decoder = MLP(channels)

    def forward(self, embedded):
        """Map (B, feature_dim, N) embeddings to (B, 2, N) offsets."""
        return self.decoder(embedded)
class RelativePositionEncoder(nn.Module):
    """Encode relative-position features with an MLP, transposing around it.

    The input's last two dims are swapped before the MLP (which operates on
    dim 1) and swapped back afterwards.
    """

    def __init__(self, input_dim, feature_dim, layers):
        super().__init__()
        self.encoder = MLP([input_dim] + layers + [feature_dim])

    def forward(self, feature):
        transposed = feature.transpose(-1, -2)
        encoded = self.encoder(transposed)
        return encoded.transpose(-1, -2)
| 1,423 | 31.363636 | 96 | py |
SPIGA | SPIGA-main/spiga/models/gnn/layers.py | from torch import nn
def MLP(channels: list):
    """Build a 1x1-conv multi-layer perceptron over per-node features.

    Every layer is a Conv1d; each hidden layer is followed by BatchNorm1d and
    ReLU, while the final layer is left bare.
    """
    last = len(channels) - 1
    modules = []
    for idx, (c_in, c_out) in enumerate(zip(channels[:-1], channels[1:]), start=1):
        modules.append(nn.Conv1d(c_in, c_out, kernel_size=1, bias=True))
        if idx < last:
            modules.append(nn.BatchNorm1d(c_out))
            modules.append(nn.ReLU())
    return nn.Sequential(*modules)
| 349 | 25.923077 | 88 | py |
SPIGA | SPIGA-main/spiga/models/gnn/gat.py | from copy import deepcopy
import torch
from torch import nn
import torch.nn.functional as F
from spiga.models.gnn.layers import MLP
class GAT(nn.Module):
    """Graph-attention block: residual message passing with optional reshape.

    When the input and output dimensions differ, the head count is lowered to
    the largest value <= num_heads that divides input_dim, and a 1x1 MLP
    projects the residual path to the output dimension.
    """

    def __init__(self, input_dim: int, output_dim: int, num_heads=4):
        super().__init__()
        self.reshape = None
        heads = num_heads
        if input_dim != output_dim:
            while heads > 1 and input_dim % heads != 0:
                heads -= 1
            self.reshape = MLP([input_dim, output_dim])
        self.attention = MessagePassing(input_dim, heads, out_dim=output_dim)

    def forward(self, features):
        """Return (features + message, attention probabilities)."""
        message, prob = self.attention(features)
        residual = self.reshape(features) if self.reshape else features
        return residual + message, prob
class MessagePassing(nn.Module):
    """Self-attention aggregation followed by an MLP merge of [features, message]."""

    def __init__(self, feature_dim: int, num_heads: int, out_dim=None):
        super().__init__()
        self.attn = Attention(num_heads, feature_dim)
        self.mlp = MLP([feature_dim * 2, feature_dim * 2, out_dim])

    def forward(self, features):
        """Return (merged message, attention probabilities)."""
        message, prob = self.attn(features, features, features)
        stacked = torch.cat([features, message], dim=1)
        return self.mlp(stacked), prob
class Attention(nn.Module):
    """Multi-head scaled dot-product attention over 1D node features."""

    def __init__(self, num_heads: int, feature_dim: int):
        super().__init__()
        assert feature_dim % num_heads == 0
        self.dim = feature_dim // num_heads
        self.num_heads = num_heads
        # NOTE: merge is created first and the three input projections are deep
        # copies of it, preserving the original parameter-initialization order.
        self.merge = nn.Conv1d(feature_dim, feature_dim, kernel_size=1)
        self.proj = nn.ModuleList([deepcopy(self.merge) for _ in range(3)])

    def forward(self, query, key, value):
        batch = query.size(0)
        heads = [layer(t).view(batch, self.dim, self.num_heads, -1)
                 for layer, t in zip(self.proj, (query, key, value))]
        mixed, prob = self.attention(*heads)
        mixed = mixed.contiguous().view(batch, self.dim * self.num_heads, -1)
        return self.merge(mixed), prob

    def attention(self, query, key, value):
        """Scaled dot-product attention; returns (weighted values, probabilities)."""
        scores = torch.einsum('bdhn,bdhm->bhnm', query, key) / query.shape[1] ** .5
        prob = F.softmax(scores, dim=-1)
        return torch.einsum('bhnm,bdhm->bdhn', prob, value), prob
| 2,284 | 35.269841 | 92 | py |
SPIGA | SPIGA-main/spiga/models/gnn/pose_proj.py | import torch
import math
def euler_to_rotation_matrix(euler):
    """Convert batched Euler angles in degrees to 3x3 rotation matrices.

    :param euler: tensor of shape (B, 3) with (yaw, pitch, roll) per row.
    :return: tensor of shape (B, 3, 3), R = Ry @ Rp @ Rr.

    BUG FIX: the original mutated `euler` in place during the coordinate-system
    change, so the caller's tensor was silently altered. Work on a copy.
    """
    euler = euler.clone()
    # http://euclideanspace.com/maths/geometry/rotations/conversions/eulerToMatrix/index.htm
    # Change coordinates system
    euler[:, 0] = -(euler[:, 0] - 90)
    euler[:, 1] = -euler[:, 1]
    euler[:, 2] = -(euler[:, 2] + 90)
    # Convert to radians
    rad = euler * (math.pi / 180.0)
    cy = torch.cos(rad[:, 0])
    sy = torch.sin(rad[:, 0])
    cp = torch.cos(rad[:, 1])
    sp = torch.sin(rad[:, 1])
    cr = torch.cos(rad[:, 2])
    sr = torch.sin(rad[:, 2])

    # Init R matrix tensors on the same device as the input.
    working_device = None
    if euler.is_cuda:
        working_device = euler.device
    Ry = torch.zeros((euler.shape[0], 3, 3), device=working_device)
    Rp = torch.zeros((euler.shape[0], 3, 3), device=working_device)
    Rr = torch.zeros((euler.shape[0], 3, 3), device=working_device)

    # Yaw (rotation around the vertical axis)
    Ry[:, 0, 0] = cy
    Ry[:, 0, 2] = sy
    Ry[:, 1, 1] = 1.
    Ry[:, 2, 0] = -sy
    Ry[:, 2, 2] = cy
    # Pitch
    Rp[:, 0, 0] = cp
    Rp[:, 0, 1] = -sp
    Rp[:, 1, 0] = sp
    Rp[:, 1, 1] = cp
    Rp[:, 2, 2] = 1.
    # Roll
    Rr[:, 0, 0] = 1.
    Rr[:, 1, 1] = cr
    Rr[:, 1, 2] = -sr
    Rr[:, 2, 1] = sr
    Rr[:, 2, 2] = cr

    return torch.matmul(torch.matmul(Ry, Rp), Rr)
def projectPoints(pts, rot, trl, cam_matrix):
    """Project batched 3D points to the image plane with a pinhole model.

    :param pts: (B, N, 3) 3D points.
    :param rot: (B, 3, 3) rotation matrices.
    :param trl: (B, 3) translation vectors.
    :param cam_matrix: (B, 3, 3) intrinsic matrices.
    :return: (B, N, 2) projected pixel coordinates.
    """
    device = pts.device if pts.is_cuda else None

    # Perspective projection model: P = K @ [R | t].
    extrinsics = torch.cat((rot, trl.unsqueeze(-1)), dim=2)
    proj_matrix = torch.matmul(cam_matrix, extrinsics)

    # Homogeneous landmarks; ones track trl's grad flag as before.
    ones = torch.ones(pts.shape[:2], device=device, requires_grad=trl.requires_grad)
    pts_hom = torch.cat((pts, ones.unsqueeze(-1)), dim=2)

    # Project and normalize by the depth coordinate (lambda = 1).
    projected = torch.matmul(proj_matrix, pts_hom.transpose(1, 2)).transpose(1, 2)
    projected = projected / projected[:, :, 2].unsqueeze(2)
    return projected[:, :, :-1]
| 2,046 | 25.24359 | 92 | py |
SPIGA | SPIGA-main/spiga/data/loaders/alignments.py | import os
import json
import cv2
import numpy as np
from PIL import Image
from torch.utils.data import Dataset
from torchvision import transforms
from spiga.data.loaders.transforms import get_transformers
class AlignmentsDataset(Dataset):
    """Dataset of face images annotated with landmarks and bounding boxes.

    Each record of the annotations JSON provides the image path, landmark ids
    and coordinates, per-landmark visibility, an optional bounding box and an
    optional head pose.
    """

    def __init__(self,
                 database,
                 json_file,
                 images_dir,
                 image_size=(128, 128),
                 transform=None,
                 indices=None,
                 debug=False):
        """
        :param database: class DatabaseStruct containing all the specifics of the database.
        :param json_file: path to the json file with image names, landmarks, bounding boxes, etc.
        :param images_dir: path of the directory containing the images.
        :param image_size: tuple like e.g. (128, 128).
        :param transform: composition of transformations applied to the samples.
        :param indices: optional list of indices restricting the dataset to a subset.
        :param debug: if True, untouched copies of the annotations are kept in the sample.
        """
        self.database = database
        self.images_dir = images_dir
        self.transform = transform
        self.image_size = image_size
        self.indices = indices
        self._imgs_dict = None  # optional in-memory image cache (disabled by default)
        self.debug = debug
        with open(json_file) as jsonfile:
            self.data = json.load(jsonfile)

    def __len__(self):
        """Return the dataset length (restricted subset when `indices` is set)."""
        if self.indices is None:
            return len(self.data)
        return len(self.indices)

    def __getitem__(self, sample_idx):
        """Return the sample of index `sample_idx` (after the `indices` remap)."""
        # To allow work with a subset
        if self.indices is not None:
            sample_idx = self.indices[sample_idx]
        anns = self.data[sample_idx]

        # Load sample image (from the in-memory cache when available).
        img_name = os.path.join(self.images_dir, anns['imgpath'])
        if not self._imgs_dict:
            image_cv = cv2.imread(img_name)
        else:
            image_cv = self._imgs_dict[sample_idx]
        # Some images are B&W. We make sure that any image has three channels.
        if len(image_cv.shape) == 2:
            image_cv = np.repeat(image_cv[:, :, np.newaxis], 3, axis=-1)
        # Some images have alpha channel
        image_cv = image_cv[:, :, :3]
        image_cv = cv2.cvtColor(image_cv, cv2.COLOR_BGR2RGB)
        image = Image.fromarray(image_cv)

        # Load sample anns
        ids = np.array(anns['ids'])
        landmarks = np.array(anns['landmarks'])
        vis = np.array(anns['visible'])
        headpose = anns['headpose']

        # BUG FIX: the original compared np.array(bbox) with None, which is
        # never true (np.array(None) is a 0-d object array), so the
        # bbox-from-landmarks fallback was unreachable. Check the raw
        # annotation before converting.
        bbox = anns['bbox']
        if bbox is None:
            # Compute bbox using the visible landmarks.
            aux = landmarks[vis == 1.0]
            bbox = np.zeros(4)
            bbox[0] = min(aux[:, 0])
            bbox[1] = min(aux[:, 1])
            bbox[2] = max(aux[:, 0]) - bbox[0]
            bbox[3] = max(aux[:, 1]) - bbox[1]
        else:
            bbox = np.array(bbox)

        # Remap annotated landmarks onto the database's canonical id order,
        # masking out canonical landmarks missing from this annotation.
        mask_ldm = np.ones(self.database.num_landmarks)
        if self.database.ldm_ids != ids.tolist():
            new_ldm = np.zeros((self.database.num_landmarks, 2))
            new_vis = np.zeros(self.database.num_landmarks)
            xyv = np.hstack((landmarks, vis[np.newaxis, :].T))
            ids_dict = dict(zip(ids.astype(int).astype(str), xyv))
            for pos, identifier in enumerate(self.database.ldm_ids):
                if str(identifier) in ids_dict:
                    x, y, v = ids_dict[str(identifier)]
                    new_ldm[pos] = [x, y]
                    new_vis[pos] = v
                else:
                    mask_ldm[pos] = 0
            landmarks = new_ldm
            vis = new_vis

        sample = {'image': image,
                  'sample_idx': sample_idx,
                  'imgpath': img_name,
                  'ids_ldm': np.array(self.database.ldm_ids),
                  'bbox': bbox,
                  'bbox_raw': bbox,
                  'landmarks': landmarks,
                  'visible': vis.astype(np.float64),
                  'mask_ldm': mask_ldm,
                  'imgpath_local': anns['imgpath'],
                  }

        if self.debug:
            sample['landmarks_ori'] = landmarks
            sample['visible_ori'] = vis.astype(np.float64)
            sample['mask_ldm_ori'] = mask_ldm
            if headpose is not None:
                sample['headpose_ori'] = np.array(headpose)

        if self.transform:
            sample = self.transform(sample)
        return sample
def get_dataset(data_config, pretreat=None, debug=False):
    """Build an AlignmentsDataset with the config's augmentation pipeline.

    `pretreat`, when given, is appended as the last transformation.
    """
    augmentors = get_transformers(data_config)
    if pretreat is not None:
        augmentors.append(pretreat)
    return AlignmentsDataset(data_config.database,
                             data_config.anns_file,
                             data_config.image_dir,
                             image_size=data_config.image_size,
                             transform=transforms.Compose(augmentors),
                             indices=data_config.ids,
                             debug=debug)
| 5,540 | 33.849057 | 118 | py |
SPIGA | SPIGA-main/spiga/data/loaders/dataloader.py | from torch.utils.data import DataLoader
from torch.utils.data.distributed import DistributedSampler
import spiga.data.loaders.alignments as zoo_alignments
zoos = [zoo_alignments]
def get_dataset(data_config, pretreat=None, debug=False):
    """Return the first dataset a registered zoo can build for this config."""
    for zoo in zoos:
        candidate = zoo.get_dataset(data_config, pretreat=pretreat, debug=debug)
        if candidate is not None:
            return candidate
    raise NotImplementedError('Dataset not available')
def get_dataloader(batch_size, data_config, pretreat=None, sampler_cfg=None, debug=False):
    """Build a DataLoader (and its dataset) from the data configuration.

    :param batch_size: batch size of the loader.
    :param data_config: configuration object (shuffle flag, workers, etc.).
    :param pretreat: optional extra transformation appended to the pipeline.
    :param sampler_cfg: when given, a DistributedSampler is built from its
                        world_size/rank and shuffling is delegated to it.
    :param debug: forwarded to the dataset constructor.
    :return: (dataloader, dataset) tuple.
    """
    dataset = get_dataset(data_config, pretreat=pretreat, debug=debug)

    # Drop the last batch when shuffling would leave a stray batch of size 1
    # (presumably to protect batch-size-sensitive layers — TODO confirm).
    # Idiom fix: compare the boolean directly instead of `== True`.
    drop_last_batch = bool(data_config.shuffle) and (len(dataset) % batch_size == 1)

    shuffle = data_config.shuffle
    sampler = None
    if sampler_cfg is not None:
        sampler = DistributedSampler(dataset, num_replicas=sampler_cfg.world_size, rank=sampler_cfg.rank)
        shuffle = False  # the sampler owns the shuffling

    dataloader = DataLoader(dataset,
                            batch_size=batch_size,
                            shuffle=shuffle,
                            num_workers=data_config.num_workers,
                            pin_memory=True,
                            drop_last=drop_last_batch,
                            sampler=sampler)
    return dataloader, dataset
| 1,360 | 31.404762 | 105 | py |
SPIGA | SPIGA-main/spiga/data/loaders/transforms.py | import cv2
import numpy as np
import torch
from spiga.data.loaders.augmentors.modern_posit import PositPose
from spiga.data.loaders.augmentors.heatmaps import Heatmaps
from spiga.data.loaders.augmentors.boundary import AddBoundary
from spiga.data.loaders.augmentors.landmarks import HorizontalFlipAug, RSTAug, OcclusionAug, \
LightingAug, BlurAug, TargetCropAug
def get_transformers(data_config):
    """Build the sample-transformation pipeline described by `data_config`.

    Returns a list of callables: the enabled augmentations (flip, rotate/scale,
    occlusion, lighting, blur), the mandatory target crop, PIL->OpenCV
    conversion, optional heatmap/boundary generators and, when configured,
    the POSIT pose generator.
    """
    # Data augmentation
    aug_names = data_config.aug_names
    augmentors = []
    if 'flip' in aug_names:
        augmentors.append(HorizontalFlipAug(data_config.database.ldm_flip_order, data_config.hflip_prob))
    if 'rotate_scale' in aug_names:
        augmentors.append(RSTAug(data_config.angle_range, data_config.scale_min,
                                 data_config.scale_max, data_config.trl_ratio))
    if 'occlusion' in aug_names:
        augmentors.append(OcclusionAug(data_config.occluded_min_len,
                                       data_config.occluded_max_len,
                                       data_config.database.num_landmarks))
    if 'lighting' in aug_names:
        augmentors.append(LightingAug(data_config.hsv_range_min, data_config.hsv_range_max))
    if 'blur' in aug_names:
        augmentors.append(BlurAug(data_config.blur_prob, data_config.blur_kernel_range))

    # Crop mandatory
    augmentors.append(TargetCropAug(data_config.image_size, data_config.ftmap_size, data_config.target_dist))

    # Opencv style (PIL RGB -> numpy BGR)
    augmentors.append(ToOpencv())

    # Gaussian heatmaps
    if 'heatmaps2D' in aug_names:
        augmentors.append(Heatmaps(data_config.database.num_landmarks, data_config.ftmap_size,
                                   data_config.sigma2D, norm=data_config.heatmap2D_norm))
    if 'boundaries' in aug_names:
        augmentors.append(AddBoundary(num_landmarks=data_config.database.num_landmarks,
                                      map_size=data_config.ftmap_size,
                                      sigma=data_config.sigmaBD))

    # Pose generator (POSIT fit of the rigid 3D model)
    if data_config.generate_pose:
        augmentors.append(PositPose(data_config.database.ldm_ids,
                                    focal_ratio=data_config.focal_ratio,
                                    selected_ids=data_config.posit_ids,
                                    max_iter=data_config.posit_max_iter))

    return augmentors
class ToOpencv:
    """Convert the sample's PIL image into an OpenCV-style BGR numpy array."""

    def __call__(self, sample):
        rgb_array = np.array(sample['image'])
        sample['image'] = cv2.cvtColor(rgb_array, cv2.COLOR_RGB2BGR)
        return sample
class TargetCrop(TargetCropAug):
    """Square target crop at a single resolution (image and map sizes equal)."""

    def __init__(self, crop_size=256, target_dist=1.6):
        super().__init__(crop_size, crop_size, target_dist)
class AddModel3D(PositPose):
    """Attach the rigid 3D face model and camera intrinsics to a sample."""

    def __init__(self, ldm_ids, ftmap_size=(256, 256), focal_ratio=1.5, totensor=False):
        super(AddModel3D, self).__init__(ldm_ids, focal_ratio=focal_ratio)
        img_bbox = [0, 0, ftmap_size[1], ftmap_size[0]]  # Shapes given are inverted (y,x)
        self.cam_matrix = self._camera_matrix(img_bbox)

        if totensor:
            self.cam_matrix = torch.tensor(self.cam_matrix, dtype=torch.float)
            self.model3d_world = torch.tensor(self.model3d_world, dtype=torch.float)

    def __call__(self, sample=None):
        """Write 'cam_matrix' and 'model3d' into `sample` and return it.

        BUG FIX: the default used to be ``sample={}`` — a mutable default dict
        shared across calls, so every no-argument invocation mutated and
        returned the very same dictionary.
        """
        if sample is None:
            sample = {}
        # Save intrinsic matrix and 3D model landmarks
        sample['cam_matrix'] = self.cam_matrix
        sample['model3d'] = self.model3d_world
        return sample
| 3,558 | 40.870588 | 109 | py |
SPIGA | SPIGA-main/spiga/data/loaders/augmentors/landmarks.py | import random
import cv2
import numpy as np
from PIL import Image
from torchvision import transforms
# My libs
import spiga.data.loaders.augmentors.utils as dlu
class HorizontalFlipAug:
    """Randomly mirror the sample horizontally with probability `prob`.

    Mirroring swaps left/right landmarks, so `ldm_flip_order` supplies the
    landmark re-indexing that restores semantic correspondence after the flip.
    """

    def __init__(self, ldm_flip_order, prob=0.5):
        self.prob = prob  # probability of applying the flip
        self.ldm_flip_order = ldm_flip_order  # landmark permutation for mirrored faces

    def __call__(self, sample):
        img = sample['image']
        landmarks = sample['landmarks']
        mask = sample['mask_ldm']
        vis = sample['visible']
        bbox = sample['bbox']

        if random.random() < self.prob:
            new_img = transforms.functional.hflip(img)
            # Re-index so left-side points stay semantically left-side.
            lm_new_order = self.ldm_flip_order
            new_landmarks = landmarks[lm_new_order]
            # Mirror x around the image width (img.size[0] is the PIL width).
            new_landmarks = (new_landmarks - (img.size[0], 0)) * (-1, 1)
            new_mask = mask[lm_new_order]
            new_vis = vis[lm_new_order]
            # Mirror the bbox: the new left edge is width - x - w.
            x, y, w, h = bbox
            new_x = img.size[0] - x - w
            new_bbox = np.array((new_x, y, w, h))

            sample['image'] = new_img
            sample['landmarks'] = new_landmarks
            sample['mask_ldm'] = new_mask
            sample['visible'] = new_vis
            sample['bbox'] = new_bbox
        return sample
class GeometryBaseAug:
    """Base helper for geometric augmentations.

    Provides utilities to apply a 2x3 affine transformation consistently to
    the sample image, bounding box and landmarks. Subclasses implement
    __call__.
    """

    def __call__(self, sample):
        raise NotImplementedError('Inheritance __call__ not defined')

    def map_affine_transformation(self, sample, affine_transf, new_size=None):
        """Apply `affine_transf` to the image, bbox and (if present) landmarks."""
        sample['image'] = self._image_affine_trans(sample['image'], affine_transf, new_size)
        sample['bbox'] = self._bbox_affine_trans(sample['bbox'], affine_transf)
        if 'landmarks' in sample.keys():
            sample['landmarks'] = self._landmarks_affine_trans(sample['landmarks'], affine_transf)
        return sample

    def clean_outbbox_landmarks(self, shape, landmarks, mask):
        """Mask landmarks outside `shape` (x, y, w, h) and zero their coordinates.

        Masked landmarks become (0, 0); surviving coordinates are truncated to
        integers (then cast back to float).
        """
        filter_x1 = landmarks[:, 0] >= shape[0]
        filter_x2 = landmarks[:, 0] < (shape[0] + shape[2])
        filter_x = np.logical_and(filter_x1, filter_x2)
        filter_y1 = landmarks[:, 1] >= shape[1]
        filter_y2 = landmarks[:, 1] < (shape[1] + shape[3])
        filter_y = np.logical_and(filter_y1, filter_y2)
        filter_bbox = np.logical_and(filter_x, filter_y)
        new_mask = mask*filter_bbox
        new_landmarks = (landmarks.T * new_mask).T
        new_landmarks = new_landmarks.astype(int).astype(float)
        return new_mask, new_landmarks

    def _image_affine_trans(self, image, affine_transf, new_size=None):
        """Warp a PIL image; PIL's Image.transform expects the inverse mapping."""
        if not new_size:
            new_size = image.size
        inv_affine_transf = dlu.get_inverse_transf(affine_transf)
        new_image = image.transform(new_size, Image.AFFINE, inv_affine_transf.flatten())
        return new_image

    def _bbox_affine_trans(self, bbox, affine_transf):
        """Transform the four bbox corners and return their axis-aligned envelope."""
        x, y, w, h = bbox
        images_bb = []
        for point in ([x, y, 1], [x + w, y, 1],
                      [x, y + h, 1], [x + w, y + h, 1]):
            images_bb.append(affine_transf.dot(point))
        images_bb = np.array(images_bb)
        new_corner0 = np.min(images_bb, axis=0)
        new_corner1 = np.max(images_bb, axis=0)
        new_x, new_y = new_corner0
        new_w, new_h = new_corner1 - new_corner0
        new_bbox = np.array((new_x, new_y, new_w, new_h))
        return new_bbox

    def _landmarks_affine_trans(self, landmarks, affine_transf):
        """Apply the affine transform to Nx2 landmarks via homogeneous coordinates."""
        homog_landmarks = dlu.affine2homogeneous(landmarks)
        new_landmarks = affine_transf.dot(homog_landmarks.T).T
        return new_landmarks
class RSTAug(GeometryBaseAug):
    """Random rotation/scale/translation augmentation around the bbox center."""

    def __init__(self, angle_range=45., scale_min=-0.15, scale_max=0.15, trl_ratio=0.05):
        self.scale_max = scale_max      # scale = 1 + U[scale_min, scale_max]
        self.scale_min = scale_min
        self.angle_range = angle_range  # rotation sampled from U[-range, range] degrees
        self.trl_ratio = trl_ratio      # translation as a fraction of bbox width/height

    def __call__(self, sample):
        x, y, w, h = sample['bbox']
        x0, y0 = x + w/2, y + h/2  # center of the face, which will be the center of the rotation

        # Bbox translation (jitter applied directly to the bbox corner).
        rnd_Tx = np.random.uniform(-self.trl_ratio, self.trl_ratio) * w
        rnd_Ty = np.random.uniform(-self.trl_ratio, self.trl_ratio) * h
        sample['bbox'][0] += rnd_Tx
        sample['bbox'][1] += rnd_Ty

        # Random similarity (rotation + scale) about the original bbox center.
        scale = 1 + np.random.uniform(self.scale_min, self.scale_max)
        angle = np.random.uniform(-self.angle_range, self.angle_range)
        similarity = dlu.get_similarity_matrix(angle, scale, center=(x0, y0))
        new_sample = self.map_affine_transformation(sample, similarity)
        return new_sample
class TargetCropAug(GeometryBaseAug):
def __init__(self, img_new_size=128, map_new_size=128, target_dist=1.3):
    """Configure the target crop.

    :param img_new_size: output image size (scalar or (x, y) pair).
    :param map_new_size: feature-map size (scalar or (x, y) pair).
    :param target_dist: enlargement factor applied around the bbox.
    """
    self.target_dist = target_dist
    self.new_size_x, self.new_size_y = self._convert_shapes(img_new_size)
    self.map_size_x, self.map_size_y = self._convert_shapes(map_new_size)
    self.img2map_scale = False
    # Mismatch between img shape and featuremap shape
    if self.map_size_x != self.new_size_x or self.map_size_y != self.new_size_y:
        self.img2map_scale = True
        self.map_scale_x = self.map_size_x / self.new_size_x
        self.map_scale_y = self.map_size_y / self.new_size_y
        # Precomputed scale products (presumably consumed by covariance/Jacobian
        # scaling elsewhere in the pipeline — TODO confirm usage).
        self.map_scale_xx = self.map_scale_x * self.map_scale_x
        self.map_scale_xy = self.map_scale_x * self.map_scale_y
        self.map_scale_yy = self.map_scale_y * self.map_scale_y
def _convert_shapes(self, new_size):
if isinstance(new_size, (tuple, list)):
new_size_x = new_size[0]
new_size_y = new_size[1]
else:
new_size_x = new_size
new_size_y = new_size
return new_size_x, new_size_y
def __call__(self, sample):
x, y, w, h = sample['bbox']
# we enlarge the area taken around the bounding box
# it is neccesary to change the botton left point of the bounding box
# according to the previous enlargement. Note this will NOT be the new
# bounding box!
# We return square images, which is neccesary since
# all the images must have the same size in order to form batches
side = max(w, h) * self.target_dist
x -= (side - w) / 2
y -= (side - h) / 2
# center of the enlarged bounding box
x0, y0 = x + side/2, y + side/2
# homothety factor, chosen so the new horizontal dimension will
# coincide with new_size
mu_x = self.new_size_x / side
mu_y = self.new_size_y / side
# new_w, new_h = new_size, int(h * mu)
new_w = self.new_size_x
new_h = self.new_size_y
new_x0, new_y0 = new_w / 2, new_h / 2
# dilatation + translation
affine_transf = np.array([[mu_x, 0, new_x0 - mu_x * x0],
[0, mu_y, new_y0 - mu_y * y0]])
sample = self.map_affine_transformation(sample, affine_transf,(new_w, new_h))
if 'landmarks' in sample.keys():
img_shape = np.array([0, 0, self.new_size_x, self.new_size_y])
sample['landmarks_float'] = sample['landmarks']
sample['mask_ldm_float'] = sample['mask_ldm']
sample['landmarks'] = np.round(sample['landmarks'])
sample['mask_ldm'], sample['landmarks'] = self.clean_outbbox_landmarks(img_shape, sample['landmarks'],
sample['mask_ldm'])
if self.img2map_scale:
sample = self._rescale_map(sample)
return sample
def _rescale_map(self, sample):
# Rescale
lnd_float = sample['landmarks_float']
lnd_float[:, 0] = self.map_scale_x * lnd_float[:, 0]
lnd_float[:, 1] = self.map_scale_y * lnd_float[:, 1]
# Filter landmarks
lnd = np.round(lnd_float)
filter_x = lnd[:, 0] >= self.map_size_x
filter_y = lnd[:, 1] >= self.map_size_y
lnd[filter_x] = self.map_size_x - 1
lnd[filter_y] = self.map_size_y - 1
new_lnd = (lnd.T * sample['mask_ldm']).T
new_lnd = new_lnd.astype(int).astype(float)
sample['landmarks_float'] = lnd_float
sample['landmarks'] = new_lnd
sample['img2map_scale'] = [self.map_scale_x, self.map_scale_y]
return sample
class OcclusionAug:
    """Paint a random uniform-color rectangle inside the bbox and mark the
    landmarks it covers as not visible."""

    def __init__(self, min_length=0.1, max_length=0.4, num_maps=1):
        self.min_length = min_length
        self.max_length = max_length
        self.num_maps = num_maps

    def __call__(self, sample):
        x, y, w, h = sample['bbox']
        image = sample['image']
        landmarks = sample['landmarks']
        visibility = sample['visible']
        # Rectangle side lengths as random fractions of the bbox size.
        occ_w = np.random.randint(int(w * self.min_length), int(w * self.max_length))
        occ_h = np.random.randint(int(h * self.min_length), int(h * self.max_length))
        # (xi, yi) and (xf, yf) delimit the occlusion rectangle.
        xi = int(x + np.random.randint(0, w - occ_w))
        xf = int(xi + occ_w)
        yi = int(y + np.random.randint(0, h - occ_h))
        yf = int(yi + occ_h)
        # Fill the rectangle with a single random RGB color.
        pixels = np.array(image)
        pixels[yi:yf, xi:xf, :] = np.random.uniform(0, 255, size=3)
        sample['image'] = Image.fromarray(pixels)
        # A landmark is occluded when it falls inside the rectangle.
        inside_x = (landmarks[:, 0] >= xi) & (landmarks[:, 0] < xf)
        inside_y = (landmarks[:, 1] >= yi) & (landmarks[:, 1] < yf)
        occluded = inside_x & inside_y
        sample['visible'] = visibility * np.logical_not(occluded)
        return sample
class LightingAug:
    """Randomly jitter the hue, saturation and value of the image."""

    def __init__(self, hsv_range_min=(-0.5, -0.5, -0.5), hsv_range_max=(0.5, 0.5, 0.5)):
        self.hsv_range_min = hsv_range_min
        self.hsv_range_max = hsv_range_max

    def __call__(self, sample):
        # Work in HSV so each channel can be scaled independently.
        hsv = cv2.cvtColor(np.array(sample['image']), cv2.COLOR_RGB2HSV)
        # One multiplicative gain per channel, centered at 1.0.
        gains = [1 + np.random.uniform(lo, hi)
                 for lo, hi in zip(self.hsv_range_min, self.hsv_range_max)]
        # Clip to OpenCV's 8-bit HSV limits: H <= 179, S and V <= 255.
        for channel, (gain, top) in enumerate(zip(gains, (179, 255, 255))):
            hsv[:, :, channel] = np.clip(gain * hsv[:, :, channel], 0, top)
        # Back to RGB for the rest of the pipeline.
        sample['image'] = Image.fromarray(cv2.cvtColor(hsv, cv2.COLOR_HSV2RGB))
        return sample
class BlurAug:
    """Randomly Gaussian-blur the image.

    With probability ``blur_prob`` the image is blurred with an odd kernel
    size ``2 * k + 1`` where ``k`` is drawn uniformly from
    ``blur_kernel_range`` (both endpoints inclusive).
    """

    def __init__(self, blur_prob=0.5, blur_kernel_range=(0, 2)):
        self.blur_prob = blur_prob
        self.kernel_range = blur_kernel_range

    def __call__(self, sample):
        # Smooth image
        image = np.array(sample['image'])
        if np.random.uniform(0.0, 1.0) < self.blur_prob:
            # np.random.random_integers is deprecated and removed in modern
            # NumPy; randint with high + 1 keeps the original inclusive
            # upper bound.
            kernel = np.random.randint(self.kernel_range[0], self.kernel_range[1] + 1) * 2 + 1
            image = cv2.GaussianBlur(image, (kernel, kernel), 0, 0)
        sample['image'] = Image.fromarray(image)
        return sample
| 11,374 | 35.931818 | 114 | py |
SPIGA | SPIGA-main/spiga/inference/pretreatment.py | from torchvision import transforms
import numpy as np
from PIL import Image
import cv2
from spiga.data.loaders.transforms import TargetCrop, ToOpencv, AddModel3D
def get_transformers(data_config):
    """Build the inference pretreatment pipeline.

    Steps: OpenCV BGR image -> PIL, crop around the target bbox,
    back to an OpenCV array, then normalize and permute to CHW.
    """
    pipeline = [Opencv2Pil()]
    pipeline.append(TargetCrop(data_config.image_size, data_config.target_dist))
    pipeline.append(ToOpencv())
    pipeline.append(NormalizeAndPermute())
    return transforms.Compose(pipeline)
class NormalizeAndPermute:
    """Convert an HWC image into a CHW float array scaled to [0, 1]."""

    def __call__(self, sample):
        chw = np.asarray(sample['image'], dtype=float).transpose(2, 0, 1)
        sample['image'] = chw / 255
        return sample
class Opencv2Pil:
    """Convert an OpenCV BGR array into a PIL RGB image."""

    def __call__(self, sample):
        rgb = cv2.cvtColor(sample['image'], cv2.COLOR_BGR2RGB)
        sample['image'] = Image.fromarray(rgb)
        return sample
| 825 | 24.8125 | 74 | py |
SPIGA | SPIGA-main/spiga/inference/framework.py | import os
import pkg_resources
import copy
import torch
import numpy as np
# Paths
weights_path_dft = pkg_resources.resource_filename('spiga', 'models/weights')
import spiga.inference.pretreatment as pretreat
from spiga.models.spiga import SPIGA
from spiga.inference.config import ModelConfig
class SPIGAFramework:
    """Inference wrapper around the SPIGA landmark / head-pose network.

    Loads the network weights and (optionally) the rigid 3D face model plus
    camera intrinsics, and exposes ``inference(image, bboxes)`` which
    returns per-bbox landmarks in image coordinates and a head pose vector.
    """

    def __init__(self, model_cfg: ModelConfig, gpus=[0], load3DM=True):
        # NOTE: the original annotation was `ModelConfig()`, which built a
        # config instance at definition time just to use it as a type hint.
        # Parameters
        self.model_cfg = model_cfg
        self.gpus = gpus

        # Pretreatment initialization
        self.transforms = pretreat.get_transformers(self.model_cfg)

        # SPIGA model
        self.model_inputs = ['image', "model3d", "cam_matrix"]
        self.model = SPIGA(num_landmarks=model_cfg.dataset.num_landmarks,
                           num_edges=model_cfg.dataset.num_edges)

        # Load weights and set model; fall back to the packaged weights dir.
        weights_path = self.model_cfg.model_weights_path
        if weights_path is None:
            weights_path = weights_path_dft

        if self.model_cfg.load_model_url:
            model_state_dict = torch.hub.load_state_dict_from_url(self.model_cfg.model_weights_url,
                                                                  model_dir=weights_path,
                                                                  file_name=self.model_cfg.model_weights)
        else:
            weights_file = os.path.join(weights_path, self.model_cfg.model_weights)
            model_state_dict = torch.load(weights_file)

        self.model.load_state_dict(model_state_dict)
        self.model = self.model.cuda(gpus[0])
        self.model.eval()
        print('SPIGA model loaded!')

        # Load 3D model and camera intrinsic matrix
        if load3DM:
            loader_3DM = pretreat.AddModel3D(model_cfg.dataset.ldm_ids,
                                             ftmap_size=model_cfg.ftmap_size,
                                             focal_ratio=model_cfg.focal_ratio,
                                             totensor=True)
            params_3DM = self._data2device(loader_3DM())
            self.model3d = params_3DM['model3d']
            self.cam_matrix = params_3DM['cam_matrix']

    def inference(self, image, bboxes):
        """
        @param self:
        @param image: Raw image
        @param bboxes: List of bounding box founded on the image [[x,y,w,h],...]
        @return: features dict {'landmarks': list with shape (num_bbox, num_landmarks, 2) and x,y referred to image size
                                'headpose': list with shape (num_bbox, 6) euler->[:3], trl->[3:]
        """
        batch_crops, crop_bboxes = self.pretreat(image, bboxes)
        outputs = self.net_forward(batch_crops)
        features = self.postreatment(outputs, crop_bboxes, bboxes)
        return features

    def pretreat(self, image, bboxes):
        """Crop one patch per bbox and assemble the batched model inputs."""
        crop_bboxes = []
        crop_images = []
        for bbox in bboxes:
            # Deep-copy so the crop transform can't mutate caller data.
            sample = {'image': copy.deepcopy(image),
                      'bbox': copy.deepcopy(bbox)}
            sample_crop = self.transforms(sample)
            crop_bboxes.append(sample_crop['bbox'])
            crop_images.append(sample_crop['image'])

        # Images to tensor and device
        batch_images = torch.tensor(np.array(crop_images), dtype=torch.float)
        batch_images = self._data2device(batch_images)

        # Batch 3D model and camera intrinsic matrix (shared across crops).
        batch_model3D = self.model3d.unsqueeze(0).repeat(len(bboxes), 1, 1)
        batch_cam_matrix = self.cam_matrix.unsqueeze(0).repeat(len(bboxes), 1, 1)

        # SPIGA inputs
        model_inputs = [batch_images, batch_model3D, batch_cam_matrix]
        return model_inputs, crop_bboxes

    def net_forward(self, inputs):
        """Run the network on the prepared input list."""
        outputs = self.model(inputs)
        return outputs

    def postreatment(self, output, crop_bboxes, bboxes):
        """Map network outputs from crop coordinates back to image coordinates."""
        features = {}
        crop_bboxes = np.array(crop_bboxes)
        bboxes = np.array(bboxes)

        if 'Landmarks' in output.keys():
            landmarks = output['Landmarks'][-1].cpu().detach().numpy()
            landmarks = landmarks.transpose((1, 0, 2))
            # Network outputs are normalized to the crop; scale to pixels,
            # normalize by the crop bbox, then project into the original bbox.
            landmarks = landmarks*self.model_cfg.image_size
            landmarks_norm = (landmarks - crop_bboxes[:, 0:2]) / crop_bboxes[:, 2:4]
            landmarks_out = (landmarks_norm * bboxes[:, 2:4]) + bboxes[:, 0:2]
            landmarks_out = landmarks_out.transpose((1, 0, 2))
            features['landmarks'] = landmarks_out.tolist()

        # Pose output
        if 'Pose' in output.keys():
            pose = output['Pose'].cpu().detach().numpy()
            features['headpose'] = pose.tolist()

        return features

    def select_inputs(self, batch):
        """Pick the model input tensors from a data batch and move them to device."""
        inputs = []
        for ft_name in self.model_inputs:
            data = batch[ft_name]
            inputs.append(self._data2device(data.type(torch.float)))
        return inputs

    def _data2device(self, data):
        """Recursively move tensors (possibly nested in lists/dicts) to the GPU."""
        if isinstance(data, list):
            data_var = data
            for data_id, v_data in enumerate(data):
                data_var[data_id] = self._data2device(v_data)
        # BUG FIX: this was a second `if`, so a list fell through to the
        # `else` branch below and crashed on `list.cuda(...)`.
        elif isinstance(data, dict):
            data_var = data
            for k, v in data.items():
                data[k] = self._data2device(v)
        else:
            with torch.no_grad():
                data_var = data.cuda(device=self.gpus[0], non_blocking=True)
        return data_var
| 5,368 | 37.905797 | 120 | py |
ReconVAT | ReconVAT-master/train_baseline_Thickstun.py | import os
from datetime import datetime
import pickle
import numpy as np
from sacred import Experiment
from sacred.commands import print_config, save_config
from sacred.observers import FileStorageObserver
from torch.optim.lr_scheduler import StepLR, CyclicLR
from torch.utils.data import DataLoader, ConcatDataset
from tqdm import tqdm
from model import *
ex = Experiment('train_original')
# parameters for the network
ds_ksize, ds_stride = (2,2),(2,2)
mode = 'imagewise'
sparsity = 1
output_channel = 2
logging_freq = 10
saving_freq = 10
@ex.config
def config():
    # Sacred config scope: every local assignment below is registered as a
    # named, overridable config entry — do not rename these variables.
    root = 'runs'
    # logdir = f'runs_AE/test' + '-' + datetime.now().strftime('%y%m%d-%H%M%S')
    # Choosing GPU to use
    # GPU = '0'
    # os.environ['CUDA_VISIBLE_DEVICES']=str(GPU)
    onset_stack=True
    device = 'cuda:0'
    log = True
    w_size = 31
    spec = 'Mel'
    resume_iteration = None
    train_on = 'String'
    # Attention / VAT hyper-parameters (VAT unused by this baseline's loop).
    n_heads=4
    position=True
    iteration = 10
    VAT_start = 0
    alpha = 1
    VAT=True
    XI= 1e-6
    eps=1.3
    small = True
    KL_Div = False
    reconstruction = False
    batch_size = 1
    train_batch_size = 1
    sequence_length = 327680
    # Halve memory use on GPUs with less than ~10 GB.
    if torch.cuda.is_available() and torch.cuda.get_device_properties(torch.cuda.current_device()).total_memory < 10e9:
        batch_size //= 2
        sequence_length //= 2
        print(f'Reducing batch size to {batch_size} and sequence_length to {sequence_length} to save memory')
    epoches = 20000
    step_size_up = 100
    max_lr = 1e-4
    learning_rate = 0.0001
    # base_lr = learning_rate
    learning_rate_decay_steps = 1000
    learning_rate_decay_rate = 0.98
    leave_one_out = None
    clip_gradient_norm = 3
    validation_length = sequence_length
    refresh = False
    logdir = f'{root}/baseline_ThickStun-lr={learning_rate}'+ datetime.now().strftime('%y%m%d-%H%M%S')
ex.observers.append(FileStorageObserver.create(logdir)) # saving source code
@ex.automain
def train(spec, resume_iteration, train_on, batch_size, sequence_length,w_size, n_heads, small, train_batch_size,
          learning_rate, learning_rate_decay_steps, learning_rate_decay_rate, leave_one_out, position, alpha, KL_Div,
          clip_gradient_norm, validation_length, refresh, device, epoches, logdir, log, iteration, VAT_start, VAT, XI, eps,
          reconstruction):
    """Train the Thickstun baseline (supervised only) and evaluate on the
    full test split. All arguments are injected by sacred from `config`."""
    print_config(ex.current_run)
    supervised_set, unsupervised_set, validation_dataset, full_validation = prepare_VAT_dataset(
        sequence_length=sequence_length,
        validation_length=sequence_length,
        refresh=refresh,
        device=device,
        small=small,
        supersmall=True,
        dataset=train_on)
    # Cap the validation batch size at 4.
    if len(validation_dataset)>4:
        val_batch_size=4
    else:
        val_batch_size = len(validation_dataset)
    supervised_loader = DataLoader(supervised_set, train_batch_size, shuffle=True, drop_last=True)
    valloader = DataLoader(validation_dataset, val_batch_size, shuffle=False, drop_last=True)
    batch_visualize = next(iter(valloader)) # Getting one fixed batch for visualization

    ds_ksize, ds_stride = (2,2),(2,2)
    model = Thickstun()
    model.to(device)
    optimizer = torch.optim.Adam(model.parameters(), learning_rate)
    summary(model)
    scheduler = StepLR(optimizer, step_size=learning_rate_decay_steps, gamma=learning_rate_decay_rate)

    print(f'supervised_loader')
    for ep in range(1, epoches+1):
        predictions, losses, optimizer = train_model(model, ep, supervised_loader,
                                                     optimizer, scheduler, clip_gradient_norm)
        loss = sum(losses.values())

        # Logging results to tensorboard
        if ep == 1:
            writer = SummaryWriter(logdir) # create tensorboard logger
        tensorboard_log_without_VAT(batch_visualize, model, validation_dataset, supervised_loader,
                                    ep, logging_freq, saving_freq, n_heads, logdir, w_size, writer,
                                    False, VAT_start, reconstruction)

        # Saving model checkpoints periodically
        if (ep)%saving_freq == 0:
            torch.save(model.state_dict(), os.path.join(logdir, f'model-{ep}.pt'))
            torch.save(optimizer.state_dict(), os.path.join(logdir, 'last-optimizer-state.pt'))
        for key, value in {**losses}.items():
            writer.add_scalar(key, value.item(), global_step=ep)

    # Evaluating model performance on the full MAPS songs in the test split
    print('Training finished, now evaluating on the MAPS test split (full songs)')
    with torch.no_grad():
        model = model.eval()
        metrics = evaluate_wo_velocity(tqdm(full_validation), model, reconstruction=False,
                                       save_path=os.path.join(logdir,'./MIDI_results'))

    for key, values in metrics.items():
        if key.startswith('metric/'):
            _, category, name = key.split('/')
            print(f'{category:>32} {name:25}: {np.mean(values):.3f} ± {np.std(values):.3f}')

    export_path = os.path.join(logdir, 'result_dict')
    # Fix: close the file handle instead of leaking it via pickle.dump(open(...)).
    with open(export_path, 'wb') as f:
        pickle.dump(metrics, f)
| 5,952 | 36.677215 | 142 | py |
ReconVAT | ReconVAT-master/evaluate.py | import numpy as np
from sacred import Experiment
from sacred.commands import print_config, save_config
from sacred.observers import FileStorageObserver
from torch.nn.utils import clip_grad_norm_
from torch.optim.lr_scheduler import StepLR
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
import torch
from tqdm import tqdm
from model import *
from evaluate import *
import pickle
import shutil
import os
ex = Experiment('evaluate')
log = True
@ex.config
def config():
    # Sacred config scope: every local assignment below is registered as a
    # named, overridable config entry — do not rename these variables.
    spec = 'Mel'
    attention_mode = 'onset'
    mode = 'imagewise'
    # Checkpoint file name under 'trained_weights/'; its name also encodes
    # the model type and attention mode parsed in train().
    weight_file = None
    output_folder = 'results'
    inference=True
    LSTM = True
    onset = True
    device = 'cuda:0'
    refresh=False
    cat_feat = False
    Simple_attention=True
    logdir = os.path.join('results', weight_file)
@ex.automain
def train(spec, inference, refresh, device, logdir, weight_file, mode, LSTM, onset, Simple_attention, cat_feat):
    """Evaluate a trained checkpoint on the MAPS test split.

    The model type, attention mode and optional modifier are parsed from
    the checkpoint file name. Arguments are injected by sacred.
    """
    if inference:
        inference_state = 'infer'
    else:
        inference_state = 'no_infer'
    print_config(ex.current_run)
    validation_dataset = MAPS(groups=['ENSTDkAm', 'ENSTDkCl'], sequence_length=None, device=device, refresh=refresh)

    weight_path = os.path.join('trained_weights', weight_file)
    # The weight file name encodes hyper-parameters, e.g. Type-..-..-mode-modifier.
    model_type = os.path.basename(weight_path).split('-')[0]
    attention_mode = os.path.basename(weight_path).split('-')[3]
    if attention_mode=='feat':
        attention_mode='activation' # change the flag to match the weight name
    try:
        modifier = os.path.basename(weight_path).split('-')[4]
        if modifier=='no_biLSTM':
            LSTM=False
        elif modifier=='no_onset':
            onset=False
    # Fix: was a bare `except:` — only a missing 5th name segment is expected.
    except IndexError:
        modifier='Null'

    if model_type=='Original':
        model = OnsetsAndFrames(N_BINS, MAX_MIDI - MIN_MIDI + 1, log=log, mode=mode,
                                spec=spec, LSTM=LSTM, onset_stack=onset)
    elif model_type=='Attention':
        print('run me')
        model = OnsetsAndFrames_with_fast_local_attn(N_BINS, MAX_MIDI - MIN_MIDI + 1,
                                                     log=log, mode=mode, spec=spec,
                                                     LSTM=LSTM, onset_stack=onset,
                                                     attention_mode=attention_mode)
    elif model_type=='Simple':
        model = SimpleModel(N_BINS, MAX_MIDI - MIN_MIDI + 1, log=log, mode=mode, spec=spec,
                            device=device, w_size=int(modifier[2:]), attention=Simple_attention, layers=1,
                            cat_feat=False, onset=False)
    model.to(device)
    model.load_my_state_dict(torch.load(weight_path+'.pt'))

    with torch.no_grad():
        model.eval()
        metrics = evaluate_wo_velocity(tqdm(validation_dataset), model, reconstruction=False,
                                       save_path=os.path.join(logdir,f'./MIDI_results-{inference_state}-{modifier}'),
                                       onset=inference)

    for key, values in metrics.items():
        if key.startswith('metric/'):
            _, category, name = key.split('/')
            print(f'{category:>32} {name:25}: {np.mean(values)*100:.3f} ± {np.std(values)*100:.3f}')

    export_path = os.path.join(logdir, f'result_dict_{inference_state}-{modifier}')
    # Fix: close the file handle instead of leaking it via pickle.dump(open(...)).
    with open(export_path, 'wb') as f:
        pickle.dump(metrics, f)
ReconVAT | ReconVAT-master/train_UNet_Onset_VAT.py | import os
from datetime import datetime
import pickle
import numpy as np
from sacred import Experiment
from sacred.commands import print_config, save_config
from sacred.observers import FileStorageObserver
from torch.optim.lr_scheduler import StepLR, CyclicLR
from torch.utils.data import DataLoader
from tqdm import tqdm
from model import *
ex = Experiment('train_original')
# parameters for the network
ds_ksize, ds_stride = (2,2),(2,2)
mode = 'imagewise'
sparsity = 1
output_channel = 2
logging_freq = 100
saving_freq = 200
@ex.config
def config():
    # Sacred config scope: every local assignment below is registered as a
    # named, overridable config entry — do not rename these variables.
    root = 'runs'
    # logdir = f'runs_AE/test' + '-' + datetime.now().strftime('%y%m%d-%H%M%S')
    # Choosing GPU to use
    # GPU = '0'
    # os.environ['CUDA_VISIBLE_DEVICES']=str(GPU)
    onset_stack=True
    device = 'cuda:0'
    log = True
    w_size = 31
    spec = 'Mel'
    resume_iteration = None
    train_on = 'MAPS'
    n_heads=4
    position=True
    # VAT (virtual adversarial training) hyper-parameters.
    iteration = 10
    VAT_start = 0
    alpha = 1
    VAT=True
    XI= 1e-6
    eps=2
    small = False
    supersmall = False
    KL_Div = False
    reconstruction = False
    batch_size = 8
    train_batch_size = 8
    sequence_length = 327680
    # Halve memory use on GPUs with less than ~10 GB.
    if torch.cuda.is_available() and torch.cuda.get_device_properties(torch.cuda.current_device()).total_memory < 10e9:
        batch_size //= 2
        sequence_length //= 2
        print(f'Reducing batch size to {batch_size} and sequence_length to {sequence_length} to save memory')
    epoches = 20000
    step_size_up = 100
    max_lr = 1e-4
    learning_rate = 1e-3
    # base_lr = learning_rate
    learning_rate_decay_steps = 1000
    learning_rate_decay_rate = 0.98
    leave_one_out = None
    clip_gradient_norm = 3
    validation_length = sequence_length
    refresh = False
    logdir = f'{root}/Unet_Onset-recons={reconstruction}-XI={XI}-eps={eps}-alpha={alpha}-train_on=small_{small}_{train_on}-w_size={w_size}-n_heads={n_heads}-lr={learning_rate}-'+ datetime.now().strftime('%y%m%d-%H%M%S')
ex.observers.append(FileStorageObserver.create(logdir)) # saving source code
@ex.automain
def train(spec, resume_iteration, train_on, batch_size, sequence_length,w_size, n_heads, small, train_batch_size,
          learning_rate, learning_rate_decay_steps, learning_rate_decay_rate, leave_one_out, position, alpha, KL_Div,
          clip_gradient_norm, validation_length, refresh, device, epoches, logdir, log, iteration, VAT_start, VAT, XI, eps,
          reconstruction, supersmall):
    """Train the UNet_Onset model, optionally with VAT on unlabelled data,
    then evaluate on the full test split. Arguments injected by sacred."""
    print_config(ex.current_run)
    supervised_set, unsupervised_set, validation_dataset, full_validation = prepare_VAT_dataset(
        sequence_length=sequence_length,
        validation_length=sequence_length,
        refresh=refresh,
        device=device,
        small=small,
        supersmall=supersmall,
        dataset=train_on)
    if VAT:
        unsupervised_loader = DataLoader(unsupervised_set, batch_size, shuffle=True, drop_last=True)

    supervised_loader = DataLoader(supervised_set, train_batch_size, shuffle=True, drop_last=True)
    valloader = DataLoader(validation_dataset, 4, shuffle=False, drop_last=True)
    batch_visualize = next(iter(valloader)) # Getting one fixed batch for visualization

    ds_ksize, ds_stride = (2,2),(2,2)
    if resume_iteration is None:
        model = UNet_Onset(ds_ksize,ds_stride, log=log, reconstruction=reconstruction,
                           mode=mode, spec=spec, device=device, XI=XI, eps=eps)
        model.to(device)
        optimizer = torch.optim.Adam(model.parameters(), learning_rate)
        resume_iteration = 0
    else: # Loading checkpoints and continue training
        trained_dir='trained_MAPS' # Assume that the checkpoint is in this folder
        model_path = os.path.join(trained_dir, f'{resume_iteration}.pt')
        model = torch.load(model_path)
        optimizer = torch.optim.Adam(model.parameters(), learning_rate)
        optimizer.load_state_dict(torch.load(os.path.join(trained_dir, 'last-optimizer-state.pt')))
    summary(model)
    scheduler = StepLR(optimizer, step_size=learning_rate_decay_steps, gamma=learning_rate_decay_rate)

    for ep in range(1, epoches+1):
        # Only pass the unlabelled loader when VAT is enabled.
        if VAT==True:
            predictions, losses, optimizer = train_VAT_model(model, iteration, ep, supervised_loader, unsupervised_loader,
                                                             optimizer, scheduler, clip_gradient_norm, alpha, VAT, VAT_start)
        else:
            predictions, losses, optimizer = train_VAT_model(model, iteration, ep, supervised_loader, None,
                                                             optimizer, scheduler, clip_gradient_norm, alpha, VAT, VAT_start)
        loss = sum(losses.values())

        # Logging results to tensorboard
        if ep == 1:
            writer = SummaryWriter(logdir) # create tensorboard logger
        if ep < VAT_start:
            tensorboard_log(batch_visualize, model, validation_dataset, supervised_loader,
                            ep, logging_freq, saving_freq, n_heads, logdir, w_size, writer,
                            False, VAT_start, reconstruction)
        else:
            tensorboard_log(batch_visualize, model, validation_dataset, supervised_loader,
                            ep, logging_freq, saving_freq, n_heads, logdir, w_size, writer,
                            True, VAT_start, reconstruction)

        # Saving model checkpoints periodically
        if (ep)%saving_freq == 0:
            torch.save(model.state_dict(), os.path.join(logdir, f'model-{ep}.pt'))
            torch.save(optimizer.state_dict(), os.path.join(logdir, 'last-optimizer-state.pt'))
        for key, value in {**losses}.items():
            writer.add_scalar(key, value.item(), global_step=ep)

    # Evaluating model performance on the full MAPS songs in the test split
    print('Training finished, now evaluating on the MAPS test split (full songs)')
    with torch.no_grad():
        model = model.eval()
        metrics = evaluate_wo_velocity(tqdm(full_validation), model, reconstruction=False,
                                       save_path=os.path.join(logdir,'./MIDI_results'))

    for key, values in metrics.items():
        if key.startswith('metric/'):
            _, category, name = key.split('/')
            print(f'{category:>32} {name:25}: {np.mean(values):.3f} ± {np.std(values):.3f}')

    export_path = os.path.join(logdir, 'result_dict')
    # Fix: close the file handle instead of leaking it via pickle.dump(open(...)).
    with open(export_path, 'wb') as f:
        pickle.dump(metrics, f)
| 7,486 | 41.782857 | 219 | py |
ReconVAT | ReconVAT-master/transcribe_files.py | import pickle
import os
import numpy as np
from model import *
from sacred import Experiment
from sacred.commands import print_config, save_config
from sacred.observers import FileStorageObserver
ex = Experiment('transcription')
def transcribe2midi(data, model, model_type, onset_threshold=0.5, frame_threshold=0.5, save_path=None, reconstruction=True, onset=True, pseudo_onset=False, rule='rule2', VAT=False):
    """Transcribe every item in `data` with `model` and write one MIDI file
    per item into `save_path`, prefixed with the model type."""
    sec_per_frame = HOP_LENGTH / SAMPLE_RATE
    for item in data:
        pred = model.transcribe(item)
        # Drop the batch dim and clamp negatives, in place, on the
        # piano-roll style outputs.
        for key in ('frame', 'onset', 'frame2', 'onset2'):
            if key in pred:
                pred[key].squeeze_(0).relu_()
        p_est, i_est = extract_notes_wo_velocity(pred['onset'], pred['frame'], onset_threshold, frame_threshold, rule=rule)
        t_est, f_est = notes_to_frames(p_est, i_est, pred['frame'].shape)
        # Converting time steps to seconds and midi number to frequency
        i_est = (i_est * sec_per_frame).reshape(-1, 2)
        p_est = np.array([midi_to_hz(MIN_MIDI + midi) for midi in p_est])
        t_est = t_est.astype(np.float64) * sec_per_frame
        f_est = [np.array([midi_to_hz(MIN_MIDI + midi) for midi in freqs]) for freqs in f_est]
        stem = os.path.basename(item['path'])[:-4]
        midi_path = os.path.join(save_path, model_type + '-' + stem + 'mid')
        print(f'midi_path = {midi_path}')
        # Fixed velocity of 127 for every note (no velocity prediction).
        save_midi(midi_path, p_est, i_est, [127] * len(p_est))
log=True
mode='imagewise'
spec='Mel'
root = 'Application'
input_path = os.path.join(root, 'Input')
output_path = os.path.join(root, 'Output')
@ex.config
def config():
    # Sacred config scope: each assignment below is an overridable config entry.
    device='cuda:0'
    # Which pretrained model to run: 'ReconVAT' or 'baseline_Multi_Inst'.
    model_type='ReconVAT'
    # instrument='string'
@ex.automain
def main(device, model_type):
    """Transcribe every audio file in the Input folder to MIDI in Output.

    NOTE(review): an unrecognised model_type leaves `model`/`weight_path`
    undefined and raises NameError below.
    """
    # Load audios from the Input files
    application_dataset = Application_Dataset(input_path,device=device)
    # Choose models (weight paths are hard-coded per model type)
    if model_type=='ReconVAT':
        model = UNet((2,2),(2,2), log=log, reconstruction=True, mode=mode, spec=spec, device=device)
        weight_path = 'Weight/String_MusicNet/Unet_R_VAT-XI=1e-06-eps=1.3-String_MusicNet-lr=0.001/weight.pt'
    elif model_type=='baseline_Multi_Inst':
        model = Semantic_Segmentation(torch.empty(1,1,640,N_BINS), 1, device=device)
        weight_path = 'Weight/String_MusicNet/baseline_Multi_Inst/weight.pt'
    # Load weights
    print(f'Loading model weight')
    model.load_state_dict(torch.load(weight_path, map_location=device))
    model.to(device)
    print(f'Loading done')
    print(f'Transcribing Music')
    transcribe2midi(tqdm(application_dataset), model, model_type, reconstruction=False,
                    save_path=output_path)
| 2,933 | 36.615385 | 182 | py |
ReconVAT | ReconVAT-master/train_baseline_Multi_Inst.py | import os
from datetime import datetime
import pickle
import numpy as np
from sacred import Experiment
from sacred.commands import print_config, save_config
from sacred.observers import FileStorageObserver
from torch.optim.lr_scheduler import StepLR, CyclicLR
from torch.utils.data import DataLoader, ConcatDataset
from tqdm import tqdm
from model import *
ex = Experiment('train_original')
# parameters for the network
ds_ksize, ds_stride = (2,2),(2,2)
mode = 'imagewise'
sparsity = 1
output_channel = 2
logging_freq = 100
saving_freq = 200
@ex.config
def config():
    # Sacred config scope: every local assignment below is registered as a
    # named, overridable config entry — do not rename these variables.
    root = 'runs'
    # logdir = f'runs_AE/test' + '-' + datetime.now().strftime('%y%m%d-%H%M%S')
    # Choosing GPU to use
    # GPU = '0'
    # os.environ['CUDA_VISIBLE_DEVICES']=str(GPU)
    onset_stack=True
    device = 'cuda:0'
    log = True
    w_size = 31
    spec = 'Mel'
    resume_iteration = None
    train_on = 'String'
    n_heads=4
    position=True
    # VAT hyper-parameters (VAT disabled by default for this baseline).
    iteration = 10
    VAT_start = 0
    alpha = 1
    VAT=False
    XI= 1e-6
    eps=2
    small = False
    supersmall = False
    KL_Div = False
    reconstruction = False
    batch_size = 8
    # Use batch size 1 in the smallest data regime.
    if small==True and supersmall==True:
        train_batch_size=1
    else:
        train_batch_size = 8
    sequence_length = 327680
    # Halve memory use on GPUs with less than ~10 GB.
    if torch.cuda.is_available() and torch.cuda.get_device_properties(torch.cuda.current_device()).total_memory < 10e9:
        batch_size //= 2
        sequence_length //= 2
        print(f'Reducing batch size to {batch_size} and sequence_length to {sequence_length} to save memory')
    epoches = 20000
    step_size_up = 100
    max_lr = 1e-4
    learning_rate = 1e-3
    # base_lr = learning_rate
    learning_rate_decay_steps = 1000
    learning_rate_decay_rate = 0.98
    leave_one_out = None
    clip_gradient_norm = 3
    validation_length = sequence_length
    refresh = False
    logdir = f'{root}/VAT_Segmentation={reconstruction}-KL={KL_Div}-XI={XI}-eps={eps}-alpha={alpha}-train_on=small_{small}_{train_on}-w_size={w_size}-n_heads={n_heads}-lr={learning_rate}-'+ datetime.now().strftime('%y%m%d-%H%M%S')
ex.observers.append(FileStorageObserver.create(logdir)) # saving source code
@ex.automain
def train(spec, resume_iteration, train_on, batch_size, sequence_length,w_size, n_heads, small, train_batch_size, supersmall,
          learning_rate, learning_rate_decay_steps, learning_rate_decay_rate, leave_one_out, position, alpha, KL_Div,
          clip_gradient_norm, validation_length, refresh, device, epoches, logdir, log, iteration, VAT_start, VAT, XI, eps,
          reconstruction):
    """Train the multi-instrument semantic-segmentation baseline (optionally
    with VAT) and evaluate on the full test split. Arguments injected by sacred."""
    print_config(ex.current_run)
    supervised_set, unsupervised_set, validation_dataset, full_validation = prepare_VAT_dataset(
        sequence_length=sequence_length,
        validation_length=sequence_length,
        refresh=refresh,
        device=device,
        small=small,
        supersmall=supersmall,
        dataset=train_on)
    if VAT:
        unsupervised_loader = DataLoader(unsupervised_set, batch_size, shuffle=True, drop_last=True)

    # MAPS has enough validation songs for a fixed batch of 4; other sets
    # use the whole validation split as one batch.
    if train_on=='MAPS':
        val_batch_size = 4
    else:
        val_batch_size = len(validation_dataset)
    supervised_loader = DataLoader(supervised_set, train_batch_size, shuffle=True, drop_last=True)
    valloader = DataLoader(validation_dataset, val_batch_size, shuffle=False, drop_last=True)
    batch_visualize = next(iter(valloader)) # Getting one fixed batch for visualization

    ds_ksize, ds_stride = (2,2),(2,2)
    if resume_iteration is None:
        # Need a dummy input to inference the model size
        model = Semantic_Segmentation(torch.empty(1,1,640,N_BINS), 1, device=device)
        model.to(device)
        optimizer = torch.optim.Adam(model.parameters(), learning_rate)
        resume_iteration = 0
    else: # Loading checkpoints and continue training
        trained_dir='trained_MAPS' # Assume that the checkpoint is in this folder
        model_path = os.path.join(trained_dir, f'{resume_iteration}.pt')
        model = torch.load(model_path)
        optimizer = torch.optim.Adam(model.parameters(), learning_rate)
        optimizer.load_state_dict(torch.load(os.path.join(trained_dir, 'last-optimizer-state.pt')))
    summary(model)
    scheduler = StepLR(optimizer, step_size=learning_rate_decay_steps, gamma=learning_rate_decay_rate)

    for ep in range(1, epoches+1):
        # Only pass the unlabelled loader when VAT is enabled.
        if VAT==True:
            predictions, losses, optimizer = train_VAT_model(model, iteration, ep, supervised_loader, unsupervised_loader,
                                                             optimizer, scheduler, clip_gradient_norm, alpha, VAT, VAT_start)
        else:
            predictions, losses, optimizer = train_VAT_model(model, iteration, ep, supervised_loader, None,
                                                             optimizer, scheduler, clip_gradient_norm, alpha, VAT, VAT_start)
        loss = sum(losses.values())

        # Logging results to tensorboard
        if ep == 1:
            writer = SummaryWriter(logdir) # create tensorboard logger
        if ep < VAT_start or VAT==False:
            tensorboard_log(batch_visualize, model, validation_dataset, supervised_loader,
                            ep, logging_freq, saving_freq, n_heads, logdir, w_size, writer,
                            False, VAT_start, reconstruction)
        else:
            tensorboard_log(batch_visualize, model, validation_dataset, supervised_loader,
                            ep, logging_freq, saving_freq, n_heads, logdir, w_size, writer,
                            True, VAT_start, reconstruction)

        # Saving model checkpoints periodically
        if (ep)%saving_freq == 0:
            torch.save(model.state_dict(), os.path.join(logdir, f'model-{ep}.pt'))
            torch.save(optimizer.state_dict(), os.path.join(logdir, 'last-optimizer-state.pt'))
        for key, value in {**losses}.items():
            writer.add_scalar(key, value.item(), global_step=ep)

    # Evaluating model performance on the full MAPS songs in the test split
    print('Training finished, now evaluating on the MAPS test split (full songs)')
    with torch.no_grad():
        model = model.eval()
        metrics = evaluate_wo_velocity(tqdm(full_validation), model, reconstruction=False,
                                       save_path=os.path.join(logdir,'./MIDI_results'))

    for key, values in metrics.items():
        if key.startswith('metric/'):
            _, category, name = key.split('/')
            print(f'{category:>32} {name:25}: {np.mean(values):.3f} ± {np.std(values):.3f}')

    export_path = os.path.join(logdir, 'result_dict')
    # Fix: close the file handle instead of leaking it via pickle.dump(open(...)).
    with open(export_path, 'wb') as f:
        pickle.dump(metrics, f)
| 7,722 | 41.202186 | 230 | py |
ReconVAT | ReconVAT-master/train_baseline_Prestack.py | import os
from datetime import datetime
import pickle
import numpy as np
from sacred import Experiment
from sacred.commands import print_config, save_config
from sacred.observers import FileStorageObserver
from torch.optim.lr_scheduler import StepLR, CyclicLR
from torch.utils.data import DataLoader, ConcatDataset
from tqdm import tqdm
from model import *
ex = Experiment('train_original')
# parameters for the network
ds_ksize, ds_stride = (2,2),(2,2)
mode = 'imagewise'
sparsity = 1
output_channel = 2
logging_freq = 100
saving_freq = 100
# file_path = 'Retrain_Prestack-lr=0.0001210325-141510'
@ex.config
def config():
    """Sacred config scope for the baseline Prestack run.

    Every local assignment below becomes an injectable config entry for the
    ``train`` function defined with ``@ex.automain``.
    """
    root = 'runs'
    # logdir = f'runs_AE/test' + '-' + datetime.now().strftime('%y%m%d-%H%M%S')
    # Choosing GPU to use
    # GPU = '0'
    # os.environ['CUDA_VISIBLE_DEVICES']=str(GPU)
    onset_stack=True
    device = 'cuda:0'
    log = True          # take log of the spectrogram before normalization
    w_size = 31         # local attention window size
    spec = 'Mel'
    resume_iteration = None
    train_on = 'String'
    n_heads=4
    position=True
    iteration = 10
    VAT_start = 0
    alpha = 1
    VAT=True
    XI= 1e-6            # VAT power-iteration step size
    eps=1.3             # VAT perturbation magnitude
    small = True
    KL_Div = False
    reconstruction = False

    batch_size = 1
    train_batch_size = 1
    sequence_length = 327680//8
    # Halve batch size and sequence length on GPUs with < ~10 GB memory
    if torch.cuda.is_available() and torch.cuda.get_device_properties(torch.cuda.current_device()).total_memory < 10e9:
        batch_size //= 2
        sequence_length //= 2
        print(f'Reducing batch size to {batch_size} and sequence_length to {sequence_length} to save memory')

    epoches = 20000
    step_size_up = 100
    max_lr = 1e-4
    learning_rate = 1e-5
    # base_lr = learning_rate

    learning_rate_decay_steps = 1000
    learning_rate_decay_rate = 0.98

    leave_one_out = None

    clip_gradient_norm = 3

    validation_length = sequence_length
    refresh = False

    logdir = f'{root}/baseline_Prestack-'+ datetime.now().strftime('%y%m%d-%H%M%S')
    # Attach a file observer so sacred snapshots the source code and config
    ex.observers.append(FileStorageObserver.create(logdir)) # saving source code
@ex.automain
def train(spec, resume_iteration, train_on, batch_size, sequence_length,w_size, n_heads, small, train_batch_size,
          learning_rate, learning_rate_decay_steps, learning_rate_decay_rate, leave_one_out, position, alpha, KL_Div,
          clip_gradient_norm, validation_length, refresh, device, epoches, logdir, log, iteration, VAT_start, VAT, XI, eps,
          reconstruction,root):
    """Supervised training loop for the Prestack baseline model.

    All arguments are injected by sacred from the ``config`` scope above.
    Trains for ``epoches`` epochs, logs to tensorboard, checkpoints every
    ``saving_freq`` epochs, then evaluates on the full test split.
    """
    print_config(ex.current_run)

    # Build the train/validation splits (the unsupervised set is unused here)
    supervised_set, unsupervised_set, validation_dataset, full_validation = prepare_VAT_dataset(
                                                                              sequence_length=sequence_length,
                                                                              validation_length=sequence_length,
                                                                              refresh=refresh,
                                                                              device=device,
                                                                              small=small,
                                                                              supersmall=True,
                                                                              dataset=train_on)
    # Cap the validation batch size at 4
    if len(validation_dataset)>4:
        val_batch_size=4
    else:
        val_batch_size = len(validation_dataset)

    supervised_loader = DataLoader(supervised_set, train_batch_size, shuffle=True, drop_last=True)
    valloader = DataLoader(validation_dataset, val_batch_size, shuffle=False, drop_last=True)
    batch_visualize = next(iter(valloader)) # Getting one fixed batch for visualization

    ds_ksize, ds_stride = (2,2),(2,2)

    model = Prestack_Model()
    model.to(device)
    optimizer = torch.optim.Adam(model.parameters(), learning_rate)
    # This model always crashes, need to keep saving weights and load it back when crashed.
    # weight_path = os.path.join(root, file_path, 'model-400.pt')
    # weight_dict = torch.load(weight_path, map_location=device)
    # model.load_state_dict(weight_dict)

    summary(model)
    # scheduler = torch.optim.lr_scheduler.CyclicLR(optimizer, base_lr=base_lr, max_lr=max_lr, step_size_up=step_size_up,cycle_momentum=False)
    scheduler = StepLR(optimizer, step_size=learning_rate_decay_steps, gamma=learning_rate_decay_rate)
    # loop = tqdm(range(resume_iteration + 1, iterations + 1))

    print(f'supervised_loader')
    for ep in range(1, epoches+1):
        predictions, losses, optimizer = train_model(model, ep, supervised_loader,
                                                     optimizer, scheduler, clip_gradient_norm)
        loss = sum(losses.values())

        # Logging results to tensorboard
        if ep == 1:
            writer = SummaryWriter(logdir) # create tensorboard logger
        tensorboard_log_without_VAT(batch_visualize, model, validation_dataset, supervised_loader,
                                    ep, logging_freq, saving_freq, n_heads, logdir, w_size, writer,
                                    False, VAT_start, reconstruction)

        # Saving model
        if (ep)%saving_freq == 0:
            torch.save(model.state_dict(), os.path.join(logdir, f'model-{ep}.pt'))
            torch.save(optimizer.state_dict(), os.path.join(logdir, 'last-optimizer-state.pt'))
        # Per-epoch scalar logging of every loss term
        for key, value in {**losses}.items():
            writer.add_scalar(key, value.item(), global_step=ep)

    # Evaluating model performance on the full MAPS songs in the test split
    print('Training finished, now evaluating on the MAPS test split (full songs)')
    with torch.no_grad():
        model = model.eval()
        metrics = evaluate_wo_velocity(tqdm(full_validation), model, reconstruction=False,
                                       save_path=os.path.join(logdir,'./MIDI_results'))

    for key, values in metrics.items():
        if key.startswith('metric/'):
            _, category, name = key.split('/')
            print(f'{category:>32} {name:25}: {np.mean(values):.3f} ± {np.std(values):.3f}')

    export_path = os.path.join(logdir, 'result_dict')
    pickle.dump(metrics, open(export_path, 'wb'))
| 6,274 | 37.030303 | 142 | py |
ReconVAT | ReconVAT-master/train_baseline_onset_frame_VAT.py | import os
from datetime import datetime
import pickle
import numpy as np
from sacred import Experiment
from sacred.commands import print_config, save_config
from sacred.observers import FileStorageObserver
from torch.optim.lr_scheduler import StepLR
from torch.utils.data import DataLoader, ConcatDataset
from tqdm import tqdm
from model import *
ex = Experiment('train_original')
# parameters for the network
ds_ksize, ds_stride = (2,2),(2,2)
mode = 'imagewise'
sparsity = 1
output_channel = 2
logging_freq = 100
saving_freq = 1000
@ex.config
def config():
    """Sacred config scope for the Onsets-and-Frames baseline.

    Every local assignment below becomes an injectable config entry for the
    ``train`` function defined with ``@ex.automain``.
    """
    root = 'runs'
    # logdir = f'runs_AE/test' + '-' + datetime.now().strftime('%y%m%d-%H%M%S')
    # Choosing GPU to use
    # GPU = '0'
    # os.environ['CUDA_VISIBLE_DEVICES']=str(GPU)
    onset_stack=True
    device = 'cuda:0'
    log = True          # take log of the spectrogram before normalization
    w_size = 31
    model_complexity = 48
    spec = 'Mel'
    resume_iteration = None
    train_on = 'String'
    iteration = 10
    alpha = 1
    VAT=False
    XI= 1e-6            # VAT power-iteration step size
    eps=1e-1            # VAT perturbation magnitude
    VAT_mode = 'all'
    model_name = 'onset_frame'
    VAT_start = 0
    small = True

    batch_size = 8
    train_batch_size = 8
    sequence_length = 327680
    # Halve batch size and sequence length on GPUs with < ~10 GB memory
    if torch.cuda.is_available() and torch.cuda.get_device_properties(torch.cuda.current_device()).total_memory < 10e9:
        batch_size //= 2
        sequence_length //= 2
        print(f'Reducing batch size to {batch_size} and sequence_length to {sequence_length} to save memory')

    epoches = 20000
    learning_rate = 5e-4
    learning_rate_decay_steps = 10000
    learning_rate_decay_rate = 0.98

    leave_one_out = None

    clip_gradient_norm = 3

    validation_length = sequence_length
    refresh = False

    logdir = f'{root}/baseline_Onset_Frame-'+ datetime.now().strftime('%y%m%d-%H%M%S')
    # Attach a file observer so sacred snapshots the source code and config
    ex.observers.append(FileStorageObserver.create(logdir)) # saving source code
@ex.automain
def train(spec, resume_iteration, train_on, batch_size, sequence_length,w_size, model_complexity, VAT_mode, VAT_start,
          learning_rate, learning_rate_decay_steps, learning_rate_decay_rate, leave_one_out, alpha, model_name, train_batch_size,
          clip_gradient_norm, validation_length, refresh, device, epoches, logdir, log, iteration, VAT, XI, eps, small):
    """Training loop for the Onsets-and-Frames family of baselines.

    All arguments are injected by sacred from the ``config`` scope above.
    Mixes the target dataset with MAPS, picks one of four architectures
    via ``model_name``, then trains/logs/checkpoints and finally evaluates
    on the full test split.
    """
    print_config(ex.current_run)
    supervised_set, unsupervised_set, validation_dataset, full_validation = prepare_VAT_dataset(sequence_length=sequence_length,
                                                                                               validation_length=sequence_length,
                                                                                               refresh=refresh,
                                                                                               device=device,
                                                                                               small=small,
                                                                                               supersmall=False,
                                                                                               dataset=train_on)
    # Also pull (a small subset of) MAPS and merge it into both splits
    MAPS_supervised_set, MAPS_unsupervised_set, MAPS_validation_dataset, _ = prepare_VAT_dataset(
                                                                               sequence_length=sequence_length,
                                                                               validation_length=sequence_length,
                                                                               refresh=refresh,
                                                                               device=device,
                                                                               small=small,
                                                                               supersmall=True,
                                                                               dataset='MAPS')
    supervised_set = ConcatDataset([supervised_set, MAPS_supervised_set])
    unsupervised_set = ConcatDataset([unsupervised_set, MAPS_unsupervised_set])

    unsupervised_loader = DataLoader(unsupervised_set, batch_size, shuffle=True, drop_last=True)
    supervised_loader = DataLoader(supervised_set, train_batch_size, shuffle=True, drop_last=True)
    valloader = DataLoader(validation_dataset, len(validation_dataset), shuffle=False, drop_last=True)
    batch_visualize = next(iter(valloader)) # Getting one fixed batch for visualization

    if resume_iteration is None:
        # Fresh start: choose the architecture by name
        if model_name=='onset_frame':
            model = OnsetsAndFrames_VAT_full(N_BINS, MAX_MIDI - MIN_MIDI + 1, model_complexity=model_complexity,
                                             log=log, mode=mode, spec=spec, XI=XI, eps=eps, VAT_mode=VAT_mode)
        elif model_name=='frame':
            model = Frame_stack_VAT(N_BINS, MAX_MIDI - MIN_MIDI + 1, model_complexity=model_complexity,
                                    log=log, mode=mode, spec=spec, XI=XI, eps=eps, VAT_mode=VAT_mode)
        elif model_name=='onset':
            model = Onset_stack_VAT(N_BINS, MAX_MIDI - MIN_MIDI + 1, model_complexity=model_complexity,
                                    log=log, mode=mode, spec=spec, XI=XI, eps=eps, VAT_mode=VAT_mode)
        elif model_name=='attention':
            model = Frame_stack_attention_VAT(N_BINS, MAX_MIDI - MIN_MIDI + 1, model_complexity=model_complexity,
                                              log=log, mode=mode, spec=spec, XI=XI, eps=eps, VAT_mode=VAT_mode)
        model.to(device)
        optimizer = torch.optim.Adam(model.parameters(), learning_rate)
        resume_iteration = 0
    else: # Loading checkpoints and continue training
        trained_dir='trained_MAPS' # Assume that the checkpoint is in this folder
        model_path = os.path.join(trained_dir, f'{resume_iteration}.pt')
        model = torch.load(model_path)
        optimizer = torch.optim.Adam(model.parameters(), learning_rate)
        optimizer.load_state_dict(torch.load(os.path.join(trained_dir, 'last-optimizer-state.pt')))

    summary(model)
    scheduler = StepLR(optimizer, step_size=learning_rate_decay_steps, gamma=learning_rate_decay_rate)
    # loop = tqdm(range(resume_iteration + 1, iterations + 1))

    for ep in range(1, epoches+1):
        model.train()
        predictions, losses, optimizer = train_VAT_model(model, iteration, ep, supervised_loader, unsupervised_loader,
                                                         optimizer, scheduler, clip_gradient_norm, alpha, VAT, VAT_start)
        loss = sum(losses.values())

        # Logging results to tensorboard
        if ep == 1:
            writer = SummaryWriter(logdir) # create tensorboard logger
        # Before VAT_start (or with VAT off) log without adversarial visualizations
        if ep < VAT_start or VAT==False:
            tensorboard_log(batch_visualize, model, validation_dataset, supervised_loader,
                            ep, logging_freq, saving_freq, 8, logdir, w_size, writer, False, VAT_start, reconstruction=False)
        else:
            tensorboard_log(batch_visualize, model, validation_dataset, supervised_loader,
                            ep, logging_freq, saving_freq, 8, logdir, w_size, writer, True, VAT_start, reconstruction=False)

        # Saving model
        if (ep)%saving_freq == 0:
            torch.save(model.state_dict(), os.path.join(logdir, f'model-{ep}.pt'))
            torch.save(optimizer.state_dict(), os.path.join(logdir, 'last-optimizer-state.pt'))
        # Per-epoch scalar logging of every loss term
        for key, value in {**losses}.items():
            writer.add_scalar(key, value.item(), global_step=ep)

    # Evaluating model performance on the full MAPS songs in the test split
    print('Training finished, now evaluating on the MAPS test split (full songs)')
    with torch.no_grad():
        model = model.eval()
        metrics = evaluate_wo_velocity(tqdm(full_validation), model, reconstruction=False,
                                       save_path=os.path.join(logdir,'./MIDI_results'))

    for key, values in metrics.items():
        if key.startswith('metric/'):
            _, category, name = key.split('/')
            print(f'{category:>32} {name:25}: {np.mean(values):.3f} ± {np.std(values):.3f}')

    export_path = os.path.join(logdir, 'result_dict')
    pickle.dump(metrics, open(export_path, 'wb'))
| 8,271 | 45.47191 | 136 | py |
ReconVAT | ReconVAT-master/train_UNet_VAT.py | import os
from datetime import datetime
import pickle
import numpy as np
from sacred import Experiment
from sacred.commands import print_config, save_config
from sacred.observers import FileStorageObserver
from torch.optim.lr_scheduler import StepLR, CyclicLR
from torch.utils.data import DataLoader, ConcatDataset
from tqdm import tqdm
from model import *
ex = Experiment('train_original')
# parameters for the network
ds_ksize, ds_stride = (2,2),(2,2)
mode = 'imagewise'
sparsity = 1
output_channel = 2
logging_freq = 100
saving_freq = 200
@ex.config
def config():
    """Sacred config scope for the UNet + VAT run.

    Every local assignment below becomes an injectable config entry for the
    ``train`` function defined with ``@ex.automain``.
    """
    root = 'runs'
    # logdir = f'runs_AE/test' + '-' + datetime.now().strftime('%y%m%d-%H%M%S')
    # Choosing GPU to use
    # GPU = '0'
    # os.environ['CUDA_VISIBLE_DEVICES']=str(GPU)
    onset_stack=True
    device = 'cuda:0'
    log = True          # take log of the spectrogram before normalization
    w_size = 31
    spec = 'Mel'
    resume_iteration = None
    train_on = 'Wind'
    n_heads=4
    position=True
    iteration = 10
    VAT_start = 0
    alpha = 1
    VAT=True
    XI= 1e-6            # VAT power-iteration step size
    eps=2               # VAT perturbation magnitude
    small = False
    KL_Div = False
    reconstruction = False

    batch_size = 8
    train_batch_size = 1
    sequence_length = 327680
    # Halve batch size and sequence length on GPUs with < ~10 GB memory
    if torch.cuda.is_available() and torch.cuda.get_device_properties(torch.cuda.current_device()).total_memory < 10e9:
        batch_size //= 2
        sequence_length //= 2
        print(f'Reducing batch size to {batch_size} and sequence_length to {sequence_length} to save memory')

    epoches = 20000
    step_size_up = 100
    max_lr = 1e-4
    learning_rate = 1e-3
    # base_lr = learning_rate

    learning_rate_decay_steps = 1000
    learning_rate_decay_rate = 0.98

    leave_one_out = None

    clip_gradient_norm = 3

    validation_length = sequence_length
    refresh = False

    # Run name encodes the key hyper-parameters plus a timestamp
    logdir = f'{root}/Unet-recons={reconstruction}-XI={XI}-eps={eps}-alpha={alpha}-train_on=small_{small}_{train_on}-w_size={w_size}-n_heads={n_heads}-lr={learning_rate}-'+ datetime.now().strftime('%y%m%d-%H%M%S')
    # Attach a file observer so sacred snapshots the source code and config
    ex.observers.append(FileStorageObserver.create(logdir)) # saving source code
@ex.automain
def train(spec, resume_iteration, train_on, batch_size, sequence_length,w_size, n_heads, small, train_batch_size,
          learning_rate, learning_rate_decay_steps, learning_rate_decay_rate, leave_one_out, position, alpha, KL_Div,
          clip_gradient_norm, validation_length, refresh, device, epoches, logdir, log, iteration, VAT_start, VAT, XI, eps,
          reconstruction):
    """Semi-supervised (VAT) training loop for the UNet transcription model.

    All arguments are injected by sacred from the ``config`` scope above.
    When ``VAT`` is on, an additional unlabelled loader feeds the VAT
    regularizer; otherwise training is purely supervised.
    """
    print_config(ex.current_run)
    supervised_set, unsupervised_set, validation_dataset, full_validation = prepare_VAT_dataset(
                                                                              sequence_length=sequence_length,
                                                                              validation_length=sequence_length,
                                                                              refresh=refresh,
                                                                              device=device,
                                                                              small=small,
                                                                              supersmall=True,
                                                                              dataset=train_on)
    # MAPS_supervised_set, MAPS_unsupervised_set, MAPS_validation_dataset, _ = prepare_VAT_dataset(
    #                                                                           sequence_length=sequence_length,
    #                                                                           validation_length=sequence_length,
    #                                                                           refresh=False,
    #                                                                           device=device,
    #                                                                           small=True,
    #                                                                           supersmall=True,
    #                                                                           dataset='MAPS')
    # supervised_set = ConcatDataset([supervised_set, MAPS_supervised_set])
    # unsupervised_set = ConcatDataset([unsupervised_set, MAPS_unsupervised_set])
    # The unlabelled loader is only needed when VAT is enabled
    if VAT:
        unsupervised_loader = DataLoader(unsupervised_set, batch_size, shuffle=True, drop_last=True)

    # supervised_set, unsupervised_set = torch.utils.data.random_split(dataset, [100, 39],
    #                                                                  generator=torch.Generator().manual_seed(42))
    # Cap the validation batch size at 4
    if len(validation_dataset)>4:
        val_batch_size=4
    else:
        val_batch_size = len(validation_dataset)

    supervised_loader = DataLoader(supervised_set, train_batch_size, shuffle=True, drop_last=True)
    valloader = DataLoader(validation_dataset, val_batch_size, shuffle=False, drop_last=True)
    batch_visualize = next(iter(valloader)) # Getting one fixed batch for visualization

    ds_ksize, ds_stride = (2,2),(2,2)
    if resume_iteration is None:
        model = UNet(ds_ksize,ds_stride, log=log, reconstruction=reconstruction,
                     mode=mode, spec=spec, device=device, XI=XI, eps=eps)
        model.to(device)
        optimizer = torch.optim.Adam(model.parameters(), learning_rate)
        resume_iteration = 0
    else: # Loading checkpoints and continue training
        trained_dir='trained_MAPS' # Assume that the checkpoint is in this folder
        model_path = os.path.join(trained_dir, f'{resume_iteration}.pt')
        model = torch.load(model_path)
        optimizer = torch.optim.Adam(model.parameters(), learning_rate)
        optimizer.load_state_dict(torch.load(os.path.join(trained_dir, 'last-optimizer-state.pt')))

    summary(model)
    # scheduler = torch.optim.lr_scheduler.CyclicLR(optimizer, base_lr=base_lr, max_lr=max_lr, step_size_up=step_size_up,cycle_momentum=False)
    scheduler = StepLR(optimizer, step_size=learning_rate_decay_steps, gamma=learning_rate_decay_rate)
    # loop = tqdm(range(resume_iteration + 1, iterations + 1))

    print(f'supervised_loader')
    for ep in range(1, epoches+1):
        if VAT==True:
            predictions, losses, optimizer = train_VAT_model(model, iteration, ep, supervised_loader, unsupervised_loader,
                                                             optimizer, scheduler, clip_gradient_norm, alpha, VAT, VAT_start)
        else:
            predictions, losses, optimizer = train_VAT_model(model, iteration, ep, supervised_loader, None,
                                                             optimizer, scheduler, clip_gradient_norm, alpha, VAT, VAT_start)
        loss = sum(losses.values())

        # Logging results to tensorboard
        if ep == 1:
            writer = SummaryWriter(logdir) # create tensorboard logger
        # Before VAT_start (or with VAT off) log without adversarial visualizations
        if ep < VAT_start or VAT==False:
            tensorboard_log(batch_visualize, model, validation_dataset, supervised_loader,
                            ep, logging_freq, saving_freq, n_heads, logdir, w_size, writer,
                            False, VAT_start, reconstruction)
        else:
            tensorboard_log(batch_visualize, model, validation_dataset, supervised_loader,
                            ep, logging_freq, saving_freq, n_heads, logdir, w_size, writer,
                            True, VAT_start, reconstruction)

        # Saving model
        if (ep)%saving_freq == 0:
            torch.save(model.state_dict(), os.path.join(logdir, f'model-{ep}.pt'))
            torch.save(optimizer.state_dict(), os.path.join(logdir, 'last-optimizer-state.pt'))
        # Per-epoch scalar logging of every loss term
        for key, value in {**losses}.items():
            writer.add_scalar(key, value.item(), global_step=ep)

    # Evaluating model performance on the full MAPS songs in the test split
    print('Training finished, now evaluating on the MAPS test split (full songs)')
    with torch.no_grad():
        model = model.eval()
        metrics = evaluate_wo_velocity(tqdm(full_validation), model, reconstruction=False,
                                       save_path=os.path.join(logdir,'./MIDI_results'))

    for key, values in metrics.items():
        if key.startswith('metric/'):
            _, category, name = key.split('/')
            print(f'{category:>32} {name:25}: {np.mean(values):.3f} ± {np.std(values):.3f}')

    export_path = os.path.join(logdir, 'result_dict')
    pickle.dump(metrics, open(export_path, 'wb'))
| 8,580 | 43.926702 | 213 | py |
ReconVAT | ReconVAT-master/model/self_attention_VAT.py | """
A rough translation of Magenta's Onsets and Frames implementation [1].
[1] https://github.com/tensorflow/magenta/blob/master/magenta/models/onsets_frames_transcription/model.py
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init
from nnAudio import Spectrogram
from .constants import *
from model.utils import Normalization
from itertools import cycle
def create_triangular_cycle(start, end, period):
    """Return an endless iterator tracing a triangular wave between *start* and *end*.

    The rising edge contains *period* samples (both endpoints included); the
    falling edge omits its endpoints so that cycling never repeats a value
    twice in a row.
    """
    rising = torch.linspace(start, end, period)
    falling = torch.linspace(end, start, period)[1:-1]
    return cycle(torch.cat((rising, falling)))
class MutliHeadAttention1D(nn.Module):
    """Local (windowed) multi-head self-attention along the time axis.

    Input shape (batch, len, in_features); output is the attended features
    of shape (batch, len, out_features) plus the attention weights.
    """
    def __init__(self, in_features, out_features, kernel_size, stride=1, groups=1, position=True, bias=False):
        """kernel_size is the 1D local attention window size"""
        super().__init__()
        self.out_features = out_features
        self.kernel_size = kernel_size
        self.stride = stride
        self.position = position
        # Padding should always be (kernel_size-1)/2
        # Isn't it?
        self.padding = (kernel_size-1)//2
        self.groups = groups  # number of attention heads

        # Make sure the feature dim is divisible by the n_heads
        assert self.out_features % self.groups == 0, f"out_channels should be divided by groups. (example: out_channels: 40, groups: 4). Now out_channels={self.out_features}, groups={self.groups}"
        assert (kernel_size-1) % 2 == 0, "kernal size must be odd number"

        if self.position:
            # Relative position encoding
            self.rel = nn.Parameter(torch.randn(1, out_features, kernel_size), requires_grad=True)

        # Input shape = (batch, len, feat)

        # Increasing the channel deapth (feature dim) with Conv2D
        # kernel_size=1 such that it expands only the feature dim
        # without affecting other dimensions
        self.W_k = nn.Linear(in_features, out_features, bias=bias)
        self.W_q = nn.Linear(in_features, out_features, bias=bias)
        self.W_v = nn.Linear(in_features, out_features, bias=bias)

        self.reset_parameters()

    def forward(self, x):
        # x: (batch, seq_len, feat_dim)
        batch, seq_len, feat_dim = x.size()
        # Zero-pad the time axis so every position has a full local window
        padded_x = F.pad(x, [0, 0, self.padding, self.padding])

        q_out = self.W_q(x)
        k_out = self.W_k(padded_x)
        v_out = self.W_v(padded_x)

        # Unfold extracts sliding local windows along the time axis
        k_out = k_out.unfold(1, self.kernel_size, self.stride)
        # (batch, L, feature, local_window)
        v_out = v_out.unfold(1, self.kernel_size, self.stride)
        # (batch, L, feature, local_window)

        if self.position:
            k_out = k_out + self.rel # relative position?

        k_out = k_out.contiguous().view(batch, seq_len, self.groups, self.out_features // self.groups, -1)
        v_out = v_out.contiguous().view(batch, seq_len, self.groups, self.out_features // self.groups, -1)
        # (batch, L, n_heads, feature_per_head, local_window)

        # expand the last dimension s.t. it can multiple with the local att window
        q_out = q_out.view(batch, seq_len, self.groups, self.out_features // self.groups, 1)
        # (batch, L, n_heads, feature_per_head, 1)

        # Dot-product attention within each local window (no scaling factor)
        energy = (q_out * k_out).sum(-2, keepdim=True)

        attention = F.softmax(energy, dim=-1)
        # (batch, L, n_heads, 1, local_window)

        out = attention*v_out
        # out = torch.einsum('blnhk,blnhk -> blnh', attention, v_out).view(batch, seq_len, -1)
        # (batch, c, H, W)

        # Sum over the window, then merge heads back into one feature dim
        return out.sum(-1).flatten(2), attention.squeeze(3)

    def reset_parameters(self):
        # He initialization for the projections; standard normal for the
        # relative-position embedding
        init.kaiming_normal_(self.W_k.weight, mode='fan_out', nonlinearity='relu')
        init.kaiming_normal_(self.W_v.weight, mode='fan_out', nonlinearity='relu')
        init.kaiming_normal_(self.W_q.weight, mode='fan_out', nonlinearity='relu')

        if self.position:
            init.normal_(self.rel, 0, 1)
class stepwise_VAT(nn.Module):
    """
    Virtual Adversarial Training (VAT) regularizer for models whose
    forward() returns ``(prediction, attention)``.

    Estimates the adversarial direction with ``n_power`` power-iteration
    steps of size ``XI``, then measures local distributional smoothness
    against a perturbation of magnitude ``epsilon``.
    """
    def __init__(self, XI, epsilon, n_power, KL_Div, binwise=False):
        # XI: finite-difference step used while estimating the direction
        # epsilon: magnitude of the final adversarial perturbation
        # n_power: number of power-iteration refinements
        # KL_Div: use binary KL divergence instead of BCE as the distance
        # binwise: normalize per element (sign-like) instead of per-row L2
        super().__init__()
        self.n_power = n_power
        self.XI = XI
        self.epsilon = epsilon
        self.KL_Div = KL_Div
        self.binwise = binwise

    def forward(self, model, x):
        """Return (vat_loss, r_adv, normalized_direction) for batch x."""
        with torch.no_grad():
            y_ref, _ = model(x) # This will be used as a label, therefore no need grad()

        # generate_virtual_adversarial_perturbation
        d = torch.randn_like(x, requires_grad=True) # Need gradient
        for _ in range(self.n_power):
            r = self.XI * _l2_normalize(d, binwise=self.binwise)
            # keep the perturbed input inside the valid [0, 1] range
            x_adv = (x + r).clamp(0,1)
            y_pred, _ = model(x_adv)
            if self.KL_Div==True:
                loss = binary_kl_div(y_pred, y_ref)
            else:
                loss =F.binary_cross_entropy(y_pred, y_ref)
            loss.backward() # Calculate gradient wrt d
            d = d.grad.detach()
            model.zero_grad() # prevent gradient change in the model

        # generating virtual labels and calculate VAT
        r_adv = self.epsilon * _l2_normalize(d, binwise=self.binwise)
        # logit_p = logit.detach()
        x_adv = (x + r_adv).clamp(0,1)
        y_pred, _ = model(x_adv)

        if self.KL_Div==True:
            vat_loss = binary_kl_div(y_pred, y_ref)
        else:
            vat_loss = F.binary_cross_entropy(y_pred, y_ref)

        return vat_loss, r_adv, _l2_normalize(d, binwise=self.binwise) # already averaged
class UNet_VAT(nn.Module):
    """
    Virtual Adversarial Training (VAT) regularizer for a UNet whose
    ``model.transcriber(x)`` returns ``(prediction, attention)``.

    Estimates the adversarial direction with ``n_power`` power-iteration
    steps of size ``XI``, then measures local distributional smoothness
    against a perturbation of magnitude ``epsilon``.
    """
    def __init__(self, XI, epsilon, n_power, KL_Div, reconstruction=False):
        # XI: finite-difference step used while estimating the direction
        # epsilon: magnitude of the final adversarial perturbation
        # n_power: number of power-iteration refinements
        # KL_Div: use binary KL divergence instead of BCE as the distance
        # reconstruction: kept for API compatibility (unused here)
        super().__init__()
        self.n_power = n_power
        self.XI = XI
        self.epsilon = epsilon
        self.KL_Div = KL_Div
        self.binwise = False
        self.reconstruction = reconstruction

    def forward(self, model, x):
        """Return (vat_loss, r_adv, normalized_direction) for batch x."""
        with torch.no_grad():
            y_ref, _ = model.transcriber(x) # This will be used as a label, therefore no need grad()

        # generate_virtual_adversarial_perturbation
        d = torch.randn_like(x, requires_grad=True) # Need gradient
        for _ in range(self.n_power):
            r = self.XI * _l2_normalize(d, binwise=self.binwise)
            # keep the perturbed spectrogram inside the valid [0, 1] range
            x_adv = (x + r).clamp(0,1)
            y_pred, _ = model.transcriber(x_adv)
            if self.KL_Div==True:
                loss = binary_kl_div(y_pred, y_ref)
            else:
                loss =F.binary_cross_entropy(y_pred, y_ref)
            loss.backward() # Calculate gradient wrt d
            # Rescale the (tiny) gradient before normalizing to avoid underflow
            d = d.grad.detach()*1e10
            model.zero_grad() # prevent gradient change in the model

        # generating virtual labels and calculate VAT
        r_adv = self.epsilon * _l2_normalize(d, binwise=self.binwise)
        assert torch.isnan(r_adv).any()==False, f"r_adv has nan, d min={d.min()} d max={d.max()} d mean={d.mean()} please debug tune down the XI for VAT"
        # BUGFIX: this check previously repeated isnan() while its message
        # talked about inf; it now actually guards against inf values.
        assert torch.isinf(r_adv).any()==False, f"r_adv has inf, d min={d.min()} d max={d.max()} d mean={d.mean()} please debug tune down the XI for VAT"
        # logit_p = logit.detach()
        x_adv = (x + r_adv).clamp(0,1)
        y_pred, _ = model.transcriber(x_adv)

        if self.KL_Div==True:
            vat_loss = binary_kl_div(y_pred, y_ref)
        else:
            vat_loss = F.binary_cross_entropy(y_pred, y_ref)

        return vat_loss, r_adv, _l2_normalize(d, binwise=self.binwise) # already averaged
class onset_frame_VAT(nn.Module):
    """
    Virtual Adversarial Training (VAT) regularizer for models whose
    forward() returns three values (e.g. onset, activation, frame); only
    the first output is used as the smoothing target.
    """
    def __init__(self, XI, epsilon, n_power):
        # XI: finite-difference step used while estimating the direction
        # epsilon: magnitude of the final adversarial perturbation
        # n_power: number of power-iteration refinements
        super().__init__()
        self.n_power = n_power
        self.XI = XI
        self.epsilon = epsilon

    def forward(self, model, x):
        """Return (vat_loss, r_adv) for batch x."""
        with torch.no_grad():
            y_ref, _, _ = model(x) # This will be used as a label, therefore no need grad()

        # generate_virtual_adversarial_perturbation
        d = torch.randn_like(x, requires_grad=True) # Need gradient
        for _ in range(self.n_power):
            r = self.XI * _l2_normalize(d, binwise=False)
            # keep the perturbed input inside the valid [0, 1] range
            x_adv = (x + r).clamp(0,1)
            y_pred, _, _ = model(x_adv)
            dist =F.binary_cross_entropy(y_pred, y_ref)
            dist.backward() # Calculate gradient wrt d
            d = d.grad.detach()
            model.zero_grad() # prevent gradient change in the model

        # generating virtual labels and calculate VAT
        r_adv = self.epsilon * _l2_normalize(d, binwise=False)
        # logit_p = logit.detach()
        x_adv = (x + r_adv).clamp(0,1)
        y_pred, _, _ = model(x_adv)
        # print(f'x_adv max = {x_adv.max()}\tx_adv min = {x_adv.min()}')
        vat_loss = F.binary_cross_entropy(y_pred, y_ref)

        return vat_loss, r_adv # already averaged
def _l2_normalize(d, binwise):
# input shape (batch, timesteps, bins, ?)
if binwise==True:
d = d/(torch.abs(d)+1e-8)
else:
d = d/(torch.norm(d, dim=-1, keepdim=True))
return d
def binary_kl_div(y_pred, y_ref):
    """KL divergence between two Bernoulli posteriors given as probabilities.

    Both inputs are clamped away from {0, 1} so the log stays finite, then
    expanded to explicit two-class distributions before calling
    ``F.kl_div`` with batch-mean reduction.
    """
    pred = torch.clamp(y_pred, 1e-4, 0.9999)  # prevent inf in kl_div
    ref = torch.clamp(y_ref, 1e-4, 0.9999)
    q = torch.stack((pred, 1 - pred), dim=-1)
    log_p = torch.stack((ref, 1 - ref), dim=-1).log()
    assert not torch.isnan(log_p).any(), "r_adv exploded, please debug tune down the XI for VAT"
    assert not torch.isinf(log_p).any(), "r_adv vanished, please debug tune up the XI for VAT"
    return F.kl_div(log_p, q, reduction='batchmean')
class VAT_self_attention_1D(nn.Module):
    """Frame-wise transcription model with optional VAT regularization.

    Pipeline: raw audio -> Mel spectrogram -> (log + normalize) -> one local
    multi-head self-attention layer -> layer norm -> linear -> sigmoid.
    ``forward`` returns ``(frame_pred, attention)``.
    """
    def __init__(self, input_features, output_features, model_complexity=48, w_size=31,
                 log=True, mode='imagewise', spec='Mel', n_heads=8, position=True, XI=1e-5, eps=1e-2,
                 eps_period=False, eps_max=1, KL_Div=False):
        super().__init__()
        self.w_size=w_size
        self.log = log
        self.normalize = Normalization(mode)
        self.spectrogram = Spectrogram.MelSpectrogram(sr=SAMPLE_RATE, win_length=WINDOW_LENGTH, n_mels=N_BINS,
                                                      hop_length=HOP_LENGTH, fmin=MEL_FMIN, fmax=MEL_FMAX,
                                                      trainable_mel=False, trainable_STFT=False)
        self.sequence_model = MutliHeadAttention1D(in_features=input_features,
                                                   out_features=model_complexity,
                                                   kernel_size=w_size,
                                                   position=position,
                                                   groups=n_heads)
        self.layer_norm = nn.LayerNorm(model_complexity)
        self.linear = nn.Linear(model_complexity, output_features)
        self.vat_loss = stepwise_VAT(XI, eps, 1, KL_Div, False)
        self.eps_period = eps_period
        if self.eps_period:
            # Schedule epsilon on a triangular wave between eps and eps_max
            self.triangular_cycle = create_triangular_cycle(eps, eps_max, eps_period)

    def forward(self, spec):
        """spec: (batch, timesteps, n_bins). Returns (frame_pred, attention)."""
        x, a = self.sequence_model(spec)
        x = self.layer_norm(x)
        x = self.linear(x)
        frame_pred = torch.sigmoid(x)
        return frame_pred, a

    def run_on_batch(self, batch_l, batch_ul=None, VAT=False):
        """Run one training/eval step.

        batch_l: labelled batch with 'audio', 'onset', 'frame'.
        batch_ul: optional unlabelled batch used only for the VAT loss.
        Returns (predictions, losses, spec).
        """
        audio_label = batch_l['audio']
        onset_label = batch_l['onset']
        frame_label = batch_l['frame']

        if batch_ul:
            # Unlabelled branch contributes only the VAT smoothness loss
            audio_label_ul = batch_ul['audio']
            spec = self.spectrogram(audio_label_ul.reshape(-1, audio_label.shape[-1])[:, :-1]) # x = torch.rand(8,229, 640)
            if self.log:
                spec = torch.log(spec + 1e-5)
            spec = self.normalize.transform(spec)
            spec = spec.transpose(-1,-2)

            lds_ul, _, r_norm_ul = self.vat_loss(self, spec)
        else:
            lds_ul = torch.tensor(0.)
            r_norm_ul = torch.tensor(0.)

        spec = self.spectrogram(audio_label.reshape(-1, audio_label.shape[-1])[:, :-1]) # x = torch.rand(8,229, 640)
        if self.log:
            spec = torch.log(spec + 1e-5)
        # print(f'spec shape = {spec.shape}')
        spec = self.normalize.transform(spec)
        spec = spec.transpose(-1,-2) # swap spec bins with timesteps so that it fits LSTM later # shape (8,640,229)

        if VAT:
            lds_l, r_adv, r_norm_l = self.vat_loss(self, spec)
        else:
            r_adv = None
            lds_l = torch.tensor(0.)
            r_norm_l = torch.tensor(0.)

        frame_pred, a = self(spec)
        if self.training:
            if self.eps_period:
                # Advance the triangular epsilon schedule once per step
                self.vat_loss.eps = next(self.triangular_cycle)
                print(f'eps = {self.vat_loss.eps}')
            predictions = {
                'onset': frame_pred.reshape(*frame_label.shape),
                'frame': frame_pred.reshape(*frame_label.shape),
                'attention': a,
                'r_adv': r_adv,
            }

            losses = {
                'loss/train_frame': F.binary_cross_entropy(predictions['frame'], frame_label),
                'loss/train_LDS_l': lds_l,
                'loss/train_LDS_ul': lds_ul,
                'loss/train_r_norm_l': r_norm_l.abs().mean(),
                'loss/train_r_norm_ul': r_norm_ul.abs().mean()
            }
        else:
            predictions = {
                'onset': frame_pred.reshape(*frame_label.shape),
                'frame': frame_pred.reshape(*frame_label.shape),
                'attention': a,
                'r_adv': r_adv,
            }

            losses = {
                'loss/test_frame': F.binary_cross_entropy(predictions['frame'], frame_label),
                'loss/test_LDS_l': lds_l,
                'loss/test_r_norm_l': r_norm_l.abs().mean()
            }

        return predictions, losses, spec

    def feed_audio(self, audio):
        """Inference helper: transcribe a raw audio tensor.

        Returns (predictions, spec).
        """
        spec = self.spectrogram(audio.reshape(-1, audio.shape[-1])[:, :-1]) # x = torch.rand(8,229, 640)
        if self.log:
            spec = torch.log(spec + 1e-5)
        spec = self.normalize.transform(spec)
        spec = spec.transpose(-1,-2) # swap spec bins with timesteps so that it fits LSTM later # shape (8,640,229)

        # BUGFIX: forward() returns two values (frame_pred, attention); the
        # previous 4-way unpacking raised a ValueError at runtime. The
        # single frame prediction is exposed under all prediction keys for
        # backward compatibility with downstream consumers.
        frame_pred, a = self(spec)
        predictions = {
            'onset': frame_pred,
            # 'offset': offset_pred.reshape(*offset_label.shape),
            'activation': frame_pred,
            'frame': frame_pred,
            'attention': a
            # 'velocity': velocity_pred.reshape(*velocity_label.shape)
        }
        return predictions, spec

    def load_my_state_dict(self, state_dict):
        """Load only the parameters whose names exist in this model."""
        own_state = self.state_dict()
        for name, param in state_dict.items():
            if name not in own_state:
                continue
            if isinstance(param, nn.Parameter):
                # backwards compatibility for serialized parameters
                param = param.data
            own_state[name].copy_(param)
class ConvStack(nn.Module):
    """Acoustic CNN front-end followed by a per-frame linear projection.

    Input: (batch, 1, frames, n_bins); output: (batch, frames, output_features).
    Channel widths scale with ``output_features``; the frequency axis is
    pooled down by 4x before the fully connected layer.
    """

    def __init__(self, output_features):
        super().__init__()
        shallow = output_features // 16
        deep = output_features // 8
        self.cnn = nn.Sequential(
            # layer 0
            nn.Conv2d(1, shallow, (3, 3), padding=1),
            nn.BatchNorm2d(shallow),
            nn.ReLU(),
            # layer 1
            nn.Conv2d(shallow, shallow, (3, 3), padding=1),
            nn.BatchNorm2d(shallow),
            nn.ReLU(),
            # layer 2: halve the frequency axis, then widen the channels
            nn.MaxPool2d((1, 2)),
            nn.Dropout(0.25),
            nn.Conv2d(shallow, deep, (3, 3), padding=1),
            nn.BatchNorm2d(deep),
            nn.ReLU(),
            # layer 3
            nn.MaxPool2d((1, 2)),
            nn.Dropout(0.25),
        )
        self.fc = nn.Sequential(
            nn.Linear(self._get_conv_output(), output_features),
            nn.Dropout(0.5),
        )

    def forward(self, spec):
        features = self.cnn(spec)
        # (batch, ch, frames, freq) -> (batch, frames, ch*freq)
        features = features.transpose(1, 2).flatten(-2)
        return self.fc(features)

    def _get_conv_output(self):
        # Probe the CNN with a dummy spectrogram to size the linear layer.
        probe = torch.rand(1, 1, 640, 229)
        out = self._forward_features(probe)
        return out.transpose(1, 2).flatten(-2).size(-1)

    def _forward_features(self, x):
        return self.cnn(x)
class Timbral_CNN(nn.Module):
    """CNN front-end variant with configurable channel widths.

    Input: (batch, 1, frames, n_bins); output: (batch, frames, output_features).
    ``start_channel`` sets the width of the first two conv layers,
    ``final_channel`` the width of the last one; the frequency axis is
    pooled down by 4x before the fully connected projection.
    """

    def __init__(self, start_channel, final_channel, output_features):
        super().__init__()
        self.cnn = nn.Sequential(
            nn.Conv2d(1, start_channel, (3, 3), padding=1),
            nn.BatchNorm2d(start_channel),
            nn.ReLU(),
            nn.Conv2d(start_channel, start_channel, (3, 3), padding=1),
            nn.BatchNorm2d(start_channel),
            nn.ReLU(),
            nn.MaxPool2d((1, 2)),
            nn.Conv2d(start_channel, final_channel, (3, 3), padding=1),
            nn.BatchNorm2d(final_channel),
            nn.ReLU(),
            nn.MaxPool2d((1, 2)),
        )
        # Size the linear layer by probing the CNN with a dummy input
        self.fc = nn.Sequential(
            nn.Linear(self._get_conv_output(), output_features),
        )

    def forward(self, spec):
        features = self.cnn(spec)
        # (batch, ch, frames, freq) -> (batch, frames, ch*freq)
        features = features.transpose(1, 2).flatten(-2)
        return self.fc(features)

    def _get_conv_output(self):
        probe = torch.rand(1, 1, 640, 229)
        out = self._forward_features(probe)
        return out.transpose(1, 2).flatten(-2).size(-1)

    def _forward_features(self, x):
        return self.cnn(x)
class VAT_CNN_attention_1D(nn.Module):
    """Frame-level transcription model: a CNN front-end (ConvStack or
    Timbral_CNN), 1-D multi-head attention, and a sigmoid frame classifier,
    regularized with stepwise VAT whose epsilon follows a triangular cycle."""

    def __init__(self, input_features, output_features, model_complexity=48, w_size=31,
                 log=True, mode='imagewise', spec='Mel', n_heads=8, position=True, XI=1e-5, eps=1e-2, version='a'):
        super().__init__()
        self.w_size = w_size
        self.log = log
        self.normalize = Normalization(mode)
        self.spectrogram = Spectrogram.MelSpectrogram(sr=SAMPLE_RATE, win_length=WINDOW_LENGTH, n_mels=N_BINS,
                                                      hop_length=HOP_LENGTH, fmin=MEL_FMIN, fmax=MEL_FMAX,
                                                      trainable_mel=False, trainable_STFT=False)
        # Two interchangeable CNN front-ends selected by `version`.
        if version == 'a':
            self.cnn = ConvStack(output_features)
        elif version == 'b':
            # input is batch_size * 1 channel * frames * input_features
            self.cnn = Timbral_CNN(32, 8, output_features)
        self.sequence_model = MutliHeadAttention1D(in_features=output_features,
                                                   out_features=model_complexity,
                                                   kernel_size=w_size,
                                                   position=position,
                                                   groups=n_heads)
        self.layer_norm = nn.LayerNorm(model_complexity)
        self.linear = nn.Linear(model_complexity, output_features)
        self.vat_loss = stepwise_VAT(XI, eps, 1, False)
        # Cyclic schedule for the VAT perturbation radius (advanced per batch).
        self.triangular_cycle = create_triangular_cycle(1e-2, 10, 50)

    def forward(self, spec):
        """spec: (batch, frames, bins) normalized log-spectrogram.
        Returns (frame_pred, attention)."""
        x = self.cnn(spec.unsqueeze(1))
        x, a = self.sequence_model(x)
        x = self.layer_norm(x)
        x = self.linear(x)
        frame_pred = torch.sigmoid(x)
        return frame_pred, a

    def run_on_batch(self, batch_l, batch_ul=None, VAT=False):
        """Compute predictions and losses for a labelled batch, optionally
        adding VAT terms from the labelled and/or unlabelled batch.
        Returns (predictions, losses, spec)."""
        audio_label = batch_l['audio']
        onset_label = batch_l['onset']
        frame_label = batch_l['frame']
        if batch_ul:
            audio_label_ul = batch_ul['audio']
            spec = self.spectrogram(audio_label_ul.reshape(-1, audio_label.shape[-1])[:, :-1])
            if self.log:
                spec = torch.log(spec + 1e-5)
            spec = self.normalize.transform(spec)
            spec = spec.transpose(-1, -2)
            lds_ul, _ = self.vat_loss(self, spec)
        else:
            # Consistency fix: float tensor, matching the labelled branch.
            lds_ul = torch.tensor(0.)
        spec = self.spectrogram(audio_label.reshape(-1, audio_label.shape[-1])[:, :-1])
        if self.log:
            spec = torch.log(spec + 1e-5)
        spec = self.normalize.transform(spec)
        # Swap spec bins with timesteps: (batch, frames, bins) for the sequence model.
        spec = spec.transpose(-1, -2)
        if VAT:
            lds_l, r_adv = self.vat_loss(self, spec)
        else:
            r_adv = None
            lds_l = torch.tensor(0.)
        frame_pred, a = self(spec)
        # Advance the triangular epsilon schedule once per batch.
        self.vat_loss.eps = next(self.triangular_cycle)
        print(f'VAT eps={self.vat_loss.eps}')
        if self.training:
            predictions = {
                'onset': frame_pred.reshape(*frame_label.shape),
                'frame': frame_pred.reshape(*frame_label.shape),
                'attention': a,
                'r_adv': r_adv
            }
            losses = {
                'loss/train_frame': F.binary_cross_entropy(predictions['frame'], frame_label),
                'loss/train_LDS_l': lds_l,
                'loss/train_LDS_ul': lds_ul
            }
        else:
            predictions = {
                'onset': frame_pred.reshape(*frame_label.shape),
                'frame': frame_pred.reshape(*frame_label.shape),
                'attention': a,
                'r_adv': r_adv
            }
            losses = {
                'loss/test_frame': F.binary_cross_entropy(predictions['frame'], frame_label),
            }
        return predictions, losses, spec

    def feed_audio(self, audio):
        """Inference helper: transcribe raw audio.

        Bug fix: forward() returns only (frame_pred, attention); the original
        unpacked four values here and always raised a ValueError at runtime.
        """
        spec = self.spectrogram(audio.reshape(-1, audio.shape[-1])[:, :-1])
        if self.log:
            spec = torch.log(spec + 1e-5)
        spec = self.normalize.transform(spec)
        spec = spec.transpose(-1, -2)
        frame_pred, a = self(spec)
        predictions = {
            'onset': frame_pred,
            'activation': frame_pred,
            'frame': frame_pred,
            'attention': a
        }
        return predictions, spec

    def load_my_state_dict(self, state_dict):
        """Load only the weights whose names exist in this model."""
        own_state = self.state_dict()
        for name, param in state_dict.items():
            if name not in own_state:
                continue
            if isinstance(param, nn.Parameter):
                # backwards compatibility for serialized parameters
                param = param.data
            own_state[name].copy_(param)
class VAT_CNN_attention_onset_frame(nn.Module):
    """Two-head transcription model: a dedicated onset CNN+attention stack,
    and a frame stack whose final attention also sees the onset predictions."""

    def __init__(self, input_features, output_features, model_complexity=48, w_size=31,
                 log=True, mode='imagewise', spec='Mel', n_heads=8, position=True, XI=1e-5, eps=1e-2):
        super().__init__()
        self.w_size = w_size
        self.log = log
        self.normalize = Normalization(mode)
        self.spectrogram = Spectrogram.MelSpectrogram(sr=SAMPLE_RATE, win_length=WINDOW_LENGTH, n_mels=N_BINS,
                                                      hop_length=HOP_LENGTH, fmin=MEL_FMIN, fmax=MEL_FMAX,
                                                      trainable_mel=False, trainable_STFT=False)
        start_channel = 48
        final_channel = 96
        # input is batch_size * 1 channel * frames * input_features
        self.cnn = Timbral_CNN(start_channel, final_channel, output_features)
        self.onset_timbral_cnn = Timbral_CNN(start_channel, final_channel, output_features)
        # NOTE(review): the result is unused; the call is kept only so the
        # init-time RNG sequence (torch.rand inside) stays unchanged.
        freq_features = self._get_conv_output()
        self.onset_attention = MutliHeadAttention1D(in_features=output_features,
                                                    out_features=model_complexity,
                                                    kernel_size=w_size,
                                                    position=position,
                                                    groups=n_heads)
        self.layer_norm_onset = nn.LayerNorm(model_complexity)
        self.onset_classifier = nn.Linear(model_complexity, output_features)
        # The final attention consumes [onset_pred ; frame activations].
        self.final_attention = MutliHeadAttention1D(in_features=2*output_features,
                                                    out_features=model_complexity,
                                                    kernel_size=w_size,
                                                    position=position,
                                                    groups=n_heads)
        self.layer_norm_final = nn.LayerNorm(model_complexity)
        self.final_classifier = nn.Linear(model_complexity, output_features)
        self.vat_loss = onset_frame_VAT(XI, eps, 1)

    def _get_conv_output(self):
        """Probe the frame CNN with a dummy (1, 1, 640, 229) input and return
        its last-dimension size."""
        shape = (1, 640, 229)
        bs = 1
        input = torch.rand(bs, *shape)
        output_feat = self._forward_features(input)
        return output_feat.size(-1)

    def _forward_features(self, x):
        """Apply only the frame CNN (used for shape probing)."""
        x = self.cnn(x)
        return x

    def forward(self, spec):
        """spec: (batch, frames, bins).  Returns (frame_pred, onset_pred, attention)."""
        onset_pred = self.onset_timbral_cnn(spec.unsqueeze(1))
        onset_pred, _ = self.onset_attention(onset_pred)
        onset_pred = self.layer_norm_onset(onset_pred)
        onset_pred = self.onset_classifier(onset_pred)
        onset_pred = torch.sigmoid(onset_pred)
        activation = self.cnn(spec.unsqueeze(1))
        # Frame head conditions on the onset predictions via concatenation.
        x, a = self.final_attention(torch.cat((onset_pred, activation), dim=-1))
        x = self.layer_norm_final(x)
        x = self.final_classifier(x)
        frame_pred = torch.sigmoid(x)
        return frame_pred, onset_pred, a

    def run_on_batch(self, batch_l, batch_ul=None, VAT=False):
        """Compute predictions and onset/frame losses, optionally with VAT
        terms from the labelled and/or unlabelled batch."""
        audio_label = batch_l['audio']
        onset_label = batch_l['onset']
        frame_label = batch_l['frame']
        if batch_ul:
            audio_label_ul = batch_ul['audio']
            spec = self.spectrogram(audio_label_ul.reshape(-1, audio_label.shape[-1])[:, :-1])
            if self.log:
                spec = torch.log(spec + 1e-5)
            spec = self.normalize.transform(spec)
            spec = spec.transpose(-1, -2)
            lds_ul, _ = self.vat_loss(self, spec)
        else:
            lds_ul = torch.tensor(0.)
        spec = self.spectrogram(audio_label.reshape(-1, audio_label.shape[-1])[:, :-1])
        if self.log:
            spec = torch.log(spec + 1e-5)
        spec = self.normalize.transform(spec)
        # Swap spec bins with timesteps: (batch, frames, bins).
        spec = spec.transpose(-1, -2)
        if VAT:
            lds_l, r_adv = self.vat_loss(self, spec)
        else:
            r_adv = None
            lds_l = torch.tensor(0.)
        frame_pred, onset_pred, a = self(spec)
        if self.training:
            predictions = {
                'onset': onset_pred.reshape(*onset_pred.shape),
                'frame': frame_pred.reshape(*frame_label.shape),
                'attention': a,
                'r_adv': r_adv
            }
            losses = {
                'loss/train_frame': F.binary_cross_entropy(predictions['frame'], frame_label),
                'loss/train_onset': F.binary_cross_entropy(predictions['onset'], onset_label),
                'loss/train_LDS_l': lds_l,
                'loss/train_LDS_ul': lds_ul
            }
        else:
            predictions = {
                'onset': onset_pred.reshape(*onset_pred.shape),
                'frame': frame_pred.reshape(*frame_label.shape),
                'attention': a,
                'r_adv': r_adv
            }
            losses = {
                'loss/test_frame': F.binary_cross_entropy(predictions['frame'], frame_label),
                'loss/test_onset': F.binary_cross_entropy(predictions['onset'], onset_label),
            }
        return predictions, losses, spec

    def feed_audio(self, audio):
        """Inference helper: transcribe raw audio.

        Bug fix: forward() returns (frame_pred, onset_pred, attention); the
        original unpacked four values here and raised a ValueError.
        """
        spec = self.spectrogram(audio.reshape(-1, audio.shape[-1])[:, :-1])
        if self.log:
            spec = torch.log(spec + 1e-5)
        spec = self.normalize.transform(spec)
        spec = spec.transpose(-1, -2)
        frame_pred, onset_pred, a = self(spec)
        predictions = {
            'onset': onset_pred,
            'frame': frame_pred,
            'attention': a
        }
        return predictions, spec

    def load_my_state_dict(self, state_dict):
        """Load only the weights whose names exist in this model."""
        own_state = self.state_dict()
        for name, param in state_dict.items():
            if name not in own_state:
                continue
            if isinstance(param, nn.Parameter):
                # backwards compatibility for serialized parameters
                param = param.data
            own_state[name].copy_(param)
batchNorm_momentum = 0.1  # momentum shared by every BatchNorm2d in the U-net blocks below
num_instruments = 1  # output channels of the last decoder block (presumably one roll per instrument)
class block(nn.Module):
    """Residual double-conv unit followed by a strided downsampling conv.

    forward() returns the downsampled map twice (mirroring a pool/indices
    style API) plus the size of the pre-downsample feature map.
    """

    def __init__(self, inp, out, ksize, pad, ds_ksize, ds_stride):
        super(block, self).__init__()
        self.conv1 = nn.Conv2d(inp, out, kernel_size=ksize, padding=pad)
        self.bn1 = nn.BatchNorm2d(out, momentum=batchNorm_momentum)
        self.conv2 = nn.Conv2d(out, out, kernel_size=ksize, padding=pad)
        self.bn2 = nn.BatchNorm2d(out, momentum=batchNorm_momentum)
        self.skip = nn.Conv2d(inp, out, kernel_size=1, padding=0)
        self.ds = nn.Conv2d(out, out, kernel_size=ds_ksize, stride=ds_stride, padding=0)

    def forward(self, x):
        hidden = F.leaky_relu(self.bn1(self.conv1(x)))
        residual = F.leaky_relu(self.bn2(self.conv2(hidden)))
        residual = residual + self.skip(x)  # 1x1 projection of the input
        pooled = self.ds(residual)
        return pooled, pooled, residual.size()
class d_block(nn.Module):
    """Decoder counterpart of `block`: transposed-conv upsampling followed by
    two transposed convs.  Non-final blocks concatenate a skip connection and
    batch-normalize; the final block outputs raw (un-normalized) features."""

    def __init__(self, inp, out, isLast, ksize, pad, ds_ksize, ds_stride):
        super(d_block, self).__init__()
        mid = int(inp / 2)
        self.conv2d = nn.ConvTranspose2d(inp, mid, kernel_size=ksize, padding=pad)
        self.bn2d = nn.BatchNorm2d(mid, momentum=batchNorm_momentum)
        self.conv1d = nn.ConvTranspose2d(mid, out, kernel_size=ksize, padding=pad)
        if not isLast:
            self.bn1d = nn.BatchNorm2d(out, momentum=batchNorm_momentum)
            # Upsampler sees only the non-skip channels; the skip is concatenated after.
            self.us = nn.ConvTranspose2d(inp - out, inp - out, kernel_size=ds_ksize, stride=ds_stride)
        else:
            self.us = nn.ConvTranspose2d(inp, inp, kernel_size=ds_ksize, stride=ds_stride)

    def forward(self, x, size=None, isLast=None, skip=None):
        x = self.us(x, output_size=size)
        if not isLast:
            x = torch.cat((x, skip), 1)
        x = F.leaky_relu(self.bn2d(self.conv2d(x)))
        if isLast:
            return self.conv1d(x)
        return F.leaky_relu(self.bn1d(self.conv1d(x)))
class Encoder(nn.Module):
    """U-net contracting path: four `block`s plus three 3x3 convs that turn
    intermediate maps into skip connections for the decoder."""

    def __init__(self, ds_ksize, ds_stride):
        super(Encoder, self).__init__()
        self.block1 = block(1, 16, (3, 3), (1, 1), ds_ksize, ds_stride)
        self.block2 = block(16, 32, (3, 3), (1, 1), ds_ksize, ds_stride)
        self.block3 = block(32, 64, (3, 3), (1, 1), ds_ksize, ds_stride)
        self.block4 = block(64, 128, (3, 3), (1, 1), ds_ksize, ds_stride)
        self.conv1 = nn.Conv2d(64, 64, kernel_size=(3, 3), padding=(1, 1))
        self.conv2 = nn.Conv2d(32, 32, kernel_size=(3, 3), padding=(1, 1))
        self.conv3 = nn.Conv2d(16, 16, kernel_size=(3, 3), padding=(1, 1))

    def forward(self, x):
        x1, _, s1 = self.block1(x)
        x2, _, s2 = self.block2(x1)
        x3, _, s3 = self.block3(x2)
        x4, _, s4 = self.block4(x3)
        # Skip connections for the decoder, deepest first.
        skips = [self.conv1(x3), self.conv2(x2), self.conv3(x1), x1]
        return x4, [s1, s2, s3, s4], skips
class Decoder(nn.Module):
    """U-net expanding path: four `d_block`s mirroring the Encoder; the last
    one maps to `num_instruments` channels and applies no activation."""

    def __init__(self, ds_ksize, ds_stride):
        super(Decoder, self).__init__()
        self.d_block1 = d_block(192, 64, False, (3, 3), (1, 1), ds_ksize, ds_stride)
        self.d_block2 = d_block(96, 32, False, (3, 3), (1, 1), ds_ksize, ds_stride)
        self.d_block3 = d_block(48, 16, False, (3, 3), (1, 1), ds_ksize, ds_stride)
        self.d_block4 = d_block(16, num_instruments, True, (3, 3), (1, 1), ds_ksize, ds_stride)

    def forward(self, x, s, c=[None, None, None, None]):
        # Walk back up the U-net: sizes `s` deepest-last, skips `c` deepest-first.
        out = self.d_block1(x, s[3], False, c[0])
        out = self.d_block2(out, s[2], False, c[1])
        out = self.d_block3(out, s[1], False, c[2])
        out = self.d_block4(out, s[0], True, c[3])
        return out  # raw features; callers apply sigmoid if needed
class Spec2Roll(nn.Module):
    """Spectrogram -> piano roll: a U-net followed by multi-head attention
    and a sigmoid-activated 88-key linear classifier."""

    def __init__(self, ds_ksize, ds_stride, complexity=4):
        super().__init__()
        self.Unet1_encoder = Encoder(ds_ksize, ds_stride)
        self.Unet1_decoder = Decoder(ds_ksize, ds_stride)
        self.lstm1 = MutliHeadAttention1D(N_BINS, N_BINS*complexity, 31, position=True, groups=complexity)
        self.linear1 = nn.Linear(N_BINS*complexity, 88)

    def forward(self, x):
        feat, sizes, skips = self.Unet1_encoder(x)
        feat = self.Unet1_decoder(feat, sizes, skips)
        feat, attn = self.lstm1(feat.squeeze(1))  # drop the channel dim
        pianoroll = torch.sigmoid(self.linear1(feat))
        return pianoroll, attn
class Roll2Spec(nn.Module):
    """Piano roll -> spectrogram: attention expands the 88-key roll to
    N_BINS features, then a second U-net predicts the spectrogram."""

    def __init__(self, ds_ksize, ds_stride, complexity=4):
        super().__init__()
        self.Unet2_encoder = Encoder(ds_ksize, ds_stride)
        self.Unet2_decoder = Decoder(ds_ksize, ds_stride)
        self.lstm2 = MutliHeadAttention1D(88, N_BINS*complexity, 31, position=True, groups=4)
        self.linear2 = nn.Linear(N_BINS*complexity, N_BINS)

    def forward(self, x):
        expanded, attn = self.lstm2(x)
        expanded = torch.sigmoid(self.linear2(expanded))
        feat, sizes, skips = self.Unet2_encoder(expanded.unsqueeze(1))
        reconstruction = self.Unet2_decoder(feat, sizes, skips)
        return reconstruction, attn
class Reconstructor(nn.Module):
    """Standalone wrapper that trains Roll2Spec alone: reconstruct the
    (log, normalized) mel spectrogram from the ground-truth frame roll."""

    def __init__(self, ds_ksize, ds_stride):
        super().__init__()
        self.reconstructor = Roll2Spec(ds_ksize, ds_stride)
        self.spectrogram = Spectrogram.MelSpectrogram(sr=SAMPLE_RATE, win_length=WINDOW_LENGTH, n_mels=N_BINS,
                                                      hop_length=HOP_LENGTH, fmin=MEL_FMIN, fmax=MEL_FMAX,
                                                      trainable_mel=False, trainable_STFT=False)
        self.normalize = Normalization('imagewise')

    def forward(self, x):
        reconstruction, attn = self.reconstructor(x)
        return reconstruction, attn

    def run_on_batch(self, batch, batch_ul=None, VAT=False):
        """Reconstruction BCE between the predicted and actual spectrogram.
        `batch_ul` and `VAT` are accepted for API compatibility only."""
        audio_label = batch['audio']
        onset_label = batch['onset']
        frame_label = batch['frame']
        # Audio -> log-mel -> normalized, shaped (batch, 1, frames, bins).
        spec = self.spectrogram(audio_label.reshape(-1, audio_label.shape[-1])[:, :-1])
        spec = torch.log(spec + 1e-5)
        spec = self.normalize.transform(spec)
        spec = spec.transpose(-1, -2).unsqueeze(1)
        reconstruction, attn = self(frame_label)
        predictions = {
            'attention': attn,
            'reconstruction': reconstruction,
        }
        losses = {
            'loss/train_reconstruction': F.binary_cross_entropy(reconstruction.squeeze(1), spec.squeeze(1).detach()),
        }
        return predictions, losses, spec.squeeze(1)
class UNet(nn.Module):
    """Semi-supervised U-net transcription model (ReconVAT-style).

    Spectrogram -> Spec2Roll transcriber; when `reconstruction` is enabled,
    a Roll2Spec reconstructor predicts the spectrogram back and the
    reconstruction is transcribed a second time (pianoroll2) for a
    consistency loss.  VAT terms come from `UNet_VAT`.
    """
    def __init__(self, ds_ksize, ds_stride, log=True, reconstruction=True, mode='imagewise', spec='CQT', device='cpu', XI=1e-6, eps=1e-2):
        """Build the spectrogram front-end and the U-nets.

        NOTE: mutates the module-global N_BINS for 'CQT'/'CFP' so that
        Spec2Roll/Roll2Spec (constructed afterwards) pick up the new bin
        count.  `device` is accepted but not used in this body — confirm
        whether callers rely on it.
        """
        super().__init__()
        global N_BINS # using the N_BINS parameter from constant.py
        # Selecting the type of spectrogram to use
        if spec == 'CQT':
            r=2
            N_BINS = 88*r
            self.spectrogram = Spectrogram.CQT1992v2(sr=SAMPLE_RATE, hop_length=HOP_LENGTH,
                                                  n_bins=N_BINS, fmin=27.5,
                                                  bins_per_octave=12*r, trainable=False)
        elif spec == 'Mel':
            self.spectrogram = Spectrogram.MelSpectrogram(sr=SAMPLE_RATE, win_length=WINDOW_LENGTH, n_mels=N_BINS,
                                                          hop_length=HOP_LENGTH, fmin=MEL_FMIN, fmax=MEL_FMAX,
                                                          trainable_mel=False, trainable_STFT=False)
        elif spec == 'CFP':
            self.spectrogram = Spectrogram.CFP(fs=SAMPLE_RATE,
                                               fr=4,
                                               window_size=WINDOW_LENGTH,
                                               hop_length=HOP_LENGTH,
                                               fc=MEL_FMIN,
                                               tc=1/MEL_FMAX)
            N_BINS = self.spectrogram.quef2logfreq_matrix.shape[0]
        else:
            print(f'Please select a correct spectrogram')
        self.log = log
        self.normalize = Normalization(mode)
        self.reconstruction = reconstruction
        self.vat_loss = UNet_VAT(XI, eps, 1, False)
        # self.Unet1_encoder = Encoder(ds_ksize, ds_stride)
        # self.Unet1_decoder = Decoder(ds_ksize, ds_stride)
        # self.lstm1 = MutliHeadAttention1D(N_BINS, N_BINS*4, 31, position=True, groups=4)
        # # self.lstm1 = nn.LSTM(N_BINS, N_BINS, batch_first=True, bidirectional=True)
        # self.linear1 = nn.Linear(N_BINS*4, 88)
        self.transcriber = Spec2Roll(ds_ksize, ds_stride)
        if reconstruction==True:
            # self.Unet2_encoder = Encoder(ds_ksize, ds_stride)
            # self.Unet2_decoder = Decoder(ds_ksize, ds_stride)
            # # self.lstm2 = nn.LSTM(88, N_BINS, batch_first=True, bidirectional=True)
            # self.lstm2 = MutliHeadAttention1D(88, N_BINS*4, 31, position=True, groups=4)
            # self.linear2 = nn.Linear(N_BINS*4, N_BINS)
            self.reconstructor = Roll2Spec(ds_ksize, ds_stride)
    def forward(self, x):
        """Returns (reconstruction, pianoroll, pianoroll2, attention) when
        self.reconstruction is True, otherwise (pianoroll, attention)."""
        # U-net 1
        pianoroll, a = self.transcriber(x)
        if self.reconstruction:
            # U-net 2
            reconstruction, a_reconstruct = self.reconstructor(pianoroll)
            # Applying U-net 1 to the reconstructed spectrograms
            pianoroll2, a_2 = self.transcriber(reconstruction)
            # # U-net2
            # x, h = self.lstm2(pianoroll)
            # feat2= torch.sigmoid(self.linear2(x)) # ToDo, remove the sigmoid activation and see if we get a better result
            # x,s,c = self.Unet2_encoder(feat2.unsqueeze(1))
            # reconstruction = self.Unet2_decoder(x,s,c) # predict roll
            # # Applying U-net 1 to the reconstructed spectrograms
            # x,s,c = self.Unet1_encoder(reconstruction)
            # feat1b = self.Unet1_decoder(x,s,c)
            # x, h = self.lstm1(feat1b.squeeze(1)) # remove the channel dim
            # pianoroll2 = torch.sigmoid(self.linear1(x)) # Use the full LSTM output
            return reconstruction, pianoroll, pianoroll2, a
        else:
            return pianoroll, a
    def run_on_batch(self, batch, batch_ul=None, VAT=False):
        """Compute predictions and losses for a labelled batch; optionally
        add VAT terms from the labelled batch and/or an unlabelled batch.
        Returns (predictions, losses, spec)."""
        audio_label = batch['audio']
        onset_label = batch['onset']
        frame_label = batch['frame']
        if frame_label.dim() == 2:
            frame_label = frame_label.unsqueeze(0)
        if batch_ul:
            # Unlabelled branch: only a VAT smoothness term, no supervised loss.
            audio_label_ul = batch_ul['audio']
            spec = self.spectrogram(audio_label_ul.reshape(-1, audio_label.shape[-1])[:, :-1]) # x = torch.rand(8,229, 640)
            if self.log:
                spec = torch.log(spec + 1e-5)
            spec = self.normalize.transform(spec)
            spec = spec.transpose(-1,-2).unsqueeze(1)
            lds_ul, _, r_norm_ul = self.vat_loss(self, spec)
        else:
            lds_ul = torch.tensor(0.)
            r_norm_ul = torch.tensor(0.)
        # Converting audio to spectrograms
        spec = self.spectrogram(audio_label.reshape(-1, audio_label.shape[-1])[:, :-1]) # x = torch.rand(8,229, 640)
        # log compression
        if self.log:
            spec = torch.log(spec + 1e-5)
        # Normalizing spectrograms
        spec = self.normalize.transform(spec)
        # swap spec bins with timesteps so that it fits LSTM later
        spec = spec.transpose(-1,-2).unsqueeze(1) # shape (8,1,640,229)
        if VAT:
            lds_l, r_adv, r_norm_l = self.vat_loss(self, spec)
            r_adv = r_adv.squeeze(1) # remove the channel dimension
        else:
            r_adv = None
            lds_l = torch.tensor(0.)
            r_norm_l = torch.tensor(0.)
        if self.reconstruction:
            reconstrut, pianoroll, pianoroll2, a = self(spec)
            if self.training:
                predictions = {
                    'onset': pianoroll,
                    'frame': pianoroll,
                    'frame2':pianoroll2,
                    'onset2':pianoroll2,
                    'attention': a,
                    'r_adv': r_adv,
                    'reconstruction': reconstrut,
                }
                losses = {
                    'loss/train_reconstruction': F.mse_loss(reconstrut.squeeze(1), spec.squeeze(1).detach()),
                    'loss/train_frame': F.binary_cross_entropy(predictions['frame'].squeeze(1), frame_label),
                    'loss/train_frame2': F.binary_cross_entropy(predictions['frame2'].squeeze(1), frame_label),
                    'loss/train_LDS_l': lds_l,
                    'loss/train_LDS_ul': lds_ul,
                    'loss/train_r_norm_l': r_norm_l.abs().mean(),
                    'loss/train_r_norm_ul': r_norm_ul.abs().mean()
                }
            else:
                predictions = {
                    'onset': pianoroll.reshape(*frame_label.shape),
                    'frame': pianoroll.reshape(*frame_label.shape),
                    'frame2':pianoroll2.reshape(*frame_label.shape),
                    'onset2':pianoroll2.reshape(*frame_label.shape),
                    'attention': a,
                    'r_adv': r_adv,
                    'reconstruction': reconstrut,
                }
                losses = {
                    'loss/test_reconstruction': F.mse_loss(reconstrut.squeeze(1), spec.squeeze(1).detach()),
                    'loss/test_frame': F.binary_cross_entropy(predictions['frame'].squeeze(1), frame_label),
                    'loss/test_frame2': F.binary_cross_entropy(predictions['frame2'].squeeze(1), frame_label),
                    'loss/test_LDS_l': lds_l,
                    'loss/test_r_norm_l': r_norm_l.abs().mean()
                }
            return predictions, losses, spec.squeeze(1)
        else:
            frame_pred, a = self(spec)
            if self.training:
                predictions = {
                    'onset': frame_pred,
                    'frame': frame_pred,
                    'r_adv': r_adv,
                    'attention': a,
                }
                losses = {
                    'loss/train_frame': F.binary_cross_entropy(predictions['frame'].squeeze(1), frame_label),
                    'loss/train_LDS_l': lds_l,
                    'loss/train_LDS_ul': lds_ul,
                    'loss/train_r_norm_l': r_norm_l.abs().mean(),
                    'loss/train_r_norm_ul': r_norm_ul.abs().mean()
                }
            else:
                predictions = {
                    'onset': frame_pred.reshape(*frame_label.shape),
                    'frame': frame_pred.reshape(*frame_label.shape),
                    'r_adv': r_adv,
                    'attention': a,
                }
                losses = {
                    'loss/test_frame': F.binary_cross_entropy(predictions['frame'].squeeze(1), frame_label),
                    'loss/test_LDS_l': lds_l,
                    'loss/test_r_norm_l': r_norm_l.abs().mean()
                }
            return predictions, losses, spec.squeeze(1)
    def run_on_batch_application(self, batch, batch_ul=None, VAT=False):
        """Like run_on_batch but additionally transcribes the unlabelled
        batch and adds a teacher-student consistency loss between the two
        unlabelled transcriptions.  Assumes self.reconstruction is True."""
        audio_label = batch['audio']
        onset_label = batch['onset']
        frame_label = batch['frame']
        if frame_label.dim() == 2:
            frame_label = frame_label.unsqueeze(0)
        if batch_ul:
            audio_label_ul = batch_ul['audio']
            spec = self.spectrogram(audio_label_ul.reshape(-1, audio_label.shape[-1])[:, :-1]) # x = torch.rand(8,229, 640)
            if self.log:
                spec = torch.log(spec + 1e-5)
            spec = self.normalize.transform(spec)
            spec = spec.transpose(-1,-2).unsqueeze(1)
            lds_ul, _, r_norm_ul = self.vat_loss(self, spec)
        else:
            lds_ul = torch.tensor(0.)
            r_norm_ul = torch.tensor(0.)
        # NOTE(review): `spec` here is the *unlabelled* spectrogram from the
        # branch above; if batch_ul is None/empty this line raises NameError
        # (spec unbound), and ul_pianoroll/ul_pianoroll2 are needed below in
        # training mode — this call probably belongs inside `if batch_ul:`.
        # Confirm intended usage before relying on this method without
        # an unlabelled batch.
        reconstrut, ul_pianoroll, ul_pianoroll2, a = self(spec)
        # Converting audio to spectrograms
        spec = self.spectrogram(audio_label.reshape(-1, audio_label.shape[-1])[:, :-1]) # x = torch.rand(8,229, 640)
        # log compression
        if self.log:
            spec = torch.log(spec + 1e-5)
        # Normalizing spectrograms
        spec = self.normalize.transform(spec)
        # swap spec bins with timesteps so that it fits LSTM later
        spec = spec.transpose(-1,-2).unsqueeze(1) # shape (8,1,640,229)
        if VAT:
            lds_l, r_adv, r_norm_l = self.vat_loss(self, spec)
            r_adv = r_adv.squeeze(1) # remove the channel dimension
        else:
            r_adv = None
            lds_l = torch.tensor(0.)
            r_norm_l = torch.tensor(0.)
        reconstrut, pianoroll, pianoroll2, a = self(spec)
        if self.training:
            predictions = {
                'onset': pianoroll,
                'frame': pianoroll,
                'frame2':pianoroll2,
                'onset2':pianoroll2,
                'ul_frame': ul_pianoroll,
                'ul_frame2': ul_pianoroll2,
                'attention': a,
                'r_adv': r_adv,
                'reconstruction': reconstrut,
            }
            losses = {
                'loss/train_reconstruction': F.mse_loss(reconstrut.squeeze(1), spec.squeeze(1).detach()),
                'loss/train_frame': F.binary_cross_entropy(predictions['frame'].squeeze(1), frame_label),
                'loss/train_frame2': F.binary_cross_entropy(predictions['frame2'].squeeze(1), frame_label),
                # 'loss/ul_consistency_wrt2': F.binary_cross_entropy(predictions['ul_frame'].squeeze(1), predictions['ul_frame2'].squeeze(1).detach()),
                'loss/ul_consistency_wrt1': F.binary_cross_entropy(predictions['ul_frame2'].squeeze(1), predictions['ul_frame'].squeeze(1).detach()),
                'loss/train_LDS_l': lds_l,
                'loss/train_LDS_ul': lds_ul,
                'loss/train_r_norm_l': r_norm_l.abs().mean(),
                'loss/train_r_norm_ul': r_norm_ul.abs().mean()
            }
        else:
            predictions = {
                'onset': pianoroll.reshape(*frame_label.shape),
                'frame': pianoroll.reshape(*frame_label.shape),
                'frame2':pianoroll2.reshape(*frame_label.shape),
                'onset2':pianoroll2.reshape(*frame_label.shape),
                'attention': a,
                'r_adv': r_adv,
                'reconstruction': reconstrut,
            }
            losses = {
                'loss/test_reconstruction': F.mse_loss(reconstrut.squeeze(1), spec.squeeze(1).detach()),
                'loss/test_frame': F.binary_cross_entropy(predictions['frame'].squeeze(1), frame_label),
                'loss/test_frame2': F.binary_cross_entropy(predictions['frame2'].squeeze(1), frame_label),
                'loss/test_LDS_l': lds_l,
                'loss/test_r_norm_l': r_norm_l.abs().mean()
            }
        return predictions, losses, spec.squeeze(1)
    def transcribe(self, batch):
        """Inference-only transcription of a batch of raw audio.
        Assumes self.reconstruction is True (unpacks four forward outputs)."""
        audio_label = batch['audio']
        # Converting audio to spectrograms
        spec = self.spectrogram(audio_label.reshape(-1, audio_label.shape[-1])[:, :-1]) # x = torch.rand(8,229, 640)
        # log compression
        if self.log:
            spec = torch.log(spec + 1e-5)
        # Normalizing spectrograms
        spec = self.normalize.transform(spec)
        # swap spec bins with timesteps so that it fits LSTM later
        spec = spec.transpose(-1,-2).unsqueeze(1) # shape (8,1,640,229)
        reconstrut, pianoroll, pianoroll2, a = self(spec)
        predictions = {
            'onset': pianoroll,
            'frame': pianoroll,
        }
        return predictions
    def load_my_state_dict(self, state_dict):
        """Useful when loading part of the weights. From https://discuss.pytorch.org/t/how-to-load-part-of-pre-trained-model/1113/2"""
        own_state = self.state_dict()
        for name, param in state_dict.items():
            if name not in own_state:
                continue
            if isinstance(param, nn.Parameter):
                # backwards compatibility for serialized parameters
                param = param.data
            own_state[name].copy_(param)
# ReconVAT — ReconVAT-master/model/VAT.py
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init
from nnAudio import Spectrogram
from .constants import *
from model.utils import Normalization
class stepwise_VAT(nn.Module):
    """
    Virtual Adversarial Training (VAT) regularizer.

    Estimates a small perturbation r_adv (power-iteration style) that
    maximally changes the model's frame predictions, and returns the BCE
    between the clean predictions and the predictions under x + r_adv.
    """
    def __init__(self, XI, epsilon, n_power):
        # XI:      step size while estimating the adversarial direction
        # epsilon: radius of the final adversarial perturbation
        # n_power: number of power iterations.  NOTE(review): after the first
        #          iteration `d` is re-bound to a detached tensor, so a second
        #          backward would find `d.grad` is None; works as written only
        #          for n_power == 1 — confirm before raising it.
        super().__init__()
        self.n_power = n_power
        self.XI = XI
        self.epsilon = epsilon
    def forward(self, model, x):
        # Returns (vat_loss, r_adv).
        with torch.no_grad():
            y_ref, _ = model(x) # This will be used as a label, therefore no need grad()
        # generate_virtual_adversarial_perturbation
        d = torch.randn_like(x, requires_grad=True) # Need gradient
        for _ in range(self.n_power):
            r = self.XI * _l2_normalize(d)
            y_pred, _ = model(x + r)
            dist =F.binary_cross_entropy(y_pred, y_ref)
            dist.backward() # Calculate gradient wrt d
            d = d.grad.detach()
            model.zero_grad() # prevent gradient change in the model
        # generating virtual labels and calculate VAT
        r_adv = self.epsilon * _l2_normalize(d)
        # logit_p = logit.detach()
        y_pred, _ = model(x + r_adv)
        vat_loss = F.binary_cross_entropy(y_pred, y_ref)
        return vat_loss, r_adv # already averaged
def _l2_normalize(d):
d = d/torch.norm(d, dim=2, keepdim=True)
return d | 1,478 | 32.613636 | 88 | py |
# ReconVAT — ReconVAT-master/model/Unet_prestack.py
import torch
from torch.nn.functional import conv1d, mse_loss
import torch.nn.functional as F
import torch.nn as nn
from nnAudio import Spectrogram
from .constants import *
from model.utils import Normalization
batchNorm_momentum = 0.1  # momentum shared by every BatchNorm2d in the U-net blocks below
num_instruments = 1  # output channels of the last decoder block (presumably one roll per instrument)
class block(nn.Module):
    """Two padded convs with batch-norm and leaky-ReLU, a 1x1 residual skip,
    and a strided conv for downsampling.  Returns the downsampled map twice
    plus the pre-downsample feature size."""

    def __init__(self, inp, out, ksize, pad, ds_ksize, ds_stride):
        super().__init__()
        self.conv1 = nn.Conv2d(inp, out, kernel_size=ksize, padding=pad)
        self.bn1 = nn.BatchNorm2d(out, momentum=batchNorm_momentum)
        self.conv2 = nn.Conv2d(out, out, kernel_size=ksize, padding=pad)
        self.bn2 = nn.BatchNorm2d(out, momentum=batchNorm_momentum)
        self.skip = nn.Conv2d(inp, out, kernel_size=1, padding=0)
        self.ds = nn.Conv2d(out, out, kernel_size=ds_ksize, stride=ds_stride, padding=0)

    def forward(self, x):
        y = F.leaky_relu(self.bn1(self.conv1(x)))
        y = F.leaky_relu(self.bn2(self.conv2(y)))
        y = y + self.skip(x)  # residual connection via 1x1 projection
        down = self.ds(y)
        return down, down, y.size()
class d_block(nn.Module):
    """U-net up-block: transposed-conv upsampling, optional skip concat and
    batch-norm; when `isLast`, emits raw features with no final norm/activation."""

    def __init__(self, inp, out, isLast, ksize, pad, ds_ksize, ds_stride):
        super().__init__()
        half = int(inp / 2)
        self.conv2d = nn.ConvTranspose2d(inp, half, kernel_size=ksize, padding=pad)
        self.bn2d = nn.BatchNorm2d(half, momentum=batchNorm_momentum)
        self.conv1d = nn.ConvTranspose2d(half, out, kernel_size=ksize, padding=pad)
        if not isLast:
            self.bn1d = nn.BatchNorm2d(out, momentum=batchNorm_momentum)
            # Upsampler sees only the non-skip channels; skip is concatenated after.
            self.us = nn.ConvTranspose2d(inp - out, inp - out, kernel_size=ds_ksize, stride=ds_stride)
        else:
            self.us = nn.ConvTranspose2d(inp, inp, kernel_size=ds_ksize, stride=ds_stride)

    def forward(self, x, size=None, isLast=None, skip=None):
        up = self.us(x, output_size=size)
        if not isLast:
            up = torch.cat((up, skip), 1)
        up = F.leaky_relu(self.bn2d(self.conv2d(up)))
        if isLast:
            return self.conv1d(up)
        return F.leaky_relu(self.bn1d(self.conv1d(up)))
class Encoder(nn.Module):
    """U-net contracting path: four residual down-blocks that yield the
    bottleneck, per-level sizes, and skip tensors for the decoder."""

    def __init__(self, ds_ksize, ds_stride):
        super(Encoder, self).__init__()
        self.block1 = block(1, 16, (3, 3), (1, 1), ds_ksize, ds_stride)
        self.block2 = block(16, 32, (3, 3), (1, 1), ds_ksize, ds_stride)
        self.block3 = block(32, 64, (3, 3), (1, 1), ds_ksize, ds_stride)
        self.block4 = block(64, 128, (3, 3), (1, 1), ds_ksize, ds_stride)
        self.conv1 = nn.Conv2d(64, 64, kernel_size=(3, 3), padding=(1, 1))
        self.conv2 = nn.Conv2d(32, 32, kernel_size=(3, 3), padding=(1, 1))
        self.conv3 = nn.Conv2d(16, 16, kernel_size=(3, 3), padding=(1, 1))

    def forward(self, x):
        sizes = []
        feats = []
        for down in (self.block1, self.block2, self.block3, self.block4):
            x, _, pre_size = down(x)
            feats.append(x)
            sizes.append(pre_size)
        x1, x2, x3, x4 = feats
        # Bottleneck, pre-downsample sizes, and skip connections (deepest first).
        return x4, sizes, [self.conv1(x3), self.conv2(x2), self.conv3(x1), x1]
class Decoder(nn.Module):
    """U-net expanding path mirroring the Encoder; the last d_block maps to
    `num_instruments` channels and applies no activation."""

    def __init__(self, ds_ksize, ds_stride):
        super(Decoder, self).__init__()
        self.d_block1 = d_block(192, 64, False, (3, 3), (1, 1), ds_ksize, ds_stride)
        self.d_block2 = d_block(96, 32, False, (3, 3), (1, 1), ds_ksize, ds_stride)
        self.d_block3 = d_block(48, 16, False, (3, 3), (1, 1), ds_ksize, ds_stride)
        self.d_block4 = d_block(16, num_instruments, True, (3, 3), (1, 1), ds_ksize, ds_stride)

    def forward(self, x, s, c=[None, None, None, None]):
        # Sizes `s` are deepest-last, skips `c` deepest-first.
        stages = ((self.d_block1, s[3], False, c[0]),
                  (self.d_block2, s[2], False, c[1]),
                  (self.d_block3, s[1], False, c[2]),
                  (self.d_block4, s[0], True, c[3]))
        for layer, size, last, skip in stages:
            x = layer(x, size, last, skip)
        return x  # raw features; callers apply sigmoid if needed
class Prestack(nn.Module):
    """Bare U-net (encoder + decoder) used as a feature front-end.
    `complexity` is accepted for API compatibility but unused."""

    def __init__(self, ds_ksize, ds_stride, complexity=4):
        super().__init__()
        self.Unet1_encoder = Encoder(ds_ksize, ds_stride)
        self.Unet1_decoder = Decoder(ds_ksize, ds_stride)

    def forward(self, x):
        feat, sizes, skips = self.Unet1_encoder(x)
        return self.Unet1_decoder(feat, sizes, skips)
class Prestack_Model(nn.Module):
    """U-net front-end stacked with a torchvision ResNet that predicts 88
    piano keys from sliding 25-frame spectrogram windows.

    NOTE: `torch.hub.load` fetches the torchvision repo, so construction
    needs network access (or a warm torch.hub cache).
    """

    def __init__(self, model='resnet18'):
        super().__init__()
        unet = Prestack((3,3),(1,1))
        # Adapt the ResNet to 1-channel input and an 88-key output head.
        resnet = torch.hub.load('pytorch/vision:v0.9.0', model, pretrained=False)
        resnet.conv1 = torch.nn.Conv1d(1, 64, (7, 7), (2, 2), (3, 3), bias=False)
        resnet.fc = torch.nn.Linear(512, 88, bias=True)
        self.prestack_model = nn.Sequential(unet, resnet)
        self.spectrogram = Spectrogram.MelSpectrogram(sr=SAMPLE_RATE, win_length=WINDOW_LENGTH, n_mels=N_BINS,
                                                      hop_length=HOP_LENGTH, fmin=MEL_FMIN, fmax=MEL_FMAX,
                                                      trainable_mel=False, trainable_STFT=False)
        self.normalize = Normalization('imagewise')

    def forward(self, x):
        return self.prestack_model(x)

    def run_on_batch(self, batch, batch_ul=None, VAT=False):
        """Per-window frame prediction and BCE loss.
        `batch_ul` and `VAT` are accepted for API compatibility and ignored.
        Returns (predictions, losses, spec)."""
        audio_label = batch['audio']
        onset_label = batch['onset']  # unused, read to keep batch-shape expectations explicit
        frame_label = batch['frame']
        if frame_label.dim() == 2:
            frame_label = frame_label.unsqueeze(0)
        # Audio -> log-mel -> normalized spectrogram.
        spec = self.spectrogram(audio_label.reshape(-1, audio_label.shape[-1])[:, :-1])
        spec = torch.log(spec + 1e-5)
        spec = self.normalize.transform(spec)
        # Pad 12 frames each side, then slide a 25-frame window (stride 1, dim 2)
        # so every original frame gets one centered window.
        spec_padded = torch.nn.functional.pad(spec, (12, 12))  # (batch, 229, 640+24)
        spec_padded = spec_padded.unfold(2, 25, 1)
        spec_padded = spec_padded.transpose(1, 2).reshape(-1, 229, 25)  # windows as a batch
        spec_padded = spec_padded.unsqueeze(1)  # 1 channel for the CNN
        frame_pred = torch.zeros(spec_padded.shape[0], 88).to(spec_padded.device)
        # NOTE: one forward pass per window (batch size 1) — slow but memory-safe.
        for idx, window in enumerate(spec_padded):
            frame_pred[idx] = self(window.unsqueeze(0)).squeeze(0)
        frame_pred = torch.sigmoid(frame_pred)
        predictions = {
            'onset': frame_pred,
            'frame': frame_pred,
            'r_adv': None
        }
        try:
            losses = {
                'loss/train_frame': F.binary_cross_entropy(predictions['frame'].squeeze(1), frame_label.reshape(-1,88)),
            }
        except (RuntimeError, ValueError):
            # Fix: the original bare `except:` swallowed the error and then
            # crashed anyway with NameError ('losses' was never assigned).
            # Print diagnostics for debugging, then propagate the real error.
            print('binary_cross_entropy failed on the frame predictions')
            print(f'frame_pred min = {frame_pred.min()}')
            print(f'frame_pred max = {frame_pred.max()}')
            raise
        return predictions, losses, spec.squeeze(1)
ReconVAT | ReconVAT-master/model/onset_frame_VAT.py | """
A rough translation of Magenta's Onsets and Frames implementation [1].
[1] https://github.com/tensorflow/magenta/blob/master/magenta/models/onsets_frames_transcription/model.py
"""
import torch
import torch.nn.functional as F
from torch import nn
from nnAudio import Spectrogram
from .constants import *
from model.utils import Normalization
from torch.nn.utils import clip_grad_norm_
import torch.nn.init as init
class MutliHeadAttention1D(nn.Module):
    """Multi-head *local* self-attention over a 1-D sequence.

    Each position attends only to a window of ``kernel_size`` neighbouring
    positions (extracted with ``Tensor.unfold``), split across ``groups``
    attention heads, with an optional learned relative-position bias.
    Input shape: (batch, len, in_features); output: a tuple of
    (attended features of shape (batch, len, out_features), attention weights).
    """
    def __init__(self, in_features, out_features, kernel_size, stride=1, groups=1, position=True, bias=False):
        """kernel_size is the 1D local attention window size"""
        super().__init__()
        self.out_features = out_features
        self.kernel_size = kernel_size
        self.stride = stride
        self.position = position
        # Padding should always be (kernel_size-1)/2
        # Isn't it?
        self.padding = (kernel_size-1)//2
        self.groups = groups  # number of attention heads
        # Make sure the feature dim is divisible by the n_heads
        assert self.out_features % self.groups == 0, "out_channels should be divided by groups. (example: out_channels: 40, groups: 4)"
        assert (kernel_size-1) % 2 == 0, "kernal size must be odd number"
        if self.position:
            # Relative position encoding
            self.rel = nn.Parameter(torch.randn(1, out_features, kernel_size), requires_grad=True)
        # Input shape = (batch, len, feat)
        # Increasing the channel deapth (feature dim) with Conv2D
        # kernel_size=1 such that it expands only the feature dim
        # without affecting other dimensions
        self.W_k = nn.Linear(in_features, out_features, bias=bias)
        self.W_q = nn.Linear(in_features, out_features, bias=bias)
        self.W_v = nn.Linear(in_features, out_features, bias=bias)
        self.reset_parameters()
    def forward(self, x):
        batch, seq_len, feat_dim = x.size()
        # Zero-pad along the sequence axis so every position has a full window.
        padded_x = F.pad(x, [0, 0, self.padding, self.padding])
        q_out = self.W_q(x)
        k_out = self.W_k(padded_x)
        v_out = self.W_v(padded_x)
        # Extract a local window of keys/values per query position.
        k_out = k_out.unfold(1, self.kernel_size, self.stride)
        # (batch, L, feature, local_window)
        v_out = v_out.unfold(1, self.kernel_size, self.stride)
        # (batch, L, feature, local_window)
        if self.position:
            k_out = k_out + self.rel # relative position?
        # Split the feature dimension into (n_heads, feature_per_head).
        k_out = k_out.contiguous().view(batch, seq_len, self.groups, self.out_features // self.groups, -1)
        v_out = v_out.contiguous().view(batch, seq_len, self.groups, self.out_features // self.groups, -1)
        # (batch, L, n_heads, feature_per_head, local_window)
        # expand the last dimension s.t. it can multiple with the local att window
        q_out = q_out.view(batch, seq_len, self.groups, self.out_features // self.groups, 1)
        # (batch, L, n_heads, feature_per_head, 1)
        # Dot product of query with each key in the window, softmaxed over the window.
        energy = (q_out * k_out).sum(-2, keepdim=True)
        attention = F.softmax(energy, dim=-1)
        # (batch, L, n_heads, 1, local_window)
        out = attention*v_out
        # out = torch.einsum('blnhk,blnhk -> blnh', attention, v_out).view(batch, seq_len, -1)
        # (batch, c, H, W)
        # Sum over the window, then merge heads back into one feature dim.
        return out.sum(-1).flatten(2), attention.squeeze(3)
    def reset_parameters(self):
        # Kaiming init for the three projections; N(0, 1) for the position bias.
        init.kaiming_normal_(self.W_k.weight, mode='fan_out', nonlinearity='relu')
        init.kaiming_normal_(self.W_v.weight, mode='fan_out', nonlinearity='relu')
        init.kaiming_normal_(self.W_q.weight, mode='fan_out', nonlinearity='relu')
        if self.position:
            init.normal_(self.rel, 0, 1)
# class stepwise_VAT(nn.Module):
# """
# We define a function of regularization, specifically VAT.
# """
# def __init__(self, XI, epsilon, n_power, VAT_mode):
# super().__init__()
# self.n_power = n_power
# self.XI = XI
# self.epsilon = epsilon
# self.VAT_mode = VAT_mode
# def forward(self, model, x):
# with torch.no_grad():
# onset_ref, activation_ref, frame_ref = model(x) # This will be used as a label, therefore no need grad()
# # generate_virtual_adversarial_perturbation
# d = torch.randn_like(x, requires_grad=True) # Need gradient
# for _ in range(self.n_power):
# r = self.XI * _l2_normalize(d)
# onset_pred, activation_pred, frame_pred = model(x + r)
# dist_onset =F.binary_cross_entropy(onset_pred, onset_ref)
# dist_activation =F.binary_cross_entropy(activation_pred, activation_ref)
# dist_frame =F.binary_cross_entropy(frame_pred, frame_ref)
# if self.VAT_mode == 'onset':
# dist = dist_onset
# elif self.VAT_mode == 'activation':
# dist = dist_activation
# elif self.VAT_mode == 'frame':
# dist = dist_frame
# elif self.VAT_mode == 'all':
# dist = dist_frame + dist_activation + dist_onset
# dist.backward() # Calculate gradient wrt d
# d = d.grad.detach()
# model.zero_grad() # prevent gradient change in the model
# # generating virtual labels and calculate VAT
# r_adv = self.epsilon * _l2_normalize(d)
# onset_pred, activation_pred, frame_pred = model(x + r_adv)
# vat_onset =F.binary_cross_entropy(onset_pred, onset_ref)
# vat_activation =F.binary_cross_entropy(activation_pred, activation_ref)
# vat_frame =F.binary_cross_entropy(frame_pred, frame_ref)
# if self.VAT_mode == 'onset':
# vat_loss = vat_onset
# elif self.VAT_mode == 'activation':
# vat_loss = vat_activation
# elif self.VAT_mode == 'frame':
# vat_loss = vat_frame
# elif self.VAT_mode == 'all':
# vat_loss = vat_frame + vat_activation + vat_onset
# return vat_loss, r_adv # already averaged
def binary_kl_div(y_pred, y_ref):
    """KL divergence between two per-element Bernoulli distributions.

    Each probability p is expanded to the 2-class distribution (p, 1-p) and the
    result is reduced with ``reduction='batchmean'``.

    BUG FIX: the previous lower clamp of 0 let ``p.log()`` produce -inf when a
    reference probability was exactly 0 or 1, making the loss infinite. Clamp
    away from both endpoints instead.
    """
    y_pred = torch.clamp(y_pred, 1e-4, 0.9999)  # keep log() finite on both sides
    y_ref = torch.clamp(y_ref, 1e-4, 0.9999)
    q = torch.stack((y_pred, 1 - y_pred), -1)
    p = torch.stack((y_ref, 1 - y_ref), -1)
    # NOTE(review): F.kl_div(input, target) treats `input` as log-probabilities
    # of the *prediction*; here the reference goes in as input -- preserved as-is.
    return F.kl_div(p.log(), q, reduction='batchmean')
class stepwise_VAT(nn.Module):
    """
    Virtual Adversarial Training (VAT) regularizer for the full
    onset/activation/frame model.

    Finds, by power iteration, the input perturbation ``r_adv`` (of magnitude
    ``epsilon``) that most changes the frame predictions, then returns the
    divergence between predictions on the clean and perturbed input
    (the local distributional smoothness, LDS).
    """
    def __init__(self, XI, epsilon, n_power, KL_Div):
        """
        XI: step size for the power-iteration probe.
        epsilon: magnitude of the final adversarial perturbation.
        n_power: number of power-iteration steps.
        KL_Div: if True use binary KL divergence, else binary cross-entropy.
        """
        super().__init__()
        self.n_power = n_power
        self.XI = XI
        self.epsilon = epsilon
        self.KL_Div = KL_Div
        # NOTE(review): both branches set binwise=False -- the KL_Div check here
        # is dead code; presumably one branch was meant to be True. Confirm.
        if KL_Div==True:
            self.binwise = False
        else:
            self.binwise = False
    def forward(self, model, x):
        """Return (vat_loss, r_adv, normalized gradient direction)."""
        with torch.no_grad():
            onset_ref, activation_ref, frame_ref = model(x) # This will be used as a label, therefore no need grad()
        # generate_virtual_adversarial_perturbation
        d = torch.randn_like(x, requires_grad=True) # Need gradient
        # NOTE(review): after the first iteration d is replaced by a detached
        # gradient, so n_power > 1 would fail at loss.backward(); this class is
        # instantiated with n_power=1 elsewhere in this file.
        for _ in range(self.n_power):
            r = self.XI * _l2_normalize(d, binwise=self.binwise)
            # Inputs are normalized spectrograms; keep the probe inside [0, 1].
            x_adv = (x + r).clamp(0,1)
            onset_pred, activation_pred, frame_pred = model(x_adv)
            if self.KL_Div==True:
                loss = binary_kl_div(frame_pred, frame_ref)
            else:
                loss =F.binary_cross_entropy(frame_pred, frame_ref)
            loss.backward() # Calculate gradient wrt d
            # 1e10 rescaling guards against vanishing gradient magnitudes
            # before the l2-normalization below.
            d = d.grad.detach()*1e10
            model.zero_grad() # prevent gradient change in the model
        # generating virtual labels and calculate VAT
        r_adv = self.epsilon * _l2_normalize(d, binwise=self.binwise)
        assert torch.isnan(r_adv).any()==False, "r_adv contains nan"
        assert torch.isinf(r_adv).any()==False, "r_adv contains nan"
        # logit_p = logit.detach()
        x_adv = (x + r_adv).clamp(0,1)
        onset_pred, activation_pred, frame_pred = model(x_adv)
        if self.KL_Div==True:
            vat_loss = binary_kl_div(frame_pred, frame_ref)
        else:
            vat_loss = F.binary_cross_entropy(frame_pred, frame_ref)
        return vat_loss, r_adv, _l2_normalize(d*1e8, binwise=self.binwise) # already averaged
class stepwise_VAT_frame_stack(nn.Module):
    """
    Virtual Adversarial Training (VAT) regularizer for models whose forward
    returns (activation_pred, frame_pred), e.g. ``Frame_stack_VAT``.

    ``VAT_mode`` selects which output(s) drive the adversarial direction:
    'activation' (MSE), 'frame' (BCE) or 'all' (sum of both).
    """
    def __init__(self, XI, epsilon, n_power, VAT_mode):
        """
        XI: step size for the power-iteration probe.
        epsilon: magnitude of the final adversarial perturbation.
        n_power: number of power-iteration steps (used with 1 in this file).
        VAT_mode: 'activation' | 'frame' | 'all'.
        """
        super().__init__()
        self.n_power = n_power
        self.XI = XI
        self.epsilon = epsilon
        self.VAT_mode = VAT_mode
    def forward(self, model, x):
        """Return (vat_loss, r_adv) for input spectrogram batch ``x``."""
        with torch.no_grad():
            activation_ref, frame_ref = model(x) # This will be used as a label, therefore no need grad()
        # generate_virtual_adversarial_perturbation
        d = torch.randn_like(x, requires_grad=True) # Need gradient
        for _ in range(self.n_power):
            r = self.XI * _l2_normalize(d, False)
            # Inputs are normalized spectrograms; keep the probe inside [0, 1].
            x_adv = (x+r).clamp(0,1)
            activation_pred, frame_pred = model(x_adv)
            dist_activation = F.mse_loss(activation_pred, activation_ref)
            dist_frame = F.binary_cross_entropy(frame_pred, frame_ref)
            if self.VAT_mode == 'activation':
                dist = dist_activation
            elif self.VAT_mode == 'frame':
                dist = dist_frame
            elif self.VAT_mode == 'all':
                dist = dist_frame + dist_activation
            dist.backward() # Calculate gradient wrt d
            # 1e20 rescaling guards against vanishing gradient magnitudes
            # before the l2-normalization below.
            d = d.grad.detach()*1e20
            model.zero_grad() # prevent gradient change in the model
        # generating virtual labels and calculate VAT
        # print(f'dist = {dist}')
        # print(f'd mean = {d.mean()}\std = {d.std()}')
        # print(f'd norm mean = {_l2_normalize(d).mean()}\tstd = {_l2_normalize(d).std()}')
        r_adv = self.epsilon * _l2_normalize(d, False)
        assert torch.isnan(r_adv).any()==False, "r_adv exploded, please debug tune down the XI for VAT"
        assert torch.isinf(r_adv).any()==False, "r_adv vanished, please debug tune up the XI for VAT"
        x_adv = (x+r_adv).clamp(0,1)
        activation_pred, frame_pred = model(x_adv)
        assert torch.isnan(activation_pred).any()==False, "activation_pred is nan, please debug"
        assert torch.isnan(frame_pred).any()==False, "frame_pred is nan, please debug"
        vat_activation =F.mse_loss(activation_pred, activation_ref)
        vat_frame =F.binary_cross_entropy(frame_pred, frame_ref)
        if self.VAT_mode == 'activation':
            vat_loss = vat_activation
        elif self.VAT_mode == 'frame':
            vat_loss = vat_frame
        elif self.VAT_mode == 'all':
            vat_loss = vat_frame + vat_activation
        return vat_loss, r_adv # already averaged
class stepwise_VAT_onset_stack(nn.Module):
    """
    Virtual Adversarial Training (VAT) regularizer for models whose forward
    returns only onset predictions, e.g. ``Onset_stack_VAT``.

    BUG FIXES relative to the original:
    - ``_l2_normalize`` requires a ``binwise`` argument; it was called with one
      argument, raising a TypeError the first time forward ran. ``False`` is
      used to match ``stepwise_VAT_frame_stack``.
    - The NaN sanity checks referenced ``activation_pred``/``frame_pred``,
      which do not exist in this class (NameError); they now check
      ``onset_pred``.
    """
    def __init__(self, XI, epsilon, n_power, VAT_mode):
        """
        XI: step size for the power-iteration probe.
        epsilon: magnitude of the final adversarial perturbation.
        n_power: number of power-iteration steps (used with 1 in this file).
        VAT_mode: kept for interface parity with the sibling classes (unused).
        """
        super().__init__()
        self.n_power = n_power
        self.XI = XI
        self.epsilon = epsilon
        self.VAT_mode = VAT_mode
    def forward(self, model, x):
        """Return (vat_loss, r_adv) for input spectrogram batch ``x``."""
        with torch.no_grad():
            onset_ref = model(x)  # fixed virtual label; no gradient needed
        # Power iteration to find the most sensitive input direction d.
        d = torch.randn_like(x, requires_grad=True)
        for _ in range(self.n_power):
            r = self.XI * _l2_normalize(d, False)
            onset_pred = model(x + r)
            dist = F.binary_cross_entropy(onset_pred, onset_ref)
            dist.backward()  # gradient w.r.t. d
            d = d.grad.detach()
            model.zero_grad()  # prevent gradient change in the model
        # Generate virtual labels and compute the VAT loss.
        r_adv = self.epsilon * _l2_normalize(d, False)
        assert torch.isnan(r_adv).any()==False, "r_adv exploded, please debug tune down the XI for VAT"
        assert torch.isinf(r_adv).any()==False, "r_adv vanished, please debug tune up the XI for VAT"
        onset_pred = model(x + r_adv)
        assert torch.isnan(onset_pred).any()==False, "onset_pred is nan, please debug"
        vat_loss = F.binary_cross_entropy(onset_pred, onset_ref)
        return vat_loss, r_adv  # already averaged
def _l2_normalize(d, binwise):
# input shape (batch, timesteps, bins, ?)
if binwise==True:
d = d/(torch.abs(d)+1e-8)
else:
d = d/(torch.norm(d, dim=-1, keepdim=True))
return d
class ConvStack(nn.Module):
    """Three-stage 2-D CNN front end followed by a fully-connected projection.

    Input : (batch, frames, input_features) spectrogram.
    Output: (batch, frames, output_features) feature sequence; the feature
    axis is pooled twice by a factor of 2 before the linear layer.
    """
    def __init__(self, input_features, output_features):
        super().__init__()
        narrow = output_features // 16  # channels in the first two conv layers
        wide = output_features // 8     # channels after the third conv layer
        stages = [
            # layer 0
            nn.Conv2d(1, narrow, (3, 3), padding=1),
            nn.BatchNorm2d(narrow),
            nn.ReLU(),
            # layer 1
            nn.Conv2d(narrow, narrow, (3, 3), padding=1),
            nn.BatchNorm2d(narrow),
            nn.ReLU(),
            # layer 2 (halve the feature axis, keep the time axis)
            nn.MaxPool2d((1, 2)),
            nn.Dropout(0.25),
            nn.Conv2d(narrow, wide, (3, 3), padding=1),
            nn.BatchNorm2d(wide),
            nn.ReLU(),
            # layer 3
            nn.MaxPool2d((1, 2)),
            nn.Dropout(0.25),
        ]
        self.cnn = nn.Sequential(*stages)
        self.fc = nn.Sequential(
            nn.Linear(wide * (input_features // 4), output_features),
            nn.Dropout(0.5),
        )

    def forward(self, spec):
        batch, frames, feats = spec.size(0), spec.size(1), spec.size(2)
        out = self.cnn(spec.view(batch, 1, frames, feats))
        # (batch, channels, frames, feats/4) -> (batch, frames, channels*feats/4)
        out = out.transpose(1, 2).flatten(-2)
        return self.fc(out)
class Onset_Stack(nn.Module):
    """ConvStack front end, optional sequence model, and a sigmoid linear head
    producing per-frame onset probabilities.

    The forward implementation is selected once in ``__init__`` depending on
    whether a sequence model (e.g. a biLSTM) was supplied.
    """
    def __init__(self, input_features, model_size, output_features, sequence_model):
        super().__init__()
        self.convstack = ConvStack(input_features, model_size)
        self.sequence_model = sequence_model
        self.linear = nn.Linear(model_size, output_features)
        if self.sequence_model:
            self.forward = self.forward_LSTM
        else:
            self.forward = self.forward_noLSTM

    def forward_LSTM(self, x):
        feats = self.convstack(x)
        if self.training:
            feats, (h, c) = self.sequence_model(feats)
        else:
            # The sequence model is run with training mode forced on even during
            # evaluation -- preserved from the original implementation
            # (presumably a cuDNN/LSTM workaround; confirm before removing).
            self.train()
            feats, (h, c) = self.sequence_model(feats)
            self.eval()
        return torch.sigmoid(self.linear(feats))

    def forward_noLSTM(self, x):
        return torch.sigmoid(self.linear(self.convstack(x)))
class Combine_Stack(nn.Module):
    """Final stage: optional sequence model over combined features plus a
    sigmoid linear head producing per-frame probabilities.

    When no sequence model is supplied, a plain output-sized linear layer is
    used instead and the input is projected directly.
    """
    def __init__(self, model_size, output_features, sequence_model):
        super().__init__()
        self.sequence_model = sequence_model
        if self.sequence_model:
            self.linear = nn.Linear(model_size, output_features)
            self.forward = self.forward_LSTM
        else:
            self.linear = nn.Linear(output_features, output_features)
            self.forward = self.forward_noLSTM

    def forward_LSTM(self, x):
        if self.training:
            x, _ = self.sequence_model(x)
        else:
            # Sequence model runs with training mode forced on during eval --
            # preserved from the original implementation (presumably a
            # cuDNN/LSTM workaround; confirm before removing).
            self.train()
            x, _ = self.sequence_model(x)
            self.eval()
        return torch.sigmoid(self.linear(x))

    def forward_noLSTM(self, x):
        return torch.sigmoid(self.linear(x))
class Frame_stack_VAT(nn.Module):
    """Frame/activation stack trained with optional Virtual Adversarial
    Training (VAT) on labelled and unlabelled batches.

    ``run_on_batch`` returns ``(predictions, losses, spec)`` like the other
    models in this file.
    """
    def __init__(self, input_features, output_features, model_complexity=48, log=True, mode='imagewise', spec='Mel', XI=1e-5, eps=10, VAT_mode='all'):
        super().__init__()
        self.log = log  # apply log compression to the spectrogram
        self.normalize = Normalization(mode)
        self.spectrogram = Spectrogram.MelSpectrogram(sr=SAMPLE_RATE, win_length=WINDOW_LENGTH, n_mels=N_BINS,
                                                      hop_length=HOP_LENGTH, fmin=MEL_FMIN, fmax=MEL_FMAX,
                                                      trainable_mel=False, trainable_STFT=False)
        model_size = model_complexity * 16
        sequence_model = lambda input_size, output_size: nn.LSTM(input_size, output_size // 2, batch_first=True, bidirectional=True)
        self.vat_loss = stepwise_VAT_frame_stack(XI, eps, 1, VAT_mode)
        self.combined_stack = Combine_Stack(model_size, output_features, sequence_model(output_features, model_size))
        self.frame_stack = nn.Sequential(
            ConvStack(input_features, model_size),
            nn.Linear(model_size, output_features),
            nn.Sigmoid()
        )

    def forward(self, spec):
        """Return (activation_pred, frame_pred) for a (batch, time, n_mels) input."""
        activation_pred = self.frame_stack(spec)
        combined_pred = activation_pred
        frame_pred = self.combined_stack(combined_pred)
        return activation_pred, frame_pred

    def run_on_batch(self, batch, batch_ul=None, VAT=False):
        audio_label = batch['audio']
        frame_label = batch['frame']
        # Labelled audio -> (optionally log-compressed) normalized spectrogram,
        # transposed to (batch, time, n_mels) for the LSTM.
        spec = self.spectrogram(audio_label.reshape(-1, audio_label.shape[-1])[:, :-1])
        if self.log:
            spec = torch.log(spec + 1e-5)
        spec = self.normalize.transform(spec)
        spec = spec.transpose(-1, -2)
        if batch_ul and VAT:
            audio_label_ul = batch_ul['audio']
            # BUG FIX: the reshape previously used the *labelled* audio's length.
            spec_ul = self.spectrogram(audio_label_ul.reshape(-1, audio_label_ul.shape[-1])[:, :-1])
            if self.log:
                spec_ul = torch.log(spec_ul + 1e-5)
            spec_ul = self.normalize.transform(spec_ul)
            # BUG FIX: this line previously transposed `spec` (the labelled
            # spectrogram), so the unlabelled VAT loss saw labelled data with
            # swapped axes instead of the unlabelled spectrogram.
            spec_ul = spec_ul.transpose(-1, -2)
            lds_ul, _ = self.vat_loss(self, spec_ul)
        else:
            lds_ul = torch.tensor(0.)
        if VAT:
            lds_l, r_adv = self.vat_loss(self, spec)
        else:
            r_adv = None
            lds_l = torch.tensor(0.)
        activation_pred, frame_pred = self(spec)
        predictions = {
            'onset': frame_pred.reshape(*frame_pred.shape),  # no onset head; frame output reused
            'frame': frame_pred.reshape(*frame_label.shape),
            'r_adv': r_adv
        }
        if self.training:
            losses = {
                'loss/train_frame': F.binary_cross_entropy(predictions['frame'], frame_label),
                'loss/train_LDS': (lds_ul + lds_l) / 2
            }
        else:
            losses = {
                'loss/test_frame': F.binary_cross_entropy(predictions['frame'], frame_label),
                'loss/test_LDS': lds_l
            }
        return predictions, losses, spec

    def load_my_state_dict(self, state_dict):
        """Useful when loading part of the weights. From https://discuss.pytorch.org/t/how-to-load-part-of-pre-trained-model/1113/2"""
        own_state = self.state_dict()
        for name, param in state_dict.items():
            if name not in own_state:
                continue
            if isinstance(param, nn.Parameter):
                # backwards compatibility for serialized parameters
                param = param.data
            own_state[name].copy_(param)
class Onset_stack_VAT(nn.Module):
    """Onset-only stack trained with optional Virtual Adversarial Training.

    ``run_on_batch`` returns ``(predictions, losses, spec)`` like the other
    models in this file.
    """
    def __init__(self, input_features, output_features, model_complexity=48, log=True, mode='imagewise', spec='Mel', XI=1e-5, eps=10, VAT_mode='all'):
        super().__init__()
        self.log = log  # apply log compression to the spectrogram
        self.normalize = Normalization(mode)
        self.spectrogram = Spectrogram.MelSpectrogram(sr=SAMPLE_RATE, win_length=WINDOW_LENGTH, n_mels=N_BINS,
                                                      hop_length=HOP_LENGTH, fmin=MEL_FMIN, fmax=MEL_FMAX,
                                                      trainable_mel=False, trainable_STFT=False)
        model_size = model_complexity * 16
        sequence_model = lambda input_size, output_size: nn.LSTM(input_size, output_size // 2, batch_first=True, bidirectional=True)
        self.vat_loss = stepwise_VAT_onset_stack(XI, eps, 1, VAT_mode)
        self.onset_stack = Onset_Stack(input_features, model_size, output_features, sequence_model(model_size, model_size))

    def forward(self, spec):
        """Predict onset probabilities from a (batch, time, n_mels) spectrogram."""
        onset_pred = self.onset_stack(spec)
        return onset_pred

    def run_on_batch(self, batch, batch_ul=None, VAT=False):
        audio_label = batch['audio']
        onset_label = batch['onset']
        # Labelled audio -> (optionally log-compressed) normalized spectrogram,
        # transposed to (batch, time, n_mels) for the LSTM.
        spec = self.spectrogram(audio_label.reshape(-1, audio_label.shape[-1])[:, :-1])
        if self.log:
            spec = torch.log(spec + 1e-5)
        spec = self.normalize.transform(spec)
        spec = spec.transpose(-1, -2)
        if batch_ul and VAT:
            # BUG FIX: this branch previously reassigned `spec`, clobbering the
            # labelled spectrogram with the unlabelled one, so the supervised
            # loss below was computed on unlabelled audio against labelled
            # targets. It also reshaped with the labelled audio's length.
            audio_label_ul = batch_ul['audio']
            spec_ul = self.spectrogram(audio_label_ul.reshape(-1, audio_label_ul.shape[-1])[:, :-1])
            if self.log:
                spec_ul = torch.log(spec_ul + 1e-5)
            spec_ul = self.normalize.transform(spec_ul)
            spec_ul = spec_ul.transpose(-1, -2)
            lds_ul, _ = self.vat_loss(self, spec_ul)
        else:
            lds_ul = torch.tensor(0.)
        if VAT:
            lds_l, r_adv = self.vat_loss(self, spec)
        else:
            r_adv = None
            lds_l = torch.tensor(0.)
        onset_pred = self(spec)
        # Fraction of (frame, pitch) cells classified correctly at threshold 0.5.
        accuracy = (onset_label == (onset_pred > 0.5)).float().sum() / onset_label.flatten(0).shape[0]
        predictions = {
            'onset': onset_pred.reshape(*onset_pred.shape),
            'r_adv': r_adv
        }
        if self.training:
            losses = {
                'loss/train_onset': F.binary_cross_entropy(predictions['onset'], onset_label),
                'metric/train_accuracy': accuracy,
                'loss/train_LDS': torch.mean(torch.stack((lds_ul, lds_l)), dim=0)
            }
        else:
            losses = {
                'loss/test_onset': F.binary_cross_entropy(predictions['onset'], onset_label),
                'metric/test_accuracy': accuracy,
                'loss/test_LDS': lds_l
            }
        return predictions, losses, spec

    def load_my_state_dict(self, state_dict):
        """Useful when loading part of the weights. From https://discuss.pytorch.org/t/how-to-load-part-of-pre-trained-model/1113/2"""
        own_state = self.state_dict()
        for name, param in state_dict.items():
            if name not in own_state:
                continue
            if isinstance(param, nn.Parameter):
                # backwards compatibility for serialized parameters
                param = param.data
            own_state[name].copy_(param)
class OnsetsAndFrames_VAT_full(nn.Module):
    """Full Onsets-and-Frames model (onset stack + frame stack + combiner)
    with stepwise Virtual Adversarial Training on labelled and unlabelled data.
    """
    def __init__(self, input_features, output_features, model_complexity=48, log=True, mode='imagewise', spec='Mel', XI=1e-5, eps=10, VAT_mode='all'):
        super().__init__()
        self.log = log  # apply log compression to the spectrogram
        self.normalize = Normalization(mode)
        self.spectrogram = Spectrogram.MelSpectrogram(sr=SAMPLE_RATE, win_length=WINDOW_LENGTH, n_mels=N_BINS,
                                                      hop_length=HOP_LENGTH, fmin=MEL_FMIN, fmax=MEL_FMAX,
                                                      trainable_mel=False, trainable_STFT=False)
        model_size = model_complexity * 16
        sequence_model = lambda input_size, output_size: nn.LSTM(input_size, output_size // 2, batch_first=True, bidirectional=True)
        self.vat_loss = stepwise_VAT(XI, eps, 1, False)
        self.onset_stack = Onset_Stack(input_features, model_size, output_features, sequence_model(model_size, model_size))
        self.combined_stack = Combine_Stack(model_size, output_features, sequence_model(output_features * 2, model_size))
        self.frame_stack = nn.Sequential(
            ConvStack(input_features, model_size),
            nn.Linear(model_size, output_features),
            nn.Sigmoid()
        )

    def forward(self, spec):
        """Return (onset_pred, activation_pred, frame_pred)."""
        onset_pred = self.onset_stack(spec)
        activation_pred = self.frame_stack(spec)
        # Onsets are detached so the frame loss does not backprop into the onset stack.
        combined_pred = torch.cat([onset_pred.detach(), activation_pred], dim=-1)
        frame_pred = self.combined_stack(combined_pred)
        return onset_pred, activation_pred, frame_pred

    def run_on_batch(self, batch, batch_ul=None, VAT=False):
        audio_label = batch['audio']
        if self.onset_stack:
            onset_label = batch['onset']
        frame_label = batch['frame']
        if batch_ul:
            # Unlabelled branch: compute the VAT (LDS) loss on unlabelled audio.
            audio_label_ul = batch_ul['audio']
            # BUG FIX: reshape with the unlabelled audio's own length
            # (previously used audio_label.shape[-1]).
            spec_ul = self.spectrogram(audio_label_ul.reshape(-1, audio_label_ul.shape[-1])[:, :-1])
            if self.log:
                spec_ul = torch.log(spec_ul + 1e-5)
            spec_ul = self.normalize.transform(spec_ul)
            spec_ul = spec_ul.transpose(-1, -2)
            lds_ul, _, r_norm_ul = self.vat_loss(self, spec_ul)
        else:
            lds_ul = torch.tensor(0.)
            r_norm_ul = torch.tensor(0.)
        # Labelled audio -> normalized spectrogram, (batch, time, n_mels).
        spec = self.spectrogram(audio_label.reshape(-1, audio_label.shape[-1])[:, :-1])
        if self.log:
            spec = torch.log(spec + 1e-5)
        spec = self.normalize.transform(spec)
        spec = spec.transpose(-1, -2)
        if VAT:
            lds_l, r_adv, r_norm_l = self.vat_loss(self, spec)
        else:
            r_adv = None
            lds_l = torch.tensor(0.)
            r_norm_l = torch.tensor(0.)
        onset_pred, activation_pred, frame_pred = self(spec)
        # BUG FIX: predictions/losses were duplicated across the train/eval
        # branches and the function ended with two identical return statements
        # (the second unreachable); build once and return once.
        predictions = {
            'onset': onset_pred.reshape(*frame_label.shape),
            'frame': frame_pred.reshape(*frame_label.shape),
            'r_adv': r_adv,
        }
        if self.training:
            losses = {
                'loss/train_frame': F.binary_cross_entropy(predictions['frame'], frame_label),
                'loss/train_onset': F.binary_cross_entropy(predictions['onset'], onset_label),
                'loss/train_LDS_l': lds_l,
                'loss/train_LDS_ul': lds_ul,
                'loss/train_r_norm_l': r_norm_l.abs().mean(),
                'loss/train_r_norm_ul': r_norm_ul.abs().mean()
            }
        else:
            losses = {
                'loss/test_frame': F.binary_cross_entropy(predictions['frame'], frame_label),
                'loss/test_onset': F.binary_cross_entropy(predictions['onset'], onset_label),
                'loss/test_LDS_l': lds_l,
                'loss/test_r_norm_l': r_norm_l.abs().mean()
            }
        return predictions, losses, spec

    def load_my_state_dict(self, state_dict):
        """Useful when loading part of the weights. From https://discuss.pytorch.org/t/how-to-load-part-of-pre-trained-model/1113/2"""
        own_state = self.state_dict()
        for name, param in state_dict.items():
            if name not in own_state:
                continue
            if isinstance(param, nn.Parameter):
                # backwards compatibility for serialized parameters
                param = param.data
            own_state[name].copy_(param)
| 29,455 | 39.685083 | 150 | py |
ReconVAT | ReconVAT-master/model/constants.py | import torch
# Audio / spectrogram hyper-parameters shared across all models in this package.
SAMPLE_RATE = 16000
HOP_LENGTH = SAMPLE_RATE * 32 // 1000  # 32 ms hop (512 samples at 16 kHz)
ONSET_LENGTH = SAMPLE_RATE * 32 // 1000  # onset label window, in samples
OFFSET_LENGTH = SAMPLE_RATE * 32 // 1000  # offset label window, in samples
HOPS_IN_ONSET = ONSET_LENGTH // HOP_LENGTH  # = 1 with the values above
HOPS_IN_OFFSET = OFFSET_LENGTH // HOP_LENGTH  # = 1 with the values above
# Piano pitch range: MIDI 21..108 spans the 88 keys
MIN_MIDI = 21
MAX_MIDI = 108
N_BINS = 229 # Default using Mel spectrograms
MEL_FMIN = 30
MEL_FMAX = SAMPLE_RATE // 2
# New parameter for Guqin
# N_BINS = 400 # Default using Mel spectrograms
# MEL_FMIN = 20
# MEL_FMAX = SAMPLE_RATE // 2
WINDOW_LENGTH = 2048  # STFT window size in samples
#DEFAULT_DEVICE = 'cuda' if torch.cuda.is_available() else 'cpu'
| 570 | 20.961538 | 64 | py |
ReconVAT | ReconVAT-master/model/Thickstun_model.py | import torch
from torch.nn.functional import conv1d, mse_loss
import torch.nn.functional as F
import torch.nn as nn
from nnAudio import Spectrogram
from .constants import *
from model.utils import Normalization
class Thickstun(torch.nn.Module):
    """Frame-level transcription CNN in the style of the Thickstun model.

    A tall frequency convolution followed by a wide time convolution over a
    25-step log-mel window, projected to 88 per-pitch probabilities.
    """
    def __init__(self):
        super(Thickstun, self).__init__()
        self.normalize = Normalization('imagewise')
        freq_channels = 128
        time_channels = 4096
        # Tall kernel sweeping the mel axis with stride 2:
        # (229 - 128) // 2 + 1 = 51 positions, matching the linear layer below.
        self.CNN_freq = nn.Conv2d(1, freq_channels,
                                  kernel_size=(128, 1), stride=(2, 1))
        # Wide kernel covering the entire 25-step window at once.
        self.CNN_time = nn.Conv2d(freq_channels, time_channels,
                                  kernel_size=(1, 25), stride=(1, 1))
        self.linear = torch.nn.Linear(time_channels * 51, 88, bias=False)
        self.spectrogram = Spectrogram.MelSpectrogram(sr=SAMPLE_RATE, win_length=WINDOW_LENGTH, n_mels=N_BINS,
                                                      hop_length=HOP_LENGTH, fmin=MEL_FMIN, fmax=MEL_FMAX,
                                                      trainable_mel=False, trainable_STFT=False)

    def forward(self, x):
        """Map a (batch, 229, 25) spectrogram window to (batch, 88) probabilities."""
        freq_feat = torch.relu(self.CNN_freq(x.unsqueeze(1)))  # add the channel dim
        time_feat = torch.relu(self.CNN_time(freq_feat))
        logits = self.linear(torch.relu(torch.flatten(time_feat, 1)))
        return torch.sigmoid(logits)

    def run_on_batch(self, batch, batch_ul=None, VAT=False):
        """Transcribe one batch; ``batch_ul``/``VAT`` accepted for interface parity."""
        audio_label = batch['audio']
        onset_label = batch['onset']
        frame_label = batch['frame']
        if frame_label.dim() == 2:
            frame_label = frame_label.unsqueeze(0)
        # Audio -> log-compressed mel spectrogram -> per-image normalization.
        spec = self.spectrogram(audio_label.reshape(-1, audio_label.shape[-1])[:, :-1])
        spec = torch.log(spec + 1e-5)
        spec = self.normalize.transform(spec)
        # Slice into overlapping 25-step windows (stride 1), one per output frame.
        spec_padded = torch.nn.functional.pad(spec, (12, 12))
        spec_padded = spec_padded.unfold(2, 25, 1)
        spec_padded = spec_padded.transpose(1, 2).reshape(-1, 229, 25)
        frame_pred = self(spec_padded)
        predictions = {
            'onset': frame_pred,  # no dedicated onset head; frame output reused
            'frame': frame_pred,
            'r_adv': None
        }
        losses = {
            'loss/train_frame': F.binary_cross_entropy(predictions['frame'].squeeze(1), frame_label.reshape(-1, 88)),
        }
        return predictions, losses, spec.squeeze(1)
ReconVAT | ReconVAT-master/model/helper_functions.py | import os
from model.dataset import *
from model.evaluate_functions import evaluate_wo_velocity
import torch
from torch.utils.tensorboard import SummaryWriter
from torch.nn.utils import clip_grad_norm_
import numpy as np
# Mac users need to uncomment these two lines
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
import matplotlib.gridspec as gridspec
from collections import defaultdict
def cycle(iterable):
    """Yield items from *iterable* over and over, indefinitely.

    The argument is re-iterated on each pass, so it must be re-iterable
    (e.g. a DataLoader or a list) for the generator to keep producing items.
    """
    while True:
        yield from iterable
def prepare_dataset(train_on, sequence_length, validation_length, leave_one_out, refresh, device, small=False):
    """Build (train, validation, full-validation) datasets for supervised training.

    train_on: 'MAESTRO', 'MusicNet', or anything else for MAPS.
    leave_one_out: MAESTRO year held out for validation (MAESTRO only).
    small: accepted but unused by this function.

    NOTE(review): ``full_validation`` is only assigned inside the MAPS branch,
    so the MAESTRO/MusicNet paths would raise NameError at the return below --
    confirm intended behaviour for those datasets.
    """
    train_groups, validation_groups = ['train'], ['validation'] # Parameters for MAESTRO
    if leave_one_out is not None: # It applies only to MAESTRO
        all_years = {'2004', '2006', '2008', '2009', '2011', '2013', '2014', '2015', '2017'}
        train_groups = list(all_years - {str(leave_one_out)})
        validation_groups = [str(leave_one_out)]
    # Choosing the dataset to use
    if train_on == 'MAESTRO':
        dataset = MAESTRO(groups=train_groups, sequence_length=sequence_length, device=device)
        validation_dataset = MAESTRO(groups=validation_groups, sequence_length=sequence_length)
        # validation_dataset = MAPS(groups=['ENSTDkAm', 'ENSTDkCl'], sequence_length=validation_length, device=device, refresh=refresh)
    elif train_on == 'MusicNet':
        dataset = MusicNet(groups=['train'], sequence_length=sequence_length, device=device, refresh=refresh)
        validation_dataset = MusicNet(groups=['test'], sequence_length=sequence_length, device=device, refresh=refresh)
    else:
        # MAPS: train on the seven synthesized pianos, validate on the two real ones.
        dataset = MAPS(groups=['AkPnBcht', 'AkPnBsdf', 'AkPnCGdD', 'AkPnStgb', 'SptkBGAm', 'SptkBGCl', 'StbgTGd2'],
                       sequence_length=sequence_length, overlap=False, device=device, refresh=refresh)
        validation_dataset = MAPS(groups=['ENSTDkAm', 'ENSTDkCl'],
                                  sequence_length=validation_length, overlap=True, device=device, refresh=refresh)
        full_validation = MAPS(groups=['ENSTDkAm', 'ENSTDkCl'], sequence_length=None, device=device, refresh=refresh)
    return dataset, validation_dataset, full_validation
def prepare_VAT_dataset(sequence_length, validation_length, refresh, device, small=False, supersmall=False, dataset='MAPS'):
    """Build the (labelled, unlabelled, validation, full-validation) datasets
    used for semi-supervised VAT training.

    dataset: 'MAPS', 'Violin', 'String', 'Wind', 'Flute' or 'Guqin';
             anything else raises.
    small/supersmall: shrink the labelled MAPS training set (MAPS only).
    """
    train_groups, validation_groups = ['train'], ['validation']  # MAESTRO group names

    # MusicNet instrument subsets: (labelled group, unlabelled group, test group).
    musicnet_groups = {
        'Violin': ('train_violin_l', 'train_violin_ul', 'test_violin'),
        'String': ('train_string_l', 'train_string_ul', 'test_violin'),  # validated on the violin test set
        'Wind': ('train_wind_l', 'train_wind_ul', 'test_wind'),
        'Flute': ('train_flute_l', 'train_flute_ul', 'test_flute'),
    }

    if dataset == 'MAPS':
        # Labelled: synthesized MAPS pianos; unlabelled: MAESTRO; test: real pianos.
        if small == True:
            l_set = MAPS(groups=['AkPnBcht'],
                         sequence_length=sequence_length, overlap=False, device=device,
                         refresh=refresh, supersmall=supersmall)
        else:
            l_set = MAPS(groups=['AkPnBcht', 'AkPnBsdf', 'AkPnCGdD', 'AkPnStgb', 'SptkBGAm', 'SptkBGCl', 'StbgTGd2'],
                         sequence_length=sequence_length, overlap=False, device=device, refresh=refresh)
        ul_set = MAESTRO(groups=train_groups, sequence_length=sequence_length, device=device)
        validation_dataset = MAPS(groups=['ENSTDkAm', 'ENSTDkCl'], sequence_length=validation_length, overlap=True, device=device, refresh=refresh)
        full_validation = MAPS(groups=['ENSTDkAm', 'ENSTDkCl'], sequence_length=None, device=device, refresh=refresh)
    elif dataset in musicnet_groups:
        l_group, ul_group, test_group = musicnet_groups[dataset]
        l_set = MusicNet(groups=[l_group], sequence_length=sequence_length, device=device)
        ul_set = MusicNet(groups=[ul_group], sequence_length=sequence_length, device=device)
        validation_dataset = MusicNet(groups=[test_group], sequence_length=validation_length, device=device)
        full_validation = MusicNet(groups=[test_group], sequence_length=None, device=device)
    elif dataset == 'Guqin':
        l_set = Guqin(groups=['train_l'], sequence_length=sequence_length, device=device, refresh=refresh)
        ul_set = Guqin(groups=['train_ul'], sequence_length=sequence_length, device=device, refresh=refresh)
        validation_dataset = Guqin(groups=['test'], sequence_length=validation_length, device=device, refresh=refresh)
        full_validation = Guqin(groups=['test'], sequence_length=None, device=device, refresh=refresh)
    else:
        raise Exception("Please choose the correct dataset")

    return l_set, ul_set, validation_dataset, full_validation
def tensorboard_log(batch_visualize, model, validation_dataset, supervised_loader,
                    ep, logging_freq, saving_freq, n_heads, logdir, w_size, writer,
                    VAT, VAT_start, reconstruction):
    """Write evaluation metrics and diagnostic figures for epoch ``ep`` to TensorBoard.

    Runs the model on ``batch_visualize`` (with VAT enabled if ``VAT``), then every
    ``logging_freq`` epochs (and at ep==1) logs:
      - note-level metrics from ``evaluate_wo_velocity`` on ``validation_dataset``
        (only precision/recall/f1 scalars, excluding chroma variants),
      - test losses from ``eval_model`` on ``supervised_loader``,
      - figures: input spectrograms, labels, per-key predictions, activations,
        reconstructions, VAT adversarial samples (``r_adv``), and attention maps.

    NOTE(review): the 2x2 subplot grids assume ``batch_visualize`` holds exactly
    4 samples — confirm against the caller's visualization batch size.
    ``saving_freq`` and ``logdir`` are accepted but unused here.
    """
    model.eval()
    predictions, losses, mel = model.run_on_batch(batch_visualize, None, VAT)
    loss = sum(losses.values())
    if (ep)%logging_freq==0 or ep==1:
        with torch.no_grad():
            # Full transcription metrics over the validation set
            for key, values in evaluate_wo_velocity(validation_dataset, model, reconstruction=reconstruction, VAT=False).items():
                if key.startswith('metric/'):
                    _, category, name = key.split('/')
                    print(f'{category:>32} {name:25}: {np.mean(values):.3f} ± {np.std(values):.3f}')
                    if ('precision' in name or 'recall' in name or 'f1' in name) and 'chroma' not in name:
                        writer.add_scalar(key, np.mean(values), global_step=ep)
                # if key.startswith('loss/'):
                #     writer.add_scalar(key, np.mean(values), global_step=ep)
        model.eval()
        # Losses on the supervised (labelled) loader
        test_losses = eval_model(model, ep, supervised_loader, VAT_start, VAT)
        for key, values in test_losses.items():
            if key.startswith('loss/'):
                writer.add_scalar(key, np.mean(values), global_step=ep)
    if ep==1: # Showing the original transcription and spectrograms
        fig, axs = plt.subplots(2, 2, figsize=(24,8))
        axs = axs.flat
        for idx, i in enumerate(mel.cpu().detach().numpy()):
            axs[idx].imshow(i.transpose(), cmap='jet', origin='lower')
            axs[idx].axis('off')
        fig.tight_layout()
        writer.add_figure('images/Original', fig , ep)
        fig, axs = plt.subplots(2, 2, figsize=(24,4))
        axs = axs.flat
        for idx, i in enumerate(batch_visualize['frame'].cpu().numpy()):
            axs[idx].imshow(i.transpose(), origin='lower', vmax=1, vmin=0)
            axs[idx].axis('off')
        fig.tight_layout()
        writer.add_figure('images/Label', fig , ep)
        if predictions['r_adv'] is not None:
            # Spectrogram plus the adversarial perturbation found by VAT
            fig, axs = plt.subplots(2, 2, figsize=(24,8))
            axs = axs.flat
            for idx, i in enumerate(mel.cpu().detach().numpy()):
                x_adv = i.transpose()+predictions['r_adv'][idx].t().cpu().numpy()
                axs[idx].imshow(x_adv, vmax=1, vmin=0, cmap='jet', origin='lower')
                axs[idx].axis('off')
            fig.tight_layout()
            writer.add_figure('images/Spec_adv', fig , ep)
    if ep%logging_freq == 0:
        # Log every posteriorgram-style output the model produced
        for output_key in ['frame', 'onset', 'frame2', 'onset2']:
            if output_key in predictions.keys():
                fig, axs = plt.subplots(2, 2, figsize=(24,4))
                axs = axs.flat
                for idx, i in enumerate(predictions[output_key].detach().cpu().numpy()):
                    axs[idx].imshow(i.transpose(), origin='lower', vmax=1, vmin=0)
                    axs[idx].axis('off')
                fig.tight_layout()
                writer.add_figure(f'images/{output_key}', fig , ep)
        # (commented-out legacy plotting kept for reference)
        # fig, axs = plt.subplots(2, 2, figsize=(24,4))
        # axs = axs.flat
        # for idx, i in enumerate(predictions['frame'].detach().cpu().numpy()):
        #     axs[idx].imshow(i.transpose(), origin='lower', vmax=1, vmin=0)
        #     axs[idx].axis('off')
        # fig.tight_layout()
        # writer.add_figure('images/Transcription', fig , ep)
        # if 'onset' in predictions.keys():
        #     fig, axs = plt.subplots(2, 2, figsize=(24,4))
        #     axs = axs.flat
        #     for idx, i in enumerate(predictions['onset'].detach().cpu().numpy()):
        #         axs[idx].imshow(i.transpose(), origin='lower', vmax=1, vmin=0)
        #         axs[idx].axis('off')
        #     fig.tight_layout()
        #     writer.add_figure('images/onset', fig , ep)
        if 'activation' in predictions.keys():
            fig, axs = plt.subplots(2, 2, figsize=(24,4))
            axs = axs.flat
            for idx, i in enumerate(predictions['activation'].detach().cpu().numpy()):
                axs[idx].imshow(i.transpose(), origin='lower', vmax=1, vmin=0)
                axs[idx].axis('off')
            fig.tight_layout()
            writer.add_figure('images/activation', fig , ep)
        if 'reconstruction' in predictions.keys():
            fig, axs = plt.subplots(2, 2, figsize=(24,8))
            axs = axs.flat
            for idx, i in enumerate(predictions['reconstruction'].cpu().detach().numpy().squeeze(1)):
                axs[idx].imshow(i.transpose(), cmap='jet', origin='lower')
                axs[idx].axis('off')
            fig.tight_layout()
            writer.add_figure('images/Reconstruction', fig , ep)
        # show adversarial samples
        if predictions['r_adv'] is not None:
            fig, axs = plt.subplots(2, 2, figsize=(24,8))
            axs = axs.flat
            for idx, i in enumerate(mel.cpu().detach().numpy()):
                x_adv = i.transpose()+predictions['r_adv'][idx].t().cpu().numpy()
                axs[idx].imshow(x_adv, vmax=1, vmin=0, cmap='jet', origin='lower')
                axs[idx].axis('off')
            fig.tight_layout()
            writer.add_figure('images/Spec_adv', fig , ep)
        # show attention
        if 'attention' in predictions.keys():
            fig = plt.figure(figsize=(90, 45))
            # Creating the grid for 2 attention head for the transformer
            outer = gridspec.GridSpec(2, 4, wspace=0.2, hspace=0.2)
            fig.suptitle("Visualizing Attention Heads", size=20)
            attentions = predictions['attention']
            for i in range(n_heads):
                # Creating the grid for 4 samples
                inner = gridspec.GridSpecFromSubplotSpec(2, 2,
                                subplot_spec=outer[i], wspace=0.1, hspace=0.1)
                ax = plt.Subplot(fig, outer[i])
                ax.set_title(f'Head {i}', size=20) # This does not show up
                for idx in range(predictions['attention'].shape[0]):
                    axCenter = plt.Subplot(fig, inner[idx])
                    fig.add_subplot(axCenter)
                    attention = attentions[idx, :, i]
                    # Expand the local attention window into a full (T, T) map for display
                    attention = flatten_attention(attention, w_size)
                    axCenter.imshow(attention.cpu().detach(), cmap='jet')
                    attended_features = mel[idx]
                    # Create another plot on top and left of the attention map
                    divider = make_axes_locatable(axCenter)
                    axvert = divider.append_axes('left', size='30%', pad=0.5)
                    axhoriz = divider.append_axes('top', size='20%', pad=0.25)
                    axhoriz.imshow(attended_features.t().cpu().detach(), aspect='auto', origin='lower', cmap='jet')
                    axvert.imshow(predictions['frame'][idx].cpu().detach(), aspect='auto')
                    # changing axis for the center fig
                    axCenter.set_xticks([])
                    # changing axis for the output fig (left fig)
                    axvert.set_yticks([])
                    axvert.xaxis.tick_top()
                    axvert.set_title('Transcription')
                    axhoriz.set_title(f'Attended Feature (Spec)')
                    axhoriz.margins(x=0)
                    axvert.margins(y=0)
            writer.add_figure('images/Attention', fig , ep)
def tensorboard_log_without_VAT(batch_visualize, model, validation_dataset, supervised_loader,
                    ep, logging_freq, saving_freq, n_heads, logdir, w_size, writer,
                    VAT, VAT_start, reconstruction):
    """Variant of ``tensorboard_log`` for models whose ``run_on_batch`` takes only
    the batch (no VAT arguments).

    Same logging schedule and figure layout as ``tensorboard_log``; the only
    structural differences are the ``run_on_batch(batch_visualize)`` call and
    that only the 'frame' prediction is plotted (reshaped to (4, T, 88)).

    NOTE(review): the reshape hard-codes a batch of 4 samples and 88 pitch bins —
    confirm against the caller's batch size and MIDI range.
    ``saving_freq`` and ``logdir`` are accepted but unused here.
    """
    model.eval()
    predictions, losses, mel = model.run_on_batch(batch_visualize)
    loss = sum(losses.values())
    if (ep)%logging_freq==0 or ep==1:
        with torch.no_grad():
            # Full transcription metrics over the validation set
            for key, values in evaluate_wo_velocity(validation_dataset, model, reconstruction=reconstruction, VAT=False).items():
                if key.startswith('metric/'):
                    _, category, name = key.split('/')
                    print(f'{category:>32} {name:25}: {np.mean(values):.3f} ± {np.std(values):.3f}')
                    if ('precision' in name or 'recall' in name or 'f1' in name) and 'chroma' not in name:
                        writer.add_scalar(key, np.mean(values), global_step=ep)
                # if key.startswith('loss/'):
                #     writer.add_scalar(key, np.mean(values), global_step=ep)
        model.eval()
        # Losses on the supervised (labelled) loader
        test_losses = eval_model(model, ep, supervised_loader, VAT_start, VAT)
        for key, values in test_losses.items():
            if key.startswith('loss/'):
                writer.add_scalar(key, np.mean(values), global_step=ep)
    if ep==1: # Showing the original transcription and spectrograms
        fig, axs = plt.subplots(2, 2, figsize=(24,8))
        axs = axs.flat
        for idx, i in enumerate(mel.cpu().detach().numpy()):
            axs[idx].imshow(i.transpose(), cmap='jet', origin='lower')
            axs[idx].axis('off')
        fig.tight_layout()
        writer.add_figure('images/Original', fig , ep)
        fig, axs = plt.subplots(2, 2, figsize=(24,4))
        axs = axs.flat
        for idx, i in enumerate(batch_visualize['frame'].cpu().numpy()):
            axs[idx].imshow(i.transpose(), origin='lower', vmax=1, vmin=0)
            axs[idx].axis('off')
        fig.tight_layout()
        writer.add_figure('images/Label', fig , ep)
        if predictions['r_adv'] is not None:
            # Spectrogram plus the adversarial perturbation found by VAT
            fig, axs = plt.subplots(2, 2, figsize=(24,8))
            axs = axs.flat
            for idx, i in enumerate(mel.cpu().detach().numpy()):
                x_adv = i.transpose()+predictions['r_adv'][idx].t().cpu().numpy()
                axs[idx].imshow(x_adv, vmax=1, vmin=0, cmap='jet', origin='lower')
                axs[idx].axis('off')
            fig.tight_layout()
            writer.add_figure('images/Spec_adv', fig , ep)
    if ep%logging_freq == 0:
        for output_key in ['frame']:
            if output_key in predictions.keys():
                fig, axs = plt.subplots(2, 2, figsize=(24,4))
                axs = axs.flat
                # NOTE(review): mutates the predictions dict in place; assumes 4 samples / 88 keys
                predictions[output_key] = predictions[output_key].reshape(4,-1,88)
                for idx, i in enumerate(predictions[output_key].detach().cpu().numpy()):
                    axs[idx].imshow(i.transpose(), origin='lower', vmax=1, vmin=0)
                    axs[idx].axis('off')
                fig.tight_layout()
                writer.add_figure(f'images/{output_key}', fig , ep)
        # (commented-out legacy plotting kept for reference)
        # fig, axs = plt.subplots(2, 2, figsize=(24,4))
        # axs = axs.flat
        # for idx, i in enumerate(predictions['frame'].detach().cpu().numpy()):
        #     axs[idx].imshow(i.transpose(), origin='lower', vmax=1, vmin=0)
        #     axs[idx].axis('off')
        # fig.tight_layout()
        # writer.add_figure('images/Transcription', fig , ep)
        # if 'onset' in predictions.keys():
        #     fig, axs = plt.subplots(2, 2, figsize=(24,4))
        #     axs = axs.flat
        #     for idx, i in enumerate(predictions['onset'].detach().cpu().numpy()):
        #         axs[idx].imshow(i.transpose(), origin='lower', vmax=1, vmin=0)
        #         axs[idx].axis('off')
        #     fig.tight_layout()
        #     writer.add_figure('images/onset', fig , ep)
        if 'activation' in predictions.keys():
            fig, axs = plt.subplots(2, 2, figsize=(24,4))
            axs = axs.flat
            for idx, i in enumerate(predictions['activation'].detach().cpu().numpy()):
                axs[idx].imshow(i.transpose(), origin='lower', vmax=1, vmin=0)
                axs[idx].axis('off')
            fig.tight_layout()
            writer.add_figure('images/activation', fig , ep)
        if 'reconstruction' in predictions.keys():
            fig, axs = plt.subplots(2, 2, figsize=(24,8))
            axs = axs.flat
            for idx, i in enumerate(predictions['reconstruction'].cpu().detach().numpy().squeeze(1)):
                axs[idx].imshow(i.transpose(), cmap='jet', origin='lower')
                axs[idx].axis('off')
            fig.tight_layout()
            writer.add_figure('images/Reconstruction', fig , ep)
        # show adversarial samples
        if predictions['r_adv'] is not None:
            fig, axs = plt.subplots(2, 2, figsize=(24,8))
            axs = axs.flat
            for idx, i in enumerate(mel.cpu().detach().numpy()):
                x_adv = i.transpose()+predictions['r_adv'][idx].t().cpu().numpy()
                axs[idx].imshow(x_adv, vmax=1, vmin=0, cmap='jet', origin='lower')
                axs[idx].axis('off')
            fig.tight_layout()
            writer.add_figure('images/Spec_adv', fig , ep)
        # show attention
        if 'attention' in predictions.keys():
            fig = plt.figure(figsize=(90, 45))
            # Creating the grid for 2 attention head for the transformer
            outer = gridspec.GridSpec(2, 4, wspace=0.2, hspace=0.2)
            fig.suptitle("Visualizing Attention Heads", size=20)
            attentions = predictions['attention']
            for i in range(n_heads):
                # Creating the grid for 4 samples
                inner = gridspec.GridSpecFromSubplotSpec(2, 2,
                                subplot_spec=outer[i], wspace=0.1, hspace=0.1)
                ax = plt.Subplot(fig, outer[i])
                ax.set_title(f'Head {i}', size=20) # This does not show up
                for idx in range(predictions['attention'].shape[0]):
                    axCenter = plt.Subplot(fig, inner[idx])
                    fig.add_subplot(axCenter)
                    attention = attentions[idx, :, i]
                    # Expand the local attention window into a full (T, T) map for display
                    attention = flatten_attention(attention, w_size)
                    axCenter.imshow(attention.cpu().detach(), cmap='jet')
                    attended_features = mel[idx]
                    # Create another plot on top and left of the attention map
                    divider = make_axes_locatable(axCenter)
                    axvert = divider.append_axes('left', size='30%', pad=0.5)
                    axhoriz = divider.append_axes('top', size='20%', pad=0.25)
                    axhoriz.imshow(attended_features.t().cpu().detach(), aspect='auto', origin='lower', cmap='jet')
                    axvert.imshow(predictions['frame'][idx].cpu().detach(), aspect='auto')
                    # changing axis for the center fig
                    axCenter.set_xticks([])
                    # changing axis for the output fig (left fig)
                    axvert.set_yticks([])
                    axvert.xaxis.tick_top()
                    axvert.set_title('Transcription')
                    axhoriz.set_title(f'Attended Feature (Spec)')
                    axhoriz.margins(x=0)
                    axvert.margins(y=0)
            writer.add_figure('images/Attention', fig , ep)
def tensorboard_log_transcriber(batch_visualize, model, validation_dataset, supervised_loader,
                    ep, logging_freq, saving_freq, n_heads, logdir, w_size, writer,
                    VAT, VAT_start, reconstruction):
    """Reduced TensorBoard logging for transcriber-only runs.

    Unlike ``tensorboard_log`` this skips the note-level metric evaluation and
    most prediction figures: it logs only the test losses, the input
    spectrogram/label figures at ep==1, the reconstruction figure, and the
    attention maps.

    NOTE(review): the 2x2 grids and ``range(4)`` assume a visualization batch of
    exactly 4 samples — confirm against the caller.
    ``validation_dataset``, ``saving_freq``, ``logdir`` and ``reconstruction``
    are accepted but unused here.
    """
    model.eval()
    predictions, losses, mel = model.run_on_batch(batch_visualize, None, VAT)
    loss = sum(losses.values())
    if (ep)%logging_freq==0 or ep==1:
        model.eval()
        # Losses on the supervised (labelled) loader
        test_losses = eval_model(model, ep, supervised_loader, VAT_start, VAT)
        for key, values in test_losses.items():
            if key.startswith('loss/'):
                writer.add_scalar(key, np.mean(values), global_step=ep)
    if ep==1: # Showing the original transcription and spectrograms
        fig, axs = plt.subplots(2, 2, figsize=(24,8))
        axs = axs.flat
        for idx, i in enumerate(mel.cpu().detach().numpy()):
            axs[idx].imshow(i.transpose(), cmap='jet', origin='lower')
            axs[idx].axis('off')
        fig.tight_layout()
        writer.add_figure('images/Original', fig , ep)
        fig, axs = plt.subplots(2, 2, figsize=(24,4))
        axs = axs.flat
        for idx, i in enumerate(batch_visualize['frame'].cpu().numpy()):
            axs[idx].imshow(i.transpose(), origin='lower', vmax=1, vmin=0)
            axs[idx].axis('off')
        fig.tight_layout()
        writer.add_figure('images/Label', fig , ep)
    if ep%logging_freq == 0:
        if 'reconstruction' in predictions.keys():
            fig, axs = plt.subplots(2, 2, figsize=(24,8))
            axs = axs.flat
            for idx, i in enumerate(predictions['reconstruction'].cpu().detach().numpy().squeeze(1)):
                axs[idx].imshow(i.transpose(), cmap='jet', origin='lower')
                axs[idx].axis('off')
            fig.tight_layout()
            writer.add_figure('images/Reconstruction', fig , ep)
        # show attention
        if 'attention' in predictions.keys():
            fig = plt.figure(figsize=(90, 45))
            # Creating the grid for 2 attention head for the transformer
            outer = gridspec.GridSpec(2, 4, wspace=0.2, hspace=0.2)
            fig.suptitle("Visualizing Attention Heads", size=20)
            attentions = predictions['attention']
            for i in range(n_heads):
                # Creating the grid for 4 samples
                inner = gridspec.GridSpecFromSubplotSpec(2, 2,
                                subplot_spec=outer[i], wspace=0.1, hspace=0.1)
                ax = plt.Subplot(fig, outer[i])
                ax.set_title(f'Head {i}', size=20) # This does not show up
                for idx in range(4):
                    axCenter = plt.Subplot(fig, inner[idx])
                    fig.add_subplot(axCenter)
                    attention = attentions[idx, :, i]
                    # Expand the local attention window into a full (T, T) map for display
                    attention = flatten_attention(attention, w_size)
                    axCenter.imshow(attention.cpu().detach(), cmap='jet')
                    attended_features = mel[idx]
                    # Create another plot on top and left of the attention map
                    divider = make_axes_locatable(axCenter)
                    axvert = divider.append_axes('left', size='30%', pad=0.5)
                    axhoriz = divider.append_axes('top', size='20%', pad=0.25)
                    axhoriz.imshow(attended_features.t().cpu().detach(), aspect='auto', origin='lower', cmap='jet')
                    axvert.imshow(batch_visualize['frame'][idx].cpu().detach(), aspect='auto')
                    # changing axis for the center fig
                    axCenter.set_xticks([])
                    # changing axis for the output fig (left fig)
                    axvert.set_yticks([])
                    axvert.xaxis.tick_top()
                    axvert.set_title('Transcription')
                    axhoriz.set_title(f'Attended Feature (Spec)')
                    axhoriz.margins(x=0)
                    axvert.margins(y=0)
            writer.add_figure('images/Attention', fig , ep)
def flatten_attention(a, w_size=31):
    """Expand local-window attention weights into a full (seq_len, seq_len) map.

    ``a`` holds, for each time step, attention weights over a local window of
    ``w_size`` positions centred on that step.  The result places each window at
    its absolute position, zero-padding everywhere outside the window, so the
    map can be shown as a square attention image.
    """
    half = (w_size - 1) // 2  # half window size on each side of the centre
    seq_len = a.shape[0]
    full = torch.zeros(seq_len, seq_len)
    for t in range(seq_len):
        lo = max(t - half, 0)
        hi = min(t + half, seq_len)
        if t < half:
            # Window truncated on the left: take the trailing part of the row
            full[t, lo:hi + 1] = a[t, -(hi - lo) - 1:]
        else:
            full[t, lo:hi] = a[t, :hi - lo]
    return full
def train_model(model, ep, loader, optimizer, scheduler, clip_gradient_norm):
    """Run one supervised training epoch over ``loader``.

    Parameters
    ----------
    model : module exposing ``run_on_batch(batch) -> (predictions, losses, mel)``
        where ``losses`` maps names to scalar loss tensors.
    ep : int, current epoch number (used for logging only).
    loader : DataLoader of labelled batches (must be non-empty).
    optimizer, scheduler : torch optimizer and LR scheduler, stepped per batch.
    clip_gradient_norm : float or falsy; max gradient norm (0/None disables).

    Returns ``(predictions, losses, optimizer)`` from the final batch.
    """
    model.train()
    total_loss = 0
    batch_idx = 0
    batch_size = loader.batch_size
    total_batch = len(loader.dataset)
    for batch in loader:
        predictions, losses, _ = model.run_on_batch(batch)
        loss = sum(losses.values())
        total_loss += loss.item()
        optimizer.zero_grad()
        loss.backward()
        # BUGFIX: clip gradients *before* optimizer.step(); the original code
        # clipped after the step, so clipping never affected the update.
        if clip_gradient_norm:
            clip_grad_norm_(model.parameters(), clip_gradient_norm)
        optimizer.step()
        scheduler.step()
        batch_idx += 1
        print(f'Train Epoch: {ep} [{batch_idx*batch_size}/{total_batch}'
                f'({100. * batch_idx*batch_size / total_batch:.0f}%)]'
                f'\tLoss: {loss.item():.6f}'
                , end='\r')
    print(' '*100, end = '\r')
    print(f'Train Epoch: {ep}\tLoss: {total_loss/len(loader):.6f}')
    return predictions, losses, optimizer
def train_VAT_model(model, iteration, ep, l_loader, ul_loader, optimizer, scheduler, clip_gradient_norm, alpha, VAT=False, VAT_start=0):
    """Train for ``iteration`` optimizer steps, optionally with Virtual
    Adversarial Training (VAT) on an unlabelled loader.

    Parameters
    ----------
    model : module exposing ``run_on_batch(batch_l, batch_ul, VAT)``.
    iteration : int, number of optimizer steps in this "epoch".
    ep : int, current epoch (VAT is skipped while ``ep < VAT_start``).
    l_loader / ul_loader : labelled / unlabelled loaders (cycled endlessly).
    optimizer, scheduler : stepped once per iteration.
    clip_gradient_norm : float or falsy; max gradient norm (0/None disables).
    alpha : weight applied to the VAT consistency (LDS) loss terms.

    Returns ``(predictions, losses, optimizer)`` from the final iteration.
    """
    model.train()
    batch_size = l_loader.batch_size
    total_loss = 0
    l_loader = cycle(l_loader)  # infinite iterators so next() never exhausts
    if ul_loader:
        ul_loader = cycle(ul_loader)
    for i in range(iteration):
        optimizer.zero_grad()
        batch_l = next(l_loader)
        if (ep < VAT_start) or (VAT==False):
            # Supervised-only step (VAT disabled or still warming up)
            predictions, losses, _ = model.run_on_batch(batch_l, None, False)
        else:
            batch_ul = next(ul_loader)
            predictions, losses, _ = model.run_on_batch(batch_l, batch_ul, VAT)
        # Weight the VAT (LDS) terms by alpha; everything else is summed as-is.
        loss = 0
        for key, value in losses.items():
            if key.startswith('loss/train_LDS'):
                loss += alpha*value/2 # /2 averages the labelled and unlabelled LDS terms
            else:
                loss += value
        loss.backward()
        total_loss += loss.item()
        # BUGFIX: clip gradients *before* optimizer.step(); the original code
        # clipped after the step, so clipping never affected the update.
        if clip_gradient_norm:
            clip_grad_norm_(model.parameters(), clip_gradient_norm)
        optimizer.step()
        scheduler.step()
        print(f'Train Epoch: {ep} [{i*batch_size}/{iteration*batch_size}'
                f'({100. * i / iteration:.0f}%)]'
                f"\tMain Loss: {sum(losses.values()):.6f}\t"
                , end='\r')
    print(' '*100, end = '\r')
    print(f'Train Epoch: {ep}\tLoss: {total_loss/iteration:.6f}')
    return predictions, losses, optimizer
def train_VAT_model_application(model, iteration, ep, l_loader, ul_loader, optimizer, scheduler, clip_gradient_norm, alpha, VAT=False, VAT_start=0):
    """Same training loop as ``train_VAT_model`` but driving the model through
    ``run_on_batch_application`` instead of ``run_on_batch``.

    See ``train_VAT_model`` for parameter semantics.  Returns
    ``(predictions, losses, optimizer)`` from the final iteration.
    """
    model.train()
    batch_size = l_loader.batch_size
    total_loss = 0
    l_loader = cycle(l_loader)  # infinite iterators so next() never exhausts
    if ul_loader:
        ul_loader = cycle(ul_loader)
    for i in range(iteration):
        optimizer.zero_grad()
        batch_l = next(l_loader)
        if (ep < VAT_start) or (VAT==False):
            # Supervised-only step (VAT disabled or still warming up)
            predictions, losses, _ = model.run_on_batch_application(batch_l, None, False)
        else:
            batch_ul = next(ul_loader)
            predictions, losses, _ = model.run_on_batch_application(batch_l, batch_ul, VAT)
        # Weight the VAT (LDS) terms by alpha; everything else is summed as-is.
        loss = 0
        for key, value in losses.items():
            if key.startswith('loss/train_LDS'):
                loss += alpha*value/2 # /2 averages the labelled and unlabelled LDS terms
            else:
                loss += value
        loss.backward()
        total_loss += loss.item()
        # BUGFIX: clip gradients *before* optimizer.step(); the original code
        # clipped after the step, so clipping never affected the update.
        if clip_gradient_norm:
            clip_grad_norm_(model.parameters(), clip_gradient_norm)
        optimizer.step()
        scheduler.step()
        print(f'Train Epoch: {ep} [{i*batch_size}/{iteration*batch_size}'
                f'({100. * i / iteration:.0f}%)]'
                f"\tMain Loss: {sum(losses.values()):.6f}\t"
                , end='\r')
    print(' '*100, end = '\r')
    print(f'Train Epoch: {ep}\tLoss: {total_loss/iteration:.6f}')
    return predictions, losses, optimizer
def eval_model(model, ep, loader, VAT_start=0, VAT=False):
    """Evaluate the model over ``loader`` and collect per-batch losses.

    VAT is only enabled when ``VAT`` is truthy and ``ep`` has reached
    ``VAT_start``.  Returns a dict mapping each loss name to the list of
    per-batch scalar values.
    """
    model.eval()
    batch_size = loader.batch_size
    n_batches = len(loader)
    use_vat = bool(VAT) and ep >= VAT_start
    metrics = defaultdict(list)
    for idx, batch in enumerate(loader):
        _, losses, _ = model.run_on_batch(batch, None, use_vat)
        for name, value in losses.items():
            metrics[name].append(value.item())
        print(f'Eval Epoch: {ep} [{idx*batch_size}/{n_batches*batch_size}'
                f'({100. * idx / n_batches:.0f}%)]'
                f"\tMain Loss: {sum(losses.values()):.6f}"
                , end='\r')
    print(' '*100, end = '\r')
    return metrics
ReconVAT | ReconVAT-master/model/self_attention.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init
class MutliHeadAttention1D(nn.Module):
    """Multi-head *local* self-attention over a 1D sequence.

    Each time step attends only to a window of ``kernel_size`` neighbouring
    steps (``(kernel_size-1)//2`` on each side), with ``groups`` attention
    heads splitting the ``out_features`` channels.  Optionally adds a learned
    relative position encoding to the keys.
    """
    def __init__(self, in_features, out_features, kernel_size, stride=1, groups=1, position=True, bias=False):
        """kernel_size is the 1D local attention window size (must be odd);
        groups is the number of attention heads."""
        super().__init__()
        self.out_features = out_features
        self.kernel_size = kernel_size
        self.stride = stride
        self.position = position
        # Padding should always be (kernel_size-1)/2
        # so that every time step gets a full, centred window after F.pad.
        self.padding = (kernel_size-1)//2
        self.groups = groups
        # Make sure the feature dim is divisible by the n_heads
        assert self.out_features % self.groups == 0, "out_channels should be divided by groups. (example: out_channels: 40, groups: 4)"
        assert (kernel_size-1) % 2 == 0, "kernal size must be odd number"
        if self.position:
            # Relative position encoding, one vector per window offset
            self.rel = nn.Parameter(torch.randn(1, out_features, kernel_size), requires_grad=True)
        # Input shape = (batch, len, feat)
        # Q/K/V projections expand the feature dim from in_features to
        # out_features without touching the batch/time dimensions.
        self.W_k = nn.Linear(in_features, out_features, bias=bias)
        self.W_q = nn.Linear(in_features, out_features, bias=bias)
        self.W_v = nn.Linear(in_features, out_features, bias=bias)
        self.reset_parameters()
    def forward(self, x):
        """x: (batch, seq_len, in_features) ->
        (output (batch, seq_len, out_features), attention (batch, seq_len, groups, kernel_size))."""
        batch, seq_len, feat_dim = x.size()
        # Pad the time axis so edge positions still see a full window
        padded_x = F.pad(x, [0, 0, self.padding, self.padding])
        q_out = self.W_q(x)
        k_out = self.W_k(padded_x)
        v_out = self.W_v(padded_x)
        k_out = k_out.unfold(1, self.kernel_size, self.stride)
        # (batch, L, feature, local_window)
        v_out = v_out.unfold(1, self.kernel_size, self.stride)
        # (batch, L, feature, local_window)
        if self.position:
            k_out = k_out + self.rel # add learned relative position encoding to keys
        # Split the feature dim into (n_heads, feature_per_head)
        k_out = k_out.contiguous().view(batch, seq_len, self.groups, self.out_features // self.groups, -1)
        v_out = v_out.contiguous().view(batch, seq_len, self.groups, self.out_features // self.groups, -1)
        # (batch, L, n_heads, feature_per_head, local_window)
        # expand the last dimension s.t. it can multiple with the local att window
        q_out = q_out.view(batch, seq_len, self.groups, self.out_features // self.groups, 1)
        # (batch, L, n_heads, feature_per_head, 1)
        # Dot product over the per-head feature dim, softmax over the window
        energy = (q_out * k_out).sum(-2, keepdim=True)
        attention = F.softmax(energy, dim=-1)
        # (batch, L, n_heads, 1, local_window)
        out = attention*v_out
        # out = torch.einsum('blnhk,blnhk -> blnh', attention, v_out).view(batch, seq_len, -1)
        # (batch, c, H, W)
        return out.sum(-1).flatten(2), attention.squeeze(3)
    def reset_parameters(self):
        """He (Kaiming) initialization for the projections; N(0, 1) for the
        relative position encoding."""
        init.kaiming_normal_(self.W_k.weight, mode='fan_out', nonlinearity='relu')
        init.kaiming_normal_(self.W_v.weight, mode='fan_out', nonlinearity='relu')
        init.kaiming_normal_(self.W_q.weight, mode='fan_out', nonlinearity='relu')
        if self.position:
            init.normal_(self.rel, 0, 1)
ReconVAT | ReconVAT-master/model/utils.py | import sys
from functools import reduce
import torch
from PIL import Image
from torch.nn.modules.module import _addindent
def cycle(iterable):
    """Yield items from *iterable* forever, restarting from the beginning each
    time it is exhausted (re-iterates the source, unlike itertools.cycle which
    caches items)."""
    while True:
        yield from iterable
def summary(model, file=sys.stdout):
    """Print a nested, parameter-annotated repr of *model* and return the total
    parameter count.

    ``file`` may be a writable stream, a filename (opened for writing), or
    ``None`` to skip printing entirely.  Parameter counts are highlighted in
    green only when writing to stdout.
    """
    def render(module):
        # Recursively build the repr text and count parameters, one item per line
        # (mirrors nn.Module.__repr__ but appends a ", N params" suffix).
        extra = module.extra_repr()
        extra_lines = extra.split('\n') if extra else []
        child_lines = []
        n_params = 0
        for key, child in module._modules.items():
            child_text, child_params = render(child)
            child_lines.append('(' + key + '): ' + _addindent(child_text, 2))
            n_params += child_params
        for _name, p in module._parameters.items():
            if hasattr(p, 'shape'):
                n_params += reduce(lambda x, y: x * y, p.shape)
        lines = extra_lines + child_lines
        text = module._get_name() + '('
        if lines:
            # simple one-liner info, which most builtin Modules will use
            if len(extra_lines) == 1 and not child_lines:
                text += extra_lines[0]
            else:
                text += '\n  ' + '\n  '.join(lines) + '\n'
        text += ')'
        if file is sys.stdout:
            # [92m is green color, [0m resets to default
            text += ', \033[92m{:,}\033[0m params'.format(n_params)
        else:
            text += ', {:,} params'.format(n_params)
        return text, n_params
    string, count = render(model)
    if file is not None:
        if isinstance(file, str):
            file = open(file, 'w')
        print(string, file=file)
        file.flush()
    return count
def save_pianoroll(path, onsets, frames, onset_threshold=0.5, frame_threshold=0.5, zoom=4):
    """
    Saves a piano roll diagram as an RGB image.

    The onset/frame activations are thresholded and inverted so active pixels
    are dark; channel 0 encodes onsets, channel 1 frames, channel 2 their
    union.  The image is flipped so low pitches sit at the bottom and is
    stretched vertically by ``zoom``.

    Parameters
    ----------
    path: str
    onsets: torch.FloatTensor, shape = [frames, bins]
    frames: torch.FloatTensor, shape = [frames, bins]
    onset_threshold: float
    frame_threshold: float
    zoom: int
    """
    inv_onsets = (1 - (onsets.t() > onset_threshold).to(torch.uint8)).cpu()
    inv_frames = (1 - (frames.t() > frame_threshold).to(torch.uint8)).cpu()
    inv_union = 1 - (1 - inv_onsets) * (1 - inv_frames)
    pixels = torch.stack([inv_onsets, inv_frames, inv_union], dim=2).flip(0).mul(255).numpy()
    img = Image.fromarray(pixels, 'RGB')
    img = img.resize((img.size[0], img.size[1] * zoom))
    img.save(path)
class Normalization():
    """Min-max normalize spectrogram batches, batch by batch.

    Two modes:
      - 'framewise': each time frame is scaled independently over the
        frequency axis (all-zero frames are mapped to 0 instead of NaN);
      - 'imagewise': each whole spectrogram is scaled by its global min/max.
    The original paper found 'imagewise' to work better than 'framewise'.
    """
    def __init__(self, mode='framewise'):
        if mode == 'framewise':
            def normalize(x):
                # Per-frame min/max along the frequency axis (dim 1)
                x_max = x.max(1, keepdim=True)[0]
                x_min = x.min(1, keepdim=True)[0]
                output = (x-x_min)/(x_max-x_min) # If there is a column with all zero, nan will occur
                output[torch.isnan(output)]=0 # Making nan to 0
                return output
        elif mode == 'imagewise':
            def normalize(x):
                size = x.shape
                # Global min/max over each full spectrogram in the batch
                x_max = x.view(size[0], size[1]*size[2]).max(1, keepdim=True)[0]
                x_min = x.view(size[0], size[1]*size[2]).min(1, keepdim=True)[0]
                x_max = x_max.unsqueeze(1) # Make it broadcastable
                x_min = x_min.unsqueeze(1) # Make it broadcastable
                return (x-x_min)/(x_max-x_min)
        else:
            # BUGFIX: previously an unknown mode only printed a message and then
            # crashed later with a confusing NameError; fail fast instead.
            raise ValueError(f"Unknown normalization mode {mode!r}; choose 'framewise' or 'imagewise'")
        self.normalize = normalize
    def transform(self, x):
        """Apply the configured normalization to a batch x of shape (B, T, F)."""
        return self.normalize(x)
| 3,934 | 35.775701 | 247 | py |
ReconVAT | ReconVAT-master/model/dataset.py | import json
import os
from abc import abstractmethod
from glob import glob
import sys
import pickle
import pandas as pd
import numpy as np
import soundfile
from torch.utils.data import Dataset
from tqdm import tqdm
from .constants import *
from .midi import parse_midi
class PianoRollAudioDataset(Dataset):
    """Base dataset pairing raw audio with piano-roll labels.

    Subclasses implement ``available_groups`` and ``files`` to enumerate
    (audio, tsv-label) pairs; everything is eagerly loaded into memory at
    construction.  ``__getitem__`` returns either a random crop of
    ``sequence_length`` samples or the full recording.
    """
    def __init__(self, path, groups=None, sequence_length=None, seed=42, refresh=False, device='cpu'):
        self.path = path
        self.groups = groups if groups is not None else self.available_groups()
        self.sequence_length = sequence_length  # crop length in audio samples, or None for full pieces
        self.device = device
        self.random = np.random.RandomState(seed)  # private RNG so crops are reproducible
        self.refresh = refresh  # True forces re-parsing even if a cached .pt exists
        self.data = []
        print(f"Loading {len(self.groups)} group{'s' if len(self.groups) > 1 else ''} "
              f"of {self.__class__.__name__} at {path}")
        for group in self.groups:
            for input_files in tqdm(self.files(group), desc='Loading group %s' % group): #self.files is defined in MAPS class
                self.data.append(self.load(*input_files)) # self.load is a function defined below. It first loads all data into memory first
    def __getitem__(self, index):
        data = self.data[index]
        result = dict(path=data['path'])
        if self.sequence_length is not None:
            # Pick a random crop; begin/end are aligned to HOP_LENGTH boundaries
            audio_length = len(data['audio'])
            step_begin = self.random.randint(audio_length - self.sequence_length) // HOP_LENGTH
            # print(f'step_begin = {step_begin}')
            n_steps = self.sequence_length // HOP_LENGTH
            step_end = step_begin + n_steps
            begin = step_begin * HOP_LENGTH
            # print(f'begin = {begin}')
            end = begin + self.sequence_length
            result['audio'] = data['audio'][begin:end].to(self.device)
            result['label'] = data['label'][step_begin:step_end, :].to(self.device)
            result['velocity'] = data['velocity'][step_begin:step_end, :].to(self.device)
            result['start_idx'] = begin
        else:
            result['audio'] = data['audio'].to(self.device)
            result['label'] = data['label'].to(self.device)
            result['velocity'] = data['velocity'].to(self.device).float()
        result['audio'] = result['audio'].float().div_(32768.0) # converting to float by dividing it by 2^15
        # Decode the packed label codes (3=onset, 2=sustain, 1=offset) into binary rolls
        result['onset'] = (result['label'] == 3).float()
        result['offset'] = (result['label'] == 1).float()
        result['frame'] = (result['label'] > 1).float()
        result['velocity'] = result['velocity'].float().div_(128.0)
        # print(f"result['audio'].shape = {result['audio'].shape}")
        # print(f"result['label'].shape = {result['label'].shape}")
        return result
    def __len__(self):
        return len(self.data)
    @classmethod # This one seems optional?
    @abstractmethod # This is to make sure other subclasses also contain this method
    def available_groups(cls):
        """return the names of all available groups"""
        raise NotImplementedError
    @abstractmethod
    def files(self, group):
        """return the list of input files (audio_filename, tsv_filename) for this group"""
        raise NotImplementedError
    def load(self, audio_path, tsv_path):
        """
        load an audio track and the corresponding labels
        Returns
        -------
            A dictionary containing the following data:
            path: str
                the path to the audio file
            audio: torch.ShortTensor, shape = [num_samples]
                the raw waveform
            label: torch.ByteTensor, shape = [num_steps, midi_bins]
                a matrix that contains the onset/offset/frame labels encoded as:
                3 = onset, 2 = frames after onset, 1 = offset, 0 = all else
            velocity: torch.ByteTensor, shape = [num_steps, midi_bins]
                a matrix that contains MIDI velocity values at the frame locations
        """
        saved_data_path = audio_path.replace('.flac', '.pt').replace('.wav', '.pt')
        if os.path.exists(saved_data_path) and self.refresh==False: # Check if .pt files exist, if so just load the files
            return torch.load(saved_data_path)
        # Otherwise, create the .pt files
        audio, sr = soundfile.read(audio_path, dtype='int16')
        assert sr == SAMPLE_RATE
        audio = torch.ShortTensor(audio) # convert numpy array to pytorch tensor
        audio_length = len(audio)
        n_keys = MAX_MIDI - MIN_MIDI + 1
        n_steps = (audio_length - 1) // HOP_LENGTH + 1 # This will affect the labels time steps
        label = torch.zeros(n_steps, n_keys, dtype=torch.uint8)
        velocity = torch.zeros(n_steps, n_keys, dtype=torch.uint8)
        tsv_path = tsv_path
        midi = np.loadtxt(tsv_path, delimiter='\t', skiprows=1)
        # print(f'audio size = {audio.shape}')
        # print(f'label size = {label.shape}')
        for onset, offset, note, vel in midi:
            left = int(round(onset * SAMPLE_RATE / HOP_LENGTH)) # Convert time to time step
            onset_right = min(n_steps, left + HOPS_IN_ONSET) # Ensure the time step of onset would not exceed the last time step
            frame_right = int(round(offset * SAMPLE_RATE / HOP_LENGTH))
            frame_right = min(n_steps, frame_right) # Ensure the time step of frame would not exceed the last time step
            offset_right = min(n_steps, frame_right + HOPS_IN_OFFSET)
            f = int(note) - MIN_MIDI
            label[left:onset_right, f] = 3
            label[onset_right:frame_right, f] = 2
            label[frame_right:offset_right, f] = 1
            velocity[left:frame_right, f] = vel
        data = dict(path=audio_path, audio=audio, label=label, velocity=velocity)
        # Cache alongside the audio so subsequent runs skip parsing
        torch.save(data, saved_data_path)
        return data
class MAESTRO(PianoRollAudioDataset):
    """MAESTRO piano dataset.

    For the canonical splits ('train'/'validation'/'test') file pairs come from
    ``maestro-v2.0.0.json``; any other group name is treated as a subdirectory
    of ``path`` containing flac/wav + midi files.  MIDI files are converted to
    tsv label files on first use.
    """
    def __init__(self, path='../../public_data/MAESTRO/', groups=None, sequence_length=None, seed=42, refresh=False, device='cpu'):
        super().__init__(path, groups if groups is not None else ['train'], sequence_length, seed, refresh, device)
    @classmethod
    def available_groups(cls):
        return ['train', 'validation', 'test']
    def files(self, group):
        """Return sorted (audio_path, tsv_path) pairs for `group`, generating
        tsv label files from the MIDI files when missing."""
        if group not in self.available_groups():
            # year-based grouping
            flacs = sorted(glob(os.path.join(self.path, group, '*.flac')))
            if len(flacs) == 0:
                flacs = sorted(glob(os.path.join(self.path, group, '*.wav')))
            midis = sorted(glob(os.path.join(self.path, group, '*.midi')))
            files = list(zip(flacs, midis))
            if len(files) == 0:
                raise RuntimeError(f'Group {group} is empty')
        else:
            # Canonical split: resolve paths through the metadata JSON,
            # preferring .flac but falling back to .wav
            metadata = json.load(open(os.path.join(self.path, 'maestro-v2.0.0.json')))
            files = sorted([(os.path.join(self.path, row['audio_filename'].replace('.wav', '.flac')),
                             os.path.join(self.path, row['midi_filename'])) for row in metadata if row['split'] == group])
            files = [(audio if os.path.exists(audio) else audio.replace('.flac', '.wav'), midi) for audio, midi in files]
        result = []
        for audio_path, midi_path in files:
            tsv_filename = midi_path.replace('.midi', '.tsv').replace('.mid', '.tsv')
            if not os.path.exists(tsv_filename):
                midi = parse_midi(midi_path)
                np.savetxt(tsv_filename, midi, fmt='%.6f', delimiter='\t', header='onset,offset,note,velocity')
            result.append((audio_path, tsv_filename))
        return result
class MAPS(PianoRollAudioDataset):
    """MAPS piano dataset.  Audio lives under ``<path>/flac`` and labels under
    ``<path>/tsvs``.

    ``overlap=False`` drops recordings whose names appear in
    ``overlapping.pkl`` (pieces shared with the test split); ``supersmall=True``
    keeps only a single recording for quick experiments.
    """
    def __init__(self, path='./MAPS', groups=None, sequence_length=None, overlap=True,
                 seed=42, refresh=False, device='cpu', supersmall=False):
        self.overlap = overlap
        self.supersmall = supersmall
        super().__init__(path, groups if groups is not None else ['ENSTDkAm', 'ENSTDkCl'], sequence_length, seed, refresh, device)
    @classmethod
    def available_groups(cls):
        return ['AkPnBcht', 'AkPnBsdf', 'AkPnCGdD', 'AkPnStgb', 'ENSTDkAm', 'ENSTDkCl', 'SptkBGAm', 'SptkBGCl', 'StbgTGd2']
    def files(self, group):
        """Return sorted (flac, tsv) pairs for `group`, honouring the
        overlap/supersmall filters."""
        flacs = glob(os.path.join(self.path, 'flac', '*_%s.flac' % group))
        if self.overlap==False:
            with open('overlapping.pkl', 'rb') as f:
                test_names = pickle.load(f)
            filtered_flacs = []
            for i in flacs:
                if any([substring in i for substring in test_names]):
                    pass
                else:
                    filtered_flacs.append(i)
            flacs = sorted(filtered_flacs)
        if self.supersmall==True:
            # print(sorted(filtered_flacs))
            # NOTE(review): relies on filtered_flacs from the overlap==False
            # branch; raises NameError when supersmall=True with overlap=True.
            flacs = [sorted(filtered_flacs)[3]]
        # tsvs = [f.replace('/flac/', '/tsv/matched/').replace('.flac', '.tsv') for f in flacs]
        tsvs = [f.replace('/flac/', '/tsvs/').replace('.flac', '.tsv') for f in flacs]
        # print(flacs)
        assert(all(os.path.isfile(flac) for flac in flacs))
        assert(all(os.path.isfile(tsv) for tsv in tsvs))
        return sorted(zip(flacs, tsvs))
class MusicNet(PianoRollAudioDataset):
    """MusicNet dataset.

    ``files`` understands a number of group names that select recordings by
    instrument family, split into labelled (``*_l``) and unlabelled
    (``*_ul``) subsets for semi-supervised training.

    BUGFIX: the 'small test' and default branches previously zipped the
    id-filtered flac list against *all* tsv files of the split, so an audio
    file could be paired with an unrelated label file. Both branches now
    resolve tsvs by id via :meth:`appending_flac_tsv`.
    """

    def __init__(self, path='./MusicNet', groups=None, sequence_length=None, seed=42, refresh=False, device='cpu'):
        super().__init__(path, groups if groups is not None else ['train'], sequence_length, seed, refresh, device)

    @classmethod
    def available_groups(cls):
        return ['train', 'test']

    def read_id(self, path, group, mode):
        """Return the ids whose 'ensemble' metadata column contains *group*."""
        train_meta = pd.read_csv(os.path.join(path, f'{mode}_metadata.csv'))
        return train_meta[train_meta['ensemble'].str.contains(group)]['id'].values

    def appending_flac_tsv(self, id_list, mode):
        """Collect sorted, id-matched flac/tsv path lists for *id_list*."""
        flacs = []
        tsvs = []
        for i in id_list:
            flacs.extend(glob(os.path.join(self.path, f"{mode}_data", f"{i}.flac")))
            tsvs.extend(glob(os.path.join(self.path, f"tsv_{mode}_labels/{i}.tsv")))
        return sorted(flacs), sorted(tsvs)

    def _ids_for_keys(self, meta, keys, index_slice):
        """Concatenate the ids matching each ensemble key in *keys*, taking
        ``index_slice`` of each key's id list (first id = labelled subset,
        the rest = unlabelled subset)."""
        parts = [meta[meta['ensemble'].str.contains(key)]['id'].values[index_slice]
                 for key in keys]
        return np.concatenate(parts)

    def files(self, group):
        """Return an iterator of (flac_path, tsv_path) pairs for *group*."""
        string_keys = ['Solo Violin', 'Violin and Harpsichord',
                       'Accompanied Violin', 'String Quartet',
                       'String Sextet', 'Viola Quintet',
                       'Solo Cello', 'Accompanied Cello']
        wind_keys = ['Accompanied Clarinet', 'Clarinet Quintet',
                     'Pairs Clarinet-Horn-Bassoon', 'Clarinet-Cello-Piano Trio',
                     'Wind Octet', 'Wind Quintet']
        train_meta = pd.read_csv(os.path.join(self.path, f'train_metadata.csv'))
        if group == 'small test':
            # three hand-picked test recordings, labels matched by id
            flacs, tsvs = self.appending_flac_tsv(('1819', '2303', '2382'), 'test')
        elif group == 'train_string_l':
            types = self._ids_for_keys(train_meta, string_keys, slice(None, 1))
            flacs, tsvs = self.appending_flac_tsv(types, 'train')
        elif group == 'train_string_ul':
            types = self._ids_for_keys(train_meta, string_keys, slice(1, None))
            flacs, tsvs = self.appending_flac_tsv(types, 'train')
        elif group == 'train_violin_l':
            types = np.concatenate((self.read_id(self.path, 'Solo Violin', 'train'),
                                    self.read_id(self.path, 'Accompanied Violin', 'train')))
            flacs, tsvs = self.appending_flac_tsv(types, 'train')
        elif group == 'train_violin_ul':
            types = np.concatenate((self.read_id(self.path, 'String Quartet', 'train'),
                                    self.read_id(self.path, 'String Sextet', 'train')))
            flacs, tsvs = self.appending_flac_tsv(types, 'train')
        elif group == 'test_violin':
            flacs, tsvs = self.appending_flac_tsv(('2106', '2191', '2298', '2628'), 'test')
        elif group == 'train_wind_l':
            types = self._ids_for_keys(train_meta, wind_keys, slice(None, 1))
            flacs, tsvs = self.appending_flac_tsv(types, 'train')
        elif group == 'train_wind_ul':
            types = self._ids_for_keys(train_meta, wind_keys, slice(1, None))
            flacs, tsvs = self.appending_flac_tsv(types, 'train')
        elif group == 'test_wind':
            flacs, tsvs = self.appending_flac_tsv(('1819', '2416'), 'test')
        elif group == 'train_flute_l':
            flacs, tsvs = self.appending_flac_tsv(('2203',), 'train')
        elif group == 'train_flute_ul':
            # all wind recordings plus the labelled flute piece
            types = self._ids_for_keys(train_meta, wind_keys, slice(None))
            types = np.concatenate((types, ('2203',)))
            flacs, tsvs = self.appending_flac_tsv(types, 'train')
        elif group == 'test_flute':
            # NOTE(review): mode is 'train' here although the group name says
            # test — preserved from the original code; verify intent.
            flacs, tsvs = self.appending_flac_tsv(('2204',), 'train')
        else:
            # treat *group* as an ensemble name in the train metadata
            types = self.read_id(self.path, group, 'train')
            flacs, tsvs = self.appending_flac_tsv(types, 'train')
        assert(all(os.path.isfile(flac) for flac in flacs))
        assert(all(os.path.isfile(tsv) for tsv in tsvs))
        return zip(flacs, tsvs)
class Guqin(PianoRollAudioDataset):
    """Guqin dataset with hand-picked labelled/unlabelled/test pieces."""

    def __init__(self, path='./Guqin', groups=None, sequence_length=None, seed=42, refresh=False, device='cpu'):
        super().__init__(path, groups if groups is not None else ['train'], sequence_length, seed, refresh, device)

    @classmethod
    def available_groups(cls):
        return ['train_l', 'train_ul', 'test']

    def read_id(self, path, group, mode):
        """Return the ids whose 'ensemble' metadata column contains *group*."""
        train_meta = pd.read_csv(os.path.join(path, f'{mode}_metadata.csv'))
        return train_meta[train_meta['ensemble'].str.contains(group)]['id'].values

    def appending_flac_tsv(self, id_list, mode):
        """Collect sorted flac/tsv path lists for the given ids."""
        flacs = []
        tsvs = []
        for i in id_list:
            flacs.extend(glob(os.path.join(self.path, f"{mode}_data", f"{i}.flac")))
            tsvs.extend(glob(os.path.join(self.path, f"tsv_{mode}_labels/{i}.tsv")))
        return sorted(flacs), sorted(tsvs)

    def files(self, group):
        """Return (flac_path, tsv_path) pairs for *group*."""
        # piece names belonging to each split ('train_ul' is currently empty)
        pieces = {
            'train_l': ['jiou', 'siang', 'ciou', 'yi', 'yu', 'feng', 'yang'],
            'train_ul': [],
            'test': ['gu', 'guan', 'liang'],
        }
        if group not in pieces:
            raise Exception("Please choose a valid group")
        flacs = []
        tsvs = []
        for name in pieces[group]:
            flacs.extend(glob(os.path.join(self.path, 'audio', name + '.flac')))
            tsvs.extend(glob(os.path.join(self.path, 'tsv_label', name + '.tsv')))
        flacs = sorted(flacs)
        tsvs = sorted(tsvs)
        if group == 'test':
            print(f'flacs = {flacs}')
            print(f'tsvs = {tsvs}')
        return zip(flacs, tsvs)
class Corelli(PianoRollAudioDataset):
    """Corelli string recordings, grouped by opus/number sub-directory.

    Parameters
    ----------
    overlap : bool
        When False, pieces listed in ``overlapping.pkl`` are filtered out.
    supersmall : bool
        When True, keep only a single piece (debug-sized dataset).
    """

    def __init__(self, path='./Application_String', groups=None, sequence_length=None, overlap=True,
                 seed=42, refresh=False, device='cpu', supersmall=False):
        self.overlap = overlap
        self.supersmall = supersmall
        super().__init__(path, groups, sequence_length, seed, refresh, device)

    @classmethod
    def available_groups(cls):
        return ['op6_no1', 'op6_no2', 'op6_no3']

    def files(self, group):
        """Return sorted (flac_path, tsv_path) pairs for *group*."""
        flacs = glob(os.path.join(self.path, group, '*.flac'))
        if not self.overlap:
            # drop pieces whose names appear in the overlap list
            with open('overlapping.pkl', 'rb') as f:
                test_names = pickle.load(f)
            flacs = sorted(name for name in flacs
                           if not any(substring in name for substring in test_names))
        if self.supersmall:
            # debug mode: keep only the 4th piece.
            # BUGFIX: this previously indexed `filtered_flacs`, which is only
            # defined when overlap is False, raising NameError otherwise.
            flacs = [sorted(flacs)[3]]
        # NOTE(review): the '/flac/' substitution is a no-op for this layout
        # (files live under path/group); kept for parity with MAPS — verify.
        tsvs = [f.replace('/flac/', '/tsvs/').replace('.flac', '.tsv') for f in flacs]
        assert(all(os.path.isfile(flac) for flac in flacs))
        assert(all(os.path.isfile(tsv) for tsv in tsvs))
        return sorted(zip(flacs, tsvs))
class Application_Dataset(Dataset):
    """Inference-only dataset: loads raw audio (no labels) from a directory.

    All audio is loaded eagerly into memory at construction time.
    """

    def __init__(self, path, seed=42, device='cpu'):
        self.path = path
        self.device = device
        self.data = []
        for input_files in tqdm(self.files(path), desc='Loading files'):
            self.data.append(self.load(input_files))

    def __getitem__(self, index):
        data = self.data[index]
        result = dict(path=data['path'])
        result['audio'] = data['audio'].to(self.device)
        # int16 -> float in [-1, 1) by dividing by 2^15 (div_ acts on the
        # float copy created by .float(), so the cached tensor is untouched)
        result['audio'] = result['audio'].float().div_(32768.0)
        return result

    def __len__(self):
        return len(self.data)

    # NOTE: @abstractmethod was removed — this method is concrete and is
    # called from __init__, so the decorator was misleading (and inert,
    # since the base class is not an ABC).
    def files(self, group):
        """Return the audio files under ``self.path``.

        *group* is unused; the parameter is kept for signature compatibility
        with the other dataset classes.
        """
        flacs = glob(os.path.join(self.path, '*.flac'))
        flacs.extend(glob(os.path.join(self.path, '*.wav')))  # also accept wav
        assert(all(os.path.isfile(flac) for flac in flacs))
        return flacs

    def load(self, audio_path):
        """Load one audio track.

        Returns
        -------
        dict with keys:
            path: str — the path to the audio file
            audio: torch.ShortTensor, shape = [num_samples] — raw waveform
        """
        saved_data_path = audio_path.replace('.flac', '.pt').replace('.wav', '.pt')
        audio, sr = soundfile.read(audio_path, dtype='int16')
        assert sr == SAMPLE_RATE, f'Please make sure the sampling rate is 16k.\n{saved_data_path} has a sampling of {sr}'
        return dict(path=audio_path, audio=torch.ShortTensor(audio))
class Application_Wind(PianoRollAudioDataset):
    """Wind-instrument application dataset; audio lives directly under *path*.

    Parameters
    ----------
    overlap : bool
        When False, pieces listed in ``overlapping.pkl`` are filtered out.
    supersmall : bool
        When True, keep only a single piece (debug-sized dataset).
    """

    def __init__(self, path='./Application_Wind', groups=None, sequence_length=None, overlap=True,
                 seed=42, refresh=False, device='cpu', supersmall=False):
        self.overlap = overlap
        self.supersmall = supersmall
        super().__init__(path, groups, sequence_length, seed, refresh, device)

    @classmethod
    def available_groups(cls):
        return ['dummy']

    def files(self, group):
        """Return sorted (flac_path, tsv_path) pairs (group is unused)."""
        flacs = glob(os.path.join(self.path, '*.flac'))
        if not self.overlap:
            # drop pieces whose names appear in the overlap list
            with open('overlapping.pkl', 'rb') as f:
                test_names = pickle.load(f)
            flacs = sorted(name for name in flacs
                           if not any(substring in name for substring in test_names))
        if self.supersmall:
            # debug mode: keep only the 4th piece.
            # BUGFIX: this previously indexed `filtered_flacs`, which is only
            # defined when overlap is False, raising NameError otherwise.
            flacs = [sorted(flacs)[3]]
        # NOTE(review): the '/flac/' substitution is a no-op for this layout
        # (files live directly under path); kept for parity with MAPS — verify.
        tsvs = [f.replace('/flac/', '/tsvs/').replace('.flac', '.tsv') for f in flacs]
        assert(all(os.path.isfile(flac) for flac in flacs))
        assert(all(os.path.isfile(tsv) for tsv in tsvs))
return sorted(zip(flacs, tsvs)) | 22,968 | 40.914234 | 140 | py |
ReconVAT | ReconVAT-master/model/self_attenttion_model.py | """
A rough translation of Magenta's Onsets and Frames implementation [1].
[1] https://github.com/tensorflow/magenta/blob/master/magenta/models/onsets_frames_transcription/model.py
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init
from nnAudio import Spectrogram
from .constants import *
from model.utils import Normalization
class MutliHeadAttention1D(nn.Module):
    """Multi-head *local* self-attention over a 1-D sequence.

    Each time step attends only to a window of ``kernel_size`` neighbouring
    steps (padding/stride semantics analogous to a 1-D convolution).
    The class-name typo ("Mutli") is kept for checkpoint/caller compatibility.
    """
    def __init__(self, in_features, out_features, kernel_size, stride=1, groups=1, position=True, bias=False):
        """kernel_size is the 1D local attention window size"""
        super().__init__()
        self.out_features = out_features
        self.kernel_size = kernel_size
        self.stride = stride
        self.position = position  # whether to add a learned relative position encoding
        # Padding should always be (kernel_size-1)/2
        # Isn't it?
        self.padding = (kernel_size-1)//2
        self.groups = groups  # groups == number of attention heads
        # Make sure the feature dim is divisible by the n_heads
        assert self.out_features % self.groups == 0, "out_channels should be divided by groups. (example: out_channels: 40, groups: 4)"
        assert (kernel_size-1) % 2 == 0, "kernal size must be odd number"
        if self.position:
            # Relative position encoding (one embedding per window offset)
            self.rel = nn.Parameter(torch.randn(1, out_features, kernel_size), requires_grad=True)
        # Input shape = (batch, len, feat)
        # Increasing the channel deapth (feature dim) with Conv2D
        # kernel_size=1 such that it expands only the feature dim
        # without affecting other dimensions
        self.W_k = nn.Linear(in_features, out_features, bias=bias)
        self.W_q = nn.Linear(in_features, out_features, bias=bias)
        self.W_v = nn.Linear(in_features, out_features, bias=bias)
        self.reset_parameters()
    def forward(self, x):
        """x: (batch, seq_len, in_features) -> (out, attention).

        out: (batch, seq_len, out_features); attention: per-head softmax
        weights over each local window.
        """
        batch, seq_len, feat_dim = x.size()
        # zero-pad the sequence dimension so every step has a full window
        padded_x = F.pad(x, [0, 0, self.padding, self.padding])
        q_out = self.W_q(x)
        k_out = self.W_k(padded_x)
        v_out = self.W_v(padded_x)
        k_out = k_out.unfold(1, self.kernel_size, self.stride)
        # (batch, L, feature, local_window)
        v_out = v_out.unfold(1, self.kernel_size, self.stride)
        # (batch, L, feature, local_window)
        if self.position:
            k_out = k_out + self.rel # relative position?
        k_out = k_out.contiguous().view(batch, seq_len, self.groups, self.out_features // self.groups, -1)
        v_out = v_out.contiguous().view(batch, seq_len, self.groups, self.out_features // self.groups, -1)
        # (batch, L, n_heads, feature_per_head, local_window)
        # expand the last dimension s.t. it can multiple with the local att window
        q_out = q_out.view(batch, seq_len, self.groups, self.out_features // self.groups, 1)
        # (batch, L, n_heads, feature_per_head, 1)
        # dot-product energy between the query and each key in the window
        energy = (q_out * k_out).sum(-2, keepdim=True)
        attention = F.softmax(energy, dim=-1)
        # (batch, L, n_heads, 1, local_window)
        out = attention*v_out
        # out = torch.einsum('blnhk,blnhk -> blnh', attention, v_out).view(batch, seq_len, -1)
        # (batch, c, H, W)
        return out.sum(-1).flatten(2), attention.squeeze(3)
    def reset_parameters(self):
        """Kaiming-init the projections; N(0, 1) for the positional encoding."""
        init.kaiming_normal_(self.W_k.weight, mode='fan_out', nonlinearity='relu')
        init.kaiming_normal_(self.W_v.weight, mode='fan_out', nonlinearity='relu')
        init.kaiming_normal_(self.W_q.weight, mode='fan_out', nonlinearity='relu')
        if self.position:
            init.normal_(self.rel, 0, 1)
class MutliHeadAttention2D(nn.Module):
    """Multi-head *local* self-attention over a 2-D feature map.

    Each (time, frequency) position attends only to a ``kernel_size`` local
    neighbourhood (semantics analogous to a 2-D convolution).
    The class-name typo ("Mutli") is kept for checkpoint/caller compatibility.
    """
    def __init__(self, in_channels, out_channels, kernel_size=(3,3), stride=(1,1), groups=1, bias=False):
        """kernel_size is the 2D local attention window size"""
        super().__init__()
        self.out_channels = out_channels
        self.kernel_size = kernel_size
        self.stride = stride
        # Padding should always be (kernel_size-1)/2
        # Isn't it?
        self.padding_time = (kernel_size[0]-1)//2
        self.padding_freq = (kernel_size[1]-1)//2
        self.groups = groups  # groups == number of attention heads
        # Make sure the feature dim is divisible by the n_heads
        assert self.out_channels % self.groups == 0, "out_channels should be divided by groups. (example: out_channels: 40, groups: 4)"
        # Relative position encoding: half the channels encode the time
        # offset, the other half the frequency offset
        self.rel_t = nn.Parameter(torch.randn(out_channels // 2, 1, 1, kernel_size[0], 1), requires_grad=True)
        self.rel_f = nn.Parameter(torch.randn(out_channels // 2, 1, 1, 1, kernel_size[1]), requires_grad=True)
        # Increasing the channel deapth (feature dim) with Conv2D
        # kernel_size=1 such that it expands only the feature dim
        # without affecting other dimensions
        self.key_conv = nn.Conv2d(in_channels, out_channels, kernel_size=1, bias=bias)
        self.query_conv = nn.Conv2d(in_channels, out_channels, kernel_size=1, bias=bias)
        self.value_conv = nn.Conv2d(in_channels, out_channels, kernel_size=1, bias=bias)
        self.reset_parameters()
    def forward(self, x):
        """x: (batch, in_channels, H, W) -> (out, attention).

        out: (batch, out_channels, H, W); attention: per-head softmax
        weights over each local H×W window.
        """
        batch, channels, height, width = x.size()
        # zero-pad both spatial dims so every position has a full window
        padded_x = F.pad(x, [self.padding_freq, self.padding_freq, self.padding_time, self.padding_time])
        q_out = self.query_conv(x)
        k_out = self.key_conv(padded_x)
        v_out = self.value_conv(padded_x)
        k_out = k_out.unfold(2, self.kernel_size[0], self.stride[0]).unfold(3, self.kernel_size[1], self.stride[1])
        # (batch, channels, H, W, H_local_w, W_local_w)
        v_out = v_out.unfold(2, self.kernel_size[0], self.stride[0]).unfold(3, self.kernel_size[1], self.stride[1])
        # (batch, channels, H, W, H_local_w, W_local_w)
        k_out_t, k_out_f = k_out.split(self.out_channels // 2, dim=1)
        k_out = torch.cat((k_out_t + self.rel_t, k_out_f + self.rel_f), dim=1) # relative position?
        k_out = k_out.contiguous().view(batch, self.groups, self.out_channels // self.groups, height, width, -1)
        v_out = v_out.contiguous().view(batch, self.groups, self.out_channels // self.groups, height, width, -1)
        # (batch, n_heads, feature_per_head, H, W, local H X W)
        # expand the last dimension s.t. it can multiple with the local att window
        q_out = q_out.view(batch, self.groups, self.out_channels // self.groups, height, width, 1)
        # (batch, n_heads, feature_per_head, H, W, 1)
        # Alternative way to express dot product
        # same as k_out = k_out.permute(0,1,3,4,2,5)
        # and then energy = torch.matmul(q_out,k_out)
        energy = (q_out * k_out).sum(dim=2, keepdim=True)
        attention = F.softmax(energy, dim=-1)
        # (batch, n_heads, 1, H, W, local HXW)
        out = attention*v_out
        # (batch, n_heads, feature_per_head, H, W, local HXW)
        # (batch, c, H, W)
        return out.sum(-1).flatten(1,2), attention.squeeze(2)
    def reset_parameters(self):
        """Kaiming-init the projections; N(0, 1) for the positional encodings."""
        init.kaiming_normal_(self.key_conv.weight, mode='fan_out', nonlinearity='relu')
        init.kaiming_normal_(self.value_conv.weight, mode='fan_out', nonlinearity='relu')
        init.kaiming_normal_(self.query_conv.weight, mode='fan_out', nonlinearity='relu')
        init.normal_(self.rel_t, 0, 1)
        init.normal_(self.rel_f, 0, 1)
class ConvStack(nn.Module):
    """Acoustic front end: a small CNN over (frames, bins) followed by a
    per-frame fully connected projection.

    Input:  (batch, frames, input_features)
    Output: (batch, frames, output_features)
    """

    def __init__(self, input_features, output_features):
        super().__init__()
        # The two MaxPool2d layers each halve the frequency axis, hence the
        # input_features // 4 factor in the linear layer below.
        self.cnn = nn.Sequential(
            nn.Conv2d(1, output_features // 16, (3, 3), padding=1),
            nn.BatchNorm2d(output_features // 16),
            nn.ReLU(),
            nn.Conv2d(output_features // 16, output_features // 16, (3, 3), padding=1),
            nn.BatchNorm2d(output_features // 16),
            nn.ReLU(),
            nn.MaxPool2d((1, 2)),
            nn.Dropout(0.25),
            nn.Conv2d(output_features // 16, output_features // 8, (3, 3), padding=1),
            nn.BatchNorm2d(output_features // 8),
            nn.ReLU(),
            nn.MaxPool2d((1, 2)),
            nn.Dropout(0.25),
        )
        self.fc = nn.Sequential(
            nn.Linear((output_features // 8) * (input_features // 4), output_features),
            nn.Dropout(0.5),
        )

    def forward(self, spec):
        batch, frames, bins = spec.size(0), spec.size(1), spec.size(2)
        # add a singleton channel dimension for the 2-D convolutions
        features = self.cnn(spec.view(batch, 1, frames, bins))
        # (batch, channels, frames, bins//4) -> (batch, frames, channels * bins//4)
        flat = features.transpose(1, 2).flatten(-2)
        return self.fc(flat)
class Onset_Stack(nn.Module):
    """Onset detection head: ConvStack -> sequence model -> sigmoid linear.

    ``sequence_model`` must be a callable returning ``(features, attention)``.
    """

    def __init__(self, input_features, model_size, output_features, sequence_model):
        super().__init__()
        self.convstack = ConvStack(input_features, model_size)
        self.sequence_model = sequence_model
        self.linear = nn.Linear(model_size, output_features)

    def forward(self, x):
        features = self.convstack(x)
        features, attention = self.sequence_model(features)
        logits = self.linear(features)
        return torch.sigmoid(logits), attention
class Combine_Stack_with_attn(nn.Module):
    """Frame head that fuses onset and activation predictions through a
    sequence model followed by a sigmoid linear layer.

    ``sequence_model`` must be a callable returning ``(features, attention)``.
    ``attention_mode`` is accepted for interface compatibility but unused.
    """

    def __init__(self, model_size, output_features, sequence_model, attention_mode, w_size):
        super().__init__()
        self.sequence_model = sequence_model
        self.w_size = w_size
        self.linear = nn.Linear(model_size, output_features)

    def forward(self, x):
        hidden, attention = self.sequence_model(x)
        return torch.sigmoid(self.linear(hidden)), attention
class OnsetsAndFrames_self_attention(nn.Module):
    """Onsets-and-Frames transcription model using local self-attention
    instead of LSTMs for sequence modelling.

    BUGFIX: the ``onset_stack`` boolean flag used to be stored in
    ``self.onset_stack`` and was then overwritten by the Onset_Stack module,
    so ``if self.onset_stack:`` in ``run_on_batch`` was always truthy and the
    ``onset_stack=False`` path was unreachable. The flag now lives in
    ``self.use_onset_stack``; the module keeps its original attribute name,
    so existing checkpoints still load.
    """

    def __init__(self, input_features, output_features, model_complexity=48, log=True, mode='imagewise', spec='Mel', device='cpu', attention_mode='activation', w_size=30, n_heads=8, onset_stack=True, LSTM=True):
        super().__init__()
        self.use_onset_stack = onset_stack  # see class docstring (bug fix)
        self.w_size = w_size
        self.device = device
        self.log = log
        self.normalize = Normalization(mode)
        self.attention_mode = attention_mode
        # log-Mel front end; the `spec` and `LSTM` args are kept for
        # interface compatibility but are unused
        self.spectrogram = Spectrogram.MelSpectrogram(sr=SAMPLE_RATE, win_length=WINDOW_LENGTH, n_mels=N_BINS,
                                                      hop_length=HOP_LENGTH, fmin=MEL_FMIN, fmax=MEL_FMAX,
                                                      trainable_mel=False, trainable_STFT=False)
        model_size = model_complexity * 16
        sequence_model = lambda input_size, output_size: MutliHeadAttention1D(in_features=input_size,
                                                                              out_features=output_size,
                                                                              kernel_size=w_size,
                                                                              groups=n_heads)
        self.combined_stack = Combine_Stack_with_attn(model_size, output_features,
                                                      sequence_model(output_features * 2, model_size),
                                                      attention_mode, w_size)
        self.onset_stack = Onset_Stack(input_features, model_size, output_features,
                                       sequence_model(model_size, model_size))
        self.frame_stack = nn.Sequential(
            ConvStack(input_features, model_size),
            nn.Linear(model_size, output_features),
            nn.Sigmoid()
        )

    def forward(self, spec):
        """Return (onset_pred, activation_pred, frame_pred, combined_attention)."""
        onset_pred, onset_attention = self.onset_stack(spec)
        activation_pred = self.frame_stack(spec)
        # onsets are detached so the frame loss does not backprop into the
        # onset stack
        combined_pred = torch.cat([onset_pred.detach(), activation_pred], dim=-1)
        frame_pred, combined_attention = self.combined_stack(combined_pred)
        return onset_pred, activation_pred, frame_pred, combined_attention

    def _preprocess(self, audio):
        """Waveform -> normalized (log-)Mel spectrogram, (batch, frames, bins)."""
        spec = self.spectrogram(audio.reshape(-1, audio.shape[-1])[:, :-1])
        if self.log:
            spec = torch.log(spec + 1e-5)
        spec = self.normalize.transform(spec)
        # swap bins with timesteps so it fits the sequence model
        return spec.transpose(-1, -2)

    def run_on_batch(self, batch):
        """Run a forward pass on a labelled batch.

        Returns (predictions, losses, spec).
        """
        audio_label = batch['audio']
        onset_label = batch['onset']
        frame_label = batch['frame']
        spec = self._preprocess(audio_label)
        onset_pred, activation_pred, frame_pred, a = self(spec)
        if self.use_onset_stack:
            predictions = {
                'onset': onset_pred.reshape(*onset_label.shape),
                'activation': activation_pred,
                'frame': frame_pred.reshape(*frame_label.shape),
                'attention': a
            }
            losses = {
                'loss/onset': F.binary_cross_entropy(predictions['onset'], onset_label),
                'loss/frame': F.binary_cross_entropy(predictions['frame'], frame_label),
            }
        else:
            # no onset supervision: report the frame prediction as 'onset'
            predictions = {
                'onset': frame_pred.reshape(*frame_label.shape),
                'activation': activation_pred,
                'frame': frame_pred.reshape(*frame_label.shape),
                'attention': a
            }
            losses = {
                'loss/frame': F.binary_cross_entropy(predictions['frame'], frame_label),
            }
        return predictions, losses, spec

    def feed_audio(self, audio):
        """Run the model on raw audio (inference; no labels required)."""
        spec = self._preprocess(audio)
        onset_pred, activation_pred, frame_pred, a = self(spec)
        predictions = {
            'onset': onset_pred,
            'activation': activation_pred,
            'frame': frame_pred,
            'attention': a
        }
        return predictions, spec

    def load_my_state_dict(self, state_dict):
        """Copy matching parameters from *state_dict*, ignoring the rest."""
        own_state = self.state_dict()
        for name, param in state_dict.items():
            if name not in own_state:
                continue
            if isinstance(param, nn.Parameter):
                # backwards compatibility for serialized parameters
                param = param.data
            own_state[name].copy_(param)
class simple_onset_frame(nn.Module):
    """Lightweight onset+frame transcriber with one local-attention layer per
    head. ``forward`` returns ``(frame_pred, onset_pred, attention)``.

    BUGFIX: ``feed_audio`` used to unpack four values from ``self(spec)``
    although ``forward`` returns three, so it always raised ValueError.
    """

    def __init__(self, input_features, output_features, model_complexity=48, w_size=31,
                 log=True, mode='imagewise', spec='Mel', n_heads=8, position=True):
        super().__init__()
        self.w_size = w_size
        self.log = log
        self.normalize = Normalization(mode)
        # log-Mel front end; the `spec` arg is kept for interface compatibility
        self.spectrogram = Spectrogram.MelSpectrogram(sr=SAMPLE_RATE, win_length=WINDOW_LENGTH, n_mels=N_BINS,
                                                      hop_length=HOP_LENGTH, fmin=MEL_FMIN, fmax=MEL_FMAX,
                                                      trainable_mel=False, trainable_STFT=False)
        # onset head
        self.sequence_model_onset = MutliHeadAttention1D(in_features=input_features,
                                                         out_features=model_complexity,
                                                         kernel_size=w_size,
                                                         position=position,
                                                         groups=n_heads)
        self.layer_norm_onset = nn.LayerNorm(model_complexity)
        self.linear_onset = nn.Linear(model_complexity, output_features)
        # frame head, conditioned on onset features + onset predictions
        self.sequence_model_frame = MutliHeadAttention1D(in_features=model_complexity+output_features,
                                                         out_features=model_complexity,
                                                         kernel_size=w_size,
                                                         position=position,
                                                         groups=n_heads)
        self.layer_norm_frame = nn.LayerNorm(model_complexity)
        self.linear_frame = nn.Linear(model_complexity, output_features)

    def forward(self, spec):
        """spec: (batch, frames, bins) -> (frame_pred, onset_pred, attention)."""
        x, a = self.sequence_model_onset(spec)
        x = self.layer_norm_onset(x)
        onset_pred = torch.sigmoid(self.linear_onset(x))
        # concatenate the onset predictions with the attention features
        x = torch.cat((onset_pred, x), -1)
        x, _ = self.sequence_model_frame(x)
        x = self.layer_norm_frame(x)
        frame_pred = torch.sigmoid(self.linear_frame(x))
        return frame_pred, onset_pred, a

    def _preprocess(self, audio):
        """Waveform -> normalized (log-)Mel spectrogram, (batch, frames, bins)."""
        spec = self.spectrogram(audio.reshape(-1, audio.shape[-1])[:, :-1])
        if self.log:
            spec = torch.log(spec + 1e-5)
        spec = self.normalize.transform(spec)
        # swap bins with timesteps so it fits the sequence model
        return spec.transpose(-1, -2)

    def run_on_batch(self, batch):
        """Run a forward pass on a labelled batch; returns (predictions, losses, spec)."""
        audio_label = batch['audio']
        onset_label = batch['onset']
        frame_label = batch['frame']
        spec = self._preprocess(audio_label)
        frame_pred, onset_pred, a = self(spec)
        predictions = {
            'onset': onset_pred.reshape(*frame_label.shape),
            'frame': frame_pred.reshape(*frame_label.shape),
            'attention': a
        }
        # the frame-loss key differs between train and eval so both curves
        # can be logged separately
        frame_key = 'loss/train_frame' if self.training else 'loss/test_frame'
        losses = {
            'loss/onset': F.binary_cross_entropy(predictions['onset'], onset_label),
            frame_key: F.binary_cross_entropy(predictions['frame'], frame_label),
        }
        return predictions, losses, spec

    def feed_audio(self, audio):
        """Inference on raw audio (no labels); see class docstring for the fix."""
        spec = self._preprocess(audio)
        frame_pred, onset_pred, a = self(spec)
        predictions = {
            'onset': onset_pred,
            'activation': frame_pred,  # no separate activation head here
            'frame': frame_pred,
            'attention': a
        }
        return predictions, spec

    def load_my_state_dict(self, state_dict):
        """Copy matching parameters from *state_dict*, ignoring the rest."""
        own_state = self.state_dict()
        for name, param in state_dict.items():
            if name not in own_state:
                continue
            if isinstance(param, nn.Parameter):
                # backwards compatibility for serialized parameters
                param = param.data
            own_state[name].copy_(param)
class standalone_self_attention_1D(nn.Module):
    """A single 1-D local-attention layer as a standalone transcriber.

    ``layernorm_pos`` selects where LayerNorm is applied: 'Before' (on the
    attention features) or 'After' (on the linear output); None disables it.
    ``forward`` returns ``(frame_pred, attention)``.

    BUGFIX: ``feed_audio`` used to unpack four values from ``self(spec)``
    although ``forward`` returns two, so it always raised ValueError.
    """

    def __init__(self, input_features, output_features, model_complexity=48, w_size=31,
                 log=True, mode='imagewise', spec='Mel', n_heads=8, position=True, layernorm_pos=None):
        super().__init__()
        self.w_size = w_size
        self.log = log
        self.layernorm_pos = layernorm_pos
        self.normalize = Normalization(mode)
        # log-Mel front end; the `spec` arg is kept for interface compatibility
        self.spectrogram = Spectrogram.MelSpectrogram(sr=SAMPLE_RATE, win_length=WINDOW_LENGTH, n_mels=N_BINS,
                                                      hop_length=HOP_LENGTH, fmin=MEL_FMIN, fmax=MEL_FMAX,
                                                      trainable_mel=False, trainable_STFT=False)
        self.sequence_model = MutliHeadAttention1D(in_features=input_features,
                                                   out_features=model_complexity,
                                                   kernel_size=w_size,
                                                   position=position,
                                                   groups=n_heads)
        if layernorm_pos == 'Before':
            self.layer_norm = nn.LayerNorm(model_complexity)
        elif layernorm_pos == 'After':
            self.layer_norm = nn.LayerNorm(output_features)
        self.linear = nn.Linear(model_complexity, output_features)

    def forward(self, spec):
        """spec: (batch, frames, bins) -> (frame_pred, attention)."""
        x, a = self.sequence_model(spec)
        if self.layernorm_pos == 'Before':
            x = self.layer_norm(x)
        x = self.linear(x)
        if self.layernorm_pos == 'After':
            x = self.layer_norm(x)
        frame_pred = torch.sigmoid(x)
        return frame_pred, a

    def _preprocess(self, audio):
        """Waveform -> normalized (log-)Mel spectrogram, (batch, frames, bins)."""
        spec = self.spectrogram(audio.reshape(-1, audio.shape[-1])[:, :-1])
        if self.log:
            spec = torch.log(spec + 1e-5)
        spec = self.normalize.transform(spec)
        # swap bins with timesteps so it fits the sequence model
        return spec.transpose(-1, -2)

    def run_on_batch(self, batch):
        """Run a forward pass on a labelled batch; returns (predictions, losses, spec)."""
        audio_label = batch['audio']
        onset_label = batch['onset']  # read for API parity; no onset loss here
        frame_label = batch['frame']
        spec = self._preprocess(audio_label)
        frame_pred, a = self(spec)
        # this model has no onset head, so the frame prediction doubles as it
        predictions = {
            'onset': frame_pred.reshape(*frame_label.shape),
            'frame': frame_pred.reshape(*frame_label.shape),
            'attention': a
        }
        # the frame-loss key differs between train and eval so both curves
        # can be logged separately
        frame_key = 'loss/train_frame' if self.training else 'loss/test_frame'
        losses = {
            frame_key: F.binary_cross_entropy(predictions['frame'], frame_label),
        }
        return predictions, losses, spec

    def feed_audio(self, audio):
        """Inference on raw audio (no labels); see class docstring for the fix."""
        spec = self._preprocess(audio)
        frame_pred, a = self(spec)
        predictions = {
            'onset': frame_pred,
            'activation': frame_pred,
            'frame': frame_pred,
            'attention': a
        }
        return predictions, spec

    def load_my_state_dict(self, state_dict):
        """Copy matching parameters from *state_dict*, ignoring the rest."""
        own_state = self.state_dict()
        for name, param in state_dict.items():
            if name not in own_state:
                continue
            if isinstance(param, nn.Parameter):
                # backwards compatibility for serialized parameters
                param = param.data
            own_state[name].copy_(param)
class standalone_self_attention_2D(nn.Module):
    """A single 2-D local-attention layer as a standalone transcriber.

    ``forward`` returns ``(frame_pred, attention)``.

    BUGFIX: ``feed_audio`` used to unpack four values from ``self(spec)``
    although ``forward`` returns two, so it always raised ValueError.

    NOTE(review): ``input_features`` is passed as the attention layer's
    ``in_channels`` even though ``forward`` unsqueezes the spectrogram to a
    single channel — this only works when input_features == 1; verify the
    intended usage.
    """

    def __init__(self, input_features, output_features, model_complexity=16, w_size=(3,3),
                 log=True, mode='imagewise', spec='Mel', n_heads=8, position=True):
        super().__init__()
        self.w_size = w_size
        self.log = log
        self.normalize = Normalization(mode)
        # log-Mel front end; the `spec` arg is kept for interface compatibility
        self.spectrogram = Spectrogram.MelSpectrogram(sr=SAMPLE_RATE, win_length=WINDOW_LENGTH, n_mels=N_BINS,
                                                      hop_length=HOP_LENGTH, fmin=MEL_FMIN, fmax=MEL_FMAX,
                                                      trainable_mel=False, trainable_STFT=False)
        self.sequence_model = MutliHeadAttention2D(in_channels=input_features,
                                                   out_channels=model_complexity,
                                                   kernel_size=w_size,
                                                   stride=(1,1),
                                                   groups=1, bias=False)
        self.linear = nn.Linear(N_BINS*model_complexity, output_features)

    def forward(self, spec):
        """spec: (batch, frames, bins) -> (frame_pred, attention)."""
        spec = spec.unsqueeze(1)  # add a channel dimension for 2-D attention
        x, a = self.sequence_model(spec)
        # (batch, channels, frames, bins) -> (batch, frames, channels * bins)
        x = x.transpose(1,2).flatten(2)
        frame_pred = torch.sigmoid(self.linear(x))
        return frame_pred, a

    def _preprocess(self, audio):
        """Waveform -> normalized (log-)Mel spectrogram, (batch, frames, bins)."""
        spec = self.spectrogram(audio.reshape(-1, audio.shape[-1])[:, :-1])
        if self.log:
            spec = torch.log(spec + 1e-5)
        spec = self.normalize.transform(spec)
        # swap bins with timesteps so it fits the sequence model
        return spec.transpose(-1, -2)

    def run_on_batch(self, batch):
        """Run a forward pass on a labelled batch; returns (predictions, losses, spec)."""
        audio_label = batch['audio']
        onset_label = batch['onset']  # read for API parity; no onset loss here
        frame_label = batch['frame']
        spec = self._preprocess(audio_label)
        frame_pred, a = self(spec)
        # this model has no onset head, so the frame prediction doubles as it
        predictions = {
            'onset': frame_pred.reshape(*frame_label.shape),
            'frame': frame_pred.reshape(*frame_label.shape),
        }
        # the frame-loss key differs between train and eval so both curves
        # can be logged separately
        frame_key = 'loss/train_frame' if self.training else 'loss/test_frame'
        losses = {
            frame_key: F.binary_cross_entropy(predictions['frame'], frame_label),
        }
        return predictions, losses, spec

    def feed_audio(self, audio):
        """Inference on raw audio (no labels); see class docstring for the fix."""
        spec = self._preprocess(audio)
        frame_pred, a = self(spec)
        predictions = {
            'onset': frame_pred,
            'activation': frame_pred,
            'frame': frame_pred,
            'attention': a
        }
        return predictions, spec
def load_my_state_dict(self, state_dict):
own_state = self.state_dict()
for name, param in state_dict.items():
if name not in own_state:
continue
if isinstance(param, nn.Parameter):
# backwards compatibility for serialized parameters
param = param.data
own_state[name].copy_(param) | 29,286 | 40.958453 | 211 | py |
ReconVAT | ReconVAT-master/model/decoding.py | import numpy as np
import torch
def extract_notes_wo_velocity(onsets, frames, onset_threshold=0.5, frame_threshold=0.5, rule='rule1'):
    """
    Finds the note timings based on the onsets and frames information

    Parameters
    ----------
    onsets: torch.FloatTensor, shape = [frames, bins]
    frames: torch.FloatTensor, shape = [frames, bins]
    onset_threshold: float
    frame_threshold: float
    rule: str, 'rule1' additionally requires the frame activation to be on
          at the onset time-step; 'rule2' uses the onsets alone

    Returns
    -------
    pitches: np.ndarray of bin_indices
    intervals: np.ndarray of rows containing (onset_index, offset_index)
    """
    onsets = (onsets > onset_threshold).cpu().to(torch.uint8)
    frames = (frames > frame_threshold).cpu().to(torch.uint8)

    # Keep only the first time-step of every onset run (0 -> 1 transition).
    first_steps = torch.cat([onsets[:1, :], onsets[1:, :] - onsets[:-1, :]], dim=0) == 1

    if rule == 'rule2':
        pass
    elif rule == 'rule1':
        # Use in simple models: require the frame channel to agree at onset.
        first_steps = first_steps & (frames == 1)
    else:
        raise NameError('Please enter the correct rule name')

    pitches, intervals = [], []
    n_steps = onsets.shape[0]
    for idx in torch.nonzero(first_steps, as_tuple=False):
        start = idx[0].item()
        pitch = idx[1].item()
        end = start
        # Advance until both the onset and frame activations switch off.
        while onsets[end, pitch].item() or frames[end, pitch].item():
            end += 1
            if end == n_steps:
                break
        # Only keep notes with strictly positive duration.
        if end > start:
            pitches.append(pitch)
            intervals.append([start, end])
    return np.array(pitches), np.array(intervals)
def extract_notes(onsets, frames, velocity, onset_threshold=0.5, frame_threshold=0.5):
    """
    Finds the note timings based on the onsets and frames information

    Parameters
    ----------
    onsets: torch.FloatTensor, shape = [frames, bins]
    frames: torch.FloatTensor, shape = [frames, bins]
    velocity: torch.FloatTensor, shape = [frames, bins]
    onset_threshold: float
    frame_threshold: float

    Returns
    -------
    pitches: np.ndarray of bin_indices
    intervals: np.ndarray of rows containing (onset_index, offset_index)
    velocities: np.ndarray of velocity values (mean over the onset frames)
    """
    onsets = (onsets > onset_threshold).cpu().to(torch.uint8)
    frames = (frames > frame_threshold).cpu().to(torch.uint8)
    # A note starts at the first time-step where the onset activation turns on.
    onset_diff = torch.cat([onsets[:1, :], onsets[1:, :] - onsets[:-1, :]], dim=0) == 1

    pitches = []
    intervals = []
    velocities = []

    # torch.nonzero(..., as_tuple=False) replaces the deprecated zero-argument
    # Tensor.nonzero() call and matches extract_notes_wo_velocity above.
    for nonzero in torch.nonzero(onset_diff, as_tuple=False):
        frame = nonzero[0].item()
        pitch = nonzero[1].item()
        onset = frame
        offset = frame
        velocity_samples = []
        # This while loop is looking for where the note ends.
        while onsets[offset, pitch].item() or frames[offset, pitch].item():
            if onsets[offset, pitch].item():
                velocity_samples.append(velocity[offset, pitch].item())
            offset += 1
            if offset == onsets.shape[0]:
                break
        # After knowing where the note starts and ends, return the pitch
        # information (and velocity). Zero-length notes are dropped.
        if offset > onset:
            pitches.append(pitch)
            intervals.append([onset, offset])
            velocities.append(np.mean(velocity_samples) if len(velocity_samples) > 0 else 0)
    return np.array(pitches), np.array(intervals), np.array(velocities)
def notes_to_frames(pitches, intervals, shape):
    """Convert note lists back into per-frame pitch data.

    Parameters
    ----------
    pitches: list of pitch bin indices
    intervals: list of [onset, offset] ranges of bin indices
    shape: the shape of the original piano roll, [n_frames, n_bins]

    Returns
    -------
    time: np.ndarray containing the frame indices
    freqs: list of np.ndarray, each containing the frequency bin indices
    """
    piano_roll = np.zeros(tuple(shape))
    # Paint every note as a run of ones on its pitch column.
    for pitch, (start, stop) in zip(pitches, intervals):
        piano_roll[start:stop, pitch] = 1
    time = np.arange(piano_roll.shape[0])
    freqs = [np.flatnonzero(piano_roll[step]) for step in time]
    return time, freqs
| 4,479 | 33.198473 | 134 | py |
ReconVAT | ReconVAT-master/model/UNet_onset.py | """
A rough translation of Magenta's Onsets and Frames implementation [1].
[1] https://github.com/tensorflow/magenta/blob/master/magenta/models/onsets_frames_transcription/model.py
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init
from nnAudio import Spectrogram
from .constants import *
from model.utils import Normalization
from itertools import cycle
def create_triangular_cycle(start, end, period):
    """Return an endless iterator over one period of a triangular wave.

    The wave rises from `start` to `end` over `period` samples and falls
    back, omitting the duplicated peak/trough samples before repeating.
    """
    ascending = torch.linspace(start, end, period)
    descending = torch.linspace(end, start, period)[1:-1]
    return cycle(torch.cat((ascending, descending)))
class MutliHeadAttention1D(nn.Module):
    """1D multi-head local self-attention.

    Each time-step attends over a zero-padded sliding window of
    `kernel_size` neighbouring steps, split into `groups` heads, with an
    optional learned relative position embedding added to the keys.
    """
    def __init__(self, in_features, out_features, kernel_size, stride=1, groups=1, position=True, bias=False):
        """kernel_size is the 1D local attention window size"""
        super().__init__()
        self.out_features = out_features
        self.kernel_size = kernel_size
        self.stride = stride
        self.position = position
        # Padding should always be (kernel_size-1)/2
        # Isn't it?
        self.padding = (kernel_size-1)//2
        self.groups = groups

        # Make sure the feature dim is divisible by the n_heads
        assert self.out_features % self.groups == 0, f"out_channels should be divided by groups. (example: out_channels: 40, groups: 4). Now out_channels={self.out_features}, groups={self.groups}"
        assert (kernel_size-1) % 2 == 0, "kernal size must be odd number"

        if self.position:
            # Relative position encoding
            self.rel = nn.Parameter(torch.randn(1, out_features, kernel_size), requires_grad=True)

        # Input shape = (batch, len, feat)

        # Increasing the channel deapth (feature dim) with Conv2D
        # kernel_size=1 such that it expands only the feature dim
        # without affecting other dimensions
        self.W_k = nn.Linear(in_features, out_features, bias=bias)
        self.W_q = nn.Linear(in_features, out_features, bias=bias)
        self.W_v = nn.Linear(in_features, out_features, bias=bias)

        self.reset_parameters()

    def forward(self, x):
        """Return (output, attention) for x of shape (batch, len, feat)."""
        batch, seq_len, feat_dim = x.size()
        # Zero-pad the sequence so every step has a full local window.
        padded_x = F.pad(x, [0, 0, self.padding, self.padding])
        q_out = self.W_q(x)
        k_out = self.W_k(padded_x)
        v_out = self.W_v(padded_x)

        k_out = k_out.unfold(1, self.kernel_size, self.stride)
        # (batch, L, feature, local_window)
        v_out = v_out.unfold(1, self.kernel_size, self.stride)
        # (batch, L, feature, local_window)

        if self.position:
            k_out = k_out + self.rel # relative position?

        k_out = k_out.contiguous().view(batch, seq_len, self.groups, self.out_features // self.groups, -1)
        v_out = v_out.contiguous().view(batch, seq_len, self.groups, self.out_features // self.groups, -1)
        # (batch, L, n_heads, feature_per_head, local_window)

        # expand the last dimension s.t. it can multiple with the local att window
        q_out = q_out.view(batch, seq_len, self.groups, self.out_features // self.groups, 1)
        # (batch, L, n_heads, feature_per_head, 1)

        # Dot-product energy per head, softmax over the local window.
        energy = (q_out * k_out).sum(-2, keepdim=True)

        attention = F.softmax(energy, dim=-1)
        # (batch, L, n_heads, 1, local_window)

        out = attention*v_out
        # out = torch.einsum('blnhk,blnhk -> blnh', attention, v_out).view(batch, seq_len, -1)
        # (batch, c, H, W)

        return out.sum(-1).flatten(2), attention.squeeze(3)

    def reset_parameters(self):
        init.kaiming_normal_(self.W_k.weight, mode='fan_out', nonlinearity='relu')
        init.kaiming_normal_(self.W_v.weight, mode='fan_out', nonlinearity='relu')
        init.kaiming_normal_(self.W_q.weight, mode='fan_out', nonlinearity='relu')

        if self.position:
            init.normal_(self.rel, 0, 1)
class UNet_VAT(nn.Module):
    """
    Virtual Adversarial Training (VAT) regularizer for the U-net transcriber.

    Finds, by power iteration, the perturbation of the input spectrogram
    that most changes the model's frame/onset predictions, then returns the
    divergence between clean and perturbed predictions.

    Parameters
    ----------
    XI: float, scale of the random probe perturbation during power iteration
    epsilon: float, magnitude of the final adversarial perturbation
    n_power: int, number of power-iteration steps
    KL_Div: bool, use binary KL divergence instead of BCE as the divergence
    """
    def __init__(self, XI, epsilon, n_power, KL_Div, reconstruction=False):
        super().__init__()
        self.n_power = n_power
        self.XI = XI
        self.epsilon = epsilon
        self.KL_Div = KL_Div
        self.binwise = False
        self.reconstruction = reconstruction

    def forward(self, model, x):
        """Return (vat_loss dict, r_adv, normalized gradient direction)."""
        with torch.no_grad():
            # Clean predictions act as soft labels; no gradient needed.
            frame_ref, onset_ref, _ = model.transcriber(x)

        # generate_virtual_adversarial_perturbation
        d = torch.randn_like(x, requires_grad=True)  # random probe direction
        # NOTE(review): after one iteration d is a detached gradient, so
        # n_power > 1 would fail at loss.backward(); this module is only
        # instantiated with n_power=1 in this file.
        for _ in range(self.n_power):
            r = self.XI * _l2_normalize(d, binwise=self.binwise)
            x_adv = (x + r).clamp(0,1)
            frame_pred, onset_pred, _ = model.transcriber(x_adv)
            if self.KL_Div==True:
                # BUGFIX: previously referenced undefined names y_pred/y_ref,
                # raising NameError whenever KL_Div was enabled.
                loss = binary_kl_div(frame_pred, frame_ref) + binary_kl_div(onset_pred, onset_ref)
            else:
                frame_loss = F.binary_cross_entropy(frame_pred, frame_ref)
                onset_loss = F.binary_cross_entropy(onset_pred, onset_ref)
                loss = (frame_loss + onset_loss)
            loss.backward() # Calculate gradient wrt d
            d = d.grad.detach()*1e10
            model.zero_grad() # prevent gradient change in the model

        # generating virtual labels and calculate VAT
        r_adv = self.epsilon * _l2_normalize(d, binwise=self.binwise)
        assert torch.isnan(r_adv).any()==False, f"r_adv has nan, d min={d.min()} d max={d.max()} d mean={d.mean()} please debug tune down the XI for VAT"
        # BUGFIX: was a duplicated isnan check; inf must be tested with isinf.
        assert torch.isinf(r_adv).any()==False, f"r_adv has inf, d min={d.min()} d max={d.max()} d mean={d.mean()} please debug tune down the XI for VAT"

        x_adv = (x + r_adv).clamp(0,1)
        frame_pred, onset_pred, _ = model.transcriber(x_adv)
        if self.KL_Div==True:
            # BUGFIX: must return the same {'frame','onset'} dict as the BCE
            # branch (callers index lds['frame'] / lds['onset']).
            vat_loss = {'frame': binary_kl_div(frame_pred, frame_ref),
                        'onset': binary_kl_div(onset_pred, onset_ref)}
        else:
            vat_frame_loss = F.binary_cross_entropy(frame_pred, frame_ref)
            vat_onset_loss = F.binary_cross_entropy(onset_pred, onset_ref)
            vat_loss = {'frame': vat_frame_loss,
                        'onset': vat_onset_loss}

        return vat_loss, r_adv, _l2_normalize(d, binwise=self.binwise)  # already averaged
def _l2_normalize(d, binwise):
# input shape (batch, timesteps, bins, ?)
if binwise==True:
d = d/(torch.abs(d)+1e-8)
else:
d = d/(torch.norm(d, dim=-1, keepdim=True))
return d
def binary_kl_div(y_pred, y_ref):
    """KL divergence between per-element Bernoulli distributions.

    Both inputs are clamped away from {0, 1} so log() stays finite; the
    result is KL(q || p) with q built from y_pred and p from y_ref,
    averaged over the first (batch) dimension.
    """
    pred = torch.clamp(y_pred, 1e-4, 0.9999)  # prevent inf in kl_div
    ref = torch.clamp(y_ref, 1e-4, 0.9999)
    # Stack (p, 1-p) so each element becomes a 2-way distribution.
    q = torch.stack((pred, 1 - pred), -1)
    log_p = torch.stack((ref, 1 - ref), -1).log()
    assert torch.isnan(log_p).any()==False, "r_adv exploded, please debug tune down the XI for VAT"
    assert torch.isinf(log_p).any()==False, "r_adv vanished, please debug tune up the XI for VAT"
    return F.kl_div(log_p, q, reduction='batchmean')
# Momentum shared by every BatchNorm layer in the U-net blocks below.
batchNorm_momentum = 0.1
class block(nn.Module):
    """Encoder block: two convs with BatchNorm + leaky ReLU, a 1x1 residual
    skip from the input, then a strided conv for downsampling.
    """
    def __init__(self, inp, out, ksize, pad, ds_ksize, ds_stride):
        super(block, self).__init__()
        self.conv1 = nn.Conv2d(inp,out, kernel_size=ksize, padding=pad)
        self.bn1 = nn.BatchNorm2d(out, momentum= batchNorm_momentum)
        self.conv2 = nn.Conv2d(out, out, kernel_size=ksize, padding=pad)
        self.bn2 = nn.BatchNorm2d(out, momentum= batchNorm_momentum)
        self.skip = nn.Conv2d(inp, out, kernel_size=1, padding=0)
        self.ds = nn.Conv2d(out, out, kernel_size=ds_ksize, stride=ds_stride, padding=0)

    def forward(self, x):
        x11 = F.leaky_relu(self.bn1(self.conv1(x)))
        x12 = F.leaky_relu(self.bn2(self.conv2(x11)))
        x12 += self.skip(x)  # residual connection
        xp = self.ds(x12)
        # Returns the downsampled map twice (second slot kept for interface
        # compatibility) plus the pre-downsampling size for the decoder.
        return xp, xp, x12.size()
class d_block(nn.Module):
    """Decoder block: transposed-conv upsampling, optional skip-connection
    concatenation, then two transposed convolutions.
    """
    def __init__(self, inp, out, isLast, ksize, pad, ds_ksize, ds_stride):
        super(d_block, self).__init__()
        self.conv2d = nn.ConvTranspose2d(inp, int(inp/2), kernel_size=ksize, padding=pad)
        self.bn2d = nn.BatchNorm2d(int(inp/2), momentum= batchNorm_momentum)
        self.conv1d = nn.ConvTranspose2d(int(inp/2), out, kernel_size=ksize, padding=pad)

        if not isLast:
            self.bn1d = nn.BatchNorm2d(out, momentum= batchNorm_momentum)
            # Non-final blocks upsample only inp-out channels; the skip
            # connection supplies the remaining channels after concatenation.
            self.us = nn.ConvTranspose2d(inp-out, inp-out, kernel_size=ds_ksize, stride=ds_stride)
        else:
            self.us = nn.ConvTranspose2d(inp, inp, kernel_size=ds_ksize, stride=ds_stride)

    def forward(self, x, size=None, isLast=None, skip=None):
        # print(f'x.shape={x.shape}')
        # print(f'target shape = {size}')
        # `size` pins the exact output spatial shape of the upsampling step.
        x = self.us(x,output_size=size)
        if not isLast: x = torch.cat((x, skip), 1)
        x = F.leaky_relu(self.bn2d(self.conv2d(x)))
        if isLast: x = self.conv1d(x)  # no BN/activation on the final layer
        else: x = F.leaky_relu(self.bn1d(self.conv1d(x)))
        return x
class Encoder(nn.Module):
    """Four-stage convolutional encoder.

    Returns the bottleneck features, the pre-downsampling sizes of every
    stage, and per-stage skip-connection feature maps for the Decoder.
    """
    def __init__(self,ds_ksize, ds_stride):
        super(Encoder, self).__init__()
        self.block1 = block(1,16,(3,3),(1,1),ds_ksize, ds_stride)
        self.block2 = block(16,32,(3,3),(1,1),ds_ksize, ds_stride)
        self.block3 = block(32,64,(3,3),(1,1),ds_ksize, ds_stride)
        self.block4 = block(64,128,(3,3),(1,1),ds_ksize, ds_stride)

        # Extra convs applied to the intermediate maps used as skips.
        self.conv1 = nn.Conv2d(64,64, kernel_size=(3,3), padding=(1,1))
        self.conv2 = nn.Conv2d(32,32, kernel_size=(3,3), padding=(1,1))
        self.conv3 = nn.Conv2d(16,16, kernel_size=(3,3), padding=(1,1))

    def forward(self, x):
        x1,idx1,s1 = self.block1(x)
        x2,idx2,s2 = self.block2(x1)
        x3,idx3,s3 = self.block3(x2)
        x4,idx4,s4 = self.block4(x3)

        c1=self.conv1(x3)
        c2=self.conv2(x2)
        c3=self.conv3(x1)
        return x4,[s1,s2,s3,s4],[c1,c2,c3,x1]
class Decoder(nn.Module):
    """Four-stage transposed-conv decoder mirroring Encoder.

    Consumes the bottleneck features, the recorded stage sizes `s`, and the
    skip connections `c`; emits `num_instruments` output channels without a
    final activation.
    """
    def __init__(self,ds_ksize, ds_stride, num_instruments):
        super(Decoder, self).__init__()
        self.d_block1 = d_block(192,64,False,(3,3),(1,1),ds_ksize, ds_stride)
        self.d_block2 = d_block(96,32,False,(3,3),(1,1),ds_ksize, ds_stride)
        self.d_block3 = d_block(48,16,False,(3,3),(1,1),ds_ksize, ds_stride)
        self.d_block4 = d_block(16,num_instruments,True,(3,3),(1,1),ds_ksize, ds_stride)

    def forward(self, x, s, c=[None,None,None,None]):
        x = self.d_block1(x,s[3],False,c[0])
        x = self.d_block2(x,s[2],False,c[1])
        x = self.d_block3(x,s[1],False,c[2])
        x = self.d_block4(x,s[0],True,c[3])
        # reconsturction = torch.sigmoid(self.d_block4(x,s[0],True,c[3]))

        # return torch.sigmoid(x) # This is required to boost the accuracy
        return x # This is required to boost the accuracy
class Stack(nn.Module):
    """Local self-attention followed by a linear projection and dropout.

    Returns both the projected features and the attention weights so
    callers can inspect what the layer attended to.
    """
    def __init__(self, input_size, hidden_dim, attn_size=31, attn_group=4, output_dim=88, dropout=0.5):
        super().__init__()
        self.attention = MutliHeadAttention1D(input_size, hidden_dim, attn_size, position=True, groups=attn_group)
        self.linear = nn.Linear(hidden_dim, output_dim)
        self.dropout = nn.Dropout(dropout)
    def forward(self, x):
        """Return (projected features, attention weights)."""
        features, attn_weights = self.attention(x)
        return self.dropout(self.linear(features)), attn_weights
class Spec2Roll(nn.Module):
    """Spectrogram-to-pianoroll transcriber: a U-net with a 2-channel output
    (onset and feature maps), each projected to 88 keys and combined by a
    self-attention Stack into the final frame roll.
    """
    def __init__(self, ds_ksize, ds_stride, complexity=4):
        super().__init__()
        self.Unet1_encoder = Encoder(ds_ksize, ds_stride)
        self.Unet1_decoder = Decoder(ds_ksize, ds_stride, 2)
        self.lstm1 = MutliHeadAttention1D(N_BINS+88, N_BINS*complexity, 31, position=True, groups=complexity)
        # self.lstm1 = nn.LSTM(N_BINS, N_BINS, batch_first=True, bidirectional=True)
        self.linear1 = nn.Linear(N_BINS*complexity, 88)
        self.linear_onset = nn.Linear(N_BINS, 88)
        self.linear_feature = nn.Linear(N_BINS, 88)
        self.dropout_layer = nn.Dropout(0.5)

        # self.onset_stack = Stack(input_size=N_BINS, hidden_dim=768, attn_size=31, attn_group=4, output_dim=88, dropout=0)
        # self.feat_stack = Stack(input_size=N_BINS, hidden_dim=768, attn_size=31, attn_group=4, output_dim=88, dropout=0)
        self.combine_stack = Stack(input_size=88*2, hidden_dim=768, attn_size=31, attn_group=6, output_dim=88, dropout=0)

    def forward(self, x):
        """Return (pianoroll, onset, attention) for a spectrogram batch."""
        # U-net 1
        x,s,c = self.Unet1_encoder(x)
        x = self.Unet1_decoder(x,s,c)
        # Channel 0 of the decoder output feeds the onset head,
        # channel 1 the frame-feature head.
        onset = self.linear_onset(x[:,0])
        onset = torch.sigmoid(onset)
        feat = self.linear_feature(x[:,1])
        x = torch.cat((onset, feat), -1)
        x, a = self.combine_stack(x)
        pianoroll = torch.sigmoid(x)

        return pianoroll, onset, a
class Roll2Spec(nn.Module):
    """Pianoroll-to-spectrogram reconstructor: attention expands the 88-key
    roll to N_BINS features, then a second U-net produces the spectrogram.
    """
    def __init__(self, ds_ksize, ds_stride, complexity=4):
        super().__init__()
        self.Unet2_encoder = Encoder(ds_ksize, ds_stride)
        self.Unet2_decoder = Decoder(ds_ksize, ds_stride, 1)
        # self.lstm2 = nn.LSTM(88, N_BINS, batch_first=True, bidirectional=True)
        self.lstm2 = MutliHeadAttention1D(88, N_BINS*complexity, 31, position=True, groups=4)
        self.linear2 = nn.Linear(N_BINS*complexity, N_BINS)

    def forward(self, x):
        """Return (reconstructed spectrogram, attention) for a pianoroll."""
        # U-net 2
        x, a = self.lstm2(x)
        x= torch.sigmoid(self.linear2(x)) # ToDo, remove the sigmoid activation and see if we get a better result
        x,s,c = self.Unet2_encoder(x.unsqueeze(1))
        reconstruction = self.Unet2_decoder(x,s,c) # predict roll

        # x,s,c = self.Unet2_encoder(x.unsqueeze(1))
        # x = self.Unet2_decoder(x,s,c) # predict roll
        # x, a = self.lstm2(x.squeeze(1))
        # reconstruction = self.linear2(x) # ToDo, remove the sigmoid activation and see if we get a better result
        # reconstruction = reconstruction.clamp(0,1).unsqueeze(1)

        return reconstruction, a
class UNet_Onset(nn.Module):
    """U-net transcriber with an onset head, an optional reconstruction
    branch (spec -> roll -> spec -> roll), and Virtual Adversarial Training
    (VAT) regularization on both labeled and unlabeled batches.
    """
    def __init__(self, ds_ksize, ds_stride, log=True, reconstruction=True, mode='imagewise', spec='CQT', device='cpu', XI=1e-6, eps=1e-2):
        super().__init__()
        global N_BINS # using the N_BINS parameter from constant.py

        # Selecting the type of spectrogram to use
        if spec == 'CQT':
            r=2
            N_BINS = 88*r
            self.spectrogram = Spectrogram.CQT1992v2(sr=SAMPLE_RATE, hop_length=HOP_LENGTH,
                                                     n_bins=N_BINS, fmin=27.5,
                                                     bins_per_octave=12*r, trainable=False)
        elif spec == 'Mel':
            self.spectrogram = Spectrogram.MelSpectrogram(sr=SAMPLE_RATE, win_length=WINDOW_LENGTH, n_mels=N_BINS,
                                                          hop_length=HOP_LENGTH, fmin=MEL_FMIN, fmax=MEL_FMAX,
                                                          trainable_mel=False, trainable_STFT=False)
        else:
            print(f'Please select a correct spectrogram')

        self.log = log
        self.normalize = Normalization(mode)
        self.reconstruction = reconstruction
        # VAT regularizer (n_power=1, BCE divergence).
        self.vat_loss = UNet_VAT(XI, eps, 1, False)

        # self.Unet1_encoder = Encoder(ds_ksize, ds_stride)
        # self.Unet1_decoder = Decoder(ds_ksize, ds_stride)
        # self.lstm1 = MutliHeadAttention1D(N_BINS, N_BINS*4, 31, position=True, groups=4)
        # # self.lstm1 = nn.LSTM(N_BINS, N_BINS, batch_first=True, bidirectional=True)
        # self.linear1 = nn.Linear(N_BINS*4, 88)
        self.transcriber = Spec2Roll(ds_ksize, ds_stride)
        if reconstruction==True:
            # self.Unet2_encoder = Encoder(ds_ksize, ds_stride)
            # self.Unet2_decoder = Decoder(ds_ksize, ds_stride)
            # # self.lstm2 = nn.LSTM(88, N_BINS, batch_first=True, bidirectional=True)
            # self.lstm2 = MutliHeadAttention1D(88, N_BINS*4, 31, position=True, groups=4)
            # self.linear2 = nn.Linear(N_BINS*4, N_BINS)
            self.reconstructor = Roll2Spec(ds_ksize, ds_stride)

    def forward(self, x):
        """Transcribe x; with reconstruction also re-transcribe the
        reconstructed spectrogram for the consistency loss."""
        # U-net 1
        pianoroll, onset, a = self.transcriber(x)

        if self.reconstruction:
            # U-net 2
            reconstruction, a_reconstruct = self.reconstructor(pianoroll)

            # Applying U-net 1 to the reconstructed spectrograms
            pianoroll2, onset2, a_2 = self.transcriber(reconstruction)

            # # U-net2
            # x, h = self.lstm2(pianoroll)
            # feat2= torch.sigmoid(self.linear2(x)) # ToDo, remove the sigmoid activation and see if we get a better result
            # x,s,c = self.Unet2_encoder(feat2.unsqueeze(1))
            # reconstruction = self.Unet2_decoder(x,s,c) # predict roll

            # # Applying U-net 1 to the reconstructed spectrograms
            # x,s,c = self.Unet1_encoder(reconstruction)
            # feat1b = self.Unet1_decoder(x,s,c)
            # x, h = self.lstm1(feat1b.squeeze(1)) # remove the channel dim
            # pianoroll2 = torch.sigmoid(self.linear1(x)) # Use the full LSTM output

            return reconstruction, pianoroll, onset, pianoroll2, onset2, a
        else:
            return pianoroll, onset, a

    def run_on_batch(self, batch, batch_ul=None, VAT=False):
        """Run one training/eval step; return (predictions, losses, spec).

        batch_ul, when given, supplies unlabeled audio for the VAT term;
        VAT=True additionally computes the VAT term on the labeled batch.
        """
        audio_label = batch['audio']
        onset_label = batch['onset']
        frame_label = batch['frame']
        if frame_label.dim() == 2:
            frame_label = frame_label.unsqueeze(0)

        if batch_ul:
            audio_label_ul = batch_ul['audio']
            # NOTE(review): reshape uses audio_label.shape[-1] (the labeled
            # batch) rather than audio_label_ul -- confirm both batches share
            # the same segment length.
            spec = self.spectrogram(audio_label_ul.reshape(-1, audio_label.shape[-1])[:, :-1]) # x = torch.rand(8,229, 640)
            if self.log:
                spec = torch.log(spec + 1e-5)
            spec = self.normalize.transform(spec)
            spec = spec.transpose(-1,-2).unsqueeze(1)

            lds_ul, _, r_norm_ul = self.vat_loss(self, spec)
        else:
            lds_ul = {'frame': torch.tensor(0.),
                      'onset': torch.tensor(0.)}
            r_norm_ul = torch.tensor(0.)

        # Converting audio to spectrograms
        spec = self.spectrogram(audio_label.reshape(-1, audio_label.shape[-1])[:, :-1]) # x = torch.rand(8,229, 640)

        # log compression
        if self.log:
            spec = torch.log(spec + 1e-5)

        # Normalizing spectrograms
        spec = self.normalize.transform(spec)

        # swap spec bins with timesteps so that it fits LSTM later
        spec = spec.transpose(-1,-2).unsqueeze(1) # shape (8,1,640,229)

        if VAT:
            lds_l, r_adv, r_norm_l = self.vat_loss(self, spec)
            r_adv = r_adv.squeeze(1) # remove the channel dimension
        else:
            r_adv = None
            lds_l = {'frame': torch.tensor(0.),
                     'onset': torch.tensor(0.)}
            r_norm_l = torch.tensor(0.)

        if frame_label.dim()==2:
            frame_label=frame_label.unsqueeze(0)
        if onset_label.dim()==2:
            onset_label=onset_label.unsqueeze(0)

        if self.reconstruction:
            reconstrut, pianoroll, onset, pianoroll2, onset2, a = self(spec)
            if self.training:
                predictions = {
                    'frame': pianoroll,
                    'onset': onset,
                    'frame2':pianoroll2,
                    'onset2':onset2,
                    'attention': a,
                    'r_adv': r_adv,
                    'reconstruction': reconstrut,
                }
                losses = {
                    # Reconstruction target is the (detached) input spec.
                    'loss/train_reconstruction': F.mse_loss(reconstrut.squeeze(1), spec.squeeze(1).detach()),
                    'loss/train_frame': F.binary_cross_entropy(predictions['frame'].squeeze(1), frame_label),
                    'loss/train_frame2': F.binary_cross_entropy(predictions['frame2'].squeeze(1), frame_label),
                    'loss/train_onset': F.binary_cross_entropy(predictions['onset'].squeeze(1), onset_label),
                    'loss/train_onset2': F.binary_cross_entropy(predictions['onset2'].squeeze(1), onset_label),
                    'loss/train_LDS_l_frame': lds_l['frame'],
                    'loss/train_LDS_l_onset': lds_l['onset'],
                    'loss/train_LDS_ul_frame': lds_ul['frame'],
                    'loss/train_LDS_ul_onset': lds_ul['onset'],
                    'loss/train_r_norm_l': r_norm_l.abs().mean(),
                    'loss/train_r_norm_ul': r_norm_ul.abs().mean()
                }
            else:
                predictions = {
                    'frame': pianoroll.reshape(*frame_label.shape),
                    'onset': onset.reshape(*onset.shape),
                    'frame2':pianoroll2.reshape(*frame_label.shape),
                    'onset2':onset2.reshape(*onset.shape),
                    'attention': a,
                    'r_adv': r_adv,
                    'reconstruction': reconstrut,
                }
                losses = {
                    'loss/test_reconstruction': F.mse_loss(reconstrut.squeeze(1), spec.squeeze(1).detach()),
                    'loss/test_frame': F.binary_cross_entropy(predictions['frame'].squeeze(1), frame_label),
                    'loss/test_frame2': F.binary_cross_entropy(predictions['frame2'].squeeze(1), frame_label),
                    'loss/test_onset': F.binary_cross_entropy(predictions['onset'].squeeze(1), onset_label),
                    'loss/test_onset2': F.binary_cross_entropy(predictions['onset2'].squeeze(1), onset_label),
                    'loss/test_LDS_l_frame': lds_l['frame'],
                    'loss/test_LDS_l_onset': lds_l['onset'],
                    'loss/test_r_norm_l': r_norm_l.abs().mean()
                }

            return predictions, losses, spec.squeeze(1)

        else:
            frame_pred, onset, a = self(spec)
            if self.training:
                predictions = {
                    'onset': onset,
                    'frame': frame_pred,
                    'r_adv': r_adv,
                    'attention': a,
                }
                losses = {
                    'loss/train_frame': F.binary_cross_entropy(predictions['frame'].squeeze(1), frame_label),
                    'loss/train_onset': F.binary_cross_entropy(predictions['onset'].squeeze(1), onset_label),
                    'loss/train_LDS_l_frame': lds_l['frame'],
                    'loss/train_LDS_l_onset': lds_l['onset'],
                    'loss/train_LDS_ul_frame': lds_ul['frame'],
                    'loss/train_LDS_ul_onset': lds_ul['onset'],
                    'loss/train_r_norm_l': r_norm_l.abs().mean(),
                    'loss/train_r_norm_ul': r_norm_ul.abs().mean()
                }
            else:
                predictions = {
                    'onset': onset.reshape(*onset.shape),
                    'frame': frame_pred.reshape(*frame_label.shape),
                    'r_adv': r_adv,
                    'attention': a,
                }
                losses = {
                    'loss/test_frame': F.binary_cross_entropy(predictions['frame'].squeeze(1), frame_label),
                    'loss/test_onset': F.binary_cross_entropy(predictions['onset'].squeeze(1), onset_label),
                    'loss/test_LDS_l_frame': lds_l['frame'],
                    'loss/test_LDS_l_onset': lds_l['onset'],
                    'loss/test_r_norm_l': r_norm_l.abs().mean()
                }

            return predictions, losses, spec.squeeze(1)

    def load_my_state_dict(self, state_dict):
        """Useful when loading part of the weights. From https://discuss.pytorch.org/t/how-to-load-part-of-pre-trained-model/1113/2"""
        own_state = self.state_dict()
        for name, param in state_dict.items():
            if name not in own_state:
                continue
            if isinstance(param, nn.Parameter):
                # backwards compatibility for serialized parameters
                param = param.data
            own_state[name].copy_(param)
| 25,563 | 45.144404 | 196 | py |
ReconVAT | ReconVAT-master/model/Spectrogram.py | """
Module containing all the spectrogram classes
"""
# 0.2.0
import torch
import torch.nn as nn
from torch.nn.functional import conv1d, conv2d, fold
import scipy # used only in CFP
import numpy as np
from time import time
from nnAudio.librosa_functions import *
from nnAudio.utils import *
sz_float = 4 # size of a float
epsilon = 10e-8 # fudge factor for normalization
### --------------------------- Spectrogram Classes ---------------------------###
class STFT(torch.nn.Module):
"""This function is to calculate the short-time Fourier transform (STFT) of the input signal.
Input signal should be in either of the following shapes.\n
1. ``(len_audio)``\n
2. ``(num_audio, len_audio)``\n
3. ``(num_audio, 1, len_audio)``
The correct shape will be inferred automatically if the input follows these 3 shapes.
Most of the arguments follow the convention from librosa.
This class inherits from ``torch.nn.Module``, therefore, the usage is same as ``torch.nn.Module``.
Parameters
----------
n_fft : int
The window size. Default value is 2048.
freq_bins : int
Number of frequency bins. Default is ``None``, which means ``n_fft//2+1`` bins.
hop_length : int
The hop (or stride) size. Default value is ``None`` which is equivalent to ``n_fft//4``.
window : str
The windowing function for STFT. It uses ``scipy.signal.get_window``, please refer to
scipy documentation for possible windowing functions. The default value is 'hann'.
freq_scale : 'linear', 'log', or 'no'
Determine the spacing between each frequency bin. When `linear` or `log` is used,
the bin spacing can be controlled by ``fmin`` and ``fmax``. If 'no' is used, the bin will
start at 0Hz and end at Nyquist frequency with linear spacing.
center : bool
Putting the STFT keneral at the center of the time-step or not. If ``False``, the time
index is the beginning of the STFT kernel, if ``True``, the time index is the center of
the STFT kernel. Default value if ``True``.
pad_mode : str
The padding method. Default value is 'reflect'.
inverse : bool
To activate the iSTFT module or not. By default, it is False to save GPU memory.
fmin : int
The starting frequency for the lowest frequency bin. If freq_scale is ``no``, this argument
does nothing.
fmax : int
The ending frequency for the highest frequency bin. If freq_scale is ``no``, this argument
does nothing.
sr : int
The sampling rate for the input audio. It is used to calucate the correct ``fmin`` and ``fmax``.
Setting the correct sampling rate is very important for calculating the correct frequency.
trainable : bool
Determine if the STFT kenrels are trainable or not. If ``True``, the gradients for STFT
kernels will also be caluclated and the STFT kernels will be updated during model training.
Default value is ``False``
output_format : str
Control the spectrogram output type, either ``Magnitude``, ``Complex``, or ``Phase``.
The output_format can also be changed during the ``forward`` method.
verbose : bool
If ``True``, it shows layer information. If ``False``, it suppresses all prints
device : str
Choose which device to initialize this layer. Default value is 'cpu'
Returns
-------
spectrogram : torch.tensor
It returns a tensor of spectrograms.
``shape = (num_samples, freq_bins,time_steps)`` if ``output_format='Magnitude'``;
``shape = (num_samples, freq_bins,time_steps, 2)`` if ``output_format='Complex' or 'Phase'``;
Examples
--------
>>> spec_layer = Spectrogram.STFT()
>>> specs = spec_layer(x)
"""
def __init__(self, n_fft=2048, win_length=None, freq_bins=None, hop_length=None, window='hann',
freq_scale='no', center=True, pad_mode='reflect', iSTFT=False,
fmin=50, fmax=6000, sr=22050, trainable=False,
output_format="Complex", verbose=True):
super().__init__()
# Trying to make the default setting same as librosa
if win_length==None: win_length = n_fft
if hop_length==None: hop_length = int(win_length // 4)
self.output_format = output_format
self.trainable = trainable
self.stride = hop_length
self.center = center
self.pad_mode = pad_mode
self.n_fft = n_fft
self.freq_bins = freq_bins
self.trainable = trainable
self.pad_amount = self.n_fft // 2
self.window = window
self.win_length = win_length
self.iSTFT = iSTFT
self.trainable = trainable
start = time()
# Create filter windows for stft
kernel_sin, kernel_cos, self.bins2freq, self.bin_list, window_mask = create_fourier_kernels(n_fft,
win_length=win_length,
freq_bins=freq_bins,
window=window,
freq_scale=freq_scale,
fmin=fmin,
fmax=fmax,
sr=sr,
verbose=verbose)
kernel_sin = torch.tensor(kernel_sin, dtype=torch.float)
kernel_cos = torch.tensor(kernel_cos, dtype=torch.float)
# In this way, the inverse kernel and the forward kernel do not share the same memory...
kernel_sin_inv = torch.cat((kernel_sin, -kernel_sin[1:-1].flip(0)), 0)
kernel_cos_inv = torch.cat((kernel_cos, kernel_cos[1:-1].flip(0)), 0)
if iSTFT:
self.register_buffer('kernel_sin_inv', kernel_sin_inv.unsqueeze(-1))
self.register_buffer('kernel_cos_inv', kernel_cos_inv.unsqueeze(-1))
# Making all these variables nn.Parameter, so that the model can be used with nn.Parallel
# self.kernel_sin = torch.nn.Parameter(self.kernel_sin, requires_grad=self.trainable)
# self.kernel_cos = torch.nn.Parameter(self.kernel_cos, requires_grad=self.trainable)
# Applying window functions to the Fourier kernels
window_mask = torch.tensor(window_mask)
wsin = kernel_sin * window_mask
wcos = kernel_cos * window_mask
if self.trainable==False:
self.register_buffer('wsin', wsin)
self.register_buffer('wcos', wcos)
if self.trainable==True:
wsin = torch.nn.Parameter(wsin, requires_grad=self.trainable)
wcos = torch.nn.Parameter(wcos, requires_grad=self.trainable)
self.register_parameter('wsin', wsin)
self.register_parameter('wcos', wcos)
# Prepare the shape of window mask so that it can be used later in inverse
self.register_buffer('window_mask', window_mask.unsqueeze(0).unsqueeze(-1))
if verbose==True:
print("STFT kernels created, time used = {:.4f} seconds".format(time()-start))
else:
pass
def forward(self, x, output_format=None):
"""
Convert a batch of waveforms to spectrograms.
Parameters
----------
x : torch tensor
Input signal should be in either of the following shapes.\n
1. ``(len_audio)``\n
2. ``(num_audio, len_audio)``\n
3. ``(num_audio, 1, len_audio)``
It will be automatically broadcast to the right shape
output_format : str
Control the type of spectrogram to be return. Can be either ``Magnitude`` or ``Complex`` or ``Phase``.
Default value is ``Complex``.
"""
output_format = output_format or self.output_format
self.num_samples = x.shape[-1]
x = broadcast_dim(x)
if self.center:
if self.pad_mode == 'constant':
padding = nn.ConstantPad1d(self.pad_amount, 0)
elif self.pad_mode == 'reflect':
if self.num_samples < self.pad_amount:
raise AssertionError("Signal length shorter than reflect padding length (n_fft // 2).")
padding = nn.ReflectionPad1d(self.pad_amount)
x = padding(x)
spec_imag = conv1d(x, self.wsin, stride=self.stride)
spec_real = conv1d(x, self.wcos, stride=self.stride) # Doing STFT by using conv1d
# remove redundant parts
spec_real = spec_real[:, :self.freq_bins, :]
spec_imag = spec_imag[:, :self.freq_bins, :]
if output_format=='Magnitude':
spec = spec_real.pow(2) + spec_imag.pow(2)
if self.trainable==True:
return torch.sqrt(spec+1e-8) # prevent Nan gradient when sqrt(0) due to output=0
else:
return torch.sqrt(spec)
elif output_format=='Complex':
return torch.stack((spec_real,-spec_imag), -1) # Remember the minus sign for imaginary part
elif output_format=='Phase':
return torch.atan2(-spec_imag+0.0,spec_real) # +0.0 removes -0.0 elements, which leads to error in calculating phase
    def inverse(self, X, onesided=True, length=None, refresh_win=True):
        """
        Convert complex spectrograms back to waveforms (inverse STFT).

        This function is same as the :func:`~nnAudio.Spectrogram.iSTFT` class,
        which is to convert spectrograms back to waveforms.
        It only works for the complex value spectrograms. If you have the magnitude spectrograms,
        please use :func:`~nnAudio.Spectrogram.Griffin_Lim`.

        Parameters
        ----------
        X : torch tensor
            Complex spectrogram with shape ``(batch, freq_bins, timesteps, 2)``,
            last axis holding ``(real, imag)``.
        onesided : bool
            If your spectrograms only have ``n_fft//2+1`` frequency bins, please use ``onesided=True``,
            else use ``onesided=False``
        length : int
            To make sure the inverse STFT has the same output length of the original waveform, please
            set `length` as your intended waveform length. By default, ``length=None``,
            which will remove ``n_fft//2`` samples from the start and the end of the output.
        refresh_win : bool
            Recalculating the window sum square. If you have an input with fixed number of timesteps,
            you can increase the speed by setting ``refresh_win=False``. Else please keep ``refresh_win=True``
        """
        # The inverse kernels only exist when the layer was constructed with iSTFT=True.
        if (hasattr(self, 'kernel_sin_inv') != True) or (hasattr(self, 'kernel_cos_inv') != True):
            raise NameError("Please activate the iSTFT module by setting `iSTFT=True` if you want to use `inverse`")
        assert X.dim()==4 , "Inverse iSTFT only works for complex number," \
                            "make sure our tensor is in the shape of (batch, freq_bins, timesteps, 2)."\
                            "\nIf you have a magnitude spectrogram, please consider using Griffin-Lim."
        if onesided:
            X = extend_fbins(X) # extend freq
        X_real, X_imag = X[:, :, :, 0], X[:, :, :, 1]
        # broadcast dimensions to support 2D convolution
        X_real_bc = X_real.unsqueeze(1)
        X_imag_bc = X_imag.unsqueeze(1)
        # Inverse Fourier transform realised as 2D convolutions with the
        # precomputed inverse cosine/sine kernels.
        a1 = conv2d(X_real_bc, self.kernel_cos_inv, stride=(1,1))
        b2 = conv2d(X_imag_bc, self.kernel_sin_inv, stride=(1,1))
        # compute real and imag part. signal lies in the real part
        real = a1 - b2
        # Apply the analysis window to each reconstructed frame before overlap-add.
        real = real.squeeze(-2)*self.window_mask
        # Normalize the amplitude with n_fft
        real /= (self.n_fft)
        # Overlap and Add algorithm to connect all the frames
        real = overlap_add(real, self.stride)
        # Prepare the window sumsqure for division
        # Only need to create this window once to save time
        # Unless the input spectrograms have different time steps
        if hasattr(self, 'w_sum')==False or refresh_win==True:
            self.w_sum = torch_window_sumsquare(self.window_mask.flatten(), X.shape[2], self.stride, self.n_fft).flatten()
            self.nonzero_indices = (self.w_sum>1e-10)
        else:
            pass
        # Divide by the window envelope only where it is numerically safe (> 1e-10).
        real[:, self.nonzero_indices] = real[:,self.nonzero_indices].div(self.w_sum[self.nonzero_indices])
        # Remove padding
        if length is None:
            if self.center:
                real = real[:, self.pad_amount:-self.pad_amount]
            # NOTE(review): when center=False and length=None nothing is trimmed —
            # no padding was added in that configuration, so this looks intentional.
        else:
            if self.center:
                real = real[:, self.pad_amount:self.pad_amount + length]
            else:
                real = real[:, :length]
        return real
def extra_repr(self) -> str:
return 'n_fft={}, Fourier Kernel size={}, iSTFT={}, trainable={}'.format(
self.n_fft, (*self.wsin.shape,), self.iSTFT, self.trainable
)
class MelSpectrogram(torch.nn.Module):
    """This function is to calculate the Melspectrogram of the input signal.
    Input signal should be in either of the following shapes.\n
    1. ``(len_audio)``\n
    2. ``(num_audio, len_audio)``\n
    3. ``(num_audio, 1, len_audio)``
    The correct shape will be inferred automatically if the input follows these 3 shapes.
    Most of the arguments follow the convention from librosa.
    This class inherits from ``torch.nn.Module``, therefore, the usage is same as ``torch.nn.Module``.

    Parameters
    ----------
    sr : int
        The sampling rate for the input audio.
        It is used to calculate the correct ``fmin`` and ``fmax``.
        Setting the correct sampling rate is very important for calculating the correct frequency.
    n_fft : int
        The window size for the STFT. Default value is 2048
    n_mels : int
        The number of Mel filter banks. The filter banks maps the n_fft to mel bins.
        Default value is 128.
    hop_length : int
        The hop (or stride) size. Default value is 512.
    window : str
        The windowing function for STFT. It uses ``scipy.signal.get_window``, please refer to
        scipy documentation for possible windowing functions. The default value is 'hann'.
    center : bool
        Putting the STFT kernel at the center of the time-step or not. If ``False``,
        the time index is the beginning of the STFT kernel, if ``True``, the time index is the
        center of the STFT kernel. Default value if ``True``.
    pad_mode : str
        The padding method. Default value is 'reflect'.
    power : float
        Exponent applied to the STFT magnitude before the mel projection.
        Default value is 2.0 (power spectrogram).
    htk : bool
        When ``False`` is used, the Mel scale is quasi-logarithmic. When ``True`` is used, the
        Mel scale is logarithmic. The default value is ``False``.
    fmin : int
        The starting frequency for the lowest Mel filter bank.
    fmax : int
        The ending frequency for the highest Mel filter bank.
    norm : int
        Normalization mode forwarded to the mel filter-bank constructor. Default is 1.
    trainable_mel : bool
        Determine if the Mel filter banks are trainable or not. If ``True``, the gradients for Mel
        filter banks will also be calculated and the Mel filter banks will be updated during model
        training. Default value is ``False``.
    trainable_STFT : bool
        Determine if the STFT kernels are trainable or not. If ``True``, the gradients for STFT
        kernels will also be calculated and the STFT kernels will be updated during model training.
        Default value is ``False``.
    verbose : bool
        If ``True``, it shows layer information. If ``False``, it suppresses all prints.

    Returns
    -------
    spectrogram : torch.tensor
        It returns a tensor of spectrograms. shape = ``(num_samples, freq_bins,time_steps)``.

    Examples
    --------
    >>> spec_layer = Spectrogram.MelSpectrogram()
    >>> specs = spec_layer(x)
    """
    def __init__(self, sr=22050, n_fft=2048, n_mels=128, hop_length=512,
                 window='hann', center=True, pad_mode='reflect', power=2.0, htk=False,
                 fmin=0.0, fmax=None, norm=1, trainable_mel=False, trainable_STFT=False,
                 verbose=True, **kwargs):
        super().__init__()
        self.stride = hop_length
        self.center = center
        self.pad_mode = pad_mode
        self.n_fft = n_fft
        self.power = power
        self.trainable_mel = trainable_mel
        self.trainable_STFT = trainable_STFT

        # STFT front-end whose magnitude output feeds the mel filter banks.
        start = time()
        self.stft = STFT(n_fft=n_fft, freq_bins=None, hop_length=hop_length, window=window,
                         freq_scale='no', center=center, pad_mode=pad_mode, sr=sr, trainable=trainable_STFT,
                         output_format="Magnitude", verbose=verbose, **kwargs)
        # Bug fix: the original reset ``start`` before creating the mel filter
        # bank and then reused it for *both* prints, so the reported STFT time
        # was actually the mel-filter time. Measure the two phases separately.
        stft_elapsed = time() - start

        # Mel filter bank that maps the n_fft frequency bins down to n_mels bins.
        start = time()
        mel_basis = mel(sr, n_fft, n_mels, fmin, fmax, htk=htk, norm=norm)
        mel_basis = torch.tensor(mel_basis)

        if verbose:
            print("STFT filter created, time used = {:.4f} seconds".format(stft_elapsed))
            print("Mel filter created, time used = {:.4f} seconds".format(time()-start))

        if trainable_mel:
            # Registering as nn.Parameter so that gradients flow into the mel
            # filter bank and the model supports nn.DataParallel.
            mel_basis = torch.nn.Parameter(mel_basis, requires_grad=trainable_mel)
            self.register_parameter('mel_basis', mel_basis)
        else:
            self.register_buffer('mel_basis', mel_basis)

    def forward(self, x):
        """
        Convert a batch of waveforms to Mel spectrograms.

        Parameters
        ----------
        x : torch tensor
            Input signal should be in either of the following shapes.\n
            1. ``(len_audio)``\n
            2. ``(num_audio, len_audio)``\n
            3. ``(num_audio, 1, len_audio)``
            It will be automatically broadcast to the right shape
        """
        x = broadcast_dim(x)
        # Magnitude STFT raised to ``power`` (2.0 -> power spectrogram),
        # then projected onto the mel basis.
        spec = self.stft(x, output_format='Magnitude')**self.power
        melspec = torch.matmul(self.mel_basis, spec)
        return melspec

    def extra_repr(self) -> str:
        # Bug fix: the original format string had only two placeholders for
        # three arguments, so ``trainable_STFT`` was silently dropped.
        return 'Mel filter banks size = {}, trainable_mel={}, trainable_STFT={}'.format(
            (*self.mel_basis.shape,), self.trainable_mel, self.trainable_STFT
        )
class MFCC(torch.nn.Module):
    """This function is to calculate the Mel-frequency cepstral coefficients (MFCCs) of the input signal.
    This algorithm first extracts Mel spectrograms from the audio clips,
    then the discrete cosine transform is calculated to obtain the final MFCCs.
    Therefore, the Mel spectrogram part can be made trainable using
    ``trainable_mel`` and ``trainable_STFT``.
    It only support type-II DCT at the moment. Input signal should be in either of the following shapes.\n
    1. ``(len_audio)``\n
    2. ``(num_audio, len_audio)``\n
    3. ``(num_audio, 1, len_audio)``
    The correct shape will be inferred automatically if the input follows these 3 shapes.
    Most of the arguments follow the convention from librosa.
    This class inherits from ``torch.nn.Module``, therefore, the usage is same as ``torch.nn.Module``.

    Parameters
    ----------
    sr : int
        The sampling rate for the input audio. It is used to calculate the correct ``fmin`` and ``fmax``.
        Setting the correct sampling rate is very important for calculating the correct frequency.
    n_mfcc : int
        The number of Mel-frequency cepstral coefficients
    norm : string
        The default value is 'ortho'. Normalization for DCT basis
    ref, amin, top_db
        Parameters of the power-to-dB conversion (same semantics as librosa's
        ``power_to_db``): reference value, amplitude floor, and dynamic-range cap.
    **kwargs
        Other arguments for Melspectrogram such as n_fft, n_mels, hop_length, and window

    Returns
    -------
    MFCCs : torch.tensor
        It returns a tensor of MFCCs. shape = ``(num_samples, n_mfcc, time_steps)``.

    Examples
    --------
    >>> spec_layer = Spectrogram.MFCC()
    >>> mfcc = spec_layer(x)
    """
    def __init__(self, sr=22050, n_mfcc=20, norm='ortho', verbose=True, ref=1.0, amin=1e-10, top_db=80.0, **kwargs):
        super().__init__()
        self.melspec_layer = MelSpectrogram(sr=sr, verbose=verbose, **kwargs)
        self.n_mfcc = n_mfcc
        # Kept as a deprecated alias: the original stored the same value under
        # both ``m_mfcc`` and ``n_mfcc``; external code may read either.
        self.m_mfcc = n_mfcc

        # attributes that will be used for _power_to_db
        if amin <= 0:
            raise ParameterError('amin must be strictly positive')
        amin = torch.tensor([amin])
        ref = torch.abs(torch.tensor([ref]))
        self.register_buffer('amin', amin)
        self.register_buffer('ref', ref)
        self.top_db = top_db

    def _power_to_db(self, S):
        '''
        Convert a power spectrogram to decibel (dB) scale, clipping the
        dynamic range to ``top_db`` below the per-sample maximum.
        Refer to https://librosa.github.io/librosa/_modules/librosa/core/spectrum.html#power_to_db
        for the original implmentation.
        '''
        log_spec = 10.0 * torch.log10(torch.max(S, self.amin))
        log_spec -= 10.0 * torch.log10(torch.max(self.amin, self.ref))
        if self.top_db is not None:
            if self.top_db < 0:
                raise ParameterError('top_db must be non-negative')
            # make the dim same as log_spec so that it can be broadcasted
            batch_wise_max = log_spec.flatten(1).max(1)[0].unsqueeze(1).unsqueeze(1)
            log_spec = torch.max(log_spec, batch_wise_max - self.top_db)
        return log_spec

    def _dct(self, x, norm=None):
        '''
        Type-II DCT along the frequency axis, computed via an FFT.
        Refer to https://github.com/zh217/torch-dct for the original implmentation.
        '''
        x = x.permute(0, 2, 1)  # make freq the last axis, since dct applies to the frequency axis
        x_shape = x.shape
        N = x_shape[-1]
        v = torch.cat([x[:, :, ::2], x[:, :, 1::2].flip([2])], dim=2)
        # Compatibility fix: ``torch.rfft`` was removed in torch>=1.8.
        # ``torch.fft.fft`` + ``view_as_real`` produces the identical
        # (..., N, 2) real/imag layout of ``torch.rfft(v, 1, onesided=False)``.
        if hasattr(torch, 'rfft'):
            Vc = torch.rfft(v, 1, onesided=False)
        else:
            Vc = torch.view_as_real(torch.fft.fft(v, dim=-1))
        # TODO: Can make the W_r and W_i trainable here
        k = - torch.arange(N, dtype=x.dtype, device=x.device)[None, :] * np.pi / (2 * N)
        W_r = torch.cos(k)
        W_i = torch.sin(k)
        V = Vc[:, :, :, 0] * W_r - Vc[:, :, :, 1] * W_i
        if norm == 'ortho':
            V[:, :, 0] /= np.sqrt(N) * 2
            V[:, :, 1:] /= np.sqrt(N / 2) * 2
        V = 2 * V
        return V.permute(0, 2, 1)  # swapping back the time axis and freq axis

    def forward(self, x):
        """
        Convert a batch of waveforms to MFCC.

        Parameters
        ----------
        x : torch tensor
            Input signal should be in either of the following shapes.\n
            1. ``(len_audio)``\n
            2. ``(num_audio, len_audio)``\n
            3. ``(num_audio, 1, len_audio)``
            It will be automatically broadcast to the right shape
        """
        x = self.melspec_layer(x)
        x = self._power_to_db(x)
        # Consistency fix: use ``n_mfcc`` directly (the original used the
        # duplicate ``m_mfcc`` here and ``n_mfcc`` in extra_repr).
        x = self._dct(x, norm='ortho')[:, :self.n_mfcc, :]
        return x

    def extra_repr(self) -> str:
        return 'n_mfcc = {}'.format(
            (self.n_mfcc)
        )
class Gammatonegram(torch.nn.Module):
    """
    This function is to calculate the Gammatonegram of the input signal. Input signal should be in either of the following shapes. 1. ``(len_audio)``, 2. ``(num_audio, len_audio)``, 3. ``(num_audio, 1, len_audio)``. The correct shape will be inferred automatically if the input follows these 3 shapes. This class inherits from ``torch.nn.Module``, therefore, the usage is same as ``torch.nn.Module``.

    Parameters
    ----------
    sr : int
        The sampling rate for the input audio. It is used to calculate the correct ``fmin`` and ``fmax``. Setting the correct sampling rate is very important for calculating the correct frequency.
    n_fft : int
        The window size for the STFT. Default value is 2048
    n_bins : int
        The number of Gammatone filter banks. The filter banks maps the n_fft to Gammatone bins. Default value is 64
    hop_length : int
        The hop (or stride) size. Default value is 512.
    window : str
        The windowing function for STFT. It uses ``scipy.signal.get_window``, please refer to scipy documentation for possible windowing functions. The default value is 'hann'
    center : bool
        Putting the STFT kernel at the center of the time-step or not. If ``False``, the time index is the beginning of the STFT kernel, if ``True``, the time index is the center of the STFT kernel. Default value if ``True``.
    pad_mode : str
        The padding method. Default value is 'reflect'.
    power : float
        Exponent applied to the STFT magnitude before the Gammatone projection. Default is 2.0.
    htk, norm : unused
        NOTE(review): accepted for API compatibility but not used by this layer.
    fmin : int
        The starting frequency for the lowest Gammatone filter bank
    fmax : int
        The ending frequency for the highest Gammatone filter bank
    trainable_bins : bool
        Determine if the Gammatone filter banks are trainable or not. If ``True``, the gradients for the filter banks will also be calculated and the filter banks will be updated during model training. Default value is ``False``
    trainable_STFT : bool
        Determine if the STFT kernels are trainable or not. If ``True``, the gradients for STFT kernels will also be calculated and the STFT kernels will be updated during model training. Default value is ``False``
    verbose : bool
        If ``True``, it shows layer information. If ``False``, it suppresses all prints
    device : str
        NOTE(review): stored but not used by this layer; kept for backward compatibility. Default value is 'cuda:0'

    Returns
    -------
    spectrogram : torch.tensor
        It returns a tensor of spectrograms. shape = ``(num_samples, freq_bins,time_steps)``.

    Examples
    --------
    >>> spec_layer = Spectrogram.Gammatonegram()
    >>> specs = spec_layer(x)
    """
    def __init__(self, sr=44100, n_fft=2048, n_bins=64, hop_length=512, window='hann', center=True, pad_mode='reflect',
                 power=2.0, htk=False, fmin=20.0, fmax=None, norm=1, trainable_bins=False, trainable_STFT=False,
                 verbose=True, device='cuda:0'):
        super(Gammatonegram, self).__init__()
        self.stride = hop_length
        self.center = center
        self.pad_mode = pad_mode
        self.n_fft = n_fft
        # Stored but unused; kept so existing callers reading ``.device`` still work.
        self.device = device
        self.power = power

        # Fourier kernels so the STFT can be computed with conv1d.
        start = time()
        wsin, wcos, self.bins2freq, _, _ = create_fourier_kernels(n_fft, freq_bins=None, window=window, freq_scale='no',
                                                                  sr=sr)
        wsin = torch.tensor(wsin, dtype=torch.float)
        wcos = torch.tensor(wcos, dtype=torch.float)
        if trainable_STFT:
            wsin = torch.nn.Parameter(wsin, requires_grad=trainable_STFT)
            wcos = torch.nn.Parameter(wcos, requires_grad=trainable_STFT)
            self.register_parameter('wsin', wsin)
            self.register_parameter('wcos', wcos)
        else:
            self.register_buffer('wsin', wsin)
            self.register_buffer('wcos', wcos)
        # Bug fix: measure the STFT-kernel time before the timer is reused for
        # the Gammatone basis (the original reported the Gammatone time twice).
        stft_elapsed = time() - start

        # Creating kernel for Gammatone spectrogram
        start = time()
        gammatone_basis = gammatone(sr, n_fft, n_bins, fmin, fmax)
        gammatone_basis = torch.tensor(gammatone_basis)

        if verbose:
            print("STFT filter created, time used = {:.4f} seconds".format(stft_elapsed))
            print("Gammatone filter created, time used = {:.4f} seconds".format(time() - start))

        # Making everything nn.Parameter, so that this model can support nn.DataParallel
        if trainable_bins:
            gammatone_basis = torch.nn.Parameter(gammatone_basis, requires_grad=trainable_bins)
            self.register_parameter('gammatone_basis', gammatone_basis)
        else:
            self.register_buffer('gammatone_basis', gammatone_basis)

    def forward(self, x):
        """Convert a batch of waveforms to Gammatonegrams of shape
        ``(num_samples, n_bins, time_steps)``."""
        x = broadcast_dim(x)
        if self.center:
            if self.pad_mode == 'constant':
                padding = nn.ConstantPad1d(self.n_fft // 2, 0)
            elif self.pad_mode == 'reflect':
                padding = nn.ReflectionPad1d(self.n_fft // 2)
            x = padding(x)
        # Magnitude STFT via conv1d, raised to ``power``, then projected
        # onto the Gammatone filter bank.
        spec = torch.sqrt(conv1d(x, self.wsin, stride=self.stride).pow(2) \
                          + conv1d(x, self.wcos, stride=self.stride).pow(2)) ** self.power
        gammatonespec = torch.matmul(self.gammatone_basis, spec)
        return gammatonespec
class CQT1992(torch.nn.Module):
    """
    This algorithm uses the method proposed in [1]. Please refer to :func:`~nnAudio.Spectrogram.CQT1992v2` for a more
    computational and memory efficient version.
    [1] Brown, Judith C.C. and Miller Puckette. “An efficient algorithm for the calculation of a
    constant Q transform.” (1992).
    This function is to calculate the CQT of the input signal.
    Input signal should be in either of the following shapes.\n
    1. ``(len_audio)``\n
    2. ``(num_audio, len_audio)``\n
    3. ``(num_audio, 1, len_audio)``
    The correct shape will be inferred automatically if the input follows these 3 shapes.
    Most of the arguments follow the convention from librosa.
    This class inherits from ``torch.nn.Module``, therefore, the usage is same as ``torch.nn.Module``.
    Parameters
    ----------
    sr : int
        The sampling rate for the input audio. It is used to calculate the correct ``fmin`` and ``fmax``.
        Setting the correct sampling rate is very important for calculating the correct frequency.
    hop_length : int
        The hop (or stride) size. Default value is 512.
    fmin : float
        The frequency for the lowest CQT bin. Default is 32.70Hz, which corresponds to the note C0.
    fmax : float
        The frequency for the highest CQT bin. Default is ``None``, therefore the highest CQT bin is
        inferred from the ``n_bins`` and ``bins_per_octave``.
        If ``fmax`` is not ``None``, then the argument ``n_bins`` will be ignored and ``n_bins``
        will be calculated automatically. Default is ``None``
    n_bins : int
        The total numbers of CQT bins. Default is 84. Will be ignored if ``fmax`` is not ``None``.
    bins_per_octave : int
        Number of bins per octave. Default is 12.
    trainable_STFT : bool
        Determine if the time to frequency domain transformation kernel for the input audio is trainable or not.
        Default is ``False``
    trainable_CQT : bool
        Determine if the frequency domain CQT kernel is trainable or not.
        Default is ``False``
    norm : int
        Normalization for the CQT kernels. ``1`` means L1 normalization, and ``2`` means L2 normalization.
        Default is ``1``, which is same as the normalization used in librosa.
    window : str
        The windowing function for CQT. It uses ``scipy.signal.get_window``, please refer to
        scipy documentation for possible windowing functions. The default value is 'hann'.
    center : bool
        Putting the CQT kernel at the center of the time-step or not. If ``False``, the time index is
        the beginning of the CQT kernel, if ``True``, the time index is the center of the CQT kernel.
        Default value if ``True``.
    pad_mode : str
        The padding method. Default value is 'reflect'.
    trainable : bool
        Determine if the CQT kernels are trainable or not. If ``True``, the gradients for CQT kernels
        will also be calculated and the CQT kernels will be updated during model training.
        Default value is ``False``.
    output_format : str
        Determine the return type.
        ``Magnitude`` will return the magnitude of the STFT result, shape = ``(num_samples, freq_bins,time_steps)``;
        ``Complex`` will return the STFT result in complex number, shape = ``(num_samples, freq_bins,time_steps, 2)``;
        ``Phase`` will return the phase of the STFT result, shape = ``(num_samples, freq_bins,time_steps, 2)``.
        The complex number is stored as ``(real, imag)`` in the last axis. Default value is 'Magnitude'.
    verbose : bool
        If ``True``, it shows layer information. If ``False``, it suppresses all prints
    Returns
    -------
    spectrogram : torch.tensor
        It returns a tensor of spectrograms.
        shape = ``(num_samples, freq_bins,time_steps)`` if ``output_format='Magnitude'``;
        shape = ``(num_samples, freq_bins,time_steps, 2)`` if ``output_format='Complex' or 'Phase'``;
    Examples
    --------
    >>> spec_layer = Spectrogram.CQT1992v2()
    >>> specs = spec_layer(x)
    """
    def __init__(self, sr=22050, hop_length=512, fmin=220, fmax=None, n_bins=84,
                 trainable_STFT=False, trainable_CQT=False, bins_per_octave=12,
                 output_format='Complex', norm=1, window='hann', center=True, pad_mode='reflect'):
        super().__init__()
        # NOTE(review): an earlier comment here claimed the ``norm`` arg is not
        # functioning, but ``self.norm`` IS read in forward() to scale the CQT
        # and ``norm`` is also passed to create_cqt_kernels — verify intent.
        self.hop_length = hop_length
        self.center = center
        self.pad_mode = pad_mode
        self.norm = norm
        self.output_format = output_format
        # creating kernels for CQT
        Q = 1/(2**(1/bins_per_octave)-1)
        print("Creating CQT kernels ...", end='\r')
        start = time()
        # 'lenghts' (sic) keeps the original buffer name for checkpoint compatibility.
        cqt_kernels, self.kernel_width, lenghts = create_cqt_kernels(Q,
                                                                     sr,
                                                                     fmin,
                                                                     n_bins,
                                                                     bins_per_octave,
                                                                     norm,
                                                                     window,
                                                                     fmax)
        self.register_buffer('lenghts', lenghts)
        # Keep only the non-redundant half of the kernel spectrum (real input FFT symmetry).
        cqt_kernels = fft(cqt_kernels)[:,:self.kernel_width//2+1]
        print("CQT kernels created, time used = {:.4f} seconds".format(time()-start))
        # creating kernels for stft
        # self.cqt_kernels_real*=lenghts.unsqueeze(1)/self.kernel_width # Trying to normalize as librosa
        # self.cqt_kernels_imag*=lenghts.unsqueeze(1)/self.kernel_width
        print("Creating STFT kernels ...", end='\r')
        start = time()
        kernel_sin, kernel_cos, self.bins2freq, _, window = create_fourier_kernels(self.kernel_width,
                                                                                   window='ones',
                                                                                   freq_scale='no')
        # Converting kernels from numpy arrays to torch tensors
        wsin = torch.tensor(kernel_sin * window)
        wcos = torch.tensor(kernel_cos * window)
        cqt_kernels_real = torch.tensor(cqt_kernels.real.astype(np.float32))
        cqt_kernels_imag = torch.tensor(cqt_kernels.imag.astype(np.float32))
        # Register as Parameters when trainable (supports nn.DataParallel),
        # otherwise as buffers so they move with .to()/.cuda() but get no grads.
        if trainable_STFT:
            wsin = torch.nn.Parameter(wsin, requires_grad=trainable_STFT)
            wcos = torch.nn.Parameter(wcos, requires_grad=trainable_STFT)
            self.register_parameter('wsin', wsin)
            self.register_parameter('wcos', wcos)
        else:
            self.register_buffer('wsin', wsin)
            self.register_buffer('wcos', wcos)
        if trainable_CQT:
            cqt_kernels_real = torch.nn.Parameter(cqt_kernels_real, requires_grad=trainable_CQT)
            cqt_kernels_imag = torch.nn.Parameter(cqt_kernels_imag, requires_grad=trainable_CQT)
            self.register_parameter('cqt_kernels_real', cqt_kernels_real)
            self.register_parameter('cqt_kernels_imag', cqt_kernels_imag)
        else:
            self.register_buffer('cqt_kernels_real', cqt_kernels_real)
            self.register_buffer('cqt_kernels_imag', cqt_kernels_imag)
        print("STFT kernels created, time used = {:.4f} seconds".format(time()-start))
    def forward(self, x, output_format=None):
        """
        Convert a batch of waveforms to CQT spectrograms.
        Parameters
        ----------
        x : torch tensor
            Input signal should be in either of the following shapes.\n
            1. ``(len_audio)``\n
            2. ``(num_audio, len_audio)``\n
            3. ``(num_audio, 1, len_audio)``
            It will be automatically broadcast to the right shape
        output_format : str
            Overrides the ``output_format`` chosen at construction when given.
        """
        output_format = output_format or self.output_format
        x = broadcast_dim(x)
        if self.center:
            if self.pad_mode == 'constant':
                padding = nn.ConstantPad1d(self.kernel_width//2, 0)
            elif self.pad_mode == 'reflect':
                padding = nn.ReflectionPad1d(self.kernel_width//2)
            x = padding(x)
        # STFT
        fourier_real = conv1d(x, self.wcos, stride=self.hop_length)
        fourier_imag = conv1d(x, self.wsin, stride=self.hop_length)
        # CQT: multiply the frequency-domain CQT kernels with the STFT result.
        CQT_real, CQT_imag = complex_mul((self.cqt_kernels_real, self.cqt_kernels_imag),
                                         (fourier_real, fourier_imag))
        CQT = torch.stack((CQT_real,-CQT_imag),-1)
        if self.norm:
            CQT = CQT/self.kernel_width*torch.sqrt(self.lenghts.view(-1,1,1))
        else:
            CQT = CQT*torch.sqrt(self.lenghts.view(-1,1,1))
        if output_format=='Magnitude':
            # Getting CQT Amplitude
            return torch.sqrt(CQT.pow(2).sum(-1))
        elif output_format=='Complex':
            return CQT
        elif output_format=='Phase':
            phase_real = torch.cos(torch.atan2(CQT_imag,CQT_real))
            phase_imag = torch.sin(torch.atan2(CQT_imag,CQT_real))
            return torch.stack((phase_real,phase_imag), -1)
    def extra_repr(self) -> str:
        return 'STFT kernel size = {}, CQT kernel size = {}'.format(
            (*self.wcos.shape,), (*self.cqt_kernels_real.shape,)
        )
class CQT2010(torch.nn.Module):
    """
    This algorithm is using the resampling method proposed in [1].
    Instead of convoluting the STFT results with a gigantic CQT kernel covering the full frequency
    spectrum, we make a small CQT kernel covering only the top octave.
    Then we keep downsampling the input audio by a factor of 2 to convoluting it with the
    small CQT kernel. Everytime the input audio is downsampled, the CQT relative to the downsampled
    input is equavalent to the next lower octave.
    The kernel creation process is still same as the 1992 algorithm. Therefore, we can reuse the code
    from the 1992 alogrithm [2]
    [1] Schörkhuber, Christian. “CONSTANT-Q TRANSFORM TOOLBOX FOR MUSIC PROCESSING.” (2010).
    [2] Brown, Judith C.C. and Miller Puckette. “An efficient algorithm for the calculation of a
    constant Q transform.” (1992).
    early downsampling factor is to downsample the input audio to reduce the CQT kernel size.
    The result with and without early downsampling are more or less the same except in the very low
    frequency region where freq < 40Hz.
    """
    def __init__(self, sr=22050, hop_length=512, fmin=32.70, fmax=None, n_bins=84, bins_per_octave=12,
                 norm=True, basis_norm=1, window='hann', pad_mode='reflect', trainable_STFT=False,
                 trainable_CQT=False, output_format='Complex', earlydownsample=True, verbose=True):
        super().__init__()

        self.norm = norm  # Now norm is used to normalize the final CQT result by dividing n_fft
        # basis_norm is for normalizing basis
        self.hop_length = hop_length
        self.pad_mode = pad_mode
        self.n_bins = n_bins
        self.output_format = output_format
        self.earlydownsample = earlydownsample  # TODO: activate early downsampling later if possible

        # This will be used to calculate filter_cutoff and creating CQT kernels
        Q = 1/(2**(1/bins_per_octave)-1)

        # Creating lowpass filter and make it a torch tensor
        if verbose==True:
            print("Creating low pass filter ...", end='\r')
        start = time()
        lowpass_filter = torch.tensor(create_lowpass_filter(
                                                            band_center = 0.5,
                                                            kernelLength=256,
                                                            transitionBandwidth=0.001
                                                            )
                                     )
        # Broadcast the tensor to the shape that fits conv1d
        self.register_buffer('lowpass_filter', lowpass_filter[None,None,:])
        if verbose==True:
            print("Low pass filter created, time used = {:.4f} seconds".format(time()-start))

        # Calculate num of filter requires for the kernel
        # n_octaves determines how many resampling requires for the CQT
        n_filters = min(bins_per_octave, n_bins)
        self.n_octaves = int(np.ceil(float(n_bins) / bins_per_octave))

        # Calculate the lowest frequency bin for the top octave kernel
        self.fmin_t = fmin*2**(self.n_octaves-1)
        remainder = n_bins % bins_per_octave

        if remainder==0:
            # Calculate the top bin frequency
            fmax_t = self.fmin_t*2**((bins_per_octave-1)/bins_per_octave)
        else:
            # Calculate the top bin frequency
            fmax_t = self.fmin_t*2**((remainder-1)/bins_per_octave)

        self.fmin_t = fmax_t/2**(1-1/bins_per_octave)  # Adjusting the top minium bins
        if fmax_t > sr/2:
            raise ValueError('The top bin {}Hz has exceeded the Nyquist frequency, \
                              please reduce the n_bins'.format(fmax_t))

        if self.earlydownsample == True:  # Do early downsampling if this argument is True
            if verbose==True:
                print("Creating early downsampling filter ...", end='\r')
            start = time()
            sr, self.hop_length, self.downsample_factor, early_downsample_filter, \
                self.earlydownsample = get_early_downsample_params(sr,
                                                                   hop_length,
                                                                   fmax_t,
                                                                   Q,
                                                                   self.n_octaves,
                                                                   verbose)
            self.register_buffer('early_downsample_filter', early_downsample_filter)
            if verbose==True:
                print("Early downsampling filter created, \
                        time used = {:.4f} seconds".format(time()-start))
        else:
            self.downsample_factor=1.

        # Preparing CQT kernels
        if verbose==True:
            print("Creating CQT kernels ...", end='\r')
        start = time()
        basis, self.n_fft, _ = create_cqt_kernels(Q,
                                                  sr,
                                                  self.fmin_t,
                                                  n_filters,
                                                  bins_per_octave,
                                                  norm=basis_norm,
                                                  topbin_check=False)

        # This is for the normalization in the end
        # Compatibility fix: ``np.float`` was removed in NumPy 1.24 — the
        # builtin ``float`` is the documented replacement and is equivalent.
        freqs = fmin * 2.0 ** (np.r_[0:n_bins] / float(bins_per_octave))
        lenghts = np.ceil(Q * sr / freqs)
        lenghts = torch.tensor(lenghts).float()
        # 'lenghts' (sic) keeps the original buffer name for checkpoint compatibility.
        self.register_buffer('lenghts', lenghts)

        self.basis=basis
        fft_basis = fft(basis)[:,:self.n_fft//2+1]  # Convert CQT kenral from time domain to freq domain

        # These cqt_kernel is already in the frequency domain
        cqt_kernels_real = torch.tensor(fft_basis.real.astype(np.float32))
        cqt_kernels_imag = torch.tensor(fft_basis.imag.astype(np.float32))

        if verbose==True:
            print("CQT kernels created, time used = {:.4f} seconds".format(time()-start))

        # Preparing kernels for Short-Time Fourier Transform (STFT)
        # We set the frequency range in the CQT filter instead of here.
        if verbose==True:
            print("Creating STFT kernels ...", end='\r')
        start = time()
        kernel_sin, kernel_cos, self.bins2freq, _, window = create_fourier_kernels(self.n_fft, window='ones', freq_scale='no')
        wsin = kernel_sin * window
        wcos = kernel_cos * window

        wsin = torch.tensor(wsin)
        wcos = torch.tensor(wcos)

        if verbose==True:
            print("STFT kernels created, time used = {:.4f} seconds".format(time()-start))

        # Parameters when trainable (supports nn.DataParallel), buffers otherwise.
        if trainable_STFT:
            wsin = torch.nn.Parameter(wsin, requires_grad=trainable_STFT)
            wcos = torch.nn.Parameter(wcos, requires_grad=trainable_STFT)
            self.register_parameter('wsin', wsin)
            self.register_parameter('wcos', wcos)
        else:
            self.register_buffer('wsin', wsin)
            self.register_buffer('wcos', wcos)

        if trainable_CQT:
            cqt_kernels_real = torch.nn.Parameter(cqt_kernels_real, requires_grad=trainable_CQT)
            cqt_kernels_imag = torch.nn.Parameter(cqt_kernels_imag, requires_grad=trainable_CQT)
            self.register_parameter('cqt_kernels_real', cqt_kernels_real)
            self.register_parameter('cqt_kernels_imag', cqt_kernels_imag)
        else:
            self.register_buffer('cqt_kernels_real', cqt_kernels_real)
            self.register_buffer('cqt_kernels_imag', cqt_kernels_imag)

        # If center==True, the STFT window will be put in the middle, and paddings at the beginning
        # and ending are required.
        if self.pad_mode == 'constant':
            self.padding = nn.ConstantPad1d(self.n_fft//2, 0)
        elif self.pad_mode == 'reflect':
            self.padding = nn.ReflectionPad1d(self.n_fft//2)

    def forward(self, x, output_format=None):
        """
        Convert a batch of waveforms to CQT spectrograms.

        Parameters
        ----------
        x : torch tensor
            Input signal should be in either of the following shapes.\n
            1. ``(len_audio)``\n
            2. ``(num_audio, len_audio)``\n
            3. ``(num_audio, 1, len_audio)``
            It will be automatically broadcast to the right shape
        """
        output_format = output_format or self.output_format

        x = broadcast_dim(x)
        if self.earlydownsample==True:
            x = downsampling_by_n(x, self.early_downsample_filter, self.downsample_factor)
        hop = self.hop_length

        CQT = get_cqt_complex(x, self.wcos, self.wsin, hop, self.padding)  # Getting the top octave CQT

        x_down = x  # Preparing a new variable for downsampling
        # One octave per iteration: halve the sample rate (and hop) and reuse
        # the same top-octave kernel on the downsampled signal.
        for i in range(self.n_octaves-1):
            hop = hop//2
            x_down = downsampling_by_2(x_down, self.lowpass_filter)
            CQT1 = get_cqt_complex(x_down, self.wcos, self.wsin, hop, self.padding)
            CQT = torch.cat((CQT1, CQT),1)
        CQT = CQT[:,-self.n_bins:,:]  # Removing unwanted top bins

        if self.norm:
            CQT = CQT/self.n_fft*torch.sqrt(self.lenghts.view(-1,1,1))
        else:
            CQT = CQT*torch.sqrt(self.lenghts.view(-1,1,1))

        # Normalizing the output with the downsampling factor, 2**(self.n_octaves-1)
        # is make it same mag as 1992
        CQT = CQT*self.downsample_factor

        if output_format=='Magnitude':
            # Getting CQT Amplitude
            return torch.sqrt(CQT.pow(2).sum(-1))

        elif output_format=='Complex':
            return CQT

        elif output_format=='Phase':
            phase_real = torch.cos(torch.atan2(CQT[:,:,:,1],CQT[:,:,:,0]))
            phase_imag = torch.sin(torch.atan2(CQT[:,:,:,1],CQT[:,:,:,0]))
            return torch.stack((phase_real,phase_imag), -1)

    def extra_repr(self) -> str:
        return 'STFT kernel size = {}, CQT kernel size = {}'.format(
            (*self.wcos.shape,), (*self.cqt_kernels_real.shape,)
        )
class CQT1992v2(torch.nn.Module):
    """This function is to calculate the CQT of the input signal.
    Input signal should be in either of the following shapes.\n
    1. ``(len_audio)``\n
    2. ``(num_audio, len_audio)``\n
    3. ``(num_audio, 1, len_audio)``
    The correct shape will be inferred automatically if the input follows these 3 shapes.
    Most of the arguments follow the convention from librosa.
    This class inherits from ``torch.nn.Module``, therefore, the usage is same as ``torch.nn.Module``.
    This algorithm uses the method proposed in [1]. I slightly modify it so that it runs faster
    than the original 1992 algorithm, that is why I call it version 2.
    [1] Brown, Judith C.C. and Miller Puckette. “An efficient algorithm for the calculation of a
    constant Q transform.” (1992).

    Parameters
    ----------
    sr : int
        The sampling rate for the input audio. It is used to calculate the correct ``fmin`` and ``fmax``.
        Setting the correct sampling rate is very important for calculating the correct frequency.
    hop_length : int
        The hop (or stride) size. Default value is 512.
    fmin : float
        The frequency for the lowest CQT bin. Default is 32.70Hz, which corresponds to the note C0.
    fmax : float
        The frequency for the highest CQT bin. Default is ``None``, therefore the highest CQT bin is
        inferred from the ``n_bins`` and ``bins_per_octave``.
        If ``fmax`` is not ``None``, then the argument ``n_bins`` will be ignored and ``n_bins``
        will be calculated automatically. Default is ``None``
    n_bins : int
        The total numbers of CQT bins. Default is 84. Will be ignored if ``fmax`` is not ``None``.
    bins_per_octave : int
        Number of bins per octave. Default is 12.
    norm : int
        Normalization for the CQT kernels. ``1`` means L1 normalization, and ``2`` means L2 normalization.
        Default is ``1``, which is same as the normalization used in librosa.
    window : str
        The windowing function for CQT. It uses ``scipy.signal.get_window``, please refer to
        scipy documentation for possible windowing functions. The default value is 'hann'.
    center : bool
        Putting the CQT kernel at the center of the time-step or not. If ``False``, the time index is
        the beginning of the CQT kernel, if ``True``, the time index is the center of the CQT kernel.
        Default value if ``True``.
    pad_mode : str
        The padding method. Default value is 'reflect'.
    trainable : bool
        Determine if the CQT kernels are trainable or not. If ``True``, the gradients for CQT kernels
        will also be calculated and the CQT kernels will be updated during model training.
        Default value is ``False``.
    output_format : str
        Determine the return type.
        ``Magnitude`` will return the magnitude of the STFT result, shape = ``(num_samples, freq_bins,time_steps)``;
        ``Complex`` will return the STFT result in complex number, shape = ``(num_samples, freq_bins,time_steps, 2)``;
        ``Phase`` will return the phase of the STFT result, shape = ``(num_samples, freq_bins,time_steps, 2)``.
        The complex number is stored as ``(real, imag)`` in the last axis. Default value is 'Magnitude'.
    verbose : bool
        If ``True``, it shows layer information. If ``False``, it suppresses all prints

    Returns
    -------
    spectrogram : torch.tensor
        It returns a tensor of spectrograms.
        shape = ``(num_samples, freq_bins,time_steps)`` if ``output_format='Magnitude'``;
        shape = ``(num_samples, freq_bins,time_steps, 2)`` if ``output_format='Complex' or 'Phase'``;

    Examples
    --------
    >>> spec_layer = Spectrogram.CQT1992v2()
    >>> specs = spec_layer(x)
    """

    def __init__(self, sr=22050, hop_length=512, fmin=32.70, fmax=None, n_bins=84,
                 bins_per_octave=12, norm=1, window='hann', center=True, pad_mode='reflect',
                 trainable=False, output_format='Magnitude', verbose=True):

        super().__init__()

        self.trainable = trainable
        self.hop_length = hop_length
        self.center = center
        self.pad_mode = pad_mode
        self.output_format = output_format

        # Quality factor: relates each kernel's bandwidth to its centre frequency
        Q = 1/(2**(1/bins_per_octave)-1)

        # Creating kernels for CQT
        if verbose==True:
            print("Creating CQT kernels ...", end='\r')

        start = time()
        cqt_kernels, self.kernel_width, lenghts = create_cqt_kernels(Q,
                                                                     sr,
                                                                     fmin,
                                                                     n_bins,
                                                                     bins_per_octave,
                                                                     norm,
                                                                     window,
                                                                     fmax)

        self.register_buffer('lenghts', lenghts)

        cqt_kernels_real = torch.tensor(cqt_kernels.real).unsqueeze(1)
        cqt_kernels_imag = torch.tensor(cqt_kernels.imag).unsqueeze(1)

        if trainable:
            cqt_kernels_real = torch.nn.Parameter(cqt_kernels_real, requires_grad=trainable)
            cqt_kernels_imag = torch.nn.Parameter(cqt_kernels_imag, requires_grad=trainable)
            self.register_parameter('cqt_kernels_real', cqt_kernels_real)
            self.register_parameter('cqt_kernels_imag', cqt_kernels_imag)
        else:
            self.register_buffer('cqt_kernels_real', cqt_kernels_real)
            self.register_buffer('cqt_kernels_imag', cqt_kernels_imag)

        # Fix: build the padding layer once here instead of constructing a new
        # nn module on every forward() call. The previous code also left
        # `padding` undefined (NameError at forward time) for any pad_mode
        # other than 'constant'/'reflect'; this keeps the same two supported
        # modes, matching the CQT2010v2 implementation below.
        if self.pad_mode == 'constant':
            self.padding = nn.ConstantPad1d(self.kernel_width//2, 0)
        elif self.pad_mode == 'reflect':
            self.padding = nn.ReflectionPad1d(self.kernel_width//2)

        if verbose==True:
            print("CQT kernels created, time used = {:.4f} seconds".format(time()-start))

    def forward(self,x, output_format=None):
        """
        Convert a batch of waveforms to CQT spectrograms.

        Parameters
        ----------
        x : torch tensor
            Input signal should be in either of the following shapes.\n
            1. ``(len_audio)``\n
            2. ``(num_audio, len_audio)``\n
            3. ``(num_audio, 1, len_audio)``
            It will be automatically broadcast to the right shape
        """
        output_format = output_format or self.output_format

        x = broadcast_dim(x)
        if self.center:
            x = self.padding(x)

        # CQT via time-domain convolution with the (windowed) complex kernels.
        # sqrt(lenghts) rescales each bin by its kernel length.
        CQT_real = conv1d(x, self.cqt_kernels_real, stride=self.hop_length) * \
                   torch.sqrt(self.lenghts.view(-1,1))
        CQT_imag = -conv1d(x, self.cqt_kernels_imag, stride=self.hop_length) * \
                   torch.sqrt(self.lenghts.view(-1,1))

        if output_format=='Magnitude':
            if self.trainable==False:
                # Getting CQT Amplitude
                CQT = torch.sqrt(CQT_real.pow(2)+CQT_imag.pow(2))
            else:
                # epsilon keeps sqrt differentiable at zero when training kernels
                CQT = torch.sqrt(CQT_real.pow(2)+CQT_imag.pow(2)+1e-8)
            return CQT

        elif output_format=='Complex':
            return torch.stack((CQT_real,CQT_imag),-1)

        elif output_format=='Phase':
            phase_real = torch.cos(torch.atan2(CQT_imag,CQT_real))
            phase_imag = torch.sin(torch.atan2(CQT_imag,CQT_real))
            return torch.stack((phase_real,phase_imag), -1)

    def forward_manual(self,x):
        """
        Method for debugging
        """
        x = broadcast_dim(x)
        if self.center:
            x = self.padding(x)

        # CQT
        CQT_real = conv1d(x, self.cqt_kernels_real, stride=self.hop_length)
        CQT_imag = conv1d(x, self.cqt_kernels_imag, stride=self.hop_length)

        # Getting CQT Amplitude
        CQT = torch.sqrt(CQT_real.pow(2)+CQT_imag.pow(2))
        return CQT*torch.sqrt(self.lenghts.view(-1,1))
class CQT2010v2(torch.nn.Module):
    """This function is to calculate the CQT of the input signal.
    Input signal should be in either of the following shapes.\n
    1. ``(len_audio)``\n
    2. ``(num_audio, len_audio)``\n
    3. ``(num_audio, 1, len_audio)``
    The correct shape will be inferred automatically if the input follows these 3 shapes.
    Most of the arguments follow the convention from librosa.
    This class inherits from ``torch.nn.Module``, therefore, the usage is same as ``torch.nn.Module``.
    This algorithm uses the resampling method proposed in [1].
    Instead of convoluting the STFT results with a gigantic CQT kernel covering the full frequency
    spectrum, we make a small CQT kernel covering only the top octave. Then we keep downsampling the
    input audio by a factor of 2 to convoluting it with the small CQT kernel.
    Everytime the input audio is downsampled, the CQT relative to the downsampled input is equivalent
    to the next lower octave.
    The kernel creation process is still same as the 1992 algorithm. Therefore, we can reuse the
    code from the 1992 algorithm [2]
    [1] Schörkhuber, Christian. “CONSTANT-Q TRANSFORM TOOLBOX FOR MUSIC PROCESSING.” (2010).
    [2] Brown, Judith C.C. and Miller Puckette. “An efficient algorithm for the calculation of a
    constant Q transform.” (1992).
    Early downsampling factor is to downsample the input audio to reduce the CQT kernel size.
    The result with and without early downsampling are more or less the same except in the very low
    frequency region where freq < 40Hz.

    Parameters
    ----------
    sr : int
        The sampling rate for the input audio. It is used to calculate the correct ``fmin`` and ``fmax``.
        Setting the correct sampling rate is very important for calculating the correct frequency.
    hop_length : int
        The hop (or stride) size. Default value is 512.
    fmin : float
        The frequency for the lowest CQT bin. Default is 32.70Hz, which corresponds to the note C0.
    fmax : float
        The frequency for the highest CQT bin. Default is ``None``, therefore the highest CQT bin is
        inferred from the ``n_bins`` and ``bins_per_octave``. If ``fmax`` is not ``None``, then the
        argument ``n_bins`` will be ignored and ``n_bins`` will be calculated automatically.
        Default is ``None``
    n_bins : int
        The total numbers of CQT bins. Default is 84. Will be ignored if ``fmax`` is not ``None``.
    bins_per_octave : int
        Number of bins per octave. Default is 12.
    norm : bool
        Normalization for the CQT result.
    basis_norm : int
        Normalization for the CQT kernels. ``1`` means L1 normalization, and ``2`` means L2 normalization.
        Default is ``1``, which is same as the normalization used in librosa.
    window : str
        The windowing function for CQT. It uses ``scipy.signal.get_window``, please refer to
        scipy documentation for possible windowing functions. The default value is 'hann'
    pad_mode : str
        The padding method. Default value is 'reflect'.
    trainable : bool
        Determine if the CQT kernels are trainable or not. If ``True``, the gradients for CQT kernels
        will also be calculated and the CQT kernels will be updated during model training.
        Default value is ``False``
    output_format : str
        Determine the return type.
        'Magnitude' will return the magnitude of the STFT result, shape = ``(num_samples, freq_bins, time_steps)``;
        'Complex' will return the STFT result in complex number, shape = ``(num_samples, freq_bins, time_steps, 2)``;
        'Phase' will return the phase of the STFT result, shape = ``(num_samples, freq_bins,time_steps, 2)``.
        The complex number is stored as ``(real, imag)`` in the last axis. Default value is 'Magnitude'.
    verbose : bool
        If ``True``, it shows layer information. If ``False``, it suppresses all prints.

    Returns
    -------
    spectrogram : torch.tensor
        It returns a tensor of spectrograms.
        shape = ``(num_samples, freq_bins,time_steps)`` if ``output_format='Magnitude'``;
        shape = ``(num_samples, freq_bins,time_steps, 2)`` if ``output_format='Complex' or 'Phase'``;

    Examples
    --------
    >>> spec_layer = Spectrogram.CQT2010v2()
    >>> specs = spec_layer(x)
    """

    def __init__(self, sr=22050, hop_length=512, fmin=32.70, fmax=None, n_bins=84,
                 bins_per_octave=12, norm=True, basis_norm=1, window='hann', pad_mode='reflect',
                 earlydownsample=True, trainable=False, output_format='Magnitude', verbose=True):

        super().__init__()

        self.norm = norm  # Now norm is used to normalize the final CQT result by dividing n_fft
        # basis_norm is for normalizing basis
        self.hop_length = hop_length
        self.pad_mode = pad_mode
        self.n_bins = n_bins
        self.earlydownsample = earlydownsample  # We will activate early downsampling later if possible
        self.trainable = trainable
        self.output_format = output_format

        # Quality factor; used to calculate filter_cutoff and to create CQT kernels
        Q = 1/(2**(1/bins_per_octave)-1)

        # Creating lowpass filter and make it a torch tensor
        if verbose==True:
            print("Creating low pass filter ...", end='\r')
        start = time()
        lowpass_filter = torch.tensor(create_lowpass_filter(
            band_center = 0.50,
            kernelLength=256,
            transitionBandwidth=0.001)
        )

        # Broadcast the tensor to the shape that fits conv1d
        self.register_buffer('lowpass_filter', lowpass_filter[None,None,:])
        if verbose==True:
            print("Low pass filter created, time used = {:.4f} seconds".format(time()-start))

        # Calculate num of filter requires for the kernel
        # n_octaves determines how many resampling requires for the CQT
        n_filters = min(bins_per_octave, n_bins)
        self.n_octaves = int(np.ceil(float(n_bins) / bins_per_octave))
        if verbose==True:
            print("num_octave = ", self.n_octaves)

        # Calculate the lowest frequency bin for the top octave kernel
        self.fmin_t = fmin*2**(self.n_octaves-1)
        remainder = n_bins % bins_per_octave

        if remainder==0:
            # Calculate the top bin frequency
            fmax_t = self.fmin_t*2**((bins_per_octave-1)/bins_per_octave)
        else:
            # Calculate the top bin frequency
            fmax_t = self.fmin_t*2**((remainder-1)/bins_per_octave)

        self.fmin_t = fmax_t/2**(1-1/bins_per_octave)  # Adjusting the top minimum bins
        if fmax_t > sr/2:
            raise ValueError('The top bin {}Hz has exceeded the Nyquist frequency, '
                             'please reduce the n_bins'.format(fmax_t))

        if self.earlydownsample == True:  # Do early downsampling if this argument is True
            if verbose==True:
                print("Creating early downsampling filter ...", end='\r')
            start = time()
            sr, self.hop_length, self.downsample_factor, early_downsample_filter, \
                self.earlydownsample = get_early_downsample_params(sr,
                                                                   hop_length,
                                                                   fmax_t,
                                                                   Q,
                                                                   self.n_octaves,
                                                                   verbose)
            self.register_buffer('early_downsample_filter', early_downsample_filter)

            if verbose==True:
                print("Early downsampling filter created, "
                      "time used = {:.4f} seconds".format(time()-start))
        else:
            self.downsample_factor=1.

        # Preparing CQT kernels
        if verbose==True:
            print("Creating CQT kernels ...", end='\r')
        start = time()
        basis, self.n_fft, lenghts = create_cqt_kernels(Q,
                                                        sr,
                                                        self.fmin_t,
                                                        n_filters,
                                                        bins_per_octave,
                                                        norm=basis_norm,
                                                        topbin_check=False)

        # For normalization in the end.
        # Fix: `np.float` was deprecated in NumPy 1.20 and removed in 1.24;
        # the builtin `float` is the documented replacement.
        freqs = fmin * 2.0 ** (np.r_[0:n_bins] / float(bins_per_octave))
        lenghts = np.ceil(Q * sr / freqs)
        lenghts = torch.tensor(lenghts).float()
        self.register_buffer('lenghts', lenghts)

        self.basis = basis
        # These cqt_kernel is already in the frequency domain
        cqt_kernels_real = torch.tensor(basis.real.astype(np.float32)).unsqueeze(1)
        cqt_kernels_imag = torch.tensor(basis.imag.astype(np.float32)).unsqueeze(1)

        if trainable:
            cqt_kernels_real = torch.nn.Parameter(cqt_kernels_real, requires_grad=trainable)
            cqt_kernels_imag = torch.nn.Parameter(cqt_kernels_imag, requires_grad=trainable)
            self.register_parameter('cqt_kernels_real', cqt_kernels_real)
            self.register_parameter('cqt_kernels_imag', cqt_kernels_imag)
        else:
            self.register_buffer('cqt_kernels_real', cqt_kernels_real)
            self.register_buffer('cqt_kernels_imag', cqt_kernels_imag)

        if verbose==True:
            print("CQT kernels created, time used = {:.4f} seconds".format(time()-start))

        # If center==True, the STFT window will be put in the middle, and paddings at the beginning
        # and ending are required.
        if self.pad_mode == 'constant':
            self.padding = nn.ConstantPad1d(self.n_fft//2, 0)
        elif self.pad_mode == 'reflect':
            self.padding = nn.ReflectionPad1d(self.n_fft//2)

    def forward(self,x,output_format=None):
        """
        Convert a batch of waveforms to CQT spectrograms.

        Parameters
        ----------
        x : torch tensor
            Input signal should be in either of the following shapes.\n
            1. ``(len_audio)``\n
            2. ``(num_audio, len_audio)``\n
            3. ``(num_audio, 1, len_audio)``
            It will be automatically broadcast to the right shape
        """
        output_format = output_format or self.output_format

        x = broadcast_dim(x)
        if self.earlydownsample==True:
            x = downsampling_by_n(x, self.early_downsample_filter, self.downsample_factor)
        hop = self.hop_length
        CQT = get_cqt_complex(x, self.cqt_kernels_real, self.cqt_kernels_imag, hop, self.padding)  # Getting the top octave CQT

        x_down = x  # Preparing a new variable for downsampling

        # Each iteration halves the sampling rate and the hop, which shifts the
        # same kernels one octave lower.
        for i in range(self.n_octaves-1):
            hop = hop//2
            x_down = downsampling_by_2(x_down, self.lowpass_filter)
            CQT1 = get_cqt_complex(x_down, self.cqt_kernels_real, self.cqt_kernels_imag, hop, self.padding)
            CQT = torch.cat((CQT1, CQT),1)

        CQT = CQT[:,-self.n_bins:,:]  # Removing unwanted bottom bins

        # Normalizing the output with the downsampling factor, 2**(self.n_octaves-1) is make it
        # same mag as 1992
        CQT = CQT*self.downsample_factor
        # Normalize again to get same result as librosa
        CQT = CQT*torch.sqrt(self.lenghts.view(-1,1,1))

        if output_format=='Magnitude':
            if self.trainable==False:
                # Getting CQT Amplitude
                return torch.sqrt(CQT.pow(2).sum(-1))
            else:
                # epsilon keeps sqrt differentiable at zero when training kernels
                return torch.sqrt(CQT.pow(2).sum(-1)+1e-8)

        elif output_format=='Complex':
            return CQT

        elif output_format=='Phase':
            phase_real = torch.cos(torch.atan2(CQT[:,:,:,1],CQT[:,:,:,0]))
            phase_imag = torch.sin(torch.atan2(CQT[:,:,:,1],CQT[:,:,:,0]))
            return torch.stack((phase_real,phase_imag), -1)
# Backward-compatible alias: `CQT` behaves exactly like `CQT1992v2`.
class CQT(CQT1992v2):
    """An abbreviation for :func:`~nnAudio.Spectrogram.CQT1992v2`. Please refer to the :func:`~nnAudio.Spectrogram.CQT1992v2` documentation"""
    pass
# The section below is for developing purpose
# Please don't use the following classes
#
class DFT(torch.nn.Module):
    """
    Experimental feature before `torch.fft` was made available.
    The inverse function only works for 1 single frame. i.e. input shape = (batch, n_fft, 1)
    """

    def __init__(self, n_fft=2048, freq_bins=None, hop_length=512,
                 window='hann', freq_scale='no', center=True, pad_mode='reflect',
                 fmin=50, fmax=6000, sr=22050):

        super().__init__()

        self.stride = hop_length  # conv1d stride doubles as the STFT hop size
        self.center = center
        self.pad_mode = pad_mode
        self.n_fft = n_fft

        # Create filter windows for stft.
        # NOTE(review): this unpacks 3 values from create_fourier_kernels while
        # iSTFT below unpacks 5 — confirm this matches the helper's signature.
        wsin, wcos, self.bins2freq = create_fourier_kernels(n_fft=n_fft,
                                                            freq_bins=n_fft,
                                                            window=window,
                                                            freq_scale=freq_scale,
                                                            fmin=fmin,
                                                            fmax=fmax,
                                                            sr=sr)
        # Kernels are kept as plain (non-registered) tensors, so they will not
        # move with .to(device) or appear in the state dict.
        self.wsin = torch.tensor(wsin, dtype=torch.float)
        self.wcos = torch.tensor(wcos, dtype=torch.float)

    def forward(self,x):
        """
        Convert a batch of waveforms to spectrums.

        Parameters
        ----------
        x : torch tensor
            Input signal should be in either of the following shapes.\n
            1. ``(len_audio)``\n
            2. ``(num_audio, len_audio)``\n
            3. ``(num_audio, 1, len_audio)``
            It will be automatically broadcast to the right shape
        """
        x = broadcast_dim(x)
        if self.center:
            # `padding` is only defined for these two modes; any other
            # pad_mode with center=True raises NameError below.
            if self.pad_mode == 'constant':
                padding = nn.ConstantPad1d(self.n_fft//2, 0)
            elif self.pad_mode == 'reflect':
                padding = nn.ReflectionPad1d(self.n_fft//2)

            x = padding(x)

        # Fourier transform as two real convolutions; the sign flip on `imag`
        # yields the forward-transform convention.
        imag = conv1d(x, self.wsin, stride=self.stride)
        real = conv1d(x, self.wcos, stride=self.stride)
        return (real, -imag)

    def inverse(self,x_real,x_imag):
        """
        Convert a batch of waveforms to CQT spectrograms.

        Parameters
        ----------
        x_real : torch tensor
            Real part of the signal.
        x_imag : torch tensor
            Imaginary part of the signal.
        """
        x_real = broadcast_dim(x_real)
        x_imag = broadcast_dim(x_imag)

        # NOTE: transpose_ mutates the caller's tensors in place.
        x_real.transpose_(1,2)  # Prepare the right shape to do inverse
        x_imag.transpose_(1,2)  # Prepare the right shape to do inverse

        # Watch out for the positive and negative signs
        # ifft = e^(+2\pi*j)*X
        # ifft(X_real) = (a1, a2)
        # ifft(X_imag)*1j = (b1, b2)*1j
        #                = (-b2, b1)
        a1 = conv1d(x_real, self.wcos, stride=self.stride)
        a2 = conv1d(x_real, self.wsin, stride=self.stride)
        b1 = conv1d(x_imag, self.wcos, stride=self.stride)
        b2 = conv1d(x_imag, self.wsin, stride=self.stride)

        imag = a2+b1
        real = a1-b2
        return (real/self.n_fft, imag/self.n_fft)
class iSTFT(torch.nn.Module):
    """This class is to convert spectrograms back to waveforms. It only works for the complex value spectrograms.
    If you have the magnitude spectrograms, please use :func:`~nnAudio.Spectrogram.Griffin_Lim`.
    The parameters (e.g. n_fft, window) need to be the same as the STFT in order to obtain the correct inverse.
    If trainability is not required, it is recommended to use the ``inverse`` method under the ``STFT`` class
    to save GPU/RAM memory.
    When ``trainable=True`` and ``freq_scale!='no'``, there is no guarantee that the inverse is perfect, please
    use with extra care.

    Parameters
    ----------
    n_fft : int
        The window size. Default value is 2048.
    freq_bins : int
        Number of frequency bins. Default is ``None``, which means ``n_fft//2+1`` bins
        Please make sure the value is the same as the forward STFT.
    hop_length : int
        The hop (or stride) size. Default value is ``None`` which is equivalent to ``n_fft//4``.
        Please make sure the value is the same as the forward STFT.
    window : str
        The windowing function for iSTFT. It uses ``scipy.signal.get_window``, please refer to
        scipy documentation for possible windowing functions. The default value is 'hann'.
        Please make sure the value is the same as the forward STFT.
    freq_scale : 'linear', 'log', or 'no'
        Determine the spacing between each frequency bin. When `linear` or `log` is used,
        the bin spacing can be controlled by ``fmin`` and ``fmax``. If 'no' is used, the bin will
        start at 0Hz and end at Nyquist frequency with linear spacing.
        Please make sure the value is the same as the forward STFT.
    center : bool
        Putting the iSTFT keneral at the center of the time-step or not. If ``False``, the time
        index is the beginning of the iSTFT kernel, if ``True``, the time index is the center of
        the iSTFT kernel. Default value if ``True``.
        Please make sure the value is the same as the forward STFT.
    fmin : int
        The starting frequency for the lowest frequency bin. If freq_scale is ``no``, this argument
        does nothing. Please make sure the value is the same as the forward STFT.
    fmax : int
        The ending frequency for the highest frequency bin. If freq_scale is ``no``, this argument
        does nothing. Please make sure the value is the same as the forward STFT.
    sr : int
        The sampling rate for the input audio. It is used to calucate the correct ``fmin`` and ``fmax``.
        Setting the correct sampling rate is very important for calculating the correct frequency.
    trainable_kernels : bool
        Determine if the STFT kenrels are trainable or not. If ``True``, the gradients for STFT
        kernels will also be caluclated and the STFT kernels will be updated during model training.
        Default value is ``False``.
    trainable_window : bool
        Determine if the window function is trainable or not.
        Default value is ``False``.
    verbose : bool
        If ``True``, it shows layer information. If ``False``, it suppresses all prints.
    device : str
        Choose which device to initialize this layer. Default value is 'cpu'.

    Returns
    -------
    spectrogram : torch.tensor
        It returns a batch of waveforms.

    Examples
    --------
    >>> spec_layer = Spectrogram.iSTFT()
    >>> specs = spec_layer(x)
    """

    def __init__(self, n_fft=2048, win_length=None, freq_bins=None, hop_length=None, window='hann',
                 freq_scale='no', center=True, fmin=50, fmax=6000, sr=22050, trainable_kernels=False,
                 trainable_window=False, verbose=True, refresh_win=True):

        super().__init__()

        # Trying to make the default setting same as librosa
        if win_length==None: win_length = n_fft
        if hop_length==None: hop_length = int(win_length // 4)

        self.n_fft = n_fft
        self.win_length = win_length
        self.stride = hop_length
        self.center = center

        # Amount trimmed from each end of the output when center=True
        self.pad_amount = self.n_fft // 2
        self.refresh_win = refresh_win

        start = time()

        # Create the window function and prepare the shape for batch-wise-time-wise multiplication
        # Create filter windows for inverse
        kernel_sin, kernel_cos, _, _, window_mask = create_fourier_kernels(n_fft,
                                                                           win_length=win_length,
                                                                           freq_bins=n_fft,
                                                                           window=window,
                                                                           freq_scale=freq_scale,
                                                                           fmin=fmin,
                                                                           fmax=fmax,
                                                                           sr=sr,
                                                                           verbose=False)
        # The helper's window_mask is overwritten here with an unwindowed copy;
        # for inverse, the Fourier kernels do not need to be windowed.
        window_mask = get_window(window,int(win_length), fftbins=True)
        window_mask = torch.tensor(window_mask).unsqueeze(0).unsqueeze(-1)

        # kernel_sin and kernel_cos have the shape (freq_bins, 1, n_fft, 1) to support 2D Conv
        kernel_sin = torch.tensor(kernel_sin, dtype=torch.float).unsqueeze(-1)
        kernel_cos = torch.tensor(kernel_cos, dtype=torch.float).unsqueeze(-1)

        # Decide if the Fourier kernels are trainable
        if trainable_kernels:
            # Making all these variables trainable
            kernel_sin = torch.nn.Parameter(kernel_sin, requires_grad=trainable_kernels)
            kernel_cos = torch.nn.Parameter(kernel_cos, requires_grad=trainable_kernels)
            self.register_parameter('kernel_sin', kernel_sin)
            self.register_parameter('kernel_cos', kernel_cos)
        else:
            self.register_buffer('kernel_sin', kernel_sin)
            self.register_buffer('kernel_cos', kernel_cos)

        # Decide if the window function is trainable
        if trainable_window:
            window_mask = torch.nn.Parameter(window_mask, requires_grad=trainable_window)
            self.register_parameter('window_mask', window_mask)
        else:
            self.register_buffer('window_mask', window_mask)

        if verbose==True:
            print("iSTFT kernels created, time used = {:.4f} seconds".format(time()-start))
        else:
            pass

    def forward(self, X, onesided=False, length=None, refresh_win=None):
        """
        If your spectrograms only have ``n_fft//2+1`` frequency bins, please use ``onesided=True``,
        else use ``onesided=False``
        To make sure the inverse STFT has the same output length of the original waveform, please
        set `length` as your intended waveform length. By default, ``length=None``,
        which will remove ``n_fft//2`` samples from the start and the end of the output.
        If your input spectrograms X are of the same length, please use ``refresh_win=None`` to increase
        computational speed.
        """
        if refresh_win==None:
            refresh_win=self.refresh_win

        assert X.dim()==4 , "Inverse iSTFT only works for complex number," \
                            "make sure our tensor is in the shape of (batch, freq_bins, timesteps, 2)"

        # If the input spectrogram contains only half of the n_fft
        # Use extend_fbins function to get back another half
        if onesided:
            X = extend_fbins(X) # extend freq

        X_real, X_imag = X[:, :, :, 0], X[:, :, :, 1]

        # broadcast dimensions to support 2D convolution
        X_real_bc = X_real.unsqueeze(1)
        X_imag_bc = X_imag.unsqueeze(1)

        a1 = conv2d(X_real_bc, self.kernel_cos, stride=(1,1))
        b2 = conv2d(X_imag_bc, self.kernel_sin, stride=(1,1))

        # compute real and imag part. signal lies in the real part
        real = a1 - b2
        real = real.squeeze(-2)*self.window_mask

        # Normalize the amplitude with n_fft
        real /= (self.n_fft)

        # Overlap and Add algorithm to connect all the frames
        real = overlap_add(real, self.stride)

        # Prepare the window sumsquare for division
        # Only need to create this window once to save time
        # Unless the input spectrograms have different time steps.
        # NOTE: w_sum/nonzero_indices are cached lazily on the instance, not
        # registered as buffers.
        if hasattr(self, 'w_sum')==False or refresh_win==True:
            self.w_sum = torch_window_sumsquare(self.window_mask.flatten(), X.shape[2], self.stride, self.n_fft).flatten()
            self.nonzero_indices = (self.w_sum>1e-10)
        else:
            pass
        real[:, self.nonzero_indices] = real[:,self.nonzero_indices].div(self.w_sum[self.nonzero_indices])

        # Remove padding
        if length is None:
            if self.center:
                real = real[:, self.pad_amount:-self.pad_amount]

        else:
            if self.center:
                real = real[:, self.pad_amount:self.pad_amount + length]
            else:
                real = real[:, :length]

        return real
class Griffin_Lim(torch.nn.Module):
    """
    Converting Magnitude spectrograms back to waveforms based on the "fast Griffin-Lim"[1].
    This Griffin Lim is a direct clone from librosa.griffinlim.
    [1] Perraudin, N., Balazs, P., & Søndergaard, P. L. “A fast Griffin-Lim algorithm,”
    IEEE Workshop on Applications of Signal Processing to Audio and Acoustics (pp. 1-4), Oct. 2013.

    Parameters
    ----------
    n_fft : int
        The window size. Default value is 2048.
    n_iter=32 : int
        The number of iterations for Griffin-Lim. The default value is ``32``
    hop_length : int
        The hop (or stride) size. Default value is ``None`` which is equivalent to ``n_fft//4``.
        Please make sure the value is the same as the forward STFT.
    window : str
        The windowing function for iSTFT. It uses ``scipy.signal.get_window``, please refer to
        scipy documentation for possible windowing functions. The default value is 'hann'.
        Please make sure the value is the same as the forward STFT.
    center : bool
        Putting the iSTFT keneral at the center of the time-step or not. If ``False``, the time
        index is the beginning of the iSTFT kernel, if ``True``, the time index is the center of
        the iSTFT kernel. Default value if ``True``.
        Please make sure the value is the same as the forward STFT.
    momentum : float
        The momentum for the update rule. The default value is ``0.99``.
    device : str
        Choose which device to initialize this layer. Default value is 'cpu'
    """

    def __init__(self,
                 n_fft,
                 n_iter=32,
                 hop_length=None,
                 win_length=None,
                 window='hann',
                 center=True,
                 pad_mode='reflect',
                 momentum=0.99,
                 device='cpu'):
        super().__init__()

        self.n_fft = n_fft
        self.win_length = win_length
        self.n_iter = n_iter
        self.center = center
        self.pad_mode = pad_mode
        self.momentum = momentum
        self.device = device
        if win_length==None:
            self.win_length=n_fft
        else:
            self.win_length=win_length
        if hop_length==None:
            self.hop_length = n_fft//4
        else:
            self.hop_length = hop_length

        # Creating window function for stft and istft later
        self.w = torch.tensor(get_window(window,
                                         int(self.win_length),
                                         fftbins=True),
                              device=device).float()

    def forward(self, S):
        """
        Convert a batch of magnitude spectrograms to waveforms.

        Parameters
        ----------
        S : torch tensor
            Spectrogram of the shape ``(batch, n_fft//2+1, timesteps)``
        """

        assert S.dim()==3 , "Please make sure your input is in the shape of (batch, freq_bins, timesteps)"

        # Initializing Random Phase
        rand_phase = torch.randn(*S.shape, device=self.device)
        angles = torch.empty((*S.shape,2), device=self.device)
        angles[:, :,:,0] = torch.cos(2 * np.pi * rand_phase)
        angles[:,:,:,1] = torch.sin(2 * np.pi * rand_phase)

        # Initializing the rebuilt magnitude spectrogram
        rebuilt = torch.zeros(*angles.shape, device=self.device)

        # NOTE(review): torch.stft/istft are called here with the pre-1.8
        # real-tensor (..., 2) API (no return_complex) — confirm the intended
        # torch version before upgrading.
        for _ in range(self.n_iter):
            tprev = rebuilt # Saving previous rebuilt magnitude spec

            # spec2wav conversion
            inverse = torch.istft(S.unsqueeze(-1) * angles,
                                  self.n_fft,
                                  self.hop_length,
                                  win_length=self.win_length,
                                  window=self.w,
                                  center=self.center)
            # wav2spec conversion
            rebuilt = torch.stft(inverse,
                                 self.n_fft,
                                 self.hop_length,
                                 win_length=self.win_length,
                                 window=self.w,
                                 pad_mode=self.pad_mode)

            # Phase update rule: accelerated (momentum) Griffin-Lim step
            angles[:,:,:] = rebuilt[:,:,:] - (self.momentum / (1 + self.momentum)) * tprev[:,:,:]

            # Phase normalization
            angles = angles.div(torch.sqrt(angles.pow(2).sum(-1)).unsqueeze(-1) + 1e-16) # normalizing the phase

        # Using the final phase to reconstruct the waveforms
        inverse = torch.istft(S.unsqueeze(-1) * angles,
                              self.n_fft,
                              self.hop_length,
                              win_length=self.win_length,
                              window=self.w,
                              center=self.center)
        return inverse
class Combined_Frequency_Periodicity(nn.Module):
"""
Vectorized version of the code in https://github.com/leo-so/VocalMelodyExtPatchCNN/blob/master/MelodyExt.py.
This feature is described in 'Combining Spectral and Temporal Representations for Multipitch Estimation of Polyphonic Music'
https://ieeexplore.ieee.org/document/7118691
Under development, please report any bugs you found
"""
def __init__(self,fr=2, fs=16000, hop_length=320,
             window_size=2049, fc=80, tc=1/1000,
             g=[0.24, 0.6, 1], NumPerOct=48):
    # NOTE(review): `g=[0.24, 0.6, 1]` is a mutable default argument — safe
    # only because it is never mutated here; confirm before refactoring.
    super().__init__()
    self.window_size = window_size
    self.hop_length = hop_length

    # variables for STFT part
    self.N = int(fs/float(fr)) # Will be used to calculate padding
    # NOTE(review): np.round(...) as the `num` argument of linspace returns a
    # numpy scalar — confirm it stays integer-typed on the targeted NumPy.
    self.f = fs*np.linspace(0, 0.5, np.round(self.N//2), endpoint=True) # it won't be used but will be returned
    self.pad_value = ((self.N-window_size))
    # Create window function, always blackmanharris?
    h = scipy.signal.blackmanharris(window_size).astype(np.float32) # window function for STFT
    self.register_buffer('h',torch.tensor(h))

    # variables for CFP
    self.NumofLayer = np.size(g)
    self.g = g
    self.tc_idx = round(fs*tc) # index to filter out top tc_idx and bottom tc_idx bins
    self.fc_idx = round(fc/fr) # index to filter out top fc_idx and bottom fc_idx bins
    self.HighFreqIdx = int(round((1/tc)/fr)+1)
    self.HighQuefIdx = int(round(fs/fc)+1)

    # attributes to be returned
    self.f = self.f[:self.HighFreqIdx]
    self.q = np.arange(self.HighQuefIdx)/float(fs)

    # filters for the final step
    freq2logfreq_matrix, quef2logfreq_matrix = self.create_logfreq_matrix(self.f, self.q, fr, fc, tc, NumPerOct, fs)
    self.register_buffer('freq2logfreq_matrix',torch.tensor(freq2logfreq_matrix.astype(np.float32)))
    self.register_buffer('quef2logfreq_matrix',torch.tensor(quef2logfreq_matrix.astype(np.float32)))
def _CFP(self, spec):
spec = torch.relu(spec).pow(self.g[0])
if self.NumofLayer >= 2:
for gc in range(1, self.NumofLayer):
if np.remainder(gc, 2) == 1:
ceps = torch.rfft(spec, 1, onesided=False)[:,:,:,0]/np.sqrt(self.N)
ceps = self.nonlinear_func(ceps, self.g[gc], self.tc_idx)
else:
spec = torch.rfft(ceps, 1, onesided=False)[:,:,:,0]/np.sqrt(self.N)
spec = self.nonlinear_func(spec, self.g[gc], self.fc_idx)
return spec, ceps
def forward(self, x):
tfr0 = torch.stft(x, self.N, hop_length=self.hop_length, win_length=self.window_size,
window=self.h, onesided=False, pad_mode='constant')
tfr0 = torch.sqrt(tfr0.pow(2).sum(-1))/torch.norm(self.h) # calcuate magnitude
tfr0 = tfr0.transpose(1,2)[:,1:-1] #transpose F and T axis and discard first and last frames
# The transpose is necessary for rfft later
# (batch, timesteps, n_fft)
tfr, ceps = self._CFP(tfr0)
# return tfr0
# removing duplicate bins
tfr0 = tfr0[:,:,:int(round(self.N/2))]
tfr = tfr[:,:,:int(round(self.N/2))]
ceps = ceps[:,:,:int(round(self.N/2))]
# Crop up to the highest frequency
tfr0 = tfr0[:,:,:self.HighFreqIdx]
tfr = tfr[:,:,:self.HighFreqIdx]
ceps = ceps[:,:,:self.HighQuefIdx]
tfrL0 = torch.matmul(self.freq2logfreq_matrix, tfr0.transpose(1,2))
tfrLF = torch.matmul(self.freq2logfreq_matrix, tfr.transpose(1,2))
tfrLQ = torch.matmul(self.quef2logfreq_matrix, ceps.transpose(1,2))
Z = tfrLF * tfrLQ
# Only need to calculate this once
self.t = np.arange(self.hop_length,
np.ceil(len(x)/float(self.hop_length))*self.hop_length,
self.hop_length) # it won't be used but will be returned
return Z#, tfrL0, tfrLF, tfrLQ
def nonlinear_func(self, X, g, cutoff):
cutoff = int(cutoff)
if g!=0:
X = torch.relu(X)
X[:, :, :cutoff] = 0
X[:, :, -cutoff:] = 0
X = X.pow(g)
else: # when g=0, it converges to log
X = torch.log(X)
X[:, :, :cutoff] = 0
X[:, :, -cutoff:] = 0
return X
def create_logfreq_matrix(self, f, q, fr, fc, tc, NumPerOct, fs):
StartFreq = fc
StopFreq = 1/tc
Nest = int(np.ceil(np.log2(StopFreq/StartFreq))*NumPerOct)
central_freq = [] # A list holding the frequencies in log scale
for i in range(0, Nest):
CenFreq = StartFreq*pow(2, float(i)/NumPerOct)
if CenFreq < StopFreq:
central_freq.append(CenFreq)
else:
break
Nest = len(central_freq)
freq_band_transformation = np.zeros((Nest-1, len(f)), dtype=np.float)
# Calculating the freq_band_transformation
for i in range(1, Nest-1):
l = int(round(central_freq[i-1]/fr))
r = int(round(central_freq[i+1]/fr)+1)
#rounding1
if l >= r-1:
freq_band_transformation[i, l] = 1
else:
for j in range(l, r):
if f[j] > central_freq[i-1] and f[j] < central_freq[i]:
freq_band_transformation[i, j] = (f[j] - central_freq[i-1]) / (central_freq[i] - central_freq[i-1])
elif f[j] > central_freq[i] and f[j] < central_freq[i+1]:
freq_band_transformation[i, j] = (central_freq[i + 1] - f[j]) / (central_freq[i + 1] - central_freq[i])
# Calculating the quef_band_transformation
f = 1/q # divide by 0, do I need to fix this?
quef_band_transformation = np.zeros((Nest-1, len(f)), dtype=np.float)
for i in range(1, Nest-1):
for j in range(int(round(fs/central_freq[i+1])), int(round(fs/central_freq[i-1])+1)):
if f[j] > central_freq[i-1] and f[j] < central_freq[i]:
quef_band_transformation[i, j] = (f[j] - central_freq[i-1])/(central_freq[i] - central_freq[i-1])
elif f[j] > central_freq[i] and f[j] < central_freq[i+1]:
quef_band_transformation[i, j] = (central_freq[i + 1] - f[j]) / (central_freq[i + 1] - central_freq[i])
return freq_band_transformation, quef_band_transformation
| 96,009 | 41.976723 | 401 | py |
ReconVAT | ReconVAT-master/model/Segmentation.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import matplotlib.pyplot as plt
import torch.nn.init as init
import numpy as np
from nnAudio import Spectrogram
from .constants import *
from model.utils import Normalization
def _l2_normalize(d, binwise):
# input shape (batch, timesteps, bins, ?)
if binwise==True:
d = d/(torch.abs(d)+1e-8)
else:
d = d/(torch.norm(d, dim=-1, keepdim=True))
return d
class Seg_VAT(nn.Module):
    """
    Virtual adversarial training (VAT) regularization for the segmentation
    model: estimate the perturbation r_adv (of magnitude `epsilon`) that most
    changes the model output, then return the divergence between clean and
    perturbed predictions (local distributional smoothness).

    Fixes relative to the original:
    - the second assertion re-checked isnan while its message claimed inf;
      it now actually uses torch.isinf.
    - the power iteration re-enables grad tracking on the updated direction,
      so n_power > 1 no longer crashes with d.grad == None.
    """
    def __init__(self, XI, epsilon, n_power, KL_Div, reconstruction=False):
        super().__init__()
        self.n_power = n_power          # number of power-iteration steps
        self.XI = XI                    # step size for the direction estimate
        self.epsilon = epsilon          # final perturbation magnitude
        self.KL_Div = KL_Div            # use binary KL divergence instead of BCE
        self.binwise = False
        self.reconstruction = reconstruction

    def forward(self, model, x):
        with torch.no_grad():
            y_ref = model(x) # This will be used as a label, therefore no need grad()

        # Power iteration to estimate the adversarial direction.
        d = torch.randn_like(x, requires_grad=True) # Need gradient
        for _ in range(self.n_power):
            r = self.XI * _l2_normalize(d, binwise=self.binwise)
            x_adv = (x + r).clamp(0,1)
            y_pred = model(x_adv)
            if self.KL_Div==True:
                loss = binary_kl_div(y_pred, y_ref)
            else:
                loss = F.binary_cross_entropy(y_pred, y_ref)
            loss.backward() # Calculate gradient wrt d
            # Re-enable grad tracking on the updated direction; the original
            # detached it, so a second power iteration crashed (d.grad None).
            d = (d.grad.detach()*1e10).requires_grad_()
            model.zero_grad() # prevent gradient change in the model
        d = d.detach() # direction only — no gradient should flow through it

        # generating virtual labels and calculate VAT
        r_adv = self.epsilon * _l2_normalize(d, binwise=self.binwise)
        assert torch.isnan(r_adv).any()==False, f"r_adv has nan, d min={d.min()} d max={d.max()} d mean={d.mean()} please debug tune down the XI for VAT"
        assert torch.isinf(r_adv).any()==False, f"r_adv has inf, d min={d.min()} d max={d.max()} d mean={d.mean()} please debug tune down the XI for VAT"

        x_adv = (x + r_adv).clamp(0,1)
        y_pred = model(x_adv)
        if self.KL_Div==True:
            vat_loss = binary_kl_div(y_pred, y_ref)
        else:
            vat_loss = F.binary_cross_entropy(y_pred, y_ref)

        return vat_loss, r_adv, _l2_normalize(d, binwise=self.binwise) # already averaged
def calculate_padding(input_size, kernel_size, stride):
    """Compute F.pad padding (left, right, top, bottom) reproducing
    TensorFlow's 'SAME' convolution padding for a 2D input.

    `kernel_size` and `stride` may each be an int or a (h, w) tuple."""
    def _pad_amount(size, ksize, step):
        # TF 'SAME': total pad so that output size == ceil(size / stride)
        if size % step == 0:
            return max(ksize - step, 0)
        return max(ksize - size % step, 0)

    k_h, k_w = kernel_size if isinstance(kernel_size, tuple) else (kernel_size, kernel_size)
    s_h, s_w = stride if isinstance(stride, tuple) else (stride, stride)

    total_h = _pad_amount(input_size[0], k_h, s_h)
    total_w = _pad_amount(input_size[1], k_w, s_w)

    top = total_h // 2
    left = total_w // 2
    # F.pad expects (left, right, top, bottom) for 4D inputs; any odd
    # remainder goes on the right/bottom side, as in TensorFlow.
    return (left, total_w - left, top, total_h - top)
def transpose_padding_same(x, input_shape, stride):
    """
    Trying to implement padding='SAME' as in tensorflow for the
    Conv2dTranspose layer: crop `x` (N, C, H, W) so its spatial size equals
    the pre-transpose input size scaled by `stride`.

    Fixes two bugs in the original version: when the shapes already matched
    it fell through and raised NameError on `left`, and a zero padding in a
    single dimension produced an empty `[l:-0]` slice.
    """
    target = torch.tensor(input_shape[2:]) * torch.tensor(stride)
    current = torch.tensor(x.shape[2:])
    if torch.equal(target, current):
        # Nothing to remove.
        return x
    extra = current - target
    left = extra // 2
    # Use explicit end indices instead of negative ones so that a zero crop
    # in one dimension keeps that dimension intact.
    return x[:, :, left[0]:left[0] + target[0], left[1]:left[1] + target[1]]
def SAME_padding(x, ksize, stride):
    """Zero-pad `x` (TF 'SAME' style) so a conv with `ksize`/`stride`
    produces ceil(size/stride) outputs per spatial dimension."""
    return F.pad(x, calculate_padding(x.shape[2:], ksize, stride))
class Conv_Block(nn.Module):
    """Residual convolution block with TF-style 'SAME' padding.

    Two pre-activated convolutions (ReLU -> BN -> Dropout -> pad -> conv);
    the first may downsample. When it does, a strided 1x1 convolution
    projects the input on the skip path before the residual add.
    """
    def __init__(self, inp, out, ksize, stride=(2,2), dilation_rate=1, dropout_rate=0.4):
        super().__init__()
        self.ksize = ksize
        self.stride = stride
        self.stride_conv2 = 1   # the second conv never downsamples
        self.ksize_skip = 1     # 1x1 conv matches channels on the skip path

        # Padding is applied manually via SAME_padding/F.pad, so every conv
        # layer is created with padding=0.
        self.conv1 = nn.Conv2d(inp, out, kernel_size=ksize, stride=stride, padding=0, dilation=dilation_rate)
        self.bn1 = nn.BatchNorm2d(inp)
        self.dropout1 = nn.Dropout(dropout_rate)

        self.conv2 = nn.Conv2d(out, out, kernel_size=ksize, stride=self.stride_conv2, padding=0, dilation=dilation_rate)
        self.bn2 = nn.BatchNorm2d(out)
        self.dropout2 = nn.Dropout(dropout_rate)

        self.conv_skip = nn.Conv2d(inp, out, kernel_size=self.ksize_skip, stride=stride, padding=0)

    def forward(self, x):
        residual = x  # kept for the skip connection

        h = self.dropout1(self.bn1(torch.relu(x)))
        h = self.conv1(SAME_padding(h, self.ksize, self.stride))

        h = self.dropout2(self.bn2(torch.relu(h)))
        h = self.conv2(SAME_padding(h, self.ksize, self.stride_conv2))

        if self.stride != (1,1):
            # Downsampling: project the input with the strided 1x1 conv so
            # shapes match before the residual add.
            residual = self.conv_skip(SAME_padding(residual, self.ksize_skip, self.stride))
        return h + residual
class transpose_conv_block(nn.Module):
    """Residual upsampling block: a pre-activated 1-stride conv followed by a
    strided ConvTranspose2d, with a ConvTranspose2d skip connection when the
    block upsamples.

    ``forward`` also takes the target ``shape`` (the matching encoder
    activation's shape) and crops the transposed-conv output down to it,
    mimicking TensorFlow's padding='SAME' for transposed convolutions.
    """
    def __init__(self, inp, out, ksize, stride=(2,2), dropout_rate=0.4):
        super().__init__()
        self.stride = stride
        self.ksize = ksize
        padding=0 # We don't pad with the Conv2d class, we use F.pad to pad instead
        self.conv1 = nn.Conv2d(inp,out, kernel_size=ksize, stride=(1,1), padding=padding)
        self.bn1 = nn.BatchNorm2d(inp)
        self.dropout1 = nn.Dropout(dropout_rate)

        self.conv2 = nn.ConvTranspose2d(out, out, kernel_size=ksize, stride=stride, padding=padding)
        self.bn2 = nn.BatchNorm2d(out)
        self.dropout2 = nn.Dropout(dropout_rate)

        self.conv_skip = nn.ConvTranspose2d(inp, out, kernel_size=1, stride=stride, padding=padding)

    def forward(self, x, shape):
        skip = x # save a copy for the skip connection later
        input_shape_skip = skip.shape # will be used as in the transpose padding later
        # Pre-activation: ReLU -> BN -> Dropout before each convolution.
        x = self.bn1(torch.relu(x))
        x = self.dropout1(x)
        x = SAME_padding(x, self.ksize, (1,1))
        x = self.conv1(x)

        x = self.bn2(torch.relu(x))
        x = self.dropout2(x)
        input_shape = x.shape
        x = self.conv2(x)
        # Crop back to input_shape * stride (TF 'SAME' for transposed conv).
        x = transpose_padding_same(x, input_shape, self.stride)
        # Removing extra pixels induced due to ConvTranspose
        if x.shape[2]>shape[2]:
            x = x[:,:,:-1,:]
        if x.shape[3]>shape[3]:
            x = x[:,:,:,:-1]

        if self.stride!=(1,1):
            # Check keras about the transConv output shape
            skip = self.conv_skip(skip, output_size=x.shape) # make output size same as x
        x = x + skip # skip connection

        return x
class Decoder_Block(nn.Module):
    """Decoder stage: fuse a decoder feature map with the matching encoder
    activation (1x1 conv over their concatenation plus a residual add), then
    upsample by 2 with a transpose_conv_block."""
    def __init__(self,
                 input_channels,
                 encoder_channels,
                 hidden_channels,
                 output_channels,
                 dropout_rate=0.4):
        super().__init__()
        # Padding is handled via SAME_padding/F.pad, not by Conv2d itself.
        self.ksize = (1,1)
        self.stride = (1,1)
        self.layer1a = nn.Conv2d(input_channels+encoder_channels, hidden_channels, kernel_size=self.ksize, stride=self.stride) # the channel dim for feature
        self.bn = nn.BatchNorm2d(input_channels)
        self.bn_en = nn.BatchNorm2d(encoder_channels)
        self.dropout1 = nn.Dropout(dropout_rate)

        self.layer1b = transpose_conv_block(input_channels, output_channels, (3,3), (2,2))

    def forward(self, x, encoder_output, encoder_shape):
        residual = x  # the fused features are added back onto this
        decoder_feat = self.bn(torch.relu(x))
        encoder_feat = self.bn_en(torch.relu(encoder_output))
        fused = self.dropout1(torch.cat((decoder_feat, encoder_feat), 1))
        fused = self.layer1a(SAME_padding(fused, self.ksize, self.stride))
        # Residual connection, then upsample towards the encoder shape.
        return self.layer1b(fused + residual, encoder_shape)
class MutliHeadAttention2D(nn.Module):
    """Multi-head 2D local self-attention (the class-name typo is kept so
    existing checkpoints and callers keep working).

    Each output position attends over a kernel_size window of the zero-padded
    input; ``groups`` is the number of attention heads. Learned
    relative-position embeddings (rel_t over the window's first axis, rel_f
    over the second) are added to the two key halves before the dot product.
    """
    def __init__(self, in_channels, out_channels, kernel_size=(3,3), stride=(1,1), groups=1, bias=False):
        """kernel_size is the 2D local attention window size"""
        super().__init__()
        self.out_channels = out_channels
        self.kernel_size = kernel_size
        self.stride = stride
        # Padding should always be (kernel_size-1)/2
        # Isn't it?
        self.padding_time = (kernel_size[0]-1)//2
        self.padding_freq = (kernel_size[1]-1)//2
        self.groups = groups  # number of attention heads

        # Make sure the feature dim is divisible by the n_heads
        assert self.out_channels % self.groups == 0, "out_channels should be divided by groups. (example: out_channels: 40, groups: 4)"

        # Relative position encoding: half of the channels encodes the offset
        # along the window's first axis, the other half the second axis.
        self.rel_t = nn.Parameter(torch.randn(out_channels // 2, 1, 1, kernel_size[0], 1), requires_grad=True)
        self.rel_f = nn.Parameter(torch.randn(out_channels // 2, 1, 1, 1, kernel_size[1]), requires_grad=True)

        # Increasing the channel deapth (feature dim) with Conv2D
        # kernel_size=1 such that it expands only the feature dim
        # without affecting other dimensions
        self.key_conv = nn.Conv2d(in_channels, out_channels, kernel_size=1, bias=bias)
        self.query_conv = nn.Conv2d(in_channels, out_channels, kernel_size=1, bias=bias)
        self.value_conv = nn.Conv2d(in_channels, out_channels, kernel_size=1, bias=bias)

        self.reset_parameters()

    def forward(self, x):
        """Return (attended features (batch, out_channels, H, W),
        attention weights (batch, groups, H, W, window))."""
        batch, channels, height, width = x.size()
        # Zero-pad so every position has a full local window.
        padded_x = F.pad(x, [self.padding_freq, self.padding_freq, self.padding_time, self.padding_time])
        q_out = self.query_conv(x)
        k_out = self.key_conv(padded_x)
        v_out = self.value_conv(padded_x)

        # Extract sliding local windows for keys and values.
        k_out = k_out.unfold(2, self.kernel_size[0], self.stride[0]).unfold(3, self.kernel_size[1], self.stride[1])
        # (batch, channels, H, W, H_local_w, W_local_w)
        v_out = v_out.unfold(2, self.kernel_size[0], self.stride[0]).unfold(3, self.kernel_size[1], self.stride[1])
        # (batch, channels, H, W, H_local_w, W_local_w)

        k_out_t, k_out_f = k_out.split(self.out_channels // 2, dim=1)
        k_out = torch.cat((k_out_t + self.rel_t, k_out_f + self.rel_f), dim=1) # relative position?

        k_out = k_out.contiguous().view(batch, self.groups, self.out_channels // self.groups, height, width, -1)
        v_out = v_out.contiguous().view(batch, self.groups, self.out_channels // self.groups, height, width, -1)
        # (batch, n_heads, feature_per_head, H, W, local H X W)
        # expand the last dimension s.t. it can multiple with the local att window
        q_out = q_out.view(batch, self.groups, self.out_channels // self.groups, height, width, 1)
        # (batch, n_heads, feature_per_head, H, W, 1)

        # Alternative way to express dot product
        # same as k_out = k_out.permute(0,1,3,4,2,5)
        # and then energy = torch.matmul(q_out,k_out)
        energy = (q_out * k_out).sum(dim=2, keepdim=True)

        attention = F.softmax(energy, dim=-1)
        # (batch, n_heads, 1, H, W, local HXW)
        out = attention*v_out
        # (batch, n_heads, feature_per_head, H, W, local HXW)
        # (batch, c, H, W)
        return out.sum(-1).flatten(1,2), attention.squeeze(2)

    def reset_parameters(self):
        # Kaiming init for the three projections; standard-normal init for
        # the relative-position embeddings.
        init.kaiming_normal_(self.key_conv.weight, mode='fan_out', nonlinearity='relu')
        init.kaiming_normal_(self.value_conv.weight, mode='fan_out', nonlinearity='relu')
        init.kaiming_normal_(self.query_conv.weight, mode='fan_out', nonlinearity='relu')

        init.normal_(self.rel_t, 0, 1)
        init.normal_(self.rel_f, 0, 1)
class Encoder(nn.Module):
    """Four-stage convolutional encoder built from residual Conv_Blocks.

    A 7x7 stem convolution is followed by four stages; each stage doubles
    the channel count (32 -> 256) and halves the spatial resolution with
    its first block. The activations of the first three stages are returned
    as skip connections for the decoder. Constructor arguments other than
    input_size and dropout_rate are not read here; they are kept for API
    compatibility.
    """
    def __init__(self,
                 input_size,
                 feature_num=128,
                 timesteps=256,
                 multi_grid_layer_n=1,
                 multi_grid_n=3,
                 ch_num=1,
                 prog=False,
                 dropout_rate=0.4,
                 out_class=2):
        super().__init__()

        # Stem: one 7x7 conv, stride 1, manual TF-'SAME' padding via F.pad.
        stem_ksize = (7,7)
        stem_stride = (1,1)
        self.en_padding = calculate_padding(input_size, stem_ksize, stem_stride)
        # Instead of using Z, it should be using Z_f and Z_q
        # But for the sake of this experiment,
        self.encoding_layer = nn.Conv2d(1, 32, kernel_size=stem_ksize, stride=stem_stride, padding=0)

        down = (2,2)  # first block of each stage downsamples
        keep = (1,1)

        self.layer1a = Conv_Block(32, 32, ksize=(3,3), stride=down, dropout_rate=dropout_rate)
        self.layer1b = Conv_Block(32, 32, ksize=(3,3), stride=keep, dropout_rate=dropout_rate)

        self.layer2a = Conv_Block(32, 64, ksize=(3,3), stride=down, dropout_rate=dropout_rate)
        self.layer2b = Conv_Block(64, 64, ksize=(3,3), stride=keep, dropout_rate=dropout_rate)
        self.layer2c = Conv_Block(64, 64, ksize=(3,3), stride=keep, dropout_rate=dropout_rate)

        self.layer3a = Conv_Block(64, 128, ksize=(3,3), stride=down, dropout_rate=dropout_rate)
        self.layer3b = Conv_Block(128, 128, ksize=(3,3), stride=keep, dropout_rate=dropout_rate)
        self.layer3c = Conv_Block(128, 128, ksize=(3,3), stride=keep, dropout_rate=dropout_rate)
        self.layer3d = Conv_Block(128, 128, ksize=(3,3), stride=keep, dropout_rate=dropout_rate)

        self.layer4a = Conv_Block(128, 256, ksize=(3,3), stride=down, dropout_rate=dropout_rate)
        self.layer4b = Conv_Block(256, 256, ksize=(3,3), stride=keep, dropout_rate=dropout_rate)
        self.layer4c = Conv_Block(256, 256, ksize=(3,3), stride=keep, dropout_rate=dropout_rate)
        self.layer4d = Conv_Block(256, 256, ksize=(3,3), stride=keep, dropout_rate=dropout_rate)
        self.layer4e = Conv_Block(256, 256, ksize=(3,3), stride=keep, dropout_rate=dropout_rate)

    def forward(self, x):
        original_shape = x.shape
        x = self.encoding_layer(F.pad(x, self.en_padding))

        x = self.layer1b(self.layer1a(x))
        en_l1, shape1 = x, x.shape

        x = self.layer2c(self.layer2b(self.layer2a(x)))
        en_l2, shape2 = x, x.shape

        x = self.layer3d(self.layer3c(self.layer3b(self.layer3a(x))))
        en_l3, shape3 = x, x.shape

        x = self.layer4e(self.layer4d(self.layer4c(self.layer4b(self.layer4a(x)))))
        # The deepest activation (and its shape) is consumed directly by the
        # caller, so only the three shallower skips are returned here.
        return x, (en_l1, en_l2, en_l3), (original_shape, shape1, shape2, shape3)
class Decoder(nn.Module):
    """Three-stage decoder mirroring the encoder: each stage fuses the
    corresponding encoder skip connection and upsamples by 2."""
    def __init__(self,
                 dropout_rate=0.4):
        super().__init__()
        self.de_layer1 = Decoder_Block(128, 128, 128, 64, dropout_rate)
        self.de_layer2 = Decoder_Block(64, 64, 64, 64, dropout_rate)
        self.de_layer3 = Decoder_Block(64, 32, 64, 64, dropout_rate)

    def forward(self, x, encoder_outputs, encoder_shapes):
        # Walk the encoder skips from deepest to shallowest: stage k uses
        # encoder_outputs[-k] and targets encoder_shapes[-(k+1)].
        stages = (self.de_layer1, self.de_layer2, self.de_layer3)
        for depth, stage in enumerate(stages, start=2):
            x = stage(x, encoder_outputs[-(depth - 1)], encoder_shapes[-depth])
        return x
class Semantic_Segmentation(nn.Module):
    """Semantic-segmentation style transcription network.

    Pipeline: spectrogram frontend -> convolutional Encoder -> two layers of
    2D local self-attention at the bottleneck -> Decoder -> 1x1 conv -> a
    per-frame Linear layer mapping the frequency axis to 88 keys, with a
    sigmoid output.

    Args:
        x: an example input tensor; its spatial shape configures the encoder
            padding and the final linear layer.
        out_class: channel count of the final conv layer.
        log: apply log compression to the spectrogram.
        mode: normalization mode passed to Normalization.
        spec: frontend type, one of 'CQT', 'Mel', 'CFP'.
        device: not read in this implementation; kept for API compatibility.
        XI, eps: VAT step size and perturbation radius.
    """
    def __init__(self, x, out_class=2, dropout_rate=0.4, log=True,
                 mode='imagewise', spec='Mel', device='cpu', XI=1e-6, eps=1e-2):
        super().__init__()
        global N_BINS # using the N_BINS parameter from constant.py

        # Selecting the type of spectrogram to use
        if spec == 'CQT':
            r=2
            N_BINS = 88*r
            self.spectrogram = Spectrogram.CQT1992v2(sr=SAMPLE_RATE, hop_length=HOP_LENGTH,
                                                     n_bins=N_BINS, fmin=27.5,
                                                     bins_per_octave=12*r, trainable=False)
        elif spec == 'Mel':
            self.spectrogram = Spectrogram.MelSpectrogram(sr=SAMPLE_RATE, win_length=WINDOW_LENGTH, n_mels=N_BINS,
                                                          hop_length=HOP_LENGTH, fmin=MEL_FMIN, fmax=MEL_FMAX,
                                                          trainable_mel=False, trainable_STFT=False)
        elif spec == 'CFP':
            self.spectrogram = Spectrogram.CFP(fs=SAMPLE_RATE,
                                               fr=4,
                                               window_size=WINDOW_LENGTH,
                                               hop_length=HOP_LENGTH,
                                               fc=MEL_FMIN,
                                               tc=1/MEL_FMAX)
            N_BINS = self.spectrogram.quef2logfreq_matrix.shape[0]
        else:
            print(f'Please select a correct spectrogram')

        self.log = log
        self.normalize = Normalization(mode)
        self.vat_loss = Seg_VAT(XI, eps, 1, False)

        self.encoder = Encoder((x.shape[2:]), dropout_rate=dropout_rate)

        # Two self-attention layers on the 256-channel bottleneck.
        self.attention_layer1 = MutliHeadAttention2D(256, 64, kernel_size=(17,17), stride=(1,1), groups=1, bias=False)
        self.bn1 = nn.BatchNorm2d(64)

        self.attention_layer2 = MutliHeadAttention2D(64, 128, kernel_size=(17,17), stride=(1,1), groups=1, bias=False)
        self.bn2 = nn.BatchNorm2d(128)

        # L218-221 of the original code
        # Few layers before the Decoder part
        self.layer0a = nn.Conv2d(384, 2**8, (1,1), (1,1))
        self.layer0b = transpose_conv_block(2**8, 2**7, (3,3), (2,2))

        self.decoder = Decoder(dropout_rate=dropout_rate)

        # Last few layers that determines the output
        self.bn_last = nn.BatchNorm2d(2**6)
        self.dropout_last = nn.Dropout(dropout_rate)
        self.conv_last = nn.Conv2d(2**6, out_class, (1,1), (1,1))

        self.inference_model = nn.Linear(x.shape[-1], 88)

    def forward(self, x):
        x, encoder_outputs, encoder_shapes = self.encoder(x)
        en_l4 = x # Will be appened with the attention output and decoder later

        # Two layers of self-attention
        x,_ = self.attention_layer1(en_l4)
        x = self.bn1(torch.relu(x))
        x, _ = self.attention_layer2(x)
        x = self.bn2(torch.relu(x))

        x = torch.cat((en_l4, x),1) # L216

        # L218-221 of the original code
        # Few layers before the Decoder part
        x = SAME_padding(x, (1,1), (1,1))
        x = self.layer0a(x)
        x = x + en_l4
        x = self.layer0b(x, encoder_shapes[-1]) # Transposing back to the Encoder shape

        # Decoder part
        x = self.decoder(x, encoder_outputs, encoder_shapes)

        # Last few layers for the output block
        x = self.bn_last(torch.relu(x))
        x = self.dropout_last(x)
        x = self.conv_last(x)

        # We use a Linear layer as the inference model here
        x = x.squeeze(1) # remove the channel dim
        x = self.inference_model(x)
        x = torch.sigmoid(x)
        return x

    def run_on_batch(self, batch, batch_ul=None, VAT=False):
        """Compute predictions and train/test losses for one batch, with
        optional VAT on the labelled batch and on an unlabelled batch.

        Returns (predictions, losses, spectrogram)."""
        audio_label = batch['audio']
        onset_label = batch['onset']
        frame_label = batch['frame']
        if frame_label.dim() == 2:
            frame_label = frame_label.unsqueeze(0)

        if batch_ul:
            audio_label_ul = batch_ul['audio']
            # Fixed: reshape with the unlabelled batch's own segment length.
            # The original used audio_label.shape[-1] (the labelled batch),
            # which breaks when the two batches have different lengths.
            spec = self.spectrogram(audio_label_ul.reshape(-1, audio_label_ul.shape[-1])[:, :-1]) # x = torch.rand(8,229, 640)
            if self.log:
                spec = torch.log(spec + 1e-5)
            spec = self.normalize.transform(spec)
            spec = spec.transpose(-1,-2).unsqueeze(1)

            lds_ul, _, r_norm_ul = self.vat_loss(self, spec)
        else:
            lds_ul = torch.tensor(0.)
            r_norm_ul = torch.tensor(0.)

        # Converting audio to spectrograms
        spec = self.spectrogram(audio_label.reshape(-1, audio_label.shape[-1])[:, :-1]) # x = torch.rand(8,229, 640)
        # log compression
        if self.log:
            spec = torch.log(spec + 1e-5)
        # Normalizing spectrograms
        spec = self.normalize.transform(spec)
        # swap spec bins with timesteps so that it fits LSTM later
        spec = spec.transpose(-1,-2).unsqueeze(1) # shape (8,1,640,229)

        if VAT:
            lds_l, r_adv, r_norm_l = self.vat_loss(self, spec)
            r_adv = r_adv.squeeze(1) # remove the channel dimension
        else:
            r_adv = None
            lds_l = torch.tensor(0.)
            r_norm_l = torch.tensor(0.)

        frame_pred = self(spec)

        if self.training:
            predictions = {
                'onset': frame_pred,
                'frame': frame_pred,
                'r_adv': r_adv,
            }
            losses = {
                'loss/train_frame': F.binary_cross_entropy(predictions['frame'].squeeze(1), frame_label),
                'loss/train_LDS_l': lds_l,
                'loss/train_LDS_ul': lds_ul,
                'loss/train_r_norm_l': r_norm_l.abs().mean(),
                'loss/train_r_norm_ul': r_norm_ul.abs().mean()
            }
        else:
            predictions = {
                'onset': frame_pred.reshape(*frame_label.shape),
                'frame': frame_pred.reshape(*frame_label.shape),
                'r_adv': r_adv,
            }
            losses = {
                'loss/test_frame': F.binary_cross_entropy(predictions['frame'].squeeze(1), frame_label),
                'loss/test_LDS_l': lds_l,
                'loss/test_r_norm_l': r_norm_l.abs().mean()
            }

        return predictions, losses, spec.squeeze(1)

    def transcribe(self, batch):
        """Inference-only pass: audio -> spectrogram -> piano roll."""
        audio_label = batch['audio']
        # Converting audio to spectrograms
        spec = self.spectrogram(audio_label.reshape(-1, audio_label.shape[-1])[:, :-1]) # x = torch.rand(8,229, 640)
        # log compression
        if self.log:
            spec = torch.log(spec + 1e-5)
        # Normalizing spectrograms
        spec = self.normalize.transform(spec)
        # swap spec bins with timesteps so that it fits LSTM later
        spec = spec.transpose(-1,-2).unsqueeze(1) # shape (8,1,640,229)

        pianoroll = self(spec)
        predictions = {
            'onset': pianoroll,
            'frame': pianoroll,
        }
        return predictions

    def load_my_state_dict(self, state_dict):
        """Useful when loading part of the weights. From https://discuss.pytorch.org/t/how-to-load-part-of-pre-trained-model/1113/2"""
        own_state = self.state_dict()
        for name, param in state_dict.items():
            if name not in own_state:
                # silently skip weights that do not exist in this model
                continue
            if isinstance(param, nn.Parameter):
                # backwards compatibility for serialized parameters
                param = param.data
            own_state[name].copy_(param)
DFMGAN | DFMGAN-main/legacy.py | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import click
import pickle
import re
import copy
import numpy as np
import torch
import dnnlib
from torch_utils import misc
#----------------------------------------------------------------------------
def load_network_pkl(f, force_fp16=False):
    """Load a StyleGAN2(-ADA) network pickle from file object `f`,
    converting legacy TensorFlow pickles to PyTorch modules and optionally
    rebuilding G/D/G_ema with FP16 synthesis settings."""
    data = _LegacyUnpickler(f).load()

    # Legacy TensorFlow pickle => convert.
    if isinstance(data, tuple) and len(data) == 3 and all(isinstance(net, _TFNetworkStub) for net in data):
        tf_G, tf_D, tf_Gs = data
        data = dict(
            G=convert_tf_generator(tf_G),
            D=convert_tf_discriminator(tf_D),
            G_ema=convert_tf_generator(tf_Gs),
        )

    # Add missing fields.
    data.setdefault('training_set_kwargs', None)
    data.setdefault('augment_pipe', None)

    # Validate contents.
    for key in ('G', 'D', 'G_ema'):
        assert isinstance(data[key], torch.nn.Module)
    assert isinstance(data['training_set_kwargs'], (dict, type(None)))
    assert isinstance(data['augment_pipe'], (torch.nn.Module, type(None)))

    # Force FP16.
    if force_fp16:
        for key in ['G', 'D', 'G_ema']:
            old = data[key]
            kwargs = copy.deepcopy(old.init_kwargs)
            if key.startswith('G'):
                kwargs.synthesis_kwargs = dnnlib.EasyDict(kwargs.get('synthesis_kwargs', {}))
                kwargs.synthesis_kwargs.num_fp16_res = 4
                kwargs.synthesis_kwargs.conv_clamp = 256
            if key.startswith('D'):
                kwargs.num_fp16_res = 4
                kwargs.conv_clamp = 256
            if kwargs != old.init_kwargs:
                # Rebuild with the new kwargs and copy the trained weights over.
                new = type(old)(**kwargs).eval().requires_grad_(False)
                misc.copy_params_and_buffers(old, new, require_all=True)
                data[key] = new
    return data
#----------------------------------------------------------------------------
class _TFNetworkStub(dnnlib.EasyDict):
    # Stand-in for the legacy dnnlib.tflib.network.Network class during
    # unpickling: it only needs to hold the pickled attribute dict, and
    # EasyDict provides attribute-style access to it.
    pass
class _LegacyUnpickler(pickle.Unpickler):
    """Unpickler that maps the legacy ``dnnlib.tflib.network.Network`` class
    onto ``_TFNetworkStub`` so TF pickles load without TensorFlow."""
    def find_class(self, module, name):
        if (module, name) == ('dnnlib.tflib.network', 'Network'):
            return _TFNetworkStub
        return super().find_class(module, name)
#----------------------------------------------------------------------------
def _collect_tf_params(tf_net):
# pylint: disable=protected-access
tf_params = dict()
def recurse(prefix, tf_net):
for name, value in tf_net.variables:
tf_params[prefix + name] = value
for name, comp in tf_net.components.items():
recurse(prefix + name + '/', comp)
recurse('', tf_net)
return tf_params
#----------------------------------------------------------------------------
def _populate_module_params(module, *patterns):
    """Copy values into a module's parameters/buffers by regex pattern.

    `patterns` is a flat sequence of (regex, value_fn) pairs. For every
    named parameter/buffer, the first regex that fully matches decides its
    value: value_fn(*match_groups) is copied in; a value_fn of None leaves
    the tensor untouched (used e.g. for resample filters).

    Raises AssertionError if a tensor matches no pattern, and re-raises any
    copy/shape error after printing the offending tensor name and shape.
    """
    for name, tensor in misc.named_params_and_buffers(module):
        found = False
        value = None
        for pattern, value_fn in zip(patterns[0::2], patterns[1::2]):
            match = re.fullmatch(pattern, name)
            if match:
                found = True
                if value_fn is not None:
                    value = value_fn(*match.groups())
                break
        try:
            assert found
            if value is not None:
                tensor.copy_(torch.from_numpy(np.array(value)))
        except Exception:
            # Was a bare `except:`; narrowed so KeyboardInterrupt/SystemExit
            # are not intercepted. The error is still re-raised after
            # printing the tensor that failed.
            print(name, list(tensor.shape))
            raise
#----------------------------------------------------------------------------
def convert_tf_generator(tf_G):
    """Convert a legacy TensorFlow generator stub into a PyTorch
    networks.Generator with the trained weights copied over.

    Raises ValueError for pickles older than version 4 or carrying kwargs
    this converter does not understand.

    Fix: the legacy-ToRGB branch previously wrote to
    ``kwargs.synthesis.kwargs.architecture`` which raised AttributeError;
    the dict is named ``synthesis_kwargs`` (cf. convert_tf_discriminator).
    """
    if tf_G.version < 4:
        raise ValueError('TensorFlow pickle version too low')

    # Collect kwargs.
    tf_kwargs = tf_G.static_kwargs
    known_kwargs = set()
    def kwarg(tf_name, default=None, none=None):
        # Record every queried name so unknown leftovers can be detected.
        known_kwargs.add(tf_name)
        val = tf_kwargs.get(tf_name, default)
        return val if val is not None else none

    # Convert kwargs.
    kwargs = dnnlib.EasyDict(
        z_dim               = kwarg('latent_size',          512),
        c_dim               = kwarg('label_size',           0),
        w_dim               = kwarg('dlatent_size',         512),
        img_resolution      = kwarg('resolution',           1024),
        img_channels        = kwarg('num_channels',         3),
        mapping_kwargs = dnnlib.EasyDict(
            num_layers      = kwarg('mapping_layers',       8),
            embed_features  = kwarg('label_fmaps',          None),
            layer_features  = kwarg('mapping_fmaps',        None),
            activation      = kwarg('mapping_nonlinearity', 'lrelu'),
            lr_multiplier   = kwarg('mapping_lrmul',        0.01),
            w_avg_beta      = kwarg('w_avg_beta',           0.995, none=1),
        ),
        synthesis_kwargs = dnnlib.EasyDict(
            channel_base    = kwarg('fmap_base',            16384) * 2,
            channel_max     = kwarg('fmap_max',             512),
            num_fp16_res    = kwarg('num_fp16_res',         0),
            conv_clamp      = kwarg('conv_clamp',           None),
            architecture    = kwarg('architecture',         'skip'),
            resample_filter = kwarg('resample_kernel',      [1,3,3,1]),
            use_noise       = kwarg('use_noise',            True),
            activation      = kwarg('nonlinearity',         'lrelu'),
        ),
    )

    # Check for unknown kwargs.
    kwarg('truncation_psi')
    kwarg('truncation_cutoff')
    kwarg('style_mixing_prob')
    kwarg('structure')
    unknown_kwargs = list(set(tf_kwargs.keys()) - known_kwargs)
    if len(unknown_kwargs) > 0:
        raise ValueError('Unknown TensorFlow kwarg', unknown_kwargs[0])

    # Collect params.
    tf_params = _collect_tf_params(tf_G)
    for name, value in list(tf_params.items()):
        match = re.fullmatch(r'ToRGB_lod(\d+)/(.*)', name)
        if match:
            r = kwargs.img_resolution // (2 ** int(match.group(1)))
            tf_params[f'{r}x{r}/ToRGB/{match.group(2)}'] = value
            # Fixed: was `kwargs.synthesis.kwargs.architecture`, which
            # raised AttributeError for progressive-growing pickles.
            kwargs.synthesis_kwargs.architecture = 'orig'
    #for name, value in tf_params.items(): print(f'{name:<50s}{list(value.shape)}')

    # Convert params.
    from training import networks
    G = networks.Generator(**kwargs).eval().requires_grad_(False)
    # pylint: disable=unnecessary-lambda
    _populate_module_params(G,
        r'mapping\.w_avg',                                  lambda:     tf_params[f'dlatent_avg'],
        r'mapping\.embed\.weight',                          lambda:     tf_params[f'mapping/LabelEmbed/weight'].transpose(),
        r'mapping\.embed\.bias',                            lambda:     tf_params[f'mapping/LabelEmbed/bias'],
        r'mapping\.fc(\d+)\.weight',                        lambda i:   tf_params[f'mapping/Dense{i}/weight'].transpose(),
        r'mapping\.fc(\d+)\.bias',                          lambda i:   tf_params[f'mapping/Dense{i}/bias'],
        r'synthesis\.b4\.const',                            lambda:     tf_params[f'synthesis/4x4/Const/const'][0],
        r'synthesis\.b4\.conv1\.weight',                    lambda:     tf_params[f'synthesis/4x4/Conv/weight'].transpose(3, 2, 0, 1),
        r'synthesis\.b4\.conv1\.bias',                      lambda:     tf_params[f'synthesis/4x4/Conv/bias'],
        r'synthesis\.b4\.conv1\.noise_const',               lambda:     tf_params[f'synthesis/noise0'][0, 0],
        r'synthesis\.b4\.conv1\.noise_strength',            lambda:     tf_params[f'synthesis/4x4/Conv/noise_strength'],
        r'synthesis\.b4\.conv1\.affine\.weight',            lambda:     tf_params[f'synthesis/4x4/Conv/mod_weight'].transpose(),
        r'synthesis\.b4\.conv1\.affine\.bias',              lambda:     tf_params[f'synthesis/4x4/Conv/mod_bias'] + 1,
        r'synthesis\.b(\d+)\.conv0\.weight',                lambda r:   tf_params[f'synthesis/{r}x{r}/Conv0_up/weight'][::-1, ::-1].transpose(3, 2, 0, 1),
        r'synthesis\.b(\d+)\.conv0\.bias',                  lambda r:   tf_params[f'synthesis/{r}x{r}/Conv0_up/bias'],
        r'synthesis\.b(\d+)\.conv0\.noise_const',           lambda r:   tf_params[f'synthesis/noise{int(np.log2(int(r)))*2-5}'][0, 0],
        r'synthesis\.b(\d+)\.conv0\.noise_strength',        lambda r:   tf_params[f'synthesis/{r}x{r}/Conv0_up/noise_strength'],
        r'synthesis\.b(\d+)\.conv0\.affine\.weight',        lambda r:   tf_params[f'synthesis/{r}x{r}/Conv0_up/mod_weight'].transpose(),
        r'synthesis\.b(\d+)\.conv0\.affine\.bias',          lambda r:   tf_params[f'synthesis/{r}x{r}/Conv0_up/mod_bias'] + 1,
        r'synthesis\.b(\d+)\.conv1\.weight',                lambda r:   tf_params[f'synthesis/{r}x{r}/Conv1/weight'].transpose(3, 2, 0, 1),
        r'synthesis\.b(\d+)\.conv1\.bias',                  lambda r:   tf_params[f'synthesis/{r}x{r}/Conv1/bias'],
        r'synthesis\.b(\d+)\.conv1\.noise_const',           lambda r:   tf_params[f'synthesis/noise{int(np.log2(int(r)))*2-4}'][0, 0],
        r'synthesis\.b(\d+)\.conv1\.noise_strength',        lambda r:   tf_params[f'synthesis/{r}x{r}/Conv1/noise_strength'],
        r'synthesis\.b(\d+)\.conv1\.affine\.weight',        lambda r:   tf_params[f'synthesis/{r}x{r}/Conv1/mod_weight'].transpose(),
        r'synthesis\.b(\d+)\.conv1\.affine\.bias',          lambda r:   tf_params[f'synthesis/{r}x{r}/Conv1/mod_bias'] + 1,
        r'synthesis\.b(\d+)\.torgb\.weight',                lambda r:   tf_params[f'synthesis/{r}x{r}/ToRGB/weight'].transpose(3, 2, 0, 1),
        r'synthesis\.b(\d+)\.torgb\.bias',                  lambda r:   tf_params[f'synthesis/{r}x{r}/ToRGB/bias'],
        r'synthesis\.b(\d+)\.torgb\.affine\.weight',        lambda r:   tf_params[f'synthesis/{r}x{r}/ToRGB/mod_weight'].transpose(),
        r'synthesis\.b(\d+)\.torgb\.affine\.bias',          lambda r:   tf_params[f'synthesis/{r}x{r}/ToRGB/mod_bias'] + 1,
        r'synthesis\.b(\d+)\.skip\.weight',                 lambda r:   tf_params[f'synthesis/{r}x{r}/Skip/weight'][::-1, ::-1].transpose(3, 2, 0, 1),
        r'.*\.resample_filter',                             None,
    )
    return G
#----------------------------------------------------------------------------
def convert_tf_discriminator(tf_D):
    """Convert a TensorFlow StyleGAN2/StyleGAN2-ADA discriminator pickle into
    the native PyTorch `training.networks.Discriminator`, copying all weights.

    Returns the converted module in eval mode with gradients disabled.
    Raises ValueError for unsupported pickle versions or unrecognized kwargs.
    """
    if tf_D.version < 4:
        raise ValueError('TensorFlow pickle version too low')
    # Collect kwargs.
    tf_kwargs = tf_D.static_kwargs
    known_kwargs = set()
    def kwarg(tf_name, default=None):
        # Track every TF kwarg we read so leftovers can be reported below.
        known_kwargs.add(tf_name)
        return tf_kwargs.get(tf_name, default)
    # Convert kwargs (TF names -> PyTorch constructor arguments).
    kwargs = dnnlib.EasyDict(
        c_dim = kwarg('label_size', 0),
        img_resolution = kwarg('resolution', 1024),
        img_channels = kwarg('num_channels', 3),
        architecture = kwarg('architecture', 'resnet'),
        channel_base = kwarg('fmap_base', 16384) * 2,
        channel_max = kwarg('fmap_max', 512),
        num_fp16_res = kwarg('num_fp16_res', 0),
        conv_clamp = kwarg('conv_clamp', None),
        cmap_dim = kwarg('mapping_fmaps', None),
        block_kwargs = dnnlib.EasyDict(
            activation = kwarg('nonlinearity', 'lrelu'),
            resample_filter = kwarg('resample_kernel', [1,3,3,1]),
            freeze_layers = kwarg('freeze_layers', 0),
        ),
        mapping_kwargs = dnnlib.EasyDict(
            num_layers = kwarg('mapping_layers', 0),
            embed_features = kwarg('mapping_fmaps', None),
            layer_features = kwarg('mapping_fmaps', None),
            activation = kwarg('nonlinearity', 'lrelu'),
            lr_multiplier = kwarg('mapping_lrmul', 0.1),
        ),
        epilogue_kwargs = dnnlib.EasyDict(
            mbstd_group_size = kwarg('mbstd_group_size', None),
            mbstd_num_channels = kwarg('mbstd_num_features', 1),
            activation = kwarg('nonlinearity', 'lrelu'),
        ),
    )
    # Check for unknown kwargs.
    kwarg('structure')  # consumed but deliberately ignored
    unknown_kwargs = list(set(tf_kwargs.keys()) - known_kwargs)
    if len(unknown_kwargs) > 0:
        raise ValueError('Unknown TensorFlow kwarg', unknown_kwargs[0])
    # Collect params.
    tf_params = _collect_tf_params(tf_D)
    for name, value in list(tf_params.items()):
        match = re.fullmatch(r'FromRGB_lod(\d+)/(.*)', name)
        if match:
            # Per-lod FromRGB tensors are remapped to resolution-keyed names;
            # their presence implies the 'orig' (non-resnet) architecture.
            r = kwargs.img_resolution // (2 ** int(match.group(1)))
            tf_params[f'{r}x{r}/FromRGB/{match.group(2)}'] = value
            kwargs.architecture = 'orig'
    #for name, value in tf_params.items(): print(f'{name:<50s}{list(value.shape)}')
    # Convert params.
    from training import networks
    D = networks.Discriminator(**kwargs).eval().requires_grad_(False)
    # pylint: disable=unnecessary-lambda
    # Each (regex, transform) pair maps a PyTorch parameter name to its TF
    # source tensor; conv weights are transposed from HWIO to OIHW layout.
    _populate_module_params(D,
        r'b(\d+)\.fromrgb\.weight', lambda r: tf_params[f'{r}x{r}/FromRGB/weight'].transpose(3, 2, 0, 1),
        r'b(\d+)\.fromrgb\.bias', lambda r: tf_params[f'{r}x{r}/FromRGB/bias'],
        r'b(\d+)\.conv(\d+)\.weight', lambda r, i: tf_params[f'{r}x{r}/Conv{i}{["","_down"][int(i)]}/weight'].transpose(3, 2, 0, 1),
        r'b(\d+)\.conv(\d+)\.bias', lambda r, i: tf_params[f'{r}x{r}/Conv{i}{["","_down"][int(i)]}/bias'],
        r'b(\d+)\.skip\.weight', lambda r: tf_params[f'{r}x{r}/Skip/weight'].transpose(3, 2, 0, 1),
        r'mapping\.embed\.weight', lambda: tf_params[f'LabelEmbed/weight'].transpose(),
        r'mapping\.embed\.bias', lambda: tf_params[f'LabelEmbed/bias'],
        r'mapping\.fc(\d+)\.weight', lambda i: tf_params[f'Mapping{i}/weight'].transpose(),
        r'mapping\.fc(\d+)\.bias', lambda i: tf_params[f'Mapping{i}/bias'],
        r'b4\.conv\.weight', lambda: tf_params[f'4x4/Conv/weight'].transpose(3, 2, 0, 1),
        r'b4\.conv\.bias', lambda: tf_params[f'4x4/Conv/bias'],
        r'b4\.fc\.weight', lambda: tf_params[f'4x4/Dense0/weight'].transpose(),
        r'b4\.fc\.bias', lambda: tf_params[f'4x4/Dense0/bias'],
        r'b4\.out\.weight', lambda: tf_params[f'Output/weight'].transpose(),
        r'b4\.out\.bias', lambda: tf_params[f'Output/bias'],
        r'.*\.resample_filter', None,  # buffers are initialized by the PyTorch module itself
    )
    return D
#----------------------------------------------------------------------------
@click.command()
@click.option('--source', help='Input pickle', required=True, metavar='PATH')
@click.option('--dest', help='Output pickle', required=True, metavar='PATH')
@click.option('--force-fp16', help='Force the networks to use FP16', type=bool, default=False, metavar='BOOL', show_default=True)
def convert_network_pickle(source, dest, force_fp16):
    """Convert legacy network pickle into the native PyTorch format.
    The tool is able to load the main network configurations exported using the TensorFlow version of StyleGAN2 or StyleGAN2-ADA.
    It does not support e.g. StyleGAN2-ADA comparison methods, StyleGAN2 configs A-D, or StyleGAN1 networks.
    Example:
    \b
    python legacy.py \\
        --source=https://nvlabs-fi-cdn.nvidia.com/stylegan2/networks/stylegan2-cat-config-f.pkl \\
        --dest=stylegan2-cat-config-f.pkl
    """
    # Load (and, if needed, convert) the legacy pickle, then re-serialize it
    # in the native format at the destination path.
    print(f'Loading "{source}"...')
    with dnnlib.util.open_url(source) as src_file:
        converted = load_network_pkl(src_file, force_fp16=force_fp16)
    print(f'Saving "{dest}"...')
    with open(dest, 'wb') as dst_file:
        pickle.dump(converted, dst_file)
    print('Done.')
#----------------------------------------------------------------------------
if __name__ == "__main__":
convert_network_pickle() # pylint: disable=no-value-for-parameter
#----------------------------------------------------------------------------
| 16,502 | 50.411215 | 154 | py |
DFMGAN | DFMGAN-main/style_mixing.py | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
"""Generate style mixing image matrix using pretrained network pickle."""
import os
import re
from typing import List
import click
import dnnlib
import numpy as np
import PIL.Image
import torch
import legacy
#----------------------------------------------------------------------------
def num_range(s: str) -> List[int]:
    '''Accept either a comma separated list of numbers 'a,b,c' or a range 'a-c' and return as a list of ints.'''
    # A dash-separated pair of integers denotes an inclusive range.
    bounds = re.match(r'^(\d+)-(\d+)$', s)
    if bounds is not None:
        lo, hi = int(bounds.group(1)), int(bounds.group(2))
        return list(range(lo, hi + 1))
    # Otherwise treat the string as a comma-separated list.
    return [int(tok) for tok in s.split(',')]
#----------------------------------------------------------------------------
@click.command()
@click.option('--network', 'network_pkl', help='Network pickle filename', required=True)
@click.option('--rows', 'row_seeds', type=num_range, help='Random seeds to use for image rows', required=True)
@click.option('--cols', 'col_seeds', type=num_range, help='Random seeds to use for image columns', required=True)
@click.option('--styles', 'col_styles', type=num_range, help='Style layer range', default='0-6', show_default=True)
@click.option('--trunc', 'truncation_psi', type=float, help='Truncation psi', default=1, show_default=True)
@click.option('--noise-mode', help='Noise mode', type=click.Choice(['const', 'random', 'none']), default='const', show_default=True)
@click.option('--outdir', type=str, required=True)
def generate_style_mix(
    network_pkl: str,
    row_seeds: List[int],
    col_seeds: List[int],
    col_styles: List[int],
    truncation_psi: float,
    noise_mode: str,
    outdir: str
):
    """Generate images using pretrained network pickle.
    Examples:
    \b
    python style_mixing.py --outdir=out --rows=85,100,75,458,1500 --cols=55,821,1789,293 \\
        --network=https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada-pytorch/pretrained/metfaces.pkl
    """
    print('Loading networks from "%s"...' % network_pkl)
    device = torch.device('cuda')
    with dnnlib.util.open_url(network_pkl) as f:
        G = legacy.load_network_pkl(f)['G_ema'].to(device) # type: ignore
    os.makedirs(outdir, exist_ok=True)
    print('Generating W vectors...')
    # One W vector per distinct seed (a seed may appear in both rows and cols).
    all_seeds = list(set(row_seeds + col_seeds))
    all_z = np.stack([np.random.RandomState(seed).randn(G.z_dim) for seed in all_seeds])
    all_w = G.mapping(torch.from_numpy(all_z).to(device), None)
    w_avg = G.mapping.w_avg
    # Apply truncation by interpolating each W towards the mapping average.
    all_w = w_avg + (all_w - w_avg) * truncation_psi
    w_dict = {seed: w for seed, w in zip(all_seeds, list(all_w))}
    print('Generating images...')
    all_images = G.synthesis(all_w, noise_mode=noise_mode)
    # [-1,1] float -> [0,255] uint8, NCHW -> NHWC.
    all_images = (all_images.permute(0, 2, 3, 1) * 127.5 + 128).clamp(0, 255).to(torch.uint8).cpu().numpy()
    image_dict = {(seed, seed): image for seed, image in zip(all_seeds, list(all_images))}
    print('Generating style-mixed images...')
    for row_seed in row_seeds:
        for col_seed in col_seeds:
            # Start from the row seed's W and overwrite the selected style
            # layers with the column seed's W.
            w = w_dict[row_seed].clone()
            w[col_styles] = w_dict[col_seed][col_styles]
            image = G.synthesis(w[np.newaxis], noise_mode=noise_mode)
            image = (image.permute(0, 2, 3, 1) * 127.5 + 128).clamp(0, 255).to(torch.uint8)
            image_dict[(row_seed, col_seed)] = image[0].cpu().numpy()
    print('Saving images...')
    os.makedirs(outdir, exist_ok=True)
    for (row_seed, col_seed), image in image_dict.items():
        PIL.Image.fromarray(image, 'RGB').save(f'{outdir}/{row_seed}-{col_seed}.png')
    print('Saving image grid...')
    W = G.img_resolution
    H = G.img_resolution
    # Extra row/column at index 0 holds the unmixed source images.
    canvas = PIL.Image.new('RGB', (W * (len(col_seeds) + 1), H * (len(row_seeds) + 1)), 'black')
    for row_idx, row_seed in enumerate([0] + row_seeds):
        for col_idx, col_seed in enumerate([0] + col_seeds):
            if row_idx == 0 and col_idx == 0:
                continue  # top-left corner stays black
            key = (row_seed, col_seed)
            if row_idx == 0:
                key = (col_seed, col_seed)  # header row: column originals
            if col_idx == 0:
                key = (row_seed, row_seed)  # header column: row originals
            canvas.paste(PIL.Image.fromarray(image_dict[key], 'RGB'), (W * col_idx, H * row_idx))
    canvas.save(f'{outdir}/grid.png')
#----------------------------------------------------------------------------
if __name__ == "__main__":
generate_style_mix() # pylint: disable=no-value-for-parameter
#----------------------------------------------------------------------------
| 4,891 | 40.109244 | 132 | py |
DFMGAN | DFMGAN-main/projector.py | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
"""Project given image to the latent space of pretrained network pickle."""
import copy
import os
from time import perf_counter
import click
import imageio
import numpy as np
import PIL.Image
import torch
import torch.nn.functional as F
import dnnlib
import legacy
def project(
    G,
    target: torch.Tensor, # [C,H,W] and dynamic range [0,255], W & H must match G output resolution
    *,
    num_steps = 1000,
    w_avg_samples = 10000,
    initial_learning_rate = 0.1,
    initial_noise_factor = 0.05,
    lr_rampdown_length = 0.25,
    lr_rampup_length = 0.05,
    noise_ramp_length = 0.75,
    regularize_noise_weight = 1e5,
    verbose = False,
    device: torch.device
):
    """Project `target` into G's W latent space by minimizing an LPIPS-style
    VGG16 feature distance plus a noise-regularization term.

    Returns a tensor of shape [num_steps, G.mapping.num_ws, w_dim] holding the
    optimized W (broadcast identically across all layers) after every step.
    """
    assert target.shape == (G.img_channels, G.img_resolution, G.img_resolution)
    def logprint(*args):
        if verbose:
            print(*args)
    # Work on a frozen copy so the caller's G is left untouched.
    G = copy.deepcopy(G).eval().requires_grad_(False).to(device) # type: ignore
    # Compute w stats (Monte-Carlo estimate used for init and noise scaling).
    logprint(f'Computing W midpoint and stddev using {w_avg_samples} samples...')
    z_samples = np.random.RandomState(123).randn(w_avg_samples, G.z_dim)
    w_samples = G.mapping(torch.from_numpy(z_samples).to(device), None) # [N, L, C]
    w_samples = w_samples[:, :1, :].cpu().numpy().astype(np.float32) # [N, 1, C]
    w_avg = np.mean(w_samples, axis=0, keepdims=True) # [1, 1, C]
    w_std = (np.sum((w_samples - w_avg) ** 2) / w_avg_samples) ** 0.5
    # Setup noise inputs (per-layer noise maps are optimized jointly with W).
    noise_bufs = { name: buf for (name, buf) in G.synthesis.named_buffers() if 'noise_const' in name }
    # Load VGG16 feature detector.
    url = 'https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada-pytorch/pretrained/metrics/vgg16.pt'
    with dnnlib.util.open_url(url) as f:
        vgg16 = torch.jit.load(f).eval().to(device)
    # Features for target image.
    target_images = target.unsqueeze(0).to(device).to(torch.float32)
    if target_images.shape[2] > 256:
        target_images = F.interpolate(target_images, size=(256, 256), mode='area')
    target_features = vgg16(target_images, resize_images=False, return_lpips=True)
    # Optimize a single W (starting at the average) shared across all layers.
    w_opt = torch.tensor(w_avg, dtype=torch.float32, device=device, requires_grad=True) # pylint: disable=not-callable
    w_out = torch.zeros([num_steps] + list(w_opt.shape[1:]), dtype=torch.float32, device=device)
    optimizer = torch.optim.Adam([w_opt] + list(noise_bufs.values()), betas=(0.9, 0.999), lr=initial_learning_rate)
    # Init noise.
    for buf in noise_bufs.values():
        buf[:] = torch.randn_like(buf)
        buf.requires_grad = True
    for step in range(num_steps):
        # Learning rate schedule: linear ramp-up, cosine ramp-down; the W
        # perturbation noise decays quadratically over noise_ramp_length.
        t = step / num_steps
        w_noise_scale = w_std * initial_noise_factor * max(0.0, 1.0 - t / noise_ramp_length) ** 2
        lr_ramp = min(1.0, (1.0 - t) / lr_rampdown_length)
        lr_ramp = 0.5 - 0.5 * np.cos(lr_ramp * np.pi)
        lr_ramp = lr_ramp * min(1.0, t / lr_rampup_length)
        lr = initial_learning_rate * lr_ramp
        for param_group in optimizer.param_groups:
            param_group['lr'] = lr
        # Synth images from opt_w.
        w_noise = torch.randn_like(w_opt) * w_noise_scale
        ws = (w_opt + w_noise).repeat([1, G.mapping.num_ws, 1])
        synth_images = G.synthesis(ws, noise_mode='const')
        # Downsample image to 256x256 if it's larger than that. VGG was built for 224x224 images.
        synth_images = (synth_images + 1) * (255/2)
        if synth_images.shape[2] > 256:
            synth_images = F.interpolate(synth_images, size=(256, 256), mode='area')
        # Features for synth images.
        synth_features = vgg16(synth_images, resize_images=False, return_lpips=True)
        dist = (target_features - synth_features).square().sum()
        # Noise regularization: penalize spatial autocorrelation of each noise
        # map at multiple scales (halving via avg-pool down to 8x8).
        reg_loss = 0.0
        for v in noise_bufs.values():
            noise = v[None,None,:,:] # must be [1,1,H,W] for F.avg_pool2d()
            while True:
                reg_loss += (noise*torch.roll(noise, shifts=1, dims=3)).mean()**2
                reg_loss += (noise*torch.roll(noise, shifts=1, dims=2)).mean()**2
                if noise.shape[2] <= 8:
                    break
                noise = F.avg_pool2d(noise, kernel_size=2)
        loss = dist + reg_loss * regularize_noise_weight
        # Step
        optimizer.zero_grad(set_to_none=True)
        loss.backward()
        optimizer.step()
        logprint(f'step {step+1:>4d}/{num_steps}: dist {dist:<4.2f} loss {float(loss):<5.2f}')
        # Save projected W for each optimization step.
        w_out[step] = w_opt.detach()[0]
        # Normalize noise to zero mean and unit variance after each step.
        with torch.no_grad():
            for buf in noise_bufs.values():
                buf -= buf.mean()
                buf *= buf.square().mean().rsqrt()
    return w_out.repeat([1, G.mapping.num_ws, 1])
#----------------------------------------------------------------------------
@click.command()
@click.option('--network', 'network_pkl', help='Network pickle filename', required=True)
@click.option('--target', 'target_fname', help='Target image file to project to', required=True, metavar='FILE')
@click.option('--num-steps', help='Number of optimization steps', type=int, default=1000, show_default=True)
@click.option('--seed', help='Random seed', type=int, default=303, show_default=True)
@click.option('--save-video', help='Save an mp4 video of optimization progress', type=bool, default=True, show_default=True)
@click.option('--outdir', help='Where to save the output images', required=True, metavar='DIR')
def run_projection(
    network_pkl: str,
    target_fname: str,
    outdir: str,
    save_video: bool,
    seed: int,
    num_steps: int
):
    """Project given image to the latent space of pretrained network pickle.
    Examples:
    \b
    python projector.py --outdir=out --target=~/mytargetimg.png \\
        --network=https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada-pytorch/pretrained/ffhq.pkl
    """
    np.random.seed(seed)
    torch.manual_seed(seed)
    # Load networks.
    print('Loading networks from "%s"...' % network_pkl)
    device = torch.device('cuda')
    with dnnlib.util.open_url(network_pkl) as fp:
        G = legacy.load_network_pkl(fp)['G_ema'].requires_grad_(False).to(device) # type: ignore
    # Load target image: center-crop to a square, then resize to G's resolution.
    target_pil = PIL.Image.open(target_fname).convert('RGB')
    w, h = target_pil.size
    s = min(w, h)
    target_pil = target_pil.crop(((w - s) // 2, (h - s) // 2, (w + s) // 2, (h + s) // 2))
    target_pil = target_pil.resize((G.img_resolution, G.img_resolution), PIL.Image.LANCZOS)
    target_uint8 = np.array(target_pil, dtype=np.uint8)
    # Optimize projection.
    start_time = perf_counter()
    projected_w_steps = project(
        G,
        target=torch.tensor(target_uint8.transpose([2, 0, 1]), device=device), # pylint: disable=not-callable
        num_steps=num_steps,
        device=device,
        verbose=True
    )
    print (f'Elapsed: {(perf_counter()-start_time):.1f} s')
    # Render debug output: optional video and projected image and W vector.
    os.makedirs(outdir, exist_ok=True)
    if save_video:
        video = imageio.get_writer(f'{outdir}/proj.mp4', mode='I', fps=10, codec='libx264', bitrate='16M')
        print (f'Saving optimization progress video "{outdir}/proj.mp4"')
        # Each frame shows the target and the current synthesis side by side.
        for projected_w in projected_w_steps:
            synth_image = G.synthesis(projected_w.unsqueeze(0), noise_mode='const')
            synth_image = (synth_image + 1) * (255/2)
            synth_image = synth_image.permute(0, 2, 3, 1).clamp(0, 255).to(torch.uint8)[0].cpu().numpy()
            video.append_data(np.concatenate([target_uint8, synth_image], axis=1))
        video.close()
    # Save final projected frame and W vector.
    target_pil.save(f'{outdir}/target.png')
    projected_w = projected_w_steps[-1]
    synth_image = G.synthesis(projected_w.unsqueeze(0), noise_mode='const')
    synth_image = (synth_image + 1) * (255/2)
    synth_image = synth_image.permute(0, 2, 3, 1).clamp(0, 255).to(torch.uint8)[0].cpu().numpy()
    PIL.Image.fromarray(synth_image, 'RGB').save(f'{outdir}/proj.png')
    np.savez(f'{outdir}/projected_w.npz', w=projected_w.unsqueeze(0).cpu().numpy())
#----------------------------------------------------------------------------
if __name__ == "__main__":
run_projection() # pylint: disable=no-value-for-parameter
#----------------------------------------------------------------------------
| 8,990 | 41.211268 | 136 | py |
DFMGAN | DFMGAN-main/generate.py | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
"""Generate images using pretrained network pickle."""
import os
import re
from typing import List, Optional
import click
import dnnlib
import numpy as np
import PIL.Image
import torch
from tqdm import tqdm
import legacy
#----------------------------------------------------------------------------
def num_range(s: str) -> List[int]:
    '''Accept either a comma separated list of numbers 'a,b,c' or a range 'a-c' and return as a list of ints.'''
    # 'a-b' form: inclusive integer range.
    span = re.match(r'^(\d+)-(\d+)$', s)
    if span:
        start, stop = int(span.group(1)), int(span.group(2))
        return list(range(start, stop + 1))
    # Fall back to parsing a comma-separated list of integers.
    return [int(piece) for piece in s.split(',')]
#----------------------------------------------------------------------------
@click.command()
@click.pass_context
@click.option('--network', 'network_pkl', help='Network pickle filename', required=True)
@click.option('--seeds', type=num_range, help='List of random seeds')
@click.option('--trunc', 'truncation_psi', type=float, help='Truncation psi', default=1, show_default=True)
@click.option('--class', 'class_idx', type=int, help='Class label (unconditional if not specified)')
@click.option('--noise-mode', help='Noise mode', type=click.Choice(['const', 'random', 'none']), default='const', show_default=True)
@click.option('--projected-w', help='Projection result file', type=str, metavar='FILE')
@click.option('--output', help='Where to save the output images', type=str, metavar='FILE', default = None)
@click.option('--cmp', help='Generate images for comparison', type=bool, metavar='BOOL', is_flag=True)
@click.option('--gen-good', help='Generate good images along with images', type=bool, metavar='BOOL', is_flag=True)
@click.option('--gen-mask', help='Generate masks along with images', type=bool, metavar='BOOL', is_flag=True)
@click.option('--num', help='Total number of generated images. Only when --seeds is unspecified. [default: 10 for cmp mode, 100 otherwise]', type=int)
def generate_images(
    ctx: click.Context,
    network_pkl: str,
    seeds: Optional[List[int]],
    truncation_psi: float,
    noise_mode: str,
    output: str,
    class_idx: Optional[int],
    projected_w: Optional[str],
    cmp: bool,
    gen_mask: bool,
    gen_good: bool,
    num: int,
):
    """Generate images using pretrained network pickle.
    Examples:
    \b
    # Generate curated MetFaces images without truncation (Fig.10 left)
    python generate.py --outdir=out --trunc=1 --seeds=85,265,297,849 \\
        --network=https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada-pytorch/pretrained/metfaces.pkl
    \b
    # Generate uncurated MetFaces images with truncation (Fig.12 upper left)
    python generate.py --outdir=out --trunc=0.7 --seeds=600-605 \\
        --network=https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada-pytorch/pretrained/metfaces.pkl
    \b
    # Generate class conditional CIFAR-10 images (Fig.17 left, Car)
    python generate.py --outdir=out --seeds=0-35 --class=1 \\
        --network=https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada-pytorch/pretrained/cifar10.pkl
    \b
    # Render an image from projected W
    python generate.py --outdir=out --projected_w=projected_w.npz \\
        --network=https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada-pytorch/pretrained/metfaces.pkl
    """
    print('Loading networks from "%s"...' % network_pkl)
    device = torch.device('cuda')
    with dnnlib.util.open_url(network_pkl) as f:
        G = legacy.load_network_pkl(f)['G_ema'].to(device) # type: ignore
    # Synthesize the result of a W projection.
    if projected_w is not None:
        if seeds is not None:
            print ('warn: --seeds is ignored when using --projected-w')
        if output is None:
            # BUGFIX: this branch previously wrote to an undefined `outdir`
            # variable (NameError); require and use `output` instead.
            ctx.fail('--output directory is required when using --projected-w')
        print(f'Generating images from projected W "{projected_w}"')
        os.makedirs(output, exist_ok=True)
        ws = np.load(projected_w)['w']
        ws = torch.tensor(ws, device=device) # pylint: disable=not-callable
        assert ws.shape[1:] == (G.num_ws, G.w_dim)
        for idx, w in enumerate(ws):
            img = G.synthesis(w.unsqueeze(0), noise_mode=noise_mode)
            img = (img.permute(0, 2, 3, 1) * 127.5 + 128).clamp(0, 255).to(torch.uint8)
            # BUGFIX: `.save()` returns None; do not assign it back to `img`.
            PIL.Image.fromarray(img[0].cpu().numpy(), 'RGB').save(f'{output}/proj{idx:02d}.png')
        return
    # Default seed list when --seeds is omitted: 10 images in cmp mode,
    # otherwise `num` (or 100) images.
    if seeds is None:
        if num is None:
            seeds = [x for x in range(10)] if cmp else [x for x in range(100)]
        else:
            seeds = [x for x in range(num)]
    # Labels.
    label = torch.zeros([1, G.c_dim], device=device)
    if G.c_dim != 0:
        if class_idx is None:
            ctx.fail('Must specify class label with --class when using a conditional network')
        label[:, class_idx] = 1
    else:
        if class_idx is not None:
            print ('warn: --class=lbl ignored when running on an unconditional network')
    # Resolve the output location: a directory (per-image files) or a single
    # .png (cmp strip). cmp mode derives a default path next to the pickle.
    if output is not None and (not output.endswith('.png')):
        os.makedirs(output, exist_ok=True)
    if cmp and output is None:
        assert network_pkl[-4:] == '.pkl'
        kimg = network_pkl[-10:-4]
        output = os.path.join(os.path.dirname(network_pkl), f'cmp{kimg}.png')
    if not cmp and output is None:
        print('--output must be specified when not using cmp mode')
        exit(1)
    if cmp:
        # Comparison mode: stack good image / mask / defect image vertically
        # per seed, then concatenate all seeds horizontally into one strip.
        canvas = []
        for seed_idx, seed in tqdm(enumerate(seeds)):
            z = torch.from_numpy(np.random.RandomState(seed).randn(1, G.z_dim)).to(device)
            transfer = getattr(G, 'transfer', 'none') != 'none'
            if transfer:
                # Independent z for the defect branch (offset keeps it
                # disjoint from the content seeds).
                defect_z = torch.from_numpy(np.random.RandomState(seed + len(seeds)).randn(1, G.z_dim)).to(device)
                ws = G.mapping(z, None)
                defect_ws = G.defect_mapping(defect_z, label, truncation_psi=truncation_psi)
                if G.transfer in ['res_block', 'res_block_match_dis', 'res_block_uni_dis']:
                    img, mask = G.synthesis(ws, defect_ws, noise_mode=noise_mode, output_mask = True, fix_residual_to_zero = False)
                    good_img = G.synthesis(ws, defect_ws, noise_mode=noise_mode, output_mask = False, fix_residual_to_zero = True)
                    mask = torch.where(mask >= 0.0, 1.0, -1.0)  # binarize to {-1, 1}
                    img = torch.cat([good_img, mask.repeat((1, 3, 1, 1)), img], dim = 2)
                else:
                    img = G.synthesis(ws, defect_ws, noise_mode=noise_mode, fix_residual_to_zero = False)
                    good_img = G.synthesis(ws, defect_ws, noise_mode=noise_mode, fix_residual_to_zero = True)
                    # BUGFIX: removed thresholding of an undefined `mask` here;
                    # this transfer mode does not produce a mask.
                    img = torch.cat([good_img, img], dim = 2)
            else:
                img = G(z, label, truncation_psi=truncation_psi, noise_mode=noise_mode)
            canvas.append(img)
        img = torch.cat(canvas, dim = 3)
        img = (img.permute(0, 2, 3, 1) * 127.5 + 128).clamp(0, 255).to(torch.uint8)
        if not output.endswith('.png'):
            output += '.png'
        PIL.Image.fromarray(img[0].cpu().numpy(), 'RGB').save(f'{output}')
    else:
        # Per-seed mode: save each image (plus optional mask / defect-free
        # image) as individual files in the output directory.
        for seed_idx, seed in tqdm(enumerate(seeds)):
            z = torch.from_numpy(np.random.RandomState(seed).randn(1, G.z_dim)).to(device)
            transfer = getattr(G, 'transfer', 'none') != 'none'
            mask = None
            good_img = None
            if transfer:
                defect_z = torch.from_numpy(np.random.RandomState(seed + len(seeds)).randn(1, G.z_dim)).to(device)
                ws = G.mapping(z, None)
                defect_ws = G.defect_mapping(defect_z, label, truncation_psi=truncation_psi)
                if G.transfer in ['res_block', 'res_block_match_dis', 'res_block_uni_dis']:
                    img, mask = G.synthesis(ws, defect_ws, noise_mode=noise_mode, output_mask = True, fix_residual_to_zero = False)
                    if gen_good:
                        good_img = G.synthesis(ws, defect_ws, noise_mode=noise_mode, output_mask = False, fix_residual_to_zero = True)
                    mask = torch.where(mask >= 0.0, 1.0, -1.0).repeat(1, 3, 1, 1)
                else:
                    # BUGFIX: this branch used to threshold `mask` while it was
                    # still None (TypeError). No mask exists in this mode, so
                    # leave `mask` as None and skip the mask output below.
                    img = G.synthesis(ws, defect_ws, noise_mode=noise_mode, fix_residual_to_zero = False)
            else:
                img = G(z, label, truncation_psi=truncation_psi, noise_mode=noise_mode)
            img = ((img.permute(0, 2, 3, 1) + 1.0) * 127.5).clamp(0, 255).to(torch.uint8)
            PIL.Image.fromarray(img[0].cpu().numpy(), 'RGB').save(os.path.join(output, '%d_img.png' % seed_idx))
            if gen_mask and (mask is not None):
                mask = ((mask.permute(0, 2, 3, 1) + 1.0) * 127.5).clamp(0, 255).to(torch.uint8)
                PIL.Image.fromarray(mask[0].cpu().numpy(), 'RGB').save(os.path.join(output, '%d_mask.png' % seed_idx))
            # BUGFIX: guard on good_img being produced — previously --gen-good
            # with a non-mask model raised NameError on `good_img`.
            if gen_good and (good_img is not None):
                good = ((good_img.permute(0, 2, 3, 1) + 1.0) * 127.5).clamp(0, 255).to(torch.uint8)
                PIL.Image.fromarray(good[0].cpu().numpy(), 'RGB').save(os.path.join(output, '%d_good.png' % seed_idx))
#----------------------------------------------------------------------------
if __name__ == "__main__":
generate_images() # pylint: disable=no-value-for-parameter
#----------------------------------------------------------------------------
| 10,000 | 45.300926 | 150 | py |
DFMGAN | DFMGAN-main/gen_gif_dfmgan.py | """Generate GIF using pretrained network pickle."""
import os
import click
import dnnlib
import numpy as np
from PIL import Image
import torch
import legacy
#----------------------------------------------------------------------------
@click.command()
@click.option('--network', 'network_pkl', help='Network pickle filename', required=True)
@click.option('--seed', help='Random seed', default=0, type=int)
@click.option('--num', help='Number of samples', default=5, type=int)
@click.option('--resolution', help='Resolution of the output images', default=128, type=int)
@click.option('--num-phases', help='Number of phases', default=5, type=int)
@click.option('--transition-frames', help='Number of transition frames per phase', default=10, type=int)
@click.option('--static-frames', help='Number of static frames per phase', default=5, type=int)
@click.option('--trunc', 'truncation_psi', type=float, help='Truncation psi', default=1, show_default=True)
@click.option('--noise-mode', help='Noise mode', type=click.Choice(['const', 'random', 'none']), default='const', show_default=True)
@click.option('--output', type=str)
@click.option('--fix-content', '--fc', help='Use fixed z_object', type=click.BOOL, default=False, is_flag = True)
@click.option('--cond', help = 'conditional, set a label or "all"', type=str, default = 'none')
def generate_gif(
    network_pkl: str,
    seed: int,
    num: int,
    resolution: int,
    num_phases: int,
    transition_frames: int,
    static_frames: int,
    truncation_psi: float,
    noise_mode: str,
    output: str,
    fix_content: bool,
    cond: str,
):
    """Render a latent-interpolation GIF from a DFMGAN transfer model.

    Each frame stacks three rows: generated images, binarized masks, and raw
    masks, with `num` samples per row. Latents interpolate through
    `num_phases` random anchors.

    NOTE(review): `truncation_psi` is accepted but currently unused by the
    mapping calls below — confirm whether it should be forwarded.
    """
    print('Loading networks from "%s"...' % network_pkl)
    device = torch.device('cuda')
    with dnnlib.util.open_url(network_pkl) as f:
        G = legacy.load_network_pkl(f)['G_ema'].to(device) # type: ignore
    transfer = (G.transfer != 'none')
    if not transfer:
        print('Must be a transfer model.')
        exit(1)
    # Default output path lives next to the checkpoint, named by its kimg tag.
    if output is None:
        assert network_pkl[-4:] == '.pkl'
        kimg = network_pkl[-10:-4]
        output = os.path.join(os.path.dirname(network_pkl), f'itp{kimg}.gif' if not fix_content else f'itp{kimg}_fc.gif')
    outdir = os.path.dirname(output)
    if outdir:
        os.makedirs(outdir, exist_ok=True)
    np.random.seed(seed)
    output_seq = []
    if cond == 'all':
        num = G.c_dim  # one sample per class label
    batch_size = num
    latent_size = G.z_dim
    # Random latent anchors per phase; in 'all' mode a single z is shared
    # across the batch so only the class label varies.
    latents = [np.random.randn(batch_size, latent_size) if cond != 'all' else np.random.randn(1, latent_size).repeat(batch_size, 0) for _ in range(num_phases)]
    if transfer:
        latents_defect = [np.random.randn(batch_size, latent_size) if cond != 'all' else np.random.randn(1, latent_size).repeat(batch_size, 0) for _ in range(num_phases)]
    if cond == 'all':
        num_c = G.c_dim
        cond_list = [np.diag([1 for _ in range(num_c)]) for _ in range(num_phases)]
    elif cond != 'none':
        # Single fixed one-hot label replicated across the batch.
        num_c = G.c_dim
        c_label = int(cond)
        c_npy = np.zeros(num_c)
        c_npy[c_label] = 1
        cond_list = [c_npy.reshape(1, -1).repeat(batch_size, 0) for _ in range(num_phases)]
    def to_image_grid(outputs):
        # Stack each row horizontally, then the rows vertically into one frame.
        canvas = []
        for output in outputs:
            output = np.reshape(output, [num, *output.shape[1:]])
            output = np.concatenate(output, axis=1)
            canvas.append(output)
        canvas = np.concatenate(canvas, axis = 0)
        # BUGFIX: Image.ANTIALIAS was removed in Pillow 10; use Image.LANCZOS
        # (same filter, and consistent with projector.py).
        return Image.fromarray(canvas).resize((resolution * num, resolution * len(outputs)), Image.LANCZOS)
    def transfer_generate(dlatents, defectlatents):
        # Render one frame: images, thresholded masks, and soft masks.
        images, masks = G.synthesis(dlatents, defectlatents, noise_mode=noise_mode, output_mask=True)
        masks = masks.repeat((1, 3, 1, 1))
        rounded_masks = masks.clone()
        rounded_masks[rounded_masks >= G.mask_threshold] = 1.0
        rounded_masks[rounded_masks < G.mask_threshold] = -1.0
        images = (images.permute(0, 2, 3, 1) * 127.5 + 128).clamp(0, 255).to(torch.uint8).cpu().numpy()
        masks = (masks.permute(0, 2, 3, 1) * 127.5 + 128).clamp(0, 255).to(torch.uint8).cpu().numpy()
        rounded_masks = (rounded_masks.permute(0, 2, 3, 1) * 127.5 + 128).clamp(0, 255).to(torch.uint8).cpu().numpy()
        return to_image_grid([images, rounded_masks, masks])
    for i in range(num_phases):
        # Phase i interpolates from anchor i-1 to anchor i (i = 0 wraps to the
        # last anchor so the GIF loops smoothly). --fix-content pins the
        # content latent to anchor 0 so only the defect latent moves.
        dlatents0 = G.mapping(torch.from_numpy(latents[i - 1] if not fix_content else latents[0]).to(device), None)
        dlatents1 = G.mapping(torch.from_numpy(latents[i] if not fix_content else latents[0]).to(device), None)
        defectlatents0 = G.defect_mapping(torch.from_numpy(latents_defect[i - 1]).to(device), None if cond == 'none' else torch.from_numpy(cond_list[i - 1]).to(device))
        defectlatents1 = G.defect_mapping(torch.from_numpy(latents_defect[i]).to(device), None if cond == 'none' else torch.from_numpy(cond_list[i]).to(device))
        for j in range(transition_frames):
            # Linear interpolation in W space between the two anchors.
            dlatents = (dlatents0 * (transition_frames - j) + dlatents1 * j) / transition_frames
            defectlatents = (defectlatents0 * (transition_frames - j) + defectlatents1 * j) / transition_frames
            output_seq.append(transfer_generate(dlatents, defectlatents))
        output_seq.extend([transfer_generate(dlatents, defectlatents1)] * static_frames)
    if not output.endswith('.gif'):
        output += '.gif'
    output_seq[0].save(output, save_all=True, append_images=output_seq[1:], optimize=True, duration=100, loop=0)
#----------------------------------------------------------------------------
if __name__ == "__main__":
generate_gif() # pylint: disable=no-value-for-parameter
#----------------------------------------------------------------------------
| 5,711 | 41.947368 | 170 | py |
DFMGAN | DFMGAN-main/generate_gif.py | """Generate GIF using pretrained network pickle."""
import os
import click
import dnnlib
import numpy as np
from PIL import Image
import torch
import legacy
#----------------------------------------------------------------------------
@click.command()
@click.option('--network', 'network_pkl', help='Network pickle filename', required=True)
@click.option('--seed', help='Random seed', default=0, type=int)
@click.option('--num-rows', help='Number of rows', default=2, type=int)
@click.option('--num-cols', help='Number of columns', default=2, type=int)
@click.option('--resolution', help='Resolution of the output images', default=128, type=int)
@click.option('--num-phases', help='Number of phases', default=5, type=int)
@click.option('--transition-frames', help='Number of transition frames per phase', default=20, type=int)
@click.option('--static-frames', help='Number of static frames per phase', default=5, type=int)
@click.option('--trunc', 'truncation_psi', type=float, help='Truncation psi', default=1, show_default=True)
@click.option('--noise-mode', help='Noise mode', type=click.Choice(['const', 'random', 'none']), default='const', show_default=True)
@click.option('--output', type=str, required=True)
# DFMGAN args
@click.option('--latent-mode', help='randomly sampled latent codes', type=click.Choice(['both', 'content', 'defect', 'nores', 'none']), default='both', show_default=True)
def generate_gif(
    network_pkl: str,
    seed: int,
    num_rows: int,
    num_cols: int,
    resolution: int,
    num_phases: int,
    transition_frames: int,
    static_frames: int,
    truncation_psi: float,
    noise_mode: str,
    output: str,
    latent_mode: str,
):
    """Generate a looping GIF of latent interpolations from a pretrained network pickle.

    The animation has `num_phases` segments; each linearly interpolates between
    two batches of random latents over `transition_frames` frames, then holds
    the segment end point for `static_frames` frames. For DFMGAN (transfer)
    models, content and defect latents are interpolated independently, with
    --latent-mode selecting which of the two is animated.

    Examples:

    \b
    python generate_gif.py --output=obama.gif --seed=0 --num-rows=1 --num-cols=8 \\
        --network=https://hanlab.mit.edu/projects/data-efficient-gans/models/DiffAugment-stylegan2-100-shot-obama.pkl
    """
    print('Loading networks from "%s"...' % network_pkl)
    device = torch.device('cuda')
    with dnnlib.util.open_url(network_pkl) as f:
        G = legacy.load_network_pkl(f)['G_ema'].to(device) # type: ignore
    # DFMGAN checkpoints carry a non-'none' transfer attribute.
    transfer = (G.transfer != 'none')

    outdir = os.path.dirname(output)
    if outdir:
        os.makedirs(outdir, exist_ok=True)

    # NOTE(review): truncation_psi is accepted but never forwarded to G.mapping /
    # G.defect_mapping — TODO confirm whether truncation should be applied here.
    np.random.seed(seed)
    output_seq = []
    batch_size = num_rows * num_cols
    latent_size = G.z_dim
    latents = [np.random.randn(batch_size, latent_size) for _ in range(num_phases)]
    if transfer:
        latents_defect = [np.random.randn(batch_size, latent_size) for _ in range(num_phases)]

    def to_image_grid(outputs):
        # Arrange a batch of HWC uint8 images into a (num_rows x num_cols) grid.
        outputs = np.reshape(outputs, [num_rows, num_cols, *outputs.shape[1:]])
        outputs = np.concatenate(outputs, axis=1)
        outputs = np.concatenate(outputs, axis=1)
        # Image.ANTIALIAS was removed in Pillow 10; Image.LANCZOS is the same filter.
        return Image.fromarray(outputs).resize((resolution * num_cols, resolution * num_rows), Image.LANCZOS)

    def generate(dlatents):
        images = G.synthesis(dlatents, noise_mode=noise_mode)
        images = (images.permute(0, 2, 3, 1) * 127.5 + 128).clamp(0, 255).to(torch.uint8).cpu().numpy()
        return to_image_grid(images)

    def transfer_generate(dlatents, defectlatents):
        # 'nores' mode asks the synthesis network to zero the defect residual branch.
        images = G.synthesis(dlatents, defectlatents, noise_mode=noise_mode) if latent_mode != 'nores' else G.synthesis(dlatents, defectlatents, noise_mode=noise_mode, fix_residual_to_zero = True)
        images = (images.permute(0, 2, 3, 1) * 127.5 + 128).clamp(0, 255).to(torch.uint8).cpu().numpy()
        return to_image_grid(images)

    for i in range(num_phases):
        # Interpolate from phase i-1 to phase i; index -1 wraps around, which
        # closes the loop. Modes that freeze a latent reuse the phase-0 batch.
        dlatents0 = G.mapping(torch.from_numpy(latents[i - 1] if latent_mode not in ['defect', 'none'] else latents[0]).to(device), None)
        dlatents1 = G.mapping(torch.from_numpy(latents[i] if latent_mode not in ['defect', 'none'] else latents[0]).to(device), None)
        if transfer:
            defectlatents0 = G.defect_mapping(torch.from_numpy(latents_defect[i - 1] if latent_mode not in ['content', 'none'] else latents_defect[0]).to(device), None)
            defectlatents1 = G.defect_mapping(torch.from_numpy(latents_defect[i] if latent_mode not in ['content', 'none'] else latents_defect[0]).to(device), None)
        for j in range(transition_frames):
            dlatents = (dlatents0 * (transition_frames - j) + dlatents1 * j) / transition_frames
            if transfer:
                defectlatents = (defectlatents0 * (transition_frames - j) + defectlatents1 * j) / transition_frames
                output_seq.append(transfer_generate(dlatents, defectlatents))
            else:
                output_seq.append(generate(dlatents))
        # Hold the phase end point for a few static frames.
        if transfer:
            output_seq.extend([transfer_generate(dlatents1, defectlatents1)] * static_frames)
        else:
            output_seq.extend([generate(dlatents1)] * static_frames)

    if not output.endswith('.gif'):
        output += '.gif'
    output_seq[0].save(output, save_all=True, append_images=output_seq[1:], optimize=False, duration=50, loop=0)
#----------------------------------------------------------------------------
if __name__ == "__main__":
    # click supplies the CLI arguments, hence the pylint suppression.
    generate_gif() # pylint: disable=no-value-for-parameter
#----------------------------------------------------------------------------
| 5,303 | 43.571429 | 196 | py |
DFMGAN | DFMGAN-main/train.py | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
"""Train a GAN using the techniques described in the paper
"Training Generative Adversarial Networks with Limited Data"."""
import os
import click
import re
import json
import tempfile
import torch
import dnnlib
from training import training_loop
from metrics import metric_main
from torch_utils import training_stats
from torch_utils import custom_ops
#----------------------------------------------------------------------------
class UserError(Exception):
    # Raised for invalid command-line option values/combinations; converted
    # into a clean click error message in main() instead of a traceback.
    pass
#----------------------------------------------------------------------------
def setup_training_loop_kwargs(
    # General options (not included in desc).
    gpus       = None, # Number of GPUs: <int>, default = 1 gpu
    snap       = None, # Snapshot interval: <int>, default = 50 ticks
    metrics    = None, # List of metric names: [], ['fid50k_full'] (default), ...
    seed       = None, # Random seed: <int>, default = 0

    # Dataset.
    data       = None, # Training dataset (required): <path>
    cond       = None, # Train conditional model based on dataset labels: <bool>, default = False
    subset     = None, # Train with only N images: <int>, default = all
    mirror     = None, # Augment dataset with x-flips: <bool>, default = False

    # Base config.
    cfg        = None, # Base config: 'auto' (default), 'stylegan2', 'paper256', 'paper512', 'paper1024', 'cifar'
    gamma      = None, # Override R1 gamma: <float>
    kimg       = None, # Override training duration: <int>
    batch      = None, # Override batch size: <int>

    # Discriminator augmentation.
    aug        = None, # Augmentation mode: 'ada' (default), 'noaug', 'fixed'
    p          = None, # Specify p for 'fixed' (required): <float>
    target     = None, # Override ADA target for 'ada': <float>, default = depends on aug
    augpipe    = None, # Augmentation pipeline: 'blit', 'geom', 'color', 'filter', 'noise', 'cutout', 'bg', 'bgc' (default), ..., 'bgcfnc'

    # Transfer learning.
    resume     = None, # Load previous network: 'noresume' (default), 'ffhq256', 'ffhq512', 'ffhq1024', 'celebahq256', 'lsundog256', <file>, <url>
    freezed    = None, # Freeze-D: <int>, default = 0 discriminator layers

    # Performance options (not included in desc).
    fp32       = None, # Disable mixed-precision training: <bool>, default = False
    nhwc       = None, # Use NHWC memory format with FP16: <bool>, default = False
    allow_tf32 = None, # Allow PyTorch to use TF32 for matmul and convolutions: <bool>, default = False
    nobench    = None, # Disable cuDNN benchmarking: <bool>, default = False
    workers    = None, # Override number of DataLoader workers: <int>, default = 3

    # DFMGAN args
    ft             = None, # Finetune mode: 'default', 'ft_map', 'ft_syn', 'ft_syn_2', 'ft_map_syn_2'
    transfer       = None, # Extra network for transfer learning: 'none' (default), 'dual_mod', 'res_block', 'res_block_match_dis', 'res_block_uni_dis'
    res_st         = None, # Starting resolution for ResBlock: <str of int>, default = 64
    uni_st         = None, # Starting resolution for the unified D block: <str of int>, default = 64
    mask_threshold = None, # Threshold between mask/non-mask regions: <float>, default = 0.0
    lambda_match   = None, # Weight of the matching-discriminator loss: <float>, default = 1.0
    mode_seek      = None, # Mode-seeking loss variant: 'none', 'w/mask', 'w/img', 'z/mask'
    lambda_ms      = None, # Weight of the mode-seeking loss: <float>, default = 0.1
    no_round       = None, # Use a soft (non-rounded) mask: <bool>, default = False
    tanh_k         = None, # mask = tanh(k * raw_mask): <float>, default = 1.0
    tanh_mask      = None, # When to apply tanh to the mask: 'none' (default) or 'late'
    dmatch_scale   = None, # D_match "channel_base/channel_max": <str>, default = spec-derived
):
    """Validate the CLI options and assemble kwargs for training_loop().

    Returns:
        (desc, args): `desc` is a short run description embedded in the output
        directory name; `args` is a dnnlib.EasyDict of keyword arguments
        accepted by training_loop.training_loop().

    Raises:
        UserError: for any invalid option value or combination.
    """
    args = dnnlib.EasyDict()

    # ------------------------------------------
    # General options: gpus, snap, metrics, seed
    # ------------------------------------------

    if gpus is None:
        gpus = 1
    assert isinstance(gpus, int)
    if not (gpus >= 1 and gpus & (gpus - 1) == 0):
        raise UserError('--gpus must be a power of two')
    args.num_gpus = gpus

    if snap is None:
        snap = 50
    assert isinstance(snap, int)
    if snap < 1:
        raise UserError('--snap must be at least 1')
    args.image_snapshot_ticks = snap
    args.network_snapshot_ticks = snap

    if metrics is None:
        # Transfer (DFMGAN) runs use small-sample metrics plus clustered LPIPS.
        if transfer is not None:
            metrics = ['fid5k_full', 'kid5k_full', 'clpips1k']
        else:
            metrics = ['fid50k_full']
    assert isinstance(metrics, list)
    if not all(metric_main.is_valid_metric(metric) for metric in metrics):
        raise UserError('\n'.join(['--metrics can only contain the following values:'] + metric_main.list_valid_metrics()))
    args.metrics = metrics

    if seed is None:
        seed = 0
    assert isinstance(seed, int)
    args.random_seed = seed

    # -----------------------------------
    # Dataset: data, cond, subset, mirror
    # -----------------------------------

    assert data is not None
    assert isinstance(data, str)
    args.training_set_kwargs = dnnlib.EasyDict(class_name='training.dataset.ImageFolderDataset', path=data, use_labels=True, max_size=None, xflip=False)
    args.data_loader_kwargs = dnnlib.EasyDict(pin_memory=True, num_workers=3, prefetch_factor=2)
    try:
        # Instantiate once to validate the path and read resolution/labels/size.
        training_set = dnnlib.util.construct_class_by_name(**args.training_set_kwargs) # subclass of training.dataset.Dataset
        args.training_set_kwargs.resolution = training_set.resolution # be explicit about resolution
        args.training_set_kwargs.use_labels = training_set.has_labels # be explicit about labels
        args.training_set_kwargs.max_size = len(training_set) # be explicit about dataset size
        desc = training_set.name
        del training_set # conserve memory
    except IOError as err:
        raise UserError(f'--data: {err}')

    if cond is None:
        cond = False
    assert isinstance(cond, bool)
    if cond:
        if not args.training_set_kwargs.use_labels:
            raise UserError('--cond=True requires labels specified in dataset.json')
        desc += '-cond'
    else:
        args.training_set_kwargs.use_labels = False

    if subset is not None:
        assert isinstance(subset, int)
        if not 1 <= subset <= args.training_set_kwargs.max_size:
            raise UserError(f'--subset must be between 1 and {args.training_set_kwargs.max_size}')
        desc += f'-subset{subset}'
        if subset < args.training_set_kwargs.max_size:
            args.training_set_kwargs.max_size = subset
            args.training_set_kwargs.random_seed = args.random_seed

    if mirror is None:
        mirror = False
    assert isinstance(mirror, bool)
    if mirror:
        desc += '-mirror'
        args.training_set_kwargs.xflip = True

    # ------------------------------------
    # Base config: cfg, gamma, kimg, batch
    # ------------------------------------

    if cfg is None:
        cfg = 'auto'
    assert isinstance(cfg, str)
    desc += f'-{cfg}'

    cfg_specs = {
        'auto':      dict(ref_gpus=-1, kimg=25000,  mb=-1, mbstd=-1, fmaps=-1,  lrate=-1,     gamma=-1,   ema=-1,  ramp=0.05, map=2), # Populated dynamically based on resolution and GPU count.
        'stylegan2': dict(ref_gpus=8,  kimg=25000,  mb=32, mbstd=4,  fmaps=1,   lrate=0.002,  gamma=10,   ema=10,  ramp=None, map=8), # Uses mixed-precision, unlike the original StyleGAN2.
        'paper256':  dict(ref_gpus=8,  kimg=25000,  mb=64, mbstd=8,  fmaps=0.5, lrate=0.0025, gamma=1,    ema=20,  ramp=None, map=8),
        'paper512':  dict(ref_gpus=8,  kimg=25000,  mb=64, mbstd=8,  fmaps=1,   lrate=0.0025, gamma=0.5,  ema=20,  ramp=None, map=8),
        'paper1024': dict(ref_gpus=8,  kimg=25000,  mb=32, mbstd=4,  fmaps=1,   lrate=0.002,  gamma=2,    ema=10,  ramp=None, map=8),
        'cifar':     dict(ref_gpus=2,  kimg=100000, mb=64, mbstd=32, fmaps=1,   lrate=0.0025, gamma=0.01, ema=500, ramp=0.05, map=2),
    }

    assert cfg in cfg_specs
    spec = dnnlib.EasyDict(cfg_specs[cfg])
    if cfg == 'auto':
        desc += f'{gpus:d}'
        spec.ref_gpus = gpus
        res = args.training_set_kwargs.resolution
        spec.mb = max(min(gpus * min(4096 // res, 32), 64), gpus) # keep gpu memory consumption at bay
        spec.mbstd = min(spec.mb // gpus, 4) # other hyperparams behave more predictably if mbstd group size remains fixed
        spec.fmaps = 1 if res >= 512 else 0.5
        spec.lrate = 0.002 if res >= 1024 else 0.0025
        spec.gamma = 0.0002 * (res ** 2) / spec.mb # heuristic formula
        spec.ema = spec.mb * 10 / 32

    args.G_kwargs = dnnlib.EasyDict(class_name='training.networks.Generator', z_dim=512, w_dim=512, mapping_kwargs=dnnlib.EasyDict(), synthesis_kwargs=dnnlib.EasyDict())
    # 'res_block_uni_dis' uses a discriminator variant with a unified block.
    target_D_class = 'training.networks.%s' % ('DiscriminatorUnified' if transfer == 'res_block_uni_dis' else 'Discriminator')
    args.D_kwargs = dnnlib.EasyDict(class_name=target_D_class, block_kwargs=dnnlib.EasyDict(), mapping_kwargs=dnnlib.EasyDict(), epilogue_kwargs=dnnlib.EasyDict())
    args.G_kwargs.synthesis_kwargs.channel_base = args.D_kwargs.channel_base = int(spec.fmaps * 32768)
    args.G_kwargs.synthesis_kwargs.channel_max = args.D_kwargs.channel_max = 512
    args.G_kwargs.mapping_kwargs.num_layers = spec.map
    args.G_kwargs.synthesis_kwargs.num_fp16_res = args.D_kwargs.num_fp16_res = 4 # enable mixed-precision training
    args.G_kwargs.synthesis_kwargs.conv_clamp = args.D_kwargs.conv_clamp = 256 # clamp activations to avoid float16 overflow
    args.D_kwargs.epilogue_kwargs.mbstd_group_size = spec.mbstd

    if transfer == 'res_block_match_dis':
        # Secondary "matching" discriminator (image, mask) used by DFMGAN.
        args.D_match_kwargs = dnnlib.EasyDict(class_name='training.networks.Discriminator', block_kwargs=dnnlib.EasyDict(), mapping_kwargs=dnnlib.EasyDict(), epilogue_kwargs=dnnlib.EasyDict())
        if dmatch_scale is None:
            dmatch_base = int(spec.fmaps * 32768) # 16384
            dmatch_max = 512
        else:
            # dmatch_scale is formatted as "<channel_base>/<channel_max>".
            dmatch_base = int(dmatch_scale.split('/')[0])
            dmatch_max = int(dmatch_scale.split('/')[1])
        args.D_match_kwargs.channel_base = dmatch_base #int(spec.fmaps * 32768) # 16384 (= 16 * 1024)
        args.D_match_kwargs.channel_max = dmatch_max #512
        args.D_match_kwargs.num_fp16_res = 4
        args.D_match_kwargs.conv_clamp = 256
        args.D_match_kwargs.epilogue_kwargs.mbstd_group_size = spec.mbstd

    args.G_opt_kwargs = dnnlib.EasyDict(class_name='torch.optim.Adam', lr=spec.lrate, betas=[0,0.99], eps=1e-8)
    args.D_opt_kwargs = dnnlib.EasyDict(class_name='torch.optim.Adam', lr=spec.lrate, betas=[0,0.99], eps=1e-8)
    if transfer == 'res_block_match_dis':
        args.D_match_opt_kwargs = dnnlib.EasyDict(class_name='torch.optim.Adam', lr=spec.lrate, betas=[0,0.99], eps=1e-8)
    args.loss_kwargs = dnnlib.EasyDict(class_name='training.loss.StyleGAN2Loss', r1_gamma=spec.gamma)

    args.total_kimg = spec.kimg
    args.batch_size = spec.mb
    args.batch_gpu = spec.mb // spec.ref_gpus
    args.ema_kimg = spec.ema
    args.ema_rampup = spec.ramp

    if cfg == 'cifar':
        args.loss_kwargs.pl_weight = 0 # disable path length regularization
        args.loss_kwargs.style_mixing_prob = 0 # disable style mixing
        args.D_kwargs.architecture = 'orig' # disable residual skip connections

    if gamma is not None:
        assert isinstance(gamma, float)
        if not gamma >= 0:
            raise UserError('--gamma must be non-negative')
        desc += f'-gamma{gamma:g}'
        args.loss_kwargs.r1_gamma = gamma

    if kimg is not None:
        assert isinstance(kimg, int)
        if not kimg >= 1:
            raise UserError('--kimg must be at least 1')
        desc += f'-kimg{kimg:d}'
        args.total_kimg = kimg

    if batch is not None:
        assert isinstance(batch, int)
        if not (batch >= 1 and batch % gpus == 0):
            raise UserError('--batch must be at least 1 and divisible by --gpus')
        desc += f'-batch{batch}'
        args.batch_size = batch
        args.batch_gpu = batch // gpus

    # ---------------------------------------------------
    # Discriminator augmentation: aug, p, target, augpipe
    # ---------------------------------------------------

    if aug is None:
        aug = 'ada'
    else:
        assert isinstance(aug, str)
        desc += f'-{aug}'

    if aug == 'ada':
        args.ada_target = 0.6
    elif aug == 'noaug':
        pass
    elif aug == 'fixed':
        if p is None:
            raise UserError(f'--aug={aug} requires specifying --p')
    else:
        raise UserError(f'--aug={aug} not supported')

    if p is not None:
        assert isinstance(p, float)
        if aug != 'fixed':
            raise UserError('--p can only be specified with --aug=fixed')
        if not 0 <= p <= 1:
            raise UserError('--p must be between 0 and 1')
        desc += f'-p{p:g}'
        args.augment_p = p

    if target is not None:
        assert isinstance(target, float)
        if aug != 'ada':
            raise UserError('--target can only be specified with --aug=ada')
        if not 0 <= target <= 1:
            raise UserError('--target must be between 0 and 1')
        desc += f'-target{target:g}'
        args.ada_target = target

    assert augpipe is None or isinstance(augpipe, str)
    if augpipe is None:
        augpipe = 'bgc'
    else:
        if aug == 'noaug':
            raise UserError('--augpipe cannot be specified with --aug=noaug')
        desc += f'-{augpipe}'

    augpipe_specs = {
        'blit':   dict(xflip=1, rotate90=1, xint=1),
        'geom':   dict(scale=1, rotate=1, aniso=1, xfrac=1),
        'color':  dict(brightness=1, contrast=1, lumaflip=1, hue=1, saturation=1),
        'filter': dict(imgfilter=1),
        'noise':  dict(noise=1),
        'cutout': dict(cutout=1),
        'bg':     dict(xflip=1, rotate90=1, xint=1, scale=1, rotate=1, aniso=1, xfrac=1),
        'bgc':    dict(xflip=1, rotate90=1, xint=1, scale=1, rotate=1, aniso=1, xfrac=1, brightness=1, contrast=1, lumaflip=1, hue=1, saturation=1),
        'bgcf':   dict(xflip=1, rotate90=1, xint=1, scale=1, rotate=1, aniso=1, xfrac=1, brightness=1, contrast=1, lumaflip=1, hue=1, saturation=1, imgfilter=1),
        'bgcfn':  dict(xflip=1, rotate90=1, xint=1, scale=1, rotate=1, aniso=1, xfrac=1, brightness=1, contrast=1, lumaflip=1, hue=1, saturation=1, imgfilter=1, noise=1),
        'bgcfnc': dict(xflip=1, rotate90=1, xint=1, scale=1, rotate=1, aniso=1, xfrac=1, brightness=1, contrast=1, lumaflip=1, hue=1, saturation=1, imgfilter=1, noise=1, cutout=1),
    }

    assert augpipe in augpipe_specs
    if aug != 'noaug':
        args.augment_kwargs = dnnlib.EasyDict(class_name='training.augment.AugmentPipe', **augpipe_specs[augpipe])

    # ----------------------------------
    # Transfer learning: resume, freezed
    # ----------------------------------

    resume_specs = {
        'ffhq256':     'https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada-pytorch/pretrained/transfer-learning-source-nets/ffhq-res256-mirror-paper256-noaug.pkl',
        'ffhq512':     'https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada-pytorch/pretrained/transfer-learning-source-nets/ffhq-res512-mirror-stylegan2-noaug.pkl',
        'ffhq1024':    'https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada-pytorch/pretrained/transfer-learning-source-nets/ffhq-res1024-mirror-stylegan2-noaug.pkl',
        'celebahq256': 'https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada-pytorch/pretrained/transfer-learning-source-nets/celebahq-res256-mirror-paper256-kimg100000-ada-target0.5.pkl',
        'lsundog256':  'https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada-pytorch/pretrained/transfer-learning-source-nets/lsundog-res256-paper256-kimg100000-noaug.pkl',
    }

    assert resume is None or isinstance(resume, str)
    if resume is None:
        resume = 'noresume'
    elif resume == 'noresume':
        desc += '-noresume'
    elif resume in resume_specs:
        desc += f'-resume{resume}'
        args.resume_pkl = resume_specs[resume] # predefined url
    else:
        desc += '-resumecustom'
        args.resume_pkl = resume # custom path or url

    if resume != 'noresume':
        args.ada_kimg = 100 # make ADA react faster at the beginning
        args.ema_rampup = None # disable EMA rampup

    if freezed is not None:
        assert isinstance(freezed, int)
        if not freezed >= 0:
            raise UserError('--freezed must be non-negative')
        desc += f'-freezed{freezed:d}'
        args.D_kwargs.block_kwargs.freeze_layers = freezed

    # -------------------------------------------------
    # Performance options: fp32, nhwc, nobench, workers
    # -------------------------------------------------

    if fp32 is None:
        fp32 = False
    assert isinstance(fp32, bool)
    if fp32:
        args.G_kwargs.synthesis_kwargs.num_fp16_res = args.D_kwargs.num_fp16_res = 0
        args.G_kwargs.synthesis_kwargs.conv_clamp = args.D_kwargs.conv_clamp = None
        # D_match_kwargs only exists for --transfer=res_block_match_dis;
        # touching it unconditionally raised AttributeError for other modes.
        if 'D_match_kwargs' in args:
            args.D_match_kwargs.num_fp16_res = 0
            args.D_match_kwargs.conv_clamp = None

    if nhwc is None:
        nhwc = False
    assert isinstance(nhwc, bool)
    if nhwc:
        args.G_kwargs.synthesis_kwargs.fp16_channels_last = args.D_kwargs.block_kwargs.fp16_channels_last = True
        if 'D_match_kwargs' in args: # see fp32 note above
            args.D_match_kwargs.block_kwargs.fp16_channels_last = True

    if nobench is None:
        nobench = False
    assert isinstance(nobench, bool)
    if nobench:
        args.cudnn_benchmark = False

    if allow_tf32 is None:
        allow_tf32 = False
    assert isinstance(allow_tf32, bool)
    if allow_tf32:
        args.allow_tf32 = True

    if workers is not None:
        assert isinstance(workers, int)
        if not workers >= 1:
            raise UserError('--workers must be at least 1')
        args.data_loader_kwargs.num_workers = workers

    # DFMGAN args
    if ft is None or resume == 'noresume':
        args.ft = 'default'
    else:
        args.ft = ft

    if transfer is None:
        transfer = 'none'
    elif transfer != 'none':
        args.ft = 'transfer' # transfer mode overrides the finetune mode
    args.G_kwargs.transfer = transfer
    args.loss_kwargs.transfer = transfer

    if transfer in ['res_block', 'res_block_match_dis', 'res_block_uni_dis']:
        if res_st is None:
            args.G_kwargs.synthesis_kwargs.res_st = 64
        else:
            args.G_kwargs.synthesis_kwargs.res_st = int(res_st)
    if transfer == 'res_block_uni_dis':
        if uni_st is None:
            args.D_kwargs.uni_st = 64
        else:
            args.D_kwargs.uni_st = int(uni_st)

    if mask_threshold is None:
        args.G_kwargs.mask_threshold = 0.0
    else:
        args.G_kwargs.mask_threshold = float(mask_threshold)

    if lambda_match is None:
        args.loss_kwargs.lambda_match = 1.0
    else:
        args.loss_kwargs.lambda_match = float(lambda_match)

    # Mode-seeking loss is only meaningful for transfer runs.
    args.loss_kwargs.mode_seek = 'none' if (transfer == 'none' or mode_seek is None) else mode_seek
    args.loss_kwargs.lambda_ms = 0.1 if lambda_ms is None else float(lambda_ms)

    if no_round is None:
        no_round = False
    assert isinstance(no_round, bool)
    args.G_kwargs.synthesis_kwargs.no_round = no_round

    if tanh_mask is None:
        tanh_mask = 'none'
    if tanh_k is None:
        tanh_k = 1.0
    args.G_kwargs.synthesis_kwargs.tanh_mask = args.loss_kwargs.tanh_mask = tanh_mask
    args.G_kwargs.synthesis_kwargs.tanh_k = args.loss_kwargs.tanh_k = tanh_k

    return desc, args
#----------------------------------------------------------------------------
def subprocess_fn(rank, args, temp_dir):
    """Per-process entry point: set up logging and distributed state, then train."""
    dnnlib.util.Logger(file_name=os.path.join(args.run_dir, 'log.txt'), file_mode='a', should_flush=True)

    # Bring up torch.distributed when running on more than one GPU, using a
    # shared temp file as the rendezvous point.
    if args.num_gpus > 1:
        sync_file = os.path.abspath(os.path.join(temp_dir, '.torch_distributed_init'))
        if os.name == 'nt':
            # Windows: gloo backend and forward-slash file URL.
            backend = 'gloo'
            url = 'file:///' + sync_file.replace('\\', '/')
        else:
            backend = 'nccl'
            url = f'file://{sync_file}'
        torch.distributed.init_process_group(backend=backend, init_method=url, rank=rank, world_size=args.num_gpus)

    # Initialize cross-process stat collection; silence custom-op build output
    # on every rank except the master.
    stats_device = torch.device('cuda', rank) if args.num_gpus > 1 else None
    training_stats.init_multiprocessing(rank=rank, sync_device=stats_device)
    if rank != 0:
        custom_ops.verbosity = 'none'

    # Run the actual training loop.
    training_loop.training_loop(rank=rank, **args)
#----------------------------------------------------------------------------
class CommaSeparatedList(click.ParamType):
    """Click parameter type that parses 'a,b,c' into ['a', 'b', 'c'].

    A missing value, the empty string, and the literal 'none' (any case)
    all convert to an empty list.
    """
    name = 'list'

    def convert(self, value, param, ctx):
        del param, ctx  # unused, but required by the click interface
        if value is None or value == '' or value.lower() == 'none':
            return []
        return value.split(',')
#----------------------------------------------------------------------------
@click.command()
@click.pass_context

# General options.
@click.option('--outdir', help='Where to save the results', required=True, metavar='DIR')
@click.option('--gpus', help='Number of GPUs to use [default: 1]', type=int, metavar='INT')
@click.option('--snap', help='Snapshot interval [default: 50 ticks]', type=int, metavar='INT')
# Help text fixed: the actual defaults (see setup_training_loop_kwargs) are
# fid50k_full, or fid5k_full,kid5k_full,clpips1k when --transfer is set.
@click.option('--metrics', help='Comma-separated list or "none" [default: fid50k_full; fid5k_full,kid5k_full,clpips1k with --transfer]', type=CommaSeparatedList())
@click.option('--seed', help='Random seed [default: 0]', type=int, metavar='INT')
@click.option('-n', '--dry-run', help='Print training options and exit', is_flag=True)

# Dataset.
@click.option('--data', help='Training data (directory or zip)', metavar='PATH', required=True)
@click.option('--cond', help='Train conditional model based on dataset labels [default: false]', type=bool, metavar='BOOL', is_flag = True)
@click.option('--subset', help='Train with only N images [default: all]', type=int, metavar='INT')
@click.option('--mirror', help='Enable dataset x-flips [default: false]', type=bool, metavar='BOOL')

# Base config.
@click.option('--cfg', help='Base config [default: auto]', type=click.Choice(['auto', 'stylegan2', 'paper256', 'paper512', 'paper1024', 'cifar']))
@click.option('--gamma', help='Override R1 gamma', type=float)
@click.option('--kimg', help='Override training duration', type=int, metavar='INT')
@click.option('--batch', help='Override batch size', type=int, metavar='INT')

# Discriminator augmentation.
@click.option('--aug', help='Augmentation mode [default: ada]', type=click.Choice(['noaug', 'ada', 'fixed']))
@click.option('--p', help='Augmentation probability for --aug=fixed', type=float)
@click.option('--target', help='ADA target value for --aug=ada', type=float)
@click.option('--augpipe', help='Augmentation pipeline [default: bgc]', type=click.Choice(['blit', 'geom', 'color', 'filter', 'noise', 'cutout', 'bg', 'bgc', 'bgcf', 'bgcfn', 'bgcfnc']))

# Transfer learning.
@click.option('--resume', help='Resume training [default: noresume]', metavar='PKL')
@click.option('--freezed', help='Freeze-D [default: 0 layers]', type=int, metavar='INT')

# Performance options.
@click.option('--fp32', help='Disable mixed-precision training', type=bool, metavar='BOOL')
@click.option('--nhwc', help='Use NHWC memory format with FP16', type=bool, metavar='BOOL')
@click.option('--nobench', help='Disable cuDNN benchmarking', type=bool, metavar='BOOL')
@click.option('--allow-tf32', help='Allow PyTorch to use TF32 internally', type=bool, metavar='BOOL')
@click.option('--workers', help='Override number of DataLoader workers', type=int, metavar='INT')

# DFMGAN args
@click.option('--ft', help='Finetune mode [default: default]',
    type=click.Choice(['default', 'ft_map', 'ft_syn', 'ft_syn_2', 'ft_map_syn_2']))
@click.option('--transfer', help='Extra network for transfer learning [default: none]', type=click.Choice(['none', 'dual_mod', 'res_block', 'res_block_match_dis', 'res_block_uni_dis']))
@click.option('--res-st', help='Starting resolution for ResBlock [default: 64]', type=click.Choice(['4', '8', '16', '32', '64', '128', '256']), metavar='INT')
@click.option('--uni-st', help='Starting resolution for UnifiedBlock of Discriminator [default: 64]', type=click.Choice(['4', '8', '16', '32', '64', '128', '256']), metavar='INT')
@click.option('--mask-threshold', help='The threshold value between mask/non-mask regions [default: 0.0]', type=float)
@click.option('--lambda-match', help='Gmain_loss = loss_from_D + lambda * loss_from_D_match [default: 1.0]', type=float)
@click.option('--mode-seek', help='Method for mode seeking loss [default: w/mask]', default='w/mask', type=click.Choice(['none', 'w/mask', 'w/img', 'z/mask']))
@click.option('--lambda-ms', help='loss_Gmain + lambda * loss_MS [default: 1.0]', type=float)
@click.option('--no-round', help='Use a soft mask if setting True [default: False]', type=bool, metavar='BOOL', is_flag = True)
@click.option('--tanh-k', help='mask = tanh(k * raw_mask) [default: 10.0]', default=10.0, type=float)
@click.option('--tanh-mask', help='Add tanh() to mask [default: late]', default='late', type=click.Choice(['none', 'late']))
@click.option('--dmatch-scale', help='D_match channel base / channel max [default: 4096/128]', default='4096/128', type=click.Choice(['16384/512', '8192/256', '4096/128']))
#@click.option('--transfer-cond', help='Enable multi-class defects [default: False]', type = bool, metavar = 'BOOL', is_flag = True)

def main(ctx, outdir, dry_run, **config_kwargs):
    """Train a GAN using the techniques described in the paper
    "Training Generative Adversarial Networks with Limited Data".

    Examples:

    \b
    # Train with custom dataset using 1 GPU.
    python train.py --outdir=~/training-runs --data=~/mydataset.zip --gpus=1

    \b
    # Train class-conditional CIFAR-10 using 2 GPUs.
    python train.py --outdir=~/training-runs --data=~/datasets/cifar10.zip \\
        --gpus=2 --cfg=cifar --cond=1

    \b
    # Transfer learn MetFaces from FFHQ using 4 GPUs.
    python train.py --outdir=~/training-runs --data=~/datasets/metfaces.zip \\
        --gpus=4 --cfg=paper1024 --mirror=1 --resume=ffhq1024 --snap=10

    \b
    # Reproduce original StyleGAN2 config F.
    python train.py --outdir=~/training-runs --data=~/datasets/ffhq.zip \\
        --gpus=8 --cfg=stylegan2 --mirror=1 --aug=noaug

    \b
    Base configs (--cfg):
      auto       Automatically select reasonable defaults based on resolution
                 and GPU count. Good starting point for new datasets.
      stylegan2  Reproduce results for StyleGAN2 config F at 1024x1024.
      paper256   Reproduce results for FFHQ and LSUN Cat at 256x256.
      paper512   Reproduce results for BreCaHAD and AFHQ at 512x512.
      paper1024  Reproduce results for MetFaces at 1024x1024.
      cifar      Reproduce results for CIFAR-10 at 32x32.

    \b
    Transfer learning source networks (--resume):
      ffhq256        FFHQ trained at 256x256 resolution.
      ffhq512        FFHQ trained at 512x512 resolution.
      ffhq1024       FFHQ trained at 1024x1024 resolution.
      celebahq256    CelebA-HQ trained at 256x256 resolution.
      lsundog256     LSUN Dog trained at 256x256 resolution.
      <PATH or URL>  Custom network pickle.
    """
    dnnlib.util.Logger(should_flush=True)

    # Setup training options: validate all CLI flags and build the kwargs dict.
    try:
        run_desc, args = setup_training_loop_kwargs(**config_kwargs)
    except UserError as err:
        ctx.fail(err)

    # Pick output directory: next sequential 5-digit run id under outdir.
    prev_run_dirs = []
    if os.path.isdir(outdir):
        prev_run_dirs = [x for x in os.listdir(outdir) if os.path.isdir(os.path.join(outdir, x))]
    prev_run_ids = [re.match(r'^\d+', x) for x in prev_run_dirs]
    prev_run_ids = [int(x.group()) for x in prev_run_ids if x is not None]
    cur_run_id = max(prev_run_ids, default=-1) + 1
    args.run_dir = os.path.join(outdir, f'{cur_run_id:05d}-{run_desc}')
    assert not os.path.exists(args.run_dir)

    # Print options.
    print()
    print('Training options:')
    print(json.dumps(args, indent=2))
    print()
    print(f'Output directory:   {args.run_dir}')
    print(f'Training data:      {args.training_set_kwargs.path}')
    print(f'Training duration:  {args.total_kimg} kimg')
    print(f'Number of GPUs:     {args.num_gpus}')
    print(f'Number of images:   {args.training_set_kwargs.max_size}')
    print(f'Image resolution:   {args.training_set_kwargs.resolution}')
    print(f'Conditional model:  {args.training_set_kwargs.use_labels}')
    print(f'Dataset x-flips:    {args.training_set_kwargs.xflip}')
    print()

    # Dry run?
    if dry_run:
        print('Dry run; exiting.')
        return

    # Create output directory and persist the resolved options for reproducibility.
    print('Creating output directory...')
    os.makedirs(args.run_dir)
    with open(os.path.join(args.run_dir, 'training_options.json'), 'wt') as f:
        json.dump(args, f, indent=2)

    # Launch processes: one per GPU, rendezvousing through a temp directory.
    print('Launching processes...')
    torch.multiprocessing.set_start_method('spawn')
    with tempfile.TemporaryDirectory() as temp_dir:
        if args.num_gpus == 1:
            subprocess_fn(rank=0, args=args, temp_dir=temp_dir)
        else:
            torch.multiprocessing.spawn(fn=subprocess_fn, args=(args, temp_dir), nprocs=args.num_gpus)
#----------------------------------------------------------------------------
if __name__ == "__main__":
    # click supplies the CLI arguments, hence the pylint suppression.
    main() # pylint: disable=no-value-for-parameter
#----------------------------------------------------------------------------
| 29,176 | 44.095827 | 192 | py |
DFMGAN | DFMGAN-main/calc_metrics.py | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
"""Calculate quality metrics for previous training run or pretrained network pickle."""
import os
import click
import json
import tempfile
import copy
import torch
import dnnlib
import legacy
from metrics import metric_main
from metrics import metric_utils
from torch_utils import training_stats
from torch_utils import custom_ops
from torch_utils import misc
#----------------------------------------------------------------------------
def subprocess_fn(rank, args, temp_dir):
    """Per-GPU worker: set up (optional) distributed state, build G, and
    evaluate every metric in ``args.metrics``, reporting results from rank 0.

    Args:
        rank: Process rank in ``[0, args.num_gpus)``.
        args: ``dnnlib.EasyDict`` prepared by ``calc_metrics()``.
        temp_dir: Scratch directory holding the torch.distributed init file.
    """
    dnnlib.util.Logger(should_flush=True)
    # Init torch.distributed (only needed for multi-GPU evaluation).
    if args.num_gpus > 1:
        init_file = os.path.abspath(os.path.join(temp_dir, '.torch_distributed_init'))
        if os.name == 'nt':
            # Windows lacks NCCL; use gloo with a forward-slash file:// URL.
            init_method = 'file:///' + init_file.replace('\\', '/')
            torch.distributed.init_process_group(backend='gloo', init_method=init_method, rank=rank, world_size=args.num_gpus)
        else:
            init_method = f'file://{init_file}'
            torch.distributed.init_process_group(backend='nccl', init_method=init_method, rank=rank, world_size=args.num_gpus)
    # Init torch_utils: stat syncing across processes and silenced custom ops.
    sync_device = torch.device('cuda', rank) if args.num_gpus > 1 else None
    training_stats.init_multiprocessing(rank=rank, sync_device=sync_device)
    if rank != 0 or not args.verbose:
        custom_ops.verbosity = 'none'
    # Print network summary.
    # NOTE(review): G is only instantiated when no second dataset is given;
    # with args.dataset2_kwargs present, G stays None and the metrics are
    # presumably dataset-vs-dataset — confirm against metric_main.calc_metric.
    G = None
    device = torch.device('cuda', rank)
    if not hasattr(args, 'dataset2_kwargs'):
        torch.backends.cudnn.benchmark = True
        torch.backends.cuda.matmul.allow_tf32 = False
        torch.backends.cudnn.allow_tf32 = False
        G = copy.deepcopy(args.G).eval().requires_grad_(False).to(device)
        '''
        if rank == 0 and args.verbose:
            z = torch.empty([1, G.z_dim], device=device)
            c = torch.empty([1, G.c_dim], device=device)
            misc.print_module_summary(G, [z, c])
        '''
        # Transfer-mode generators take an extra defect latent (and, for the
        # *_dis variants, an output_mask flag) in their forward signature.
        if rank == 0 and args.verbose:
            z = torch.empty([1, G.z_dim], device=device)
            c = torch.empty([1, G.c_dim], device=device)
            input_list = [z, c]
            if G.transfer in ['dual_mod', 'res_block', 'res_block_match_dis', 'res_block_uni_dis']:
                defect_z = torch.empty([1, G.z_dim], device=device)
                input_list.append(defect_z)
                if G.transfer in ['res_block_match_dis', 'res_block_uni_dis']:
                    input_list.append(True)
                    misc.print_module_summary(G, input_list)
                else:
                    misc.print_module_summary(G, input_list)
    # Calculate each metric in turn; only rank 0 prints and writes results.
    for metric in args.metrics:
        if rank == 0 and args.verbose:
            print(f'Calculating {metric}...')
        progress = metric_utils.ProgressMonitor(verbose=args.verbose)
        result_dict = metric_main.calc_metric(metric=metric, G=G, dataset_kwargs=args.dataset_kwargs,
            num_gpus=args.num_gpus, rank=rank, device=device, progress=progress, dataset2_kwargs=args.dataset2_kwargs if hasattr(args, 'dataset2_kwargs') else {}, cache = args.cache)
        if rank == 0:
            metric_main.report_metric(result_dict, run_dir=args.run_dir, snapshot_pkl=args.network_pkl)
        if rank == 0 and args.verbose:
            print()
    # Done.
    if rank == 0 and args.verbose:
        print('Exiting...')
class CommaSeparatedList(click.ParamType):
    """Click parameter type that parses ``'a,b,c'`` into ``['a', 'b', 'c']``.

    ``None``, the empty string, and any casing of ``'none'`` all map to ``[]``;
    an already-parsed list is passed through unchanged.
    """
    name = 'list'

    def convert(self, value, param, ctx):
        """Return *value* as a list of strings (see class docstring)."""
        del param, ctx  # unused, but required by the click interface
        if isinstance(value, list):
            return value
        if value in (None, '') or value.lower() == 'none':
            return []
        return value.split(',')
#----------------------------------------------------------------------------
@click.command()
@click.pass_context
@click.option('network_pkl', '--network', help='Network pickle filename or URL', metavar='PATH')
@click.option('--metrics', help='Comma-separated list or "none"', type=CommaSeparatedList(), default='fid5k_full,kid5k_full,clpips1k', show_default=True)
@click.option('--data', help='Dataset to evaluate metrics against (directory or zip) [default: same as training data]', metavar='PATH')
@click.option('--data2', help='Dataset2 to evaluate metrics against (directory or zip)', metavar='PATH')
@click.option('--mirror', help='Whether the dataset was augmented with x-flips during training [default: look up]', type=bool, metavar='BOOL')
@click.option('--gpus', help='Number of GPUs to use', type=int, default=1, metavar='INT', show_default=True)
@click.option('--verbose', help='Print optional information', type=bool, default=True, metavar='BOOL', show_default=True)
@click.option('--cache', help='Use computed cache', type=bool, default=False, metavar='BOOL', show_default=True)
def calc_metrics(ctx, network_pkl, metrics, data, data2, mirror, gpus, verbose, cache):
    """Calculate quality metrics for previous training run or pretrained network pickle.
    Examples:
    \b
    # Previous training run: look up options automatically, save result to JSONL file.
    python calc_metrics.py --metrics=pr50k3_full \\
        --network=~/training-runs/00000-ffhq10k-res64-auto1/network-snapshot-000000.pkl
    \b
    # Pre-trained network pickle: specify dataset explicitly, print result to stdout.
    python calc_metrics.py --metrics=fid50k_full --data=~/datasets/ffhq.zip --mirror=1 \\
        --network=https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada-pytorch/pretrained/ffhq.pkl
    Available metrics:
    \b
      ADA paper:
        fid50k_full  Frechet inception distance against the full dataset.
        kid50k_full  Kernel inception distance against the full dataset.
        pr50k3_full  Precision and recall againt the full dataset.
        is50k        Inception score for CIFAR-10.
    \b
      StyleGAN and StyleGAN2 papers:
        fid50k       Frechet inception distance against 50k real images.
        kid50k       Kernel inception distance against 50k real images.
        pr50k3       Precision and recall against 50k real images.
        ppl2_wend    Perceptual path length in W at path endpoints against full image.
        ppl_zfull    Perceptual path length in Z for full paths against cropped image.
        ppl_wfull    Perceptual path length in W for full paths against cropped image.
        ppl_zend     Perceptual path length in Z at path endpoints against cropped image.
        ppl_wend     Perceptual path length in W at path endpoints against cropped image.
    """
    dnnlib.util.Logger(should_flush=True)
    # Validate arguments.
    args = dnnlib.EasyDict(metrics=metrics, num_gpus=gpus, network_pkl=network_pkl, verbose=verbose, cache=cache)
    if not all(metric_main.is_valid_metric(metric) for metric in args.metrics):
        ctx.fail('\n'.join(['--metrics can only contain the following values:'] + metric_main.list_valid_metrics()))
    if not args.num_gpus >= 1:
        ctx.fail('--gpus must be at least 1')
    # Load network (optional: when omitted, metrics run dataset-vs-dataset).
    if network_pkl is not None:
        if not dnnlib.util.is_url(network_pkl, allow_file_urls=True) and not os.path.isfile(network_pkl):
            ctx.fail('--network must point to a file or URL')
        if args.verbose:
            print(f'Loading network from "{network_pkl}"...')
        with dnnlib.util.open_url(network_pkl, verbose=args.verbose) as f:
            network_dict = legacy.load_network_pkl(f)
            args.G = network_dict['G_ema'] # subclass of torch.nn.Module
        # Initialize dataset options: --data overrides the options stored in
        # the pickle; otherwise fall back to the training-time dataset.
        if data is not None:
            args.dataset_kwargs = dnnlib.EasyDict(class_name='training.dataset.ImageFolderDataset', path=data)
        elif network_dict['training_set_kwargs'] is not None:
            args.dataset_kwargs = dnnlib.EasyDict(network_dict['training_set_kwargs'])
        else:
            ctx.fail('Could not look up dataset options; please specify --data')
    if data2 is not None:
        args.dataset2_kwargs = dnnlib.EasyDict(class_name='training.dataset.ImageFolderDataset', path=data2)
    # Finalize dataset options.
    # NOTE(review): when --network is omitted this branch dereferences both
    # args.dataset_kwargs and args.dataset2_kwargs, so --data and --data2 are
    # effectively required in that mode (otherwise AttributeError) — confirm.
    if network_pkl is not None:
        args.dataset_kwargs.resolution = args.G.img_resolution
        args.dataset_kwargs.use_labels = (args.G.c_dim != 0)
    else:
        args.dataset_kwargs.resolution = args.dataset2_kwargs.resolution = 256
        args.dataset_kwargs.use_labels = args.dataset2_kwargs.use_labels = False
    if mirror is not None:
        args.dataset_kwargs.xflip = mirror
    # Print dataset options.
    if args.verbose:
        print('Dataset options:')
        print(json.dumps(args.dataset_kwargs, indent=2))
    # Locate run dir: reuse the pickle's directory when it looks like a
    # training-run dir, so report_metric can append to its JSONL file.
    args.run_dir = None
    if network_pkl is not None:
        args.run_dir = None  # NOTE(review): redundant re-assignment; harmless.
        if os.path.isfile(network_pkl):
            pkl_dir = os.path.dirname(network_pkl)
            if os.path.isfile(os.path.join(pkl_dir, 'training_options.json')):
                args.run_dir = pkl_dir
    # Launch processes (one per GPU; single-GPU runs in-process).
    if args.verbose:
        print('Launching processes...')
    torch.multiprocessing.set_start_method('spawn')
    with tempfile.TemporaryDirectory() as temp_dir:
        if args.num_gpus == 1:
            subprocess_fn(rank=0, args=args, temp_dir=temp_dir)
        else:
            torch.multiprocessing.spawn(fn=subprocess_fn, args=(args, temp_dir), nprocs=args.num_gpus)
#----------------------------------------------------------------------------
# Script entry point: dispatches to the click command defined above.
if __name__ == "__main__":
    calc_metrics() # pylint: disable=no-value-for-parameter
#----------------------------------------------------------------------------
| 9,992 | 43.413333 | 182 | py |
DFMGAN | DFMGAN-main/training/loss.py | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import numpy as np
import torch
from torch_utils import training_stats
from torch_utils import misc
from torch_utils.ops import conv2d_gradfix
#----------------------------------------------------------------------------
class Loss:
    """Abstract base class for GAN training losses."""

    def accumulate_gradients(self, phase, real_img, real_c, gen_z, gen_c, sync, gain):
        """Compute the losses for one training phase and backpropagate them.

        Subclasses must override this; the base implementation always raises.
        """
        raise NotImplementedError()
#----------------------------------------------------------------------------
class StyleGAN2Loss(Loss):
    """StyleGAN2 non-saturating logistic loss, extended for DFMGAN transfer
    learning: an optional defect mapping network, a matching discriminator
    (``D_match``) judging image/mask pairs, a unified discriminator variant
    taking image+mask, and several mode-seeking regularizers.
    """
    def __init__(self, device, G_mapping, G_synthesis, D, lambda_match, lambda_ms, mode_seek, tanh_mask, tanh_k, D_match = None, augment_pipe=None, G_defect_mapping = None, style_mixing_prob=0.9, r1_gamma=10, pl_batch_shrink=2, pl_decay=0.01, pl_weight=2,
        transfer=None):
        # lambda_match: weight of the matching-discriminator term in G's loss.
        # lambda_ms:    weight of the mode-seeking term in G's loss.
        # mode_seek:    'none' | 'w/mask' | 'w/img' | 'z/mask' (ratio variants).
        # tanh_mask:    when 'late', masks are squashed by tanh(tanh_k * mask)
        #               just before being fed to D_match.
        # transfer:     'none' | 'dual_mod' | 'res_block' |
        #               'res_block_match_dis' | 'res_block_uni_dis'.
        super().__init__()
        self.device = device
        self.G_mapping = G_mapping
        self.G_synthesis = G_synthesis
        self.D = D
        self.D_match = D_match
        if transfer == 'res_block_match_dis':
            assert self.D_match is not None
        self.augment_pipe = augment_pipe
        self.style_mixing_prob = style_mixing_prob
        self.r1_gamma = r1_gamma
        self.pl_batch_shrink = pl_batch_shrink
        self.pl_decay = pl_decay
        self.pl_weight = pl_weight
        # Running mean of path lengths for path-length regularization (on device).
        self.pl_mean = torch.zeros([], device=device)
        self.G_defect_mapping = G_defect_mapping
        self.transfer = transfer
        self.lambda_match = lambda_match
        self.lambda_ms = lambda_ms
        self.mode_seek = mode_seek
        self.tanh_mask = tanh_mask
        self.tanh_k = tanh_k
        self.phases_printed = False
    def run_G(self, z, c, sync, defect_z = None, transfer = 'none', output_mask = False, mode_seek = 'none'):
        """Map latents through G (with style mixing) and synthesize images.

        Depending on *transfer*/*output_mask*/*mode_seek*, returns one of:
        (img, input_list), (img, mask, input_list), or
        (img, mask, half_mask_or_img, input_list) — where input_list holds the
        intermediate w vectors used later for path-length regularization.
        """
        with misc.ddp_sync(self.G_mapping, sync):
            ws = self.G_mapping(z, c)
            # Style mixing: with prob style_mixing_prob, replace ws beyond a
            # random cutoff with the mapping of a fresh latent.
            if self.style_mixing_prob > 0:
                with torch.autograd.profiler.record_function('style_mixing'):
                    cutoff = torch.empty([], dtype=torch.int64, device=ws.device).random_(1, ws.shape[1])
                    cutoff = torch.where(torch.rand([], device=ws.device) < self.style_mixing_prob, cutoff, torch.full_like(cutoff, ws.shape[1]))
                    ws[:, cutoff:] = self.G_mapping(torch.randn_like(z), c, skip_w_avg_update=True)[:, cutoff:]
        if transfer != 'none':
            # Same mapping + style-mixing procedure for the defect latent.
            with misc.ddp_sync(self.G_defect_mapping, sync):
                defect_ws = self.G_defect_mapping(defect_z, c)
                if self.style_mixing_prob > 0:
                    with torch.autograd.profiler.record_function('style_mixing'):
                        defect_cutoff = torch.empty([], dtype=torch.int64, device=defect_ws.device).random_(1, defect_ws.shape[1])
                        defect_cutoff = torch.where(torch.rand([], device=defect_ws.device) < self.style_mixing_prob, defect_cutoff, torch.full_like(defect_cutoff, defect_ws.shape[1]))
                        defect_ws[:, defect_cutoff:] = self.G_defect_mapping(torch.randn_like(defect_z), c, skip_w_avg_update=True)[:, defect_cutoff:]
        with misc.ddp_sync(self.G_synthesis, sync):
            input_list = [ws]
            if transfer == 'none':
                img = self.G_synthesis(ws)
            elif transfer == 'dual_mod':
                # NOTE(review): in-place add mutates ws (also referenced by
                # input_list) rather than creating a new tensor — confirm this
                # aliasing is intended for the Gpl gradient computation.
                ws += defect_ws
                img = self.G_synthesis(ws)
            elif transfer == 'res_block':
                img = self.G_synthesis(ws, defect_ws)
                input_list.append(defect_ws)
            elif transfer in ['res_block_match_dis', 'res_block_uni_dis']:
                if output_mask:
                    img, mask = self.G_synthesis(ws, defect_ws, output_mask = output_mask)
                else:
                    img = self.G_synthesis(ws, defect_ws, output_mask = output_mask)
                input_list.append(defect_ws)
                if mode_seek in ['w/mask', 'w/img', 'z/mask'] and output_mask:
                    # Cross-pair: first half of ws with second half of
                    # defect_ws, producing the "half" outputs used by the
                    # mode-seeking ratio below.
                    half_batch = ws.shape[0] // 2
                    half_img, half_mask = self.G_synthesis(ws[:half_batch], defect_ws[half_batch:], output_mask = True)
        if transfer in ['res_block_match_dis', 'res_block_uni_dis'] and output_mask:
            if mode_seek in ['w/mask', 'z/mask']:
                return img, mask, half_mask, input_list
            elif mode_seek == 'w/img':
                return img, mask, half_img, input_list
            return img, mask, input_list
        else:
            return img, input_list
    def run_D(self, img, c, sync):
        """Run the (optionally ADA-augmented) discriminator on images."""
        if self.augment_pipe is not None:
            img = self.augment_pipe(img)
        with misc.ddp_sync(self.D, sync):
            logits = self.D(img, c)
        return logits
    def run_D_uni(self, img, mask, c, sync):
        """Run the unified discriminator on (image, mask); no augmentation."""
        #if self.augment_pipe is not None:
        #    img = self.augment_pipe(img)
        with misc.ddp_sync(self.D, sync):
            logits = self.D(img, mask, c)
        return logits
    def run_D_match(self, img_mask, c, sync):
        """Run the matching discriminator on channel-concatenated image+mask."""
        #if self.augment_pipe is not None:
        #    img = self.augment_pipe(img)
        with misc.ddp_sync(self.D_match, sync):
            logits = self.D_match(img_mask, c)
        return logits
    def accumulate_gradients(self, phase, real_img, real_c, gen_z, gen_c, sync, gain, gen_defect_z = None, real_mask = None, mask_threshold = 0.0):
        """Compute and backpropagate the losses for one training phase.

        Phases: Gmain/Greg/Gboth (generator + path-length reg),
        Dmain/Dreg/Dboth (discriminator + R1), and D_match* analogues for
        the matching discriminator. Gradients are scaled by *gain*; stats
        are reported via training_stats.
        """
        assert phase in ['Gmain', 'Greg', 'Gboth', 'Dmain', 'Dreg', 'Dboth', 'D_matchmain', 'D_matchreg', 'D_matchboth']
        do_Gmain = (phase in ['Gmain', 'Gboth'])
        do_Dmain = (phase in ['Dmain', 'Dboth'])
        do_D_matchmain = (phase in ['D_matchmain', 'D_matchboth'])
        do_Gpl   = (phase in ['Greg', 'Gboth']) and (self.pl_weight != 0)
        do_Dr1   = (phase in ['Dreg', 'Dboth']) and (self.r1_gamma != 0)
        do_D_matchr1   = (phase in ['D_matchreg', 'D_matchboth']) and (self.r1_gamma != 0)
        # print({
        #     'do_Gmain': do_Gmain,
        #     'do_Dmain': do_Dmain,
        #     'do_D_matchmain': do_D_matchmain,
        #     'do_Gpl': do_Gpl,
        #     'do_Dr1': do_Dr1,
        #     'do_D_matchr1': do_D_matchr1,
        # })
        # Gmain: Maximize logits for generated images.
        if do_Gmain:
            # Mode seeking needs an even batch so it can be split in half.
            if self.mode_seek != 'none':
                assert gen_z.shape[0] % 2 == 0
            with torch.autograd.profiler.record_function('Gmain_forward'):
                if self.transfer in ['res_block_match_dis', 'res_block_uni_dis']:
                    if self.mode_seek == 'none':
                        gen_img, gen_mask, inputs = self.run_G(gen_z, gen_c, sync=(sync and not do_Gpl), defect_z = gen_defect_z, transfer = self.transfer, output_mask = True) # May get synced by Gpl.
                    elif self.mode_seek in ['w/mask', 'z/mask']:
                        gen_img, gen_mask, gen_half_mask, inputs = self.run_G(gen_z, gen_c, sync=(sync and not do_Gpl), defect_z = gen_defect_z, transfer = self.transfer, output_mask = True, mode_seek = self.mode_seek) # May get synced by Gpl.
                    elif self.mode_seek == 'w/img':
                        gen_img, gen_mask, gen_half_img, inputs = self.run_G(gen_z, gen_c, sync=(sync and not do_Gpl), defect_z = gen_defect_z, transfer = self.transfer, output_mask = True, mode_seek = self.mode_seek) # May get synced by Gpl.
                else:
                    gen_img, inputs = self.run_G(gen_z, gen_c, sync=(sync and not do_Gpl), defect_z = gen_defect_z, transfer = self.transfer) # May get synced by Gpl.
                if self.transfer == 'res_block_uni_dis':
                    gen_logits = self.run_D_uni(gen_img, gen_mask, gen_c, sync=False)
                else:
                    gen_logits = self.run_D(gen_img, gen_c, sync=False)
                training_stats.report('Loss/scores/fake', gen_logits)
                training_stats.report('Loss/signs/fake', gen_logits.sign())
                loss_Gmain = torch.nn.functional.softplus(-gen_logits) # -log(sigmoid(gen_logits))
                if self.transfer == 'res_block_match_dis':
                    # Extra matching loss: D_match judges the (image, mask) pair.
                    if self.tanh_mask == 'late':
                        gen_mask = torch.tanh(self.tanh_k * gen_mask)
                        if self.mode_seek in ['w/mask', 'z/mask']:
                            gen_half_mask = torch.tanh(self.tanh_k * gen_half_mask)
                    gen_img_mask = torch.cat([gen_img, gen_mask], dim = 1)
                    gen_match_logits = self.run_D_match(gen_img_mask, gen_c, sync=False)
                    training_stats.report('Loss/scores/fake_match', gen_match_logits)
                    training_stats.report('Loss/signs/fake_match', gen_match_logits.sign())
                    loss_Gmain = loss_Gmain + self.lambda_match * torch.nn.functional.softplus(-gen_match_logits)
                # Mode-seeking loss: ratio of latent distance to output
                # (mask/image) distance between the two half-batches;
                # minimizing it pushes different latents toward different outputs.
                if self.mode_seek in ['w/mask', 'w/img', 'z/mask']:
                    assert len(inputs) == 2
                    assert gen_z.shape[0] % 2 == 0
                    half_batch_size = gen_z.shape[0] // 2
                    if self.mode_seek in ['w/mask', 'w/img']:
                        w = inputs[1]
                        w1, w2 = w[:half_batch_size], w[half_batch_size:]
                        if self.mode_seek == 'w/mask':
                            mask1, mask2 = gen_mask[:half_batch_size], gen_half_mask
                            loss_MS = (w1 - w2).abs().mean() / (mask1 - mask2).abs().mean()
                        elif self.mode_seek == 'w/img':
                            img1, img2 = gen_img[:half_batch_size], gen_half_img
                            loss_MS = (w1 - w2).abs().mean() / (img1 - img2).abs().mean()
                    elif self.mode_seek == 'z/mask':
                        z1, z2 = gen_defect_z[:half_batch_size], gen_defect_z[half_batch_size:]
                        mask1, mask2 = gen_mask[:half_batch_size], gen_half_mask
                        loss_MS = (z1 - z2).abs().mean() / (mask1 - mask2).abs().mean()
                    training_stats.report('Loss/mode_seek', loss_MS)
                training_stats.report('Loss/G/loss', loss_Gmain)
            with torch.autograd.profiler.record_function('Gmain_backward'):
                (loss_Gmain if self.mode_seek == 'none' else loss_Gmain + self.lambda_ms * loss_MS).mean().mul(gain).backward()
        # Gpl: Apply path length regularization.
        if do_Gpl:
            with torch.autograd.profiler.record_function('Gpl_forward'):
                batch_size = gen_z.shape[0] // self.pl_batch_shrink
                gen_img, input_list = self.run_G(gen_z[:batch_size], gen_c[:batch_size], sync=sync, defect_z = gen_defect_z[:batch_size] if gen_defect_z is not None else None, transfer = self.transfer)
                pl_noise = torch.randn_like(gen_img) / np.sqrt(gen_img.shape[2] * gen_img.shape[3])
                with torch.autograd.profiler.record_function('pl_grads'), conv2d_gradfix.no_weight_gradients():
                    # NOTE(review): only the gradient w.r.t. the first entry of
                    # input_list (ws) is used — defect_ws (when present) is
                    # excluded from the penalty; confirm this is intended.
                    pl_grads = torch.autograd.grad(outputs=[(gen_img * pl_noise).sum()], inputs=input_list, create_graph=True, only_inputs=True)[0]
                pl_lengths = pl_grads.square().sum(2).mean(1).sqrt()
                # Exponential moving average of path lengths.
                pl_mean = self.pl_mean.lerp(pl_lengths.mean(), self.pl_decay)
                self.pl_mean.copy_(pl_mean.detach())
                pl_penalty = (pl_lengths - pl_mean).square()
                training_stats.report('Loss/pl_penalty', pl_penalty)
                loss_Gpl = pl_penalty * self.pl_weight
                training_stats.report('Loss/G/reg', loss_Gpl)
            with torch.autograd.profiler.record_function('Gpl_backward'):
                # The 0-multiplied term keeps gen_img in the graph for DDP sync.
                (gen_img[:, 0, 0, 0] * 0 + loss_Gpl).mean().mul(gain).backward()
        # Dmain: Minimize logits for generated images.
        loss_Dgen = 0
        if do_Dmain:
            with torch.autograd.profiler.record_function('Dgen_forward'):
                if self.transfer == 'res_block_uni_dis':
                    gen_img, gen_mask, _ = self.run_G(gen_z, gen_c, sync=False, defect_z = gen_defect_z, transfer = self.transfer, output_mask = True)
                    gen_logits = self.run_D_uni(gen_img, gen_mask, gen_c, sync=False)
                else:
                    gen_img, _ = self.run_G(gen_z, gen_c, sync=False, defect_z = gen_defect_z, transfer = self.transfer)
                    gen_logits = self.run_D(gen_img, gen_c, sync=False) # Gets synced by loss_Dreal.
                training_stats.report('Loss/scores/fake', gen_logits)
                training_stats.report('Loss/signs/fake', gen_logits.sign())
                loss_Dgen = torch.nn.functional.softplus(gen_logits) # -log(1 - sigmoid(gen_logits))
            with torch.autograd.profiler.record_function('Dgen_backward'):
                loss_Dgen.mean().mul(gain).backward()
        # Dmain: Maximize logits for real images.
        # Dr1: Apply R1 regularization.
        if do_Dmain or do_Dr1:
            name = 'Dreal_Dr1' if do_Dmain and do_Dr1 else 'Dreal' if do_Dmain else 'Dr1'
            with torch.autograd.profiler.record_function(name + '_forward'):
                real_img_tmp = real_img.detach().requires_grad_(do_Dr1)
                if self.transfer == 'res_block_uni_dis':
                    real_mask_tmp = real_mask.detach().requires_grad_(do_Dr1)
                    real_logits = self.run_D_uni(real_img_tmp, real_mask_tmp, real_c, sync=sync)
                else:
                    real_logits = self.run_D(real_img_tmp, real_c, sync=sync)
                training_stats.report('Loss/scores/real', real_logits)
                training_stats.report('Loss/signs/real', real_logits.sign())
                loss_Dreal = 0
                if do_Dmain:
                    loss_Dreal = torch.nn.functional.softplus(-real_logits) # -log(sigmoid(real_logits))
                    training_stats.report('Loss/D/loss', loss_Dgen + loss_Dreal)
                loss_Dr1 = 0
                if do_Dr1:
                    # R1: penalize gradient of D's output w.r.t. real images.
                    with torch.autograd.profiler.record_function('r1_grads'), conv2d_gradfix.no_weight_gradients():
                        r1_grads = torch.autograd.grad(outputs=[real_logits.sum()], inputs=[real_img_tmp], create_graph=True, only_inputs=True)[0]
                    r1_penalty = r1_grads.square().sum([1,2,3])
                    loss_Dr1 = r1_penalty * (self.r1_gamma / 2)
                    training_stats.report('Loss/r1_penalty', r1_penalty)
                    training_stats.report('Loss/D/reg', loss_Dr1)
            with torch.autograd.profiler.record_function(name + '_backward'):
                (real_logits * 0 + loss_Dreal + loss_Dr1).mean().mul(gain).backward()
        # D_matchmain: Minimize matching logits for generated images&masks.
        loss_D_matchgen = 0
        if do_D_matchmain:
            with torch.autograd.profiler.record_function('D_matchgen_forward'):
                gen_img, gen_mask, _ = self.run_G(gen_z, gen_c, sync=False, defect_z = gen_defect_z, transfer = self.transfer, output_mask = True)
                if self.tanh_mask == 'late':
                    gen_mask = torch.tanh(self.tanh_k * gen_mask)
                gen_img_mask = torch.cat([gen_img, gen_mask], dim = 1)
                gen_logits = self.run_D_match(gen_img_mask, gen_c, sync=False) # Gets synced by loss_Dreal.
                training_stats.report('Loss/scores/fake_match', gen_logits)
                training_stats.report('Loss/signs/fake_match', gen_logits.sign())
                loss_D_matchgen = torch.nn.functional.softplus(gen_logits) # -log(1 - sigmoid(gen_logits))
            with torch.autograd.profiler.record_function('D_matchgen_backward'):
                loss_D_matchgen.mean().mul(gain).backward()
        # D_matchmain: Maximize matching logits for real images&masks.
        # D_matchr1: Apply R1 regularization.
        if do_D_matchmain or do_D_matchr1:
            name = 'D_matchreal_Dr1' if do_D_matchmain and do_D_matchr1 else 'D_matchreal' if do_D_matchmain else 'D_matchr1'
            with torch.autograd.profiler.record_function(name + '_forward_match'):
                real_img_tmp = real_img.detach().requires_grad_(do_D_matchr1)
                real_mask_tmp = real_mask.detach().requires_grad_(do_D_matchr1)
                real_img_mask_tmp = torch.cat([real_img_tmp, real_mask_tmp], dim = 1)
                real_logits = self.run_D_match(real_img_mask_tmp, real_c, sync=sync)
                training_stats.report('Loss/scores/real_match', real_logits)
                training_stats.report('Loss/signs/real_match', real_logits.sign())
                loss_D_matchreal = 0
                if do_D_matchmain:
                    loss_D_matchreal = torch.nn.functional.softplus(-real_logits) # -log(sigmoid(real_logits))
                    training_stats.report('Loss/D_match/loss', loss_D_matchgen + loss_D_matchreal)
                loss_D_matchr1 = 0
                if do_D_matchr1:
                    # R1 on the concatenated (image, mask) input of D_match.
                    with torch.autograd.profiler.record_function('r1_grads_match'), conv2d_gradfix.no_weight_gradients():
                        r1_grads = torch.autograd.grad(outputs=[real_logits.sum()], inputs=[real_img_mask_tmp], create_graph=True, only_inputs=True)[0]
                    r1_penalty = r1_grads.square().sum([1,2,3])
                    loss_D_matchr1 = r1_penalty * (self.r1_gamma / 2)
                    training_stats.report('Loss/r1_penalty_match', r1_penalty)
                    training_stats.report('Loss/D_match/reg', loss_D_matchr1)
            with torch.autograd.profiler.record_function(name + '_backward_match'):
                (real_logits * 0 + loss_D_matchreal + loss_D_matchr1).mean().mul(gain).backward()
#----------------------------------------------------------------------------
| 18,203 | 56.974522 | 266 | py |
DFMGAN | DFMGAN-main/training/augment.py | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import numpy as np
import scipy.signal
import torch
from torch_utils import persistence
from torch_utils import misc
from torch_utils.ops import upfirdn2d
from torch_utils.ops import grid_sample_gradfix
from torch_utils.ops import conv2d_gradfix
#----------------------------------------------------------------------------
# Coefficients of various wavelet decomposition low-pass filters.
wavelets = {
    # Keys are PyWavelets-style family names (Haar, Daubechies dbN,
    # symlet symN); values are the low-pass decomposition filter taps.
    # 'haar' and 'db1' are the same filter.
    'haar': [0.7071067811865476, 0.7071067811865476],
    'db1': [0.7071067811865476, 0.7071067811865476],
    'db2': [-0.12940952255092145, 0.22414386804185735, 0.836516303737469, 0.48296291314469025],
    'db3': [0.035226291882100656, -0.08544127388224149, -0.13501102001039084, 0.4598775021193313, 0.8068915093133388, 0.3326705529509569],
    'db4': [-0.010597401784997278, 0.032883011666982945, 0.030841381835986965, -0.18703481171888114, -0.02798376941698385, 0.6308807679295904, 0.7148465705525415, 0.23037781330885523],
    'db5': [0.003335725285001549, -0.012580751999015526, -0.006241490213011705, 0.07757149384006515, -0.03224486958502952, -0.24229488706619015, 0.13842814590110342, 0.7243085284385744, 0.6038292697974729, 0.160102397974125],
    'db6': [-0.00107730108499558, 0.004777257511010651, 0.0005538422009938016, -0.031582039318031156, 0.02752286553001629, 0.09750160558707936, -0.12976686756709563, -0.22626469396516913, 0.3152503517092432, 0.7511339080215775, 0.4946238903983854, 0.11154074335008017],
    'db7': [0.0003537138000010399, -0.0018016407039998328, 0.00042957797300470274, 0.012550998556013784, -0.01657454163101562, -0.03802993693503463, 0.0806126091510659, 0.07130921926705004, -0.22403618499416572, -0.14390600392910627, 0.4697822874053586, 0.7291320908465551, 0.39653931948230575, 0.07785205408506236],
    'db8': [-0.00011747678400228192, 0.0006754494059985568, -0.0003917403729959771, -0.00487035299301066, 0.008746094047015655, 0.013981027917015516, -0.04408825393106472, -0.01736930100202211, 0.128747426620186, 0.00047248457399797254, -0.2840155429624281, -0.015829105256023893, 0.5853546836548691, 0.6756307362980128, 0.3128715909144659, 0.05441584224308161],
    'sym2': [-0.12940952255092145, 0.22414386804185735, 0.836516303737469, 0.48296291314469025],
    'sym3': [0.035226291882100656, -0.08544127388224149, -0.13501102001039084, 0.4598775021193313, 0.8068915093133388, 0.3326705529509569],
    'sym4': [-0.07576571478927333, -0.02963552764599851, 0.49761866763201545, 0.8037387518059161, 0.29785779560527736, -0.09921954357684722, -0.012603967262037833, 0.0322231006040427],
    'sym5': [0.027333068345077982, 0.029519490925774643, -0.039134249302383094, 0.1993975339773936, 0.7234076904024206, 0.6339789634582119, 0.01660210576452232, -0.17532808990845047, -0.021101834024758855, 0.019538882735286728],
    'sym6': [0.015404109327027373, 0.0034907120842174702, -0.11799011114819057, -0.048311742585633, 0.4910559419267466, 0.787641141030194, 0.3379294217276218, -0.07263752278646252, -0.021060292512300564, 0.04472490177066578, 0.0017677118642428036, -0.007800708325034148],
    'sym7': [0.002681814568257878, -0.0010473848886829163, -0.01263630340325193, 0.03051551316596357, 0.0678926935013727, -0.049552834937127255, 0.017441255086855827, 0.5361019170917628, 0.767764317003164, 0.2886296317515146, -0.14004724044296152, -0.10780823770381774, 0.004010244871533663, 0.010268176708511255],
    'sym8': [-0.0033824159510061256, -0.0005421323317911481, 0.03169508781149298, 0.007607487324917605, -0.1432942383508097, -0.061273359067658524, 0.4813596512583722, 0.7771857517005235, 0.3644418948353314, -0.05194583810770904, -0.027219029917056003, 0.049137179673607506, 0.003808752013890615, -0.01495225833704823, -0.0003029205147213668, 0.0018899503327594609],
}
#----------------------------------------------------------------------------
# Helpers for constructing transformation matrices.
def matrix(*rows, device=None):
    """Assemble a (possibly batched) transformation matrix from row lists.

    Each row is a sequence of scalars and/or tensors; all rows must have the
    same length. If no entry is a tensor, a constant matrix is returned.
    Otherwise every scalar is broadcast to the shape/device of the first
    tensor entry and the result has shape ``batch_shape + (num_rows, -1)``.
    """
    assert all(len(row) == len(rows[0]) for row in rows)
    flat = [entry for row in rows for entry in row]
    tensors = [entry for entry in flat if isinstance(entry, torch.Tensor)]
    if not tensors:
        # Purely numeric input: return a cached constant tensor.
        return misc.constant(np.asarray(rows), device=device)
    template = tensors[0]
    assert device is None or device == template.device
    flat = [
        entry if isinstance(entry, torch.Tensor)
        else misc.constant(entry, shape=template.shape, device=template.device)
        for entry in flat
    ]
    return torch.stack(flat, dim=-1).reshape(template.shape + (len(rows), -1))
def translate2d(tx, ty, **kwargs):
    """Homogeneous 3x3 matrix translating 2D points by (tx, ty)."""
    rows = (
        [1, 0, tx],
        [0, 1, ty],
        [0, 0, 1],
    )
    return matrix(*rows, **kwargs)
def translate3d(tx, ty, tz, **kwargs):
    """Homogeneous 4x4 matrix translating 3D points by (tx, ty, tz)."""
    rows = (
        [1, 0, 0, tx],
        [0, 1, 0, ty],
        [0, 0, 1, tz],
        [0, 0, 0, 1],
    )
    return matrix(*rows, **kwargs)
def scale2d(sx, sy, **kwargs):
    """Homogeneous 3x3 matrix scaling 2D points by (sx, sy)."""
    rows = (
        [sx, 0, 0],
        [0, sy, 0],
        [0, 0, 1],
    )
    return matrix(*rows, **kwargs)
def scale3d(sx, sy, sz, **kwargs):
    """Homogeneous 4x4 matrix scaling 3D points by (sx, sy, sz)."""
    rows = (
        [sx, 0, 0, 0],
        [0, sy, 0, 0],
        [0, 0, sz, 0],
        [0, 0, 0, 1],
    )
    return matrix(*rows, **kwargs)
def rotate2d(theta, **kwargs):
    """Homogeneous 3x3 matrix rotating 2D points by angle theta (radians)."""
    c, s = torch.cos(theta), torch.sin(theta)
    return matrix(
        [c, -s, 0],
        [s, c, 0],
        [0, 0, 1],
        **kwargs)
def rotate3d(v, theta, **kwargs):
    """Homogeneous 4x4 rotation by theta around axis v (Rodrigues' rotation
    formula; v is indexed as v[..., 0:3] and assumed unit length)."""
    ax, ay, az = v[..., 0], v[..., 1], v[..., 2]
    s = torch.sin(theta)
    c = torch.cos(theta)
    cc = 1 - c
    return matrix(
        [ax*ax*cc+c, ax*ay*cc-az*s, ax*az*cc+ay*s, 0],
        [ay*ax*cc+az*s, ay*ay*cc+c, ay*az*cc-ax*s, 0],
        [az*ax*cc-ay*s, az*ay*cc+ax*s, az*az*cc+c, 0],
        [0, 0, 0, 1],
        **kwargs)
def translate2d_inv(tx, ty, **kwargs):
    """Inverse of translate2d: translate by (-tx, -ty)."""
    return translate2d(-tx, -ty, **kwargs)
def scale2d_inv(sx, sy, **kwargs):
    """Inverse of scale2d: scale by the reciprocals (1/sx, 1/sy)."""
    return scale2d(1 / sx, 1 / sy, **kwargs)
def rotate2d_inv(theta, **kwargs):
    """Inverse of rotate2d: rotate by -theta."""
    return rotate2d(-theta, **kwargs)
#----------------------------------------------------------------------------
# Versatile image augmentation pipeline from the paper
# "Training Generative Adversarial Networks with Limited Data".
#
# All augmentations are disabled by default; individual augmentations can
# be enabled by setting their probability multipliers to 1.
@persistence.persistent_class
class AugmentPipe(torch.nn.Module):
    def __init__(self,
        xflip=0, rotate90=0, xint=0, xint_max=0.125,
        scale=0, rotate=0, aniso=0, xfrac=0, scale_std=0.2, rotate_max=1, aniso_std=0.2, xfrac_std=0.125,
        brightness=0, contrast=0, lumaflip=0, hue=0, saturation=0, brightness_std=0.2, contrast_std=0.5, hue_max=1, saturation_std=1,
        imgfilter=0, imgfilter_bands=[1,1,1,1], imgfilter_std=1,
        noise=0, cutout=0, noise_std=0.1, cutout_size=0.5,
    ):
        """Configure the ADA augmentation pipeline.

        All probability-multiplier arguments default to 0 (augmentation
        disabled); set a multiplier to 1 to enable that augmentation, whose
        effective probability is then (multiplier * self.p).
        NOTE(review): imgfilter_bands uses a mutable default list; it is only
        read (copied via list() below), so this is harmless as written.
        """
        super().__init__()
        # Overall multiplier for augmentation probability; adjusted externally
        # (e.g. by the ADA heuristic during training).
        self.register_buffer('p', torch.ones([]))
        # Pixel blitting.
        self.xflip = float(xflip) # Probability multiplier for x-flip.
        self.rotate90 = float(rotate90) # Probability multiplier for 90 degree rotations.
        self.xint = float(xint) # Probability multiplier for integer translation.
        self.xint_max = float(xint_max) # Range of integer translation, relative to image dimensions.
        # General geometric transformations.
        self.scale = float(scale) # Probability multiplier for isotropic scaling.
        self.rotate = float(rotate) # Probability multiplier for arbitrary rotation.
        self.aniso = float(aniso) # Probability multiplier for anisotropic scaling.
        self.xfrac = float(xfrac) # Probability multiplier for fractional translation.
        self.scale_std = float(scale_std) # Log2 standard deviation of isotropic scaling.
        self.rotate_max = float(rotate_max) # Range of arbitrary rotation, 1 = full circle.
        self.aniso_std = float(aniso_std) # Log2 standard deviation of anisotropic scaling.
        self.xfrac_std = float(xfrac_std) # Standard deviation of frational translation, relative to image dimensions.
        # Color transformations.
        self.brightness = float(brightness) # Probability multiplier for brightness.
        self.contrast = float(contrast) # Probability multiplier for contrast.
        self.lumaflip = float(lumaflip) # Probability multiplier for luma flip.
        self.hue = float(hue) # Probability multiplier for hue rotation.
        self.saturation = float(saturation) # Probability multiplier for saturation.
        self.brightness_std = float(brightness_std) # Standard deviation of brightness.
        self.contrast_std = float(contrast_std) # Log2 standard deviation of contrast.
        self.hue_max = float(hue_max) # Range of hue rotation, 1 = full circle.
        self.saturation_std = float(saturation_std) # Log2 standard deviation of saturation.
        # Image-space filtering.
        self.imgfilter = float(imgfilter) # Probability multiplier for image-space filtering.
        self.imgfilter_bands = list(imgfilter_bands) # Probability multipliers for individual frequency bands.
        self.imgfilter_std = float(imgfilter_std) # Log2 standard deviation of image-space filter amplification.
        # Image-space corruptions.
        self.noise = float(noise) # Probability multiplier for additive RGB noise.
        self.cutout = float(cutout) # Probability multiplier for cutout.
        self.noise_std = float(noise_std) # Standard deviation of additive RGB noise.
        self.cutout_size = float(cutout_size) # Size of the cutout rectangle, relative to image dimensions.
        # Setup orthogonal lowpass filter for geometric augmentations.
        self.register_buffer('Hz_geom', upfirdn2d.setup_filter(wavelets['sym6']))
        # Construct filter bank for image-space filtering: split sym2 into
        # low/high half-band filters, then build one band-pass filter per
        # frequency band via iterative upsampling + convolution.
        Hz_lo = np.asarray(wavelets['sym2']) # H(z)
        Hz_hi = Hz_lo * ((-1) ** np.arange(Hz_lo.size)) # H(-z)
        Hz_lo2 = np.convolve(Hz_lo, Hz_lo[::-1]) / 2 # H(z) * H(z^-1) / 2
        Hz_hi2 = np.convolve(Hz_hi, Hz_hi[::-1]) / 2 # H(-z) * H(-z^-1) / 2
        Hz_fbank = np.eye(4, 1) # Bandpass(H(z), b_i)
        for i in range(1, Hz_fbank.shape[0]):
            # Upsample previous bands by 2 (zero-stuffing), refine with the
            # low-pass kernel, then add the high-pass response for band i.
            Hz_fbank = np.dstack([Hz_fbank, np.zeros_like(Hz_fbank)]).reshape(Hz_fbank.shape[0], -1)[:, :-1]
            Hz_fbank = scipy.signal.convolve(Hz_fbank, [Hz_lo2])
            Hz_fbank[i, (Hz_fbank.shape[1] - Hz_hi2.size) // 2 : (Hz_fbank.shape[1] + Hz_hi2.size) // 2] += Hz_hi2
        self.register_buffer('Hz_fbank', torch.as_tensor(Hz_fbank, dtype=torch.float32))
def forward(self, images, debug_percentile=None):
    """Apply the adaptive augmentation pipeline to a batch of images.

    Stages, each gated by its probability multiplier (set in __init__) times
    the overall strength self.p: pixel blitting (x-flip, 90-degree rotations,
    integer translation), general geometric transforms (isotropic/anisotropic
    scaling, rotation, fractional translation), color transforms (brightness,
    contrast, luma flip, hue rotation, saturation), image-space frequency
    filtering, and image-space corruptions (additive noise, cutout).

    Args:
        images: float tensor of shape [batch_size, num_channels, height, width].
        debug_percentile: optional scalar in [0, 1); when given, every random
            draw below is replaced by this fixed percentile (deterministic
            output, useful for visualizing the augmentation space).

    Returns:
        Augmented images with the same shape as the input.
    """
    assert isinstance(images, torch.Tensor) and images.ndim == 4
    batch_size, num_channels, height, width = images.shape
    device = images.device
    if debug_percentile is not None:
        debug_percentile = torch.as_tensor(debug_percentile, dtype=torch.float32, device=device)

    # -------------------------------------
    # Select parameters for pixel blitting.
    # -------------------------------------

    # Initialize inverse homogeneous 2D transform: G_inv @ pixel_out ==> pixel_in
    # (accumulated first, then executed once below).
    I_3 = torch.eye(3, device=device)
    G_inv = I_3

    # Apply x-flip with probability (xflip * strength).
    if self.xflip > 0:
        i = torch.floor(torch.rand([batch_size], device=device) * 2)
        i = torch.where(torch.rand([batch_size], device=device) < self.xflip * self.p, i, torch.zeros_like(i))
        if debug_percentile is not None:
            i = torch.full_like(i, torch.floor(debug_percentile * 2))
        G_inv = G_inv @ scale2d_inv(1 - 2 * i, 1)

    # Apply 90 degree rotations with probability (rotate90 * strength).
    if self.rotate90 > 0:
        i = torch.floor(torch.rand([batch_size], device=device) * 4)
        i = torch.where(torch.rand([batch_size], device=device) < self.rotate90 * self.p, i, torch.zeros_like(i))
        if debug_percentile is not None:
            i = torch.full_like(i, torch.floor(debug_percentile * 4))
        G_inv = G_inv @ rotate2d_inv(-np.pi / 2 * i)

    # Apply integer translation with probability (xint * strength).
    if self.xint > 0:
        t = (torch.rand([batch_size, 2], device=device) * 2 - 1) * self.xint_max
        t = torch.where(torch.rand([batch_size, 1], device=device) < self.xint * self.p, t, torch.zeros_like(t))
        if debug_percentile is not None:
            t = torch.full_like(t, (debug_percentile * 2 - 1) * self.xint_max)
        G_inv = G_inv @ translate2d_inv(torch.round(t[:,0] * width), torch.round(t[:,1] * height))

    # --------------------------------------------------------
    # Select parameters for general geometric transformations.
    # --------------------------------------------------------

    # Apply isotropic scaling with probability (scale * strength).
    if self.scale > 0:
        s = torch.exp2(torch.randn([batch_size], device=device) * self.scale_std)
        s = torch.where(torch.rand([batch_size], device=device) < self.scale * self.p, s, torch.ones_like(s))
        if debug_percentile is not None:
            s = torch.full_like(s, torch.exp2(torch.erfinv(debug_percentile * 2 - 1) * self.scale_std))
        G_inv = G_inv @ scale2d_inv(s, s)

    # Apply pre-rotation with probability p_rot.
    # Rotation is split into a pre- and post-rotation around the anisotropic
    # scaling; p_rot is chosen so that P(pre OR post) equals rotate * p.
    p_rot = 1 - torch.sqrt((1 - self.rotate * self.p).clamp(0, 1)) # P(pre OR post) = p
    if self.rotate > 0:
        theta = (torch.rand([batch_size], device=device) * 2 - 1) * np.pi * self.rotate_max
        theta = torch.where(torch.rand([batch_size], device=device) < p_rot, theta, torch.zeros_like(theta))
        if debug_percentile is not None:
            theta = torch.full_like(theta, (debug_percentile * 2 - 1) * np.pi * self.rotate_max)
        G_inv = G_inv @ rotate2d_inv(-theta) # Before anisotropic scaling.

    # Apply anisotropic scaling with probability (aniso * strength).
    if self.aniso > 0:
        s = torch.exp2(torch.randn([batch_size], device=device) * self.aniso_std)
        s = torch.where(torch.rand([batch_size], device=device) < self.aniso * self.p, s, torch.ones_like(s))
        if debug_percentile is not None:
            s = torch.full_like(s, torch.exp2(torch.erfinv(debug_percentile * 2 - 1) * self.aniso_std))
        G_inv = G_inv @ scale2d_inv(s, 1 / s)

    # Apply post-rotation with probability p_rot.
    if self.rotate > 0:
        theta = (torch.rand([batch_size], device=device) * 2 - 1) * np.pi * self.rotate_max
        theta = torch.where(torch.rand([batch_size], device=device) < p_rot, theta, torch.zeros_like(theta))
        if debug_percentile is not None:
            theta = torch.zeros_like(theta)
        G_inv = G_inv @ rotate2d_inv(-theta) # After anisotropic scaling.

    # Apply fractional translation with probability (xfrac * strength).
    if self.xfrac > 0:
        t = torch.randn([batch_size, 2], device=device) * self.xfrac_std
        t = torch.where(torch.rand([batch_size, 1], device=device) < self.xfrac * self.p, t, torch.zeros_like(t))
        if debug_percentile is not None:
            t = torch.full_like(t, torch.erfinv(debug_percentile * 2 - 1) * self.xfrac_std)
        G_inv = G_inv @ translate2d_inv(t[:,0] * width, t[:,1] * height)

    # ----------------------------------
    # Execute geometric transformations.
    # ----------------------------------

    # Execute if the transform is not identity.
    if G_inv is not I_3:
        # Calculate padding: transform the image corners and derive the margin
        # needed so no content is lost, clamped to sensible bounds.
        cx = (width - 1) / 2
        cy = (height - 1) / 2
        cp = matrix([-cx, -cy, 1], [cx, -cy, 1], [cx, cy, 1], [-cx, cy, 1], device=device) # [idx, xyz]
        cp = G_inv @ cp.t() # [batch, xyz, idx]
        Hz_pad = self.Hz_geom.shape[0] // 4
        margin = cp[:, :2, :].permute(1, 0, 2).flatten(1) # [xy, batch * idx]
        margin = torch.cat([-margin, margin]).max(dim=1).values # [x0, y0, x1, y1]
        margin = margin + misc.constant([Hz_pad * 2 - cx, Hz_pad * 2 - cy] * 2, device=device)
        margin = margin.max(misc.constant([0, 0] * 2, device=device))
        margin = margin.min(misc.constant([width-1, height-1] * 2, device=device))
        mx0, my0, mx1, my1 = margin.ceil().to(torch.int32)

        # Pad image and adjust origin.
        images = torch.nn.functional.pad(input=images, pad=[mx0,mx1,my0,my1], mode='reflect')
        G_inv = translate2d((mx0 - mx1) / 2, (my0 - my1) / 2) @ G_inv

        # Upsample (2x, with the orthogonal lowpass filter) to reduce aliasing
        # during resampling.
        images = upfirdn2d.upsample2d(x=images, f=self.Hz_geom, up=2)
        G_inv = scale2d(2, 2, device=device) @ G_inv @ scale2d_inv(2, 2, device=device)
        G_inv = translate2d(-0.5, -0.5, device=device) @ G_inv @ translate2d_inv(-0.5, -0.5, device=device)

        # Execute transformation.
        shape = [batch_size, num_channels, (height + Hz_pad * 2) * 2, (width + Hz_pad * 2) * 2]
        G_inv = scale2d(2 / images.shape[3], 2 / images.shape[2], device=device) @ G_inv @ scale2d_inv(2 / shape[3], 2 / shape[2], device=device)
        grid = torch.nn.functional.affine_grid(theta=G_inv[:,:2,:], size=shape, align_corners=False)
        images = grid_sample_gradfix.grid_sample(images, grid)

        # Downsample and crop back to the original resolution.
        images = upfirdn2d.downsample2d(x=images, f=self.Hz_geom, down=2, padding=-Hz_pad*2, flip_filter=True)

    # --------------------------------------------
    # Select parameters for color transformations.
    # --------------------------------------------

    # Initialize homogeneous 3D transformation matrix: C @ color_in ==> color_out
    # (accumulated first, then executed once below).
    I_4 = torch.eye(4, device=device)
    C = I_4

    # Apply brightness with probability (brightness * strength).
    if self.brightness > 0:
        b = torch.randn([batch_size], device=device) * self.brightness_std
        b = torch.where(torch.rand([batch_size], device=device) < self.brightness * self.p, b, torch.zeros_like(b))
        if debug_percentile is not None:
            b = torch.full_like(b, torch.erfinv(debug_percentile * 2 - 1) * self.brightness_std)
        C = translate3d(b, b, b) @ C

    # Apply contrast with probability (contrast * strength).
    if self.contrast > 0:
        c = torch.exp2(torch.randn([batch_size], device=device) * self.contrast_std)
        c = torch.where(torch.rand([batch_size], device=device) < self.contrast * self.p, c, torch.ones_like(c))
        if debug_percentile is not None:
            c = torch.full_like(c, torch.exp2(torch.erfinv(debug_percentile * 2 - 1) * self.contrast_std))
        C = scale3d(c, c, c) @ C

    # Apply luma flip with probability (lumaflip * strength).
    v = misc.constant(np.asarray([1, 1, 1, 0]) / np.sqrt(3), device=device) # Luma axis.
    if self.lumaflip > 0:
        i = torch.floor(torch.rand([batch_size, 1, 1], device=device) * 2)
        i = torch.where(torch.rand([batch_size, 1, 1], device=device) < self.lumaflip * self.p, i, torch.zeros_like(i))
        if debug_percentile is not None:
            i = torch.full_like(i, torch.floor(debug_percentile * 2))
        C = (I_4 - 2 * v.ger(v) * i) @ C # Householder reflection.

    # Apply hue rotation with probability (hue * strength).
    if self.hue > 0 and num_channels > 1:
        theta = (torch.rand([batch_size], device=device) * 2 - 1) * np.pi * self.hue_max
        theta = torch.where(torch.rand([batch_size], device=device) < self.hue * self.p, theta, torch.zeros_like(theta))
        if debug_percentile is not None:
            theta = torch.full_like(theta, (debug_percentile * 2 - 1) * np.pi * self.hue_max)
        C = rotate3d(v, theta) @ C # Rotate around v.

    # Apply saturation with probability (saturation * strength).
    if self.saturation > 0 and num_channels > 1:
        s = torch.exp2(torch.randn([batch_size, 1, 1], device=device) * self.saturation_std)
        s = torch.where(torch.rand([batch_size, 1, 1], device=device) < self.saturation * self.p, s, torch.ones_like(s))
        if debug_percentile is not None:
            s = torch.full_like(s, torch.exp2(torch.erfinv(debug_percentile * 2 - 1) * self.saturation_std))
        C = (v.ger(v) + (I_4 - v.ger(v)) * s) @ C

    # ------------------------------
    # Execute color transformations.
    # ------------------------------

    # Execute if the transform is not identity.
    if C is not I_4:
        images = images.reshape([batch_size, num_channels, height * width])
        if num_channels == 3:
            images = C[:, :3, :3] @ images + C[:, :3, 3:]
        elif num_channels == 1:
            # Grayscale: collapse the RGB rows of C into a single luma row.
            C = C[:, :3, :].mean(dim=1, keepdims=True)
            images = images * C[:, :, :3].sum(dim=2, keepdims=True) + C[:, :, 3:]
        else:
            raise ValueError('Image must be RGB (3 channels) or L (1 channel)')
        images = images.reshape([batch_size, num_channels, height, width])

    # ----------------------
    # Image-space filtering.
    # ----------------------

    if self.imgfilter > 0:
        num_bands = self.Hz_fbank.shape[0]
        assert len(self.imgfilter_bands) == num_bands
        expected_power = misc.constant(np.array([10, 1, 1, 1]) / 13, device=device) # Expected power spectrum (1/f).

        # Apply amplification for each band with probability (imgfilter * strength * band_strength).
        g = torch.ones([batch_size, num_bands], device=device) # Global gain vector (identity).
        for i, band_strength in enumerate(self.imgfilter_bands):
            t_i = torch.exp2(torch.randn([batch_size], device=device) * self.imgfilter_std)
            t_i = torch.where(torch.rand([batch_size], device=device) < self.imgfilter * self.p * band_strength, t_i, torch.ones_like(t_i))
            if debug_percentile is not None:
                t_i = torch.full_like(t_i, torch.exp2(torch.erfinv(debug_percentile * 2 - 1) * self.imgfilter_std)) if band_strength > 0 else torch.ones_like(t_i)
            t = torch.ones([batch_size, num_bands], device=device) # Temporary gain vector.
            t[:, i] = t_i # Replace i'th element.
            t = t / (expected_power * t.square()).sum(dim=-1, keepdims=True).sqrt() # Normalize power.
            g = g * t # Accumulate into global gain.

        # Construct combined amplification filter.
        Hz_prime = g @ self.Hz_fbank # [batch, tap]
        Hz_prime = Hz_prime.unsqueeze(1).repeat([1, num_channels, 1]) # [batch, channels, tap]
        Hz_prime = Hz_prime.reshape([batch_size * num_channels, 1, -1]) # [batch * channels, 1, tap]

        # Apply filter separably (one horizontal pass, one vertical pass) as a
        # grouped conv so each sample/channel gets its own kernel.
        p = self.Hz_fbank.shape[1] // 2
        images = images.reshape([1, batch_size * num_channels, height, width])
        images = torch.nn.functional.pad(input=images, pad=[p,p,p,p], mode='reflect')
        images = conv2d_gradfix.conv2d(input=images, weight=Hz_prime.unsqueeze(2), groups=batch_size*num_channels)
        images = conv2d_gradfix.conv2d(input=images, weight=Hz_prime.unsqueeze(3), groups=batch_size*num_channels)
        images = images.reshape([batch_size, num_channels, height, width])

    # ------------------------
    # Image-space corruptions.
    # ------------------------

    # Apply additive RGB noise with probability (noise * strength).
    if self.noise > 0:
        sigma = torch.randn([batch_size, 1, 1, 1], device=device).abs() * self.noise_std
        sigma = torch.where(torch.rand([batch_size, 1, 1, 1], device=device) < self.noise * self.p, sigma, torch.zeros_like(sigma))
        if debug_percentile is not None:
            sigma = torch.full_like(sigma, torch.erfinv(debug_percentile) * self.noise_std)
        images = images + torch.randn([batch_size, num_channels, height, width], device=device) * sigma

    # Apply cutout with probability (cutout * strength).
    if self.cutout > 0:
        size = torch.full([batch_size, 2, 1, 1, 1], self.cutout_size, device=device)
        size = torch.where(torch.rand([batch_size, 1, 1, 1, 1], device=device) < self.cutout * self.p, size, torch.zeros_like(size))
        center = torch.rand([batch_size, 2, 1, 1, 1], device=device)
        if debug_percentile is not None:
            size = torch.full_like(size, self.cutout_size)
            center = torch.full_like(center, debug_percentile)
        coord_x = torch.arange(width, device=device).reshape([1, 1, 1, -1])
        coord_y = torch.arange(height, device=device).reshape([1, 1, -1, 1])
        mask_x = (((coord_x + 0.5) / width - center[:, 0]).abs() >= size[:, 0] / 2)
        mask_y = (((coord_y + 0.5) / height - center[:, 1]).abs() >= size[:, 1] / 2)
        mask = torch.logical_or(mask_x, mask_y).to(torch.float32)
        images = images * mask
    return images
#----------------------------------------------------------------------------
| 26,373 | 60.050926 | 366 | py |
DFMGAN | DFMGAN-main/training/dataset.py | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import os
import numpy as np
import zipfile
import PIL.Image
import json
import torch
import dnnlib
try:
import pyspng
except ImportError:
pyspng = None
#----------------------------------------------------------------------------
class Dataset(torch.utils.data.Dataset):
    """Abstract base class for image datasets with optional labels.

    Provides index subsetting (max_size), x-flip doubling, lazy label
    loading/caching, and shape/label introspection properties. Subclasses
    implement _load_raw_image() and _load_raw_labels(), and may override
    close().
    """

    def __init__(self,
        name,                   # Name of the dataset.
        raw_shape,              # Shape of the raw image data (NCHW).
        max_size    = None,     # Artificially limit the size of the dataset. None = no limit. Applied before xflip.
        use_labels  = False,    # Enable conditioning labels? False = label dimension is zero.
        xflip       = False,    # Artificially double the size of the dataset via x-flips. Applied after max_size.
        random_seed = 0,        # Random seed to use when applying max_size.
    ):
        self._name = name
        self._raw_shape = list(raw_shape)
        self._use_labels = use_labels
        self._raw_labels = None   # Cache filled lazily by _get_raw_labels().
        self._label_shape = None  # Cache filled lazily by the label_shape property.

        # Apply max_size: keep a deterministic random subset of raw indices,
        # sorted so iteration order stays monotone.
        self._raw_idx = np.arange(self._raw_shape[0], dtype=np.int64)
        if max_size is not None and self._raw_idx.size > max_size:
            rng = np.random.RandomState(random_seed)
            rng.shuffle(self._raw_idx)
            self._raw_idx = np.sort(self._raw_idx[:max_size])

        # Apply xflip: duplicate every index; the second copy is flagged so
        # __getitem__ mirrors it along the width axis.
        self._xflip = np.zeros(self._raw_idx.size, dtype=np.uint8)
        if xflip:
            self._raw_idx = np.tile(self._raw_idx, 2)
            self._xflip = np.concatenate([self._xflip, np.ones_like(self._xflip)])

    def _get_raw_labels(self):
        # Load and validate the raw labels once; subsequent calls hit the cache.
        if self._raw_labels is None:
            labels = self._load_raw_labels() if self._use_labels else None
            if labels is None:
                # No labels: a zero-width float array keeps downstream code uniform.
                labels = np.zeros([self._raw_shape[0], 0], dtype=np.float32)
            assert isinstance(labels, np.ndarray)
            assert labels.shape[0] == self._raw_shape[0]
            assert labels.dtype in [np.float32, np.int64]
            if labels.dtype == np.int64:
                # Integer labels are 1-D non-negative class indices.
                assert labels.ndim == 1
                assert np.all(labels >= 0)
            self._raw_labels = labels
        return self._raw_labels

    def close(self): # to be overridden by subclass
        pass

    def _load_raw_image(self, raw_idx): # to be overridden by subclass
        raise NotImplementedError

    def _load_raw_labels(self): # to be overridden by subclass
        raise NotImplementedError

    def __getstate__(self):
        # Drop the cached labels when pickling; they are re-loaded on demand.
        return dict(self.__dict__, _raw_labels=None)

    def __del__(self):
        # Best-effort cleanup; never raise from a destructor.
        try:
            self.close()
        except:
            pass

    def __len__(self):
        return self._raw_idx.size

    def __getitem__(self, idx):
        img = self._load_raw_image(self._raw_idx[idx])
        assert isinstance(img, np.ndarray)
        assert list(img.shape) == self.image_shape
        assert img.dtype == np.uint8
        if self._xflip[idx]:
            assert img.ndim == 3 # CHW
            img = img[:, :, ::-1]  # Mirror along the width axis.
        return img.copy(), self.get_label(idx)

    def get_label(self, idx):
        label = self._get_raw_labels()[self._raw_idx[idx]]
        if label.dtype == np.int64:
            # Expand an integer class index into a one-hot float vector.
            onehot = np.zeros(self.label_shape, dtype=np.float32)
            onehot[label] = 1
            label = onehot
        return label.copy()

    def get_details(self, idx):
        # Debug info describing how dataset index idx maps to the raw data.
        d = dnnlib.EasyDict()
        d.raw_idx = int(self._raw_idx[idx])
        d.xflip = int(self._xflip[idx]) != 0
        d.raw_label = self._get_raw_labels()[d.raw_idx].copy()
        return d

    @property
    def name(self):
        return self._name

    @property
    def image_shape(self):
        return list(self._raw_shape[1:])

    @property
    def num_channels(self):
        assert len(self.image_shape) == 3 # CHW
        return min(self.image_shape[0], 3)

    @property
    def resolution(self):
        shape = self.image_shape
        assert len(shape) == 3 # CHW
        assert shape[1] == shape[2]  # Square images only.
        return shape[1]

    @property
    def label_shape(self):
        if self._label_shape is None:
            raw = self._get_raw_labels()
            if raw.dtype == np.int64:
                # Class indices: one-hot width is max index + 1.
                self._label_shape = [int(np.max(raw)) + 1]
            else:
                self._label_shape = raw.shape[1:]
        return list(self._label_shape)

    @property
    def label_dim(self):
        assert len(self.label_shape) == 1
        return self.label_shape[0]

    @property
    def has_labels(self):
        return any(x != 0 for x in self.label_shape)

    @property
    def has_onehot_labels(self):
        return self._get_raw_labels().dtype == np.int64
class ImageFolderDataset(Dataset):
    """Dataset backed by a directory tree or a zip archive of image files.

    Accepts all image formats known to PIL plus raw .npy arrays; PNG files
    are decoded with pyspng when that module is available. Labels, if any,
    are read from a 'dataset.json' file alongside the images.
    """

    def __init__(self,
        path,                   # Path to directory or zip.
        resolution = None,      # Ensure specific resolution, None = highest available.
        **super_kwargs,         # Additional arguments for the Dataset base class.
    ):
        self._path = path
        self._zipfile = None    # Zip handle, opened lazily by _get_zipfile().

        # Enumerate all file names under the directory / inside the zip.
        if os.path.isdir(self._path):
            self._type = 'dir'
            self._all_fnames = {os.path.relpath(os.path.join(root, fname), start=self._path) for root, _dirs, files in os.walk(self._path) for fname in files}
        elif self._file_ext(self._path) == '.zip':
            self._type = 'zip'
            self._all_fnames = set(self._get_zipfile().namelist())
        else:
            raise IOError('Path must point to a directory or zip')

        # Ensure PIL's file-type registry (PIL.Image.EXTENSION, used below) is initialized.
        PIL.Image.init()
        self._image_fnames = sorted(fname for fname in self._all_fnames if ((self._file_ext(fname) in PIL.Image.EXTENSION) or self._file_ext(fname) == '.npy'))
        if len(self._image_fnames) == 0:
            raise IOError('No image files found in the specified path')

        name = os.path.splitext(os.path.basename(self._path))[0]
        # Probe the first image to determine the raw NCHW shape.
        raw_shape = [len(self._image_fnames)] + list(self._load_raw_image(0).shape)
        if resolution is not None and (raw_shape[2] != resolution or raw_shape[3] != resolution):
            raise IOError('Image files do not match the specified resolution')
        super().__init__(name=name, raw_shape=raw_shape, **super_kwargs)

    @staticmethod
    def _file_ext(fname):
        # Lowercase file extension, including the leading dot.
        return os.path.splitext(fname)[1].lower()

    def _get_zipfile(self):
        # Open the zip archive on first use and cache the handle.
        assert self._type == 'zip'
        if self._zipfile is None:
            self._zipfile = zipfile.ZipFile(self._path)
        return self._zipfile

    def _open_file(self, fname):
        # Open a contained file for binary reading.
        if self._type == 'dir':
            return open(os.path.join(self._path, fname), 'rb')
        if self._type == 'zip':
            return self._get_zipfile().open(fname, 'r')
        return None

    def close(self):
        # Release the cached zip handle, if any; always clear the reference.
        try:
            if self._zipfile is not None:
                self._zipfile.close()
        finally:
            self._zipfile = None

    def __getstate__(self):
        # The zip handle is not picklable; drop it and let it reopen lazily.
        return dict(super().__getstate__(), _zipfile=None)

    def _load_raw_image(self, raw_idx):
        # Load image number raw_idx and return it as a CHW array
        # (the base class expects uint8 in __getitem__).
        fname = self._image_fnames[raw_idx]
        with self._open_file(fname) as f:
            if pyspng is not None and self._file_ext(fname) == '.png':
                image = pyspng.load(f.read())
            elif self._file_ext(fname) == '.npy':
                image = np.load(f)
            else:
                image = np.array(PIL.Image.open(f))
        if image.ndim == 2:
            image = image[:, :, np.newaxis] # HW => HWC
        image = image.transpose(2, 0, 1) # HWC => CHW
        return image

    def _load_raw_labels(self):
        # Read labels from dataset.json; returns None when unavailable.
        fname = 'dataset.json'
        if fname not in self._all_fnames:
            return None
        with self._open_file(fname) as f:
            labels = json.load(f)['labels']
        if labels is None:
            return None
        labels = dict(labels)
        # Reorder to match self._image_fnames; JSON keys use '/' separators.
        labels = [labels[fname.replace('\\', '/')] for fname in self._image_fnames]
        labels = np.array(labels)
        # 1-D => integer class indices, 2-D => float label vectors.
        labels = labels.astype({1: np.int64, 2: np.float32}[labels.ndim])
        return labels
#----------------------------------------------------------------------------
| 8,683 | 35.334728 | 159 | py |
DFMGAN | DFMGAN-main/training/networks.py | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import numpy as np
import torch
from torch_utils import misc
from torch_utils import persistence
from torch_utils.ops import conv2d_resample
from torch_utils.ops import upfirdn2d
from torch_utils.ops import bias_act
from torch_utils.ops import fma
#----------------------------------------------------------------------------
@misc.profiled_function
def normalize_2nd_moment(x, dim=1, eps=1e-8):
    """Scale x so that its mean squared value along `dim` is 1 (eps for stability)."""
    second_moment = x.square().mean(dim=dim, keepdim=True)
    return x * (second_moment + eps).rsqrt()
#----------------------------------------------------------------------------
@misc.profiled_function
def modulated_conv2d(
    x,                          # Input tensor of shape [batch_size, in_channels, in_height, in_width].
    weight,                     # Weight tensor of shape [out_channels, in_channels, kernel_height, kernel_width].
    styles,                     # Modulation coefficients of shape [batch_size, in_channels].
    noise           = None,     # Optional noise tensor to add to the output activations.
    up              = 1,        # Integer upsampling factor.
    down            = 1,        # Integer downsampling factor.
    padding         = 0,        # Padding with respect to the upsampled image.
    resample_filter = None,     # Low-pass filter to apply when resampling activations. Must be prepared beforehand by calling upfirdn2d.setup_filter().
    demodulate      = True,     # Apply weight demodulation?
    flip_weight     = True,     # False = convolution, True = correlation (matches torch.nn.functional.conv2d).
    fused_modconv   = True,     # Perform modulation, convolution, and demodulation as a single fused operation?
):
    """Style-modulated 2D convolution (StyleGAN2).

    Multiplies each input channel by its per-sample style coefficient,
    convolves with `weight`, and (optionally) demodulates so each output
    channel's per-sample weights have unit norm. Executes either as one
    grouped convolution with per-sample weights (fused_modconv=True) or by
    scaling the activations before/after a shared convolution.
    """
    batch_size = x.shape[0]
    out_channels, in_channels, kh, kw = weight.shape
    misc.assert_shape(weight, [out_channels, in_channels, kh, kw]) # [OIkk]
    misc.assert_shape(x, [batch_size, in_channels, None, None]) # [NIHW]
    misc.assert_shape(styles, [batch_size, in_channels]) # [NI]

    # Pre-normalize inputs to avoid FP16 overflow.
    if x.dtype == torch.float16 and demodulate:
        weight = weight * (1 / np.sqrt(in_channels * kh * kw) / weight.norm(float('inf'), dim=[1,2,3], keepdim=True)) # max_Ikk
        styles = styles / styles.norm(float('inf'), dim=1, keepdim=True) # max_I

    # Calculate per-sample weights and demodulation coefficients.
    w = None
    dcoefs = None
    if demodulate or fused_modconv:
        w = weight.unsqueeze(0) # [NOIkk]
        w = w * styles.reshape(batch_size, 1, -1, 1, 1) # [NOIkk]
    if demodulate:
        dcoefs = (w.square().sum(dim=[2,3,4]) + 1e-8).rsqrt() # [NO]
    if demodulate and fused_modconv:
        w = w * dcoefs.reshape(batch_size, -1, 1, 1, 1) # [NOIkk]

    # Execute by scaling the activations before and after the convolution.
    if not fused_modconv:
        x = x * styles.to(x.dtype).reshape(batch_size, -1, 1, 1)
        x = conv2d_resample.conv2d_resample(x=x, w=weight.to(x.dtype), f=resample_filter, up=up, down=down, padding=padding, flip_weight=flip_weight)
        if demodulate and noise is not None:
            # Fused multiply-add: demodulate and inject noise in one op.
            x = fma.fma(x, dcoefs.to(x.dtype).reshape(batch_size, -1, 1, 1), noise.to(x.dtype))
        elif demodulate:
            x = x * dcoefs.to(x.dtype).reshape(batch_size, -1, 1, 1)
        elif noise is not None:
            x = x.add_(noise.to(x.dtype))
        return x

    # Execute as one fused op using grouped convolution: fold the batch into
    # the channel axis and give every sample its own weight group.
    with misc.suppress_tracer_warnings(): # this value will be treated as a constant
        batch_size = int(batch_size)
    misc.assert_shape(x, [batch_size, in_channels, None, None])
    x = x.reshape(1, -1, *x.shape[2:])
    w = w.reshape(-1, in_channels, kh, kw)
    x = conv2d_resample.conv2d_resample(x=x, w=w.to(x.dtype), f=resample_filter, up=up, down=down, padding=padding, groups=batch_size, flip_weight=flip_weight)
    x = x.reshape(batch_size, -1, *x.shape[2:])
    if noise is not None:
        x = x.add_(noise)
    return x
#----------------------------------------------------------------------------
@persistence.persistent_class
class FullyConnectedLayer(torch.nn.Module):
    """Fully connected layer with equalized learning rate and optional activation."""

    def __init__(self,
        in_features,                # Number of input features.
        out_features,               # Number of output features.
        bias            = True,     # Apply additive bias before the activation function?
        activation      = 'linear', # Activation function: 'relu', 'lrelu', etc.
        lr_multiplier   = 1,        # Learning rate multiplier.
        bias_init       = 0,        # Initial value for the additive bias.
    ):
        super().__init__()
        self.activation = activation
        # Equalized learning rate: parameters are stored pre-divided by
        # lr_multiplier and rescaled by weight_gain / bias_gain at runtime.
        self.weight = torch.nn.Parameter(torch.randn([out_features, in_features]) / lr_multiplier)
        self.bias = torch.nn.Parameter(torch.full([out_features], np.float32(bias_init))) if bias else None
        self.weight_gain = lr_multiplier / np.sqrt(in_features)
        self.bias_gain = lr_multiplier

    def forward(self, x):
        w = self.weight.to(x.dtype) * self.weight_gain
        b = self.bias
        if b is not None:
            b = b.to(x.dtype)
            if self.bias_gain != 1:
                b = b * self.bias_gain
        # Fast path: plain affine transform as one fused addmm call.
        if self.activation == 'linear' and b is not None:
            return torch.addmm(b.unsqueeze(0), x, w.t())
        out = x.matmul(w.t())
        return bias_act.bias_act(out, b, act=self.activation)
#----------------------------------------------------------------------------
@persistence.persistent_class
class Conv2dLayer(torch.nn.Module):
    """2D convolution layer with equalized learning rate, optional resampling,
    bias + activation, and optional output clamping."""

    def __init__(self,
        in_channels,                    # Number of input channels.
        out_channels,                   # Number of output channels.
        kernel_size,                    # Width and height of the convolution kernel.
        bias            = True,         # Apply additive bias before the activation function?
        activation      = 'linear',     # Activation function: 'relu', 'lrelu', etc.
        up              = 1,            # Integer upsampling factor.
        down            = 1,            # Integer downsampling factor.
        resample_filter = [1,3,3,1],    # Low-pass filter to apply when resampling activations.
        conv_clamp      = None,         # Clamp the output to +-X, None = disable clamping.
        channels_last   = False,        # Expect the input to have memory_format=channels_last?
        trainable       = True,         # Update the weights of this layer during training?
    ):
        super().__init__()
        self.activation = activation
        self.up = up
        self.down = down
        self.conv_clamp = conv_clamp
        self.register_buffer('resample_filter', upfirdn2d.setup_filter(resample_filter))
        self.padding = kernel_size // 2
        # Equalized learning rate: weights are stored unit-variance and rescaled in forward().
        self.weight_gain = 1 / np.sqrt(in_channels * (kernel_size ** 2))
        self.act_gain = bias_act.activation_funcs[activation].def_gain
        memory_format = torch.channels_last if channels_last else torch.contiguous_format
        weight = torch.randn([out_channels, in_channels, kernel_size, kernel_size]).to(memory_format=memory_format)
        bias = torch.zeros([out_channels]) if bias else None
        if trainable:
            self.weight = torch.nn.Parameter(weight)
            self.bias = torch.nn.Parameter(bias) if bias is not None else None
        else:
            # Frozen layer: register as buffers so the values still follow
            # .to()/state_dict without receiving gradients.
            self.register_buffer('weight', weight)
            if bias is not None:
                self.register_buffer('bias', bias)
            else:
                self.bias = None

    def forward(self, x, gain=1):
        # gain: extra multiplier applied to the activation output (and to the clamp).
        w = self.weight * self.weight_gain
        b = self.bias.to(x.dtype) if self.bias is not None else None
        flip_weight = (self.up == 1) # slightly faster
        x = conv2d_resample.conv2d_resample(x=x, w=w.to(x.dtype), f=self.resample_filter, up=self.up, down=self.down, padding=self.padding, flip_weight=flip_weight)

        act_gain = self.act_gain * gain
        act_clamp = self.conv_clamp * gain if self.conv_clamp is not None else None
        x = bias_act.bias_act(x, b, act=self.activation, gain=act_gain, clamp=act_clamp)
        return x
#----------------------------------------------------------------------------
@persistence.persistent_class
class MappingNetwork(torch.nn.Module):
    """Maps latent z (and optional condition c) to intermediate latents w.

    Normalizes and concatenates the inputs, passes them through a stack of
    fully connected layers, optionally tracks a moving average of w for
    truncation, and optionally broadcasts the result to num_ws copies.
    """

    def __init__(self,
        z_dim,                      # Input latent (Z) dimensionality, 0 = no latent.
        c_dim,                      # Conditioning label (C) dimensionality, 0 = no label.
        w_dim,                      # Intermediate latent (W) dimensionality.
        num_ws,                     # Number of intermediate latents to output, None = do not broadcast.
        num_layers      = 8,        # Number of mapping layers.
        embed_features  = None,     # Label embedding dimensionality, None = same as w_dim.
        layer_features  = None,     # Number of intermediate features in the mapping layers, None = same as w_dim.
        activation      = 'lrelu',  # Activation function: 'relu', 'lrelu', etc.
        lr_multiplier   = 0.01,     # Learning rate multiplier for the mapping layers.
        w_avg_beta      = 0.995,    # Decay for tracking the moving average of W during training, None = do not track.
    ):
        super().__init__()
        self.z_dim = z_dim
        self.c_dim = c_dim
        self.w_dim = w_dim
        self.num_ws = num_ws
        self.num_layers = num_layers
        self.w_avg_beta = w_avg_beta

        # Resolve defaulted feature widths.
        if embed_features is None:
            embed_features = w_dim
        if c_dim == 0:
            embed_features = 0
        if layer_features is None:
            layer_features = w_dim
        features_list = [z_dim + embed_features] + [layer_features] * (num_layers - 1) + [w_dim]

        if c_dim > 0:
            self.embed = FullyConnectedLayer(c_dim, embed_features)
        for idx in range(num_layers):
            in_features = features_list[idx]
            out_features = features_list[idx + 1]
            layer = FullyConnectedLayer(in_features, out_features, activation=activation, lr_multiplier=lr_multiplier)
            setattr(self, f'fc{idx}', layer)
        if num_ws is not None and w_avg_beta is not None:
            self.register_buffer('w_avg', torch.zeros([w_dim]))

    def forward(self, z, c, truncation_psi=1, truncation_cutoff=None, skip_w_avg_update=False):
        """Map (z, c) to w.

        truncation_psi: 1 = no truncation; otherwise lerp towards w_avg.
        truncation_cutoff: only truncate the first N broadcast ws, None = all.
        skip_w_avg_update: do not fold this batch into the w_avg statistics.
        """
        # Embed, normalize, and concat inputs.
        x = None
        with torch.autograd.profiler.record_function('input'):
            if self.z_dim > 0:
                misc.assert_shape(z, [None, self.z_dim])
                x = normalize_2nd_moment(z.to(torch.float32))
            if self.c_dim > 0:
                misc.assert_shape(c, [None, self.c_dim])
                y = normalize_2nd_moment(self.embed(c.to(torch.float32)))
                x = torch.cat([x, y], dim=1) if x is not None else y

        # Main layers.
        for idx in range(self.num_layers):
            layer = getattr(self, f'fc{idx}')
            x = layer(x)

        # Update moving average of W (training only, before broadcast).
        if self.w_avg_beta is not None and self.training and not skip_w_avg_update:
            with torch.autograd.profiler.record_function('update_w_avg'):
                self.w_avg.copy_(x.detach().mean(dim=0).lerp(self.w_avg, self.w_avg_beta))

        # Broadcast to one w per synthesis input.
        if self.num_ws is not None:
            with torch.autograd.profiler.record_function('broadcast'):
                x = x.unsqueeze(1).repeat([1, self.num_ws, 1])

        # Apply truncation.
        if truncation_psi != 1:
            with torch.autograd.profiler.record_function('truncate'):
                assert self.w_avg_beta is not None
                if self.num_ws is None or truncation_cutoff is None:
                    x = self.w_avg.lerp(x, truncation_psi)
                else:
                    x[:, :truncation_cutoff] = self.w_avg.lerp(x[:, :truncation_cutoff], truncation_psi)
        return x
#----------------------------------------------------------------------------
@persistence.persistent_class
class SynthesisLayer(torch.nn.Module):
    """Single StyleGAN2 synthesis layer: style-modulated convolution with
    optional upsampling, per-pixel noise injection, bias, and activation."""

    def __init__(self,
        in_channels,                    # Number of input channels.
        out_channels,                   # Number of output channels.
        w_dim,                          # Intermediate latent (W) dimensionality.
        resolution,                     # Resolution of this layer.
        kernel_size     = 3,            # Convolution kernel size.
        up              = 1,            # Integer upsampling factor.
        use_noise       = True,         # Enable noise input?
        activation      = 'lrelu',      # Activation function: 'relu', 'lrelu', etc.
        resample_filter = [1,3,3,1],    # Low-pass filter to apply when resampling activations.
        conv_clamp      = None,         # Clamp the output of convolution layers to +-X, None = disable clamping.
        channels_last   = False,        # Use channels_last format for the weights?
    ):
        super().__init__()
        self.resolution = resolution
        self.up = up
        self.use_noise = use_noise
        self.activation = activation
        self.conv_clamp = conv_clamp
        self.register_buffer('resample_filter', upfirdn2d.setup_filter(resample_filter))
        self.padding = kernel_size // 2
        self.act_gain = bias_act.activation_funcs[activation].def_gain

        # Affine mapping from w to per-channel style coefficients.
        self.affine = FullyConnectedLayer(w_dim, in_channels, bias_init=1)
        memory_format = torch.channels_last if channels_last else torch.contiguous_format
        self.weight = torch.nn.Parameter(torch.randn([out_channels, in_channels, kernel_size, kernel_size]).to(memory_format=memory_format))
        if use_noise:
            # Fixed noise pattern for noise_mode='const'; learned per-layer strength.
            self.register_buffer('noise_const', torch.randn([resolution, resolution]))
            self.noise_strength = torch.nn.Parameter(torch.zeros([]))
        self.bias = torch.nn.Parameter(torch.zeros([out_channels]))

    def forward(self, x, w, noise_mode='random', fused_modconv=True, gain=1):
        # noise_mode: 'random' = fresh noise, 'const' = fixed buffer, 'none' = disabled.
        assert noise_mode in ['random', 'const', 'none']
        in_resolution = self.resolution // self.up
        misc.assert_shape(x, [None, self.weight.shape[1], in_resolution, in_resolution])
        styles = self.affine(w)

        noise = None
        if self.use_noise and noise_mode == 'random':
            noise = torch.randn([x.shape[0], 1, self.resolution, self.resolution], device=x.device) * self.noise_strength
        if self.use_noise and noise_mode == 'const':
            noise = self.noise_const * self.noise_strength

        flip_weight = (self.up == 1) # slightly faster
        x = modulated_conv2d(x=x, weight=self.weight, styles=styles, noise=noise, up=self.up,
            padding=self.padding, resample_filter=self.resample_filter, flip_weight=flip_weight, fused_modconv=fused_modconv)

        act_gain = self.act_gain * gain
        act_clamp = self.conv_clamp * gain if self.conv_clamp is not None else None
        x = bias_act.bias_act(x, self.bias.to(x.dtype), act=self.activation, gain=act_gain, clamp=act_clamp)
        return x
#----------------------------------------------------------------------------
@persistence.persistent_class
class ToRGBLayer(torch.nn.Module):
    """Modulated 1x1 (by default) conv projecting features to image channels."""

    def __init__(self, in_channels, out_channels, w_dim, kernel_size=1, conv_clamp=None, channels_last=False):
        super().__init__()
        self.conv_clamp = conv_clamp
        # Affine mapping from W to one modulation scale per input channel.
        self.affine = FullyConnectedLayer(w_dim, in_channels, bias_init=1)
        fmt = torch.channels_last if channels_last else torch.contiguous_format
        init_weight = torch.randn([out_channels, in_channels, kernel_size, kernel_size])
        self.weight = torch.nn.Parameter(init_weight.to(memory_format=fmt))
        self.bias = torch.nn.Parameter(torch.zeros([out_channels]))
        # Equalized learning-rate scale, folded into the styles at run time.
        self.weight_gain = 1 / np.sqrt(in_channels * (kernel_size ** 2))
        self.kernel_size = kernel_size

    def forward(self, x, w, fused_modconv=True):
        """Project x to the output color space, modulated by latent w (no demodulation)."""
        scaled_styles = self.affine(w) * self.weight_gain
        pad = self.kernel_size // 2
        out = modulated_conv2d(x=x, weight=self.weight, styles=scaled_styles,
            demodulate=False, fused_modconv=fused_modconv, padding=pad)
        return bias_act.bias_act(out, self.bias.to(out.dtype), clamp=self.conv_clamp)
#----------------------------------------------------------------------------
@persistence.persistent_class
class SynthesisBlock(torch.nn.Module):
    # One generator stage: up to two modulated convs plus an optional output head.
    # torgb_type selects that head:
    #   'rgb'           - standard skip-connection RGB output.
    #   'gen_mask'      - 1-channel mask output (entry point of the defect branch).
    #   'upsample_mask' - no head; the incoming mask is only upsampled and applied.
    #   'none'          - plain feature block; forward returns x only.
    def __init__(self,
        in_channels,                        # Number of input channels, 0 = first block.
        out_channels,                       # Number of output channels.
        w_dim,                              # Intermediate latent (W) dimensionality.
        resolution,                         # Resolution of this block.
        img_channels,                       # Number of output color channels.
        is_last,                            # Is this the last block?
        torgb_type,                         # 'none', 'rgb', 'gen_mask', 'upsample_mask'
        no_round,                           # True = apply the mask softly instead of hard-thresholding.
        tanh_mask,                          # 'early' = squash the mask with tanh right after its head.
        tanh_k,                             # Slope factor of the tanh applied to the mask.
        img_resolution = None,
        mask_threshold = None,              # Threshold used when hard-masking (no_round=False).
        architecture = 'skip',              # Architecture: 'orig', 'skip', 'resnet'.
        resample_filter = [1,3,3,1],        # Low-pass filter to apply when resampling activations.
        conv_clamp = None,                  # Clamp the output of convolution layers to +-X, None = disable clamping.
        use_fp16 = False,                   # Use FP16 for this block?
        fp16_channels_last = False,         # Use channels-last memory format with FP16?
        **layer_kwargs,                     # Arguments for SynthesisLayer.
    ):
        assert architecture in ['orig', 'skip', 'resnet']
        super().__init__()
        self.in_channels = in_channels
        self.w_dim = w_dim
        self.resolution = resolution
        self.img_channels = img_channels
        self.is_last = is_last
        self.architecture = architecture
        self.use_fp16 = use_fp16
        self.channels_last = (use_fp16 and fp16_channels_last)
        self.register_buffer('resample_filter', upfirdn2d.setup_filter(resample_filter))
        self.num_conv = 0       # number of latents consumed by the convs
        self.num_torgb = 0      # number of latents consumed by the output head
        self.torgb_type = torgb_type
        self.mask_threshold = mask_threshold
        self.no_round = no_round
        self.tanh_mask = tanh_mask
        self.tanh_k = tanh_k
        self.img_resolution = img_resolution
        # NOTE(review): upsample_pad appears unused in this class -- confirm before removing.
        self.upsample_pad = torch.nn.ReplicationPad2d(padding = 1)
        if in_channels == 0:
            # First block starts from a learned constant instead of an input tensor.
            self.const = torch.nn.Parameter(torch.randn([out_channels, resolution, resolution]))
        if in_channels != 0:
            self.conv0 = SynthesisLayer(in_channels, out_channels, w_dim=w_dim, resolution=resolution, up=2,
                resample_filter=resample_filter, conv_clamp=conv_clamp, channels_last=self.channels_last, **layer_kwargs)
            self.num_conv += 1
        self.conv1 = SynthesisLayer(out_channels, out_channels, w_dim=w_dim, resolution=resolution,
            conv_clamp=conv_clamp, channels_last=self.channels_last, **layer_kwargs)
        self.num_conv += 1
        if (is_last or architecture == 'skip') and (torgb_type in ['rgb', 'gen_mask']):
            if torgb_type == 'rgb':
                self.torgb = ToRGBLayer(out_channels, img_channels, w_dim=w_dim,
                    conv_clamp=conv_clamp, channels_last=self.channels_last)
            # All ToRGB
            elif torgb_type == 'gen_mask':
                # The mask head outputs a single channel.
                self.torgb = ToRGBLayer(out_channels, 1, w_dim=w_dim,
                    conv_clamp=conv_clamp, channels_last=self.channels_last)
            self.num_torgb += 1
        if in_channels != 0 and architecture == 'resnet':
            self.skip = Conv2dLayer(in_channels, out_channels, kernel_size=1, bias=False, up=2,
                resample_filter=resample_filter, channels_last=self.channels_last)

    def forward(self, x, img, ws, res_x = None, force_fp32=False, fused_modconv=None, **layer_kwargs):
        """Process one resolution step; returns x, or (x, img) when an output head exists.

        `img` is the running RGB skip image (torgb_type='rgb') or the 1-channel
        mask (mask types); `res_x` is an optional residual from the defect branch.
        """
        misc.assert_shape(ws, [None, self.num_conv + self.num_torgb, self.w_dim])
        w_iter = iter(ws.unbind(dim=1))
        dtype = torch.float16 if self.use_fp16 and not force_fp32 else torch.float32
        memory_format = torch.channels_last if self.channels_last and not force_fp32 else torch.contiguous_format
        if fused_modconv is None:
            with misc.suppress_tracer_warnings(): # this value will be treated as a constant
                fused_modconv = (not self.training) and (dtype == torch.float32 or int(x.shape[0]) == 1)

        # Input.
        if self.in_channels == 0:
            # First block: broadcast the learned constant across the batch.
            x = self.const.to(dtype=dtype, memory_format=memory_format)
            x = x.unsqueeze(0).repeat([ws.shape[0], 1, 1, 1])
        else:
            misc.assert_shape(x, [None, self.in_channels, self.resolution // 2, self.resolution // 2])
            x = x.to(dtype=dtype, memory_format=memory_format)

        # Main layers.
        if self.in_channels == 0:
            x = self.conv1(x, next(w_iter), fused_modconv=fused_modconv, **layer_kwargs)
        elif self.architecture == 'resnet':
            y = self.skip(x, gain=np.sqrt(0.5))
            x = self.conv0(x, next(w_iter), fused_modconv=fused_modconv, **layer_kwargs)
            x = self.conv1(x, next(w_iter), fused_modconv=fused_modconv, gain=np.sqrt(0.5), **layer_kwargs)
            x = y.add_(x)
        else:
            x = self.conv0(x, next(w_iter), fused_modconv=fused_modconv, **layer_kwargs)
            x = self.conv1(x, next(w_iter), fused_modconv=fused_modconv, **layer_kwargs)

        # ResBlock exists: fold in the residual features from the defect branch.
        if res_x is not None:
            assert x.dtype == res_x.dtype and x.shape == res_x.shape
            x = x + res_x

        # ToRGB.
        if img is not None:
            # Bring the incoming skip image / mask up to this block's resolution.
            misc.assert_shape(img, [None, self.img_channels if self.torgb_type == 'rgb' else 1, self.resolution // 2, self.resolution // 2])
            img = upfirdn2d.upsample2d(img, self.resample_filter)
        if (self.is_last or self.architecture == 'skip') and (self.torgb_type in ['rgb', 'gen_mask']):
            y = self.torgb(x, next(w_iter), fused_modconv=fused_modconv)
            # The RGB skip accumulates in FP32; the mask stays in FP16.
            y = y.to(dtype=torch.float32 if self.torgb_type == 'rgb' else torch.float16, memory_format=torch.contiguous_format)
            img = img.add_(y) if img is not None else y
            if self.torgb_type == 'gen_mask' and self.tanh_mask == 'early':
                img = torch.tanh(self.tanh_k * img)
        if self.torgb_type in ['gen_mask', 'upsample_mask']:
            assert(x.shape[0] == img.shape[0] and img.shape[1] == 1 and x.shape[2] == img.shape[2] and x.shape[3] == img.shape[3])
            if self.no_round:
                # Soft mask: presumably img lies in [-1, 1] (tanh range), mapped to [0, 1] -- confirm.
                x = x * (img / 2.0 + 0.5)
            else:
                # Hard mask: zero the features wherever the mask is below the threshold.
                x = x * (img >= self.mask_threshold)

        assert x.dtype == dtype
        if self.torgb_type == 'none':
            return x
        assert img.dtype == (torch.float32 if self.torgb_type == 'rgb' else torch.float16)
        return x, img
#----------------------------------------------------------------------------
@persistence.persistent_class
class SynthesisNetwork(torch.nn.Module):
    """Stack of SynthesisBlocks from 4x4 up to the output resolution."""

    def __init__(self,
        w_dim,                      # Intermediate latent (W) dimensionality.
        img_resolution,             # Output image resolution.
        img_channels,               # Number of color channels.
        channel_base    = 32768,    # Overall multiplier for the number of channels.
        channel_max     = 512,      # Maximum number of channels in any layer.
        num_fp16_res    = 0,        # Use FP16 for the N highest resolutions.
        **block_kwargs,             # Arguments for SynthesisBlock.
    ):
        assert img_resolution >= 4 and img_resolution & (img_resolution - 1) == 0
        super().__init__()
        self.w_dim = w_dim
        self.img_resolution = img_resolution
        self.img_resolution_log2 = int(np.log2(img_resolution))
        self.img_channels = img_channels
        self.block_resolutions = [2 ** i for i in range(2, self.img_resolution_log2 + 1)]
        channels_dict = {r: min(channel_base // r, channel_max) for r in self.block_resolutions}
        fp16_resolution = max(2 ** (self.img_resolution_log2 + 1 - num_fp16_res), 8)
        self.num_ws = 0
        for r in self.block_resolutions:
            blk = SynthesisBlock(
                channels_dict[r // 2] if r > 4 else 0,  # 4x4 block starts from the constant
                channels_dict[r],
                w_dim=w_dim, resolution=r, img_channels=img_channels,
                is_last=(r == self.img_resolution), use_fp16=(r >= fp16_resolution),
                torgb_type='rgb', **block_kwargs)
            self.num_ws += blk.num_conv
            if r == self.img_resolution:
                self.num_ws += blk.num_torgb
            setattr(self, f'b{r}', blk)

    def forward(self, ws, **block_kwargs):
        """Synthesize an image from per-layer latents ws of shape [N, num_ws, w_dim]."""
        per_block_ws = []
        with torch.autograd.profiler.record_function('split_ws'):
            misc.assert_shape(ws, [None, self.num_ws, self.w_dim])
            ws = ws.to(torch.float32)
            offset = 0
            for r in self.block_resolutions:
                blk = getattr(self, f'b{r}')
                # Each slice overlaps the next by num_torgb latents, as in StyleGAN2.
                per_block_ws.append(ws.narrow(1, offset, blk.num_conv + blk.num_torgb))
                offset += blk.num_conv
        x = img = None
        for r, cur_ws in zip(self.block_resolutions, per_block_ws):
            x, img = getattr(self, f'b{r}')(x, img, cur_ws, **block_kwargs)
        return img
#----------------------------------------------------------------------------
@persistence.persistent_class
class SynthesisResNet(torch.nn.Module):
    """Synthesis network with a parallel residual ("defect") branch.

    The main branch mirrors SynthesisNetwork. Starting at resolution `res_st`,
    a second SynthesisBlock per resolution produces residual features plus a
    1-channel mask ('gen_mask' at res_st, 'upsample_mask' above it); those
    residual features are added into the main branch via `res_x`.
    """
    def __init__(self,
        w_dim,                      # Intermediate latent (W) dimensionality.
        img_resolution,             # Output image resolution.
        img_channels,               # Number of color channels.
        res_st,                     # Resolution at which the residual branch starts.
        mask_threshold  = 0.0,      # Threshold for hard-masking inside the residual blocks.
        channel_base    = 32768,    # Overall multiplier for the number of channels.
        channel_max     = 512,      # Maximum number of channels in any layer.
        num_fp16_res    = 0,        # Use FP16 for the N highest resolutions.
        **block_kwargs,             # Arguments for SynthesisBlock.
    ):
        assert img_resolution >= 4 and img_resolution & (img_resolution - 1) == 0
        super().__init__()
        self.w_dim = w_dim
        self.img_resolution = img_resolution
        self.img_resolution_log2 = int(np.log2(img_resolution))
        self.img_channels = img_channels
        self.block_resolutions = [2 ** i for i in range(2, self.img_resolution_log2 + 1)]
        channels_dict = {res: min(channel_base // res, channel_max) for res in self.block_resolutions}
        fp16_resolution = max(2 ** (self.img_resolution_log2 + 1 - num_fp16_res), 8)
        self.res_st = res_st
        self.num_ws = 0         # latents consumed by the main branch
        self.num_defect_ws = 0  # latents consumed by the residual branch
        for res in self.block_resolutions:
            in_channels = channels_dict[res // 2] if res > 4 else 0
            out_channels = channels_dict[res]
            use_fp16 = (res >= fp16_resolution)
            is_last = (res == self.img_resolution)
            block = SynthesisBlock(in_channels, out_channels, w_dim=w_dim, resolution=res,
                img_channels=img_channels, is_last=is_last, use_fp16=use_fp16, torgb_type = 'rgb', **block_kwargs)
            self.num_ws += block.num_conv
            if is_last:
                self.num_ws += block.num_torgb
            setattr(self, f'b{res}', block)
            if res >= self.res_st:
                # The first residual block generates the mask; later ones only upsample it.
                res_block = SynthesisBlock(in_channels, out_channels, w_dim=w_dim, resolution=res,
                    img_channels=img_channels, is_last=is_last, use_fp16=use_fp16,
                    torgb_type = 'gen_mask' if res == self.res_st else 'upsample_mask',
                    mask_threshold=mask_threshold, **block_kwargs)
                self.num_defect_ws += res_block.num_conv
                if res == self.res_st:
                    self.num_defect_ws += 1  # extra latent for the mask's output head
                setattr(self, f'res_b{res}', res_block)

    def forward(self, ws, defect_ws, fix_residual_to_zero = False, output_mask = False, **block_kwargs):
        """Synthesize an image; optionally also return the defect mask.

        Args:
            ws: Main-branch latents, [N, num_ws, w_dim].
            defect_ws: Residual-branch latents, [N, num_defect_ws, w_dim].
            fix_residual_to_zero: Skip the residual branch entirely.
            output_mask: Also return the mask; None when the branch was skipped.
        """
        # Pad with None for resolutions below res_st so the zip below stays aligned.
        block_ws, res_block_ws = [], [None for _ in range(int(np.log2(self.res_st)) - 2)]
        with torch.autograd.profiler.record_function('split_ws'):
            misc.assert_shape(ws, [None, self.num_ws, self.w_dim])
            misc.assert_shape(defect_ws, [None, self.num_defect_ws, self.w_dim])
            ws = ws.to(torch.float32)
            defect_ws = defect_ws.to(torch.float32)
            w_idx = defect_w_idx = 0
            for res in self.block_resolutions:
                block = getattr(self, f'b{res}')
                block_ws.append(ws.narrow(1, w_idx, block.num_conv + block.num_torgb))
                w_idx += block.num_conv
                if res >= self.res_st:
                    res_block = getattr(self, f'res_b{res}')
                    res_block_ws.append(defect_ws.narrow(1, defect_w_idx, res_block.num_conv + res_block.num_torgb))
                    defect_w_idx += res_block.num_conv
        x = img = None
        # Fix: `mask` was previously unbound (NameError) when the residual branch
        # was skipped (fix_residual_to_zero=True) and output_mask was requested.
        mask = None
        for res, cur_ws, cur_res_ws in zip(self.block_resolutions, block_ws, res_block_ws):
            res_x = None
            if res >= self.res_st and not fix_residual_to_zero:
                res_block = getattr(self, f'res_b{res}')
                # The first residual block receives no mask; later ones refine it.
                res_x, mask = res_block(x, None if res == self.res_st else mask, cur_res_ws, **block_kwargs)
            block = getattr(self, f'b{res}')
            x, img = block(x, img, cur_ws, res_x = res_x, **block_kwargs)
        if output_mask:
            return img, mask
        else:
            return img
#----------------------------------------------------------------------------
@persistence.persistent_class
class Generator(torch.nn.Module):
    """Full generator: mapping network(s) plus a synthesis network.

    For the residual-transfer modes ('res_block', 'res_block_match_dis',
    'res_block_uni_dis') a second mapping network drives the defect branch of
    SynthesisResNet; 'dual_mod' instead adds a second latent into the main ws.
    """
    def __init__(self,
        z_dim,                      # Input latent (Z) dimensionality.
        c_dim,                      # Conditioning label (C) dimensionality.
        w_dim,                      # Intermediate latent (W) dimensionality.
        img_resolution,             # Output resolution.
        img_channels,               # Number of output color channels.
        transfer,                   # Transfer-learning mode selector.
        mask_threshold = 0.0,       # Mask threshold forwarded to SynthesisResNet.
        mapping_kwargs = {},        # Arguments for MappingNetwork.
        synthesis_kwargs = {},      # Arguments for SynthesisNetwork.
    ):
        super().__init__()
        self.z_dim = z_dim
        self.c_dim = c_dim
        self.w_dim = w_dim
        self.img_resolution = img_resolution
        self.img_channels = img_channels
        self.transfer = transfer
        self.mask_threshold = mask_threshold
        res_transfers = ['res_block', 'res_block_match_dis', 'res_block_uni_dis']
        if self.transfer in res_transfers:
            self.synthesis = SynthesisResNet(w_dim=w_dim, img_resolution=img_resolution, img_channels=img_channels, mask_threshold=self.mask_threshold, **synthesis_kwargs)
        else:
            self.synthesis = SynthesisNetwork(w_dim=w_dim, img_resolution=img_resolution, img_channels=img_channels, **synthesis_kwargs)
        self.num_ws = self.synthesis.num_ws
        # In the residual-transfer modes the main mapping is unconditional; the
        # label (if any) conditions only the defect mapping below.
        self.mapping = MappingNetwork(z_dim=z_dim, c_dim=0 if self.transfer in res_transfers else c_dim, w_dim=w_dim, num_ws=self.num_ws, **mapping_kwargs)
        if self.transfer in ['dual_mod'] + res_transfers:
            # NOTE(review): 'dual_mod' reads num_defect_ws off SynthesisNetwork,
            # which does not define it here -- confirm where it is provided.
            self.num_defect_ws = self.synthesis.num_defect_ws
            self.defect_mapping = MappingNetwork(z_dim=z_dim, c_dim=c_dim, w_dim=w_dim, num_ws=self.num_defect_ws, **mapping_kwargs)

    def forward(self, z, c, defect_z=None, output_mask = False, truncation_psi=1, truncation_cutoff=None, **synthesis_kwargs):
        """Generate images from latents z (and defect_z in the transfer modes).

        Returns img, or (img, mask) when output_mask is True. The mask is only
        produced by the residual-transfer modes; otherwise it is None.
        """
        ws = self.mapping(z, c, truncation_psi=truncation_psi, truncation_cutoff=truncation_cutoff)
        # Fix: `mask` was previously unbound (NameError) when output_mask=True
        # was requested outside the res_block transfer modes.
        mask = None
        if self.transfer == 'dual_mod':
            defect_ws = self.defect_mapping(defect_z, c, truncation_psi=truncation_psi, truncation_cutoff=truncation_cutoff)
            ws += defect_ws
        if self.transfer in ['res_block', 'res_block_match_dis', 'res_block_uni_dis']:
            defect_ws = self.defect_mapping(defect_z, c, truncation_psi=truncation_psi, truncation_cutoff=truncation_cutoff)
            if output_mask:
                img, mask = self.synthesis(ws, defect_ws, output_mask = output_mask, **synthesis_kwargs)
            else:
                img = self.synthesis(ws, defect_ws, **synthesis_kwargs)
        else:
            img = self.synthesis(ws, **synthesis_kwargs)
        if output_mask:
            return img, mask
        else:
            return img
#----------------------------------------------------------------------------
@persistence.persistent_class
class DiscriminatorBlock(torch.nn.Module):
    """One discriminator stage: optional from-image conv, two convs, 2x downsample."""

    def __init__(self,
        in_channels,                        # Number of input channels, 0 = first block.
        tmp_channels,                       # Number of intermediate channels.
        out_channels,                       # Number of output channels.
        resolution,                         # Resolution of this block.
        img_channels,                       # Number of input color channels.
        first_layer_idx,                    # Index of the first layer.
        block_type          = 'rgb',        # 'rgb', 'mask', 'uni'
        architecture        = 'resnet',     # Architecture: 'orig', 'skip', 'resnet'.
        activation          = 'lrelu',      # Activation function: 'relu', 'lrelu', etc.
        resample_filter     = [1,3,3,1],    # Low-pass filter to apply when resampling activations.
        conv_clamp          = None,         # Clamp the output of convolution layers to +-X, None = disable clamping.
        use_fp16            = False,        # Use FP16 for this block?
        fp16_channels_last  = False,        # Use channels-last memory format with FP16?
        freeze_layers       = 0,            # Freeze-D: Number of layers to freeze.
    ):
        assert in_channels in [0, tmp_channels]
        assert architecture in ['orig', 'skip', 'resnet']
        super().__init__()
        self.in_channels = in_channels
        self.resolution = resolution
        self.img_channels = img_channels
        self.first_layer_idx = first_layer_idx
        self.architecture = architecture
        self.use_fp16 = use_fp16
        self.channels_last = (use_fp16 and fp16_channels_last)
        self.register_buffer('resample_filter', upfirdn2d.setup_filter(resample_filter))
        self.block_type = block_type
        self.num_layers = 0

        def next_trainable():
            # Freeze-D: layers whose global index falls below freeze_layers stay frozen.
            layer_idx = self.first_layer_idx + self.num_layers
            self.num_layers += 1
            return layer_idx >= freeze_layers

        if in_channels == 0 or architecture == 'skip':
            # Mask streams carry a single channel instead of the image channels.
            self.fromrgb = Conv2dLayer(img_channels if self.block_type == 'rgb' else 1, tmp_channels,
                kernel_size=1, activation=activation, trainable=next_trainable(),
                conv_clamp=conv_clamp, channels_last=self.channels_last)
        self.conv0 = Conv2dLayer(tmp_channels, tmp_channels, kernel_size=3, activation=activation,
            trainable=next_trainable(), conv_clamp=conv_clamp, channels_last=self.channels_last)
        self.conv1 = Conv2dLayer(tmp_channels, out_channels, kernel_size=3, activation=activation, down=2,
            trainable=next_trainable(), resample_filter=resample_filter, conv_clamp=conv_clamp, channels_last=self.channels_last)
        if architecture == 'resnet':
            self.skip = Conv2dLayer(tmp_channels, out_channels, kernel_size=1, bias=False, down=2,
                trainable=next_trainable(), resample_filter=resample_filter, channels_last=self.channels_last)

    def forward(self, x, img, force_fp32=False):
        """Advance features one stage down; returns (x, img) where img may become None."""
        dtype = torch.float32 if (force_fp32 or not self.use_fp16) else torch.float16
        memory_format = torch.channels_last if (self.channels_last and not force_fp32) else torch.contiguous_format

        # Incoming features (absent for the first block).
        if x is not None:
            misc.assert_shape(x, [None, self.in_channels, self.resolution, self.resolution])
            x = x.to(dtype=dtype, memory_format=memory_format)

        # Fold the image (or mask) into the feature stream where required.
        if self.in_channels == 0 or self.architecture == 'skip':
            misc.assert_shape(img, [None, self.img_channels if self.block_type == 'rgb' else 1, self.resolution, self.resolution])
            img = img.to(dtype=dtype, memory_format=memory_format)
            img_feat = self.fromrgb(img)
            x = img_feat if x is None else x + img_feat
            img = upfirdn2d.downsample2d(img, self.resample_filter) if self.architecture == 'skip' else None

        # Two convolutions, with an optional residual shortcut.
        if self.architecture == 'resnet':
            shortcut = self.skip(x, gain=np.sqrt(0.5))
            x = self.conv0(x)
            x = self.conv1(x, gain=np.sqrt(0.5))
            x = shortcut.add_(x)
        else:
            x = self.conv0(x)
            x = self.conv1(x)

        assert x.dtype == dtype
        return x, img
#----------------------------------------------------------------------------
@persistence.persistent_class
class MinibatchStdLayer(torch.nn.Module):
    """Appends per-group feature-stddev statistics to the input as extra channels."""

    def __init__(self, group_size, num_channels=1):
        super().__init__()
        self.group_size = group_size
        self.num_channels = num_channels

    def forward(self, x):
        N, C, H, W = x.shape
        with misc.suppress_tracer_warnings(): # as_tensor results are registered as constants
            if self.group_size is not None:
                G = torch.min(torch.as_tensor(self.group_size), torch.as_tensor(N))
            else:
                G = N
        F = self.num_channels
        c = C // F
        stats = x.reshape(G, -1, F, c, H, W)   # split batch into groups, channels into F sets
        stats = stats - stats.mean(dim=0)      # center within each group
        stats = stats.square().mean(dim=0)     # variance over the group
        stats = (stats + 1e-8).sqrt()          # stddev (eps for numerical safety)
        stats = stats.mean(dim=[2, 3, 4])      # average over channels and pixels -> [n, F]
        stats = stats.reshape(-1, F, 1, 1)     # restore the missing dimensions
        stats = stats.repeat(G, 1, H, W)       # broadcast back over the group and pixels
        return torch.cat([x, stats], dim=1)    # append as new channels -> [N, C+F, H, W]
#----------------------------------------------------------------------------
@persistence.persistent_class
class DiscriminatorEpilogue(torch.nn.Module):
    """Final 4x4 stage: minibatch-stddev, conv, FC head, optional label projection."""

    def __init__(self,
        in_channels,                    # Number of input channels.
        cmap_dim,                       # Dimensionality of mapped conditioning label, 0 = no label.
        resolution,                     # Resolution of this block.
        img_channels,                   # Number of input color channels.
        architecture        = 'resnet', # Architecture: 'orig', 'skip', 'resnet'.
        mbstd_group_size    = 4,        # Group size for the minibatch standard deviation layer, None = entire minibatch.
        mbstd_num_channels  = 1,        # Number of features for the minibatch standard deviation layer, 0 = disable.
        activation          = 'lrelu',  # Activation function: 'relu', 'lrelu', etc.
        conv_clamp          = None,     # Clamp the output of convolution layers to +-X, None = disable clamping.
    ):
        assert architecture in ['orig', 'skip', 'resnet']
        super().__init__()
        self.in_channels = in_channels
        self.cmap_dim = cmap_dim
        self.resolution = resolution
        self.img_channels = img_channels
        self.architecture = architecture
        if architecture == 'skip':
            self.fromrgb = Conv2dLayer(img_channels, in_channels, kernel_size=1, activation=activation)
        self.mbstd = None
        if mbstd_num_channels > 0:
            self.mbstd = MinibatchStdLayer(group_size=mbstd_group_size, num_channels=mbstd_num_channels)
        self.conv = Conv2dLayer(in_channels + mbstd_num_channels, in_channels, kernel_size=3, activation=activation, conv_clamp=conv_clamp)
        self.fc = FullyConnectedLayer(in_channels * (resolution ** 2), in_channels, activation=activation)
        self.out = FullyConnectedLayer(in_channels, cmap_dim if cmap_dim != 0 else 1)

    def forward(self, x, img, cmap, force_fp32=False):
        """Score the final 4x4 features; projects onto cmap when conditioning is on."""
        misc.assert_shape(x, [None, self.in_channels, self.resolution, self.resolution]) # [NCHW]
        _ = force_fp32  # the epilogue always runs in FP32
        dtype = torch.float32
        memory_format = torch.contiguous_format

        x = x.to(dtype=dtype, memory_format=memory_format)
        if self.architecture == 'skip':
            misc.assert_shape(img, [None, self.img_channels, self.resolution, self.resolution])
            x = x + self.fromrgb(img.to(dtype=dtype, memory_format=memory_format))

        if self.mbstd is not None:
            x = self.mbstd(x)
        x = self.out(self.fc(self.conv(x).flatten(1)))

        # Projection discriminator: dot product with the mapped label embedding.
        if self.cmap_dim > 0:
            misc.assert_shape(cmap, [None, self.cmap_dim])
            x = (x * cmap).sum(dim=1, keepdim=True) * (1 / np.sqrt(self.cmap_dim))

        assert x.dtype == dtype
        return x
#----------------------------------------------------------------------------
@persistence.persistent_class
class Discriminator(torch.nn.Module):
    """Standard StyleGAN2 discriminator: blocks from img_resolution down to 4x4."""

    def __init__(self,
        c_dim,                          # Conditioning label (C) dimensionality.
        img_resolution,                 # Input resolution.
        img_channels,                   # Number of input color channels.
        architecture        = 'resnet', # Architecture: 'orig', 'skip', 'resnet'.
        channel_base        = 32768,    # Overall multiplier for the number of channels.
        channel_max         = 512,      # Maximum number of channels in any layer.
        num_fp16_res        = 0,        # Use FP16 for the N highest resolutions.
        conv_clamp          = None,     # Clamp the output of convolution layers to +-X, None = disable clamping.
        cmap_dim            = None,     # Dimensionality of mapped conditioning label, None = default.
        block_kwargs        = {},       # Arguments for DiscriminatorBlock.
        mapping_kwargs      = {},       # Arguments for MappingNetwork.
        epilogue_kwargs     = {},       # Arguments for DiscriminatorEpilogue.
    ):
        super().__init__()
        self.c_dim = c_dim
        self.img_resolution = img_resolution
        self.img_resolution_log2 = int(np.log2(img_resolution))
        self.img_channels = img_channels
        self.block_resolutions = [2 ** i for i in range(self.img_resolution_log2, 2, -1)]
        channels_dict = {res: min(channel_base // res, channel_max) for res in self.block_resolutions + [4]}
        fp16_resolution = max(2 ** (self.img_resolution_log2 + 1 - num_fp16_res), 8)
        if cmap_dim is None:
            cmap_dim = channels_dict[4]
        if c_dim == 0:
            cmap_dim = 0  # unconditional: no label projection
        common_kwargs = dict(img_channels=img_channels, architecture=architecture, conv_clamp=conv_clamp)
        cur_layer_idx = 0
        for res in self.block_resolutions:
            blk = DiscriminatorBlock(
                channels_dict[res] if res < img_resolution else 0,  # first block has no feature input
                channels_dict[res], channels_dict[res // 2], resolution=res,
                first_layer_idx=cur_layer_idx, use_fp16=(res >= fp16_resolution),
                **block_kwargs, **common_kwargs)
            setattr(self, f'b{res}', blk)
            cur_layer_idx += blk.num_layers
        if c_dim > 0:
            self.mapping = MappingNetwork(z_dim=0, c_dim=c_dim, w_dim=cmap_dim, num_ws=None, w_avg_beta=None, **mapping_kwargs)
        self.b4 = DiscriminatorEpilogue(channels_dict[4], cmap_dim=cmap_dim, resolution=4, **epilogue_kwargs, **common_kwargs)

    def forward(self, img, c, **block_kwargs):
        """Return per-sample realism scores for a batch of images."""
        x = None
        for res in self.block_resolutions:
            x, img = getattr(self, f'b{res}')(x, img, **block_kwargs)
        cmap = self.mapping(None, c) if self.c_dim > 0 else None
        return self.b4(x, img, cmap)
#----------------------------------------------------------------------------
@persistence.persistent_class
class DiscriminatorUnified(torch.nn.Module):
    # Discriminator with separate image and mask streams that are concatenated
    # ("unified") once the resolution drops to `uni_st`, then judged jointly.
    def __init__(self,
        c_dim,                          # Conditioning label (C) dimensionality.
        img_resolution,                 # Input resolution.
        img_channels,                   # Number of input color channels.
        uni_st,                         # Resolution at which the two streams merge.
        architecture        = 'resnet', # Architecture: 'orig', 'skip', 'resnet'.
        channel_base        = 32768,    # Overall multiplier for the number of channels.
        channel_max         = 512,      # Maximum number of channels in any layer.
        num_fp16_res        = 0,        # Use FP16 for the N highest resolutions.
        conv_clamp          = None,     # Clamp the output of convolution layers to +-X, None = disable clamping.
        cmap_dim            = None,     # Dimensionality of mapped conditioning label, None = default.
        block_kwargs        = {},       # Arguments for DiscriminatorBlock.
        mapping_kwargs      = {},       # Arguments for MappingNetwork.
        epilogue_kwargs     = {},       # Arguments for DiscriminatorEpilogue.
    ):
        super().__init__()
        self.c_dim = c_dim
        self.img_resolution = img_resolution
        self.img_resolution_log2 = int(np.log2(img_resolution))
        self.img_channels = img_channels
        self.block_resolutions = [2 ** i for i in range(self.img_resolution_log2, 2, -1)]
        channels_dict = {res: min(channel_base // res, channel_max) for res in self.block_resolutions + [4]}
        fp16_resolution = max(2 ** (self.img_resolution_log2 + 1 - num_fp16_res), 8)
        self.uni_st = uni_st
        if cmap_dim is None:
            cmap_dim = channels_dict[4]
        if c_dim == 0:
            cmap_dim = 0
        common_kwargs = dict(img_channels=img_channels, architecture=architecture, conv_clamp=conv_clamp)
        cur_layer_idx = 0
        for res in self.block_resolutions:
            in_channels = channels_dict[res] if res < img_resolution else 0
            tmp_channels = channels_dict[res]
            out_channels = channels_dict[res // 2]
            use_fp16 = (res >= fp16_resolution)
            if res > self.uni_st:
                # Parallel image and mask streams above the unification point.
                # NOTE(review): block and mask_block share first_layer_idx, and only
                # `block.num_layers` advances cur_layer_idx -- confirm this is the
                # intended Freeze-D accounting for the mask stream.
                block = DiscriminatorBlock(in_channels, tmp_channels, out_channels, resolution=res,
                    first_layer_idx=cur_layer_idx, use_fp16=use_fp16, block_type = 'rgb', **block_kwargs, **common_kwargs)
                setattr(self, f'b{res}', block)
                mask_block = DiscriminatorBlock(in_channels, tmp_channels, out_channels, resolution=res,
                    first_layer_idx=cur_layer_idx, use_fp16=use_fp16, block_type = 'mask', **block_kwargs, **common_kwargs)
                setattr(self, f'mask_b{res}', mask_block)
            else:
                # At res == uni_st the two streams are concatenated, doubling channels.
                block = DiscriminatorBlock((in_channels * 2) if res == self.uni_st else in_channels, (tmp_channels * 2) if res == self.uni_st else tmp_channels, out_channels, resolution=res,
                    first_layer_idx=cur_layer_idx, use_fp16=use_fp16, block_type = 'uni', **block_kwargs, **common_kwargs)
                setattr(self, f'uni_b{res}', block)
            cur_layer_idx += block.num_layers
        if c_dim > 0:
            self.mapping = MappingNetwork(z_dim=0, c_dim=c_dim, w_dim=cmap_dim, num_ws=None, w_avg_beta=None, **mapping_kwargs)
        self.uni_b4 = DiscriminatorEpilogue(channels_dict[4], cmap_dim=cmap_dim, resolution=4, **epilogue_kwargs, **common_kwargs)

    def forward(self, img, mask, c, **block_kwargs):
        """Jointly score an image and its defect mask.

        NOTE(review): assumes uni_st is one of the block resolutions (>= 8);
        otherwise x_uni would be referenced before assignment -- confirm.
        """
        x = None
        x_mask = None
        for res in self.block_resolutions:
            if res > self.uni_st:
                # Run the image and mask streams independently.
                block = getattr(self, f'b{res}')
                x, img = block(x, img, **block_kwargs)
                mask_block = getattr(self, f'mask_b{res}')
                x_mask, mask = mask_block(x_mask, mask, **block_kwargs)
            else:
                if res == self.uni_st:
                    # Merge the two streams channel-wise at the unification point.
                    x_uni = torch.cat([x, x_mask], dim = 1)
                uni_block = getattr(self, f'uni_b{res}')
                x_uni, _ = uni_block(x_uni, None, **block_kwargs)
        cmap = None
        if self.c_dim > 0:
            cmap = self.mapping(None, c)
        x = self.uni_b4(x_uni, None, cmap)
        return x
#---------------------------------------------------------------------------- | 49,430 | 50.544317 | 199 | py |
DFMGAN | DFMGAN-main/training/training_loop.py | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import os
from re import L
import time
import copy
import json
import pickle
import psutil
import PIL.Image
import numpy as np
import torch
import dnnlib
from torch_utils import misc
from torch_utils import training_stats
from torch_utils.ops import conv2d_gradfix
from torch_utils.ops import grid_sample_gradfix
import legacy
from metrics import metric_main
import itertools
#----------------------------------------------------------------------------
def setup_snapshot_image_grid(training_set, random_seed=0):
    """Choose a deterministic grid of samples for progress snapshots.

    Returns ((gw, gh), images, labels) where the grid is gw x gh; labeled
    datasets get one class per grid row, unlabeled ones a shuffled subset.
    """
    rnd = np.random.RandomState(random_seed)
    gw = np.clip(2560 // training_set.image_shape[2], 7, 32)
    gh = np.clip(2048 // training_set.image_shape[1], 4, 32)

    if not training_set.has_labels:
        # Unlabeled: a shuffled subset, wrapping around when the set is small.
        order = list(range(len(training_set)))
        rnd.shuffle(order)
        grid_indices = [order[i % len(order)] for i in range(gw * gh)]
    else:
        # Labeled: bucket samples by label so each grid row shows one class.
        label_groups = dict()  # label => [idx, ...]
        for idx in range(len(training_set)):
            label = tuple(training_set.get_details(idx).raw_label.flat[::-1])
            label_groups.setdefault(label, []).append(idx)

        # Shuffle within each class, deterministic class order.
        label_order = sorted(label_groups.keys())
        for label in label_order:
            rnd.shuffle(label_groups[label])

        # Fill the grid row by row, rotating each class so repeats show new samples.
        grid_indices = []
        for y in range(gh):
            label = label_order[y % len(label_order)]
            indices = label_groups[label]
            grid_indices += [indices[x % len(indices)] for x in range(gw)]
            label_groups[label] = [indices[(i + gw) % len(indices)] for i in range(len(indices))]

    # Materialize the selected samples.
    images, labels = zip(*[training_set[i] for i in grid_indices])
    return (gw, gh), np.stack(images), np.stack(labels)
#----------------------------------------------------------------------------
def save_image_grid(img, fname, drange, grid_size):
    """Tile a batch of CHW images into one grid image and save it to fname.

    Args:
        img: array-like of shape [N, C, H, W]; C must be 1 (grayscale) or 3 (RGB).
        fname: Output file path.
        drange: (lo, hi) value range of img, mapped linearly onto [0, 255].
        grid_size: (gw, gh) grid width/height in images; N must equal gw * gh.
    """
    lo, hi = drange
    grid = np.asarray(img, dtype=np.float32)
    grid = (grid - lo) * (255 / (hi - lo))              # normalize to [0, 255]
    grid = np.rint(grid).clip(0, 255).astype(np.uint8)

    gw, gh = grid_size
    _N, C, H, W = grid.shape
    # [N,C,H,W] -> [gh,gw,C,H,W] -> [gh,H,gw,W,C] -> [gh*H, gw*W, C]
    grid = grid.reshape(gh, gw, C, H, W)
    grid = grid.transpose(0, 3, 1, 4, 2)
    grid = grid.reshape(gh * H, gw * W, C)

    assert C in [1, 3]
    if C == 1:
        PIL.Image.fromarray(grid[:, :, 0], 'L').save(fname)
    if C == 3:
        PIL.Image.fromarray(grid, 'RGB').save(fname)
#----------------------------------------------------------------------------
def training_loop(
    run_dir = '.', # Output directory.
    training_set_kwargs = {}, # Options for training set.
    data_loader_kwargs = {}, # Options for torch.utils.data.DataLoader.
    G_kwargs = {}, # Options for generator network.
    D_kwargs = {}, # Options for discriminator network.
    G_opt_kwargs = {}, # Options for generator optimizer.
    D_opt_kwargs = {}, # Options for discriminator optimizer.
    augment_kwargs = None, # Options for augmentation pipeline. None = disable.
    loss_kwargs = {}, # Options for loss function.
    metrics = [], # Metrics to evaluate during training.
    random_seed = 0, # Global random seed.
    num_gpus = 1, # Number of GPUs participating in the training.
    rank = 0, # Rank of the current process in [0, num_gpus[.
    batch_size = 4, # Total batch size for one training iteration. Can be larger than batch_gpu * num_gpus.
    batch_gpu = 4, # Number of samples processed at a time by one GPU.
    ema_kimg = 10, # Half-life of the exponential moving average (EMA) of generator weights.
    ema_rampup = None, # EMA ramp-up coefficient.
    G_reg_interval = 4, # How often to perform regularization for G? None = disable lazy regularization.
    D_reg_interval = 16, # How often to perform regularization for D? None = disable lazy regularization.
    augment_p = 0, # Initial value of augmentation probability.
    ada_target = None, # ADA target value. None = fixed p.
    ada_interval = 4, # How often to perform ADA adjustment?
    ada_kimg = 500, # ADA adjustment speed, measured in how many kimg it takes for p to increase/decrease by one unit.
    total_kimg = 25000, # Total length of the training, measured in thousands of real images.
    kimg_per_tick = 4, # Progress snapshot interval.
    image_snapshot_ticks = 50, # How often to save image snapshots? None = disable.
    network_snapshot_ticks = 50, # How often to save network snapshots? None = disable.
    resume_pkl = None, # Network pickle to resume training from.
    cudnn_benchmark = True, # Enable torch.backends.cudnn.benchmark?
    allow_tf32 = False, # Enable torch.backends.cuda.matmul.allow_tf32 and torch.backends.cudnn.allow_tf32?
    abort_fn = None, # Callback function for determining whether to abort training. Must return consistent results across ranks.
    progress_fn = None, # Callback function for updating training progress. Called for all ranks.
    # DFMGAN args
    ft = None, # Fine-tuning mode for G: 'default', 'ft_map', 'ft_syn', 'ft_syn_<k>', 'ft_map_syn_<k>', or 'transfer'.
    D_match_kwargs = {}, # Options for the DFMGAN defect-matching discriminator.
    D_match_opt_kwargs = {}, # Options for the D_match optimizer.
    D_match_reg_interval = 16, # How often to perform regularization for D_match? None = disable lazy regularization.
):
    """StyleGAN2-ADA-style GAN training loop with DFMGAN extensions.

    Trains G/D (and, for transfer mode 'res_block_match_dis', an auxiliary
    defect-matching discriminator D_match that sees image+mask pairs) for
    `total_kimg` thousand real images. Per tick it logs progress, optionally
    saves image and network snapshots, and evaluates `metrics`.

    The DFMGAN-specific `G_kwargs.transfer` modes threaded through this loop
    are: 'none', 'dual_mod', 'res_block', 'res_block_match_dis', and
    'res_block_uni_dis'. The `ft` argument selects which subset of G's
    parameters the G optimizer trains.

    NOTE(review): relies on module-level names imported elsewhere in this
    file (dnnlib, misc, legacy, training_stats, conv2d_gradfix,
    grid_sample_gradfix, metric_main, PIL, psutil, pickle, copy, torch, np).
    """
    # Initialize.
    start_time = time.time()
    device = torch.device('cuda', rank)
    np.random.seed(random_seed * num_gpus + rank)
    torch.manual_seed(random_seed * num_gpus + rank)
    torch.backends.cudnn.benchmark = cudnn_benchmark # Improves training speed.
    torch.backends.cuda.matmul.allow_tf32 = allow_tf32 # Allow PyTorch to internally use tf32 for matmul
    torch.backends.cudnn.allow_tf32 = allow_tf32 # Allow PyTorch to internally use tf32 for convolutions
    conv2d_gradfix.enabled = True # Improves training speed.
    grid_sample_gradfix.enabled = True # Avoids errors with the augmentation pipe.
    # Load training set.
    if rank == 0:
        print('Loading training set...')
    training_set = dnnlib.util.construct_class_by_name(**training_set_kwargs) # subclass of training.dataset.Dataset
    training_set_sampler = misc.InfiniteSampler(dataset=training_set, rank=rank, num_replicas=num_gpus, seed=random_seed)
    training_set_iterator = iter(torch.utils.data.DataLoader(dataset=training_set, sampler=training_set_sampler, batch_size=batch_size//num_gpus, **data_loader_kwargs))
    if rank == 0:
        print()
        print('Num images: ', len(training_set))
        print('Image shape:', training_set.image_shape)
        print('Label shape:', training_set.label_shape)
        print()
    # Construct networks.
    if rank == 0:
        print('Constructing networks...')
    common_kwargs = dict(c_dim=training_set.label_dim, img_resolution=training_set.resolution, img_channels=training_set.num_channels)
    G = dnnlib.util.construct_class_by_name(**G_kwargs, **common_kwargs).train().requires_grad_(False).to(device) # subclass of torch.nn.Module
    D = dnnlib.util.construct_class_by_name(**D_kwargs, **common_kwargs).train().requires_grad_(False).to(device) # subclass of torch.nn.Module
    G_ema = copy.deepcopy(G).eval()
    # DFMGAN: auxiliary discriminator judging (image, mask) pairs; hence +1 input channel.
    # NOTE(review): D_match is only defined for this transfer mode; later branches assume the same condition.
    if G_kwargs.transfer == 'res_block_match_dis':
        common_match_kwargs = dict(c_dim=training_set.label_dim, img_resolution=training_set.resolution, img_channels=training_set.num_channels + 1)
        D_match = dnnlib.util.construct_class_by_name(**D_match_kwargs, **common_match_kwargs).train().requires_grad_(False).to(device) # subclass of torch.nn.Module
    # Resume from existing pickle.
    # NOTE(review): D_match is intentionally not restored from the pickle here.
    if (resume_pkl is not None) and (rank == 0):
        print(f'Resuming from "{resume_pkl}"')
        with dnnlib.util.open_url(resume_pkl) as f:
            resume_data = legacy.load_network_pkl(f)
        for name, module in [('G', G), ('D', D), ('G_ema', G_ema)]:
            copied_list = misc.copy_params_and_buffers(resume_data[name], module, require_all=False)
            # if rank == 0:
            #     print('%s copied parameters and buffers:' % name)
            #     print(copied_list)
    # Print network summary tables.
    if rank == 0:
        z = torch.empty([batch_gpu, G.z_dim], device=device)
        c = torch.empty([batch_gpu, G.c_dim], device=device)
        input_list = [z, c]
        # Transfer modes take an extra defect latent (and some return a mask as well).
        if G_kwargs.transfer in ['dual_mod', 'res_block', 'res_block_match_dis', 'res_block_uni_dis']:
            defect_z = torch.empty([batch_gpu, G.z_dim], device=device)
            input_list.append(defect_z)
        if G_kwargs.transfer == 'res_block_match_dis':
            input_list.append(True)
            img, mask = misc.print_module_summary(G, input_list)
            misc.print_module_summary(D, [img, c])
            misc.print_module_summary(D_match, [torch.cat([img, mask], dim = 1), c])
        elif G_kwargs.transfer == 'res_block_uni_dis':
            input_list.append(True)
            img, mask = misc.print_module_summary(G, input_list)
            misc.print_module_summary(D, [img, mask, c])
        else:
            img = misc.print_module_summary(G, input_list)
            misc.print_module_summary(D, [img, c])
    # Setup augmentation.
    if rank == 0:
        print('Setting up augmentation...')
    augment_pipe = None
    ada_stats = None
    if (augment_kwargs is not None) and (augment_p > 0 or ada_target is not None):
        augment_pipe = dnnlib.util.construct_class_by_name(**augment_kwargs).train().requires_grad_(False).to(device) # subclass of torch.nn.Module
        augment_pipe.p.copy_(torch.as_tensor(augment_p))
        if ada_target is not None:
            ada_stats = training_stats.Collector(regex='Loss/signs/real')
    # Distribute across GPUs.
    if rank == 0:
        print(f'Distributing across {num_gpus} GPUs...')
    ddp_modules = dict()
    module_list = [('G_mapping', G.mapping), ('G_synthesis', G.synthesis), ('D', D), (None, G_ema), ('augment_pipe', augment_pipe)]
    if G_kwargs.transfer in ['dual_mod', 'res_block', 'res_block_match_dis', 'res_block_uni_dis']:
        module_list.append(('G_defect_mapping', G.defect_mapping))
    if G_kwargs.transfer == 'res_block_match_dis':
        module_list.append(('D_match', D_match))
    for name, module in module_list:
        if (num_gpus > 1) and (module is not None) and len(list(module.parameters())) != 0:
            module.requires_grad_(True)
            module = torch.nn.parallel.DistributedDataParallel(module, device_ids=[device], broadcast_buffers=False, find_unused_parameters = True)
            module.requires_grad_(False)
        if name is not None:
            ddp_modules[name] = module
    # Setup training phases.
    if rank == 0:
        print('Setting up training phases...')
    loss = dnnlib.util.construct_class_by_name(device=device, **ddp_modules, **loss_kwargs) # subclass of training.loss.Loss
    phases = []
    training_nets = [('G', G, G_opt_kwargs, G_reg_interval), ('D', D, D_opt_kwargs, D_reg_interval)]
    if G_kwargs.transfer == 'res_block_match_dis':
        training_nets.append(('D_match', D_match, D_match_opt_kwargs, D_match_reg_interval))
    # Select which parameter subset each optimizer trains, depending on the
    # network role and the DFMGAN fine-tuning mode 'ft'.
    for name, module, opt_kwargs, reg_interval in training_nets:
        num_param = 0
        if name == 'D':
            if G_kwargs.transfer == 'res_block_uni_dis':
                for res in [4, 8, 16, 32, 64, 128, 256]:
                    # NOTE(review): target_param is re-initialized every iteration, so only
                    # the blocks from the final resolution (256) end up being trained here.
                    target_param = []
                    if res > D_kwargs.uni_st:
                        num_param += sum([p.numel() for p in getattr(module, f'mask_b{res}').parameters()])
                        target_param.append(getattr(module, f'mask_b{res}').parameters())
                    else:
                        num_param += sum([p.numel() for p in getattr(module, f'uni_b{res}').parameters()])
                        target_param.append(getattr(module, f'uni_b{res}').parameters())
                target_param = itertools.chain(*target_param)
            else:
                num_param = sum([p.numel() for p in module.parameters()])
                target_param = module.parameters()
        elif name == 'D_match':
            num_param = sum([p.numel() for p in module.parameters()])
            target_param = module.parameters()
        elif name == 'G':
            if ft == 'default':
                num_param = sum([p.numel() for p in module.parameters()])
                target_param = module.parameters()
            elif ft == 'ft_map':
                num_param = sum([p.numel() for p in module.mapping.parameters()])
                target_param = module.mapping.parameters()
            elif ft == 'ft_syn':
                num_param = sum([p.numel() for p in module.synthesis.parameters()])
                target_param = module.synthesis.parameters()
            elif ft.startswith('ft_syn_'):
                # Train only the first <k> synthesis blocks (lowest resolutions).
                num_trainable_block = int(ft.split('_')[-1])
                syn_modules = [module.synthesis.b4, module.synthesis.b8, module.synthesis.b16, module.synthesis.b32, module.synthesis.b64, module.synthesis.b128, module.synthesis.b256]
                target_param = itertools.chain(*[mod.parameters() for mod in syn_modules[:num_trainable_block]])
                # The numel() sum consumes the chain iterator, so it is rebuilt below.
                num_param = sum([p.numel() for p in target_param])
                target_param = itertools.chain(*[mod.parameters() for mod in syn_modules[:num_trainable_block]])
            elif ft.startswith('ft_map_syn_'):
                num_trainable_block = int(ft.split('_')[-1])
                syn_modules = [module.synthesis.b4, module.synthesis.b8, module.synthesis.b16, module.synthesis.b32, module.synthesis.b64, module.synthesis.b128, module.synthesis.b256]
                target_param = itertools.chain(*[mod.parameters() for mod in syn_modules[:num_trainable_block]], module.mapping.parameters())
                # The numel() sum consumes the chain iterator, so it is rebuilt below.
                num_param = sum([p.numel() for p in target_param])
                target_param = itertools.chain(*[mod.parameters() for mod in syn_modules[:num_trainable_block]], module.mapping.parameters())
            elif ft == 'transfer':
                if G_kwargs.transfer == 'dual_mod':
                    target_param = module.defect_mapping.parameters()
                    num_param = sum([p.numel() for p in target_param])
                    target_param = module.defect_mapping.parameters()
                elif G_kwargs.transfer in ['res_block', 'res_block_match_dis', 'res_block_uni_dis']:
                    # Train the defect mapping plus the residual blocks from res_st upward.
                    target_param = [module.defect_mapping.parameters()]
                    num_param += sum([p.numel() for p in module.defect_mapping.parameters()])
                    for res in [4, 8, 16, 32, 64, 128, 256]:
                        if res >= G_kwargs.synthesis_kwargs.res_st:
                            target_param.append(getattr(module.synthesis, f'res_b{res}').parameters())
                            num_param += sum([p.numel() for p in getattr(module.synthesis, f'res_b{res}').parameters()])
                    target_param = itertools.chain(*target_param)
        if rank == 0:
            print('Training %d params of %s' % (num_param, name))
        if reg_interval is None:
            opt = dnnlib.util.construct_class_by_name(params=target_param, **opt_kwargs) # subclass of torch.optim.Optimizer
            phases += [dnnlib.EasyDict(name=name+'both', module=module, opt=opt, interval=1)]
        else: # Lazy regularization.
            # Scale lr/betas so that the combined main+reg updates match non-lazy behavior.
            mb_ratio = reg_interval / (reg_interval + 1)
            opt_kwargs = dnnlib.EasyDict(opt_kwargs)
            opt_kwargs.lr = opt_kwargs.lr * mb_ratio
            opt_kwargs.betas = [beta ** mb_ratio for beta in opt_kwargs.betas]
            opt = dnnlib.util.construct_class_by_name(target_param, **opt_kwargs) # subclass of torch.optim.Optimizer
            phases += [dnnlib.EasyDict(name=name+'main', module=module, opt=opt, interval=1)]
            phases += [dnnlib.EasyDict(name=name+'reg', module=module, opt=opt, interval=reg_interval)]
    # CUDA events for per-phase timing (rank 0 only).
    for phase in phases:
        phase.start_event = None
        phase.end_event = None
        if rank == 0:
            phase.start_event = torch.cuda.Event(enable_timing=True)
            phase.end_event = torch.cuda.Event(enable_timing=True)
    # Export sample images.
    grid_size = None
    grid_z = None
    grid_c = None
    grid_defect_z = None
    if rank == 0:
        print('Exporting sample images...')
        grid_size, images, labels = setup_snapshot_image_grid(training_set=training_set)
        if G_kwargs.transfer in ['res_block_match_dis', 'res_block_uni_dis']:
            # Real samples carry the mask as a 4th channel; drop it for the RGB preview.
            images = images[:, :3, :, :]
        save_image_grid(images, os.path.join(run_dir, 'reals.png'), drange=[0,255], grid_size=grid_size)
        grid_z = torch.randn([labels.shape[0], G.z_dim], device=device).split(batch_gpu)
        grid_c = torch.from_numpy(labels).to(device).split(batch_gpu)
        if G_kwargs.transfer == 'none':
            images = torch.cat([G_ema(z=z, c=c, noise_mode='const').cpu() for z, c in zip(grid_z, grid_c)]).numpy()
        elif G_kwargs.transfer in ['dual_mod', 'res_block', 'res_block_match_dis', 'res_block_uni_dis']:
            grid_defect_z = torch.randn([labels.shape[0], G.z_dim], device=device).split(batch_gpu)
            images = torch.cat([G_ema(z=z, c=c, defect_z=defect_z, noise_mode='const').cpu() for z, c, defect_z in zip(grid_z, grid_c, grid_defect_z)]).numpy()
        save_image_grid(images, os.path.join(run_dir, 'fakes_init.png'), drange=[-1,1], grid_size=grid_size)
    # Initialize logs.
    if rank == 0:
        print('Initializing logs...')
    stats_collector = training_stats.Collector(regex='.*')
    stats_metrics = dict()
    stats_jsonl = None
    stats_tfevents = None
    if rank == 0:
        stats_jsonl = open(os.path.join(run_dir, 'stats.jsonl'), 'wt')
        try:
            import torch.utils.tensorboard as tensorboard
            stats_tfevents = tensorboard.SummaryWriter(run_dir)
        except ImportError as err:
            print('Skipping tfevents export:', err)
    # Train.
    if rank == 0:
        print(f'Training for {total_kimg} kimg...')
        print()
    cur_nimg = 0
    cur_tick = 0
    tick_start_nimg = cur_nimg
    tick_start_time = time.time()
    maintenance_time = tick_start_time - start_time
    batch_idx = 0
    if progress_fn is not None:
        progress_fn(0, total_kimg)
    while True:
        # Fetch training data.
        with torch.autograd.profiler.record_function('data_fetch'):
            phase_real_img, phase_real_c = next(training_set_iterator)
            if G_kwargs.transfer in ['res_block_match_dis', 'res_block_uni_dis']:
                # Split the 4-channel real batch into RGB image and defect mask.
                assert phase_real_img.ndim == 4 and phase_real_img.shape[1] == 4
                phase_real_img, phase_real_mask = phase_real_img[:, :3, :, :], phase_real_img[:, 3:, :, :]
            # Normalize uint8 [0, 255] to [-1, 1] and split into per-GPU microbatches.
            phase_real_img = (phase_real_img.to(device).to(torch.float32) / 127.5 - 1).split(batch_gpu)
            if G_kwargs.transfer in ['res_block_match_dis', 'res_block_uni_dis']:
                phase_real_mask = (phase_real_mask.to(device).to(torch.float32) / 127.5 - 1).split(batch_gpu)
            phase_real_c = phase_real_c.to(device).split(batch_gpu)
            all_gen_z = torch.randn([len(phases) * batch_size, G.z_dim], device=device)
            all_gen_z = [phase_gen_z.split(batch_gpu) for phase_gen_z in all_gen_z.split(batch_size)]
            all_gen_c = [training_set.get_label(np.random.randint(len(training_set))) for _ in range(len(phases) * batch_size)]
            all_gen_c = torch.from_numpy(np.stack(all_gen_c)).pin_memory().to(device)
            all_gen_c = [phase_gen_c.split(batch_gpu) for phase_gen_c in all_gen_c.split(batch_size)]
            all_gen_defect_z = torch.randn([len(phases) * batch_size, G.z_dim], device=device)
            all_gen_defect_z = [phase_gen_defect_z.split(batch_gpu) for phase_gen_defect_z in all_gen_defect_z.split(batch_size)]
        # Execute training phases.
        if G_kwargs.transfer == 'none':
            zip_iter = zip(phases, all_gen_z, all_gen_c)
        elif G_kwargs.transfer in ['dual_mod', 'res_block', 'res_block_match_dis', 'res_block_uni_dis']:
            zip_iter = zip(phases, all_gen_z, all_gen_c, all_gen_defect_z)
        for iter_cmbn in zip_iter:
            phase, phase_gen_z, phase_gen_c = iter_cmbn[:3]
            if G_kwargs.transfer in ['dual_mod', 'res_block', 'res_block_match_dis', 'res_block_uni_dis']:
                phase_gen_defect_z = iter_cmbn[3]
            if batch_idx % phase.interval != 0:
                continue
            # Initialize gradient accumulation.
            if phase.start_event is not None:
                phase.start_event.record(torch.cuda.current_stream(device))
            phase.opt.zero_grad(set_to_none=True)
            phase.module.requires_grad_(True)
            # Accumulate gradients over multiple rounds.
            if G_kwargs.transfer == 'none':
                enum_iter = enumerate(zip(phase_real_img, phase_real_c, phase_gen_z, phase_gen_c))
            elif G_kwargs.transfer in ['dual_mod', 'res_block']:
                enum_iter = enumerate(zip(phase_real_img, phase_real_c, phase_gen_z, phase_gen_c, phase_gen_defect_z))
            elif G_kwargs.transfer in ['res_block_match_dis', 'res_block_uni_dis']:
                enum_iter = enumerate(zip(phase_real_img, phase_real_c, phase_gen_z, phase_gen_c, phase_gen_defect_z, phase_real_mask))
            for round_idx, iter_cmbn_2 in enum_iter:
                real_img, real_c, gen_z, gen_c = iter_cmbn_2[:4]
                gen_defect_z = None
                real_mask = None
                # NOTE(review): gen_defect_z2 is never used below; kept as-is.
                gen_defect_z2 = None
                if G_kwargs.transfer in ['dual_mod', 'res_block', 'res_block_match_dis', 'res_block_uni_dis']:
                    gen_defect_z = iter_cmbn_2[4]
                if G_kwargs.transfer in ['res_block_match_dis', 'res_block_uni_dis']:
                    real_mask = iter_cmbn_2[5]
                # Sync gradients across GPUs only on the last accumulation round.
                sync = (round_idx == batch_size // (batch_gpu * num_gpus) - 1)
                gain = phase.interval
                loss.accumulate_gradients(phase=phase.name, real_img=real_img, real_c=real_c, gen_z=gen_z, gen_c=gen_c, sync=sync, gain=gain,
                    gen_defect_z = gen_defect_z, real_mask = real_mask, mask_threshold = G_kwargs.mask_threshold)
            # Update weights.
            phase.module.requires_grad_(False)
            with torch.autograd.profiler.record_function(phase.name + '_opt'):
                for param in phase.module.parameters():
                    if param.grad is not None:
                        # Guard against NaN/Inf gradients destabilizing the optimizer.
                        misc.nan_to_num(param.grad, nan=0, posinf=1e5, neginf=-1e5, out=param.grad)
                phase.opt.step()
            if phase.end_event is not None:
                phase.end_event.record(torch.cuda.current_stream(device))
        # Update G_ema.
        with torch.autograd.profiler.record_function('Gema'):
            ema_nimg = ema_kimg * 1000
            if ema_rampup is not None:
                ema_nimg = min(ema_nimg, cur_nimg * ema_rampup)
            ema_beta = 0.5 ** (batch_size / max(ema_nimg, 1e-8))
            for p_ema, p in zip(G_ema.parameters(), G.parameters()):
                p_ema.copy_(p.lerp(p_ema, ema_beta))
            for b_ema, b in zip(G_ema.buffers(), G.buffers()):
                b_ema.copy_(b)
        # Update state.
        cur_nimg += batch_size
        batch_idx += 1
        # Execute ADA heuristic.
        if (ada_stats is not None) and (batch_idx % ada_interval == 0):
            ada_stats.update()
            adjust = np.sign(ada_stats['Loss/signs/real'] - ada_target) * (batch_size * ada_interval) / (ada_kimg * 1000)
            augment_pipe.p.copy_((augment_pipe.p + adjust).max(misc.constant(0, device=device)))
        # Perform maintenance tasks once per tick.
        done = (cur_nimg >= total_kimg * 1000)
        if (not done) and (cur_tick != 0) and (cur_nimg < tick_start_nimg + kimg_per_tick * 1000):
            continue
        # Print status line, accumulating the same information in stats_collector.
        tick_end_time = time.time()
        fields = []
        fields += [f"tick {training_stats.report0('Progress/tick', cur_tick):<5d}"]
        fields += [f"kimg {training_stats.report0('Progress/kimg', cur_nimg / 1e3):<8.1f}"]
        fields += [f"time {dnnlib.util.format_time(training_stats.report0('Timing/total_sec', tick_end_time - start_time)):<12s}"]
        fields += [f"sec/tick {training_stats.report0('Timing/sec_per_tick', tick_end_time - tick_start_time):<7.1f}"]
        fields += [f"sec/kimg {training_stats.report0('Timing/sec_per_kimg', (tick_end_time - tick_start_time) / (cur_nimg - tick_start_nimg) * 1e3):<7.2f}"]
        fields += [f"maintenance {training_stats.report0('Timing/maintenance_sec', maintenance_time):<6.1f}"]
        fields += [f"cpumem {training_stats.report0('Resources/cpu_mem_gb', psutil.Process(os.getpid()).memory_info().rss / 2**30):<6.2f}"]
        fields += [f"gpumem {training_stats.report0('Resources/peak_gpu_mem_gb', torch.cuda.max_memory_allocated(device) / 2**30):<6.2f}"]
        torch.cuda.reset_peak_memory_stats()
        fields += [f"augment {training_stats.report0('Progress/augment', float(augment_pipe.p.cpu()) if augment_pipe is not None else 0):.3f}"]
        training_stats.report0('Timing/total_hours', (tick_end_time - start_time) / (60 * 60))
        training_stats.report0('Timing/total_days', (tick_end_time - start_time) / (24 * 60 * 60))
        if rank == 0:
            print(' '.join(fields))
        # Check for abort.
        if (not done) and (abort_fn is not None) and abort_fn():
            done = True
            if rank == 0:
                print()
                print('Aborting...')
        # Save image snapshot.
        if (rank == 0) and (image_snapshot_ticks is not None) and (done or cur_tick % image_snapshot_ticks == 0):
            if G_kwargs.transfer == 'none':
                images = torch.cat([G_ema(z=z, c=c, noise_mode='const').cpu() for z, c in zip(grid_z, grid_c)]).numpy()
            elif G_kwargs.transfer == 'dual_mod':
                images = torch.cat([G_ema(z=z, c=c, defect_z=defect_z, noise_mode='const').cpu() for z, c, defect_z in zip(grid_z, grid_c, grid_defect_z)]).numpy()
            elif G_kwargs.transfer in ['res_block', 'res_block_match_dis', 'res_block_uni_dis']:
                images, masks = [], []
                for z, c, defect_z in zip(grid_z, grid_c, grid_defect_z):
                    image, mask = G_ema(z=z, c=c, defect_z=defect_z, output_mask = True, noise_mode='const')
                    images.append(image.cpu())
                    masks.append(mask.cpu())
                images = torch.cat(images).numpy()
                masks = torch.cat(masks).numpy()
                save_image_grid(masks, os.path.join(run_dir, f'fakes{cur_nimg//1000:06d}_mask.png'), drange=[-1,1], grid_size=grid_size)
                # Binarize the predicted mask at the configured threshold for visualization.
                masks[masks >= G_kwargs.mask_threshold] = 1.0
                masks[masks < G_kwargs.mask_threshold] = -1.0
                save_image_grid(masks, os.path.join(run_dir, f'fakes{cur_nimg//1000:06d}_rounded_mask.png'), drange=[-1,1], grid_size=grid_size)
            save_image_grid(images, os.path.join(run_dir, f'fakes{cur_nimg//1000:06d}_img.png'), drange=[-1,1], grid_size=grid_size)
        # Save network snapshot.
        snapshot_pkl = None
        snapshot_data = None
        if (network_snapshot_ticks is not None) and (done or cur_tick % network_snapshot_ticks == 0):
            snapshot_data = dict(training_set_kwargs=dict(training_set_kwargs))
            saving_modules = [('G', G), ('D', D), ('G_ema', G_ema), ('augment_pipe', augment_pipe)]
            if G_kwargs.transfer == 'res_block_match_dis':
                saving_modules.append(('D_match', D_match))
            for name, module in saving_modules:
                if module is not None:
                    if num_gpus > 1:
                        misc.check_ddp_consistency(module, ignore_regex=r'.*\.w_avg')
                    # Snapshot a CPU copy so the pickle is device-independent.
                    module = copy.deepcopy(module).eval().requires_grad_(False).cpu()
                snapshot_data[name] = module
                del module # conserve memory
            snapshot_pkl = os.path.join(run_dir, f'network-snapshot-{cur_nimg//1000:06d}.pkl')
            if rank == 0:
                with open(snapshot_pkl, 'wb') as f:
                    pickle.dump(snapshot_data, f)
        # Evaluate metrics.
        if (snapshot_data is not None) and (len(metrics) > 0):
            if rank == 0:
                print('Evaluating metrics...')
            for metric in metrics:
                result_dict = metric_main.calc_metric(metric=metric, G=snapshot_data['G_ema'],
                    dataset_kwargs=training_set_kwargs, num_gpus=num_gpus, rank=rank, device=device)
                if rank == 0:
                    metric_main.report_metric(result_dict, run_dir=run_dir, snapshot_pkl=snapshot_pkl)
                stats_metrics.update(result_dict.results)
        del snapshot_data # conserve memory
        # Collect statistics.
        for phase in phases:
            value = []
            if (phase.start_event is not None) and (phase.end_event is not None):
                phase.end_event.synchronize()
                value = phase.start_event.elapsed_time(phase.end_event)
            training_stats.report0('Timing/' + phase.name, value)
        stats_collector.update()
        stats_dict = stats_collector.as_dict()
        # Update logs.
        timestamp = time.time()
        if stats_jsonl is not None:
            fields = dict(stats_dict, timestamp=timestamp)
            stats_jsonl.write(json.dumps(fields) + '\n')
            stats_jsonl.flush()
        if stats_tfevents is not None:
            global_step = int(cur_nimg / 1e3)
            walltime = timestamp - start_time
            for name, value in stats_dict.items():
                stats_tfevents.add_scalar(name, value.mean, global_step=global_step, walltime=walltime)
            for name, value in stats_metrics.items():
                stats_tfevents.add_scalar(f'Metrics/{name}', value, global_step=global_step, walltime=walltime)
            stats_tfevents.flush()
        if progress_fn is not None:
            progress_fn(cur_nimg // 1000, total_kimg)
        # Update state.
        cur_tick += 1
        tick_start_nimg = cur_nimg
        tick_start_time = time.time()
        maintenance_time = tick_start_time - tick_end_time
        if done:
            break
    # Done.
    if rank == 0:
        print()
        print('Exiting...')
#----------------------------------------------------------------------------
| 31,481 | 53.27931 | 184 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.