| id (int64, 0–190k) | prompt (string, length 21–13.4M) | docstring (string, length 1–12k, nullable) |
|---|---|---|
156,792 | import sys
import os
import pathlib
import glob
import copy
import torch
import numpy as np
import time
from options import TrainOptions
from data import create_data_loader, create_dataset
from models import create_model
from models.mvs.mvs_points_model import MvsPointsModel
from models.mvs import mvs_utils, filter_utils
from pprint import pprint
from utils.visualizer import Visualizer
from utils import format as fmt
from run.evaluate import report_metrics
torch.manual_seed(0)
np.random.seed(0)
import random
import cv2
from PIL import Image
from tqdm import tqdm
import gc
def masking(mask, firstdim_lst, seconddim_lst):
    """Apply a boolean/index mask to two groups of tensors.

    Tensors in ``firstdim_lst`` are masked along dimension 0, tensors in
    ``seconddim_lst`` along dimension 1; ``None`` entries pass through
    unchanged. Returns the two masked lists.
    """
    def _mask_dim0(tensor):
        return None if tensor is None else tensor[mask, ...]

    def _mask_dim1(tensor):
        return None if tensor is None else tensor[:, mask, ...]

    return [_mask_dim0(t) for t in firstdim_lst], [_mask_dim1(t) for t in seconddim_lst]
def create_model(opt):
    """Look up the model class named by ``opt.model``, instantiate and initialize it.

    Returns the initialized model instance.
    """
    # NOTE(review): this local definition shadows the `create_model` imported
    # from `models` at the top of the file, and `find_model_class_by_name` is
    # not imported/defined in the visible portion of this file — presumably it
    # lives in `models`. Verify before relying on this override.
    model = find_model_class_by_name(opt.model)
    instance = model()
    instance.initialize(opt)
    print("model [{}] was created".format(instance.name()))
    return instance
def gen_points_filter_embeddings(dataset, visualizer, opt):
    """Generate, filter, and embed the initial neural point cloud via MVS.

    For every view in ``dataset.view_id_list`` the MVS model predicts per-pixel
    camera-space points with photometric confidences. The points are filtered
    (confidence masks or range masks, scene bounds, alpha visual hull),
    optionally voxel-downsampled, and per-point embeddings/colors/directions/
    confidences are queried from the model.

    Args:
        dataset: dataset exposing ``view_id_list`` and ``get_init_item``;
            may optionally define ``spacemin``/``spacemax``/``alphas``.
        visualizer: visualization helper used to dump the final point cloud.
        opt: options namespace (``manual_depth_view``, ``vox_res``, ``ranges``,
            ``load_points``, ...). Mutated: ``is_train`` and ``mode`` are set.

    Returns:
        Tuple of (world xyz, embeddings, colors, directions, confidences,
        per-view images, c2ws, w2cs, intrinsics, [H, W] list).
    """
    print('-----------------------------------Generate Points-----------------------------------')
    opt.is_train = False
    opt.mode = 1
    model = create_model(opt)
    model.setup(opt)
    model.eval()
    cam_xyz_all = []
    intrinsics_all = []
    extrinsics_all = []
    confidence_all = []
    points_mask_all = []
    intrinsics_full_lst = []
    confidence_filtered_all = []
    near_fars_all = []
    gpu_filter = True
    # With many views, park per-view tensors on CPU to bound GPU memory.
    cpu2gpu = len(dataset.view_id_list) > 300
    imgs_lst, HDWD_lst, c2ws_lst, w2cs_lst, intrinsics_lst = [], [], [], [], []
    with torch.no_grad():
        for i in tqdm(range(0, len(dataset.view_id_list))):
            data = dataset.get_init_item(i)
            model.set_input(data)
            points_xyz_lst, photometric_confidence_lst, point_mask_lst, intrinsics_lst, extrinsics_lst, HDWD, c2ws, w2cs, intrinsics, near_fars = model.gen_points()
            B, N, C, H, W, _ = points_xyz_lst[0].shape
            cam_xyz_all.append((points_xyz_lst[0].cpu() if cpu2gpu else points_xyz_lst[0]) if gpu_filter else points_xyz_lst[0].cpu().numpy())
            # Kept as a tensor in both paths; the CPU filter path converts to
            # numpy at the call site below.
            intrinsics_all.append(intrinsics_lst[0])
            extrinsics_all.append(extrinsics_lst[0] if gpu_filter else extrinsics_lst[0].cpu().numpy())
            if opt.manual_depth_view != 0:
                confidence_all.append((photometric_confidence_lst[0].cpu() if cpu2gpu else photometric_confidence_lst[0]) if gpu_filter else photometric_confidence_lst[0].cpu().numpy())
            points_mask_all.append((point_mask_lst[0].cpu() if cpu2gpu else point_mask_lst[0]) if gpu_filter else point_mask_lst[0].cpu().numpy())
            imgs_lst.append(data["images"].cpu())
            HDWD_lst.append(HDWD)
            c2ws_lst.append(c2ws)
            w2cs_lst.append(w2cs)
            intrinsics_full_lst.append(intrinsics)
            near_fars_all.append(near_fars[0, 0])
        # #################### start query embedding ##################
        torch.cuda.empty_cache()
        if opt.manual_depth_view != 0:
            # Depth-confidence filtering across views.
            if gpu_filter:
                _, xyz_world_all, confidence_filtered_all = filter_utils.filter_by_masks_gpu(cam_xyz_all, intrinsics_all, extrinsics_all, confidence_all, points_mask_all, opt, vis=True, return_w=True, cpu2gpu=cpu2gpu, near_fars_all=near_fars_all)
            else:
                _, xyz_world_all, confidence_filtered_all = filter_utils.filter_by_masks(cam_xyz_all, [intr.cpu().numpy() for intr in intrinsics_all], extrinsics_all, confidence_all, points_mask_all, opt)
        else:
            # No confidence filtering: keep masked points and lift them to
            # world space with the inverse extrinsics.
            cam_xyz_all = [cam_xyz_all[i].reshape(-1, 3)[points_mask_all[i].reshape(-1), :] for i in range(len(cam_xyz_all))]
            xyz_world_all = [np.matmul(np.concatenate([cam_xyz_all[i], np.ones_like(cam_xyz_all[i][..., 0:1])], axis=-1), np.transpose(np.linalg.inv(extrinsics_all[i][0, ...])))[:, :3] for i in range(len(cam_xyz_all))]
            # BUGFIX: original code called `filter_by_masks.range_mask_lst_np`,
            # but `filter_by_masks` is a *function* of `filter_utils` (see the
            # branch above), not a module — the range-mask helper lives in
            # `filter_utils`. TODO confirm against filter_utils' public API.
            xyz_world_all, cam_xyz_all, confidence_filtered_all = filter_utils.range_mask_lst_np(xyz_world_all, cam_xyz_all, confidence_filtered_all, opt)
        del cam_xyz_all
        # Tag every point with the index of the view it came from.
        points_vid = torch.cat([torch.ones_like(xyz_world_all[i][..., 0:1]) * i for i in range(len(xyz_world_all))], dim=0)
        xyz_world_all = torch.cat(xyz_world_all, dim=0) if gpu_filter else torch.as_tensor(
            np.concatenate(xyz_world_all, axis=0), device="cuda", dtype=torch.float32)
        confidence_filtered_all = torch.cat(confidence_filtered_all, dim=0) if gpu_filter else torch.as_tensor(np.concatenate(confidence_filtered_all, axis=0), device="cuda", dtype=torch.float32)
        print("xyz_world_all", xyz_world_all.shape, points_vid.shape, confidence_filtered_all.shape)
        torch.cuda.empty_cache()
        print("%%%%%%%%%%%%% getattr(dataset, spacemin, None)", getattr(dataset, "spacemin", None))
        if getattr(dataset, "spacemin", None) is not None:
            # Drop points outside the axis-aligned scene bounding box.
            mask = (xyz_world_all - dataset.spacemin[None, ...].to(xyz_world_all.device)) >= 0
            mask *= (dataset.spacemax[None, ...].to(xyz_world_all.device) - xyz_world_all) >= 0
            mask = torch.prod(mask, dim=-1) > 0
            first_lst, second_lst = masking(mask, [xyz_world_all, points_vid, confidence_filtered_all], [])
            xyz_world_all, points_vid, confidence_filtered_all = first_lst
        if getattr(dataset, "alphas", None) is not None:
            # Keep only points inside the alpha-mask visual hull.
            vishull_mask = mvs_utils.alpha_masking(xyz_world_all, dataset.alphas, dataset.intrinsics, dataset.cam2worlds, dataset.world2cams, dataset.near_far if opt.ranges[0] < -90.0 and getattr(dataset, "spacemin", None) is None else None, opt=opt)
            first_lst, second_lst = masking(vishull_mask, [xyz_world_all, points_vid, confidence_filtered_all], [])
            xyz_world_all, points_vid, confidence_filtered_all = first_lst
            print("alpha masking xyz_world_all", xyz_world_all.shape, points_vid.shape)
        if opt.vox_res > 0:
            # Voxel-grid downsampling; subsample the input first if it is huge.
            xyz_world_all, sparse_grid_idx, sampled_pnt_idx = mvs_utils.construct_vox_points_closest(xyz_world_all.cuda() if len(xyz_world_all) < 99999999 else xyz_world_all[::(len(xyz_world_all) // 99999999 + 1), ...].cuda(), opt.vox_res)
            points_vid = points_vid[sampled_pnt_idx, :]
            confidence_filtered_all = confidence_filtered_all[sampled_pnt_idx]
            print("after voxelize:", xyz_world_all.shape, points_vid.shape)
            xyz_world_all = xyz_world_all.cuda()
        # Regroup surviving points per source view and project back to camera
        # space for the embedding query.
        xyz_world_all = [xyz_world_all[points_vid[:, 0] == i, :] for i in range(len(HDWD_lst))]
        confidence_filtered_all = [confidence_filtered_all[points_vid[:, 0] == i] for i in range(len(HDWD_lst))]
        cam_xyz_all = [(torch.cat([xyz_world_all[i], torch.ones_like(xyz_world_all[i][..., 0:1])], dim=-1) @ extrinsics_all[i][0].t())[..., :3] for i in range(len(HDWD_lst))]
        points_embedding_all, points_color_all, points_dir_all, points_conf_all = [], [], [], []
        for i in tqdm(range(len(HDWD_lst))):
            if len(xyz_world_all[i]) > 0:
                # (renamed local `dir` -> `pnt_dir`: avoid shadowing builtin dir)
                embedding, color, pnt_dir, conf = model.query_embedding(HDWD_lst[i], torch.as_tensor(cam_xyz_all[i][None, ...], device="cuda", dtype=torch.float32), torch.as_tensor(confidence_filtered_all[i][None, :, None], device="cuda", dtype=torch.float32) if len(confidence_filtered_all) > 0 else None, imgs_lst[i].cuda(), c2ws_lst[i], w2cs_lst[i], intrinsics_full_lst[i], 0, pointdir_w=True)
                points_embedding_all.append(embedding)
                points_color_all.append(color)
                points_dir_all.append(pnt_dir)
                points_conf_all.append(conf)
        xyz_world_all = torch.cat(xyz_world_all, dim=0)
        points_embedding_all = torch.cat(points_embedding_all, dim=1)
        points_color_all = torch.cat(points_color_all, dim=1) if points_color_all[0] is not None else None
        points_dir_all = torch.cat(points_dir_all, dim=1) if points_dir_all[0] is not None else None
        points_conf_all = torch.cat(points_conf_all, dim=1) if points_conf_all[0] is not None else None
        visualizer.save_neural_points(200, xyz_world_all, points_color_all, data, save_ref=opt.load_points == 0)
        print("vis")
        model.cleanup()
        del model
    return xyz_world_all, points_embedding_all, points_color_all, points_dir_all, points_conf_all, [img[0].cpu() for img in imgs_lst], [c2w for c2w in c2ws_lst], [w2c for w2c in w2cs_lst], intrinsics_all, [list(HDWD) for HDWD in HDWD_lst]
156,793 | import sys
import os
import pathlib
import glob
import copy
import torch
import numpy as np
import time
from options import TrainOptions
from data import create_data_loader, create_dataset
from models import create_model
from models.mvs.mvs_points_model import MvsPointsModel
from models.mvs import mvs_utils, filter_utils
from pprint import pprint
from utils.visualizer import Visualizer
from utils import format as fmt
from run.evaluate import report_metrics
np.random.seed(0)
import random
import cv2
from PIL import Image
from tqdm import tqdm
import gc
def test(model, dataset, visualizer, opt, bg_info, test_steps=0, gen_vid=False, lpips=True):
    """Render each test view chunk-by-chunk, reassemble full images,
    accumulate color losses, and report image metrics over the test set.

    Returns the accumulated PSNR of ``opt.test_color_loss_items[0]``.

    NOTE(review): relies on a module-level ``mse2psnr`` helper that is not
    defined in the visible portion of this file — verify it is in scope.
    """
    print('-----------------------------------Testing-----------------------------------')
    model.eval()
    total_num = dataset.total
    print("test set size {}, interval {}".format(total_num, opt.test_num_step)) # 1 if test_steps == 10000 else opt.test_num_step
    patch_size = opt.random_sample_size
    chunk_size = patch_size * patch_size  # rays rendered per forward pass
    height = dataset.height
    width = dataset.width
    visualizer.reset()
    count = 0;
    for i in range(0, total_num, opt.test_num_step): # 1 if test_steps == 10000 else opt.test_num_step
        data = dataset.get_item(i)
        raydir = data['raydir'].clone()
        pixel_idx = data['pixel_idx'].view(data['pixel_idx'].shape[0], -1, data['pixel_idx'].shape[3]).clone()
        # Boolean mask over the full image of pixels covered by sampled rays.
        edge_mask = torch.zeros([height, width], dtype=torch.bool)
        edge_mask[pixel_idx[0,...,1].to(torch.long), pixel_idx[0,...,0].to(torch.long)] = 1
        edge_mask=edge_mask.reshape(-1) > 0
        np_edge_mask=edge_mask.numpy().astype(bool)
        totalpixel = pixel_idx.shape[1]
        # Stash ground truth before popping it from the model input dict.
        tmpgts = {}
        tmpgts["gt_image"] = data['gt_image'].clone()
        tmpgts["gt_mask"] = data['gt_mask'].clone() if "gt_mask" in data else None
        # data.pop('gt_image', None)
        data.pop('gt_mask', None)
        visuals = None
        stime = time.time()
        ray_masks = []
        for k in range(0, totalpixel, chunk_size):
            start = k
            end = min([k + chunk_size, totalpixel])
            data['raydir'] = raydir[:, start:end, :]
            data["pixel_idx"] = pixel_idx[:, start:end, :]
            model.set_input(data)
            if opt.bgmodel.endswith("plane"):
                # Background handled by a textured plane model.
                img_lst, c2ws_lst, w2cs_lst, intrinsics_all, HDWD_lst, fg_masks, bg_ray_lst = bg_info
                if len(bg_ray_lst) > 0:
                    # Background rays were precomputed per frame.
                    bg_ray_all = bg_ray_lst[data["id"]]
                    bg_idx = data["pixel_idx"].view(-1,2)
                    bg_ray = bg_ray_all[:, bg_idx[:,1].long(), bg_idx[:,0].long(), :]
                else:
                    xyz_world_sect_plane = mvs_utils.gen_bg_points(data)
                    bg_ray, _ = model.set_bg(xyz_world_sect_plane, img_lst, c2ws_lst, w2cs_lst, intrinsics_all, HDWD_lst, data["plane_color"], fg_masks=fg_masks, vis=visualizer)
                data["bg_ray"] = bg_ray
                # xyz_world_sect_plane_lst.append(xyz_world_sect_plane)
            model.test()
            curr_visuals = model.get_current_visuals(data=data)
            chunk_pixel_id = data["pixel_idx"].cpu().numpy().astype(np.int32)
            if visuals is None:
                # First chunk: allocate full-size canvases per visual key.
                visuals = {}
                for key, value in curr_visuals.items():
                    if value is None or key=="gt_image":
                        continue
                    chunk = value.cpu().numpy()
                    visuals[key] = np.zeros((height, width, 3)).astype(chunk.dtype)
                    visuals[key][chunk_pixel_id[0,...,1], chunk_pixel_id[0,...,0], :] = chunk
            else:
                # Scatter this chunk's pixels into the canvases.
                for key, value in curr_visuals.items():
                    if value is None or key=="gt_image":
                        continue
                    visuals[key][chunk_pixel_id[0,...,1], chunk_pixel_id[0,...,0], :] = value.cpu().numpy()
            if "ray_mask" in model.output and "ray_masked_coarse_raycolor" in opt.test_color_loss_items:
                ray_masks.append(model.output["ray_mask"] > 0)
        if len(ray_masks) > 0:
            ray_masks = torch.cat(ray_masks, dim=1)
        pixel_idx=pixel_idx.to(torch.long)
        # Rebuild the flat ground-truth image over the covered pixels.
        gt_image = torch.zeros((height*width, 3), dtype=torch.float32)
        gt_image[edge_mask, :] = tmpgts['gt_image'].clone()
        if 'gt_image' in model.visual_names:
            visuals['gt_image'] = gt_image
        if 'gt_mask' in curr_visuals:
            visuals['gt_mask'] = np.zeros((height, width, 3)).astype(chunk.dtype)
            visuals['gt_mask'][np_edge_mask,:] = tmpgts['gt_mask']
        if 'ray_masked_coarse_raycolor' in model.visual_names:
            # Zero out pixels whose rays hit no neural points.
            visuals['ray_masked_coarse_raycolor'] = np.copy(visuals["coarse_raycolor"]).reshape(height, width, 3)
            print(visuals['ray_masked_coarse_raycolor'].shape, ray_masks.cpu().numpy().shape)
            visuals['ray_masked_coarse_raycolor'][ray_masks.view(height, width).cpu().numpy() <= 0,:] = 0.0
        if 'ray_depth_masked_coarse_raycolor' in model.visual_names:
            visuals['ray_depth_masked_coarse_raycolor'] = np.copy(visuals["coarse_raycolor"]).reshape(height, width, 3)
            visuals['ray_depth_masked_coarse_raycolor'][model.output["ray_depth_mask"][0].cpu().numpy() <= 0] = 0.0
        if 'ray_depth_masked_gt_image' in model.visual_names:
            visuals['ray_depth_masked_gt_image'] = np.copy(tmpgts['gt_image']).reshape(height, width, 3)
            visuals['ray_depth_masked_gt_image'][model.output["ray_depth_mask"][0].cpu().numpy() <= 0] = 0.0
        if 'gt_image_ray_masked' in model.visual_names:
            visuals['gt_image_ray_masked'] = np.copy(tmpgts['gt_image']).reshape(height, width, 3)
            visuals['gt_image_ray_masked'][ray_masks.view(height, width).cpu().numpy() <= 0,:] = 0.0
        for key, value in visuals.items():
            if key in opt.visual_items:
                visualizer.print_details("{}:{}".format(key, visuals[key].shape))
                visuals[key] = visuals[key].reshape(height, width, 3)
        print("num.{} in {} cases: time used: {} s".format(i, total_num // opt.test_num_step, time.time() - stime), " at ", visualizer.image_dir)
        visualizer.display_current_results(visuals, i, opt=opt)
        # Accumulate per-item MSE losses for this view.
        acc_dict = {}
        if "coarse_raycolor" in opt.test_color_loss_items:
            loss = torch.nn.MSELoss().to("cuda")(torch.as_tensor(visuals["coarse_raycolor"], device="cuda").view(1, -1, 3), gt_image.view(1, -1, 3).cuda())
            acc_dict.update({"coarse_raycolor": loss})
            print("coarse_raycolor", loss, mse2psnr(loss))
        if "ray_mask" in model.output and "ray_masked_coarse_raycolor" in opt.test_color_loss_items:
            # Compare only pixels whose rays intersected the point cloud.
            masked_gt = tmpgts["gt_image"].view(1, -1, 3).cuda()[ray_masks,:].reshape(1, -1, 3)
            ray_masked_coarse_raycolor = torch.as_tensor(visuals["coarse_raycolor"], device="cuda").view(1, -1, 3)[:,edge_mask,:][ray_masks,:].reshape(1, -1, 3)
            loss = torch.nn.MSELoss().to("cuda")(ray_masked_coarse_raycolor, masked_gt)
            acc_dict.update({"ray_masked_coarse_raycolor": loss})
            visualizer.print_details("{} loss:{}, PSNR:{}".format("ray_masked_coarse_raycolor", loss, mse2psnr(loss)))
        if "ray_depth_mask" in model.output and "ray_depth_masked_coarse_raycolor" in opt.test_color_loss_items:
            ray_depth_masks = model.output["ray_depth_mask"].reshape(model.output["ray_depth_mask"].shape[0], -1)
            masked_gt = torch.masked_select(tmpgts["gt_image"].view(1, -1, 3).cuda(), (ray_depth_masks[..., None].expand(-1, -1, 3)).reshape(1, -1, 3))
            ray_depth_masked_coarse_raycolor = torch.masked_select(torch.as_tensor(visuals["coarse_raycolor"], device="cuda").view(1, -1, 3), ray_depth_masks[..., None].expand(-1, -1, 3).reshape(1, -1, 3))
            loss = torch.nn.MSELoss().to("cuda")(ray_depth_masked_coarse_raycolor, masked_gt)
            acc_dict.update({"ray_depth_masked_coarse_raycolor": loss})
            visualizer.print_details("{} loss:{}, PSNR:{}".format("ray_depth_masked_coarse_raycolor", loss, mse2psnr(loss)))
        print(acc_dict.items())
        visualizer.accumulate_losses(acc_dict)
        count+=1
    visualizer.print_losses(count)
    psnr = visualizer.get_psnr(opt.test_color_loss_items[0])
    # visualizer.reset()
    print('--------------------------------Finish Test Rendering--------------------------------')
    report_metrics(visualizer.image_dir, visualizer.image_dir, visualizer.image_dir, ["psnr", "ssim", "lpips", "vgglpips", "rmse"] if lpips else ["psnr", "ssim", "rmse"], [i for i in range(0, total_num, opt.test_num_step)], imgStr="step-%04d-{}.png".format(opt.visual_items[0]),gtStr="step-%04d-{}.png".format(opt.visual_items[1]))
    print('--------------------------------Finish Evaluation--------------------------------')
    if gen_vid:
        del dataset
        visualizer.gen_video("coarse_raycolor", range(0, total_num, opt.test_num_step), test_steps)
        print('--------------------------------Finish generating vid--------------------------------')
    return psnr
def render_vid(model, dataset, visualizer, opt, bg_info, steps=0, gen_vid=True):
    """Render every frame of a dummy-rotation camera path chunk-by-chunk and
    optionally encode the frames into a video.

    Unlike ``test``, no ground truth is compared; chunks are written
    sequentially into a flat (H*W, 3) canvas rather than scattered by pixel
    index. Returns None.
    """
    print('-----------------------------------Rendering-----------------------------------')
    model.eval()
    total_num = dataset.total
    print("test set size {}, interval {}".format(total_num, opt.test_num_step))
    patch_size = opt.random_sample_size
    chunk_size = patch_size * patch_size  # rays rendered per forward pass
    height = dataset.height
    width = dataset.width
    visualizer.reset()
    for i in range(0, total_num):
        data = dataset.get_dummyrot_item(i)
        raydir = data['raydir'].clone()
        pixel_idx = data['pixel_idx'].view(data['pixel_idx'].shape[0], -1, data['pixel_idx'].shape[3]).clone()
        visuals = None
        stime = time.time()
        for k in range(0, height * width, chunk_size):
            start = k
            end = min([k + chunk_size, height * width])
            data['raydir'] = raydir[:, start:end, :]
            data["pixel_idx"] = pixel_idx[:, start:end, :]
            model.set_input(data)
            if opt.bgmodel.endswith("plane"):
                # Background handled by a textured plane model.
                img_lst, c2ws_lst, w2cs_lst, intrinsics_all, HDWD_lst, fg_masks, bg_ray_lst = bg_info
                if len(bg_ray_lst) > 0:
                    # Background rays were precomputed per frame.
                    bg_ray_all = bg_ray_lst[data["id"]]
                    bg_idx = data["pixel_idx"].view(-1,2)
                    bg_ray = bg_ray_all[:, bg_idx[:,1].long(), bg_idx[:,0].long(), :]
                else:
                    xyz_world_sect_plane = mvs_utils.gen_bg_points(data)
                    bg_ray, _ = model.set_bg(xyz_world_sect_plane, img_lst, c2ws_lst, w2cs_lst, intrinsics_all, HDWD_lst, data["plane_color"], fg_masks=fg_masks, vis=visualizer)
                data["bg_ray"] = bg_ray
            model.test()
            curr_visuals = model.get_current_visuals(data=data)
            if visuals is None:
                # First chunk: allocate a flat full-frame canvas per key.
                visuals = {}
                for key, value in curr_visuals.items():
                    if key == "gt_image": continue
                    chunk = value.cpu().numpy()
                    visuals[key] = np.zeros((height * width, 3)).astype(chunk.dtype)
                    visuals[key][start:end, :] = chunk
            else:
                for key, value in curr_visuals.items():
                    if key == "gt_image": continue
                    visuals[key][start:end, :] = value.cpu().numpy()
        for key, value in visuals.items():
            visualizer.print_details("{}:{}".format(key, visuals[key].shape))
            visuals[key] = visuals[key].reshape(height, width, 3)
        print("num.{} in {} cases: time used: {} s".format(i, total_num // opt.test_num_step, time.time() - stime), " at ", visualizer.image_dir)
        visualizer.display_current_results(visuals, i)
    print('--------------------------------Finish Evaluation--------------------------------')
    if gen_vid:
        del dataset
        visualizer.gen_video("coarse_raycolor", range(0, total_num), 0)
        print('--------------------------------Finish generating vid--------------------------------')
    return
156,794 | import sys
import os
import pathlib
import glob
import copy
import torch
import numpy as np
import time
from options import TrainOptions
from data import create_data_loader, create_dataset
from models import create_model
from models.mvs.mvs_points_model import MvsPointsModel
from models.mvs import mvs_utils, filter_utils
from pprint import pprint
from utils.visualizer import Visualizer
from utils import format as fmt
from run.evaluate import report_metrics
torch.manual_seed(0)
np.random.seed(0)
import random
import cv2
from PIL import Image
from tqdm import tqdm
import gc
def masking(mask, firstdim_lst, seconddim_lst):
    """Select the ``mask`` entries from every tensor in the two lists.

    Tensors in ``firstdim_lst`` are indexed along their first dimension,
    tensors in ``seconddim_lst`` along their second; ``None`` entries
    survive untouched. Returns the two resulting lists.
    """
    masked_first = []
    for tensor in firstdim_lst:
        masked_first.append(tensor[mask, ...] if tensor is not None else None)
    masked_second = []
    for tensor in seconddim_lst:
        masked_second.append(tensor[:, mask, ...] if tensor is not None else None)
    return masked_first, masked_second
def test(model, dataset, visualizer, opt, bg_info, test_steps=0, gen_vid=False, lpips=True):
    """Render each test view chunk-by-chunk, reassemble full images,
    accumulate color losses, and report image metrics over the test set.

    Returns the accumulated PSNR of ``opt.test_color_loss_items[0]``.

    NOTE(review): relies on a module-level ``mse2psnr`` helper that is not
    defined in the visible portion of this file — verify it is in scope.
    """
    print('-----------------------------------Testing-----------------------------------')
    model.eval()
    total_num = dataset.total
    print("test set size {}, interval {}".format(total_num, opt.test_num_step)) # 1 if test_steps == 10000 else opt.test_num_step
    patch_size = opt.random_sample_size
    chunk_size = patch_size * patch_size  # rays rendered per forward pass
    height = dataset.height
    width = dataset.width
    visualizer.reset()
    count = 0;
    for i in range(0, total_num, opt.test_num_step): # 1 if test_steps == 10000 else opt.test_num_step
        data = dataset.get_item(i)
        raydir = data['raydir'].clone()
        pixel_idx = data['pixel_idx'].view(data['pixel_idx'].shape[0], -1, data['pixel_idx'].shape[3]).clone()
        # Boolean mask over the full image of pixels covered by sampled rays.
        edge_mask = torch.zeros([height, width], dtype=torch.bool)
        edge_mask[pixel_idx[0,...,1].to(torch.long), pixel_idx[0,...,0].to(torch.long)] = 1
        edge_mask=edge_mask.reshape(-1) > 0
        np_edge_mask=edge_mask.numpy().astype(bool)
        totalpixel = pixel_idx.shape[1]
        # Stash ground truth before popping it from the model input dict.
        tmpgts = {}
        tmpgts["gt_image"] = data['gt_image'].clone()
        tmpgts["gt_mask"] = data['gt_mask'].clone() if "gt_mask" in data else None
        # data.pop('gt_image', None)
        data.pop('gt_mask', None)
        visuals = None
        stime = time.time()
        ray_masks = []
        for k in range(0, totalpixel, chunk_size):
            start = k
            end = min([k + chunk_size, totalpixel])
            data['raydir'] = raydir[:, start:end, :]
            data["pixel_idx"] = pixel_idx[:, start:end, :]
            model.set_input(data)
            if opt.bgmodel.endswith("plane"):
                # Background handled by a textured plane model.
                img_lst, c2ws_lst, w2cs_lst, intrinsics_all, HDWD_lst, fg_masks, bg_ray_lst = bg_info
                if len(bg_ray_lst) > 0:
                    # Background rays were precomputed per frame.
                    bg_ray_all = bg_ray_lst[data["id"]]
                    bg_idx = data["pixel_idx"].view(-1,2)
                    bg_ray = bg_ray_all[:, bg_idx[:,1].long(), bg_idx[:,0].long(), :]
                else:
                    xyz_world_sect_plane = mvs_utils.gen_bg_points(data)
                    bg_ray, _ = model.set_bg(xyz_world_sect_plane, img_lst, c2ws_lst, w2cs_lst, intrinsics_all, HDWD_lst, data["plane_color"], fg_masks=fg_masks, vis=visualizer)
                data["bg_ray"] = bg_ray
                # xyz_world_sect_plane_lst.append(xyz_world_sect_plane)
            model.test()
            curr_visuals = model.get_current_visuals(data=data)
            chunk_pixel_id = data["pixel_idx"].cpu().numpy().astype(np.int32)
            if visuals is None:
                # First chunk: allocate full-size canvases per visual key.
                visuals = {}
                for key, value in curr_visuals.items():
                    if value is None or key=="gt_image":
                        continue
                    chunk = value.cpu().numpy()
                    visuals[key] = np.zeros((height, width, 3)).astype(chunk.dtype)
                    visuals[key][chunk_pixel_id[0,...,1], chunk_pixel_id[0,...,0], :] = chunk
            else:
                # Scatter this chunk's pixels into the canvases.
                for key, value in curr_visuals.items():
                    if value is None or key=="gt_image":
                        continue
                    visuals[key][chunk_pixel_id[0,...,1], chunk_pixel_id[0,...,0], :] = value.cpu().numpy()
            if "ray_mask" in model.output and "ray_masked_coarse_raycolor" in opt.test_color_loss_items:
                ray_masks.append(model.output["ray_mask"] > 0)
        if len(ray_masks) > 0:
            ray_masks = torch.cat(ray_masks, dim=1)
        pixel_idx=pixel_idx.to(torch.long)
        # Rebuild the flat ground-truth image over the covered pixels.
        gt_image = torch.zeros((height*width, 3), dtype=torch.float32)
        gt_image[edge_mask, :] = tmpgts['gt_image'].clone()
        if 'gt_image' in model.visual_names:
            visuals['gt_image'] = gt_image
        if 'gt_mask' in curr_visuals:
            visuals['gt_mask'] = np.zeros((height, width, 3)).astype(chunk.dtype)
            visuals['gt_mask'][np_edge_mask,:] = tmpgts['gt_mask']
        if 'ray_masked_coarse_raycolor' in model.visual_names:
            # Zero out pixels whose rays hit no neural points.
            visuals['ray_masked_coarse_raycolor'] = np.copy(visuals["coarse_raycolor"]).reshape(height, width, 3)
            print(visuals['ray_masked_coarse_raycolor'].shape, ray_masks.cpu().numpy().shape)
            visuals['ray_masked_coarse_raycolor'][ray_masks.view(height, width).cpu().numpy() <= 0,:] = 0.0
        if 'ray_depth_masked_coarse_raycolor' in model.visual_names:
            visuals['ray_depth_masked_coarse_raycolor'] = np.copy(visuals["coarse_raycolor"]).reshape(height, width, 3)
            visuals['ray_depth_masked_coarse_raycolor'][model.output["ray_depth_mask"][0].cpu().numpy() <= 0] = 0.0
        if 'ray_depth_masked_gt_image' in model.visual_names:
            visuals['ray_depth_masked_gt_image'] = np.copy(tmpgts['gt_image']).reshape(height, width, 3)
            visuals['ray_depth_masked_gt_image'][model.output["ray_depth_mask"][0].cpu().numpy() <= 0] = 0.0
        if 'gt_image_ray_masked' in model.visual_names:
            visuals['gt_image_ray_masked'] = np.copy(tmpgts['gt_image']).reshape(height, width, 3)
            visuals['gt_image_ray_masked'][ray_masks.view(height, width).cpu().numpy() <= 0,:] = 0.0
        for key, value in visuals.items():
            if key in opt.visual_items:
                visualizer.print_details("{}:{}".format(key, visuals[key].shape))
                visuals[key] = visuals[key].reshape(height, width, 3)
        print("num.{} in {} cases: time used: {} s".format(i, total_num // opt.test_num_step, time.time() - stime), " at ", visualizer.image_dir)
        visualizer.display_current_results(visuals, i, opt=opt)
        # Accumulate per-item MSE losses for this view.
        acc_dict = {}
        if "coarse_raycolor" in opt.test_color_loss_items:
            loss = torch.nn.MSELoss().to("cuda")(torch.as_tensor(visuals["coarse_raycolor"], device="cuda").view(1, -1, 3), gt_image.view(1, -1, 3).cuda())
            acc_dict.update({"coarse_raycolor": loss})
            print("coarse_raycolor", loss, mse2psnr(loss))
        if "ray_mask" in model.output and "ray_masked_coarse_raycolor" in opt.test_color_loss_items:
            # Compare only pixels whose rays intersected the point cloud.
            masked_gt = tmpgts["gt_image"].view(1, -1, 3).cuda()[ray_masks,:].reshape(1, -1, 3)
            ray_masked_coarse_raycolor = torch.as_tensor(visuals["coarse_raycolor"], device="cuda").view(1, -1, 3)[:,edge_mask,:][ray_masks,:].reshape(1, -1, 3)
            loss = torch.nn.MSELoss().to("cuda")(ray_masked_coarse_raycolor, masked_gt)
            acc_dict.update({"ray_masked_coarse_raycolor": loss})
            visualizer.print_details("{} loss:{}, PSNR:{}".format("ray_masked_coarse_raycolor", loss, mse2psnr(loss)))
        if "ray_depth_mask" in model.output and "ray_depth_masked_coarse_raycolor" in opt.test_color_loss_items:
            ray_depth_masks = model.output["ray_depth_mask"].reshape(model.output["ray_depth_mask"].shape[0], -1)
            masked_gt = torch.masked_select(tmpgts["gt_image"].view(1, -1, 3).cuda(), (ray_depth_masks[..., None].expand(-1, -1, 3)).reshape(1, -1, 3))
            ray_depth_masked_coarse_raycolor = torch.masked_select(torch.as_tensor(visuals["coarse_raycolor"], device="cuda").view(1, -1, 3), ray_depth_masks[..., None].expand(-1, -1, 3).reshape(1, -1, 3))
            loss = torch.nn.MSELoss().to("cuda")(ray_depth_masked_coarse_raycolor, masked_gt)
            acc_dict.update({"ray_depth_masked_coarse_raycolor": loss})
            visualizer.print_details("{} loss:{}, PSNR:{}".format("ray_depth_masked_coarse_raycolor", loss, mse2psnr(loss)))
        print(acc_dict.items())
        visualizer.accumulate_losses(acc_dict)
        count+=1
    visualizer.print_losses(count)
    psnr = visualizer.get_psnr(opt.test_color_loss_items[0])
    # visualizer.reset()
    print('--------------------------------Finish Test Rendering--------------------------------')
    report_metrics(visualizer.image_dir, visualizer.image_dir, visualizer.image_dir, ["psnr", "ssim", "lpips", "vgglpips", "rmse"] if lpips else ["psnr", "ssim", "rmse"], [i for i in range(0, total_num, opt.test_num_step)], imgStr="step-%04d-{}.png".format(opt.visual_items[0]),gtStr="step-%04d-{}.png".format(opt.visual_items[1]))
    print('--------------------------------Finish Evaluation--------------------------------')
    if gen_vid:
        del dataset
        visualizer.gen_video("coarse_raycolor", range(0, total_num, opt.test_num_step), test_steps)
        print('--------------------------------Finish generating vid--------------------------------')
    return psnr
def bloat_inds(inds, shift, height, width):
    """Dilate (N, 2) integer pixel indices by a square neighborhood of
    radius ``shift``, clamping results to the image bounds.

    Returns an (N * (2*shift+1)**2, 2) tensor on CUDA; column 0 is clamped
    to [0, height-1] and column 1 to [0, width-1].
    """
    offs_a, offs_b = torch.meshgrid(
        torch.arange(-shift, shift + 1, dtype=torch.long),
        torch.arange(-shift, shift + 1, dtype=torch.long))
    neighborhood = torch.stack([offs_a, offs_b], dim=-1).reshape(1, -1, 2).cuda()
    dilated = (inds[:, None, :] + neighborhood).reshape(-1, 2)
    dilated[..., 0] = torch.clamp(dilated[..., 0], min=0, max=height - 1)
    dilated[..., 1] = torch.clamp(dilated[..., 1], min=0, max=width - 1)
    return dilated
def probe_hole(model, dataset, visualizer, opt, bg_info, test_steps=0, opacity_thresh=0.7):
    """Find image regions ("holes") the current point cloud fails to cover and
    collect candidate points to add there.

    Renders selected frames chunk-by-chunk, marks pixels whose rays miss the
    point cloud (ray_mask < 1) yet whose ground truth differs from the
    background color, dilates that miss mask by one pixel, and harvests the
    per-ray max-opacity sample locations plus the averaged shading attributes
    (color / dir / conf / embedding) there as new point candidates.

    Args:
        model: rendering model; set_input/test, plus top_ray_miss_* buffers
            when opt.prob_mode == 0.
        dataset: frame source (get_item, height, width, filter_plane).
        visualizer: dumps probe renderings and the gathered points.
        opt: global options (prob_* settings, far_thresh, bgmodel, ...).
        bg_info: unused in this function; kept for call compatibility.
        test_steps: current training step; selects the prob kernel tier and
            names the output folders.
        opacity_thresh: minimum per-ray max shading opacity for a candidate.

    Returns:
        (add_xyz, add_embedding, add_color, add_dir, add_conf) tensors of the
        gathered candidates (0-row tensors when nothing was found; add_conf /
        add_color / add_dir become None when the model emits no such map).
    """
    print('-----------------------------------Probing Holes-----------------------------------')
    # Candidate accumulators, grown by torch.cat per probed frame.
    add_xyz = torch.zeros([0, 3], device="cuda", dtype=torch.float32)
    add_conf = torch.zeros([0, 1], device="cuda", dtype=torch.float32)
    add_color = torch.zeros([0, 3], device="cuda", dtype=torch.float32)
    add_dir = torch.zeros([0, 3], device="cuda", dtype=torch.float32)
    add_embedding = torch.zeros([0, opt.point_features_dim], device="cuda", dtype=torch.float32)
    # Saved so the model's kernel size can be restored after probing.
    kernel_size = model.opt.kernel_size
    if opt.prob_kernel_size is not None:
        # Pick the query kernel triple for the current training tier.
        tier = np.sum(np.asarray(opt.prob_tiers) < test_steps)
        print("cal by tier", tier)
        model.opt.query_size = np.asarray(opt.prob_kernel_size[tier*3:tier*3+3])
        print("prob query size =", model.opt.query_size)
    # Switch the model into probe mode (extra ray_max_* outputs); reset at the end.
    model.opt.prob = 1
    total_num = len(model.top_ray_miss_ids) -1 if opt.prob_mode == 0 and opt.prob_num_step > 1 else len(dataset)
    patch_size = opt.random_sample_size
    chunk_size = patch_size * patch_size
    height = dataset.height
    width = dataset.width
    visualizer.reset()
    max_num = len(dataset) // opt.prob_num_step
    take_top = False
    # Frame selection: either the frames with the highest ray-miss loss, or a
    # random subset of the dataset.
    if opt.prob_top == 1 and opt.prob_mode <= 0: # and opt.far_thresh <= 0:
        if getattr(model, "top_ray_miss_ids", None) is not None:
            mask = model.top_ray_miss_loss[:-1] > 0.0
            frame_ids = model.top_ray_miss_ids[:-1][mask][:max_num]
            print(len(frame_ids), max_num)
            print("prob frame top_ray_miss_loss:", model.top_ray_miss_loss)
            take_top = True
        else:
            # NOTE(review): in this branch frame_ids is never assigned, so the
            # print below raises NameError -- presumably unreachable; confirm.
            print("model has no top_ray_miss_ids")
    else:
        frame_ids = list(range(len(dataset)))[:max_num]
        random.shuffle(frame_ids)
        frame_ids = frame_ids[:max_num]
    print("{}/{} has holes, id_lst to prune".format(len(frame_ids), total_num), frame_ids, opt.prob_num_step)
    print("take top:", take_top, "; prob frame ids:", frame_ids)
    with tqdm(range(len(frame_ids))) as pbar:
        for j in pbar:
            i = frame_ids[j]
            pbar.set_description("Processing frame id %d" % i)
            data = dataset.get_item(i)
            bg = data['bg_color'][None, :].cuda()
            raydir = data['raydir'].clone()
            pixel_idx = data['pixel_idx'].view(data['pixel_idx'].shape[0], -1, data['pixel_idx'].shape[3]).clone()
            # Mark which full-image pixels this item actually covers
            # (pixel_idx is (x, y), hence the [1]/[0] index order).
            edge_mask = torch.zeros([height, width], dtype=torch.bool, device='cuda')
            edge_mask[pixel_idx[0, ..., 1].to(torch.long), pixel_idx[0, ..., 0].to(torch.long)] = 1
            edge_mask = edge_mask.reshape(-1) > 0
            totalpixel = pixel_idx.shape[1]
            gt_image_full = data['gt_image'].cuda()
            probe_keys = ["coarse_raycolor", "ray_mask", "ray_max_sample_loc_w", "ray_max_far_dist", "ray_max_shading_opacity", "shading_avg_color", "shading_avg_dir", "shading_avg_conf", "shading_avg_embedding"]
            prob_maps = {}
            # Render the frame in chunks and scatter each chunk's outputs into
            # full-resolution (height, width, C) maps, one per probe key.
            for k in range(0, totalpixel, chunk_size):
                start = k
                end = min([k + chunk_size, totalpixel])
                data['raydir'] = raydir[:, start:end, :]
                data["pixel_idx"] = pixel_idx[:, start:end, :]
                model.set_input(data)
                output = model.test()
                chunk_pixel_id = data["pixel_idx"].to(torch.long)
                output["ray_mask"] = output["ray_mask"][..., None]
                for key in probe_keys:
                    # Without probe outputs only coarse_raycolor is available.
                    if "ray_max_shading_opacity" not in output and key != 'coarse_raycolor':
                        break
                    if output[key] is None:
                        prob_maps[key] = None
                    else:
                        if key not in prob_maps.keys():
                            C = output[key].shape[-1]
                            prob_maps[key] = torch.zeros((height, width, C), device="cuda", dtype=output[key].dtype)
                        prob_maps[key][chunk_pixel_id[0, ..., 1], chunk_pixel_id[0, ..., 0], :] = output[key]
            # Scatter the (possibly cropped) ground truth back to full resolution.
            gt_image = torch.zeros((height * width, 3), dtype=torch.float32, device=prob_maps["ray_mask"].device)
            gt_image[edge_mask, :] = gt_image_full
            gt_image = gt_image.reshape(height, width, 3)
            # A "missed" pixel: no ray/point interaction, but GT is not background.
            miss_ray_mask = (prob_maps["ray_mask"] < 1) * (torch.norm(gt_image - bg, dim=-1, keepdim=True) > 0.002)
            miss_ray_inds = (edge_mask.reshape(height, width, 1) * miss_ray_mask).squeeze(-1).nonzero() # N, 2
            # Dilate misses by 1 px so candidates come from adjacent hit rays.
            neighbor_inds = bloat_inds(miss_ray_inds, 1, height, width)
            neighboring_miss_mask = torch.zeros_like(gt_image[..., 0])
            neighboring_miss_mask[neighbor_inds[..., 0], neighbor_inds[...,1]] = 1
            if opt.far_thresh > 0:
                # Also grow points where the ray hit, matched GT closely, but the
                # max-opacity sample sat unusually far along the ray.
                far_ray_mask = (prob_maps["ray_mask"] > 0) * (prob_maps["ray_max_far_dist"] > opt.far_thresh) * (torch.norm(gt_image - prob_maps["coarse_raycolor"], dim=-1, keepdim=True) < 0.1)
                neighboring_miss_mask += far_ray_mask.squeeze(-1)
            # Keep only hit rays whose max shading opacity clears the threshold.
            neighboring_miss_mask = (prob_maps["ray_mask"].squeeze(-1) > 0) * neighboring_miss_mask * (prob_maps["ray_max_shading_opacity"].squeeze(-1) > opacity_thresh) > 0
            add_xyz = torch.cat([add_xyz, prob_maps["ray_max_sample_loc_w"][neighboring_miss_mask]], dim=0)
            # NOTE(review): the `* opt.prob_mul` below rescales the WHOLE
            # accumulated tensor every frame, so earlier candidates get
            # multiplied repeatedly when prob_mul != 1 -- confirm intended.
            add_conf = torch.cat([add_conf, prob_maps["shading_avg_conf"][neighboring_miss_mask]], dim=0) * opt.prob_mul if prob_maps["shading_avg_conf"] is not None else None
            add_color = torch.cat([add_color, prob_maps["shading_avg_color"][neighboring_miss_mask]], dim=0) if prob_maps["shading_avg_color"] is not None else None
            add_dir = torch.cat([add_dir, prob_maps["shading_avg_dir"][neighboring_miss_mask]], dim=0) if prob_maps["shading_avg_dir"] is not None else None
            add_embedding = torch.cat([add_embedding, prob_maps["shading_avg_embedding"][neighboring_miss_mask]], dim=0)
            # NOTE(review): len(add_xyz) > -1 is always true, so every probed
            # frame's rendering is saved -- confirm the guard was meant as > 0.
            if len(add_xyz) > -1:
                output = prob_maps["coarse_raycolor"].permute(2,0,1)[None, None,...]
                visualizer.save_ref_views({"images": output}, i, subdir="prob_img_{:04d}".format(test_steps))
    # Restore the model's normal (non-probe) configuration.
    model.opt.kernel_size = kernel_size
    if opt.bgmodel.startswith("planepoints"):
        # Drop candidates that fall on the background plane.
        mask = dataset.filter_plane(add_xyz)
        first_lst, _ = masking(mask, [add_xyz, add_embedding, add_color, add_dir, add_conf], [])
        add_xyz, add_embedding, add_color, add_dir, add_conf = first_lst
    if len(add_xyz) > 0:
        visualizer.save_neural_points("prob{:04d}".format(test_steps), add_xyz, None, None, save_ref=False)
        visualizer.print_details("vis added points to probe folder")
    if opt.prob_mode == 0 and opt.prob_num_step > 1:
        model.reset_ray_miss_ranking()
    del visualizer, prob_maps
    model.opt.prob = 0
    return add_xyz, add_embedding, add_color, add_dir, add_conf
156,795 | import sys
import os
import pathlib
import glob
import copy
import torch
import numpy as np
import time
from options import TrainOptions
from data import create_data_loader, create_dataset
from models import create_model
from models.mvs.mvs_points_model import MvsPointsModel
from models.mvs import mvs_utils, filter_utils
from pprint import pprint
from utils.visualizer import Visualizer
from utils import format as fmt
from run.evaluate import report_metrics
import random
import cv2
from PIL import Image
from tqdm import tqdm
import gc
def get_latest_epoch(resume_dir):
    """Return the largest epoch prefix among '<epoch>_states.pth' files.

    Creates resume_dir when it does not exist yet (first run). Files whose
    prefix is not a plain integer (e.g. 'best_states.pth') are ignored
    instead of raising ValueError as the previous int() cast did.

    Args:
        resume_dir: checkpoint directory to scan.

    Returns:
        The epoch prefix as a string (e.g. "12"), or None when no numeric
        checkpoint file exists.
    """
    os.makedirs(resume_dir, exist_ok=True)
    str_epoch = [f.split("_")[0] for f in os.listdir(resume_dir)
                 if f.endswith("_states.pth") and f.split("_")[0].isdigit()]
    if not str_epoch:
        return None
    int_epoch = [int(s) for s in str_epoch]
    return str_epoch[int_epoch.index(max(int_epoch))]
156,796 | import sys
import os
import pathlib
import glob
import copy
import torch
import numpy as np
import time
from options import TrainOptions
from data import create_data_loader, create_dataset
from models import create_model
from models.mvs.mvs_points_model import MvsPointsModel
from models.mvs import mvs_utils, filter_utils
from pprint import pprint
from utils.visualizer import Visualizer
from utils import format as fmt
from run.evaluate import report_metrics
import random
import cv2
from PIL import Image
from tqdm import tqdm
import gc
def create_all_bg(dataset, model, img_lst, c2ws_lst, w2cs_lst, intrinsics_all, HDWD_lst, dummy=False):
    """Precompute a full-resolution background ray color map for every view.

    Temporarily switches the dataset to "no_crop" sampling so each item covers
    the whole image, intersects each view's rays with the background plane,
    and asks the model for the plane color along every ray.

    Args:
        dataset: view source; its opt.random_sample is mutated and restored.
        model: rendering model providing set_input / set_bg.
        img_lst, c2ws_lst, w2cs_lst, intrinsics_all, HDWD_lst: per-view data
            forwarded to model.set_bg.
        dummy: when True, use dataset.get_dummyrot_item (synthetic poses)
            instead of the real items.

    Returns:
        List of (1, height, width, 3) background color tensors, one per view.
    """
    total_num = dataset.total
    height = dataset.height
    width = dataset.width
    bg_ray_lst = []
    # Remember the sampling mode so it can be restored after the loop.
    random_sample = dataset.opt.random_sample
    for i in range(0, total_num):
        dataset.opt.random_sample = "no_crop"
        if dummy:
            data = dataset.get_dummyrot_item(i)
        else:
            data = dataset.get_item(i)
        raydir = data['raydir'].clone()
        # print("data['pixel_idx']",data['pixel_idx'].shape) # 1, 512, 640, 2
        pixel_idx = data['pixel_idx'].view(data['pixel_idx'].shape[0], -1, data['pixel_idx'].shape[3]).clone()
        # With "no_crop" sampling the item spans the whole image.
        start=0
        end = height * width
        data['raydir'] = raydir[:, start:end, :]
        data["pixel_idx"] = pixel_idx[:, start:end, :]
        model.set_input(data)
        # Intersect this view's rays with the background plane (world space).
        xyz_world_sect_plane = mvs_utils.gen_bg_points(data)
        bg_ray, _ = model.set_bg(xyz_world_sect_plane, img_lst, c2ws_lst, w2cs_lst, intrinsics_all, HDWD_lst, data["plane_color"])
        bg_ray = bg_ray.reshape(bg_ray.shape[0], height, width, 3) # 1, 512, 640, 3
        bg_ray_lst.append(bg_ray)
    dataset.opt.random_sample = random_sample
    return bg_ray_lst
156,797 | import sys
import os
import pathlib
import glob
import copy
import torch
import numpy as np
import time
from options import TrainOptions
from data import create_data_loader, create_dataset
from models import create_model
from models.mvs.mvs_points_model import MvsPointsModel
from models.mvs import mvs_utils, filter_utils
from pprint import pprint
from utils.visualizer import Visualizer
from utils import format as fmt
from run.evaluate import report_metrics
import random
import cv2
from PIL import Image
from tqdm import tqdm
import gc
def save_points_conf(visualizer, xyz, points_color, points_conf, total_steps):
    """Dump the neural points bucketed by per-point confidence bands.

    Saves one point-cloud file per confidence band (0.0, 0.1, ..., 1.0 plus a
    catch-all upper bound) so the confidence distribution can be inspected.
    Debug-only: terminates the process at the end via exit().
    """
    print("total:", xyz.shape, points_color.shape, points_conf.shape)
    point_colors = points_color[0]
    point_confs = points_conf[0, ..., 0]
    # Band upper edges 0.0, 0.1, ..., 1.0, then a catch-all of 1000; each
    # band's lower edge is the previous upper edge (starting far below 0).
    uppers = [step * 0.1 for step in range(11)] + [1000]
    lowers = [-1000] + uppers[:-1]
    for low, high in zip(lowers, uppers):
        band = ((point_confs <= high) * (point_confs > low)) > 0
        visualizer.save_neural_points(f"{total_steps}-{high}", xyz[band, :], point_colors[band, :][None, ...], None, save_ref=False)
    # Hard exit preserved from the original: this routine is a one-shot debug dump.
    exit()
156,798 | import sys
import os
import pathlib
import glob
import copy
import torch
import numpy as np
import time
from options import TrainOptions
from data import create_data_loader, create_dataset
from models import create_model
from models.mvs.mvs_points_model import MvsPointsModel
from models.mvs import mvs_utils, filter_utils
from pprint import pprint
from utils.visualizer import Visualizer
from utils import format as fmt
from run.evaluate import report_metrics
import random
import cv2
from PIL import Image
from tqdm import tqdm
import gc
def create_dataset(opt):
    """Instantiate and initialize the dataset class registered under opt.dataset_name."""
    dataset_cls = find_dataset_class_by_name(opt.dataset_name)
    created = dataset_cls()
    created.initialize(opt)
    print("dataset [{}] was created".format(created.name()))
    return created
def create_render_dataset(test_opt, opt, total_steps, test_num_step=1):
    """Configure test_opt for the 'render' (video) split and build its dataset.

    Mutates test_opt in place: selects the render split, names the run after
    the current step, and fixes the random sample size at 30.
    """
    test_opt.nerf_splits = ["render"]
    test_opt.split = "render"
    test_opt.name = opt.name + "/vid_{}".format(total_steps)
    test_opt.test_num_step = test_num_step
    test_opt.random_sample_size = 30
    return create_dataset(test_opt)
156,799 | import sys
import os
import pathlib
import glob
import copy
import torch
import numpy as np
import time
from options import TrainOptions
from data import create_data_loader, create_dataset
from models import create_model
from models.mvs.mvs_points_model import MvsPointsModel
from models.mvs import mvs_utils, filter_utils
from pprint import pprint
from utils.visualizer import Visualizer
from utils import format as fmt
from run.evaluate import report_metrics
import random
import cv2
from PIL import Image
from tqdm import tqdm
import gc
def create_dataset(opt):
    """Factory: look up the dataset class by opt.dataset_name, build and initialize it."""
    instance = find_dataset_class_by_name(opt.dataset_name)()
    instance.initialize(opt)
    msg = "dataset [{}] was created".format(instance.name())
    print(msg)
    return instance
def create_test_dataset(test_opt, opt, total_steps, prob=None, test_num_step=1):
    """Configure test_opt for the 'test' split and build its dataset.

    Mutates test_opt in place; a non-None prob overrides test_opt.prob.
    """
    if prob is not None:
        test_opt.prob = prob
    test_opt.nerf_splits = ["test"]
    test_opt.split = "test"
    test_opt.name = opt.name + "/test_{}".format(total_steps)
    test_opt.test_num_step = test_num_step
    return create_dataset(test_opt)
156,800 | import sys
import os
import pathlib
import glob
import copy
import torch
import numpy as np
import time
from options import TrainOptions
from data import create_data_loader, create_dataset
from models import create_model
from models.mvs.mvs_points_model import MvsPointsModel
from models.mvs import mvs_utils, filter_utils
from pprint import pprint
from utils.visualizer import Visualizer
from utils import format as fmt
from run.evaluate import report_metrics
import random
import cv2
from PIL import Image
from tqdm import tqdm
import gc
def create_dataset(opt):
    """Build the dataset registered under opt.dataset_name and initialize it."""
    registered = find_dataset_class_by_name(opt.dataset_name)
    built = registered()
    built.initialize(opt)
    print("dataset [{}] was created".format(built.name()))
    return built
def create_comb_dataset(test_opt, opt, total_steps, prob=None, test_num_step=1):
    """Configure test_opt for the 'comb' split and build its dataset.

    Mutates test_opt in place; a non-None prob overrides test_opt.prob.
    """
    if prob is not None:
        test_opt.prob = prob
    test_opt.nerf_splits = ["comb"]
    test_opt.split = "comb"
    test_opt.name = opt.name + "/comb_{}".format(total_steps)
    test_opt.test_num_step = test_num_step
    return create_dataset(test_opt)
156,801 | import sys
import os
import pathlib
import glob
import copy
import torch
import numpy as np
import time
from options import TrainOptions
from data import create_data_loader, create_dataset
from models import create_model
from models.mvs.mvs_points_model import MvsPointsModel
from models.mvs import mvs_utils, filter_utils
from utils.visualizer import Visualizer
from utils import format as fmt
from run.evaluate import report_metrics
np.random.seed(0)
from tqdm import tqdm
import cv2
from PIL import Image
import imageio
from utils.util import to8b
def read_image(filepath, dtype=None):
    """Load an image file into a numpy array.

    When dtype is np.float32 the pixels are rescaled from [0, 255] to [0, 1];
    any other dtype (or None) returns the raw integer array unchanged.
    """
    pixels = np.asarray(Image.open(filepath))
    if dtype is not None and dtype == np.float32:
        pixels = (pixels / 255).astype(dtype)
    return pixels
def to8b(x):
    """Convert a float image in [0, 1] to uint8 in [0, 255] (values clipped)."""
    clipped = np.clip(x, 0, 1)
    return (clipped * 255).astype(np.uint8)
def render_grow(pnt_dir, iters, vids):
    """Assemble per-iteration probe renderings into point-growth videos.

    For each view id in `vids`, collects the image saved at every checkpoint
    iteration in `iters` (from pnt_dir/prob_img_<iter>/) and writes the
    sequence as both a .mov and a .gif, visualizing how the point cloud grew
    over training.

    Args:
        pnt_dir: root directory holding the prob_img_<iter> folders; outputs
            are written here as grow_video_<vid>.mov/.gif.
        iters: iterable of checkpoint iteration numbers, in playback order.
        vids: iterable of integer view ids to render.
    """
    print('-----------------------------------Rendering Grow-----------------------------------')
    for vid in tqdm(vids):
        frames = []
        # `step`, not `iter`: the original loop variable shadowed the builtin.
        for step in iters:
            img_dir = os.path.join(pnt_dir, 'prob_img_{}'.format(step))
            img_filepath = os.path.join(img_dir, "step-{}-0-ref0.png".format(vid))
            # Round-trip through float32 then back to uint8, matching the
            # original pipeline's normalization.
            frames.append(to8b(read_image(img_filepath, dtype=np.float32)))
        filename = 'grow_video_{:04d}.mov'.format(vid)
        imageio.mimwrite(os.path.join(pnt_dir, filename), frames, fps=3, quality=8)
        filename = 'grow_video_{:04d}.gif'.format(vid)
        imageio.mimwrite(os.path.join(pnt_dir, filename), frames, fps=3, format='GIF')
    return
156,805 | import sys
import os
import pathlib
import glob
import copy
import torch
import numpy as np
import time
from options import TrainOptions
from data import create_data_loader, create_dataset
from models import create_model
from models.mvs.mvs_points_model import MvsPointsModel
from models.mvs import mvs_utils, filter_utils
from pprint import pprint
from utils.visualizer import Visualizer
from utils import format as fmt
from run.evaluate import report_metrics
torch.manual_seed(0)
np.random.seed(0)
import random
import cv2
from PIL import Image
from tqdm import tqdm
import gc
def masking(mask, firstdim_lst, seconddim_lst):
    """Apply a boolean mask to two lists of tensors.

    Restores the function body that was truncated in this copy (the intact
    definition appears elsewhere in this file and selects along dim 0 for the
    first list and dim 1 for the second; None entries pass through).

    Args:
        mask: boolean index tensor.
        firstdim_lst: tensors masked along their first dimension (or None).
        seconddim_lst: tensors masked along their second dimension (or None).

    Returns:
        (first_lst, second_lst) with the mask applied element-wise.
    """
    first_lst = [item[mask, ...] if item is not None else None for item in firstdim_lst]
    second_lst = [item[:, mask, ...] if item is not None else None for item in seconddim_lst]
    return first_lst, second_lst
def create_model(opt):
    """Instantiate and initialize the model registered under opt.model.

    Restores the function body that was truncated in this copy (matching the
    intact definition earlier in the file).
    """
    model = find_model_class_by_name(opt.model)
    instance = model()
    instance.initialize(opt)
    print("model [{}] was created".format(instance.name()))
    return instance
def gen_points_filter_embeddings(dataset, visualizer, opt):
    """Generate and filter the initial neural point cloud from MVS depth.

    Runs the MVS model on every initialization view to predict per-view
    camera-space point clouds, filters them (photometric-confidence masks,
    scene bounds, alpha visual hull, optional voxel downsampling), then
    queries per-point features (embedding / color / direction / confidence)
    from the source images.

    Args:
        dataset: init data source (view_id_list, get_init_item, optional
            spacemin/spacemax and alphas attributes).
        visualizer: used to dump the generated points for inspection.
        opt: global options; mutated here (is_train=False, mode=1).

    Returns:
        Tuple of (xyz_world_all, points_embedding_all, points_color_all,
        points_dir_all, points_conf_all, per-view images, c2w list, w2c list,
        intrinsics_all, HDWD list).
    """
    print('-----------------------------------Generate Points-----------------------------------')
    opt.is_train=False
    opt.mode = 1
    model = create_model(opt)
    model.setup(opt)
    model.eval()
    cam_xyz_all = []
    intrinsics_all = []
    extrinsics_all = []
    confidence_all = []
    points_mask_all = []
    intrinsics_full_lst = []
    confidence_filtered_all = []
    near_fars_all = []
    gpu_filter = True
    # With many views, stage per-view tensors on CPU to bound GPU memory.
    cpu2gpu= len(dataset.view_id_list) > 300
    imgs_lst, HDWD_lst, c2ws_lst, w2cs_lst, intrinsics_lst = [],[],[],[],[]
    with torch.no_grad():
        for i in tqdm(range(0, len(dataset.view_id_list))):
            data = dataset.get_init_item(i)
            model.set_input(data)
            # intrinsics 1, 3, 3, 3
            points_xyz_lst, photometric_confidence_lst, point_mask_lst, intrinsics_lst, extrinsics_lst, HDWD, c2ws, w2cs, intrinsics, near_fars = model.gen_points()
            # visualizer.save_neural_points(i, points_xyz_lst[0], None, data, save_ref=opt.load_points == 0)
            B, N, C, H, W, _ = points_xyz_lst[0].shape
            # print("points_xyz_lst",points_xyz_lst[0].shape)
            cam_xyz_all.append((points_xyz_lst[0].cpu() if cpu2gpu else points_xyz_lst[0]) if gpu_filter else points_xyz_lst[0].cpu().numpy())
            # intrinsics_lst[0] 1, 3, 3
            intrinsics_all.append(intrinsics_lst[0] if gpu_filter else intrinsics_lst[0])
            extrinsics_all.append(extrinsics_lst[0] if gpu_filter else extrinsics_lst[0].cpu().numpy())
            if opt.manual_depth_view !=0:
                confidence_all.append((photometric_confidence_lst[0].cpu() if cpu2gpu else photometric_confidence_lst[0]) if gpu_filter else photometric_confidence_lst[0].cpu().numpy())
            points_mask_all.append((point_mask_lst[0].cpu() if cpu2gpu else point_mask_lst[0]) if gpu_filter else point_mask_lst[0].cpu().numpy())
            imgs_lst.append(data["images"].cpu())
            HDWD_lst.append(HDWD)
            c2ws_lst.append(c2ws)
            w2cs_lst.append(w2cs)
            intrinsics_full_lst.append(intrinsics)
            near_fars_all.append(near_fars[0,0])
            # visualizer.save_neural_points(i, points_xyz_lst[0], None, data, save_ref=opt.load_points == 0)
    # #################### start query embedding ##################
    torch.cuda.empty_cache()
    # Filter the raw per-view point clouds and lift them to world space.
    if opt.manual_depth_view != 0:
        if gpu_filter:
            _, xyz_world_all, confidence_filtered_all = filter_utils.filter_by_masks_gpu(cam_xyz_all, intrinsics_all, extrinsics_all, confidence_all, points_mask_all, opt, vis=True, return_w=True, cpu2gpu=cpu2gpu, near_fars_all=near_fars_all)
        else:
            _, xyz_world_all, confidence_filtered_all = filter_utils.filter_by_masks(cam_xyz_all, [intr.cpu().numpy() for intr in intrinsics_all], extrinsics_all, confidence_all, points_mask_all, opt)
        # print(xyz_ref_lst[0].shape) # 224909, 3
    else:
        # No confidence filtering: just apply the point masks and transform
        # camera-space points to world space via the inverse extrinsics.
        cam_xyz_all = [cam_xyz_all[i].reshape(-1,3)[points_mask_all[i].reshape(-1),:] for i in range(len(cam_xyz_all))]
        xyz_world_all = [np.matmul(np.concatenate([cam_xyz_all[i], np.ones_like(cam_xyz_all[i][..., 0:1])], axis=-1), np.transpose(np.linalg.inv(extrinsics_all[i][0,...])))[:, :3] for i in range(len(cam_xyz_all))]
        # NOTE(review): `filter_by_masks` is not defined in this module (only
        # filter_utils is imported), so this branch raises NameError -- confirm.
        xyz_world_all, cam_xyz_all, confidence_filtered_all = filter_by_masks.range_mask_lst_np(xyz_world_all, cam_xyz_all, confidence_filtered_all, opt)
        del cam_xyz_all
    # for i in range(len(xyz_world_all)):
    # visualizer.save_neural_points(i, torch.as_tensor(xyz_world_all[i], device="cuda", dtype=torch.float32), None, data, save_ref=opt.load_points==0)
    # exit()
    # xyz_world_all = xyz_world_all.cuda()
    # confidence_filtered_all = confidence_filtered_all.cuda()
    # points_vid tags every point with the index of its source view.
    points_vid = torch.cat([torch.ones_like(xyz_world_all[i][...,0:1]) * i for i in range(len(xyz_world_all))], dim=0)
    xyz_world_all = torch.cat(xyz_world_all, dim=0) if gpu_filter else torch.as_tensor(
        np.concatenate(xyz_world_all, axis=0), device="cuda", dtype=torch.float32)
    confidence_filtered_all = torch.cat(confidence_filtered_all, dim=0) if gpu_filter else torch.as_tensor(np.concatenate(confidence_filtered_all, axis=0), device="cuda", dtype=torch.float32)
    print("xyz_world_all", xyz_world_all.shape, points_vid.shape, confidence_filtered_all.shape)
    torch.cuda.empty_cache()
    # visualizer.save_neural_points(0, xyz_world_all, None, None, save_ref=False)
    # print("vis 0")
    print("%%%%%%%%%%%%% getattr(dataset, spacemin, None)", getattr(dataset, "spacemin", None))
    # Crop to the scene's axis-aligned bounding box when the dataset has one.
    if getattr(dataset, "spacemin", None) is not None:
        mask = (xyz_world_all - dataset.spacemin[None, ...].to(xyz_world_all.device)) >= 0
        mask *= (dataset.spacemax[None, ...].to(xyz_world_all.device) - xyz_world_all) >= 0
        mask = torch.prod(mask, dim=-1) > 0
        first_lst, second_lst = masking(mask, [xyz_world_all, points_vid, confidence_filtered_all], [])
        xyz_world_all, points_vid, confidence_filtered_all = first_lst
        # visualizer.save_neural_points(50, xyz_world_all, None, None, save_ref=False)
        # print("vis 50")
    # Carve by the alpha-mask visual hull when alpha maps are available.
    if getattr(dataset, "alphas", None) is not None:
        vishull_mask = mvs_utils.alpha_masking(xyz_world_all, dataset.alphas, dataset.intrinsics, dataset.cam2worlds, dataset.world2cams, dataset.near_far if opt.ranges[0] < -90.0 and getattr(dataset,"spacemin",None) is None else None, opt=opt)
        first_lst, second_lst = masking(vishull_mask, [xyz_world_all, points_vid, confidence_filtered_all], [])
        xyz_world_all, points_vid, confidence_filtered_all = first_lst
        print("alpha masking xyz_world_all", xyz_world_all.shape, points_vid.shape)
    # visualizer.save_neural_points(100, xyz_world_all, None, data, save_ref=opt.load_points == 0)
    # print("vis 100")
    # Optional voxel downsampling (subsampled first when the cloud is huge).
    if opt.vox_res > 0:
        xyz_world_all, sparse_grid_idx, sampled_pnt_idx = mvs_utils.construct_vox_points_closest(xyz_world_all.cuda() if len(xyz_world_all) < 99999999 else xyz_world_all[::(len(xyz_world_all)//99999999+1),...].cuda(), opt.vox_res)
        points_vid = points_vid[sampled_pnt_idx,:]
        confidence_filtered_all = confidence_filtered_all[sampled_pnt_idx]
        print("after voxelize:", xyz_world_all.shape, points_vid.shape)
    xyz_world_all = xyz_world_all.cuda()
    # Re-split per source view so features can be queried from each view's images.
    xyz_world_all = [xyz_world_all[points_vid[:,0]==i, :] for i in range(len(HDWD_lst))]
    confidence_filtered_all = [confidence_filtered_all[points_vid[:,0]==i] for i in range(len(HDWD_lst))]
    cam_xyz_all = [(torch.cat([xyz_world_all[i], torch.ones_like(xyz_world_all[i][...,0:1])], dim=-1) @ extrinsics_all[i][0].t())[...,:3] for i in range(len(HDWD_lst))]
    points_embedding_all, points_color_all, points_dir_all, points_conf_all = [], [], [], []
    for i in tqdm(range(len(HDWD_lst))):
        if len(xyz_world_all[i]) > 0:
            embedding, color, dir, conf = model.query_embedding(HDWD_lst[i], torch.as_tensor(cam_xyz_all[i][None, ...], device="cuda", dtype=torch.float32), torch.as_tensor(confidence_filtered_all[i][None, :, None], device="cuda", dtype=torch.float32) if len(confidence_filtered_all) > 0 else None, imgs_lst[i].cuda(), c2ws_lst[i], w2cs_lst[i], intrinsics_full_lst[i], 0, pointdir_w=True)
            points_embedding_all.append(embedding)
            points_color_all.append(color)
            points_dir_all.append(dir)
            points_conf_all.append(conf)
    xyz_world_all = torch.cat(xyz_world_all, dim=0)
    points_embedding_all = torch.cat(points_embedding_all, dim=1)
    points_color_all = torch.cat(points_color_all, dim=1) if points_color_all[0] is not None else None
    points_dir_all = torch.cat(points_dir_all, dim=1) if points_dir_all[0] is not None else None
    points_conf_all = torch.cat(points_conf_all, dim=1) if points_conf_all[0] is not None else None
    # NOTE(review): `data` here is whatever the last init view produced above.
    visualizer.save_neural_points(200, xyz_world_all, points_color_all, data, save_ref=opt.load_points == 0)
    print("vis")
    model.cleanup()
    del model
    return xyz_world_all, points_embedding_all, points_color_all, points_dir_all, points_conf_all, [img[0].cpu() for img in imgs_lst], [c2w for c2w in c2ws_lst], [w2c for w2c in w2cs_lst] , intrinsics_all, [list(HDWD) for HDWD in HDWD_lst]
156,814 | import sys
import os
import pathlib
import glob
import copy
import torch
import numpy as np
import time
from options import TrainOptions
from options import EditOptions
from data import create_data_loader, create_dataset
from models import create_model
from models.mvs.mvs_points_model import MvsPointsModel
from models.mvs import mvs_utils, filter_utils
from utils.visualizer import Visualizer
from utils import format as fmt
from run.evaluate import report_metrics
from render_vid import render_vid
torch.manual_seed(0)
np.random.seed(0)
import cv2
from PIL import Image
from tqdm import tqdm
def mse2psnr(x):
    """Convert a mean-squared-error tensor to PSNR in dB (peak signal of 1.0)."""
    return -10.0 * torch.log(x) / np.log(10.0)
156,815 | import sys
import os
import pathlib
import glob
import copy
import torch
import numpy as np
import time
from options import TrainOptions
from options import EditOptions
from data import create_data_loader, create_dataset
from models import create_model
from models.mvs.mvs_points_model import MvsPointsModel
from models.mvs import mvs_utils, filter_utils
from utils.visualizer import Visualizer
from utils import format as fmt
from run.evaluate import report_metrics
from render_vid import render_vid
np.random.seed(0)
import cv2
from PIL import Image
from tqdm import tqdm
def save_image(img_array, filepath):
    """Save a grayscale (H, W) or color (H, W, 3|4) image to filepath.

    Non-uint8 inputs are treated as floats in [0, 1]: they are clipped and
    rescaled to uint8. Parent directories are created on demand.

    Raises:
        AssertionError: if img_array is not (H, W) or (H, W, 3|4).
    """
    assert len(img_array.shape) == 2 or (len(img_array.shape) == 3
                                         and img_array.shape[2] in [3, 4])
    if img_array.dtype != np.uint8:
        img_array = (np.clip(img_array, 0, 1) * 255).astype(np.uint8)
    # Bug fix: os.path.dirname() returns "" for a bare filename, and
    # os.makedirs("") raises FileNotFoundError -- only create real parents.
    parent = os.path.dirname(filepath)
    if parent:
        os.makedirs(parent, exist_ok=True)
    Image.fromarray(img_array).save(filepath)
156,816 | import sys
import os
import pathlib
import glob
import copy
import torch
import numpy as np
import time
from options import TrainOptions
from options import EditOptions
from data import create_data_loader, create_dataset
from models import create_model
from models.mvs.mvs_points_model import MvsPointsModel
from models.mvs import mvs_utils, filter_utils
from utils.visualizer import Visualizer
from utils import format as fmt
from run.evaluate import report_metrics
from render_vid import render_vid
import cv2
from PIL import Image
from tqdm import tqdm
def masking(mask, firstdim_lst, seconddim_lst):
    """Apply a boolean mask to two lists of tensors.

    Tensors in firstdim_lst are selected along dim 0, tensors in
    seconddim_lst along dim 1; None entries pass through unchanged.

    Returns:
        (first_lst, second_lst) with the mask applied element-wise.
    """
    first_out = []
    for tensor in firstdim_lst:
        first_out.append(None if tensor is None else tensor[mask, ...])
    second_out = []
    for tensor in seconddim_lst:
        second_out.append(None if tensor is None else tensor[:, mask, ...])
    return first_out, second_out
156,817 | import sys
import os
import pathlib
import glob
import copy
import torch
import numpy as np
import time
from options import TrainOptions
from options import EditOptions
from data import create_data_loader, create_dataset
from models import create_model
from models.mvs.mvs_points_model import MvsPointsModel
from models.mvs import mvs_utils, filter_utils
from utils.visualizer import Visualizer
from utils import format as fmt
from run.evaluate import report_metrics
from render_vid import render_vid
np.random.seed(0)
import cv2
from PIL import Image
from tqdm import tqdm
def render(model, dataset, visualizer, opt, gen_vid=False):
    """Render every pose of the dataset's render split and save the results.

    Renders each view chunk-by-chunk (chunks of random_sample_size^2 rays),
    stitches the per-chunk outputs into full (height, width, 3) images, and
    hands them to the visualizer. Optionally assembles a video at the end.

    Args:
        model: rendering model (set_input / test / get_current_visuals).
        dataset: render-split dataset (get_dummyrot_item, render_total, ...).
        visualizer: saves images; its image_dir receives the outputs.
        opt: global options (random_sample_size, test_num_step).
        gen_vid: when True, also encode a "coarse_raycolor" video
            (deletes `dataset` first to free memory).
    """
    print('-----------------------------------Testing-----------------------------------')
    model.eval()
    total_num = dataset.render_total
    print("test set size {}, interval {}".format(total_num, opt.test_num_step))
    patch_size = opt.random_sample_size
    chunk_size = patch_size * patch_size
    height = dataset.height
    width = dataset.width
    visualizer.reset()
    cam_posts = []
    cam_dirs = []
    for i in range(0, total_num):
        # Dummy-rotation item: a synthetic camera pose along the render path.
        data = dataset.get_dummyrot_item(i)
        raydir = data['raydir'].clone()
        pixel_idx = data['pixel_idx'].view(data['pixel_idx'].shape[0], -1, data['pixel_idx'].shape[3]).clone()
        # cam_posts.append(data['campos'])
        # cam_dirs.append(data['raydir'] + data['campos'][None,...])
        # continue
        visuals = None
        stime = time.time()
        # Render the full image in ray chunks to bound GPU memory.
        for k in range(0, height * width, chunk_size):
            start = k
            end = min([k + chunk_size, height * width])
            data['raydir'] = raydir[:, start:end, :]
            data["pixel_idx"] = pixel_idx[:, start:end, :]
            # print("tmpgts", tmpgts["gt_image"].shape)
            # print(data["pixel_idx"])
            model.set_input(data)
            model.test()
            curr_visuals = model.get_current_visuals(data=data)
            if visuals is None:
                # First chunk: allocate a flat (H*W, 3) buffer per visual key.
                visuals = {}
                for key, value in curr_visuals.items():
                    chunk = value.cpu().numpy()
                    visuals[key] = np.zeros((height * width, 3)).astype(chunk.dtype)
                    visuals[key][start:end, :] = chunk
            else:
                for key, value in curr_visuals.items():
                    visuals[key][start:end, :] = value.cpu().numpy()
        # Reshape the stitched buffers into (H, W, 3) images and save them.
        for key, value in visuals.items():
            visualizer.print_details("{}:{}".format(key, visuals[key].shape))
            visuals[key] = visuals[key].reshape(height, width, 3)
        print("num.{} in {} cases: time used: {} s".format(i, total_num // opt.test_num_step, time.time() - stime), " at ", visualizer.image_dir)
        visualizer.display_current_results(visuals, i)
    # visualizer.save_neural_points(200, np.concatenate(cam_posts, axis=0),None, None, save_ref=False)
    # visualizer.save_neural_points(200, np.concatenate(cam_dirs, axis=0),None, None, save_ref=False)
    # print("vis")
    # exit()
    print('--------------------------------Finish Evaluation--------------------------------')
    if gen_vid:
        del dataset
        visualizer.gen_video("coarse_raycolor", range(0, total_num), 0)
        print('--------------------------------Finish generating vid--------------------------------')
    return
156,818 | import sys
import os
import pathlib
import glob
import copy
import torch
import numpy as np
import time
from options import TrainOptions
from options import EditOptions
from data import create_data_loader, create_dataset
from models import create_model
from models.mvs.mvs_points_model import MvsPointsModel
from models.mvs import mvs_utils, filter_utils
from utils.visualizer import Visualizer
from utils import format as fmt
from run.evaluate import report_metrics
from render_vid import render_vid
torch.manual_seed(0)
np.random.seed(0)
import cv2
from PIL import Image
from tqdm import tqdm
def get_latest_epoch(resume_dir):
    """Return the newest epoch prefix among '<epoch>_states.pth' files, or None.

    NOTE(review): this function is re-defined immediately below with the same
    name; Python keeps the later definition, so this copy is dead code.
    """
    # exist_ok: the first run has no checkpoint directory yet.
    os.makedirs(resume_dir, exist_ok=True)
    # '<epoch>_states.pth' -> '<epoch>'; assumes every such prefix is numeric,
    # otherwise int() below raises ValueError -- TODO confirm naming scheme.
    str_epoch = [file.split("_")[0] for file in os.listdir(resume_dir) if file.endswith("_states.pth")]
    int_epoch = [int(i) for i in str_epoch]
    return None if len(int_epoch) == 0 else str_epoch[int_epoch.index(max(int_epoch))]
def get_latest_epoch(resume_dir):
    """Return the largest epoch prefix among '<epoch>_states.pth' files.

    Creates resume_dir when it does not exist yet (first run). Files whose
    prefix is not a plain integer (e.g. 'best_states.pth') are ignored
    instead of raising ValueError as the previous int() cast did.

    Args:
        resume_dir: checkpoint directory to scan.

    Returns:
        The epoch prefix as a string (e.g. "12"), or None when no numeric
        checkpoint file exists.
    """
    os.makedirs(resume_dir, exist_ok=True)
    str_epoch = [f.split("_")[0] for f in os.listdir(resume_dir)
                 if f.endswith("_states.pth") and f.split("_")[0].isdigit()]
    if not str_epoch:
        return None
    int_epoch = [int(s) for s in str_epoch]
    return str_epoch[int_epoch.index(max(int_epoch))]
def load_parts_info(opt, name, inds_name, trans_name):
    """Load a saved neural-point checkpoint plus an optional part-index mask
    and rigid transform, for point-cloud editing.

    Args:
        opt: options; uses checkpoints_dir and resume_iter ("latest" resolves
            via get_latest_epoch).
        name: sub-directory under checkpoints_dir/edit_srcs.
        inds_name: part-index file stem, or "all" to select every point.
        trans_name: transform file stem, or "no" for the identity transform.

    Returns:
        (saved_features, inds, Rot, Translation).
        NOTE(review): types differ across branches -- the identity Matrix is
        a CUDA torch tensor while a loaded one is a numpy array, and inds is
        a bool torch tensor vs. a float numpy array; callers presumably
        normalize these -- confirm.
    """
    resume_iter = opt.resume_iter if opt.resume_iter != "latest" else get_latest_epoch(os.path.join(opt.checkpoints_dir , "edit_srcs" , name))
    checkpoint = os.path.join(opt.checkpoints_dir , "edit_srcs" , name , "{}_net_ray_marching.pth".format(resume_iter))
    # "no" disables the transform; "all" keeps every point (no index file).
    trans_file = None if trans_name.strip() == "no" else os.path.join(opt.checkpoints_dir , "edit_srcs" , name , "transforms", trans_name + ".txt")
    inds_file = None if inds_name.strip() == "all" else os.path.join(opt.checkpoints_dir , "edit_srcs" , name , "parts_index", inds_name + ".txt")
    # 4x4 homogeneous transform; rotation and translation are split below.
    Matrix = torch.eye(4, device="cuda", dtype=torch.float32) if trans_file is None else np.loadtxt(trans_file)
    Rot = Matrix[:3,:3]
    Translation = Matrix[:3, 3]
    saved_features = torch.load(checkpoint, map_location="cuda")
    print("loaded neural points from ", checkpoint, saved_features.keys())
    if inds_file is None:
        # Select all points when no part index is given.
        inds = torch.ones(len(saved_features["neural_points.xyz"]), dtype=torch.bool, device="cuda")
    else:
        inds = np.loadtxt(inds_file)
    return saved_features, inds, Rot, Translation
156,819 | import os, sys, time, argparse, cv2
import numpy as np
try:
    from skimage.measure import compare_ssim
    from skimage.measure import compare_psnr
except ImportError:  # was a bare except, which also swallowed KeyboardInterrupt etc.
    # skimage >= 0.16 moved/renamed these metrics into skimage.metrics.
    from skimage.metrics import structural_similarity
    from skimage.metrics import peak_signal_noise_ratio as compare_psnr
    def compare_ssim(gt, img, win_size, multichannel=True):
        """Back-compat shim for the removed skimage.measure.compare_ssim."""
        try:
            return structural_similarity(gt, img, win_size=win_size, multichannel=multichannel)
        except TypeError:
            # skimage >= 0.19 replaced `multichannel` with `channel_axis`
            # (the old keyword was removed entirely in later releases).
            return structural_similarity(gt, img, win_size=win_size,
                                         channel_axis=-1 if multichannel else None)
import torch
from skimage.metrics import mean_squared_error
import lpips
def report_metrics(gtFolder, imgFolder, outFolder, metrics, id_list, imgStr="step-%04d-fine_raycolor.png", gtStr="step-%04d-gt_image.png", use_gpu=False, print_info=True):
    """Compare rendered frames against ground truth and write score files.

    For each frame id in id_list, loads imgFolder/imgStr % id and
    gtFolder/gtStr % id, computes every metric named in `metrics`
    ("psnr", "ssim", "lpips", "vgglpips", "rmse"), writes per-frame values to
    outFolder/<metric>.txt and the per-metric means to outFolder/scores.txt.

    NOTE(review): images are read with cv2 (BGR channel order) but LPIPS nets
    are trained on RGB; scores stay comparable across runs, but confirm the
    channel order is intended.
    """
    total ={}
    loss_fn, loss_fn_vgg = None, None
    if print_info:
        print("test id_list", id_list)
        print(gtFolder, imgFolder, outFolder)
        print(imgStr, gtStr)
    if "lpips" in metrics:
        loss_fn = lpips.LPIPS(net='alex', version='0.1') # we follow NVSF to use alex 0.1, NeRF use lpips.LPIPS(net='vgg')
        loss_fn = loss_fn.cuda() if use_gpu else loss_fn
    if "vgglpips" in metrics:
        loss_fn_vgg = lpips.LPIPS(net='vgg', version='0.1')
        loss_fn_vgg = loss_fn_vgg.cuda() if use_gpu else loss_fn_vgg
    for i in id_list:
        img = cv2.imread(imgFolder+"/"+imgStr%i)
        gt = cv2.imread(gtFolder+"/"+gtStr%i)
        # NOTE(review): a single missing frame stops the whole evaluation
        # (break, not continue) — remaining ids are silently skipped.
        if img is None or gt is None:
            break
        # Normalize to float in [0, 1] for all metrics.
        img = np.asarray(img, np.float32)/255.0
        gt = np.asarray(gt, np.float32)/255.0
        for key in metrics:
            if key == "psnr":
                val = compare_psnr(gt, img)
            elif key == "ssim":
                val = compare_ssim(gt, img, 11, multichannel=True)
            elif key == "lpips":
                # LPIPS expects NCHW tensors normalized to [-1, 1].
                img_tensor = torch.from_numpy(img)[None].permute(0, 3, 1, 2).float() * 2 - 1.0
                gt_tensor = torch.from_numpy(gt)[None].permute(0, 3, 1, 2).float() * 2 - 1.0
                img_tensor = img_tensor.cuda() if use_gpu else img_tensor
                gt_tensor = gt_tensor.cuda() if use_gpu else gt_tensor
                val = loss_fn(img_tensor, gt_tensor).item()
            elif key == "vgglpips":
                # Same preprocessing as "lpips", scored with the VGG backbone.
                img_tensor = torch.from_numpy(img)[None].permute(0, 3, 1, 2).float() * 2 - 1.0
                gt_tensor = torch.from_numpy(gt)[None].permute(0, 3, 1, 2).float() * 2 - 1.0
                img_tensor = img_tensor.cuda() if use_gpu else img_tensor
                gt_tensor = gt_tensor.cuda() if use_gpu else gt_tensor
                val = loss_fn_vgg(img_tensor, gt_tensor).item()
            elif key == "rmse":
                val = np.sqrt(mean_squared_error(gt, img))
            else:
                raise NotImplementedError("metrics of {} not implemented".format(key))
            if key not in total:
                total[key] = [val]
            else:
                total[key].append(val)
    # Release the LPIPS networks before summarizing.
    del loss_fn
    del loss_fn_vgg
    torch.cuda.empty_cache()
    print(len(id_list), "images computed")
    if len(total) > 0:
        outStr = ""
        for key in total.keys():
            vals = np.asarray(total[key]).reshape(-1)
            np.savetxt(outFolder+"/"+key+'.txt', vals)
            outStr+= key + ": %.6f\n"%np.mean(vals)
        print(outStr)
        with open(outFolder+"/scores.txt", "w") as f:
            f.write(outStr)
156,820 | import torch
import torch.nn as nn
import torch.nn.functional as F
from .module import *
def mvsnet_loss(depth_est, depth_gt, mask):
    """Masked smooth-L1 (Huber) depth loss.

    depth_est / depth_gt: tensors of identical shape; mask: same shape, where
    values > 0.5 mark pixels with valid ground truth.
    Returns the mean smooth-L1 loss over the valid pixels.
    """
    mask = mask > 0.5
    # Fix: ``size_average=True`` is deprecated (removed in recent PyTorch);
    # ``reduction='mean'`` is the exact equivalent.
    return F.smooth_l1_loss(depth_est[mask], depth_gt[mask], reduction='mean')
156,821 | import torch
import torch.nn as nn
import torch.nn.functional as F
def homo_warping(src_fea, proj, depth_values):
    """Warp source-view features into the reference view at a set of depths.

    src_fea: [B, C, H, W] source features; proj: [B, 4, 4] relative projection
    (reference -> source, already composed with intrinsics); depth_values:
    [B, Ndepth] depth hypotheses. Returns warped features [B, C, Ndepth, H, W].
    """
    batch, channels = src_fea.shape[0], src_fea.shape[1]
    num_depth = depth_values.shape[1]
    height, width = src_fea.shape[2], src_fea.shape[3]

    with torch.no_grad():
        rot = proj[:, :3, :3]  # [B,3,3]
        trans = proj[:, :3, 3:4]  # [B,3,1]

        y, x = torch.meshgrid([torch.arange(0, height, dtype=torch.float32, device=src_fea.device),
                               torch.arange(0, width, dtype=torch.float32, device=src_fea.device)])
        y, x = y.contiguous(), x.contiguous()
        y, x = y.view(height * width), x.view(height * width)
        xyz = torch.stack((x, y, torch.ones_like(x)))  # [3, H*W] homogeneous pixel coords
        xyz = torch.unsqueeze(xyz, 0).repeat(batch, 1, 1)  # [B, 3, H*W]
        rot_xyz = torch.matmul(rot, xyz)  # [B, 3, H*W]
        # Scale each rotated ray by every depth hypothesis.
        rot_depth_xyz = rot_xyz.unsqueeze(2).repeat(1, 1, num_depth, 1) * depth_values.view(batch, 1, num_depth,
                                                                                            1)  # [B, 3, Ndepth, H*W]
        proj_xyz = rot_depth_xyz + trans.view(batch, 3, 1, 1)  # [B, 3, Ndepth, H*W]
        proj_xy = proj_xyz[:, :2, :, :] / proj_xyz[:, 2:3, :, :]  # perspective divide
        # Normalize to [-1, 1] with pixel centers at the extremes: this is the
        # align_corners=True convention of grid_sample.
        proj_x_normalized = proj_xy[:, 0, :, :] / ((width - 1) / 2) - 1
        proj_y_normalized = proj_xy[:, 1, :, :] / ((height - 1) / 2) - 1
        proj_xy = torch.stack((proj_x_normalized, proj_y_normalized), dim=3)  # [B, Ndepth, H*W, 2]
        grid = proj_xy

    # Fix: align_corners=True is required to match the (width-1)/2
    # normalization above; recent PyTorch defaults to False, which silently
    # shifts every sample by half a pixel.
    warped_src_fea = F.grid_sample(src_fea, grid.view(batch, num_depth * height, width, 2), mode='bilinear',
                                   padding_mode='zeros', align_corners=True)
    warped_src_fea = warped_src_fea.view(batch, channels, num_depth, height, width)

    return warped_src_fea
156,822 | import torch
import torch.nn as nn
import torch.nn.functional as F
def depth_regression(p, depth_values):
    """Soft-argmin depth: the expectation of the depth hypotheses under the
    probability volume.

    p: [B, D, H, W] probability volume; depth_values: [B, D] hypotheses.
    Returns the expected depth map [B, H, W].
    """
    # Append two singleton axes so depths broadcast over the spatial dims.
    broadcastable = depth_values[..., None, None]  # [B, D, 1, 1]
    return (p * broadcastable).sum(dim=1)
156,823 | import torch
import torch.nn as nn
import torch.nn.functional as F
def tocpu(x):
    """Detach *x* from the autograd graph and return an owning NumPy copy on
    the CPU (safe to mutate without touching the tensor's storage)."""
    detached = x.detach()
    return detached.cpu().numpy().copy()
156,824 | import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
def near_middle_far_ray_generation(campos,
                                   raydir,
                                   point_count,
                                   near=0.1,
                                   middle=2,
                                   far=10,
                                   middle_split=0.6,
                                   jitter=0.,
                                   **kargs):
    """Hybrid ray sampling: depth-linear between near and middle (a fraction
    ``middle_split`` of the samples), then disparity-linear between middle and
    far for the remainder.

    campos: N x 3 camera centers; raydir: N x Rays x 3, must be normalized;
    jitter: fraction of a step length used to perturb segment lengths.

    Returns (raypos N x R x S x 3, segment_length N x R x S, valid N x R x S,
    middle_point_ts N x R x S).

    NOTE(review): torch.cat(..., 2) below requires vals0/vals1 to be 3-D,
    i.e. near/middle/far must broadcast as N x 1 x 1 tensors — the scalar
    defaults would yield 2-D tensors and fail; confirm callers.
    NOTE(review): vals1 starts exactly at `middle`, duplicating vals0's last
    boundary; the later truncation to point_count appears to absorb the extra
    sample — confirm.
    """
    # Depth-linear section: [near, middle].
    tvals = torch.linspace(0,
                           1,
                           int(point_count * middle_split) + 1,
                           device=campos.device).view(1, -1)
    vals0 = near * (1 - tvals) + middle * tvals  # N x 1 x Samples
    # Disparity-linear section: [middle, far].
    tvals = torch.linspace(0,
                           1,
                           int(point_count * (1 - middle_split)) + 2,
                           device=campos.device).view(1, -1)
    vals1 = 1. / (1. / middle *
                  (1 - tvals) + 1. / far * tvals)  # N x 1 x Samples
    tvals = torch.cat([vals0, vals1], 2)
    # Perturb each segment by up to +/- jitter/2 of its own length.
    segment_length = (tvals[..., 1:] - tvals[..., :-1]) * (
        1 + jitter * (torch.rand(
            (raydir.shape[0], raydir.shape[1], tvals.shape[-1] - 1),
            device=campos.device) - 0.5))
    segment_length = segment_length[..., :point_count]
    # Rebuild boundary distances, then sample at segment midpoints.
    end_point_ts = torch.cumsum(segment_length, dim=2)
    end_point_ts = torch.cat([
        torch.zeros((end_point_ts.shape[0], end_point_ts.shape[1], 1),
                    device=end_point_ts.device), end_point_ts
    ],
                             dim=2)
    end_point_ts = near + end_point_ts
    middle_point_ts = (end_point_ts[:, :, :-1] + end_point_ts[:, :, 1:]) / 2
    raypos = campos[:, None,
                    None, :] + raydir[:, :, None, :] * middle_point_ts[:, :, :,
                                                                       None]
    valid = torch.ones_like(middle_point_ts,
                            dtype=middle_point_ts.dtype,
                            device=middle_point_ts.device)
    return raypos, segment_length, valid, middle_point_ts
def near_far_disparity_linear_ray_generation(campos,
                                             raydir,
                                             point_count,
                                             near=0.1,
                                             far=10,
                                             jitter=0.,
                                             **kargs):
    """Sample `point_count` points per ray, uniform in disparity (1/depth)
    between near and far.

    campos: N x 3 camera centers; raydir: N x Rays x 3, must be normalized;
    near/far: scalars or N x 1 x 1 tensors; jitter: fraction of a step length
    used to randomly perturb segment lengths (train-time stratification).

    Returns (raypos N x R x S x 3, segment_length N x R x S, valid N x R x S,
    middle_point_ts N x R x S), where ts are distances along the ray.
    """
    tvals = torch.linspace(0, 1, point_count + 1,
                           device=campos.device).view(1, -1)
    # Uniform steps in 1/t between 1/near and 1/far.
    tvals = 1. / (1. / near *
                  (1 - tvals) + 1. / far * tvals)  # N x 1 x Samples
    # Perturb each segment by up to +/- jitter/2 of its own length.
    segment_length = (tvals[..., 1:] -
                      tvals[..., :-1]) * (1 + jitter * (torch.rand(
                          (raydir.shape[0], raydir.shape[1], point_count),
                          device=campos.device) - 0.5))
    # Rebuild boundary distances by cumulative sum, then take midpoints as the
    # actual sample positions.
    end_point_ts = torch.cumsum(segment_length, dim=2)
    end_point_ts = torch.cat([
        torch.zeros((end_point_ts.shape[0], end_point_ts.shape[1], 1),
                    device=end_point_ts.device), end_point_ts
    ], dim=2)
    end_point_ts = near + end_point_ts
    middle_point_ts = (end_point_ts[:, :, :-1] + end_point_ts[:, :, 1:]) / 2
    raypos = campos[:, None,
                    None, :] + raydir[:, :, None, :] * middle_point_ts[:, :, :,
                                                                       None]
    valid = torch.ones_like(middle_point_ts,
                            dtype=middle_point_ts.dtype,
                            device=middle_point_ts.device)
    return raypos, segment_length, valid, middle_point_ts
def nerf_near_far_disparity_linear_ray_generation(campos,
                                                  raydir,
                                                  point_count,
                                                  near=0.1,
                                                  far=10,
                                                  jitter=1.,
                                                  **kargs):
    """NeRF-style stratified sampling, uniform in disparity (1/depth).

    campos: N x 3; raydir: N x Rays x 3, must be normalized. jitter > 0 draws
    one uniform sample inside each stratified bin, exactly as in NeRF.

    Returns (raypos N x R x S x 3, segment_length N x R x S, valid, tvals);
    the final segment gets a 1e10 sentinel length (NeRF's "to infinity").

    NOTE(review): the jitter branch indexes tvals.shape[2], which requires
    near/far to broadcast tvals to 3-D (N x 1 x 1 tensors); the scalar
    defaults would fail there — confirm callers.
    """
    tvals = torch.linspace(0, 1, point_count,
                           device=campos.device).view(1, -1)
    tvals = 1. / (1. / near *
                  (1 - tvals) + 1. / far * tvals)  # N x 1 x Samples
    if jitter > 0.0:
        # Stratified sampling: one uniform draw per bin between bin midpoints.
        mids = .5 * (tvals[..., 1:] + tvals[..., :-1])
        upper = torch.cat([mids, tvals[..., -1:]], -1)
        lower = torch.cat([tvals[..., :1], mids], -1)
        t_rand = torch.rand([tvals.shape[0],raydir.shape[1],tvals.shape[2]], device=campos.device)
        tvals = lower + (upper - lower) * t_rand
    # Per-segment lengths scaled by ||raydir|| (NeRF's dists computation).
    segment_length = torch.cat([tvals[..., 1:] - tvals[..., :-1], torch.full((tvals.shape[0], tvals.shape[1], 1), 1e10, device=tvals.device)], axis=-1) * torch.linalg.norm(raydir[..., None, :], axis=-1)
    raypos = campos[:, None,
                    None, :] + raydir[:, :, None, :] * tvals[:, :, :, None]
    valid = torch.ones_like(tvals,
                            dtype=raypos.dtype,
                            device=raypos.device)
    return raypos, segment_length, valid, tvals
def nerf_near_far_linear_ray_generation(campos,
                                        raydir,
                                        point_count,
                                        near=0.1,
                                        far=10,
                                        jitter=1.,
                                        **kargs):
    """NeRF-style stratified sampling, linear in depth between near and far.

    campos: N x 3 camera centers; raydir: N x Rays x 3, must be normalized.
    With jitter > 0 each sample is drawn uniformly inside its stratified bin
    (the classic NeRF perturbation); otherwise samples sit on the grid.

    Returns (raypos N x R x S x 3, segment_length N x R x S, valid, ts); the
    last segment carries a 1e10 sentinel length, NeRF's "to infinity".
    """
    ts = torch.linspace(0, 1, point_count, device=campos.device).view(1, -1)
    ts = near * (1. - ts) + far * ts
    if jitter > 0.0:
        # One uniform draw per bin, bounded by neighboring bin midpoints.
        midpoints = .5 * (ts[..., 1:] + ts[..., :-1])
        hi = torch.cat([midpoints, ts[..., -1:]], -1)
        lo = torch.cat([ts[..., :1], midpoints], -1)
        noise = torch.rand([ts.shape[0], raydir.shape[1], ts.shape[2]],
                           device=campos.device)
        ts = lo + (hi - lo) * noise
    # Segment lengths (with the 1e10 tail) scaled by ||raydir||.
    tail = torch.full((ts.shape[0], ts.shape[1], 1), 1e10, device=ts.device)
    segment_length = torch.cat([ts[..., 1:] - ts[..., :-1], tail], axis=-1) \
        * torch.linalg.norm(raydir[..., None, :], axis=-1)
    raypos = campos[:, None, None, :] + raydir[:, :, None, :] * ts[:, :, :, None]
    valid = torch.ones_like(ts, dtype=raypos.dtype, device=raypos.device)
    return raypos, segment_length, valid, ts
def near_far_linear_ray_generation(campos,
                                   raydir,
                                   point_count,
                                   near=0.1,
                                   far=10,
                                   jitter=0.,
                                   **kargs):
    """Sample `point_count` points per ray, linearly spaced in depth between
    near and far, with optional per-segment jitter.

    campos: N x 3 camera centers; raydir: N x Rays x 3, must be normalized;
    near/far: scalars or N x 1 x 1 tensors; jitter: fraction of a step length
    used to perturb segment lengths.

    Returns (raypos N x R x S x 3, segment_length N x R x S scaled by
    ||raydir||, valid N x R x S, middle_point_ts N x R x S).
    """
    steps = torch.linspace(0, 1, point_count + 1,
                           device=campos.device).view(1, -1)
    boundaries = near * (1 - steps) + far * steps
    # Perturb each segment by up to +/- jitter/2 of itself (a no-op when
    # jitter == 0, though the rand call is kept so RNG use is unchanged).
    noise = torch.rand((raydir.shape[0], raydir.shape[1], point_count),
                       device=campos.device) - 0.5
    segment_length = (boundaries[..., 1:] - boundaries[..., :-1]) * (1 + jitter * noise)
    # Rebuild boundary distances along the ray, then sample at midpoints.
    cum = torch.cumsum(segment_length, dim=2)
    zero_pad = torch.zeros((cum.shape[0], cum.shape[1], 1), device=cum.device)
    boundary_ts = near + torch.cat([zero_pad, cum], dim=2)
    middle_point_ts = (boundary_ts[:, :, :-1] + boundary_ts[:, :, 1:]) / 2
    raypos = campos[:, None, None, :] + raydir[:, :, None, :] * middle_point_ts[:, :, :, None]
    valid = torch.ones_like(middle_point_ts,
                            dtype=middle_point_ts.dtype,
                            device=middle_point_ts.device)
    segment_length = segment_length * torch.linalg.norm(raydir[..., None, :], axis=-1)
    return raypos, segment_length, valid, middle_point_ts
def find_ray_generation_method(name):
    """Resolve a ray-sampling strategy name to its generator function.

    Raises RuntimeError for unknown names. Lambdas defer name lookup, so a
    strategy absent from this module only fails if actually selected
    (matching the original if/elif chain's behavior).
    """
    assert isinstance(name, str), 'ray generation method name must be string'
    table = {
        'cube': lambda: cube_ray_generation,
        'near_far_linear': lambda: near_far_linear_ray_generation,
        'near_far_disparity_linear': lambda: near_far_disparity_linear_ray_generation,
        'nerf_near_far_disparity_linear': lambda: nerf_near_far_disparity_linear_ray_generation,
        'nerf_near_far_linear': lambda: nerf_near_far_linear_ray_generation,
        'near_middle_far': lambda: near_middle_far_ray_generation,
    }
    if name not in table:
        raise RuntimeError('No such ray generation method: ' + name)
    return table[name]()
156,825 | import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
def refine_ray_generation(campos,
raydir,
point_count,
prev_ts,
prev_weights,
domain_size=1.,
jitter=0,
**kargs):
def nerf_refine_ray_generation(campos,
raydir,
point_count,
prev_ts,
prev_weights,
domain_size=1.,
jitter=0,
**kargs):
def refine_cube_ray_generation(campos,
raydir,
point_count,
prev_ts,
prev_weights,
domain_size=1.,
jitter=0,
**kargs):
def find_refined_ray_generation_method(name):
    """Resolve a refinement (importance) sampler by name.

    'cube' and any 'nerf*' name get their dedicated samplers; every other
    name falls back to the generic refine_ray_generation (the historical
    "hack default"), so no name can fail resolution.
    """
    assert isinstance(name, str), 'ray generation method name must be string'
    if name == 'cube':
        return refine_cube_ray_generation
    if name.startswith('nerf'):
        return nerf_refine_ray_generation
    # Fix: the original ended with a RuntimeError after this unconditional
    # fallback, which could never fire — removed as dead code.
    return refine_ray_generation
156,826 | import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
def ray_march(ray_dist,
              ray_valid,
              ray_features,
              render_func,
              blend_func,
              bg_color=None):
    """Composite per-sample features along each ray (classic volume rendering).

    Args:
        ray_dist: N x Rays x Samples — length covered by each sample segment.
        ray_valid: N x Rays x Samples — 1 where the sample should contribute.
        ray_features: N x Rays x Samples x F — channel 0 is density; the full
            feature vector is passed to ``render_func`` for per-sample color.
        render_func: maps ray_features -> per-sample color.
        blend_func: maps (opacity, transmittance) -> blend weight.
        bg_color: optional background color composited behind the rays,
            viewed as (N, 1, 3).

    Returns:
        (ray_color N x Rays x 3, point_color, opacity, acc_transmission,
         blend_weight ...x1, background_transmission N x Rays x 1,
         background_blend_weight)
    """
    point_color = render_func(ray_features)
    # Density is channel 0; invalid samples contribute zero opacity.
    # (The feature is treated as sigma in alpha = 1 - e^{-sigma * dist}.)
    sigma = ray_features[..., 0] * ray_valid.float()
    opacity = 1 - torch.exp(-sigma * ray_dist)
    # Exclusive cumulative product = transmittance *before* each sample; the
    # epsilon keeps gradients finite when opacity saturates at 1.
    acc_transmission = torch.cumprod(1. - opacity + 1e-10, dim=-1)
    temp = torch.ones(opacity.shape[0:2] + (1, )).to(
        opacity.device).float()  # N x R x 1
    # Transmission left after the whole ray — how visible the background is.
    background_transmission = acc_transmission[:, :, [-1]]
    acc_transmission = torch.cat([temp, acc_transmission[:, :, :-1]], dim=-1)
    blend_weight = blend_func(opacity, acc_transmission)[..., None]
    ray_color = torch.sum(point_color * blend_weight, dim=-2, keepdim=False)
    if bg_color is not None:
        ray_color += bg_color.to(opacity.device).float().view(
            background_transmission.shape[0], 1, 3) * background_transmission
    background_blend_weight = blend_func(1, background_transmission)
    return ray_color, point_color, opacity, acc_transmission, blend_weight, \
        background_transmission, background_blend_weight
156,827 | import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
def alpha_ray_march(ray_dist, ray_valid, ray_features,
                    blend_func):
    """Alpha-compositing weights along each ray, without shading any color.

    ray_dist / ray_valid: N x Rays x Samples; ray_features: N x R x S x F with
    density in channel 0; blend_func maps (opacity, transmittance) -> weight.

    Returns (opacity, acc_transmission, blend_weight,
    background_transmission, background_blend_weight).
    """
    density = ray_features[..., 0] * ray_valid.float()
    alpha = 1 - torch.exp(-density * ray_dist)
    # Exclusive cumulative transmittance: T_i = prod_{j<i} (1 - alpha_j);
    # the epsilon avoids a hard zero in the product.
    trans = torch.cumprod(1. - alpha + 1e-10, dim=-1)
    bg_trans = trans[:, :, [-1]]                       # light left after the ray
    leading_ones = torch.ones(alpha.shape[0:2] + (1, )).to(
        alpha.device).float()                          # N x R x 1
    trans = torch.cat([leading_ones, trans[:, :, :-1]], dim=-1)
    weights = blend_func(alpha, trans)[..., None]
    bg_weight = blend_func(1, bg_trans)
    return alpha, trans, weights, \
        bg_trans, bg_weight
156,828 | import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from utils import format as fmt
def radiance_render(ray_feature):
    """Extract per-sample RGB radiance: channels 1:4 of the feature vector
    (channel 0 is density)."""
    rgb = ray_feature[..., 1:4]
    return rgb
def white_color(ray_feature):
    """Ignore the predicted color and shade every sample pure white.

    The clamp only serves to produce a tensor with the right shape/dtype for
    ones_like; its values are discarded.
    """
    clamped = torch.clamp(ray_feature[..., 1:4], min=0., max=1.)
    return torch.ones_like(clamped)
def find_render_function(name):
    """Resolve a render-function name ('radiance' or 'white') to its shader.

    Raises RuntimeError for unknown names. Lambdas defer the lookup so the
    mapping never touches undefined siblings for other names.
    """
    table = {
        'radiance': lambda: radiance_render,
        'white': lambda: white_color,
    }
    if name not in table:
        raise RuntimeError('Unknown render function: ' + name)
    return table[name]()
156,829 | import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from utils import format as fmt
def alpha_blend(opacity, acc_transmission):
    """Standard 'over' compositing weight: w_i = alpha_i * T_i."""
    weight = opacity * acc_transmission
    return weight
def alpha2_blend(opacity, acc_transmission):
    '''
    Consider a light collocated with the camera,
    multiply the transmission twice to simulate the light in a round trip
    '''
    round_trip = opacity * acc_transmission * acc_transmission
    return round_trip
def find_blend_function(name):
    """Resolve a blend-function name ('alpha' or 'alpha2') to its callable.

    Raises RuntimeError for unknown names; lambdas defer the sibling lookup.
    """
    table = {
        'alpha': lambda: alpha_blend,
        'alpha2': lambda: alpha2_blend,
    }
    if name not in table:
        raise RuntimeError('Unknown blend function: ' + name)
    return table[name]()
156,830 | import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from utils import format as fmt
def simple_tone_map(color, gamma=2.2, exposure=1):
def no_tone_map(color, gamma=2.2, exposure=1):
def normalize_tone_map(color):
def find_tone_map(name):
    """Resolve a tone-mapping name ('gamma', 'normalize', 'off') to its
    function; raises RuntimeError otherwise.

    NOTE: the error message says 'blend function' — kept verbatim to preserve
    the original runtime behavior.
    """
    table = {
        'gamma': lambda: simple_tone_map,
        'normalize': lambda: normalize_tone_map,
        'off': lambda: no_tone_map,
    }
    if name not in table:
        raise RuntimeError('Unknown blend function: ' + name)
    return table[name]()
156,831 | import os
import numpy as np
from numpy import dot
from math import sqrt
import pycuda
from pycuda.compiler import SourceModule
import pycuda.driver as drv
import pycuda.gpuarray as gpuarray
import matplotlib.pyplot as plt
import torch
import pickle
import time
from data.load_blender import load_blender_data
def load_pnts(point_path, point_num):
    """Load a pickled point cloud and return at most *point_num* points.

    The pickle is expected to hold a dict with key "point_xyz" (presumably an
    (N, 3) array — confirm against the writer of these files). Points are
    shuffled in place with np.random.shuffle before truncation, so the subset
    is random and depends on the global NumPy RNG state.

    NOTE(review): pickle.load is unsafe on untrusted files.
    """
    with open(point_path, 'rb') as f:
        print("point_file_path################", point_path)
        all_infos = pickle.load(f)
        point_xyz = all_infos["point_xyz"]
        print(len(point_xyz), point_xyz.dtype, np.mean(point_xyz, axis=0), np.min(point_xyz, axis=0),
              np.max(point_xyz, axis=0))
        np.random.shuffle(point_xyz)
        return point_xyz[:min(len(point_xyz), point_num), :]
def np_to_gpuarray(*args):
    """Upload each ndarray argument to the GPU as a pycuda GPUArray.

    NOTE(review): non-ndarray arguments are only printed and then dropped, so
    the returned list can be shorter than *args* — callers that unpack a
    fixed number of results would fail. Confirm this is intentional.
    """
    result = []
    for x in args:
        if isinstance(x, np.ndarray):
            result.append(pycuda.gpuarray.to_gpu(x))
        else:
            print("trans",x)
    return result
def w2img(point_xyz, transform_matrix, focal):
    """Project world points into one camera and scatter them on a debug image.

    point_xyz: (N, 3) world coords; transform_matrix: 4x4 camera matrix (the
    top-left 3x3 is used as world-to-camera rotation, last column as camera
    position); focal: pixel focal length. Displays an 800x800 matplotlib
    figure with hit pixels darkened and returns (N, 3) per-point perspective
    coords stacked as (x/-z, y/z, -z).

    NOTE(review): x is divided by -z while y is divided by +z, and the
    principal point is hard-coded at (400, 400) — looks tailored to the
    800x800 Blender renders with an OpenGL-style camera; confirm before
    reusing elsewhere.
    """
    camrot = transform_matrix[:3, :3]  # world 2 cam
    campos = transform_matrix[:3, 3]  #
    point_xyz_shift = point_xyz - campos[None, :]
    # Rotate the camera-relative offsets into camera space.
    xyz = np.sum(camrot[None, ...] * point_xyz_shift[:, :, None], axis=-2)
    xper = xyz[:, 0] / -xyz[:, 2]
    yper = xyz[:, 1] / xyz[:, 2]
    x_pixel = np.round(xper * focal + 400).astype(np.int32)
    y_pixel = np.round(yper * focal + 400).astype(np.int32)
    print("focal", focal, np.tan(.5 * 0.6911112070083618))
    print("pixel xmax xmin:", np.max(x_pixel), np.min(x_pixel), "pixel ymax ymin:", np.max(y_pixel), np.min(y_pixel))
    print("per xmax xmin:", np.max(xper), np.min(xper), "per ymax ymin:", np.max(yper), np.min(yper), "per zmax zmin:",
          np.max(xyz[:, 2]), np.min(xyz[:, 2]))
    print("min perx", -400 / focal, "max perx", 400 / focal)
    # White canvas with projected points darkened for visual inspection.
    background = np.ones([800, 800, 3], dtype=np.float32)
    background[y_pixel, x_pixel, :] = .2
    plt.figure()
    plt.imshow(background)
    return np.stack([xper, yper, -xyz[:, 2]], axis=-1)
def save_queried_points(point_xyz_tensor, point_xyz_pers_tensor, sample_pidx_tensor, pixel_idx_tensor,
                        pixel_idx_cur_tensor, vdim, vsize, ranges):
    """Visualize which neural points were selected by the per-pixel ray query.

    sample_pidx_tensor: B x R x SR x K point indices, where -1 marks an empty
    slot. Gathers the world/perspective coordinates of every valid queried
    point, renders two debug masks via render_pixel_mask /
    render_mask_pers_points (defined elsewhere in this module), and blocks on
    plt.show().
    """
    B, R, SR, K = sample_pidx_tensor.shape
    point_inds = sample_pidx_tensor[0, :, :, :]
    # Drop the -1 padding entries before gathering coordinates.
    mask = point_inds > -1
    point_inds = torch.masked_select(point_inds, mask).to(torch.int64)
    queried_point_xyz_tensor = point_xyz_tensor[0, point_inds, :]
    queried_point_xyz = queried_point_xyz_tensor.cpu().numpy()
    print("queried_point_xyz.shape", B, R, SR, K, point_inds.shape, queried_point_xyz_tensor.shape,
          queried_point_xyz.shape)
    print("pixel_idx_cur_tensor", pixel_idx_cur_tensor.shape)
    render_pixel_mask(pixel_idx_cur_tensor.cpu().numpy(), vdim[0], vdim[1])
    render_mask_pers_points(point_xyz_pers_tensor[0, point_inds, :].cpu().numpy(), vsize, ranges, vdim[0], vdim[1])
    plt.show()
def load_blender_data(basedir, splits, half_res=False, testskip=1):
    """Load a NeRF-synthetic (Blender) dataset: images, poses, a render path.

    Returns (imgs, poses, render_poses, [H, W, focal], i_split, intrinsic).

    NOTE(review): this copy relies on names not imported in this snippet
    (json, imageio, tf, blender2opencv, pose_spherical) — it only runs where
    those exist at module scope; confirm against data/load_blender.py.
    """
    splits = ['train', 'val', 'test'] if splits is None else splits
    metas = {}
    for s in splits:
        with open(os.path.join(basedir, 'transforms_{}.json'.format(s)), 'r') as fp:
            metas[s] = json.load(fp)
    all_imgs = []
    all_poses = []
    counts = [0]
    for s in splits:
        meta = metas[s]
        imgs = []
        poses = []
        # Non-train splits can be subsampled with testskip.
        if s=='train' or testskip==0:
            skip = 1
        else:
            skip = testskip
        for frame in meta['frames'][::skip]:
            fname = os.path.join(basedir, frame['file_path'] + '.png')
            imgs.append(imageio.imread(fname))
            poses.append(np.array(frame['transform_matrix']) @ blender2opencv)
        imgs = (np.array(imgs) / 255.).astype(np.float32)  # keep all 4 channels (RGBA)
        poses = np.array(poses).astype(np.float32)
        counts.append(counts[-1] + imgs.shape[0])
        all_imgs.append(imgs)
        all_poses.append(poses)
    # Index ranges of each split inside the concatenated arrays.
    i_split = [np.arange(counts[i], counts[i+1]) for i in range(len(splits))]
    imgs = np.concatenate(all_imgs, 0)
    poses = np.concatenate(all_poses, 0)
    H, W = imgs[0].shape[:2]
    # Pixel focal length from the (last split's) horizontal field of view.
    camera_angle_x = float(meta['camera_angle_x'])
    focal = .5 * W / np.tan(.5 * camera_angle_x)
    # A 360-degree spiral of poses for free-viewpoint rendering.
    stride = 20
    render_poses = np.stack([pose_spherical(angle, -30.0, 4.0) for angle in np.linspace(-180, 180, stride+1)[:-1]],0)
    if half_res:
        # NOTE(review): resize target is hard-coded to 400x400 (assumes
        # 800x800 input) and uses TensorFlow's resize_area — confirm intended.
        imgs = tf.image.resize_area(imgs, [400, 400]).numpy()
        H = H//2
        W = W//2
        focal = focal/2.
    intrinsic = np.asarray([[focal, 0, W/2],
                            [0, focal, H/2],
                            [0,0,1]])
    return imgs, poses, render_poses, [H, W, focal], i_split, intrinsic
def try_build(point_file, point_dir, ranges, vsize, vdim, vscale, max_o, P, kernel_size, SR, K, pixel_idx, obj,
              radius_limit, depth_limit, split=["train"], imgidx=0, gpu=0):
    """Smoke-test for the CUDA grid/point-query pipeline on one Blender view.

    Loads a point cloud and one dataset view, projects the points into that
    view, uploads everything to GPU `gpu`, builds the voxel index, queries
    points per pixel, and plots the results.

    NOTE(review): unpacks 5 values from load_blender_data, but the local
    definition above returns 6 — this only works with the imported
    data.load_blender.load_blender_data; confirm which one is in scope.
    NOTE(review): mutable default split=["train"] is shared across calls.
    """
    point_path = os.path.join(point_dir, point_file)
    point_xyz = load_pnts(point_path, 819200000) # 81920 233872
    imgs, poses, _, hwf, _ = load_blender_data(
        os.path.expandvars("${nrDataRoot}") + "/nerf/nerf_synthetic/{}".format(obj), split, half_res=False, testskip=1)
    H, W, focal = hwf
    plt.figure()
    plt.imshow(imgs[imgidx])
    point_xyz_pers = w2img(point_xyz, poses[imgidx], focal)
    point_xyz_tensor = torch.as_tensor(point_xyz, device="cuda:{}".format(gpu))[None, ...]
    point_xyz_pers_tensor = torch.as_tensor(point_xyz_pers, device="cuda:{}".format(gpu))[None, ...]
    actual_numpoints_tensor = torch.ones([1], device=point_xyz_tensor.device, dtype=torch.int32) * len(point_xyz)
    # Coarser index grid: bigger voxels, fewer of them.
    scaled_vsize = (vsize * vscale).astype(np.float32)
    scaled_vdim = np.ceil(vdim / vscale).astype(np.int32)
    print("vsize", vsize, "vdim", vdim, "scaled_vdim", scaled_vdim)
    range_gpu, vsize_gpu, vdim_gpu, vscale_gpu, kernel_size_gpu = np_to_gpuarray(ranges, scaled_vsize, scaled_vdim, vscale, kernel_size)
    pixel_idx_tensor = torch.as_tensor(pixel_idx, device="cuda:{}".format(gpu), dtype=torch.int32)[None, ...]
    sample_pidx_tensor, pixel_idx_cur_tensor = build_grid_point_index(pixel_idx_tensor, point_xyz_pers_tensor, actual_numpoints_tensor, kernel_size_gpu, SR, K, ranges, scaled_vsize, scaled_vdim, vscale, max_o, P, radius_limit, depth_limit, range_gpu, vsize_gpu, vdim_gpu, vscale_gpu, gpu=gpu)
    save_queried_points(point_xyz_tensor, point_xyz_pers_tensor, sample_pidx_tensor, pixel_idx_tensor,
                        pixel_idx_cur_tensor, vdim, vsize, ranges)
156,832 | import os
import numpy as np
from numpy import dot
from math import sqrt
import pycuda
from pycuda.compiler import SourceModule
import pycuda.driver as drv
import pycuda.gpuarray as gpuarray
import matplotlib.pyplot as plt
import torch
import pickle
import time
from data.load_blender import load_blender_data
def save_mask_pers_points(queried_point_xyz, vsize, ranges, w, h):
    """Rasterize queried (perspective-space) points into a w x h mask image.

    Each point's xy is binned into a pixel via
    floor((xy - ranges[:2]) / vsize[:2]); hit pixels are darkened to 0.5 on a
    white background.

    Returns the (h, w, 3) float32 image.

    Fix: the original ended with two path-building statements that referenced
    undefined names (``self.opt`` / ``opt``) inside a free function and
    always raised NameError; they also had no effect, so they were removed
    and the rasterized image is returned instead.
    """
    pixel_xy_inds = np.floor((queried_point_xyz[:, :2] - ranges[None, :2]) / vsize[None, :2]).astype(np.int32)
    print(pixel_xy_inds.shape)
    y_pixel, x_pixel = pixel_xy_inds[:, 1], pixel_xy_inds[:, 0]
    background = np.ones([h, w, 3], dtype=np.float32)
    background[y_pixel, x_pixel, :] = .5
    return background
156,833 | import os
import numpy as np
from numpy import dot
from math import sqrt
import pycuda
from pycuda.compiler import SourceModule
import pycuda.driver as drv
import pycuda.gpuarray as gpuarray
import matplotlib.pyplot as plt
import torch
import pickle
import time
from models.rendering.diff_ray_marching import near_far_linear_ray_generation, near_far_disparity_linear_ray_generation
from data.load_blender import load_blender_data
def load_pnts(point_path, point_num):
    """Load a pickled point cloud and return at most *point_num* points.

    The pickle is expected to hold a dict with key "point_xyz" (presumably an
    (N, 3) array — confirm against the writer of these files). Points are
    shuffled in place with np.random.shuffle before truncation, so the subset
    is random and depends on the global NumPy RNG state.

    NOTE(review): pickle.load is unsafe on untrusted files.
    """
    with open(point_path, 'rb') as f:
        print("point_file_path################", point_path)
        all_infos = pickle.load(f)
        point_xyz = all_infos["point_xyz"]
        print(len(point_xyz), point_xyz.dtype, np.mean(point_xyz, axis=0), np.min(point_xyz, axis=0),
              np.max(point_xyz, axis=0))
        np.random.shuffle(point_xyz)
        return point_xyz[:min(len(point_xyz), point_num), :]
156,834 | import os
import numpy as np
from numpy import dot
from math import sqrt
import pycuda
from pycuda.compiler import SourceModule
import pycuda.driver as drv
import pycuda.gpuarray as gpuarray
import matplotlib.pyplot as plt
import torch
import pickle
import time
from models.rendering.diff_ray_marching import near_far_linear_ray_generation, near_far_disparity_linear_ray_generation
from data.load_blender import load_blender_data
def np_to_gpuarray(*args):
    """Upload each ndarray argument to the GPU as a pycuda GPUArray.

    NOTE(review): non-ndarray arguments are only printed and then dropped, so
    the returned list can be shorter than *args* — callers that unpack a
    fixed number of results would fail. Confirm this is intentional.
    """
    result = []
    for x in args:
        if isinstance(x, np.ndarray):
            result.append(pycuda.gpuarray.to_gpu(x))
        else:
            print("trans",x)
    return result
156,835 | import os
import numpy as np
from numpy import dot
from math import sqrt
import pycuda
from pycuda.compiler import SourceModule
import pycuda.driver as drv
import pycuda.gpuarray as gpuarray
import matplotlib.pyplot as plt
import torch
import pickle
import time
from models.rendering.diff_ray_marching import near_far_linear_ray_generation, near_far_disparity_linear_ray_generation
from data.load_blender import load_blender_data
class lighting_fast_querier():
    def __init__(self, device, opt):
        """Bind a CUDA context on *device* and compile the query kernels.

        device: a torch.device with a valid .index; opt: options object read
        throughout (vscale, kernel_size, query_size, SR, K, inverse, ...).

        NOTE: make_context pushes a new pycuda context that stays current
        until clean_up() pops it — pair construction with clean_up().
        """
        print("querier device", device, device.index)
        self.gpu = device.index
        self.opt = opt
        drv.init()
        # self.device = drv.Device(gpu)
        self.ctx = drv.Device(self.gpu).make_context()
        # Compiled kernel handles, in the order build_cuda returns them.
        self.claim_occ, self.map_coor2occ, self.fill_occ2pnts, self.mask_raypos, self.get_shadingloc, self.query_along_ray = self.build_cuda()
        self.inverse = self.opt.inverse
        self.count=0
    def clean_up(self):
        """Pop this querier's CUDA context (pushed by __init__); call once
        when the querier is no longer needed."""
        self.ctx.pop()
    def get_hyperparameters(self, vsize_np, point_xyz_w_tensor, ranges=None):
        """Derive the voxel-grid layout that bounds the point cloud.

        vsize_np: per-axis voxel size; point_xyz_w_tensor: 1 x N x 3 world
        points; ranges: optional [x_min, y_min, z_min, x_max, y_max, z_max]
        clip box.

        Returns radius/depth limits, the padded ranges, voxel sizes and dims
        at both the fine and vscale-coarsened resolution, plus gpuarray
        copies of the quantities the CUDA kernels consume.
        """
        min_xyz, max_xyz = torch.min(point_xyz_w_tensor, dim=-2)[0][0], torch.max(point_xyz_w_tensor, dim=-2)[0][0]
        vscale_np = np.array(self.opt.vscale, dtype=np.int32)
        scaled_vsize_np = (vsize_np * vscale_np).astype(np.float32)
        if ranges is not None:
            # Clip the point-cloud bounding box to the user-provided ranges.
            min_xyz, max_xyz = torch.max(torch.stack([min_xyz, torch.as_tensor(ranges[:3], dtype=torch.float32, device=min_xyz.device)], dim=0), dim=0)[0], torch.min(torch.stack([max_xyz, torch.as_tensor(ranges[3:], dtype=torch.float32, device=min_xyz.device)], dim=0), dim=0)[0]
        # Pad by half a (scaled) kernel so boundary points keep full
        # query neighborhoods.
        min_xyz = min_xyz - torch.as_tensor(scaled_vsize_np * self.opt.kernel_size / 2, device=min_xyz.device, dtype=torch.float32)
        max_xyz = max_xyz + torch.as_tensor(scaled_vsize_np * self.opt.kernel_size / 2, device=min_xyz.device, dtype=torch.float32)
        ranges_np = torch.cat([min_xyz, max_xyz], dim=-1).cpu().numpy().astype(np.float32)
        vdim_np = (max_xyz - min_xyz).cpu().numpy() / vsize_np
        scaled_vdim_np = np.ceil(vdim_np / vscale_np).astype(np.int32)
        ranges_gpu, scaled_vsize_gpu, scaled_vdim_gpu, vscale_gpu, kernel_size_gpu, query_size_gpu = np_to_gpuarray(
            ranges_np, scaled_vsize_np, scaled_vdim_np, vscale_np, np.asarray(self.opt.kernel_size, dtype=np.int32),
            np.asarray(self.opt.query_size, dtype=np.int32))
        # Search limits scale with the voxel size (xy radius, z depth).
        radius_limit_np, depth_limit_np = self.opt.radius_limit_scale * max(vsize_np[0], vsize_np[1]), self.opt.depth_limit_scale * vsize_np[2]
        return np.asarray(radius_limit_np).astype(np.float32), np.asarray(depth_limit_np).astype(np.float32), ranges_np, vsize_np, vdim_np, scaled_vsize_np, scaled_vdim_np, vscale_np, ranges_gpu, scaled_vsize_gpu, scaled_vdim_gpu, vscale_gpu, kernel_size_gpu, query_size_gpu
    def query_points(self, pixel_idx_tensor, point_xyz_pers_tensor, point_xyz_w_tensor, actual_numpoints_tensor, h, w, intrinsic, near_depth, far_depth, ray_dirs_tensor, cam_pos_tensor, cam_rot_tensor):
        """Sample points along each camera ray and gather nearby neural points.

        Generates opt.z_depth_dim samples per ray (disparity-linear when
        opt.inverse > 0, depth-linear otherwise; jittered during training),
        then runs the CUDA grid query to collect up to K neighbor point
        indices per shading location.

        Returns (sample_pidx, per-camera sample locations via w2pers,
        sample_loc_w, sample_ray_dirs, ray_mask, vsize_np, ranges_np).
        """
        near_depth, far_depth = np.asarray(near_depth).item() , np.asarray(far_depth).item()
        radius_limit_np, depth_limit_np, ranges_np, vsize_np, vdim_np, scaled_vsize_np, scaled_vdim_np, vscale_np, range_gpu, scaled_vsize_gpu, scaled_vdim_gpu, vscale_gpu, kernel_size_gpu, query_size_gpu = self.get_hyperparameters(self.opt.vsize, point_xyz_w_tensor, ranges=self.opt.ranges)
        if self.opt.inverse > 0:
            raypos_tensor, _, _, _ = near_far_disparity_linear_ray_generation(cam_pos_tensor, ray_dirs_tensor, self.opt.z_depth_dim, near=near_depth, far=far_depth, jitter=0.3 if self.opt.is_train > 0 else 0.)
        else:
            raypos_tensor, _, _, _ = near_far_linear_ray_generation(cam_pos_tensor, ray_dirs_tensor, self.opt.z_depth_dim, near=near_depth, far=far_depth, jitter=0.3 if self.opt.is_train > 0 else 0.)
        sample_pidx_tensor, sample_loc_w_tensor, ray_mask_tensor = self.query_grid_point_index(h, w, pixel_idx_tensor, raypos_tensor, point_xyz_w_tensor, actual_numpoints_tensor, kernel_size_gpu, query_size_gpu, self.opt.SR, self.opt.K, ranges_np, scaled_vsize_np, scaled_vdim_np, vscale_np, self.opt.max_o, self.opt.P, radius_limit_np, depth_limit_np, range_gpu, scaled_vsize_gpu, scaled_vdim_gpu, vscale_gpu, ray_dirs_tensor, cam_pos_tensor, kMaxThreadsPerBlock=self.opt.gpu_maxthr)
        # Keep directions only for rays that hit at least one point,
        # replicated across the SR shading locations per ray.
        sample_ray_dirs_tensor = torch.masked_select(ray_dirs_tensor, ray_mask_tensor[..., None]>0).reshape(ray_dirs_tensor.shape[0],-1,3)[...,None,:].expand(-1, -1, self.opt.SR, -1).contiguous()
        return sample_pidx_tensor, self.w2pers(sample_loc_w_tensor, cam_rot_tensor, cam_pos_tensor), sample_loc_w_tensor, sample_ray_dirs_tensor, ray_mask_tensor, vsize_np, ranges_np
def w2pers(self, point_xyz_w, camrotc2w, campos):
    """Convert world-space points to the camera's perspective space.

    Args:
        point_xyz_w: world-space points, batched with trailing dim 3.
        camrotc2w: camera-to-world rotation matrices, B x 3 x 3.
        campos: camera positions, B x 3.

    Returns:
        Tensor shaped like the input points holding (x/z, y/z, z) in
        camera coordinates.
    """
    # Translate into the camera-centered frame, then apply the inverse
    # rotation (transpose of camera-to-world) to reach camera space.
    shifted = point_xyz_w - campos[:, None, :]
    rot_w2c = torch.transpose(camrotc2w, 1, 2)
    xyz_cam = torch.sum(shifted[..., None, :] * rot_w2c[:, None, None, ...], dim=-1)
    depth = xyz_cam[..., 2]
    # Perspective divide for x and y; z keeps the raw camera-space depth.
    proj_x = xyz_cam[..., 0] / depth
    proj_y = xyz_cam[..., 1] / depth
    return torch.stack([proj_x, proj_y, depth], dim=-1)
def build_cuda(self):
    """Compile the CUDA kernels used for voxel-grid neighbor queries.

    Builds one PyCUDA ``SourceModule`` containing:
      * custom byte/short ``atomicAdd``/``atomicCAS`` helpers (namespace
        ``cuda``) for sub-word atomic updates;
      * ``claim_occ``      — mark occupied voxels from the input points;
      * ``map_coor2occ``   — invert the voxel<->occupied-slot mapping and
        dilate occupancy by the kernel size;
      * ``fill_occ2pnts``  — bucket point indices into occupied voxels with
        reservoir sampling when a voxel exceeds P points;
      * ``mask_raypos``    — flag ray sample positions that fall in occupied
        voxels;
      * ``get_shadingloc`` — compact masked samples into per-ray SR slots;
      * ``query_neigh_along_ray_layered`` — K-nearest-neighbor search around
        each shading location over growing voxel shells.

    K is baked into the kernel source as the compile-time constant ``KN``
    (size of the per-thread distance buffer), so the module must be rebuilt
    if ``self.opt.K`` changes.

    Returns the kernel handles:
        (claim_occ, map_coor2occ, fill_occ2pnts, mask_raypos,
         get_shadingloc, query_along_ray)
    """
    mod = SourceModule(
        """
#define KN """ + str(self.opt.K)
        + """
#include <cuda.h>
#include <cuda_runtime.h>
#include <algorithm>
#include <vector>
#include <stdio.h>
#include <math.h>
#include <stdlib.h>
#include <curand_kernel.h>
namespace cuda {
static __device__ inline uint8_t atomicAdd(uint8_t *address, uint8_t val) {
size_t offset = (size_t)address & 3;
uint32_t *address_as_ui = (uint32_t *)(address - offset);
uint32_t old = *address_as_ui;
uint32_t shift = offset * 8;
uint32_t old_byte;
uint32_t newval;
uint32_t assumed;
do {
assumed = old;
old_byte = (old >> shift) & 0xff;
// preserve size in initial cast. Casting directly to uint32_t pads
// negative signed values with 1's (e.g. signed -1 = unsigned ~0).
newval = static_cast<uint8_t>(val + old_byte);
newval = (old & ~(0x000000ff << shift)) | (newval << shift);
old = atomicCAS(address_as_ui, assumed, newval);
} while (assumed != old);
return __byte_perm(old, 0, offset); // need validate
}
static __device__ inline char atomicAdd(char* address, char val) {
// offset, in bytes, of the char* address within the 32-bit address of the space that overlaps it
size_t long_address_modulo = (size_t) address & 3;
// the 32-bit address that overlaps the same memory
auto* base_address = (unsigned int*) ((char*) address - long_address_modulo);
// A 0x3210 selector in __byte_perm will simply select all four bytes in the first argument in the same order.
// The "4" signifies the position where the first byte of the second argument will end up in the output.
unsigned int selectors[] = {0x3214, 0x3240, 0x3410, 0x4210};
// for selecting bytes within a 32-bit chunk that correspond to the char* address (relative to base_address)
unsigned int selector = selectors[long_address_modulo];
unsigned int long_old, long_assumed, long_val, replacement;
long_old = *base_address;
do {
long_assumed = long_old;
// replace bits in long_old that pertain to the char address with those from val
long_val = __byte_perm(long_old, 0, long_address_modulo) + val;
replacement = __byte_perm(long_old, long_val, selector);
long_old = atomicCAS(base_address, long_assumed, replacement);
} while (long_old != long_assumed);
return __byte_perm(long_old, 0, long_address_modulo);
}
static __device__ inline int8_t atomicAdd(int8_t *address, int8_t val) {
return (int8_t)cuda::atomicAdd((char*)address, (char)val);
}
static __device__ inline short atomicAdd(short* address, short val)
{
unsigned int *base_address = (unsigned int *)((size_t)address & ~2);
unsigned int long_val = ((size_t)address & 2) ? ((unsigned int)val << 16) : (unsigned short)val;
unsigned int long_old = ::atomicAdd(base_address, long_val);
if((size_t)address & 2) {
return (short)(long_old >> 16);
} else {
unsigned int overflow = ((long_old & 0xffff) + long_val) & 0xffff0000;
if (overflow)
atomicSub(base_address, overflow);
return (short)(long_old & 0xffff);
}
}
static __device__ float cas(double *addr, double compare, double val) {
unsigned long long int *address_as_ull = (unsigned long long int *) addr;
return __longlong_as_double(atomicCAS(address_as_ull,
__double_as_longlong(compare),
__double_as_longlong(val)));
}
static __device__ float cas(float *addr, float compare, float val) {
unsigned int *address_as_uint = (unsigned int *) addr;
return __uint_as_float(atomicCAS(address_as_uint,
__float_as_uint(compare),
__float_as_uint(val)));
}
static __device__ inline uint8_t atomicCAS(uint8_t * const address, uint8_t const compare, uint8_t const value)
{
uint8_t const longAddressModulo = reinterpret_cast< size_t >( address ) & 0x3;
uint32_t *const baseAddress = reinterpret_cast< uint32_t * >( address - longAddressModulo );
uint32_t constexpr byteSelection[] = { 0x3214, 0x3240, 0x3410, 0x4210 }; // The byte position we work on is '4'.
uint32_t const byteSelector = byteSelection[ longAddressModulo ];
uint32_t const longCompare = compare;
uint32_t const longValue = value;
uint32_t longOldValue = * baseAddress;
uint32_t longAssumed;
uint8_t oldValue;
do {
// Select bytes from the old value and new value to construct a 32-bit value to use.
uint32_t const replacement = __byte_perm( longOldValue, longValue, byteSelector );
uint32_t const comparison = __byte_perm( longOldValue, longCompare, byteSelector );
longAssumed = longOldValue;
// Use 32-bit atomicCAS() to try and set the 8-bits we care about.
longOldValue = ::atomicCAS( baseAddress, comparison, replacement );
// Grab the 8-bit portion we care about from the old value at address.
oldValue = ( longOldValue >> ( 8 * longAddressModulo )) & 0xFF;
} while ( compare == oldValue and longAssumed != longOldValue ); // Repeat until other three 8-bit values stabilize.
return oldValue;
}
}
extern "C" {
__global__ void claim_occ(
const float* in_data, // B * N * 3
const int* in_actual_numpoints, // B
const int B,
const int N,
const float *d_coord_shift, // 3
const float *d_voxel_size, // 3
const int *d_grid_size, // 3
const int grid_size_vol,
const int max_o,
int* occ_idx, // B, all 0
int *coor_2_occ, // B * 400 * 400 * 400, all -1
int *occ_2_coor, // B * max_o * 3, all -1
unsigned long seconds
) {
int index = blockIdx.x * blockDim.x + threadIdx.x; // index of gpu thread
int i_batch = index / N; // index of batch
if (i_batch >= B) { return; }
int i_pt = index - N * i_batch;
if (i_pt < in_actual_numpoints[i_batch]) {
int coor[3];
const float *p_pt = in_data + index * 3;
coor[0] = (int) floor((p_pt[0] - d_coord_shift[0]) / d_voxel_size[0]);
coor[1] = (int) floor((p_pt[1] - d_coord_shift[1]) / d_voxel_size[1]);
coor[2] = (int) floor((p_pt[2] - d_coord_shift[2]) / d_voxel_size[2]);
// printf("p_pt %f %f %f %f; ", p_pt[2], d_coord_shift[2], d_coord_shift[0], d_coord_shift[1]);
if (coor[0] < 0 || coor[0] >= d_grid_size[0] || coor[1] < 0 || coor[1] >= d_grid_size[1] || coor[2] < 0 || coor[2] >= d_grid_size[2]) { return; }
int coor_indx_b = i_batch * grid_size_vol + coor[0] * (d_grid_size[1] * d_grid_size[2]) + coor[1] * d_grid_size[2] + coor[2];
int voxel_idx = coor_2_occ[coor_indx_b];
if (voxel_idx == -1) { // found an empty voxel
int old_voxel_num = atomicCAS(
&coor_2_occ[coor_indx_b],
-1, 0
);
if (old_voxel_num == -1) {
// CAS -> old val, if old val is -1
// if we get -1, this thread is the one who obtain a new voxel
// so only this thread should do the increase operator below
int tmp = atomicAdd(occ_idx+i_batch, 1); // increase the counter, return old counter
// increase the counter, return old counter
if (tmp < max_o) {
int coord_inds = (i_batch * max_o + tmp) * 3;
occ_2_coor[coord_inds] = coor[0];
occ_2_coor[coord_inds + 1] = coor[1];
occ_2_coor[coord_inds + 2] = coor[2];
} else {
curandState state;
curand_init(index+2*seconds, 0, 0, &state);
int insrtidx = ceilf(curand_uniform(&state) * (tmp+1)) - 1;
if(insrtidx < max_o){
int coord_inds = (i_batch * max_o + insrtidx) * 3;
occ_2_coor[coord_inds] = coor[0];
occ_2_coor[coord_inds + 1] = coor[1];
occ_2_coor[coord_inds + 2] = coor[2];
}
}
}
}
}
}
__global__ void map_coor2occ(
const int B,
const int *d_grid_size, // 3
const int *kernel_size, // 3
const int grid_size_vol,
const int max_o,
int* occ_idx, // B, all -1
int *coor_occ, // B * 400 * 400 * 400
int *coor_2_occ, // B * 400 * 400 * 400
int *occ_2_coor // B * max_o * 3
) {
int index = blockIdx.x * blockDim.x + threadIdx.x; // index of gpu thread
int i_batch = index / max_o; // index of batch
if (i_batch >= B) { return; }
int i_pt = index - max_o * i_batch;
if (i_pt < occ_idx[i_batch] && i_pt < max_o) {
int coor[3];
coor[0] = occ_2_coor[index*3];
if (coor[0] < 0) { return; }
coor[1] = occ_2_coor[index*3+1];
coor[2] = occ_2_coor[index*3+2];
int coor_indx_b = i_batch * grid_size_vol + coor[0] * (d_grid_size[1] * d_grid_size[2]) + coor[1] * d_grid_size[2] + coor[2];
coor_2_occ[coor_indx_b] = i_pt;
// printf("kernel_size[0] %d", kernel_size[0]);
for (int coor_x = max(0, coor[0] - kernel_size[0] / 2) ; coor_x < min(d_grid_size[0], coor[0] + (kernel_size[0] + 1) / 2); coor_x++) {
for (int coor_y = max(0, coor[1] - kernel_size[1] / 2) ; coor_y < min(d_grid_size[1], coor[1] + (kernel_size[1] + 1) / 2); coor_y++) {
for (int coor_z = max(0, coor[2] - kernel_size[2] / 2) ; coor_z < min(d_grid_size[2], coor[2] + (kernel_size[2] + 1) / 2); coor_z++) {
coor_indx_b = i_batch * grid_size_vol + coor_x * (d_grid_size[1] * d_grid_size[2]) + coor_y * d_grid_size[2] + coor_z;
if (coor_occ[coor_indx_b] > 0) { continue; }
atomicCAS(coor_occ + coor_indx_b, 0, 1);
}
}
}
}
}
__global__ void fill_occ2pnts(
const float* in_data, // B * N * 3
const int* in_actual_numpoints, // B
const int B,
const int N,
const int P,
const float *d_coord_shift, // 3
const float *d_voxel_size, // 3
const int *d_grid_size, // 3
const int grid_size_vol,
const int max_o,
int *coor_2_occ, // B * 400 * 400 * 400, all -1
int *occ_2_pnts, // B * max_o * P, all -1
int *occ_numpnts, // B * max_o, all 0
unsigned long seconds
) {
int index = blockIdx.x * blockDim.x + threadIdx.x; // index of gpu thread
int i_batch = index / N; // index of batch
if (i_batch >= B) { return; }
int i_pt = index - N * i_batch;
if (i_pt < in_actual_numpoints[i_batch]) {
int coor[3];
const float *p_pt = in_data + index * 3;
coor[0] = (int) floor((p_pt[0] - d_coord_shift[0]) / d_voxel_size[0]);
coor[1] = (int) floor((p_pt[1] - d_coord_shift[1]) / d_voxel_size[1]);
coor[2] = (int) floor((p_pt[2] - d_coord_shift[2]) / d_voxel_size[2]);
if (coor[0] < 0 || coor[0] >= d_grid_size[0] || coor[1] < 0 || coor[1] >= d_grid_size[1] || coor[2] < 0 || coor[2] >= d_grid_size[2]) { return; }
int coor_indx_b = i_batch * grid_size_vol + coor[0] * (d_grid_size[1] * d_grid_size[2]) + coor[1] * d_grid_size[2] + coor[2];
int voxel_idx = coor_2_occ[coor_indx_b];
if (voxel_idx > 0) { // found an claimed coor2occ
int occ_indx_b = i_batch * max_o + voxel_idx;
int tmp = atomicAdd(occ_numpnts + occ_indx_b, 1); // increase the counter, return old counter
if (tmp < P) {
occ_2_pnts[occ_indx_b * P + tmp] = i_pt;
} else {
curandState state;
curand_init(index+2*seconds, 0, 0, &state);
int insrtidx = ceilf(curand_uniform(&state) * (tmp+1)) - 1;
if(insrtidx < P){
occ_2_pnts[occ_indx_b * P + insrtidx] = i_pt;
}
}
}
}
}
__global__ void mask_raypos(
float *raypos, // [B, 2048, 400, 3]
int *coor_occ, // B * 400 * 400 * 400
const int B, // 3
const int R, // 3
const int D, // 3
const int grid_size_vol,
const float *d_coord_shift, // 3
const int *d_grid_size, // 3
const float *d_voxel_size, // 3
int *raypos_mask // B, R, D
) {
int index = blockIdx.x * blockDim.x + threadIdx.x; // index of gpu thread
int i_batch = index / (R * D); // index of batch
if (i_batch >= B) { return; }
int coor[3];
coor[0] = (int) floor((raypos[index*3] - d_coord_shift[0]) / d_voxel_size[0]);
coor[1] = (int) floor((raypos[index*3+1] - d_coord_shift[1]) / d_voxel_size[1]);
coor[2] = (int) floor((raypos[index*3+2] - d_coord_shift[2]) / d_voxel_size[2]);
// printf(" %f %f %f;", raypos[index*3], raypos[index*3+1], raypos[index*3+2]);
if ((coor[0] >= 0) && (coor[0] < d_grid_size[0]) && (coor[1] >= 0) && (coor[1] < d_grid_size[1]) && (coor[2] >= 0) && (coor[2] < d_grid_size[2])) {
int coor_indx_b = i_batch * grid_size_vol + coor[0] * (d_grid_size[1] * d_grid_size[2]) + coor[1] * d_grid_size[2] + coor[2];
raypos_mask[index] = coor_occ[coor_indx_b];
}
}
__global__ void get_shadingloc(
const float *raypos, // [B, 2048, 400, 3]
const int *raypos_mask, // B, R, D
const int B, // 3
const int R, // 3
const int D, // 3
const int SR, // 3
float *sample_loc, // B * R * SR * 3
int *sample_loc_mask // B * R * SR
) {
int index = blockIdx.x * blockDim.x + threadIdx.x; // index of gpu thread
int i_batch = index / (R * D); // index of batch
if (i_batch >= B) { return; }
int temp = raypos_mask[index];
if (temp >= 0) {
int r = (index - i_batch * R * D) / D;
int loc_inds = i_batch * R * SR + r * SR + temp;
sample_loc[loc_inds * 3] = raypos[index * 3];
sample_loc[loc_inds * 3 + 1] = raypos[index * 3 + 1];
sample_loc[loc_inds * 3 + 2] = raypos[index * 3 + 2];
sample_loc_mask[loc_inds] = 1;
}
}
__global__ void query_neigh_along_ray_layered(
const float* in_data, // B * N * 3
const int B,
const int SR, // num. samples along each ray e.g., 128
const int R, // e.g., 1024
const int max_o,
const int P,
const int K, // num. neighbors
const int grid_size_vol,
const float radius_limit2,
const float *d_coord_shift, // 3
const int *d_grid_size,
const float *d_voxel_size, // 3
const int *kernel_size,
const int *occ_numpnts, // B * max_o
const int *occ_2_pnts, // B * max_o * P
const int *coor_2_occ, // B * 400 * 400 * 400
const float *sample_loc, // B * R * SR * 3
const int *sample_loc_mask, // B * R * SR
int *sample_pidx, // B * R * SR * K
unsigned long seconds,
const int NN
) {
int index = blockIdx.x * blockDim.x + threadIdx.x; // index of gpu thread
int i_batch = index / (R * SR); // index of batch
if (i_batch >= B || sample_loc_mask[index] <= 0) { return; }
float centerx = sample_loc[index * 3];
float centery = sample_loc[index * 3 + 1];
float centerz = sample_loc[index * 3 + 2];
int frustx = (int) floor((centerx - d_coord_shift[0]) / d_voxel_size[0]);
int frusty = (int) floor((centery - d_coord_shift[1]) / d_voxel_size[1]);
int frustz = (int) floor((centerz - d_coord_shift[2]) / d_voxel_size[2]);
centerx = sample_loc[index * 3];
centery = sample_loc[index * 3 + 1];
centerz = sample_loc[index * 3 + 2];
int kid = 0, far_ind = 0, coor_z, coor_y, coor_x;
float far2 = 0.0;
float xyz2Buffer[KN];
for (int layer = 0; layer < (kernel_size[0]+1)/2; layer++){
for (int x = max(-frustx, -layer); x < min(d_grid_size[0] - frustx, layer + 1); x++) {
coor_x = frustx + x;
for (int y = max(-frusty, -layer); y < min(d_grid_size[1] - frusty, layer + 1); y++) {
coor_y = frusty + y;
for (int z = max(-frustz, -layer); z < min(d_grid_size[2] - frustz, layer + 1); z++) {
coor_z = z + frustz;
if (max(abs(z), max(abs(x), abs(y))) != layer) continue;
int coor_indx_b = i_batch * grid_size_vol + coor_x * (d_grid_size[1] * d_grid_size[2]) + coor_y * d_grid_size[2] + coor_z;
int occ_indx = coor_2_occ[coor_indx_b] + i_batch * max_o;
if (occ_indx >= 0) {
for (int g = 0; g < min(P, occ_numpnts[occ_indx]); g++) {
int pidx = occ_2_pnts[occ_indx * P + g];
float x_v = (in_data[pidx*3]-centerx);
float y_v = (in_data[pidx*3 + 1]-centery);
float z_v = (in_data[pidx*3 + 2]-centerz);
float xyz2 = x_v * x_v + y_v * y_v + z_v * z_v;
if ((radius_limit2 == 0 || xyz2 <= radius_limit2)){
if (kid++ < K) {
sample_pidx[index * K + kid - 1] = pidx;
xyz2Buffer[kid-1] = xyz2;
if (xyz2 > far2){
far2 = xyz2;
far_ind = kid - 1;
}
} else {
if (xyz2 < far2) {
sample_pidx[index * K + far_ind] = pidx;
xyz2Buffer[far_ind] = xyz2;
far2 = xyz2;
for (int i = 0; i < K; i++) {
if (xyz2Buffer[i] > far2) {
far2 = xyz2Buffer[i];
far_ind = i;
}
}
}
}
}
}
}
}
}
}
if (kid >= K) break;
}
}
}
""", no_extern_c=True)
    # Resolve the compiled kernels to callable handles.
    claim_occ = mod.get_function("claim_occ")
    map_coor2occ = mod.get_function("map_coor2occ")
    fill_occ2pnts = mod.get_function("fill_occ2pnts")
    mask_raypos = mod.get_function("mask_raypos")
    get_shadingloc = mod.get_function("get_shadingloc")
    # NOTE(review): "query_rand_along_ray" is not defined in the CUDA source
    # above, so self.opt.NN <= 0 would fail here with a lookup error — confirm
    # whether the random-query kernel was removed intentionally.
    query_along_ray = mod.get_function("query_neigh_along_ray_layered") if self.opt.NN > 0 else mod.get_function("query_rand_along_ray")
    return claim_occ, map_coor2occ, fill_occ2pnts, mask_raypos, get_shadingloc, query_along_ray
def switch_pixel_id(self, pixel_idx_tensor, h):
    """Flip the vertical pixel coordinate so the y-axis origin moves to the
    opposite image edge; the x coordinate is left untouched.

    Args:
        pixel_idx_tensor: integer pixel coordinates with trailing dim 2 (x, y).
        h: image height in pixels.

    Returns:
        Tensor of the same shape with y replaced by ``h - 1 - y``.
    """
    x_col = pixel_idx_tensor[..., 0:1]
    flipped_y = h - 1 - pixel_idx_tensor[..., 1:2]
    # print("pixel_id", pixel_id.shape, torch.min(pixel_id, dim=-2)[0], torch.max(pixel_id, dim=-2)[0])
    return torch.cat([x_col, flipped_y], dim=-1)
def build_occ_vox(self, point_xyz_w_tensor, actual_numpoints_tensor, B, N, P, max_o, scaled_vdim_np, kMaxThreadsPerBlock, gridSize, scaled_vsize_gpu, scaled_vdim_gpu, kernel_size_gpu, grid_size_vol, d_coord_shift):
    """Build the occupancy voxel grid and point-bucket structures on the GPU.

    Runs three CUDA kernels in sequence:
      1. ``claim_occ``     — each point claims its voxel; occupied voxels get
         slot indices (with reservoir sampling past ``max_o`` slots).
      2. ``map_coor2occ``  — rebuilds the coor->slot map and dilates the
         occupancy mask by the kernel size.
      3. ``fill_occ2pnts`` — buckets up to ``P`` point indices per occupied
         voxel (reservoir sampling past ``P``).

    Returns:
        (coor_occ, occ_2_coor, coor_2_occ, occ_idx, occ_numpnts, occ_2_pnts)
        int32 tensors on the points' device.
    """
    device = point_xyz_w_tensor.device
    # Dilated occupancy mask over the scaled voxel grid (0/1 per cell).
    coor_occ_tensor = torch.zeros([B, scaled_vdim_np[0], scaled_vdim_np[1], scaled_vdim_np[2]], dtype=torch.int32, device=device)
    # Per-occupied-voxel point index buckets; -1 marks empty slots.
    occ_2_pnts_tensor = torch.full([B, max_o, P], -1, dtype=torch.int32, device=device)
    occ_2_coor_tensor = torch.full([B, max_o, 3], -1, dtype=torch.int32, device=device)
    occ_numpnts_tensor = torch.zeros([B, max_o], dtype=torch.int32, device=device)
    coor_2_occ_tensor = torch.full([B, scaled_vdim_np[0], scaled_vdim_np[1], scaled_vdim_np[2]], -1, dtype=torch.int32, device=device)
    occ_idx_tensor = torch.zeros([B], dtype=torch.int32, device=device)
    # Wall-clock seed feeds curand for the reservoir-sampling branches.
    seconds = time.time()
    self.claim_occ(
        Holder(point_xyz_w_tensor),
        Holder(actual_numpoints_tensor),
        np.int32(B),
        np.int32(N),
        d_coord_shift,
        scaled_vsize_gpu,
        scaled_vdim_gpu,
        np.int32(grid_size_vol),
        np.int32(max_o),
        Holder(occ_idx_tensor),
        Holder(coor_2_occ_tensor),
        Holder(occ_2_coor_tensor),
        np.uint64(seconds),
        block=(kMaxThreadsPerBlock, 1, 1), grid=(gridSize, 1))
    # torch.cuda.synchronize()
    # Reset coor_2_occ: map_coor2occ rebuilds it from occ_2_coor so that cells
    # dropped by reservoir sampling in claim_occ are cleared.
    coor_2_occ_tensor = torch.full([B, scaled_vdim_np[0], scaled_vdim_np[1], scaled_vdim_np[2]], -1,
                                   dtype=torch.int32, device=device)
    gridSize = int((B * max_o + kMaxThreadsPerBlock - 1) / kMaxThreadsPerBlock)
    self.map_coor2occ(
        np.int32(B),
        scaled_vdim_gpu,
        kernel_size_gpu,
        np.int32(grid_size_vol),
        np.int32(max_o),
        Holder(occ_idx_tensor),
        Holder(coor_occ_tensor),
        Holder(coor_2_occ_tensor),
        Holder(occ_2_coor_tensor),
        block=(kMaxThreadsPerBlock, 1, 1), grid=(gridSize, 1))
    # torch.cuda.synchronize()
    seconds = time.time()
    gridSize = int((B * N + kMaxThreadsPerBlock - 1) / kMaxThreadsPerBlock)
    self.fill_occ2pnts(
        Holder(point_xyz_w_tensor),
        Holder(actual_numpoints_tensor),
        np.int32(B),
        np.int32(N),
        np.int32(P),
        d_coord_shift,
        scaled_vsize_gpu,
        scaled_vdim_gpu,
        np.int32(grid_size_vol),
        np.int32(max_o),
        Holder(coor_2_occ_tensor),
        Holder(occ_2_pnts_tensor),
        Holder(occ_numpnts_tensor),
        np.uint64(seconds),
        block=(kMaxThreadsPerBlock, 1, 1), grid=(gridSize, 1))
    # torch.cuda.synchronize()
    return coor_occ_tensor, occ_2_coor_tensor, coor_2_occ_tensor, occ_idx_tensor, occ_numpnts_tensor, occ_2_pnts_tensor
def query_grid_point_index(self, h, w, pixel_idx_tensor, raypos_tensor, point_xyz_w_tensor, actual_numpoints_tensor, kernel_size_gpu, query_size_gpu, SR, K, ranges_np, scaled_vsize_np, scaled_vdim_np, vscale_np, max_o, P, radius_limit_np, depth_limit_np, ranges_gpu, scaled_vsize_gpu, scaled_vdim_gpu, vscale_gpu, ray_dirs_tensor, cam_pos_tensor, kMaxThreadsPerBlock = 1024):
    """Find K neighbor point indices for up to SR shading samples per ray.

    Pipeline: build the occupancy voxel grid; mask the raw ray samples by
    occupancy; drop rays with no occupied samples; compact the surviving
    samples into SR slots per ray; run the layered neighbor-query kernel;
    finally drop rays whose samples found no neighbors at all.

    Returns:
        sample_pidx: (B, R', SR, K) int32 neighbor indices, -1 for empty slots.
        sample_loc:  (B, R', SR, 3) world-space shading locations.
        ray_mask:    (B, R_orig) int8 mask of rays kept, aligned to the input
                     pixel order.
    """
    device = point_xyz_w_tensor.device
    B, N = point_xyz_w_tensor.shape[0], point_xyz_w_tensor.shape[1]
    pixel_size = scaled_vdim_np[0] * scaled_vdim_np[1]
    grid_size_vol = pixel_size * scaled_vdim_np[2]
    d_coord_shift = ranges_gpu[:3]
    # R is immediately recomputed from pixel_idx_tensor; only D survives from
    # raypos_tensor's shape (samples per ray).
    R, D = raypos_tensor.shape[1], raypos_tensor.shape[2]
    R = pixel_idx_tensor.reshape(B, -1, 2).shape[1]
    gridSize = int((B * N + kMaxThreadsPerBlock - 1) / kMaxThreadsPerBlock)
    coor_occ_tensor, occ_2_coor_tensor, coor_2_occ_tensor, occ_idx_tensor, occ_numpnts_tensor, occ_2_pnts_tensor = self.build_occ_vox(point_xyz_w_tensor, actual_numpoints_tensor, B, N, P, max_o, scaled_vdim_np, kMaxThreadsPerBlock, gridSize, scaled_vsize_gpu, scaled_vdim_gpu, query_size_gpu, grid_size_vol, d_coord_shift)
    # torch.cuda.synchronize()
    # print("coor_occ_tensor", torch.min(coor_occ_tensor), torch.max(coor_occ_tensor), torch.min(occ_2_coor_tensor), torch.max(occ_2_coor_tensor), torch.min(coor_2_occ_tensor), torch.max(coor_2_occ_tensor), torch.min(occ_idx_tensor), torch.max(occ_idx_tensor), torch.min(occ_numpnts_tensor), torch.max(occ_numpnts_tensor), torch.min(occ_2_pnts_tensor), torch.max(occ_2_pnts_tensor), occ_2_pnts_tensor.shape)
    # print("occ_numpnts_tensor", torch.sum(occ_numpnts_tensor > 0), ranges_np)
    # vis_vox(ranges_np, scaled_vsize_np, coor_2_occ_tensor)
    # Mark each raw ray sample that lands in an occupied voxel.
    raypos_mask_tensor = torch.zeros([B, R, D], dtype=torch.int32, device=device)
    gridSize = int((B * R * D + kMaxThreadsPerBlock - 1) / kMaxThreadsPerBlock)
    self.mask_raypos(
        Holder(raypos_tensor), # [1, 2048, 400, 3]
        Holder(coor_occ_tensor), # [1, 2048, 400, 3]
        np.int32(B),
        np.int32(R),
        np.int32(D),
        np.int32(grid_size_vol),
        d_coord_shift,
        scaled_vdim_gpu,
        scaled_vsize_gpu,
        Holder(raypos_mask_tensor),
        block=(kMaxThreadsPerBlock, 1, 1), grid=(gridSize, 1)
    )
    # torch.cuda.synchronize()
    # print("raypos_mask_tensor", raypos_mask_tensor.shape, torch.sum(coor_occ_tensor), torch.sum(raypos_mask_tensor))
    # save_points(raypos_tensor.reshape(-1, 3), "./", "rawraypos_pnts")
    # raypos_masked = torch.masked_select(raypos_tensor, raypos_mask_tensor[..., None] > 0)
    # save_points(raypos_masked.reshape(-1, 3), "./", "raypos_pnts")
    # Keep only rays that have at least one occupied sample; R shrinks to the
    # number of surviving rays.
    ray_mask_tensor = torch.max(raypos_mask_tensor, dim=-1)[0] > 0 # B, R
    R = torch.sum(ray_mask_tensor.to(torch.int32)).cpu().numpy()
    # print("R", torch.sum(ray_mask_tensor.to(torch.int32)), R)
    sample_loc_tensor = torch.zeros([B, R, SR, 3], dtype=torch.float32, device=device)
    sample_pidx_tensor = torch.full([B, R, SR, K], -1, dtype=torch.int32, device=device)
    if R > 0:
        raypos_tensor = torch.masked_select(raypos_tensor, ray_mask_tensor[..., None, None].expand(-1, -1, D, 3)).reshape(B, R, D, 3)
        raypos_mask_tensor = torch.masked_select(raypos_mask_tensor, ray_mask_tensor[..., None].expand(-1, -1, D)).reshape(B, R, D)
        # print("R", R, raypos_tensor.shape, raypos_mask_tensor.shape)
        # Turn the 0/1 mask into destination slot indices (0..SR-1); samples
        # past the first SR occupied ones, and unoccupied samples, become -1.
        raypos_maskcum = torch.cumsum(raypos_mask_tensor, dim=-1).to(torch.int32)
        raypos_mask_tensor = (raypos_mask_tensor * raypos_maskcum * (raypos_maskcum <= SR)) - 1
        sample_loc_mask_tensor = torch.zeros([B, R, SR], dtype=torch.int32, device=device)
        self.get_shadingloc(
            Holder(raypos_tensor), # [1, 2048, 400, 3]
            Holder(raypos_mask_tensor),
            np.int32(B),
            np.int32(R),
            np.int32(D),
            np.int32(SR),
            Holder(sample_loc_tensor),
            Holder(sample_loc_mask_tensor),
            block=(kMaxThreadsPerBlock, 1, 1), grid=(gridSize, 1)
        )
        # torch.cuda.synchronize()
        # print("shadingloc_mask_tensor", torch.sum(sample_loc_mask_tensor, dim=-1), torch.sum(torch.sum(sample_loc_mask_tensor, dim=-1) > 0), torch.sum(sample_loc_mask_tensor > 0))
        # shadingloc_masked = torch.masked_select(sample_loc_tensor, sample_loc_mask_tensor[..., None] > 0)
        # save_points(shadingloc_masked.reshape(-1, 3), "./", "shading_pnts{}".format(self.count))
        seconds = time.time()
        gridSize = int((B * R * SR + kMaxThreadsPerBlock - 1) / kMaxThreadsPerBlock)
        self.query_along_ray(
            Holder(point_xyz_w_tensor),
            np.int32(B),
            np.int32(SR),
            np.int32(R),
            np.int32(max_o),
            np.int32(P),
            np.int32(K),
            np.int32(grid_size_vol),
            np.float32(radius_limit_np ** 2),
            d_coord_shift,
            scaled_vdim_gpu,
            scaled_vsize_gpu,
            kernel_size_gpu,
            Holder(occ_numpnts_tensor),
            Holder(occ_2_pnts_tensor),
            Holder(coor_2_occ_tensor),
            Holder(sample_loc_tensor),
            Holder(sample_loc_mask_tensor),
            Holder(sample_pidx_tensor),
            np.uint64(seconds),
            np.int32(self.opt.NN),
            block=(kMaxThreadsPerBlock, 1, 1), grid=(gridSize, 1))
        # torch.cuda.synchronize()
        # print("point_xyz_w_tensor",point_xyz_w_tensor.shape)
        # queried_masked = point_xyz_w_tensor[0][sample_pidx_tensor.reshape(-1).to(torch.int64), :]
        # save_points(queried_masked.reshape(-1, 3), "./", "queried_pnts{}".format(self.count))
        # print("valid ray", torch.sum(torch.sum(sample_loc_mask_tensor, dim=-1) > 0))
        #
        # Second compaction: drop rays whose samples found no neighbors, and
        # scatter the survivors back into the original-pixel-order ray mask.
        masked_valid_ray = torch.sum(sample_pidx_tensor.view(B, R, -1) >= 0, dim=-1) > 0
        R = torch.max(torch.sum(masked_valid_ray.to(torch.int32), dim=-1)).cpu().numpy()
        ray_mask_tensor.masked_scatter_(ray_mask_tensor, masked_valid_ray)
        sample_pidx_tensor = torch.masked_select(sample_pidx_tensor, masked_valid_ray[..., None, None].expand(-1, -1, SR, K)).reshape(B, R, SR, K)
        sample_loc_tensor = torch.masked_select(sample_loc_tensor, masked_valid_ray[..., None, None].expand(-1, -1, SR, 3)).reshape(B, R, SR, 3)
    # self.count+=1
    return sample_pidx_tensor, sample_loc_tensor, ray_mask_tensor.to(torch.int8)
def load_init_points(scan, data_dir="/home/xharlie/user_space/data/nrData/nerf/nerf_synthetic_colmap"):
    """Load the COLMAP-fused initial point cloud for a scan as a CUDA tensor.

    Args:
        scan: scene/scan name, used as a sub-directory of ``data_dir``.
        data_dir: dataset root; default is a machine-specific path —
            callers on other machines must override it.

    Returns:
        (N, 3) float32 torch tensor of point positions on the default CUDA
        device. NOTE(review): the device is hard-coded to "cuda"; this will
        fail on CPU-only machines — confirm whether a device argument is
        wanted.
    """
    points_path = os.path.join(data_dir, scan, "colmap_results/dense/fused.ply")
    # points_path = os.path.join(self.data_dir, self.scan, "exported/pcd_te_1_vs_0.01_jit.ply")
    assert os.path.exists(points_path)
    # Imported lazily so the module loads without plyfile installed.
    from plyfile import PlyData, PlyElement
    plydata = PlyData.read(points_path)
    # plydata (PlyProperty('x', 'double'), PlyProperty('y', 'double'), PlyProperty('z', 'double'), PlyProperty('nx', 'double'), PlyProperty('ny', 'double'), PlyProperty('nz', 'double'), PlyProperty('red', 'uchar'), PlyProperty('green', 'uchar'), PlyProperty('blue', 'uchar'))
    print("plydata", plydata.elements[0])
    x,y,z=torch.as_tensor(plydata.elements[0].data["x"].astype(np.float32), device="cuda", dtype=torch.float32), torch.as_tensor(plydata.elements[0].data["y"].astype(np.float32), device="cuda", dtype=torch.float32), torch.as_tensor(plydata.elements[0].data["z"].astype(np.float32), device="cuda", dtype=torch.float32)
    points_xyz = torch.stack([x,y,z], dim=-1).to(torch.float32)
    return points_xyz
def load_blender_data(basedir, splits, half_res=False, testskip=1):
    """Load a NeRF-synthetic (Blender) dataset.

    Reads ``transforms_{split}.json`` metadata plus the referenced PNG frames
    for each requested split, converts poses to the OpenCV convention via the
    module-level ``blender2opencv`` matrix, and builds a circular path of
    render poses.

    Args:
        basedir: scene directory containing transforms_*.json and images.
        splits: list of split names; ``None`` means ['train', 'val', 'test'].
        half_res: downsample images (and intrinsics) by a factor of 2.
        testskip: keep every ``testskip``-th frame of non-train splits
            (0 or train split -> keep all frames).

    Returns:
        (imgs, poses, render_poses, [H, W, focal], i_split, intrinsic) where
        imgs keeps all 4 RGBA channels as float32 in [0, 1] and i_split holds
        the per-split index ranges into the concatenated arrays.
    """
    splits = ['train', 'val', 'test'] if splits is None else splits
    metas = {}
    for s in splits:
        with open(os.path.join(basedir, 'transforms_{}.json'.format(s)), 'r') as fp:
            metas[s] = json.load(fp)
    all_imgs = []
    all_poses = []
    counts = [0]
    for s in splits:
        meta = metas[s]
        imgs = []
        poses = []
        # Train split (or testskip == 0) keeps every frame.
        skip = 1 if s == 'train' or testskip == 0 else testskip
        for frame in meta['frames'][::skip]:
            fname = os.path.join(basedir, frame['file_path'] + '.png')
            imgs.append(imageio.imread(fname))
            poses.append(np.array(frame['transform_matrix']) @ blender2opencv)
        imgs = (np.array(imgs) / 255.).astype(np.float32)  # keep all 4 channels (RGBA)
        poses = np.array(poses).astype(np.float32)
        counts.append(counts[-1] + imgs.shape[0])
        all_imgs.append(imgs)
        all_poses.append(poses)
    i_split = [np.arange(counts[i], counts[i + 1]) for i in range(len(splits))]
    imgs = np.concatenate(all_imgs, 0)
    poses = np.concatenate(all_poses, 0)
    H, W = imgs[0].shape[:2]
    # All splits of a NeRF-synthetic scene share camera_angle_x, so reading it
    # from the last split's meta is safe.
    camera_angle_x = float(meta['camera_angle_x'])
    focal = .5 * W / np.tan(.5 * camera_angle_x)
    stride = 20
    render_poses = np.stack([pose_spherical(angle, -30.0, 4.0) for angle in np.linspace(-180, 180, stride + 1)[:-1]], 0)
    if half_res:
        # Fixed: target size was hard-coded to [400, 400], which silently
        # assumed 800x800 source images; derive it from the actual resolution.
        imgs = tf.image.resize_area(imgs, [H // 2, W // 2]).numpy()
        H = H // 2
        W = W // 2
        focal = focal / 2.
    intrinsic = np.asarray([[focal, 0, W / 2],
                            [0, focal, H / 2],
                            [0, 0, 1]])
    return imgs, poses, render_poses, [H, W, focal], i_split, intrinsic
def get_dtu_raydir(pixelcoords, intrinsic, rot, dir_norm):
    """Back-project pixel coordinates to ray directions in world space.

    Args:
        pixelcoords: H x W x 2 (or any ... x 2) array of (x, y) pixel coords.
        intrinsic: 3x3 camera intrinsics matrix.
        rot: 3x3 camera-to-world rotation.
        dir_norm: if truthy, normalize each direction to (approximately)
            unit length.

    Returns:
        Array of ray directions with trailing dim 3.
    """
    # Pixel centers (+0.5) mapped through the inverse intrinsics; z = 1 in
    # camera space.
    cam_x = (pixelcoords[..., 0] + 0.5 - intrinsic[0, 2]) / intrinsic[0, 0]
    cam_y = (pixelcoords[..., 1] + 0.5 - intrinsic[1, 2]) / intrinsic[1, 1]
    rays = np.stack([cam_x, cam_y, np.ones_like(cam_x)], axis=-1)
    # Rotate camera-space directions into world space (rot is c2w).
    rays = rays @ rot[:, :].T
    if dir_norm:
        # Epsilon guards against division by a zero-length direction.
        norms = np.linalg.norm(rays, axis=-1, keepdims=True) + 1e-5
        rays = rays / norms
    return rays
def try_build(ranges, vsize, vdim, vscale, max_o, P, kernel_size, SR, K, pixel_idx, obj,
              radius_limit, depth_limit, near_depth, far_depth, shading_count, split=["train"], imgidx=0, gpu=0, NN=2):
    """Smoke-test driver: run the full point-query pipeline on one scene.

    Loads the COLMAP point cloud and Blender metadata for ``obj``, builds a
    minimal namedtuple standing in for the training options, instantiates
    ``lighting_fast_querier`` and runs ``query_points`` for one camera.
    Results are only printed/plotted, nothing is returned.

    NOTE(review): ``vdim``, ``radius_limit``, ``depth_limit`` and
    ``shading_count`` are accepted but never used here — confirm whether they
    were meant to feed the opt namedtuple.
    """
    # point_path = os.path.join(point_dir, point_file)
    # point_xyz = load_pnts(point_path, 819200000) # 81920 233872
    point_xyz = load_init_points(obj)
    imgs, poses, _, hwf, _, intrinsic = load_blender_data(
        os.path.expandvars("${nrDataRoot}") + "/nerf/nerf_synthetic/{}".format(obj), split, half_res=False, testskip=1)
    H, W, focal = hwf
    # Rebuild intrinsics from hwf (overwrites the loader's intrinsic).
    intrinsic = np.array([[focal, 0, W / 2], [0, focal, H / 2], [0, 0, 1]])
    plt.figure()
    plt.imshow(imgs[imgidx])
    point_xyz_w_tensor = torch.as_tensor(point_xyz, device="cuda:{}".format(gpu))[None,...]
    print("point_xyz_w_tensor", point_xyz_w_tensor[0].shape, torch.min(point_xyz_w_tensor[0], dim=0)[0], torch.max(point_xyz_w_tensor[0], dim=0)[0])
    # plt.show()
    actual_numpoints_tensor = torch.ones([1], device=point_xyz_w_tensor.device, dtype=torch.int32) * len(point_xyz_w_tensor[0])
    # range_gpu, vsize_gpu, vdim_gpu, vscale_gpu, kernel_size_gpu = np_to_gpuarray(ranges, scaled_vsize, scaled_vdim, vscale, kernel_size)
    pixel_idx_tensor = torch.as_tensor(pixel_idx, device=point_xyz_w_tensor.device, dtype=torch.int32)[None, ...]
    # NOTE(review): rays are generated from the first pose regardless of
    # imgidx (c2w = poses[0]) — confirm whether poses[imgidx] was intended.
    c2w = poses[0]
    print("c2w", c2w.shape, pixel_idx.shape)
    from data.data_utils import get_dtu_raydir
    cam_pos, camrot = c2w[:3, 3], c2w[:3, :3]
    ray_dirs_tensor, cam_pos_tensor = torch.as_tensor(get_dtu_raydir(pixel_idx, intrinsic, camrot, True), device=pixel_idx_tensor.device, dtype=torch.float32), torch.as_tensor(cam_pos, device=pixel_idx_tensor.device, dtype=torch.float32)
    from collections import namedtuple
    # Minimal stand-in for the full option object the querier expects.
    opt_construct = namedtuple('opt', 'inverse vsize vscale kernel_size radius_limit_scale depth_limit_scale max_o P SR K gpu_maxthr NN ranges z_depth_dim')
    opt = opt_construct(inverse=0, vscale=vscale, vsize=vsize, kernel_size=kernel_size, radius_limit_scale=0, depth_limit_scale=0, max_o=max_o, P=P, SR=SR, K=K, gpu_maxthr=1024, NN=NN, ranges=ranges, z_depth_dim=400)
    querier = lighting_fast_querier(point_xyz_w_tensor.device, opt)
    print("actual_numpoints_tensor", actual_numpoints_tensor)
    querier.query_points(pixel_idx_tensor, None, point_xyz_w_tensor, actual_numpoints_tensor, H, W, intrinsic, near_depth, far_depth, ray_dirs_tensor[None, ...], cam_pos_tensor[None, ...])
import os
import numpy as np
from numpy import dot
from math import sqrt
import pycuda
from pycuda.compiler import SourceModule
import pycuda.driver as drv
import pycuda.gpuarray as gpuarray
import matplotlib.pyplot as plt
import torch
import pickle
import time
from models.rendering.diff_ray_marching import near_far_linear_ray_generation, near_far_disparity_linear_ray_generation
from data.load_blender import load_blender_data
def w2img(point_xyz, transform_matrix, focal):
    """Project world points into an 800x800 debug image and show it.

    Renders the projected pixels (value .2 on a white background) via
    matplotlib and prints projection statistics.

    Args:
        point_xyz: (N, 3) world-space points.
        transform_matrix: camera matrix whose top-left 3x3 is treated as a
            world-to-cam rotation and last column as the camera position.
        focal: focal length in pixels. The +400 offsets hard-code an
            800x800 image.

    Returns:
        (N, 3) array of (x_pers, y_pers, depth) per point.
    """
    camrot = transform_matrix[:3, :3] # world 2 cam
    campos = transform_matrix[:3, 3] #
    point_xyz_shift = point_xyz - campos[None, :]
    # xyz = np.sum(point_xyz_shift[:,None,:] * camrot.T, axis=-1)
    xyz = np.sum(camrot[None, ...] * point_xyz_shift[:, :, None], axis=-2)
    # print(xyz.shape, np.sum(camrot[None, None, ...] * point_xyz_shift[:,:,None], axis=-2).shape)
    # NOTE(review): x divides by -z while y divides by +z — looks like an
    # axis-flip convention for this camera model; confirm intended.
    xper = xyz[:, 0] / -xyz[:, 2]
    yper = xyz[:, 1] / xyz[:, 2]
    x_pixel = np.round(xper * focal + 400).astype(np.int32)
    y_pixel = np.round(yper * focal + 400).astype(np.int32)
    print("focal", focal, np.tan(.5 * 0.6911112070083618))
    print("pixel xmax xmin:", np.max(x_pixel), np.min(x_pixel), "pixel ymax ymin:", np.max(y_pixel), np.min(y_pixel))
    print("per xmax xmin:", np.max(xper), np.min(xper), "per ymax ymin:", np.max(yper), np.min(yper), "per zmax zmin:",
          np.max(xyz[:, 2]), np.min(xyz[:, 2]))
    print("min perx", -400 / focal, "max perx", 400 / focal)
    background = np.ones([800, 800, 3], dtype=np.float32)
    background[y_pixel, x_pixel, :] = .2
    plt.figure()
    plt.imshow(background)
    return np.stack([xper, yper, -xyz[:, 2]], axis=-1)
import os
import numpy as np
from numpy import dot
from math import sqrt
import pycuda
from pycuda.compiler import SourceModule
import pycuda.driver as drv
import pycuda.gpuarray as gpuarray
import matplotlib.pyplot as plt
import torch
import pickle
import time
from models.rendering.diff_ray_marching import near_far_linear_ray_generation, near_far_disparity_linear_ray_generation
from data.load_blender import load_blender_data
def save_mask_pers_points(queried_point_xyz, vsize, ranges, w, h):
    """Rasterize queried perspective points into an h x w mask image.

    Buckets each point's (x, y) into pixel cells of size ``vsize`` starting
    at ``ranges`` and paints those cells grey on a white background.

    NOTE(review): this function appears unfinished — the image is never
    written to disk, and the last two lines reference ``self`` and ``opt``
    which are undefined in a free function (NameError if executed).
    """
    pixel_xy_inds = np.floor((queried_point_xyz[:, :2] - ranges[None, :2]) / vsize[None, :2]).astype(np.int32)
    print(pixel_xy_inds.shape)
    y_pixel, x_pixel = pixel_xy_inds[:, 1], pixel_xy_inds[:, 0]
    background = np.ones([h, w, 3], dtype=np.float32)
    background[y_pixel, x_pixel, :] = .5
    # NOTE(review): `self` / `opt` undefined here — see docstring.
    image_dir = os.path.join(self.opt.checkpoints_dir, opt.name, 'images')
    image_file = os.path.join(image_dir)
import os
import numpy as np
from numpy import dot
from math import sqrt
import pycuda
from pycuda.compiler import SourceModule
import pycuda.driver as drv
import pycuda.gpuarray as gpuarray
import matplotlib.pyplot as plt
import torch
import pickle
import time
from models.rendering.diff_ray_marching import near_far_linear_ray_generation, near_far_disparity_linear_ray_generation
from data.load_blender import load_blender_data
def save_points(xyz, dir, filename):
    """Write the point cloud `xyz` to `dir`/`filename`.

    NOTE(review): the implementation is missing from this extract — this
    docstring is the only body; restore the real one before relying on it.
    """

def vis_vox(ranges_np, scaled_vsize_np, coor_2_occ_tensor):
    # Dump the world-space centers of all occupied voxels to "./occ_xyz".
    print("ranges_np", ranges_np, scaled_vsize_np)
    mask = coor_2_occ_tensor.cpu().numpy() > 0
    xdim, ydim, zdim = coor_2_occ_tensor.shape[1:]
    x_ = np.arange(0, xdim)
    y_ = np.arange(0, ydim)
    z_ = np.arange(0, zdim)
    x, y, z = np.meshgrid(x_, y_, z_, indexing='ij')
    xyz = np.stack([x,y,z], axis=-1).reshape(-1,3).astype(np.float32)
    # Voxel index -> world position at the voxel center (+0.5 cell offset).
    xyz = ranges_np[None, :3] + (xyz + 0.5) * scaled_vsize_np[None, :]
    xyz = xyz[mask.reshape(-1)]
    save_points(xyz, "./", "occ_xyz")
    print(xyz.shape)
156,839 | import os
import numpy as np
from numpy import dot
from math import sqrt
import pycuda
from pycuda.compiler import SourceModule
import pycuda.driver as drv
import pycuda.gpuarray as gpuarray
import matplotlib.pyplot as plt
import torch
import pickle
import time
from models.rendering.diff_ray_marching import near_far_linear_ray_generation, near_far_disparity_linear_ray_generation
from data.load_blender import load_blender_data
def render_mask_pers_points(queried_point_xyz, vsize, ranges, w, h):
    """Display a w x h image where cells hit by any queried point are gray."""
    offsets = queried_point_xyz[:, :2] - ranges[None, :2]
    cell_inds = np.floor(offsets / vsize[None, :2]).astype(np.int32)
    print(cell_inds.shape)
    cols = cell_inds[:, 0]
    rows = cell_inds[:, 1]
    canvas = np.ones([h, w, 3], dtype=np.float32)
    canvas[rows, cols, :] = .5
    plt.figure()
    plt.imshow(canvas)
def render_pixel_mask(pixel_xy_inds, w, h):
    """Display a w x h white image with the given (x, y) pixels blacked out."""
    cols = pixel_xy_inds[0, :, 0]
    rows = pixel_xy_inds[0, :, 1]
    canvas = np.ones([h, w, 3], dtype=np.float32)
    canvas[rows, cols, :] = .0
    plt.figure()
    plt.imshow(canvas)
def save_queried_points(point_xyz_tensor, point_xyz_pers_tensor, sample_pidx_tensor, pixel_idx_tensor, pixel_idx_cur_tensor, vdim, vsize, ranges):
    """Visualize which neural points were queried for the current ray batch.

    sample_pidx_tensor: (B, R, SR, K) point indices per ray sample, -1 for
    empty slots. Renders the touched pixels and the perspective-space
    positions of the selected points; blocks on `plt.show()`.
    """
    B, R, SR, K = sample_pidx_tensor.shape
    # pixel_inds = torch.as_tensor([3210, 3217,3218,3219,3220, 3221,3222,3223,3224,3225,3226,3227,3228,3229,3230, 3231,3232,3233,3234,3235, 3236,3237,3238,3239,3240], device=sample_pidx_tensor.device, dtype=torch.int64)
    point_inds = sample_pidx_tensor[0, :, :, :]
    # point_inds = sample_pidx_tensor[0, pixel_inds, :, :]
    # Keep only valid slots (index > -1) and gather their xyz positions.
    mask = point_inds > -1
    point_inds = torch.masked_select(point_inds, mask).to(torch.int64)
    queried_point_xyz_tensor = point_xyz_tensor[0, point_inds, :]
    queried_point_xyz = queried_point_xyz_tensor.cpu().numpy()
    print("queried_point_xyz.shape", B, R, SR, K, point_inds.shape, queried_point_xyz_tensor.shape,
          queried_point_xyz.shape)
    print("pixel_idx_cur_tensor", pixel_idx_cur_tensor.shape)
    # NOTE(review): vdim[0]/vdim[1] are passed as (w, h) — confirm ordering.
    render_pixel_mask(pixel_idx_cur_tensor.cpu().numpy(), vdim[0], vdim[1])
    render_mask_pers_points(point_xyz_pers_tensor[0, point_inds, :].cpu().numpy(), vsize, ranges, vdim[0], vdim[1])
    plt.show()
156,840 | import torch
The provided code snippet includes necessary dependencies for implementing the `homogenize` function. Write a Python function `def homogenize(m)` to solve the following problem:
Adds homogeneous coordinates to a [..., N,N] matrix, returning [..., N+1, N+1].
Here is the function:
def homogenize(m):
    """Adds homogeneous coordinates to a [..., N, N] matrix, returning [..., N+1, N+1].

    The input is copied into the top-left N x N block; the appended row and
    column come from the identity matrix.
    """
    assert m.shape[-1] == m.shape[-2]  # Must be square
    n = m.shape[-1]
    # Build the identity on the same device/dtype as `m` instead of an
    # unconditional .cuda(): the original crashed on CPU tensors and could
    # mix devices when `m` lived on a non-default GPU.
    eye_n_plus_1 = torch.eye(n + 1, device=m.device, dtype=m.dtype).expand(list(m.shape[:-2]) + [-1, -1])
    extra_col = eye_n_plus_1[..., :-1, -1:]
    extra_row = eye_n_plus_1[..., -1:, :]
    including_col = torch.cat([m, extra_col], dim=-1)
    return torch.cat([including_col, extra_row], dim=-2)
156,841 | import torch
def roll_pitch_yaw_to_rotation_matrices(roll_pitch_yaw):
    """Converts roll-pitch-yaw angles to rotation matrices.

    Args:
      roll_pitch_yaw: Tensor with shape [..., 3] holding roll, pitch and yaw
        in radians. Points are rotated by roll about x, then pitch about y,
        then yaw about z.

    Returns:
      Tensor with shape [..., 3, 3] of the corresponding rotation matrices.
    """
    cx, cy, cz = torch.unbind(torch.cos(roll_pitch_yaw), dim=-1)
    sx, sy, sz = torch.unbind(torch.sin(roll_pitch_yaw), dim=-1)
    # Rows of R = Rz(yaw) @ Ry(pitch) @ Rx(roll), written element-wise.
    top = [cz * cy, cz * sy * sx - sz * cx, cz * sy * cx + sz * sx]
    middle = [sz * cy, sz * sy * sx + cz * cx, sz * sy * cx - cz * sx]
    bottom = [-sy, cy * sx, cy * cx]
    flat = torch.stack(top + middle + bottom, dim=-1)
    out_shape = list(roll_pitch_yaw.shape[:-1]) + [3, 3]
    return torch.reshape(flat, out_shape)
The provided code snippet includes necessary dependencies for implementing the `compute_world2local_dist` function. Write a Python function `def compute_world2local_dist(dists, radii, rotations)` to solve the following problem:
Computes a transformation to the local element frames for encoding.
Here is the function:
def compute_world2local_dist(dists, radii, rotations):
    """Computes a transformation to the local element frames for encoding.

    dists: [..., 3] offsets; radii: [..., 3] per-axis radii; rotations:
    [..., 3] roll/pitch/yaw. Returns the offsets rotated into each element
    frame and scaled by 1/radius, as [..., 3, 1] column vectors.
    """
    # Treat each offset as a column vector so matmul applies per element.
    offset_cols = dists[..., None]
    # Rotation into the element frame.
    frame = roll_pitch_yaw_to_rotation_matrices(rotations)
    assert frame.shape[-2:] == (3, 3)
    # Per-axis scaling; epsilon guards zero radii.
    scale = torch.diag_embed(1.0 / (radii + 1e-8))
    # Rotate first, then scale, then apply to the offsets.
    return torch.matmul(torch.matmul(scale, frame), offset_cols)
156,842 | import torch
import torch.nn as nn
from torch.nn import init
import functools
from torch.optim import lr_scheduler
import torch.nn.functional as F
import numpy as np
def get_nonlinearity_layer(activation_type='PReLU'):
    """Return a torch.nn activation module for the given name.

    Raises NotImplementedError for unknown names.
    """
    factories = {
        'ReLU': lambda: nn.ReLU(True),
        'SELU': lambda: nn.SELU(True),
        'LeakyReLU': lambda: nn.LeakyReLU(0.1, True),
        'PReLU': lambda: nn.PReLU(),
    }
    if activation_type not in factories:
        raise NotImplementedError('activation layer [{}] is not found'.format(activation_type))
    return factories[activation_type]()
return nonlinearity_layer | null |
156,843 | import torch
import torch.nn as nn
from torch.nn import init
import functools
from torch.optim import lr_scheduler
import torch.nn.functional as F
import numpy as np
def get_norm_layer(norm_type='instance'):
    """Map a normalization name to a layer constructor ('none' -> None)."""
    if norm_type == 'batch':
        return functools.partial(nn.BatchNorm2d, affine=True)
    if norm_type == 'instance':
        # Affine-free instance norm with no running statistics.
        # norm_layer = functools.partial(nn.InstanceNorm2d, affine=True, track_running_stats=True)
        return functools.partial(nn.InstanceNorm2d, affine=False, track_running_stats=False)
    if norm_type == 'group':
        return functools.partial(nn.GroupNorm, num_groups=16, affine=True)
    if norm_type == 'layer':
        return nn.LayerNorm
    if norm_type == 'none':
        return None
    raise NotImplementedError('normalization layer [%s] is not found' % norm_type)
156,844 | import torch
import torch.nn as nn
from torch.nn import init
import functools
from torch.optim import lr_scheduler
import torch.nn.functional as F
import numpy as np
def get_scheduler(optimizer, opt):
    """Build a learning-rate scheduler for `optimizer` from `opt.lr_policy`.

    Supported policies: 'lambda' (constant for opt.niter iters, then linear
    decay over opt.niter_decay), 'step', 'plateau',
    'iter_exponential_decay' and 'cosine_annealing'.

    Raises:
        NotImplementedError: for an unknown `opt.lr_policy`.
    """
    if opt.lr_policy == 'lambda':
        def lambda_rule(it):
            # Constant LR for opt.niter iters, then linear decay to 0.
            lr_l = 1.0 - max(0, it - opt.niter) / float(opt.niter_decay + 1)
            return lr_l
        scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_rule)
    elif opt.lr_policy == 'step':
        scheduler = lr_scheduler.StepLR(optimizer, step_size=opt.lr_decay_iters, gamma=0.1)
    elif opt.lr_policy == 'plateau':
        scheduler = lr_scheduler.ReduceLROnPlateau(optimizer,
                                                   mode='min',
                                                   factor=0.2,
                                                   threshold=0.01,
                                                   patience=5)
    elif opt.lr_policy == 'iter_exponential_decay':
        def lambda_rule(it):
            # Multiply the base LR by lr_decay_exp every lr_decay_iters iters.
            lr_l = pow(opt.lr_decay_exp, it / opt.lr_decay_iters)
            return lr_l
        scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_rule)
    elif opt.lr_policy == 'cosine_annealing':
        # Was `CosineAnnealingLR(..., T_max=self.args.num_epochs, ...)`:
        # CosineAnnealingLR was never imported and `self` does not exist in
        # this free function, so the branch always crashed. Use the
        # lr_scheduler namespace and read the horizon from `opt`.
        # TODO(review): confirm `num_epochs` is the right attribute on opt.
        scheduler = lr_scheduler.CosineAnnealingLR(optimizer, T_max=opt.num_epochs, eta_min=1e-7)
    else:
        # Was `return NotImplementedError(...)`, which handed callers an
        # exception instance instead of raising it.
        raise NotImplementedError('learning rate policy [%s] is not implemented' % opt.lr_policy)
    return scheduler
156,845 | import torch
import torch.nn as nn
from torch.nn import init
import functools
from torch.optim import lr_scheduler
import torch.nn.functional as F
import numpy as np
The provided code snippet includes necessary dependencies for implementing the `positional_encoding` function. Write a Python function `def positional_encoding(positions, freqs, ori=False)` to solve the following problem:
Encode positions with positional encoding. Arguments: positions — :math:`(..., D)` tensor; freqs — int, the number of frequency octaves. Returns: pts — :math:`(..., 2DF)` encoded features.
Here is the function:
def positional_encoding(positions, freqs, ori=False):
    '''Sinusoidal positional encoding.

    positions: :math:`(..., D)` tensor; freqs: number of octaves F.
    Returns :math:`(..., 2DF)` features, or :math:`(..., D + 2DF)` with the
    raw positions prepended when `ori` is True.
    '''
    octaves = (2**torch.arange(freqs).float()).to(positions.device)  # (F,)
    in_dim = positions.shape[-1]
    # Each coordinate is multiplied by every octave, then flattened.
    scaled = (positions[..., None] * octaves).reshape(
        positions.shape[:-1] + (freqs * in_dim,))  # (..., DF)
    if ori:
        parts = [positions, torch.sin(scaled), torch.cos(scaled)]
        return torch.cat(parts, dim=-1).reshape(
            scaled.shape[:-1] + (scaled.shape[-1] * 2 + in_dim,))
    # Interleave sin/cos per frequency component.
    return torch.stack([torch.sin(scaled), torch.cos(scaled)], dim=-1).reshape(
        scaled.shape[:-1] + (scaled.shape[-1] * 2,))
156,846 | import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from utils import format as fmt
import os
from .base_model import BaseModel
from .rendering.diff_render_func import find_render_function, find_blend_function, find_tone_map, alpha_blend
from .rendering.diff_ray_marching import find_ray_generation_method, find_refined_ray_generation_method, ray_march, alpha_ray_march
from utils import format as fmt
from utils.spherical import SphericalHarm, SphericalHarm_table
from utils.util import add_property2dict
from torch.autograd import Variable
from PIL import Image
def mse2psnr(x): return -10.* torch.log(x)/np.log(10.) | null |
156,847 | import torch
import torch.nn.functional as F
from .mvs_utils import normal_vect, index_point_feature, build_color_volume
def ndc2dist(ndc_pts, cos_angle):
    """Distances between consecutive NDC samples, with a huge final bin."""
    segments = ndc_pts[:, 1:] - ndc_pts[:, :-1]
    dists = torch.norm(segments, dim=-1)
    # Terminal bin is effectively infinite, scaled by the ray's cos angle.
    return torch.cat([dists, 1e10*cos_angle.unsqueeze(-1)], -1)  # [N_rays, N_samples]
156,848 | import torch
import torch.nn.functional as F
from .mvs_utils import normal_vect, index_point_feature, build_color_volume
def normal_vect(vect, dim=-1):
    """L2-normalize `vect` along `dim`; epsilon keeps zero vectors finite."""
    length = torch.sqrt(torch.sum(vect**2, dim=dim, keepdim=True))
    return vect / (length + 1e-7)
The provided code snippet includes necessary dependencies for implementing the `gen_angle_feature` function. Write a Python function `def gen_angle_feature(c2ws, rays_pts, rays_dir)` to solve the following problem:
Inputs: c2ws — [1, v, 4, 4] camera-to-world matrices; rays_pts — [N_rays, N_samples, 3] sample points; rays_dir — [N_rays, 3] ray directions. Returns: the per-sample view-angle feature.
Here is the function:
def gen_angle_feature(c2ws, rays_pts, rays_dir):
    """
    Inputs:
        c2ws: [1,v,4,4]
        rays_pts: [N_rays, N_samples, 3]
        rays_dir: [N_rays, 3]
    Returns:
        angle: [N_rays, N_samples, v] cosine between each ray direction and
        the (normalized) direction from each sample point to each camera.
    """
    N_rays, N_samples = rays_pts.shape[:2]
    # NOTE(review): `c2ws[:3, :3, 3]` slices the FIRST axis with :3, which
    # matches a [v,4,4] layout (camera centers), not the documented
    # [1,v,4,4] — confirm the caller's actual tensor shape.
    dirs = normal_vect(rays_pts.unsqueeze(2) - c2ws[:3, :3, 3][None, None])  # [N_rays, N_samples, v, 3]
    angle = torch.sum(dirs[:, :, :3] * rays_dir.reshape(N_rays,1,1,3), dim=-1, keepdim=True).reshape(N_rays, N_samples, -1)
    return angle
156,849 | import torch
import torch.nn.functional as F
from .mvs_utils import normal_vect, index_point_feature, build_color_volume
def depth2dist(z_vals, cos_angle):
    """Per-sample segment lengths from depth values.

    z_vals: [N_rays, N_samples]; the final bin gets length 1e10. Deltas are
    scaled by cos_angle to convert depth differences to ray distances.
    """
    device = z_vals.device
    deltas = z_vals[..., 1:] - z_vals[..., :-1]
    tail = torch.Tensor([1e10]).to(device).expand(deltas[..., :1].shape)
    dists = torch.cat([deltas, tail], -1)  # [N_rays, N_samples]
    return dists * cos_angle.unsqueeze(-1)
def raw2outputs(raw, z_vals, dists, white_bkgd=False, net_type='v2'):
    """Transforms model's predictions to semantically meaningful values.

    Args:
        raw: [num_rays, num_samples along ray, 4]. Prediction from model.
        z_vals: [num_rays, num_samples along ray]. Integration time.
        dists: [num_rays, num_samples]. Per-sample segment lengths.
        white_bkgd: composite onto a white background when True.
        net_type: forwarded to `raw2alpha` (defined elsewhere in this module).
    Returns:
        rgb_map: [num_rays, 3]. Estimated RGB color of a ray.
        disp_map: [num_rays]. Disparity map. Inverse of depth map.
        acc_map: [num_rays]. Sum of weights along each ray.
        weights: [num_rays, num_samples]. Weights assigned to each sampled color.
        depth_map: [num_rays]. Estimated distance to object.
        alpha: [num_rays, num_samples]. Per-sample opacities.
    """
    device = z_vals.device
    rgb = raw[..., :3]  # [N_rays, N_samples, 3]
    # Opacities/compositing weights come from the helper `raw2alpha`.
    alpha, weights, alpha_softmax = raw2alpha(raw[..., 3], dists, net_type)  # [N_rays, N_samples]
    # Alpha-composite colors and depths along each ray.
    rgb_map = torch.sum(weights[..., None] * rgb, -2)  # [N_rays, 3]
    depth_map = torch.sum(weights * z_vals, -1)
    # Clamp away from zero accumulated weight before inverting.
    disp_map = 1. / torch.max(1e-10 * torch.ones_like(depth_map, device=device), depth_map / torch.sum(weights, -1))
    acc_map = torch.sum(weights, -1)
    if white_bkgd:
        # Fill the unoccupied remainder of the ray with white.
        rgb_map = rgb_map + (1. - acc_map[..., None])
    return rgb_map, disp_map, acc_map, weights, depth_map, alpha
def gen_dir_feature(w2c_ref, rays_dir):
    """Rotate world-space ray directions into the reference camera frame.

    w2c_ref: [4,4] (or larger) world-to-camera matrix; only the top-left
    3x3 rotation is used. rays_dir: [N_rays, 3]. Returns [N_rays, 3].
    """
    rot = w2c_ref[:3, :3]
    return rays_dir @ rot.t()
def gen_pts_feats(imgs, volume_feature, rays_pts, pose_ref, rays_ndc, feat_dim, img_feat=None, img_downscale=1.0, use_color_volume=False, net_type='v0'):
    """Gather per-sample features for ray points from the cost volume.

    Returns [N_rays, N_samples, feat_dim (+ image-feature channels)]. When
    `use_color_volume` is False, the first 8 channels come from the volume
    lookup and the remainder from re-projected image colors/features.
    """
    N_rays, N_samples = rays_pts.shape[:2]
    if img_feat is not None:
        feat_dim += img_feat.shape[1]*img_feat.shape[2]

    if not use_color_volume:
        input_feat = torch.empty((N_rays, N_samples, feat_dim), device=imgs.device, dtype=torch.float)
        # `volume_feature` is either a tensor (trilinear lookup) or a callable field.
        ray_feats = index_point_feature(volume_feature, rays_ndc) if torch.is_tensor(volume_feature) else volume_feature(rays_ndc)
        # NOTE(review): the split at channel 8 is hard-coded — it assumes the
        # volume always contributes exactly 8 channels; confirm vs feat_dim.
        input_feat[..., :8] = ray_feats
        input_feat[..., 8:] = build_color_volume(rays_pts, pose_ref, imgs, img_feat, with_mask=True, downscale=img_downscale)
    else:
        input_feat = index_point_feature(volume_feature, rays_ndc) if torch.is_tensor(volume_feature) else volume_feature(rays_ndc)
    return input_feat
def rendering(args, pose_ref, rays_pts, rays_ndc, depth_candidates, rays_o, rays_dir,
              volume_feature=None, imgs=None, network_fn=None, img_feat=None, network_query_fn=None, white_bkgd=False, **kwargs):
    """Evaluate the NeRF MLP on ray samples and volume-render the result.

    Returns (rgb_map, input_feat, weights, depth_map, alpha, ret) where
    `ret` is an (currently empty) dict of extras.
    """
    # rays angle: ||rays_dir|| is used both to normalize directions and to
    # convert depth deltas into euclidean distances below.
    cos_angle = torch.norm(rays_dir, dim=-1)

    # using direction: expressed in the reference camera frame when available.
    if pose_ref is not None:
        angle = gen_dir_feature(pose_ref['w2cs'][0], rays_dir/cos_angle.unsqueeze(-1))  # view dir feature
    else:
        angle = rays_dir/cos_angle.unsqueeze(-1)

    # rays_pts: per-sample features from the cost volume + source images.
    input_feat = gen_pts_feats(imgs, volume_feature, rays_pts, pose_ref, rays_ndc, args.feat_dim, \
                               img_feat, args.img_downscale, args.use_color_volume, args.net_type)

    # rays_ndc = rays_ndc * 2 - 1.0
    # network_query_fn = lambda pts, viewdirs, rays_feats, network_fn: run_network_mvs(pts, viewdirs, rays_feats,
    #                                                                                 network_fn,
    #                                                                                 embed_fn=embed_fn,
    #                                                                                 embeddirs_fn=embeddirs_fn,
    #                                                                                 netchunk=args.netchunk)
    # run_network_mvs
    raw = network_query_fn(rays_ndc, angle, input_feat, network_fn)
    # Channels beyond RGBA are appended back onto the first 8 feature channels.
    if raw.shape[-1]>4:
        input_feat = torch.cat((input_feat[...,:8],raw[...,4:]), dim=-1)

    dists = depth2dist(depth_candidates, cos_angle)
    # dists = ndc2dist(rays_ndc)
    rgb_map, disp_map, acc_map, weights, depth_map, alpha = raw2outputs(raw, depth_candidates, dists, white_bkgd,args.net_type)
    ret = {}
    return rgb_map, input_feat, weights, depth_map, alpha, ret
156,850 | import torch
import torch.nn.functional as F
from .mvs_utils import normal_vect, index_point_feature, build_color_volume
def render_density(network_fn, rays_pts, density_feature, network_query_fn, chunk=1024 * 5):
    """Evaluate densities for ray points in chunks to bound peak memory."""
    device = density_feature.device
    pieces = []
    for start in range(0, rays_pts.shape[0], chunk):
        pts = rays_pts[start:start + chunk].to(device)
        pieces.append(network_query_fn(pts, None, density_feature[start:start + chunk], network_fn))
    return torch.cat(pieces)
156,851 | import torch
import torch.nn as nn
from .mvs_utils import *
from .mvs_utils import homo_warp
from inplace_abn import InPlaceABN
from .renderer import run_network_mvs
from ..depth_estimators.mvsnet import MVSNet as Ofcl_MVSNet
def weights_init(m):
    """Kaiming-initialize Linear layers (zero bias); other modules untouched."""
    if not isinstance(m, nn.Linear):
        return
    nn.init.kaiming_normal_(m.weight.data)
    if m.bias is not None:
        nn.init.zeros_(m.bias.data)
156,852 | import torch
torch.autograd.set_detect_anomaly(True)
import torch.nn as nn
from .mvs_utils import *
from .mvs_utils import homo_warp
from inplace_abn import InPlaceABN
from .renderer import run_network_mvs
from ..depth_estimators.mvsnet import MVSNet as Ofcl_MVSNet
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def get_embedder(multires, i=0, input_dims=3):
    """Build a frequency embedder; i == -1 returns (identity, 3)."""
    if i == -1:
        return nn.Identity(), 3
    # Embedder comes from the module-level star import.
    embedder_obj = Embedder(
        include_input=True,
        input_dims=input_dims,
        max_freq_log2=multires - 1,
        num_freqs=multires,
        log_sampling=True,
        periodic_fns=[torch.sin, torch.cos],
    )
    embed = lambda x, eo=embedder_obj: eo.embed(x)
    return embed, embedder_obj.out_dim
class MVSNeRF(nn.Module):
    # Thin wrapper selecting one of three renderer MLP variants via net_type.
    def __init__(self, D=8, W=256, input_ch_pts=3, input_ch_views=3, input_ch_feat=8, skips=[4], net_type='v2'):
        """Build the NeRF MLP.

        D/W: MLP depth and width; input_ch_*: channel counts for point,
        view-direction and volume-feature inputs; skips: skip-connection
        layers; net_type: 'v0'/'v1'/'v2' selects the Renderer_* variant.
        """
        super(MVSNeRF, self).__init__()

        self.in_ch_pts, self.in_ch_views,self.in_ch_feat = input_ch_pts, input_ch_views, input_ch_feat

        # Three renderer variants are provided; net_type picks one.
        if 'v0' == net_type:
            self.nerf = Renderer_ours(D=D, W=W,input_ch_feat=input_ch_feat,
                     input_ch=input_ch_pts, output_ch=4, skips=skips,
                     input_ch_views=input_ch_views, use_viewdirs=True)
        elif 'v1' == net_type:
            self.nerf = Renderer_attention(D=D, W=W,input_ch_feat=input_ch_feat,
                     input_ch=input_ch_pts, output_ch=4, skips=skips,
                     input_ch_views=input_ch_views, use_viewdirs=True)
        elif 'v2' == net_type:
            self.nerf = Renderer_linear(D=D, W=W,input_ch_feat=input_ch_feat,
                     input_ch=input_ch_pts, output_ch=4, skips=skips,
                     input_ch_views=input_ch_views, use_viewdirs=True)

    def forward_alpha(self, x):
        # Density-only pass.
        return self.nerf.forward_alpha(x)

    def forward(self, x):
        # Full RGBA prediction.
        RGBA = self.nerf(x)
        return RGBA
class FeatureNet(nn.Module):
    """
    output 3 levels of features using a FPN structure
    """
    def __init__(self, intermediate=False, norm_act=InPlaceABN):
        # intermediate: if True, forward() also returns the intermediate maps.
        super(FeatureNet, self).__init__()

        self.conv0 = nn.Sequential(
                        ConvBnReLU(3, 8, 3, 1, 1, norm_act=norm_act),
                        ConvBnReLU(8, 8, 3, 1, 1, norm_act=norm_act))

        self.conv1 = nn.Sequential(
                        ConvBnReLU(8, 16, 5, 2, 2, norm_act=norm_act),
                        ConvBnReLU(16, 16, 3, 1, 1, norm_act=norm_act),
                        ConvBnReLU(16, 16, 3, 1, 1, norm_act=norm_act))

        self.conv2 = nn.Sequential(
                        ConvBnReLU(16, 32, 5, 2, 2, norm_act=norm_act),
                        ConvBnReLU(32, 32, 3, 1, 1, norm_act=norm_act),
                        ConvBnReLU(32, 32, 3, 1, 1, norm_act=norm_act))

        self.toplayer = nn.Conv2d(32, 32, 1)
        self.intermediate = intermediate

    def _upsample_add(self, x, y):
        # 2x bilinear upsample of x, then elementwise add with y (FPN merge).
        return F.interpolate(x, scale_factor=2,
                             mode="bilinear", align_corners=True) + y

    def forward(self, x):
        """x: (B, V, 3, H, W) multi-view images, flattened to (B*V, ...)."""
        B, V, _, H, W = x.shape
        x = x.reshape(B * V, 3, H, W)
        if self.intermediate:
            x1 = self.conv0(x)  # (B, 8, H, W)
            x2 = self.conv1(x1)  # (B, 16, H//2, W//2)
            x3 = self.conv2(x2)  # (B, 32, H//4, W//4)
            x3 = self.toplayer(x3)  # (B, 32, H//4, W//4)
            return [x, x1, x2, x3]
        else:
            # x: (B, 3, H, W)
            x = self.conv0(x)  # (B, 8, H, W)
            x = self.conv1(x)  # (B, 16, H//2, W//2)
            x = self.conv2(x)  # (B, 32, H//4, W//4)
            x = self.toplayer(x)  # (B, 32, H//4, W//4)
            return [x]
class MVSNet(nn.Module):
    """Cost-volume network: builds a variance-based plane-sweep volume over D
    depth hypotheses and regularizes it with a 3D CNN (`cost_reg_2`)."""
    def __init__(self,
                 depth=128,
                 num_groups=1,
                 norm_act=InPlaceABN,
                 levels=1):
        super(MVSNet, self).__init__()
        self.levels = levels  # 3 depth levels
        self.n_depths = [128,32,8]
        self.G = num_groups  # number of groups in groupwise correlation
        self.N_importance = 0
        self.chunk = 1024
        self.D = depth
        # Input channels: 32 feature-variance + 9 warped-color channels.
        self.cost_reg_2 = CostRegNet(32+9, norm_act)

    def build_volume_costvar(self, feats, proj_mats, depth_values, pad=0):
        # feats: (B, V, C, H, W)
        # proj_mats: (B, V, 3, 4)
        # depth_values: (B, D, H, W)
        # cost_reg: nn.Module of input (B, C, D, h, w) and output (B, 1, D, h, w)
        # volume_sum [B, G, D, h, w]
        # prob_volume [B D H W]
        # volume_feature [B C D H W]
        B, V, C, H, W = feats.shape
        D = depth_values.shape[1]

        # View 0 is the reference; the others get homography-warped onto it.
        ref_feats, src_feats = feats[:, 0], feats[:, 1:]
        src_feats = src_feats.permute(1, 0, 2, 3, 4)  # (V-1, B, C, h, w)
        proj_mats = proj_mats[:, 1:]
        proj_mats = proj_mats.permute(1, 0, 2, 3)  # (V-1, B, 3, 4)

        if pad > 0:
            ref_feats = F.pad(ref_feats, (pad, pad, pad, pad), "constant", 0)

        ref_volume = ref_feats.unsqueeze(2).repeat(1, 1, D, 1, 1)  # (B, C, D, h, w)

        volume_sum = ref_volume
        volume_sq_sum = ref_volume ** 2
        del ref_feats

        # in_masks counts how many views see each voxel (ref always does).
        in_masks = torch.ones((B, 1, D, H + pad * 2, W + pad * 2), device=volume_sum.device)
        for i, (src_feat, proj_mat) in enumerate(zip(src_feats, proj_mats)):
            warped_volume, grid = homo_warp(src_feat, proj_mat, depth_values, pad=pad)

            grid = grid.view(B, 1, D, H + pad * 2, W + pad * 2, 2)
            # Valid re-projections fall inside the [-1, 1] sampling grid.
            in_mask = ((grid > -1.0) * (grid < 1.0))
            in_mask = (in_mask[..., 0] * in_mask[..., 1])
            in_masks += in_mask.float()
            if self.training:
                volume_sum = volume_sum + warped_volume
                volume_sq_sum = volume_sq_sum + warped_volume ** 2
            else:
                # In-place accumulation to save memory at eval time.
                volume_sum += warped_volume
                volume_sq_sum += warped_volume.pow_(2)
            del warped_volume, src_feat, proj_mat
        del src_feats, proj_mats

        # Variance over the views that actually see each voxel: E[x^2]-E[x]^2.
        count = 1.0 / in_masks
        img_feat = volume_sq_sum * count - (volume_sum * count) ** 2
        del volume_sq_sum, volume_sum, count

        return img_feat, in_masks

    def build_volume_costvar_img(self, imgs, feats, proj_mats, depth_values, pad=0, vid=0):
        # feats: (B, V, C, H, W)
        # proj_mats: (B, V, 3, 4)
        # depth_values: (B, D, H, W)
        # cost_reg: nn.Module of input (B, C, D, h, w) and output (B, 1, D, h, w)
        # volume_sum [B, G, D, h, w]
        # prob_volume [B D H W]
        # volume_feature [B C D H W]
        B, V, C, H, W = feats.shape
        D = depth_values.shape[1]

        # View `vid` is the reference this time; all views are warped onto it.
        cur_feats, src_feats = feats[:, vid, ...], feats.permute(1, 0, 2, 3, 4)  # (V, B, C, h, w)
        proj_mats = proj_mats.permute(1, 0, 2, 3)  # (V, B, 3, 4)

        if pad > 0:
            cur_feats = F.pad(cur_feats, (pad, pad, pad, pad), "constant", 0)

        # Layout: first V*3 channels are per-view colors, last 32 the variance.
        img_feat = torch.empty((B, 9 + 32, D, *cur_feats.shape[-2:]), device=feats.device, dtype=torch.float)
        imgs = F.interpolate(imgs.view(B * V, *imgs.shape[2:]), (H, W), mode='bilinear', align_corners=False).view(B, V,-1,H,W).permute(1, 0, 2, 3, 4)
        img_feat[:, :3, :, pad:H + pad, pad:W + pad] = imgs[0].unsqueeze(2).expand(-1, -1, D, -1, -1)

        cur_volume = cur_feats.unsqueeze(2).repeat(1, 1, D, 1, 1)  # (B, C, D, h, w)

        volume_sum = cur_volume
        volume_sq_sum = cur_volume ** 2
        del cur_feats

        src_view_count = 0
        in_masks = torch.ones((B, V, D, H + pad * 2, W + pad * 2), device=volume_sum.device)
        for i, (src_img, src_feat, proj_mat) in enumerate(zip(imgs, src_feats, proj_mats)):
            # warped_volume: 1, 32, 128, 176, 208 B, D, H_pad, W_pad , grid B, D, W_pad, H_pad
            if i == vid:
                continue
            src_view_count+=1
            warped_volume, grid = homo_warp(src_feat, proj_mat, depth_values, pad=pad)
            # Reuse the grid to warp the raw colors into the same frame.
            img_feat[:, src_view_count * 3:(src_view_count + 1) * 3], _ = homo_warp(src_img, proj_mat, depth_values, src_grid=grid, pad=pad)

            grid = grid.view(B, 1, D, H + pad * 2, W + pad * 2, 2)
            in_mask = ((grid > -1.0) * (grid < 1.0))
            in_mask = (in_mask[..., 0] * in_mask[..., 1])
            in_masks[:, src_view_count] = in_mask.float()

            if self.training:
                volume_sum = volume_sum + warped_volume
                volume_sq_sum = volume_sq_sum + warped_volume ** 2
            else:
                # In-place accumulation to save memory at eval time.
                volume_sum += warped_volume
                volume_sq_sum += warped_volume.pow_(2)
            del warped_volume, src_feat, proj_mat
        del src_feats, proj_mats

        # Variance over visible views goes into the last 32 channels.
        count = 1.0 / torch.sum(in_masks, dim=1, keepdim=True)
        img_feat[:, -32:] = volume_sq_sum * count - (volume_sum * count) ** 2
        del volume_sq_sum, volume_sum, count

        return img_feat, in_masks

    def forward(self, imgs, feats, proj_mats, near_far, pad=0, return_color=False, lindisp=False, vid=0):
        # imgs: (B, V, 3, H, W)
        # proj_mats: (B, V, 3, 4) from fine to coarse
        # init_depth_min, depth_interval: (B) or float
        # near_far (B, V, 2)
        B, V, _, H, W = imgs.shape
        imgs = imgs.reshape(B * V, 3, H, W)
        imgs = imgs.view(B, V, 3, H, W)

        feats_l = feats[-1]  # (B*V, C, h, w)
        feats_l = feats_l.view(B, V, *feats_l.shape[1:])  # (B, V, C, h, w)

        # D depth hypotheses, linearly spaced in depth or in disparity.
        t_vals = torch.linspace(0., 1., steps=self.D, device=imgs.device, dtype=imgs.dtype)  # (B, D)
        near, far = near_far  # assume batch size==1
        if not lindisp:
            depth_values = near * (1.-t_vals) + far * (t_vals)
        else:
            depth_values = 1. / (1. / near * (1. - t_vals) + 1. / far * (t_vals))
        # print("near , far", near, far)
        # print("depth_values", depth_values)
        depth_values = depth_values.unsqueeze(0)
        # volume_feat, in_masks = self.build_volume_costvar(feats_l, proj_mats, depth_values, pad=pad)
        volume_feat, in_masks = self.build_volume_costvar_img(imgs, feats_l, proj_mats, depth_values, pad=pad, vid=vid)
        if return_color:
            # Per-view warped colors + visibility masks for color supervision.
            feats_l = torch.cat((volume_feat[:,:V*3].view(B, V, 3, *volume_feat.shape[2:]),in_masks.unsqueeze(2)),dim=2)

        # print("pre cost volume_feat", volume_feat.shape) ([1, 41, 128, 176, 208])
        volume_feat = self.cost_reg_2(volume_feat)  # (B, 1, D, h, w)
        volume_feat = volume_feat.reshape(1,-1,*volume_feat.shape[2:])

        return volume_feat, feats, depth_values
import torch.nn.functional as F
from torch.optim.lr_scheduler import CosineAnnealingLR, MultiStepLR
def run_network_mvs(pts, viewdirs, alpha_feat, fn, embed_fn, embeddirs_fn, netchunk=1024):
    """
    Prepares inputs and applies network 'fn'.

    Points are positionally embedded, optionally concatenated with alpha
    features and embedded view directions, then evaluated through `fn` in
    chunks of `netchunk` via `batchify` (defined elsewhere in this module).
    """
    if embed_fn is not None:
        pts = embed_fn(pts)

    if alpha_feat is not None:
        pts = torch.cat((pts,alpha_feat), dim=-1)

    if viewdirs is not None:
        if viewdirs.dim()!=3:
            # Broadcast one direction per ray to every sample on the ray.
            viewdirs = viewdirs[:, None].expand(-1,pts.shape[1],-1)

        if embeddirs_fn is not None:
            viewdirs = embeddirs_fn(viewdirs)
        pts = torch.cat([pts, viewdirs], -1)

    # With no view directions the network only predicts density.
    alpha_only = viewdirs is None
    outputs_flat = batchify(fn, netchunk)(pts, alpha_only)
    outputs = torch.reshape(outputs_flat, list(pts.shape[:-1]) + [outputs_flat.shape[-1]])
    return outputs
class MVSNet(nn.Module):
    """Classic MVSNet depth estimator: per-view features, variance cost
    volume over depth hypotheses, 3D regularization, soft-argmin depth
    regression, and optional 2D refinement."""
    def __init__(self, refine=False):
        super(MVSNet, self).__init__()
        self.refine = refine

        self.feature = FeatureNet()
        self.cost_regularization = CostRegNet()
        if self.refine:
            self.refine_network = RefineNet()

    def forward(self, imgs, proj_matrices, depth_values, features=None, prob_only=False):
        imgs = torch.unbind(imgs, 1)
        num_depth = depth_values.shape[1]
        num_views = len(imgs)

        # step 1. feature extraction
        # in: images; out: 32-channel feature maps
        if features is None:
            features = [self.feature(img) for img in imgs]

        # step 2. differentiable homograph, build cost volume
        volume_sum = 0
        volume_sq_sum = 0
        for vid in range(num_views):
            # warpped features
            warped_volume = homo_warping(features[vid], proj_matrices[:, vid], depth_values)
            if self.training:
                volume_sum = volume_sum + warped_volume
                volume_sq_sum = volume_sq_sum + warped_volume ** 2
            else:
                # In-place ops save memory at eval time.
                volume_sum += warped_volume
                volume_sq_sum += warped_volume.pow_(2)  # the memory of warped_volume has been modified
            del warped_volume
        # Variance across views: E[x^2] - E[x]^2.
        volume_variance = volume_sq_sum.div_(num_views).sub_(volume_sum.div_(num_views).pow_(2))

        # step 3. cost volume regularization
        cost_reg = self.cost_regularization(volume_variance)
        cost_reg = cost_reg.squeeze(1)
        prob_volume = F.softmax(cost_reg, dim=1)
        if prob_only:
            return features, prob_volume, cost_reg
        # Soft-argmin (expected depth) over the hypothesis axis.
        depth = depth_regression(prob_volume, depth_values=depth_values)

        with torch.no_grad():
            # photometric confidence: probability mass in a 4-bin window
            # around the winning depth index.
            prob_volume_sum4 = 4 * F.avg_pool3d(F.pad(prob_volume.unsqueeze(1), pad=(0, 0, 0, 0, 1, 2)), (4, 1, 1), stride=1, padding=0).squeeze(1)
            depth_index = depth_regression(prob_volume, depth_values=torch.arange(num_depth, device=prob_volume.device, dtype=torch.float)).long()
            photometric_confidence = torch.gather(prob_volume_sum4, 1, depth_index.unsqueeze(1)).squeeze(1)

        # step 4. depth map refinement
        if not self.refine:
            return depth, photometric_confidence, features, prob_volume  # {"depth": depth, "photometric_confidence": photometric_confidence}
        else:
            refined_depth = self.refine_network(torch.cat((imgs[0], depth), 1))
            return {"depth": depth, "refined_depth": refined_depth, "photometric_confidence": photometric_confidence}
The provided code snippet includes necessary dependencies for implementing the `create_nerf_mvs` function. Write a Python function `def create_nerf_mvs(args, pts_embedder=True, use_mvs=False, dir_embedder=True, Depth=128)` to solve the following problem:
Instantiate mvs NeRF's MLP model.
Here is the function:
def create_nerf_mvs(args, pts_embedder=True, use_mvs=False, dir_embedder=True, Depth=128):
    """Instantiate mvs NeRF's MLP model.

    Builds the optional positional/direction embedders, the coarse (and
    optional fine) MLP, the MVS encoding network, loads a checkpoint when
    `args.ckpt` is set, and returns
    (render_kwargs_train, render_kwargs_test, start_iteration, grad_vars).
    """
    # Positional embedding for 3D points (identity if disabled).
    if pts_embedder:
        embed_fn, input_ch = get_embedder(args.multires, args.i_embed, input_dims=args.pts_dim)
    else:
        embed_fn, input_ch = None, args.pts_dim

    embeddirs_fn = None
    if dir_embedder:
        embeddirs_fn, input_ch_views = get_embedder(args.multires_views, args.i_embed, input_dims=args.dir_dim)
    else:
        embeddirs_fn, input_ch_views = None, args.dir_dim

    skips = [4]
    model = MVSNeRF(D=args.netdepth, W=args.netwidth,
                    input_ch_pts=input_ch, skips=skips,
                    input_ch_views=input_ch_views, input_ch_feat=args.feat_dim, net_type=args.net_type).to(device)

    grad_vars = []
    grad_vars += list(model.parameters())

    model_fine = None
    if args.N_importance > 0:
        # Separate fine network for hierarchical sampling.
        model_fine = MVSNeRF(D=args.netdepth, W=args.netwidth,
                             input_ch_pts=input_ch, skips=skips,
                             input_ch_views=input_ch_views, input_ch_feat=args.feat_dim).to(device)
        grad_vars += list(model_fine.parameters())

    network_query_fn = lambda pts, viewdirs, rays_feats, network_fn: run_network_mvs(pts, viewdirs, rays_feats, network_fn,
                                                                                     embed_fn=embed_fn,
                                                                                     embeddirs_fn=embeddirs_fn,
                                                                                     netchunk=args.netchunk)

    EncodingNet = None
    net_2d = FeatureNet(intermediate=True)
    if use_mvs:
        # NOTE(review): the MVSNet defined above takes (depth=..., num_groups=...),
        # not (net_2d, Depth=...) — confirm which MVSNet this name resolves to.
        EncodingNet = MVSNet(net_2d, Depth=Depth).to(device)
        grad_vars += list(EncodingNet.parameters())  #!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!

    start = 0

    ##########################
    # Load checkpoints
    ckpts = []
    if args.ckpt is not None and args.ckpt != 'None':
        ckpts = [args.ckpt]

    print('Found ckpts', ckpts)
    if len(ckpts) > 0 :
        ckpt_path = ckpts[-1]
        print('Reloading from', ckpt_path)
        ckpt = torch.load(ckpt_path)

        # Load model
        if use_mvs:
            state_dict = ckpt['network_mvs_state_dict']
            EncodingNet.load_state_dict(state_dict)

        model.load_state_dict(ckpt['network_fn_state_dict'])
        # if model_fine is not None:
        #     model_fine.load_state_dict(ckpt['network_fine_state_dict'])

    ##########################
    render_kwargs_train = {
        'network_query_fn': network_query_fn,
        'perturb': args.perturb,
        'N_importance': args.N_importance,
        'network_fine': model_fine,
        'N_samples': args.N_samples,
        'network_fn': model,
        'network_mvs': EncodingNet,
        'use_viewdirs': args.use_viewdirs,
        'white_bkgd': args.white_bkgd,
        'raw_noise_std': args.raw_noise_std,
    }

    render_kwargs_test = {k: render_kwargs_train[k] for k in render_kwargs_train}
    # Deterministic sampling at test time.
    render_kwargs_test['perturb'] = False

    return render_kwargs_train, render_kwargs_test, start, grad_vars
156,853 | import torch
import torch.nn as nn
from .mvs_utils import *
from .mvs_utils import homo_warp
from inplace_abn import InPlaceABN
from .renderer import run_network_mvs
from ..depth_estimators.mvsnet import MVSNet as Ofcl_MVSNet
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
class FeatureNet(nn.Module):
    """
    output 3 levels of features using a FPN structure
    """
    def __init__(self, intermediate=False, norm_act=InPlaceABN):
        # intermediate: if True, forward() also returns the intermediate maps.
        super(FeatureNet, self).__init__()

        self.conv0 = nn.Sequential(
                        ConvBnReLU(3, 8, 3, 1, 1, norm_act=norm_act),
                        ConvBnReLU(8, 8, 3, 1, 1, norm_act=norm_act))

        self.conv1 = nn.Sequential(
                        ConvBnReLU(8, 16, 5, 2, 2, norm_act=norm_act),
                        ConvBnReLU(16, 16, 3, 1, 1, norm_act=norm_act),
                        ConvBnReLU(16, 16, 3, 1, 1, norm_act=norm_act))

        self.conv2 = nn.Sequential(
                        ConvBnReLU(16, 32, 5, 2, 2, norm_act=norm_act),
                        ConvBnReLU(32, 32, 3, 1, 1, norm_act=norm_act),
                        ConvBnReLU(32, 32, 3, 1, 1, norm_act=norm_act))

        self.toplayer = nn.Conv2d(32, 32, 1)
        self.intermediate = intermediate

    def _upsample_add(self, x, y):
        # 2x bilinear upsample of x, then elementwise add with y (FPN merge).
        return F.interpolate(x, scale_factor=2,
                             mode="bilinear", align_corners=True) + y

    def forward(self, x):
        """x: (B, V, 3, H, W) multi-view images, flattened to (B*V, ...)."""
        B, V, _, H, W = x.shape
        x = x.reshape(B * V, 3, H, W)
        if self.intermediate:
            x1 = self.conv0(x)  # (B, 8, H, W)
            x2 = self.conv1(x1)  # (B, 16, H//2, W//2)
            x3 = self.conv2(x2)  # (B, 32, H//4, W//4)
            x3 = self.toplayer(x3)  # (B, 32, H//4, W//4)
            return [x, x1, x2, x3]
        else:
            # x: (B, 3, H, W)
            x = self.conv0(x)  # (B, 8, H, W)
            x = self.conv1(x)  # (B, 16, H//2, W//2)
            x = self.conv2(x)  # (B, 32, H//4, W//4)
            x = self.toplayer(x)  # (B, 32, H//4, W//4)
            return [x]
class MVSNet(nn.Module):
    """Builds a plane-sweep cost volume from multi-view image features and
    regularizes it with a 3D CNN (mvsnerf-style encoding volume).

    NOTE(review): a second, unrelated class also named ``MVSNet`` is defined
    later in this module and shadows this one at import time — confirm which
    definition callers actually receive.
    """
    def __init__(self,
                 depth=128,
                 num_groups=1,
                 norm_act=InPlaceABN,
                 levels=1):
        super(MVSNet, self).__init__()
        self.levels = levels  # 3 depth levels
        self.n_depths = [128,32,8]
        self.G = num_groups  # number of groups in groupwise correlation
        self.N_importance = 0
        self.chunk = 1024
        self.D = depth  # number of depth hypothesis planes
        # Input channels: 32 feature channels + 9 channels of warped RGB.
        self.cost_reg_2 = CostRegNet(32+9, norm_act)

    def build_volume_costvar(self, feats, proj_mats, depth_values, pad=0):
        """Variance cost volume over the reference view (view 0) frustum.

        Returns ``(img_feat, in_masks)``: the per-voxel feature variance
        across views, and per-voxel counts of views whose warp lands inside
        the image.
        """
        # feats: (B, V, C, H, W)
        # proj_mats: (B, V, 3, 4)
        # depth_values: (B, D, H, W)
        # cost_reg: nn.Module of input (B, C, D, h, w) and output (B, 1, D, h, w)
        # volume_sum [B, G, D, h, w]
        # prob_volume [B D H W]
        # volume_feature [B C D H W]
        B, V, C, H, W = feats.shape
        D = depth_values.shape[1]
        ref_feats, src_feats = feats[:, 0], feats[:, 1:]
        src_feats = src_feats.permute(1, 0, 2, 3, 4)  # (V-1, B, C, h, w)
        proj_mats = proj_mats[:, 1:]
        proj_mats = proj_mats.permute(1, 0, 2, 3)  # (V-1, B, 3, 4)
        if pad > 0:
            ref_feats = F.pad(ref_feats, (pad, pad, pad, pad), "constant", 0)
        ref_volume = ref_feats.unsqueeze(2).repeat(1, 1, D, 1, 1)  # (B, C, D, h, w)
        volume_sum = ref_volume
        volume_sq_sum = ref_volume ** 2
        del ref_feats
        # Starts at 1: the reference view itself always "sees" every voxel.
        in_masks = torch.ones((B, 1, D, H + pad * 2, W + pad * 2), device=volume_sum.device)
        for i, (src_feat, proj_mat) in enumerate(zip(src_feats, proj_mats)):
            # Warp each source feature map onto the reference sweep planes.
            warped_volume, grid = homo_warp(src_feat, proj_mat, depth_values, pad=pad)
            grid = grid.view(B, 1, D, H + pad * 2, W + pad * 2, 2)
            # A voxel counts as visible when its sample location lies in (-1, 1).
            in_mask = ((grid > -1.0) * (grid < 1.0))
            in_mask = (in_mask[..., 0] * in_mask[..., 1])
            in_masks += in_mask.float()
            if self.training:
                volume_sum = volume_sum + warped_volume
                volume_sq_sum = volume_sq_sum + warped_volume ** 2
            else:
                # Eval path: in-place ops to cut peak memory (no autograd needed).
                volume_sum += warped_volume
                volume_sq_sum += warped_volume.pow_(2)
            del warped_volume, src_feat, proj_mat
        del src_feats, proj_mats
        count = 1.0 / in_masks
        # Variance across views: E[x^2] - (E[x])^2, normalized by view count.
        img_feat = volume_sq_sum * count - (volume_sum * count) ** 2
        del volume_sq_sum, volume_sum, count
        return img_feat, in_masks

    def build_volume_costvar_img(self, imgs, feats, proj_mats, depth_values, pad=0, vid=0):
        """Like ``build_volume_costvar`` but sweeps around view ``vid`` and
        additionally stores each view's (warped) RGB in the first 9 channels;
        the last 32 channels hold the feature variance across views.
        """
        # feats: (B, V, C, H, W)
        # proj_mats: (B, V, 3, 4)
        # depth_values: (B, D, H, W)
        # cost_reg: nn.Module of input (B, C, D, h, w) and output (B, 1, D, h, w)
        # volume_sum [B, G, D, h, w]
        # prob_volume [B D H W]
        # volume_feature [B C D H W]
        B, V, C, H, W = feats.shape
        D = depth_values.shape[1]
        cur_feats, src_feats = feats[:, vid, ...], feats.permute(1, 0, 2, 3, 4)  # (V, B, C, h, w)
        proj_mats = proj_mats.permute(1, 0, 2, 3)  # (V, B, 3, 4)
        if pad > 0:
            cur_feats = F.pad(cur_feats, (pad, pad, pad, pad), "constant", 0)
        img_feat = torch.empty((B, 9 + 32, D, *cur_feats.shape[-2:]), device=feats.device, dtype=torch.float)
        imgs = F.interpolate(imgs.view(B * V, *imgs.shape[2:]), (H, W), mode='bilinear', align_corners=False).view(B, V,-1,H,W).permute(1, 0, 2, 3, 4)
        # Channels 0-2: the (unwarped) RGB of view 0, repeated per depth plane.
        img_feat[:, :3, :, pad:H + pad, pad:W + pad] = imgs[0].unsqueeze(2).expand(-1, -1, D, -1, -1)
        cur_volume = cur_feats.unsqueeze(2).repeat(1, 1, D, 1, 1)  # (B, C, D, h, w)
        volume_sum = cur_volume
        volume_sq_sum = cur_volume ** 2
        del cur_feats
        src_view_count = 0
        in_masks = torch.ones((B, V, D, H + pad * 2, W + pad * 2), device=volume_sum.device)
        for i, (src_img, src_feat, proj_mat) in enumerate(zip(imgs, src_feats, proj_mats)):
            # warped_volume: 1, 32, 128, 176, 208 B, D, H_pad, W_pad , grid B, D, W_pad, H_pad
            if i == vid:
                continue  # the current view itself is not warped
            src_view_count+=1
            warped_volume, grid = homo_warp(src_feat, proj_mat, depth_values, pad=pad)
            # Reuse the feature-warp grid to also warp the source RGB into place.
            img_feat[:, src_view_count * 3:(src_view_count + 1) * 3], _ = homo_warp(src_img, proj_mat, depth_values, src_grid=grid, pad=pad)
            grid = grid.view(B, 1, D, H + pad * 2, W + pad * 2, 2)
            in_mask = ((grid > -1.0) * (grid < 1.0))
            in_mask = (in_mask[..., 0] * in_mask[..., 1])
            in_masks[:, src_view_count] = in_mask.float()
            if self.training:
                volume_sum = volume_sum + warped_volume
                volume_sq_sum = volume_sq_sum + warped_volume ** 2
            else:
                # Eval path: in-place ops to cut peak memory (no autograd needed).
                volume_sum += warped_volume
                volume_sq_sum += warped_volume.pow_(2)
            del warped_volume, src_feat, proj_mat
        del src_feats, proj_mats
        count = 1.0 / torch.sum(in_masks, dim=1, keepdim=True)
        # Last 32 channels: feature variance across views (E[x^2] - E[x]^2).
        img_feat[:, -32:] = volume_sq_sum * count - (volume_sum * count) ** 2
        del volume_sq_sum, volume_sum, count
        return img_feat, in_masks

    def forward(self, imgs, feats, proj_mats, near_far, pad=0, return_color=False, lindisp=False, vid=0):
        """Build the regularized encoding volume for view ``vid``.

        Returns ``(volume_feat, feats, depth_values)`` where ``depth_values``
        are the D sampled depths (uniform in depth, or in inverse depth when
        ``lindisp``).
        """
        # imgs: (B, V, 3, H, W)
        # proj_mats: (B, V, 3, 4) from fine to coarse
        # init_depth_min, depth_interval: (B) or float
        # near_far (B, V, 2)
        B, V, _, H, W = imgs.shape
        imgs = imgs.reshape(B * V, 3, H, W)
        imgs = imgs.view(B, V, 3, H, W)
        feats_l = feats[-1]  # (B*V, C, h, w)
        feats_l = feats_l.view(B, V, *feats_l.shape[1:])  # (B, V, C, h, w)
        t_vals = torch.linspace(0., 1., steps=self.D, device=imgs.device, dtype=imgs.dtype)  # (B, D)
        near, far = near_far  # assume batch size==1
        if not lindisp:
            depth_values = near * (1.-t_vals) + far * (t_vals)
        else:
            depth_values = 1. / (1. / near * (1. - t_vals) + 1. / far * (t_vals))
        # print("near , far", near, far)
        # print("depth_values", depth_values)
        depth_values = depth_values.unsqueeze(0)
        # volume_feat, in_masks = self.build_volume_costvar(feats_l, proj_mats, depth_values, pad=pad)
        volume_feat, in_masks = self.build_volume_costvar_img(imgs, feats_l, proj_mats, depth_values, pad=pad, vid=vid)
        if return_color:
            # Per-view warped RGB concatenated with its visibility mask.
            feats_l = torch.cat((volume_feat[:,:V*3].view(B, V, 3, *volume_feat.shape[2:]),in_masks.unsqueeze(2)),dim=2)
        # print("pre cost volume_feat", volume_feat.shape) ([1, 41, 128, 176, 208])
        volume_feat = self.cost_reg_2(volume_feat)  # (B, 1, D, h, w)
        volume_feat = volume_feat.reshape(1,-1,*volume_feat.shape[2:])
        return volume_feat, feats, depth_values
class MVSNet(nn.Module):
    """Classic MVSNet-style depth estimator: 2D feature extraction, variance
    cost volume over warped features, 3D cost regularization, soft-argmin
    depth regression plus a photometric-confidence map.

    NOTE(review): this definition shadows the cost-volume ``MVSNet`` defined
    earlier in the module (the last definition wins at import time) — confirm
    which class callers expect.
    """
    def __init__(self, refine=False):
        super(MVSNet, self).__init__()
        self.refine = refine  # optionally refine the regressed depth map
        self.feature = FeatureNet()
        self.cost_regularization = CostRegNet()
        if self.refine:
            self.refine_network = RefineNet()

    def forward(self, imgs, proj_matrices, depth_values, features=None, prob_only=False):
        """Estimate a depth map from the posed input views.

        Returns ``(depth, photometric_confidence, features, prob_volume)``
        when ``refine`` is False; with ``prob_only`` returns
        ``(features, prob_volume, cost_reg)`` instead.
        """
        imgs = torch.unbind(imgs, 1)
        num_depth = depth_values.shape[1]
        num_views = len(imgs)
        # step 1. feature extraction
        # in: images; out: 32-channel feature maps
        if features is None:
            features = [self.feature(img) for img in imgs]
        # step 2. differentiable homograph, build cost volume
        volume_sum = 0
        volume_sq_sum = 0
        for vid in range(num_views):
            # warpped features
            warped_volume = homo_warping(features[vid], proj_matrices[:, vid], depth_values)
            if self.training:
                volume_sum = volume_sum + warped_volume
                volume_sq_sum = volume_sq_sum + warped_volume ** 2
            else:
                volume_sum += warped_volume
                volume_sq_sum += warped_volume.pow_(2)  # the memory of warped_volume has been modified
            del warped_volume
        # Variance across views: E[x^2] - (E[x])^2 (computed in place).
        volume_variance = volume_sq_sum.div_(num_views).sub_(volume_sum.div_(num_views).pow_(2))
        # step 3. cost volume regularization
        cost_reg = self.cost_regularization(volume_variance)
        cost_reg = cost_reg.squeeze(1)
        prob_volume = F.softmax(cost_reg, dim=1)
        if prob_only:
            return features, prob_volume, cost_reg
        # Soft-argmin: expected depth under the probability volume.
        depth = depth_regression(prob_volume, depth_values=depth_values)
        with torch.no_grad():
            # photometric confidence
            # Probability mass in a 4-plane window around the winning depth.
            prob_volume_sum4 = 4 * F.avg_pool3d(F.pad(prob_volume.unsqueeze(1), pad=(0, 0, 0, 0, 1, 2)), (4, 1, 1), stride=1, padding=0).squeeze(1)
            depth_index = depth_regression(prob_volume, depth_values=torch.arange(num_depth, device=prob_volume.device, dtype=torch.float)).long()
            photometric_confidence = torch.gather(prob_volume_sum4, 1, depth_index.unsqueeze(1)).squeeze(1)
        # step 4. depth map refinement
        if not self.refine:
            return depth, photometric_confidence, features, prob_volume  # {"depth": depth, "photometric_confidence": photometric_confidence}
        else:
            refined_depth = self.refine_network(torch.cat((imgs[0], depth), 1))
            return {"depth": depth, "refined_depth": refined_depth, "photometric_confidence": photometric_confidence}
The provided code snippet includes necessary dependencies for implementing the `create_mvs` function. Write a Python function `def create_mvs(args, mvs_mode=-1, depth=128)` to solve the following problem:
Instantiate mvs NeRF's MLP model.
Here is the function:
def create_mvs(args, mvs_mode=-1, depth=128):
    """Instantiate mvs NeRF's MLP model.

    Args:
        args: option namespace (unused here; kept for interface compatibility
            with the other ``create_*`` factories).
        mvs_mode: -1 -> cost-volume MVSNet encoder; >= 1 -> official MVSNet;
            any other value leaves the encoding network as ``None``.
        depth: number of depth hypothesis planes for the cost-volume encoder.

    Returns:
        (render_kwargs_train, render_kwargs_test, start): kwarg dicts holding
        the 2D feature net and the (eval-mode) encoding net, plus start step 0.
    """
    net_2d = FeatureNet(intermediate=True).to(device)
    # NOTE(review): two classes named MVSNet exist in this module; the later
    # definition wins at import time — confirm MVSNet(depth=...) targets the
    # intended one.
    EncodingNet = None
    if mvs_mode == -1:
        EncodingNet = MVSNet(depth=depth).to(device)
    elif mvs_mode >= 1:
        EncodingNet = Ofcl_MVSNet(refine=False).to(device)
    # Bug fix: for mvs_mode values matching neither branch, EncodingNet stayed
    # None and the unconditional .eval() raised AttributeError.
    if EncodingNet is not None:
        EncodingNet.eval()  # the MVS encoder is used frozen / in eval mode
    start = 0
    render_kwargs_train = {
        'network_featmvs': EncodingNet,
        'network_2d': net_2d,
    }
    render_kwargs_test = dict(render_kwargs_train)  # shallow copy
    render_kwargs_test['perturb'] = False  # no stratified jitter at test time
    return render_kwargs_train, render_kwargs_test, start
156,854 | import os, torch, cv2, re
import numpy as np
from torch_scatter import scatter_min, segment_coo, scatter_mean
from PIL import Image
import torch.nn.functional as F
import torchvision.transforms as T
from functools import partial
import matplotlib.pyplot as plt
from scipy.spatial.transform import Rotation as R
mse2psnr2 = lambda x : -10. * np.log(x) / np.log(10.)
from scipy.interpolate import CubicSpline
from kornia.utils import create_meshgrid
from torch.optim.lr_scheduler import CosineAnnealingLR, MultiStepLR
from warmup_scheduler import GradualWarmupScheduler
def get_psnr(imgs_pred, imgs_gt):
    """Per-image PSNR between predicted (numpy) and ground-truth (torch) images.

    Returns a numpy array with one PSNR value per image pair.
    """
    scores = [
        mse2psnr2(np.mean((pred - gt.cpu().numpy()) ** 2))
        for pred, gt in zip(imgs_pred, imgs_gt)
    ]
    return np.array(scores)
156,855 | import os, torch, cv2, re
import numpy as np
from torch_scatter import scatter_min, segment_coo, scatter_mean
from PIL import Image
import torch.nn.functional as F
import torchvision.transforms as T
from functools import partial
import matplotlib.pyplot as plt
from scipy.spatial.transform import Rotation as R
from scipy.interpolate import CubicSpline
from kornia.utils import create_meshgrid
from torch.optim.lr_scheduler import CosineAnnealingLR, MultiStepLR
from warmup_scheduler import GradualWarmupScheduler
def init_log(log, keys):
    """Seed *log* so every key in *keys* maps to a zeroed float64 scalar tensor.

    Mutates and returns the same dict.
    """
    log.update({key: torch.tensor([0.0], dtype=float) for key in keys})
    return log
156,856 | import os, torch, cv2, re
import numpy as np
from torch_scatter import scatter_min, segment_coo, scatter_mean
from PIL import Image
import torch.nn.functional as F
import torchvision.transforms as T
from functools import partial
import matplotlib.pyplot as plt
from scipy.spatial.transform import Rotation as R
from scipy.interpolate import CubicSpline
from kornia.utils import create_meshgrid
from torch.optim.lr_scheduler import CosineAnnealingLR, MultiStepLR
from warmup_scheduler import GradualWarmupScheduler
The provided code snippet includes necessary dependencies for implementing the `visualize_depth_numpy` function. Write a Python function `def visualize_depth_numpy(depth, minmax=None, cmap=cv2.COLORMAP_JET)` to solve the following problem:
depth: (H, W)
Here is the function:
def visualize_depth_numpy(depth, minmax=None, cmap=cv2.COLORMAP_JET):
    """
    Colorize a depth map.

    depth: (H, W); NaNs are treated as 0.
    Returns the color-mapped image and the [min, max] range used.
    """
    cleaned = np.nan_to_num(depth)  # change nan to 0
    if minmax is None:
        lo = np.min(cleaned[cleaned > 0])  # minimum positive depth (ignore background)
        hi = np.max(cleaned)
    else:
        lo, hi = minmax
    scaled = (cleaned - lo) / (hi - lo + 1e-8)  # normalize to 0~1
    scaled = (255 * scaled).astype(np.uint8)
    colored = cv2.applyColorMap(scaled, cmap)
    return colored, [lo, hi]
156,857 | import os, torch, cv2, re
import numpy as np
from torch_scatter import scatter_min, segment_coo, scatter_mean
from PIL import Image
import torch.nn.functional as F
import torchvision.transforms as T
from functools import partial
import matplotlib.pyplot as plt
from scipy.spatial.transform import Rotation as R
from scipy.interpolate import CubicSpline
from kornia.utils import create_meshgrid
from torch.optim.lr_scheduler import CosineAnnealingLR, MultiStepLR
from warmup_scheduler import GradualWarmupScheduler
The provided code snippet includes necessary dependencies for implementing the `visualize_depth` function. Write a Python function `def visualize_depth(depth, minmax=None, cmap=cv2.COLORMAP_JET)` to solve the following problem:
depth: (H, W)
Here is the function:
def visualize_depth(depth, minmax=None, cmap=cv2.COLORMAP_JET):
    """
    Colorize a depth map into a (3, H, W) tensor.

    depth: (H, W) tensor or ndarray; NaNs are treated as 0.
    Returns the color image tensor and the [min, max] range used.
    """
    # `type(...) is not` (not isinstance) kept on purpose: only plain ndarrays
    # skip the torch -> numpy conversion, exactly as before.
    if type(depth) is not np.ndarray:
        depth = depth.cpu().numpy()
    cleaned = np.nan_to_num(depth)  # change nan to 0
    if minmax is None:
        lo = np.min(cleaned[cleaned > 0])  # minimum positive depth (ignore background)
        hi = np.max(cleaned)
    else:
        lo, hi = minmax
    scaled = (cleaned - lo) / (hi - lo + 1e-8)  # normalize to 0~1
    scaled = (255 * scaled).astype(np.uint8)
    img = Image.fromarray(cv2.applyColorMap(scaled, cmap))
    tensor_img = T.ToTensor()(img)  # (3, H, W)
    return tensor_img, [lo, hi]
156,858 | import os, torch, cv2, re
import numpy as np
from torch_scatter import scatter_min, segment_coo, scatter_mean
from PIL import Image
import torch.nn.functional as F
import torchvision.transforms as T
from functools import partial
import matplotlib.pyplot as plt
from scipy.spatial.transform import Rotation as R
from scipy.interpolate import CubicSpline
from kornia.utils import create_meshgrid
from torch.optim.lr_scheduler import CosineAnnealingLR, MultiStepLR
from warmup_scheduler import GradualWarmupScheduler
def get_rays_mvs(H, W, intrinsic, c2w, N=1024, isRandom=True, is_precrop_iters=False, chunk=-1, idx=-1):
    """Generate camera rays, either for N random pixels or for a dense grid.

    Returns (rays_o, rays_d, pixel_coordinates): a shared (3,) origin, the
    per-pixel world-space directions, and a (2, P) stack of (row, col) pixels.
    """
    device = c2w.device
    if isRandom:
        # During pre-crop iterations, with probability ~0.7 bias samples
        # towards the central region of the image.
        if is_precrop_iters and torch.rand((1,)) > 0.3:
            xs = torch.randint(W // 6, W - W // 6, (N,)).float().to(device)
            ys = torch.randint(H // 6, H - H // 6, (N,)).float().to(device)
        else:
            xs = torch.randint(0, W, (N,)).float().to(device)
            ys = torch.randint(0, H, (N,)).float().to(device)
    else:
        # Dense pixel grid; pytorch's meshgrid has indexing='ij'
        ys, xs = torch.meshgrid(torch.linspace(0, H - 1, H), torch.linspace(0, W - 1, W))
        ys = ys.reshape(-1)
        xs = xs.reshape(-1)
        if chunk > 0:
            lo, hi = idx * chunk, (idx + 1) * chunk
            ys, xs = ys[lo:hi], xs[lo:hi]
        ys = ys.to(device)
        xs = xs.to(device)
    # Unproject pixels with the pinhole intrinsics (z fixed at +1, not -1).
    cam_x = (xs - intrinsic[0, 2]) / intrinsic[0, 0]
    cam_y = (ys - intrinsic[1, 2]) / intrinsic[1, 1]
    dirs = torch.stack([cam_x, cam_y, torch.ones_like(xs)], -1)
    # Rotate directions into world space; equals [c2w.dot(dir) for dir in dirs].
    rays_d = dirs @ c2w[:3, :3].t()
    # All rays share the camera center as origin.
    rays_o = c2w[:3, -1].clone()
    pixel_coordinates = torch.stack((ys, xs))  # row col
    return rays_o, rays_d, pixel_coordinates
156,859 | import os, torch, cv2, re
import numpy as np
from torch_scatter import scatter_min, segment_coo, scatter_mean
from PIL import Image
import torch.nn.functional as F
import torchvision.transforms as T
from functools import partial
import matplotlib.pyplot as plt
from scipy.spatial.transform import Rotation as R
from scipy.interpolate import CubicSpline
from kornia.utils import create_meshgrid
from torch.optim.lr_scheduler import CosineAnnealingLR, MultiStepLR
from warmup_scheduler import GradualWarmupScheduler
def ndc_2_cam(ndc_xyz, near_far, intrinsic, W, H):
    """Map NDC coordinates (xy in [0,1], z in [0,1]) back to camera space.

    z is linearly rescaled into [near, far]; xy are scaled to pixels,
    multiplied by depth, and unprojected through the inverse intrinsics.
    """
    inv_scale = torch.tensor([[W - 1, H - 1]], device=ndc_xyz.device)
    depth = ndc_xyz[..., 2:3] * (near_far[1] - near_far[0]) + near_far[0]
    pix_xy = ndc_xyz[..., :2] * inv_scale * depth
    pts = torch.cat([pix_xy, depth], dim=-1)
    return pts @ torch.inverse(intrinsic[0, ...].t())
156,860 | import os, torch, cv2, re
import numpy as np
from torch_scatter import scatter_min, segment_coo, scatter_mean
from PIL import Image
import torch.nn.functional as F
import torchvision.transforms as T
from functools import partial
import matplotlib.pyplot as plt
from scipy.spatial.transform import Rotation as R
from scipy.interpolate import CubicSpline
from kornia.utils import create_meshgrid
from torch.optim.lr_scheduler import CosineAnnealingLR, MultiStepLR
from warmup_scheduler import GradualWarmupScheduler
def filter_keys(dict):
    """Strip sampling-related keys from a render-kwargs dict, in place.

    NOTE(review): the parameter shadows the ``dict`` builtin; kept because the
    name is part of the public signature.
    Returns the same (mutated) dict.
    """
    # pop(key, None) removes the key if present — avoids the double lookup of
    # `key in dict` followed by `pop`.
    for key in ('N_samples', 'ndc', 'lindisp'):
        dict.pop(key, None)
    return dict
156,861 | import os, torch, cv2, re
import numpy as np
from torch_scatter import scatter_min, segment_coo, scatter_mean
from PIL import Image
import torch.nn.functional as F
import torchvision.transforms as T
from functools import partial
import matplotlib.pyplot as plt
from scipy.spatial.transform import Rotation as R
from scipy.interpolate import CubicSpline
from kornia.utils import create_meshgrid
from torch.optim.lr_scheduler import CosineAnnealingLR, MultiStepLR
from warmup_scheduler import GradualWarmupScheduler
def sub_selete_data(data_batch, device, idx, filtKey=[], filtIndex=['view_ids_all','c2ws_all','scan','bbox','w2ref','ref2w','light_id','ckpt','idx']):
    """Sub-select views (dim 1) from every eligible tensor in a data batch.

    Tensors with more than 2 dims whose key is not in ``filtIndex`` are
    indexed with ``idx`` along dim 1; everything else is passed through.
    All results are cast to float and moved to ``device`` if not on CUDA.

    Bug fix: the original tested ``torch.is_tensor(item)`` where ``item`` is
    the dict *key* (a str), so the sub-selection branch was never taken and
    every tensor was returned unsliced. The check now targets the value.

    ``filtKey`` is unused; it (and its mutable default) are kept only for
    signature compatibility.
    """
    data_sub_selete = {}
    for key in data_batch.keys():
        value = data_batch[key]
        if key not in filtIndex and torch.is_tensor(value) and value.dim() > 2:
            data_sub_selete[key] = value[:, idx].float()
        else:
            data_sub_selete[key] = value.float()
        if not data_sub_selete[key].is_cuda:
            data_sub_selete[key] = data_sub_selete[key].to(device)
    return data_sub_selete
156,862 | import os, torch, cv2, re
import numpy as np
from torch_scatter import scatter_min, segment_coo, scatter_mean
from PIL import Image
import torch.nn.functional as F
import torchvision.transforms as T
from functools import partial
import matplotlib.pyplot as plt
from scipy.spatial.transform import Rotation as R
from scipy.interpolate import CubicSpline
from kornia.utils import create_meshgrid
from torch.optim.lr_scheduler import CosineAnnealingLR, MultiStepLR
from warmup_scheduler import GradualWarmupScheduler
def detach_data(dictionary):
    """Return a new dict mapping each key to a detached copy of its tensor."""
    return {key: value.detach().clone() for key, value in dictionary.items()}
156,863 | import os, torch, cv2, re
import numpy as np
from torch_scatter import scatter_min, segment_coo, scatter_mean
from PIL import Image
import torch.nn.functional as F
import torchvision.transforms as T
from functools import partial
import matplotlib.pyplot as plt
from scipy.spatial.transform import Rotation as R
from scipy.interpolate import CubicSpline
from kornia.utils import create_meshgrid
from torch.optim.lr_scheduler import CosineAnnealingLR, MultiStepLR
from warmup_scheduler import GradualWarmupScheduler
def read_pfm(filename):
    """Read a PFM (Portable Float Map) image.

    Args:
        filename: path to a 'PF' (color) or 'Pf' (grayscale) PFM file.

    Returns:
        (data, scale): the image as an (H, W, 3) or (H, W) float array with
        rows flipped to top-down order, and the absolute scale factor.

    Raises:
        Exception: if the magic header or the dimension line is malformed.

    Bug fix: the file object was never closed on the error paths (and relied
    on GC otherwise); the handle is now managed by a ``with`` block.
    """
    with open(filename, 'rb') as file:
        header = file.readline().decode('utf-8').rstrip()
        if header == 'PF':
            color = True
        elif header == 'Pf':
            color = False
        else:
            raise Exception('Not a PFM file.')
        dim_match = re.match(r'^(\d+)\s(\d+)\s$', file.readline().decode('utf-8'))
        if dim_match:
            width, height = map(int, dim_match.groups())
        else:
            raise Exception('Malformed PFM header.')
        # A negative scale means little-endian data; the sign is the only
        # endianness marker in the format.
        scale = float(file.readline().rstrip())
        if scale < 0:  # little-endian
            endian = '<'
            scale = -scale
        else:
            endian = '>'  # big-endian
        data = np.fromfile(file, endian + 'f')
    shape = (height, width, 3) if color else (height, width)
    data = np.reshape(data, shape)
    # PFM stores rows bottom-up; flip to conventional top-down order.
    data = np.flipud(data)
    return data, scale
156,864 | import os, torch, cv2, re
import numpy as np
from torch_scatter import scatter_min, segment_coo, scatter_mean
from PIL import Image
import torch.nn.functional as F
import torchvision.transforms as T
from functools import partial
import matplotlib.pyplot as plt
from scipy.spatial.transform import Rotation as R
from scipy.interpolate import CubicSpline
from kornia.utils import create_meshgrid
from torch.optim.lr_scheduler import CosineAnnealingLR, MultiStepLR
from warmup_scheduler import GradualWarmupScheduler
def gen_render_path(c2ws, N_views=30):
    """Interpolate a smooth, closed camera path through the given poses.

    Each camera-to-world matrix is decomposed into xyz Euler angles and a
    translation, linearly blended between consecutive poses (and from the
    last pose back to the first), then recomposed into 4x4 matrices.

    Args:
        c2ws: (N, 4, 4) array of camera-to-world poses.
        N_views: controls density; each pose pair contributes
            ``N_views // 3`` interpolated frames.

    Returns:
        (M, 4, 4) array of interpolated camera-to-world matrices.
    """
    N = len(c2ws)
    rotvec, positions = [], []
    rotvec_inteplat, positions_inteplat = [], []
    # Blend weights from 1.0 down to (but excluding) 0.0 for each segment.
    weight = np.linspace(1.0, .0, N_views//3, endpoint=False).reshape(-1, 1)
    for i in range(N):
        r = R.from_matrix(c2ws[i, :3, :3])
        euler_ange = r.as_euler('xyz', degrees=True).reshape(1, 3)
        if i:
            # Unwrap angles relative to the first pose so interpolation does
            # not spin the long way around the circle.
            mask = np.abs(euler_ange - rotvec[0])>180
            euler_ange[mask] += 360.0
        rotvec.append(euler_ange)
        positions.append(c2ws[i, :3, 3:].reshape(1, 3))
        if i:
            rotvec_inteplat.append(weight * rotvec[i - 1] + (1.0 - weight) * rotvec[i])
            positions_inteplat.append(weight * positions[i - 1] + (1.0 - weight) * positions[i])
    # Close the loop: blend from the final pose back to the first one.
    rotvec_inteplat.append(weight * rotvec[-1] + (1.0 - weight) * rotvec[0])
    positions_inteplat.append(weight * positions[-1] + (1.0 - weight) * positions[0])
    c2ws_render = []
    angles_inteplat, positions_inteplat = np.concatenate(rotvec_inteplat), np.concatenate(positions_inteplat)
    for rotvec, position in zip(angles_inteplat, positions_inteplat):
        c2w = np.eye(4)
        c2w[:3, :3] = R.from_euler('xyz', rotvec, degrees=True).as_matrix()
        c2w[:3, 3:] = position.reshape(3, 1)
        c2ws_render.append(c2w.copy())
    c2ws_render = np.stack(c2ws_render)
    return c2ws_render
156,865 | import os, torch, cv2, re
import numpy as np
from torch_scatter import scatter_min, segment_coo, scatter_mean
from PIL import Image
import torch.nn.functional as F
import torchvision.transforms as T
from functools import partial
import matplotlib.pyplot as plt
from scipy.spatial.transform import Rotation as R
from scipy.interpolate import CubicSpline
from kornia.utils import create_meshgrid
from torch.optim.lr_scheduler import CosineAnnealingLR, MultiStepLR
from warmup_scheduler import GradualWarmupScheduler
def homo_warp_nongrid(c2w, w2c, intrinsic, ref_cam_xyz, HD, WD, filter=True, **kwargs):
    """Project reference-camera points into a source view.

    Returns (src_grid, mask, hard_id_xy): normalized [-1, 1] sample
    coordinates (in-image points only when ``filter``), the in-image mask,
    and the ceil'ed integer pixel coordinates of every input point.
    """
    B, M, _ = ref_cam_xyz.shape
    if w2c is None:
        src_cam_xyz = ref_cam_xyz
    else:
        # Lift to homogeneous coords and chain ref-cam -> world -> src-cam.
        homog = torch.cat([ref_cam_xyz, torch.ones_like(ref_cam_xyz[:, :, 0:1])], dim=-1)
        src_cam_xyz = homog @ c2w.transpose(1, 2) @ w2c.transpose(1, 2)
    # Perspective divide, then project with the source intrinsics.
    src_grid = ((src_cam_xyz[..., :3] / src_cam_xyz[..., 2:3]) @ intrinsic.transpose(1, 2))[..., :2]
    lower = torch.ge(src_grid, torch.zeros([1, 1, 2], device=src_grid.device))
    upper = torch.le(src_grid, torch.tensor([[[WD - 1, HD - 1]]], device=src_grid.device))
    mask = torch.prod(torch.cat([lower, upper], dim=-1), dim=-1, keepdim=True, dtype=torch.int8) > 0
    src_grid = src_grid.to(torch.float32)  # grid xy
    hard_id_xy = torch.ceil(src_grid[:, :, :])
    if filter:
        src_grid = torch.masked_select(src_grid, mask).reshape(B, -1, 2)
    src_grid[..., 0] = src_grid[..., 0] / ((WD - 1.0) / 2.0) - 1.0  # scale to -1~1
    src_grid[..., 1] = src_grid[..., 1] / ((HD - 1.0) / 2.0) - 1.0  # scale to -1~1
    return src_grid, mask, hard_id_xy
156,866 | import os, torch, cv2, re
import numpy as np
from torch_scatter import scatter_min, segment_coo, scatter_mean
from PIL import Image
import torch.nn.functional as F
import torchvision.transforms as T
from functools import partial
import matplotlib.pyplot as plt
from scipy.spatial.transform import Rotation as R
from scipy.interpolate import CubicSpline
from kornia.utils import create_meshgrid
def id2mask(hard_id_xy, HD, WD):
    """Rasterize integer (x, y) pixel coordinates into a binary (HD, WD) mask."""
    coords = hard_id_xy.long()
    mask = torch.zeros([HD, WD], dtype=torch.int8, device=hard_id_xy.device)
    # Index as mask[row, col] = mask[y, x]; duplicates simply re-write 1.
    mask[coords[0, ..., 1], coords[0, ..., 0]] = 1
    return mask
from torch.optim.lr_scheduler import CosineAnnealingLR, MultiStepLR
from warmup_scheduler import GradualWarmupScheduler
def homo_warp_fg_mask(c2w, w2c, intrinsic, ref_cam_xyz, HD, WD, **kwargs):
    """Project reference-camera points into a source view and rasterize the
    in-image points into a binary (HD, WD) foreground mask."""
    B, M, _ = ref_cam_xyz.shape
    if w2c is None:
        src_cam_xyz = ref_cam_xyz
    else:
        # Lift to homogeneous coords and chain ref-cam -> world -> src-cam.
        homog = torch.cat([ref_cam_xyz, torch.ones_like(ref_cam_xyz[:, :, 0:1])], dim=-1)
        src_cam_xyz = homog @ c2w.transpose(1, 2) @ w2c.transpose(1, 2)
    # Perspective divide, then project with the source intrinsics.
    pix_xy = ((src_cam_xyz[..., :3] / src_cam_xyz[..., 2:3]) @ intrinsic.transpose(1, 2))[..., :2]
    lower = torch.ge(pix_xy, torch.zeros([1, 1, 2], device=pix_xy.device))
    upper = torch.le(pix_xy, torch.tensor([[[WD - 1, HD - 1]]], device=pix_xy.device))
    inside = torch.prod(torch.cat([lower, upper], dim=-1), dim=-1, keepdim=True, dtype=torch.int8) > 0
    pix_xy = pix_xy.to(torch.float32)  # grid xy
    kept = torch.ceil(pix_xy[:, :, :])[:, inside[0, ..., 0], :]
    return id2mask(kept, HD, WD)
156,867 | import os, torch, cv2, re
import numpy as np
from torch_scatter import scatter_min, segment_coo, scatter_mean
from PIL import Image
import torch.nn.functional as F
import torchvision.transforms as T
from functools import partial
import matplotlib.pyplot as plt
from scipy.spatial.transform import Rotation as R
from scipy.interpolate import CubicSpline
from kornia.utils import create_meshgrid
from torch.optim.lr_scheduler import CosineAnnealingLR, MultiStepLR
from warmup_scheduler import GradualWarmupScheduler
def homo_warp_nongrid_occ(c2w, w2c, intrinsic, ref_cam_xyz, HD, WD, tolerate=0.1, scatter_cpu=True):
    """Project reference-camera points into a source view, keeping only points
    that land inside the image AND are not occluded.

    A point survives occlusion if its source-camera depth is within
    ``tolerate`` of the minimum depth among all points projecting to the same
    (ceil'ed) pixel — a scatter-min z-buffer. ``scatter_cpu`` runs the
    scatter_min on CPU (presumably to bound GPU memory — confirm).

    Returns (grid, mask, hard_id_xy): normalized [-1, 1] sample coordinates of
    the surviving points, the combined in-image+unoccluded mask, and the
    ceil'ed pixel coords of the in-image points.
    """
    # src_grid: B, 3, D*H*W xyz
    B, M, _ = ref_cam_xyz.shape
    if w2c is not None:
        # Lift to homogeneous coords and chain ref-cam -> world -> src-cam.
        src_cam_xyz = torch.cat([ref_cam_xyz, torch.ones_like(ref_cam_xyz[:,:,0:1])], dim=-1) @ c2w.transpose(1,2) @ w2c.transpose(1,2)
    else:
        src_cam_xyz = ref_cam_xyz
    # print("src_cam_xyz",src_cam_xyz.shape, intrinsic.shape)
    src_grid = ((src_cam_xyz[..., :3] / src_cam_xyz[..., 2:3]) @ intrinsic.transpose(1,2))[...,:2]
    # print("src_pix_xy1", src_grid.shape, torch.min(src_grid,dim=-2)[0], torch.max(src_grid,dim=-2)[0])
    mask = torch.prod(torch.cat([torch.ge(src_grid, torch.zeros([1,1,2], device=src_grid.device)), torch.le(torch.ceil(src_grid), torch.tensor([[[WD-1,HD-1]]], device=src_grid.device))],dim=-1), dim=-1, keepdim=True, dtype=torch.int8) > 0
    src_grid = torch.masked_select(src_grid, mask).reshape(B, -1, 2)
    cam_z = torch.masked_select(src_cam_xyz[:,:,2], mask[...,0]).reshape(B, -1)
    src_grid = src_grid.to(torch.float32)  # grid xy
    # print("HD, WD", HD, WD) 512 640
    src_grid_x = src_grid[..., 0:1] / ((WD - 1.0) / 2.0) - 1.0  # scale to -1~1
    src_grid_y = src_grid[..., 1:2] / ((HD - 1.0) / 2.0) - 1.0  # scale to -1~1
    # hard_id_xy: 1, 307405, 2
    hard_id_xy = torch.ceil(src_grid[:,:,:])
    # print("hard_id_xy", hard_id_xy.shape)
    # Flatten pixel coords into a single scatter index per pixel.
    index = (hard_id_xy[...,0] * HD + hard_id_xy[...,1]).long()  # 1, 307405
    # print("index", index.shape, torch.min(index), torch.max(index))
    # Scatter-min z-buffer: nearest depth per pixel bucket.
    min_depth, argmin = scatter_min(cam_z[:,:].cpu() if scatter_cpu else cam_z[:,:], index[:,:].cpu() if scatter_cpu else index[:,:], dim=1)
    # print("argmin", min_depth.shape, min_depth, argmin.shape)
    queried_depth = min_depth.to(ref_cam_xyz.device)[:, index[0,...]] if scatter_cpu else min_depth[:, index[0,...]]
    # Keep points at (or within `tolerate` of) the front surface.
    block_mask = (cam_z <= (queried_depth + tolerate))
    # print("mask", mask.shape, torch.sum(mask), block_mask.shape, torch.sum(block_mask))
    # Fold the occlusion mask back into the in-image mask (boolean indexing
    # assigns block_mask into exactly the True positions of mask).
    mask[mask.clone()] = block_mask
    # print("mask", mask.shape, torch.sum(mask), block_mask.shape, torch.sum(block_mask))
    # print("src_grid_x", src_grid_x.shape)
    src_grid_x = torch.masked_select(src_grid_x, block_mask[..., None]).reshape(B, -1, 1)
    src_grid_y = torch.masked_select(src_grid_y, block_mask[..., None]).reshape(B, -1, 1)
    # print("src_grid_x", src_grid_x.shape, src_grid_y.shape, mask.shape)
    return torch.cat([src_grid_x, src_grid_y], dim=-1), mask, hard_id_xy
156,868 | import os, torch, cv2, re
import numpy as np
from torch_scatter import scatter_min, segment_coo, scatter_mean
from PIL import Image
import torch.nn.functional as F
import torchvision.transforms as T
from functools import partial
import matplotlib.pyplot as plt
from scipy.spatial.transform import Rotation as R
from scipy.interpolate import CubicSpline
from kornia.utils import create_meshgrid
from torch.optim.lr_scheduler import CosineAnnealingLR, MultiStepLR
from warmup_scheduler import GradualWarmupScheduler
def extract_from_2d_grid(src_feat, src_grid, mask):
    """Bilinearly sample per-point features from a 2D feature map.

    Args:
        src_feat: (B, C, H, W) feature map.
        src_grid: (B, M, 2) normalized [-1, 1] sample coordinates.
        mask: optional (B, N, 1) boolean mask; when given, the M sampled
            features are scattered back into a zero-filled (B, N, C) tensor at
            the True positions (inverse of a masked_select filtering step).

    Returns:
        (B, M, C) — or (B, N, C) when ``mask`` is given — sampled features.

    NOTE(review): the result is moved to CUDA unconditionally via ``.cuda()``,
    which fails on CPU-only machines — confirm GPU execution is assumed here.
    """
    B, M, _ = src_grid.shape
    warped_src_feat = F.grid_sample(src_feat, src_grid[:, None, ...], mode='bilinear', padding_mode='zeros', align_corners=True)  # (B, C, D, H*W)
    warped_src_feat = warped_src_feat.permute(0,2,3,1).view(B, M, src_feat.shape[1]).cuda()  # 1, 224874, 3
    if mask is not None:
        # Scatter the filtered samples back to full length N, zeros elsewhere.
        B, N, _ = mask.shape
        full_src_feat = torch.zeros([B, N, src_feat.shape[1]], device=warped_src_feat.device, dtype=warped_src_feat.dtype)
        full_src_feat[0, mask[0,:,0], :] = warped_src_feat
        warped_src_feat = full_src_feat
    return warped_src_feat
156,869 | import os, torch, cv2, re
import numpy as np
from torch_scatter import scatter_min, segment_coo, scatter_mean
from PIL import Image
import torch.nn.functional as F
import torchvision.transforms as T
from functools import partial
import matplotlib.pyplot as plt
from scipy.spatial.transform import Rotation as R
from scipy.interpolate import CubicSpline
from kornia.utils import create_meshgrid
from torch.optim.lr_scheduler import CosineAnnealingLR, MultiStepLR
from warmup_scheduler import GradualWarmupScheduler
The provided code snippet includes necessary dependencies for implementing the `homo_warp` function. Write a Python function `def homo_warp(src_feat, proj_mat, depth_values, src_grid=None, pad=0)` to solve the following problem:
src_feat: (B, C, H, W) proj_mat: (B, 3, 4) equal to "src_proj @ ref_proj_inv" depth_values: (B, D, H, W) out: (B, C, D, H, W)
Here is the function:
def homo_warp(src_feat, proj_mat, depth_values, src_grid=None, pad=0):
    """
    src_feat: (B, C, H, W)
    proj_mat: (B, 3, 4) equal to "src_proj @ ref_proj_inv"
    depth_values: (B, D, H, W)
    out: (B, C, D, H, W)

    When ``src_grid`` is provided the homography-grid construction is skipped
    and the features are simply sampled at that grid (used to warp RGB with
    the same grid computed for features); ``proj_mat``/``depth_values`` are
    then unused.

    Fix: the ``src_grid==None`` check is now the idiomatic ``is None`` —
    identity, not (potentially overloaded) equality.
    """
    if src_grid is None:
        B, C, H, W = src_feat.shape
        device = src_feat.device
        if pad>0:
            H_pad, W_pad = H + pad*2, W + pad*2
        else:
            H_pad, W_pad = H, W
        depth_values = depth_values[...,None,None].repeat(1, 1, H_pad, W_pad)
        D = depth_values.shape[1]
        R = proj_mat[:, :, :3]  # (B, 3, 3)
        T = proj_mat[:, :, 3:]  # (B, 3, 1)
        # create grid from the ref frame
        ref_grid = create_meshgrid(H_pad, W_pad, normalized_coordinates=False, device=device)  # (1, H, W, 2)
        if pad>0:
            ref_grid -= pad  # shift so padding pixels get negative coords
        ref_grid = ref_grid.permute(0, 3, 1, 2)  # (1, 2, H, W)
        ref_grid = ref_grid.reshape(1, 2, W_pad * H_pad)  # (1, 2, H*W)
        ref_grid = ref_grid.expand(B, -1, -1)  # (B, 2, H*W)
        ref_grid = torch.cat((ref_grid, torch.ones_like(ref_grid[:, :1])), 1)  # (B, 3, H*W)
        ref_grid_d = ref_grid.repeat(1, 1, D)  # (B, 3, D*H*W), X, Y, Z
        # Homography per depth plane: x_src ~ R x_ref + T / d.
        src_grid_d = R @ ref_grid_d + T / depth_values.view(B, 1, D * W_pad * H_pad)
        del ref_grid_d, ref_grid, proj_mat, R, T, depth_values  # release (GPU) memory
        src_grid = src_grid_d[:, :2] / src_grid_d[:, 2:]  # divide by depth (B, 2, D*H*W)
        del src_grid_d
        src_grid[:, 0] = src_grid[:, 0] / ((W - 1) / 2) - 1  # scale to -1~1
        src_grid[:, 1] = src_grid[:, 1] / ((H - 1) / 2) - 1  # scale to -1~1
        src_grid = src_grid.permute(0, 2, 1)  # (B, D*H*W, 2)
        src_grid = src_grid.view(B, D, H_pad, W_pad, 2)
    B, D, H_pad, W_pad = src_grid.shape[:4]
    src_grid = src_grid.to(src_feat.dtype)  # 1, 32, 128, 160
    warped_src_feat = F.grid_sample(src_feat, src_grid.view(B, D, H_pad * W_pad, 2),
                                    mode='bilinear', padding_mode='zeros',
                                    align_corners=True)  # (B, C, D, H*W)
    warped_src_feat = warped_src_feat.view(B, -1, D, H_pad, W_pad)
    return warped_src_feat, src_grid
156,870 | import os, torch, cv2, re
import numpy as np
from torch_scatter import scatter_min, segment_coo, scatter_mean
from PIL import Image
import torch.nn.functional as F
import torchvision.transforms as T
from functools import partial
import matplotlib.pyplot as plt
from scipy.spatial.transform import Rotation as R
from scipy.interpolate import CubicSpline
from kornia.utils import create_meshgrid
from torch.optim.lr_scheduler import CosineAnnealingLR, MultiStepLR
from warmup_scheduler import GradualWarmupScheduler
def construct_vox_points(xyz_val, vox_res, partition_xyz=None, space_min=None, space_max=None):
    """Voxelize a point cloud and return per-voxel centroids.

    Args:
        xyz_val: (N, 3) tensor of point coordinates averaged per voxel.
        vox_res: voxel grid resolution along each axis.
        partition_xyz: optional (N, 3) tensor defining the voxel partition
            instead of ``xyz_val`` (must share the same point ordering).
        space_min / space_max: optional precomputed bounds of the voxel
            space; derived from the points (with 5% padding) when None.

    Returns:
        xyz_centroid: (M, 3) mean position of the points in each occupied voxel.
        sparse_grid_idx: (M, 3) int32 indices of the occupied voxels.
        min_idx: (M,) index of the first input point falling in each voxel.
    """
    xyz = xyz_val if partition_xyz is None else partition_xyz
    if space_min is None:
        # Cubic bounding box with 5% padding around the partition points.
        xyz_min, xyz_max = torch.min(xyz, dim=-2)[0], torch.max(xyz, dim=-2)[0]
        space_edge = torch.max(xyz_max - xyz_min) * 1.05
        xyz_mid = (xyz_max + xyz_min) / 2
        space_min = xyz_mid - space_edge / 2
        space_max = xyz_mid + space_edge / 2
    else:
        space_edge = space_max - space_min
    construct_vox_sz = space_edge / vox_res
    xyz_shift = xyz - space_min[None, ...]
    # Unique voxel coordinates plus the inverse map point -> voxel id.
    sparse_grid_idx, inv_idx = torch.unique(torch.floor(xyz_shift / construct_vox_sz[None, ...]).to(torch.int32), dim=0, return_inverse=True)
    xyz_centroid = scatter_mean(xyz_val, inv_idx, dim=0)
    min_idx, _ = scatter_min(torch.arange(len(xyz), device=xyz.device), inv_idx, dim=0)
    return xyz_centroid, sparse_grid_idx, min_idx
156,871 | import os, torch, cv2, re
import numpy as np
from torch_scatter import scatter_min, segment_coo, scatter_mean
from PIL import Image
import torch.nn.functional as F
import torchvision.transforms as T
from functools import partial
import matplotlib.pyplot as plt
from scipy.spatial.transform import Rotation as R
from scipy.interpolate import CubicSpline
from kornia.utils import create_meshgrid
from torch.optim.lr_scheduler import CosineAnnealingLR, MultiStepLR
from warmup_scheduler import GradualWarmupScheduler
def construct_vox_points_xyz(xyz_val, vox_res, partition_xyz=None, space_min=None, space_max=None):
    """Voxelize a point cloud and return only the per-voxel centroids.

    Same partitioning as ``construct_vox_points`` but returns just the
    (M, 3) centroid tensor.
    """
    xyz = xyz_val if partition_xyz is None else partition_xyz
    if space_min is None:
        # Cubic bounding box with 5% padding around the partition points.
        xyz_min, xyz_max = torch.min(xyz, dim=-2)[0], torch.max(xyz, dim=-2)[0]
        space_edge = torch.max(xyz_max - xyz_min) * 1.05
        xyz_mid = (xyz_max + xyz_min) / 2
        space_min = xyz_mid - space_edge / 2
    else:
        space_edge = space_max - space_min
    construct_vox_sz = space_edge / vox_res
    xyz_shift = xyz - space_min[None, ...]
    sparse_grid_idx, inv_idx = torch.unique(torch.floor(xyz_shift / construct_vox_sz[None, ...]).to(torch.int32), dim=0, return_inverse=True)
    xyz_centroid = scatter_mean(xyz_val, inv_idx, dim=0)
    return xyz_centroid
156,872 | import os, torch, cv2, re
import numpy as np
from torch_scatter import scatter_min, segment_coo, scatter_mean
from PIL import Image
import torch.nn.functional as F
import torchvision.transforms as T
from functools import partial
import matplotlib.pyplot as plt
from scipy.spatial.transform import Rotation as R
from scipy.interpolate import CubicSpline
from kornia.utils import create_meshgrid
from torch.optim.lr_scheduler import CosineAnnealingLR, MultiStepLR
from warmup_scheduler import GradualWarmupScheduler
def construct_vox_points_ind(xyz_val, vox_res, partition_xyz=None, space_min=None, space_max=None):
    """Voxelize a point cloud and return voxel indices plus the space bounds.

    Args:
        xyz_val: (N, 3) tensor of point coordinates.
        vox_res: voxel grid resolution along each axis.
        partition_xyz: optional (N, 3) tensor defining the partition instead
            of ``xyz_val``.
        space_min / space_max: optional precomputed bounds; derived from the
            points (cubic box, 5% padding) when None.

    Returns:
        sparse_grid_idx: (M, 3) int32 indices of occupied voxels (sorted).
        inv_idx: (N,) mapping each point to its row in ``sparse_grid_idx``.
        space_min, space_max: the (possibly derived) voxel-space bounds.
    """
    xyz = xyz_val if partition_xyz is None else partition_xyz
    if space_min is None:
        # Cubic bounding box with 5% padding around the partition points.
        xyz_min, xyz_max = torch.min(xyz, dim=-2)[0], torch.max(xyz, dim=-2)[0]
        space_edge = torch.max(xyz_max - xyz_min) * 1.05
        xyz_mid = (xyz_max + xyz_min) / 2
        space_min = xyz_mid - space_edge / 2
        space_max = xyz_mid + space_edge / 2
    else:
        space_edge = space_max - space_min
    construct_vox_sz = space_edge / vox_res
    xyz_shift = xyz - space_min[None, ...]
    sparse_grid_idx, inv_idx = torch.unique(torch.floor(xyz_shift / construct_vox_sz[None, ...]).to(torch.int32), dim=0, return_inverse=True)
    return sparse_grid_idx, inv_idx, space_min, space_max
156,873 | import os, torch, cv2, re
import numpy as np
from torch_scatter import scatter_min, segment_coo, scatter_mean
from PIL import Image
import torch.nn.functional as F
import torchvision.transforms as T
from functools import partial
import matplotlib.pyplot as plt
from scipy.spatial.transform import Rotation as R
from scipy.interpolate import CubicSpline
from kornia.utils import create_meshgrid
from torch.optim.lr_scheduler import CosineAnnealingLR, MultiStepLR
from warmup_scheduler import GradualWarmupScheduler
def transform_points_to_voxels(points, point_cloud_range, voxel_sizes, max_pnts_per_vox, max_voxels, voxel_generator=None):
    """Run a (spconv-style) voxel generator over a point cloud.

    Only ``points`` and ``voxel_generator`` are used here; the remaining
    arguments describe the generator's configuration and are kept for
    interface compatibility. Supports generators returning either a dict
    (keys ``voxels``/``coordinates``/``num_points_per_voxel``) or a
    ``(voxels, coordinates, num_points)`` tuple.
    """
    voxel_output = voxel_generator.generate(points)
    if isinstance(voxel_output, dict):
        return (voxel_output['voxels'],
                voxel_output['coordinates'],
                voxel_output['num_points_per_voxel'])
    voxels, coordinates, num_points = voxel_output
    return voxels, coordinates, num_points
156,874 | import torch
import os
from torch.utils.data import DataLoader
import imageio
from .models import *
from .renderer import *
from .mvs_utils import *
from . import filter_utils
from ..helpers.networks import init_seq
from ..depth_estimators.mvsnet import MVSNet as Ofcl_MVSNet
from torch.optim.lr_scheduler import CosineAnnealingLR
from inplace_abn import InPlaceABN
from collections import OrderedDict
from torchvision import transforms as T
def init_seq(s, init_type='xavier_uniform'):
def premlp_init(opt):
    """Build the pre-shading MLP mapping 63-dim positional encodings to point features.

    Args:
        opt: options namespace providing ``point_features_dim`` (output width),
            ``act_type`` (an activation class name looked up in ``nn``) and
            ``shading_feature_mlp_layer1`` (number of Linear+activation layers).

    Returns:
        nn.Sequential of Linear/activation pairs, weight-initialized via
        ``init_seq``.
    """
    in_channels = 63  # positional-encoding feature size
    out_channels = opt.point_features_dim
    blocks = []
    # NOTE(review): getattr falls back to None, which would crash at
    # act(inplace=True) below — assumes opt.act_type is always valid.
    act = getattr(nn, opt.act_type, None)
    for _ in range(opt.shading_feature_mlp_layer1):
        blocks.append(nn.Linear(in_channels, out_channels))
        blocks.append(act(inplace=True))
        in_channels = out_channels
    blocks = nn.Sequential(*blocks)
    init_seq(blocks)
    return blocks
156,875 | from __future__ import print_function
import torch
import numpy as np
from PIL import Image
import os
from torchvision.utils import make_grid
from os.path import join
import torch.nn.functional as F
import matplotlib as mpl
import matplotlib.cm as cm
import matplotlib.pyplot as plt
from scipy.spatial.transform import Rotation as R
def mkdir(path):
    """Create ``path`` (including parents) if it does not already exist.

    Uses ``exist_ok=True`` instead of the previous exists()/makedirs()
    pair, which was racy when two processes created the same directory.
    """
    os.makedirs(path, exist_ok=True)
156,876 | from __future__ import print_function
import torch
import numpy as np
from PIL import Image
import os
from torchvision.utils import make_grid
from os.path import join
import torch.nn.functional as F
import matplotlib as mpl
import matplotlib.cm as cm
import matplotlib.pyplot as plt
from scipy.spatial.transform import Rotation as R
def add_property2dict(target_dict, object, props):
    """Copy the attributes named in ``props`` from ``object`` into ``target_dict`` (in place)."""
    target_dict.update({prop: getattr(object, prop) for prop in props})
156,877 | from __future__ import print_function
import torch
import numpy as np
from PIL import Image
import os
from torchvision.utils import make_grid
from os.path import join
import torch.nn.functional as F
import matplotlib as mpl
import matplotlib.cm as cm
import matplotlib.pyplot as plt
from scipy.spatial.transform import Rotation as R
def normalize(v, axis=0):
    """Normalize ``v`` along ``axis`` (0: each column, 1: each row).

    A small epsilon in the denominator guards against division by zero.
    """
    norms = np.linalg.norm(v, axis=axis, keepdims=True)
    return v / (norms + 1e-9)
156,878 | from __future__ import print_function
import torch
import numpy as np
from PIL import Image
import os
from torchvision.utils import make_grid
from os.path import join
import torch.nn.functional as F
import matplotlib as mpl
import matplotlib.cm as cm
import matplotlib.pyplot as plt
from scipy.spatial.transform import Rotation as R
def gen_render_path(c2ws, N_views=30):
    """Generate a smooth looping camera path interpolating the given poses.

    Args:
        c2ws: (N, 4, 4) array of camera-to-world matrices.
        N_views: nominal path length; ``N_views // 3`` interpolated poses are
            emitted per consecutive pose pair (plus a final wrap-around
            segment back to the first pose).

    Returns:
        (K, 4, 4) array of interpolated camera-to-world matrices, where
        K = N * (N_views // 3).
    """
    N = len(c2ws)
    rotvec, positions = [], []
    rotvec_inteplat, positions_inteplat = [], []
    weight = np.linspace(1.0, .0, N_views//3, endpoint=False).reshape(-1, 1)
    for i in range(N):
        r = R.from_matrix(c2ws[i, :3, :3])
        euler_ange = r.as_euler('xyz', degrees=True).reshape(1, 3)
        if i:
            # Unwrap angles so interpolation takes the short way around.
            mask = np.abs(euler_ange - rotvec[0]) > 180
            euler_ange[mask] += 360.0
        rotvec.append(euler_ange)
        positions.append(c2ws[i, :3, 3:].reshape(1, 3))
        if i:
            rotvec_inteplat.append(weight * rotvec[i - 1] + (1.0 - weight) * rotvec[i])
            positions_inteplat.append(weight * positions[i - 1] + (1.0 - weight) * positions[i])
    # Wrap-around segment: last pose back to the first, closing the loop.
    rotvec_inteplat.append(weight * rotvec[-1] + (1.0 - weight) * rotvec[0])
    positions_inteplat.append(weight * positions[-1] + (1.0 - weight) * positions[0])
    c2ws_render = []
    angles_inteplat, positions_inteplat = np.concatenate(rotvec_inteplat), np.concatenate(positions_inteplat)
    for rotvec, position in zip(angles_inteplat, positions_inteplat):
        c2w = np.eye(4)
        c2w[:3, :3] = R.from_euler('xyz', rotvec, degrees=True).as_matrix()
        c2w[:3, 3:] = position.reshape(3, 1)
        c2ws_render.append(c2w.copy())
    c2ws_render = np.stack(c2ws_render)
    return c2ws_render
156,879 | from __future__ import print_function
import torch
import numpy as np
from PIL import Image
import os
from torchvision.utils import make_grid
from os.path import join
import torch.nn.functional as F
import matplotlib as mpl
import matplotlib.cm as cm
import matplotlib.pyplot as plt
from scipy.spatial.transform import Rotation as R
def unique_lst(list1):
    """Return the sorted unique elements of ``list1`` as a numpy array."""
    x = np.array(list1)
    return np.unique(x)
156,880 |
def underscore2camelcase(s):
    """Convert a lower_snake_case identifier to CamelCase.

    Raises:
        AssertionError: if ``s`` contains upper-case characters, characters
            other than letters/digits/underscore, leading/trailing/consecutive
            underscores, or a phrase that does not start with a letter.
    """
    assert s == s.lower(), 'Invalid underscore string: no upper case character is allowed, in "{}"'.format(s)
    assert all([x.isdigit() or x.isalpha() or x == '_' for x in s]),\
        'Invalid underscore, all character must be letters or numbers or underscore'
    terms = s.split('_')
    for x in terms:
        assert x, 'Invalid underscore string: no consecutive _ is allowed, in "{}"'.format(s)
        assert x[0].upper() != x[0], \
            'Invalid underscore string: phrases must start with a character, in "{}'.format(s)
    return ''.join([x[0].upper() + x[1:] for x in terms if x])
156,881 |
def camelcase2underscore(s):
    """Convert a CamelCase identifier to lower_snake_case.

    Raises:
        AssertionError: if ``s`` does not start with an upper-case letter or
            contains characters other than letters and digits.
    """
    assert s[0].isupper(), 'Invalid camel case, first character must be upper case, in "{}"'.format(s)
    assert all([x.isdigit() or x.isalpha() for x in s]),\
        'Invalid camel case, all character must be letters or numbers'
    out = s[0].lower()
    for x in s[1:]:
        # Each upper-case letter marks the start of a new underscore-joined word.
        if x.lower() != x:
            out += '_' + x.lower()
        else:
            out += x
    return out
156,882 | import numpy as np
import os
from PIL import Image
import shutil
from collections import OrderedDict
import time
import datetime
import torch
import imageio
from utils.util import to8b
from models.mvs.mvs_utils import *
def mse2psnr(x):
    """Convert a mean-squared-error tensor to PSNR in dB (assumes values in [0, 1])."""
    return -10. * torch.log(x) / np.log(10.)
156,883 | import numpy as np
import os
from PIL import Image
import shutil
from collections import OrderedDict
import time
import datetime
import torch
import imageio
from utils.util import to8b
from models.mvs.mvs_utils import *
def save_image(img_array, filepath):
    """Save a grayscale (H, W) or RGB/RGBA (H, W, 3|4) image array to ``filepath``.

    Float inputs are assumed to be in [0, 1] and are clipped/scaled to uint8.
    Parent directories are created as needed.
    """
    assert len(img_array.shape) == 2 or (len(img_array.shape) == 3
                                         and img_array.shape[2] in [3, 4])
    if img_array.dtype != np.uint8:
        img_array = (np.clip(img_array, 0, 1) * 255).astype(np.uint8)
    out_dir = os.path.dirname(filepath)
    if out_dir:
        # Guard: os.makedirs('') raises when filepath has no directory part.
        os.makedirs(out_dir, exist_ok=True)
    Image.fromarray(img_array).save(filepath)
156,884 | import numpy as np
import os
from PIL import Image
import shutil
from collections import OrderedDict
import time
import datetime
import torch
import imageio
from utils.util import to8b
from models.mvs.mvs_utils import *
def save_points(xyz, dir, total_steps):
    """Dump point batches to ``dir`` as semicolon-separated text files.

    Args:
        xyz: (B, N, C) or (N, C) array of points; 2-D input is treated as a
            batch of one.
        dir: output directory (created if missing).
        total_steps: int step number (zero-padded in the filename) or a
            string tag.
    """
    if xyz.ndim < 3:
        xyz = xyz[None, ...]  # promote to a batch of one
    os.makedirs(dir, exist_ok=True)
    for i in range(xyz.shape[0]):
        if isinstance(total_steps, str):
            filename = 'step-{}-{}.txt'.format(total_steps, i)
        else:
            filename = 'step-{:04d}-{}.txt'.format(total_steps, i)
        filepath = os.path.join(dir, filename)
        np.savetxt(filepath, xyz[i, ...].reshape(-1, xyz.shape[-1]), delimiter=";")
156,885 | import torch
import math
from tqdm import trange
import k_diffusion as K
The provided code snippet includes necessary dependencies for implementing the `alpha_sigma_to_t` function. Write a Python function `def alpha_sigma_to_t(alpha, sigma)` to solve the following problem:
Returns a timestep, given the scaling factors for the clean image and for the noise.
Here is the function:
def alpha_sigma_to_t(alpha, sigma):
    """Returns a timestep in [0, 1], given the scaling factors for the clean
    image (alpha) and for the noise (sigma)."""
    return torch.atan2(sigma, alpha) / math.pi * 2
156,886 | import torch
import math
from tqdm import trange
import k_diffusion as K
The provided code snippet includes necessary dependencies for implementing the `t_to_alpha_sigma` function. Write a Python function `def t_to_alpha_sigma(t)` to solve the following problem:
Returns the scaling factors for the clean image and for the noise, given a timestep.
Here is the function:
def t_to_alpha_sigma(t):
    """Returns the scaling factors for the clean image (cos) and for the
    noise (sin), given a timestep in [0, 1]."""
    return torch.cos(t * math.pi / 2), torch.sin(t * math.pi / 2)
156,887 | import torch
import math
from tqdm import trange
import k_diffusion as K
def get_alphas_sigmas(t):
    """Return (alpha, sigma) — the clean-image and noise scaling factors —
    for timestep(s) ``t`` in [0, 1]."""
    angle = t * math.pi / 2
    return torch.cos(angle), torch.sin(angle)
The provided code snippet includes necessary dependencies for implementing the `sample` function. Write a Python function `def sample(model, x, steps, eta, **extra_args)` to solve the following problem:
Draws samples from a model given starting noise. v-diffusion
Here is the function:
def sample(model, x, steps, eta, **extra_args):
    """Draws samples from a model given starting noise (v-diffusion / DDIM).

    Args:
        model: callable mapping (x, t, **extra_args) -> predicted velocity v.
        x: starting noise tensor, shape (B, ...).
        steps: number of denoising steps; must be >= 1 (``pred`` is
            referenced after the loop).
        eta: DDIM stochasticity; 0 gives deterministic sampling.

    Returns:
        The final denoised prediction.
    """
    ts = x.new_ones([x.shape[0]])
    # Create the noise schedule
    t = torch.linspace(1, 0, steps + 1)[:-1]
    alphas, sigmas = get_alphas_sigmas(t)
    # The sampling loop
    for i in trange(steps):
        # Get the model output (v, the predicted velocity)
        with torch.cuda.amp.autocast():
            v = model(x, ts * t[i], **extra_args).float()
        # Predict the noise and the denoised image
        pred = x * alphas[i] - v * sigmas[i]
        eps = x * sigmas[i] + v * alphas[i]
        # If we are not on the last timestep, compute the noisy image for the
        # next timestep.
        if i < steps - 1:
            # If eta > 0, adjust the scaling factor for the predicted noise
            # downward according to the amount of additional noise to add
            ddim_sigma = eta * (sigmas[i + 1]**2 / sigmas[i]**2).sqrt() * \
                (1 - alphas[i]**2 / alphas[i + 1]**2).sqrt()
            adjusted_sigma = (sigmas[i + 1]**2 - ddim_sigma**2).sqrt()
            # Recombine the predicted noise and predicted denoised image in the
            # correct proportions for the next step
            x = pred * alphas[i + 1] + eps * adjusted_sigma
            # Add the correct amount of fresh noise
            if eta:
                x += torch.randn_like(x) * ddim_sigma
    # If we are on the last timestep, output the denoised image
    return pred
156,888 | import importlib
import numpy as np
import io
import os
import posixpath
import random
import re
import subprocess
import time
import torch
import torchaudio
import webdataset as wds
from aeiou.core import is_silence
from os import path
from pedalboard.io import AudioFile
from torchaudio import transforms as T
from typing import Optional, Callable, List
from .utils import Stereo, Mono, PhaseFlipper, PadCrop_Normalized_T
def fast_scandir(
    dir: str,  # top-level directory at which to begin scanning
    ext: list,  # list of allowed file extensions,
    #max_size = 1 * 1000 * 1000 * 1000 # Only files < 1 GB
):
    """Recursively scan ``dir``; return (subfolders, files) where files match
    ``ext`` (case-insensitive) and are not hidden.

    Very fast `glob` alternative, from https://stackoverflow.com/a/59803793/4259243.
    Unreadable entries are skipped (best effort) — the previous bare
    ``except:`` clauses are narrowed to OSError so real bugs surface.
    """
    subfolders, files = [], []
    ext = ['.'+x if x[0] != '.' else x for x in ext]  # add starting period to extensions if needed
    try:  # best effort: skip directories we cannot read (permission denied etc.)
        for f in os.scandir(dir):
            try:  # guard against 'too many levels of symbolic links' errors
                if f.is_dir():
                    subfolders.append(f.path)
                elif f.is_file():
                    file_ext = os.path.splitext(f.name)[1].lower()
                    is_hidden = os.path.basename(f.path).startswith(".")
                    if file_ext in ext and not is_hidden:
                        files.append(f.path)
            except OSError:
                pass
    except OSError:
        pass
    # Recurse into every discovered subfolder (don't shadow the `dir` param).
    for subdir in list(subfolders):
        sf, sub_files = fast_scandir(subdir, ext)
        subfolders.extend(sf)
        files.extend(sub_files)
    return subfolders, files
def keyword_scandir(
    dir: str,  # top-level directory at which to begin scanning
    ext: list,  # list of allowed file extensions
    keywords: list,  # list of keywords to search for in the file name
):
    """Recursively scan ``dir`` for non-hidden files matching ``ext`` whose
    name contains any of ``keywords`` (case-insensitive).

    Very fast `glob` alternative, from https://stackoverflow.com/a/59803793/4259243.
    Tar/zip artifacts ("paxheader", "__macosx") and AppleDouble "._" files
    are excluded. Unreadable entries are skipped (OSError only).
    """
    subfolders, files = [], []
    # make keywords case insensitive
    keywords = [keyword.lower() for keyword in keywords]
    # add starting period to extensions if needed
    ext = ['.'+x if x[0] != '.' else x for x in ext]
    banned_words = ["paxheader", "__macosx"]
    try:  # best effort: skip directories we cannot read (permission denied etc.)
        for f in os.scandir(dir):
            try:  # guard against 'too many levels of symbolic links' errors
                if f.is_dir():
                    subfolders.append(f.path)
                elif f.is_file():
                    is_hidden = f.name.split("/")[-1][0] == '.'
                    has_ext = os.path.splitext(f.name)[1].lower() in ext
                    name_lower = f.name.lower()
                    has_keyword = any(
                        [keyword in name_lower for keyword in keywords])
                    has_banned = any(
                        [banned_word in name_lower for banned_word in banned_words])
                    if has_ext and has_keyword and not has_banned and not is_hidden and not os.path.basename(f.path).startswith("._"):
                        files.append(f.path)
            except OSError:
                pass
    except OSError:
        pass
    # Recurse into every discovered subfolder (don't shadow the `dir` param).
    for subdir in list(subfolders):
        sf, sub_files = keyword_scandir(subdir, ext, keywords)
        subfolders.extend(sf)
        files.extend(sub_files)
    return subfolders, files
The provided code snippet includes necessary dependencies for implementing the `get_audio_filenames` function. Write a Python function `def get_audio_filenames( paths: list, # directories in which to search keywords=None, exts=['.wav', '.mp3', '.flac', '.ogg', '.aif', '.opus'] )` to solve the following problem:
recursively get a list of audio filenames
Here is the function:
def get_audio_filenames(
    paths: list,  # directories in which to search
    keywords=None,
    exts=['.wav', '.mp3', '.flac', '.ogg', '.aif', '.opus']
):
    """Recursively get a list of audio filenames under ``paths``.

    Args:
        paths: directory or list of directories to search.
        keywords: optional list of filename keywords; when given,
            ``keyword_scandir`` is used instead of ``fast_scandir``.
        exts: allowed audio extensions. (Mutable default is safe here: the
            scandir helpers rebind their copy, never mutate it.)
    """
    filenames = []
    if type(paths) is str:
        paths = [paths]
    for path in paths:  # get a list of relevant filenames
        if keywords is not None:
            subfolders, files = keyword_scandir(path, exts, keywords)
        else:
            subfolders, files = fast_scandir(path, exts)
        filenames.extend(files)
    return filenames
156,889 | import importlib
import numpy as np
import io
import os
import posixpath
import random
import re
import subprocess
import time
import torch
import torchaudio
import webdataset as wds
from aeiou.core import is_silence
from os import path
from pedalboard.io import AudioFile
from torchaudio import transforms as T
from typing import Optional, Callable, List
from .utils import Stereo, Mono, PhaseFlipper, PadCrop_Normalized_T
wds.tariterators.group_by_keys = group_by_keys
The provided code snippet includes necessary dependencies for implementing the `group_by_keys` function. Write a Python function `def group_by_keys(data, keys=wds.tariterators.base_plus_ext, lcase=True, suffixes=None, handler=None)` to solve the following problem:
Return function over iterator that groups key, value pairs into samples. :param keys: function that splits the key into key and extension (base_plus_ext) :param lcase: convert suffixes to lower case (Default value = True)
Here is the function:
def group_by_keys(data, keys=wds.tariterators.base_plus_ext, lcase=True, suffixes=None, handler=None):
    """Return function over iterator that groups key, value pairs into samples.

    :param keys: function that splits the key into key and extension (base_plus_ext)
    :param lcase: convert suffixes to lower case (Default value = True)
    :param suffixes: when given, only these suffixes are stored on the sample
    :param handler: unused; kept for webdataset pipeline compatibility
    """
    current_sample = None
    for filesample in data:
        assert isinstance(filesample, dict)
        fname, value = filesample["fname"], filesample["data"]
        prefix, suffix = keys(fname)
        if wds.tariterators.trace:
            print(
                prefix,
                suffix,
                current_sample.keys() if isinstance(current_sample, dict) else None,
            )
        if prefix is None:
            continue
        if lcase:
            suffix = suffix.lower()
        # A new prefix starts a new sample; flush the previous one if valid.
        if current_sample is None or prefix != current_sample["__key__"]:
            if wds.tariterators.valid_sample(current_sample):
                yield current_sample
            current_sample = dict(__key__=prefix, __url__=filesample["__url__"])
        if suffix in current_sample:
            print(f"{fname}: duplicate file name in tar file {suffix} {current_sample.keys()}")
        if suffixes is None or suffix in suffixes:
            current_sample[suffix] = value
    # Flush the trailing sample once the input is exhausted.
    if wds.tariterators.valid_sample(current_sample):
        yield current_sample
156,890 | import importlib
import numpy as np
import io
import os
import posixpath
import random
import re
import subprocess
import time
import torch
import torchaudio
import webdataset as wds
from aeiou.core import is_silence
from os import path
from pedalboard.io import AudioFile
from torchaudio import transforms as T
from typing import Optional, Callable, List
from .utils import Stereo, Mono, PhaseFlipper, PadCrop_Normalized_T
def get_s3_contents(dataset_path, s3_url_prefix=None, filter='', recursive=True, debug=False, profile=None):
    """
    Returns a list of full S3 paths to files in a given S3 bucket and directory path.

    Shells out to ``aws s3 ls``; ``filter`` keeps only paths containing that
    substring; ``profile`` selects an AWS CLI profile. Raises
    CalledProcessError if the CLI command fails (check=True).
    """
    # Ensure dataset_path ends with a trailing slash
    if dataset_path != '' and not dataset_path.endswith('/'):
        dataset_path += '/'
    # Use posixpath to construct the S3 URL path
    bucket_path = posixpath.join(s3_url_prefix or '', dataset_path)
    # Construct the `aws s3 ls` command
    cmd = ['aws', 's3', 'ls', bucket_path]
    if profile is not None:
        cmd.extend(['--profile', profile])
    if recursive:
        # Add the --recursive flag if requested
        cmd.append('--recursive')
    # Run the `aws s3 ls` command and capture the output
    run_ls = subprocess.run(cmd, capture_output=True, check=True)
    # Split the output into lines and strip whitespace from each line
    contents = run_ls.stdout.decode('utf-8').split('\n')
    contents = [x.strip() for x in contents if x]
    # Remove the timestamp from lines that begin with a timestamp
    # (recursive `aws s3 ls` lines look like "DATE TIME SIZE KEY")
    contents = [re.sub(r'^\S+\s+\S+\s+\d+\s+', '', x)
                if re.match(r'^\S+\s+\S+\s+\d+\s+', x) else x for x in contents]
    # Construct a full S3 path for each file in the contents list
    contents = [posixpath.join(s3_url_prefix or '', x)
                for x in contents if not x.endswith('/')]
    # Apply the filter, if specified
    if filter:
        contents = [x for x in contents if filter in x]
    # Remove redundant directory names in the S3 URL
    if recursive:
        # Get the main directory name from the S3 URL
        main_dir = "/".join(bucket_path.split('/')[3:])
        # Remove the redundant directory names from each file path
        contents = [x.replace(f'{main_dir}', '').replace(
            '//', '/') for x in contents]
    # Print debugging information, if requested
    if debug:
        print("contents = \n", contents)
    # Return the list of S3 paths to files
    return contents
The provided code snippet includes necessary dependencies for implementing the `get_all_s3_urls` function. Write a Python function `def get_all_s3_urls( names=[], # list of all valid [LAION AudioDataset] dataset names # list of subsets you want from those datasets, e.g. ['train','valid'] subsets=[''], s3_url_prefix=None, # prefix for those dataset names recursive=True, # recursively list all tar files in all subdirs filter_str='tar', # only grab files with this substring # print debugging info -- note: info displayed likely to change at dev's whims debug=False, profiles={}, # dictionary of profiles for each item in names, e.g. {'dataset1': 'profile1', 'dataset2': 'profile2'} )` to solve the following problem:
get urls of shards (tar files) for multiple datasets in one s3 bucket
Here is the function:
def get_all_s3_urls(
    names=[],  # list of all valid [LAION AudioDataset] dataset names
    # list of subsets you want from those datasets, e.g. ['train','valid']
    subsets=[''],
    s3_url_prefix=None,  # prefix for those dataset names
    recursive=True,  # recursively list all tar files in all subdirs
    filter_str='tar',  # only grab files with this substring
    # print debugging info -- note: info displayed likely to change at dev's whims
    debug=False,
    profiles={},  # dictionary of profiles for each item in names, e.g. {'dataset1': 'profile1', 'dataset2': 'profile2'}
):
    """Get urls of shards (tar files) for multiple datasets in one s3 bucket.

    Returns a list of ``pipe:aws s3 ... cp <path> -`` strings consumable by
    webdataset. (The mutable defaults for ``names``/``profiles`` are never
    mutated here, so they are safe.)
    """
    urls = []
    for name in names:
        # If s3_url_prefix is not specified, assume the full S3 path is included in each element of the names list
        if s3_url_prefix is None:
            contents_str = name
        else:
            # Construct the S3 path using the s3_url_prefix and the current name value
            contents_str = posixpath.join(s3_url_prefix, name)
        if debug:
            print(f"get_all_s3_urls: {contents_str}:")
        for subset in subsets:
            subset_str = posixpath.join(contents_str, subset)
            if debug:
                print(f"subset_str = {subset_str}")
            # Get the list of tar files in the current subset directory
            profile = profiles.get(name, None)
            tar_list = get_s3_contents(
                subset_str, s3_url_prefix=None, recursive=recursive, filter=filter_str, debug=debug, profile=profile)
            for tar in tar_list:
                # Escape spaces and parentheses in the tar filename for use in the shell command
                tar = tar.replace(" ", "\ ").replace(
                    "(", "\(").replace(")", "\)")
                # Construct the S3 path to the current tar file
                s3_path = posixpath.join(name, subset, tar) + " -"
                # Construct the AWS CLI command to download the current tar file
                if s3_url_prefix is None:
                    request_str = f"pipe:aws s3 --cli-connect-timeout 0 cp {s3_path}"
                else:
                    request_str = f"pipe:aws s3 --cli-connect-timeout 0 cp {posixpath.join(s3_url_prefix, s3_path)}"
                if profiles.get(name):
                    request_str += f" --profile {profiles.get(name)}"
                if debug:
                    print("request_str = ", request_str)
                # Add the constructed URL to the list of URLs
                urls.append(request_str)
    return urls
156,891 | import importlib
import numpy as np
import io
import os
import posixpath
import random
import re
import subprocess
import time
import torch
import torchaudio
import webdataset as wds
from aeiou.core import is_silence
from os import path
from pedalboard.io import AudioFile
from torchaudio import transforms as T
from typing import Optional, Callable, List
from .utils import Stereo, Mono, PhaseFlipper, PadCrop_Normalized_T
The provided code snippet includes necessary dependencies for implementing the `log_and_continue` function. Write a Python function `def log_and_continue(exn)` to solve the following problem:
Call in an exception handler to ignore any exception, issue a warning, and continue.
Here is the function:
def log_and_continue(exn):
    """Call in an exception handler to ignore any exception, issue a warning, and continue."""
    print(f"Handling webdataset error ({repr(exn)}). Ignoring.")
    return True
156,892 | import importlib
import numpy as np
import io
import os
import posixpath
import random
import re
import subprocess
import time
import torch
import torchaudio
import webdataset as wds
from aeiou.core import is_silence
from os import path
from pedalboard.io import AudioFile
from torchaudio import transforms as T
from typing import Optional, Callable, List
from .utils import Stereo, Mono, PhaseFlipper, PadCrop_Normalized_T
def is_valid_sample(sample):
    """Return True iff ``sample`` has both 'json' and 'audio', the audio is
    not silent, and the metadata does not flag it as rejected.

    Fix: the original indexed ``sample["audio"]`` / ``sample["json"]``
    unconditionally, so its has_json/has_audio checks never ran — a sample
    missing either key raised KeyError instead of being reported invalid.
    """
    if "json" not in sample or "audio" not in sample:
        return False
    if is_silence(sample["audio"]):
        return False
    meta = sample["json"]
    return not ("__reject__" in meta and meta["__reject__"])
156,893 | import importlib
import numpy as np
import io
import os
import posixpath
import random
import re
import subprocess
import time
import torch
import torchaudio
import webdataset as wds
from aeiou.core import is_silence
from os import path
from pedalboard.io import AudioFile
from torchaudio import transforms as T
from typing import Optional, Callable, List
from .utils import Stereo, Mono, PhaseFlipper, PadCrop_Normalized_T
AUDIO_KEYS = ("flac", "wav", "mp3", "m4a", "ogg", "opus")
def audio_decoder(key, value):
    """webdataset decoder: load audio bytes for known audio extensions; defer (None) otherwise."""
    ext = key.rsplit(".", 1)[-1]
    if ext not in AUDIO_KEYS:
        return None
    return torchaudio.load(io.BytesIO(value))
156,894 | import importlib
import numpy as np
import io
import os
import posixpath
import random
import re
import subprocess
import time
import torch
import torchaudio
import webdataset as wds
from aeiou.core import is_silence
from os import path
from pedalboard.io import AudioFile
from torchaudio import transforms as T
from typing import Optional, Callable, List
from .utils import Stereo, Mono, PhaseFlipper, PadCrop_Normalized_T
class SampleDataset(torch.utils.data.Dataset):
    """Map-style dataset over local audio files.

    Recursively scans ``paths`` for audio files, loads each on access,
    resamples to ``sample_rate``, pad/crops to ``sample_size`` samples,
    applies phase-flip augmentation and channel forcing, and returns
    ``(audio, info)`` where ``info`` carries path/timing metadata.
    Samples that fail to load (or are rejected by ``custom_metadata_fn``)
    are replaced by a random other item.
    """

    def __init__(
        self,
        paths,
        sample_size=65536,
        sample_rate=48000,
        keywords=None,
        relpath=None,
        random_crop=True,
        force_channels="stereo",
        custom_metadata_fn: Optional[Callable[[str], str]] = None
    ):
        super().__init__()
        self.filenames = []
        self.relpath = relpath  # base dir for reporting relative paths in info
        # Augmentation pipeline: random phase inversion only.
        self.augs = torch.nn.Sequential(
            PhaseFlipper(),
        )
        self.pad_crop = PadCrop_Normalized_T(sample_size, sample_rate, randomize=random_crop)
        self.force_channels = force_channels
        # Exactly one of Stereo/Mono is active; the other is Identity.
        self.encoding = torch.nn.Sequential(
            Stereo() if self.force_channels == "stereo" else torch.nn.Identity(),
            Mono() if self.force_channels == "mono" else torch.nn.Identity(),
        )
        self.filenames = get_audio_filenames(paths, keywords)
        print(f'Found {len(self.filenames)} files')
        self.sr = sample_rate
        self.custom_metadata_fn = custom_metadata_fn  # optional hook(info, audio) -> extra metadata

    def load_file(self, filename):
        """Load one audio file and resample it to ``self.sr``.

        Returns a (channels, samples) float tensor. mp3 files go through
        pedalboard's AudioFile; everything else through torchaudio.
        """
        ext = filename.split(".")[-1]
        if ext == "mp3":
            with AudioFile(filename) as f:
                audio = f.read(f.frames)
                audio = torch.from_numpy(audio)
                in_sr = f.samplerate
        else:
            audio, in_sr = torchaudio.load(filename, format=ext)
        if in_sr != self.sr:
            resample_tf = T.Resample(in_sr, self.sr)
            audio = resample_tf(audio)
        return audio

    def __len__(self):
        return len(self.filenames)

    def __getitem__(self, idx):
        """Return ``(audio, info)``; on any load failure, retry a random index."""
        audio_filename = self.filenames[idx]
        try:
            start_time = time.time()
            audio = self.load_file(audio_filename)
            audio, t_start, t_end, seconds_start, seconds_total, padding_mask = self.pad_crop(audio)
            # Run augmentations on this sample (including random crop)
            if self.augs is not None:
                audio = self.augs(audio)
            audio = audio.clamp(-1, 1)
            # Encode the file to assist in prediction
            if self.encoding is not None:
                audio = self.encoding(audio)
            info = {}
            info["path"] = audio_filename
            if self.relpath is not None:
                info["relpath"] = path.relpath(audio_filename, self.relpath)
            info["timestamps"] = (t_start, t_end)
            info["seconds_start"] = seconds_start
            info["seconds_total"] = seconds_total
            info["padding_mask"] = padding_mask
            end_time = time.time()
            info["load_time"] = end_time - start_time
            if self.custom_metadata_fn is not None:
                custom_metadata = self.custom_metadata_fn(info, audio)
                info.update(custom_metadata)
            # The metadata hook can veto a sample; draw a random replacement.
            if "__reject__" in info and info["__reject__"]:
                return self[random.randrange(len(self))]
            return (audio, info)
        except Exception as e:
            print(f'Couldn\'t load file {audio_filename}: {e}')
            return self[random.randrange(len(self))]
class S3DatasetConfig:
    """Describes one S3-hosted webdataset: an id, its bucket path, an optional
    per-dataset metadata hook, and an optional AWS CLI profile."""

    def __init__(
        self,
        id: str,
        s3_path: str,
        custom_metadata_fn: Optional[Callable[[str], str]] = None,
        profile: Optional[str] = None,
    ):
        # Plain value object; `urls` is filled lazily by load_data_urls().
        self.id = id
        self.s3_path = s3_path
        self.custom_metadata_fn = custom_metadata_fn
        self.profile = profile
        self.urls = []

    def load_data_urls(self):
        """Enumerate every shard (tar) URL under ``s3_path``, cache the list
        on ``self.urls`` and return it."""
        profile_map = {self.s3_path: self.profile} if self.profile else {}
        self.urls = get_all_s3_urls(
            names=[self.s3_path],
            s3_url_prefix=None,
            recursive=True,
            profiles=profile_map,
        )
        return self.urls
def collation_fn(samples):
    """Collate a list of per-sample tuples into batched columns.

    Numbers become numpy arrays, tensors are stacked, ndarrays are batched
    into one array; any other column is returned as the raw tuple of values.
    """
    def _batch(column):
        first = column[0]
        if isinstance(first, (int, float)):
            return np.array(column)
        if isinstance(first, torch.Tensor):
            return torch.stack(column)
        if isinstance(first, np.ndarray):
            return np.array(column)
        return column
    return [_batch(column) for column in zip(*samples)]
class S3WebDataLoader():
    """Streams audio training batches from S3-hosted WebDataset tar shards.

    Builds a ``wds.DataPipeline`` that resamples shards, decodes audio,
    preprocesses each sample (resample / pad-crop / channel forcing /
    phase augmentation / metadata hooks), and batches the results; the
    finished loader is exposed as ``self.data_loader``.
    """

    def __init__(
        self,
        datasets: List[S3DatasetConfig],
        batch_size,
        sample_size,
        sample_rate=48000,
        num_workers=8,
        epoch_steps=1000,
        random_crop=True,
        force_channels="stereo",
        augment_phase=True,
        **data_loader_kwargs
    ):
        # Kept so wds_preprocess can match samples back to their dataset
        # config (for per-dataset custom_metadata_fn hooks).
        self.datasets = datasets
        self.sample_size = sample_size
        self.sample_rate = sample_rate
        self.random_crop = random_crop
        self.force_channels = force_channels
        self.augment_phase = augment_phase

        urls = [dataset.load_data_urls() for dataset in datasets]

        # Flatten the list of lists of URLs
        urls = [url for dataset_urls in urls for url in dataset_urls]

        # Each pipeline stage uses log_and_continue so a single bad shard or
        # sample does not kill the whole stream.
        self.dataset = wds.DataPipeline(
            wds.ResampledShards(urls),
            wds.tarfile_to_samples(handler=log_and_continue),
            wds.decode(audio_decoder, handler=log_and_continue),
            wds.map(self.wds_preprocess, handler=log_and_continue),
            wds.select(is_valid_sample),
            wds.to_tuple("audio", "json", handler=log_and_continue),
            wds.batched(batch_size, partial=False, collation_fn=collation_fn),
        # Each worker gets an equal share of the epoch's steps.
        ).with_epoch(epoch_steps//num_workers if num_workers > 0 else epoch_steps)

        self.data_loader = wds.WebLoader(self.dataset, num_workers=num_workers, **data_loader_kwargs)

    def wds_preprocess(self, sample):
        """Preprocess one decoded WebDataset sample in place.

        Returns the mutated sample with "audio" set (and mirrored into the
        "json" metadata), or None to tell WebDataset to skip the sample.
        """
        # Locate the entry whose key ends with one of the known audio
        # extensions (AUDIO_KEYS — defined elsewhere in this module).
        found_key, rewrite_key = '', ''
        for k, v in sample.items():  # print the all entries in dict
            for akey in AUDIO_KEYS:
                if k.endswith(akey):
                    # to rename long/weird key with its simpler counterpart
                    found_key, rewrite_key = k, akey
                    break
            if '' != found_key:
                break
        if '' == found_key:  # got no audio!
            return None  # try returning None to tell WebDataset to skip this one

        audio, in_sr = sample[found_key]
        if in_sr != self.sample_rate:
            # Resample to the loader's target rate.
            resample_tf = T.Resample(in_sr, self.sample_rate)
            audio = resample_tf(audio)

        if self.sample_size is not None:
            # Pad/crop and get the relative timestamp
            pad_crop = PadCrop_Normalized_T(
                self.sample_size, randomize=self.random_crop, sample_rate=self.sample_rate)
            audio, t_start, t_end, seconds_start, seconds_total, padding_mask = pad_crop(
                audio)
            sample["json"]["seconds_start"] = seconds_start
            sample["json"]["seconds_total"] = seconds_total
            sample["json"]["padding_mask"] = padding_mask
        else:
            # No cropping: the whole clip spans the [0, 1] relative range.
            t_start, t_end = 0, 1

        # Check if audio is length zero, initialize to a single zero if so
        if audio.shape[-1] == 0:
            audio = torch.zeros(1, 1)

        # Make the audio stereo and augment by randomly inverting phase
        augs = torch.nn.Sequential(
            Stereo() if self.force_channels == "stereo" else torch.nn.Identity(),
            Mono() if self.force_channels == "mono" else torch.nn.Identity(),
            PhaseFlipper() if self.augment_phase else torch.nn.Identity()
        )

        audio = augs(audio)

        sample["json"]["timestamps"] = (t_start, t_end)

        # Mirror any "text" metadata into the "prompt" key used downstream.
        if "text" in sample["json"]:
            sample["json"]["prompt"] = sample["json"]["text"]

        # Check for custom metadata functions
        for dataset in self.datasets:
            if dataset.custom_metadata_fn is None:
                continue

            # Match the sample's shard URL back to its dataset config.
            if dataset.s3_path in sample["__url__"]:
                custom_metadata = dataset.custom_metadata_fn(sample["json"], audio)
                sample["json"].update(custom_metadata)

        if found_key != rewrite_key:  # rename long/weird key with its simpler counterpart
            del sample[found_key]

        sample["audio"] = audio

        # Add audio to the metadata as well for conditioning
        sample["json"]["audio"] = audio

        return sample
def create_dataloader_from_config(dataset_config, batch_size, sample_size, sample_rate, audio_channels=2, num_workers=4):
dataset_type = dataset_config.get("dataset_type", None)
assert dataset_type is not None, "Dataset type must be specified in dataset config"
if audio_channels == 1:
force_channels = "mono"
else:
force_channels = "stereo"
if dataset_type == "audio_dir":
audio_dir_configs = dataset_config.get("datasets", None)
assert audio_dir_configs is not None, "Directory configuration must be specified in datasets[\"dataset\"]"
training_dirs = []
custom_metadata_fn = None
custom_metadata_module_path = dataset_config.get("custom_metadata_module", None)
if custom_metadata_module_path is not None:
spec = importlib.util.spec_from_file_location("metadata_module", custom_metadata_module_path)
metadata_module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(metadata_module)
custom_metadata_fn = metadata_module.get_custom_metadata
for audio_dir_config in audio_dir_configs:
audio_dir_path = audio_dir_config.get("path", None)
assert audio_dir_path is not None, "Path must be set for local audio directory configuration"
training_dirs.append(audio_dir_path)
train_set = SampleDataset(
training_dirs,
sample_rate=sample_rate,
sample_size=sample_size,
random_crop=dataset_config.get("random_crop", True),
force_channels=force_channels,
custom_metadata_fn=custom_metadata_fn,
relpath=training_dirs[0] #TODO: Make relpath relative to each training dir
)
return torch.utils.data.DataLoader(train_set, batch_size, shuffle=True,
num_workers=num_workers, persistent_workers=True, pin_memory=True, drop_last=True, collate_fn=collation_fn)
elif dataset_type == "s3":
dataset_configs = []
for s3_config in dataset_config["datasets"]:
custom_metadata_fn = None
custom_metadata_module_path = s3_config.get("custom_metadata_module", None)
if custom_metadata_module_path is not None:
spec = importlib.util.spec_from_file_location("metadata_module", custom_metadata_module_path)
metadata_module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(metadata_module)
custom_metadata_fn = metadata_module.get_custom_metadata
dataset_configs.append(
S3DatasetConfig(
id=s3_config["id"],
s3_path=s3_config["s3_path"],
custom_metadata_fn=custom_metadata_fn,
profile=s3_config.get("profile", None),
)
)
return S3WebDataLoader(
dataset_configs,
sample_rate=sample_rate,
sample_size=sample_size,
batch_size=batch_size,
random_crop=dataset_config.get("random_crop", True),
num_workers=num_workers,
persistent_workers=True,
force_channels=force_channels,
epoch_steps=dataset_config.get("epoch_steps", 2000),
).data_loader | null |
156,895 | import torch
import os
The provided code snippet includes necessary dependencies for implementing the `get_rank` function. Write a Python function `def get_rank()` to solve the following problem:
Get rank of current process.
Here is the function:
def get_rank():
    """Get rank of current process.

    Preference order: the SLURM-assigned rank when running under SLURM,
    then the torch.distributed rank, else 0 for single-process runs.

    Returns:
        int: the process rank (0 when not distributed).
    """
    # NOTE: removed a leftover debug `print(os.environ.keys())` that dumped
    # the full environment key list on every call.
    if "SLURM_PROCID" in os.environ:
        return int(os.environ["SLURM_PROCID"])

    if not torch.distributed.is_available() or not torch.distributed.is_initialized():
        return 0

    return torch.distributed.get_rank()
156,896 | import torch
import os
The provided code snippet includes necessary dependencies for implementing the `create_optimizer_from_config` function. Write a Python function `def create_optimizer_from_config(optimizer_config, parameters)` to solve the following problem:
Create optimizer from config. Args: parameters (iterable): parameters to optimize. optimizer_config (dict): optimizer config. Returns: torch.optim.Optimizer: optimizer.
Here is the function:
def create_optimizer_from_config(optimizer_config, parameters):
    """Create optimizer from config.

    Args:
        parameters (iterable): parameters to optimize.
        optimizer_config (dict): optimizer config with "type" and "config" keys.

    Returns:
        torch.optim.Optimizer: optimizer.
    """
    optimizer_type = optimizer_config["type"]

    # FusedAdam lives in deepspeed; every other name resolves from torch.optim.
    if optimizer_type == "FusedAdam":
        from deepspeed.ops.adam import FusedAdam
        return FusedAdam(parameters, **optimizer_config["config"])

    optimizer_cls = getattr(torch.optim, optimizer_type)
    return optimizer_cls(parameters, **optimizer_config["config"])
156,897 | import torch
import os
class InverseLR(torch.optim.lr_scheduler._LRScheduler):
    """Implements an inverse decay learning rate schedule with an optional exponential
    warmup. When last_epoch=-1, sets initial lr as lr.
    inv_gamma is the number of steps/epochs required for the learning rate to decay to
    (1 / 2)**power of its original value.
    Args:
        optimizer (Optimizer): Wrapped optimizer.
        inv_gamma (float): Inverse multiplicative factor of learning rate decay. Default: 1.
        power (float): Exponential factor of learning rate decay. Default: 1.
        warmup (float): Exponential warmup factor (0 <= warmup < 1, 0 to disable)
            Default: 0.
        final_lr (float): The final learning rate. Default: 0.
        last_epoch (int): The index of last epoch. Default: -1.
        verbose (bool): If ``True``, prints a message to stdout for
            each update. Default: ``False``.
    """

    def __init__(self, optimizer, inv_gamma=1., power=1., warmup=0., final_lr=0.,
                 last_epoch=-1, verbose=False):
        self.inv_gamma = inv_gamma
        self.power = power
        if not 0. <= warmup < 1:
            raise ValueError('Invalid value for warmup')
        self.warmup = warmup
        self.final_lr = final_lr
        # The parent constructor performs the initial step, so all attributes
        # read by get_lr() must already be set at this point.
        super().__init__(optimizer, last_epoch, verbose)

    def get_lr(self):
        if not self._get_lr_called_within_step:
            import warnings
            warnings.warn("To get the last learning rate computed by the scheduler, "
                          "please use `get_last_lr()`.")

        return self._get_closed_form_lr()

    def _get_closed_form_lr(self):
        epoch = self.last_epoch
        # Warmup factor approaches 1 exponentially; decay is a power law in epoch.
        warmup_factor = 1 - self.warmup ** (epoch + 1)
        decay = (1 + epoch / self.inv_gamma) ** -self.power
        return [warmup_factor * max(self.final_lr, base_lr * decay)
                for base_lr in self.base_lrs]
The provided code snippet includes necessary dependencies for implementing the `create_scheduler_from_config` function. Write a Python function `def create_scheduler_from_config(scheduler_config, optimizer)` to solve the following problem:
Create scheduler from config. Args: scheduler_config (dict): scheduler config. optimizer (torch.optim.Optimizer): optimizer. Returns: torch.optim.lr_scheduler._LRScheduler: scheduler.
Here is the function:
def create_scheduler_from_config(scheduler_config, optimizer):
    """Create scheduler from config.

    Args:
        scheduler_config (dict): scheduler config with "type" and "config" keys.
        optimizer (torch.optim.Optimizer): optimizer.

    Returns:
        torch.optim.lr_scheduler._LRScheduler: scheduler.
    """
    sched_type = scheduler_config["type"]
    # "InverseLR" is defined locally; all other names come from torch.
    if sched_type == "InverseLR":
        scheduler_fn = InverseLR
    else:
        scheduler_fn = getattr(torch.optim.lr_scheduler, sched_type)
    return scheduler_fn(optimizer, **scheduler_config["config"])
156,898 | import torch
import torchaudio
import wandb
from einops import rearrange
from safetensors.torch import save_file, save_model
from torch import nn, optim
from torch.nn import functional as F
from torch.nn.parameter import Parameter
from ema_pytorch import EMA
import auraloss
import pytorch_lightning as pl
from ..models.autoencoders import AudioAutoencoder
from ..models.discriminators import EncodecDiscriminator, OobleckDiscriminator, DACGANLoss
from ..models.bottleneck import VAEBottleneck, RVQBottleneck, DACRVQBottleneck, DACRVQVAEBottleneck, RVQVAEBottleneck, WassersteinBottleneck
from .losses import MultiLoss, AuralossLoss, ValueLoss, L1Loss
from .utils import create_optimizer_from_config, create_scheduler_from_config
from pytorch_lightning.utilities.rank_zero import rank_zero_only
from aeiou.viz import pca_point_cloud, audio_spectrogram_image, tokens_spectrogram_image
class VAEBottleneck(Bottleneck):
    """Continuous VAE bottleneck: splits channels into mean/scale and samples."""

    def __init__(self):
        super().__init__(is_discrete=False)

    def encode(self, x, return_info=False, **kwargs):
        # First half of the channel dim is the mean, second half the scale.
        mean, scale = x.chunk(2, dim=1)
        latents, kl = vae_sample(mean, scale)
        if return_info:
            return latents, {"kl": kl}
        return latents

    def decode(self, x):
        # Sampling happens at encode time; decoding is the identity.
        return x
class WassersteinBottleneck(Bottleneck):
    """Identity bottleneck regularized with an MMD penalty during training."""

    def __init__(self, noise_augment_dim: int = 0):
        super().__init__(is_discrete=False)
        self.noise_augment_dim = noise_augment_dim

    def encode(self, x, return_info=False):
        info = {}
        # MMD is only computed when training AND the caller wants the info dict.
        if self.training and return_info:
            info["mmd"] = compute_mmd(x)
        return (x, info) if return_info else x

    def decode(self, x):
        if self.noise_augment_dim > 0:
            # Append extra channels of Gaussian noise before decoding.
            noise = torch.randn(x.shape[0], self.noise_augment_dim,
                                x.shape[-1]).type_as(x)
            x = torch.cat([x, noise], dim=1)
        return x
class RVQBottleneck(DiscreteBottleneck):
    """Residual vector quantization bottleneck (lucidrains ResidualVQ)."""

    def __init__(self, **quantizer_kwargs):
        super().__init__(num_quantizers=quantizer_kwargs["num_quantizers"],
                         codebook_size=quantizer_kwargs["codebook_size"],
                         tokens_id="quantizer_indices")
        self.quantizer = ResidualVQ(**quantizer_kwargs)
        self.num_quantizers = quantizer_kwargs["num_quantizers"]

    def encode(self, x, return_info=False, **kwargs):
        # ResidualVQ expects (batch, length, channels).
        quantized, indices, loss = self.quantizer(rearrange(x, "b c n -> b n c"))
        quantized = rearrange(quantized, "b n c -> b c n")
        info = {
            "quantizer_indices": indices,
            "quantizer_loss": loss.mean(),
        }
        return (quantized, info) if return_info else quantized

    def decode(self, x):
        return x

    def decode_tokens(self, codes, **kwargs):
        latents = self.quantizer.get_outputs_from_indices(codes)
        return self.decode(latents, **kwargs)
class RVQVAEBottleneck(DiscreteBottleneck):
    """VAE sampling followed by residual vector quantization."""

    def __init__(self, **quantizer_kwargs):
        super().__init__(num_quantizers=quantizer_kwargs["num_quantizers"],
                         codebook_size=quantizer_kwargs["codebook_size"],
                         tokens_id="quantizer_indices")
        self.quantizer = ResidualVQ(**quantizer_kwargs)
        self.num_quantizers = quantizer_kwargs["num_quantizers"]

    def encode(self, x, return_info=False):
        # Sample continuous latents from the mean/scale halves first.
        mean, scale = x.chunk(2, dim=1)
        sampled, kl = vae_sample(mean, scale)

        # Then quantize; ResidualVQ expects (batch, length, channels).
        quantized, indices, loss = self.quantizer(rearrange(sampled, "b c n -> b n c"))
        quantized = rearrange(quantized, "b n c -> b c n")

        info = {
            "kl": kl,
            "quantizer_indices": indices,
            "quantizer_loss": loss.mean(),
        }
        return (quantized, info) if return_info else quantized

    def decode(self, x):
        return x

    def decode_tokens(self, codes, **kwargs):
        latents = self.quantizer.get_outputs_from_indices(codes)
        return self.decode(latents, **kwargs)
class DACRVQBottleneck(DiscreteBottleneck):
    """Descript (DAC) residual vector quantization bottleneck.

    Args:
        quantize_on_decode (bool): if True, encode() passes latents through
            unquantized and quantization happens inside decode() instead.
        **quantizer_kwargs: forwarded to DACResidualVQ; must include
            "n_codebooks" and "codebook_size".
    """

    def __init__(self, quantize_on_decode=False, **quantizer_kwargs):
        super().__init__(num_quantizers = quantizer_kwargs["n_codebooks"], codebook_size = quantizer_kwargs["codebook_size"], tokens_id = "codes")
        self.quantizer = DACResidualVQ(**quantizer_kwargs)
        self.num_quantizers = quantizer_kwargs["n_codebooks"]
        self.quantize_on_decode = quantize_on_decode

    def encode(self, x, return_info=False, **kwargs):
        info = {}

        info["pre_quantizer"] = x

        if self.quantize_on_decode:
            # BUGFIX: was `return x, info if return_info else x`, which parses
            # as `return x, (info if return_info else x)` and therefore always
            # returned a tuple — `(x, x)` when return_info was False.
            return (x, info) if return_info else x

        z, codes, latents, commitment_loss, codebook_loss = self.quantizer(x, **kwargs)

        output = {
            "z": z,
            "codes": codes,
            "latents": latents,
            "vq/commitment_loss": commitment_loss,
            "vq/codebook_loss": codebook_loss,
        }

        # Average per codebook so the loss scale is independent of RVQ depth.
        output["vq/commitment_loss"] /= self.num_quantizers
        output["vq/codebook_loss"] /= self.num_quantizers

        info.update(output)

        if return_info:
            return output["z"], info

        return output["z"]

    def decode(self, x):
        if self.quantize_on_decode:
            x = self.quantizer(x)[0]
        return x

    def decode_tokens(self, codes, **kwargs):
        latents, _, _ = self.quantizer.from_codes(codes)
        return self.decode(latents, **kwargs)
class DACRVQVAEBottleneck(DiscreteBottleneck):
    """DAC residual VQ bottleneck with VAE sampling before quantization.

    Args:
        quantize_on_decode (bool): if True, encode() returns the sampled
            latents unquantized and quantization happens in decode().
        **quantizer_kwargs: forwarded to DACResidualVQ; must include
            "n_codebooks" and "codebook_size".
    """

    def __init__(self, quantize_on_decode=False, **quantizer_kwargs):
        super().__init__(num_quantizers = quantizer_kwargs["n_codebooks"], codebook_size = quantizer_kwargs["codebook_size"], tokens_id = "codes")
        self.quantizer = DACResidualVQ(**quantizer_kwargs)
        self.num_quantizers = quantizer_kwargs["n_codebooks"]
        self.quantize_on_decode = quantize_on_decode

    def encode(self, x, return_info=False, n_quantizers: int = None):
        info = {}

        # First half of the channel dim is the mean, second half the scale.
        mean, scale = x.chunk(2, dim=1)

        x, kl = vae_sample(mean, scale)

        info["pre_quantizer"] = x
        info["kl"] = kl

        if self.quantize_on_decode:
            # BUGFIX: was `return x, info if return_info else x`, which parses
            # as `return x, (info if return_info else x)` and therefore always
            # returned a tuple — `(x, x)` when return_info was False.
            return (x, info) if return_info else x

        z, codes, latents, commitment_loss, codebook_loss = self.quantizer(x, n_quantizers=n_quantizers)

        output = {
            "z": z,
            "codes": codes,
            "latents": latents,
            "vq/commitment_loss": commitment_loss,
            "vq/codebook_loss": codebook_loss,
        }

        # Average per codebook so the loss scale is independent of RVQ depth.
        output["vq/commitment_loss"] /= self.num_quantizers
        output["vq/codebook_loss"] /= self.num_quantizers

        info.update(output)

        if return_info:
            return output["z"], info

        return output["z"]

    def decode(self, x):
        if self.quantize_on_decode:
            x = self.quantizer(x)[0]
        return x

    def decode_tokens(self, codes, **kwargs):
        latents, _, _ = self.quantizer.from_codes(codes)
        return self.decode(latents, **kwargs)
class ValueLoss(LossModule):
    """Loss term that passes through a value precomputed in the info dict.

    Used for quantities (KL, adversarial loss, ...) that are produced
    elsewhere and only need weighting here.
    """

    def __init__(self, key: str, name, weight: float = 1.0):
        super().__init__(name=name, weight=weight)
        self.key = key

    def forward(self, info):
        # The value was computed upstream; just apply the configured weight.
        return self.weight * info[self.key]
def create_loss_modules_from_bottleneck(bottleneck, loss_config):
    """Build the auxiliary loss terms implied by a bottleneck type.

    Args:
        bottleneck: the bottleneck module attached to the autoencoder.
        loss_config (dict | None): loss configuration; weights are read from
            loss_config['bottleneck']['weights'] when present.

    Returns:
        list: ValueLoss instances to append to the generator losses.
    """
    losses = []

    if isinstance(bottleneck, (VAEBottleneck, DACRVQVAEBottleneck, RVQVAEBottleneck)):
        try:
            kl_weight = loss_config['bottleneck']['weights']['kl']
        # BUGFIX: was a bare `except:` that also swallowed KeyboardInterrupt
        # and SystemExit; only missing keys / a None config are expected here.
        except (KeyError, TypeError):
            kl_weight = 1e-6

        losses.append(ValueLoss(key='kl', weight=kl_weight, name='kl_loss'))

    if isinstance(bottleneck, (RVQBottleneck, RVQVAEBottleneck)):
        losses.append(ValueLoss(key='quantizer_loss', weight=1.0, name='quantizer_loss'))

    if isinstance(bottleneck, (DACRVQBottleneck, DACRVQVAEBottleneck)):
        losses.append(ValueLoss(key='vq/codebook_loss', weight=1.0, name='codebook_loss'))
        losses.append(ValueLoss(key='vq/commitment_loss', weight=0.25, name='commitment_loss'))

    if isinstance(bottleneck, WassersteinBottleneck):
        try:
            mmd_weight = loss_config['bottleneck']['weights']['mmd']
        except (KeyError, TypeError):
            mmd_weight = 100

        losses.append(ValueLoss(key='mmd', weight=mmd_weight, name='mmd_loss'))

    return losses
156,899 | import pytorch_lightning as pl
import sys, gc
import random
import torch
import torchaudio
import typing as tp
import wandb
from aeiou.viz import pca_point_cloud, audio_spectrogram_image, tokens_spectrogram_image
import auraloss
from ema_pytorch import EMA
from einops import rearrange
from safetensors.torch import save_file
from torch import optim
from torch.nn import functional as F
from pytorch_lightning.utilities.rank_zero import rank_zero_only
from ..inference.sampling import get_alphas_sigmas, sample
from ..models.diffusion import DiffusionModelWrapper, ConditionedDiffusionModelWrapper
from ..models.autoencoders import DiffusionAutoencoder
from ..models.diffusion_prior import PriorType
from .autoencoders import create_loss_modules_from_bottleneck
from .losses import AuralossLoss, MSELoss, MultiLoss
from .utils import create_optimizer_from_config, create_scheduler_from_config
from time import time
def create_source_mixture(reals, num_sources=2):
    """Create a fake mixture source by mixing elements from the training batch
    together with random offsets.

    Args:
        reals (Tensor): batch of shape (batch, channels, seq_len). NOTE: this
            tensor is mutated in place — each reals[i] is shifted so it stays
            aligned with its own contribution to the mixture.
        num_sources (int): number of *other* batch elements mixed into each
            mixture (the element itself is always included).

    Returns:
        Tensor: mixture with the same shape as ``reals``.
    """
    source = torch.zeros_like(reals)
    for i in range(reals.shape[0]):
        sources_added = 0

        js = list(range(reals.shape[0]))
        random.shuffle(js)

        for j in js:
            if i == j or (i != j and sources_added < num_sources):
                # Randomly offset the mixed element between 0 and the length of the source
                seq_len = reals.shape[2]
                offset = random.randint(0, seq_len-1)
                # BUGFIX: `reals[j, :, :-offset]` is the empty slice [:0] when
                # offset == 0, which crashed the += on a shape mismatch; use an
                # explicit end index so offset == 0 means "copy everything".
                source[i, :, offset:] += reals[j, :, :seq_len - offset]
                if i == j:
                    # If this is the real one, shift the reals as well to ensure alignment
                    new_reals = torch.zeros_like(reals[i])
                    new_reals[:, offset:] = reals[i, :, :seq_len - offset]
                    reals[i] = new_reals
                sources_added += 1

    return source
156,900 | import torch
from torch.nn import Parameter
from ..models.factory import create_model_from_config
def create_model_from_config(model_config):
    """Instantiate the model described by *model_config*.

    Dispatches on the config's "model_type" key to the matching factory;
    factories are imported lazily so only the needed submodule is loaded.
    """
    model_type = model_config.get('model_type', None)

    assert model_type is not None, 'model_type must be specified in model config'

    if model_type == 'autoencoder':
        from .autoencoders import create_autoencoder_from_config as factory
    elif model_type == 'diffusion_uncond':
        from .diffusion import create_diffusion_uncond_from_config as factory
    elif model_type in ('diffusion_cond', 'diffusion_cond_inpaint', 'diffusion_prior'):
        from .diffusion import create_diffusion_cond_from_config as factory
    elif model_type == 'diffusion_autoencoder':
        from .autoencoders import create_diffAE_from_config as factory
    elif model_type == 'musicgen':
        from .musicgen import create_musicgen_from_config as factory
    elif model_type == 'lm':
        from .lm import create_audio_lm_from_config as factory
    else:
        raise NotImplementedError(f'Unknown model type: {model_type}')

    return factory(model_config)
class AutoencoderTrainingWrapper(pl.LightningModule):
    """Lightning wrapper for adversarial autoencoder training.

    Alternates generator and discriminator updates via manual optimization,
    with optional EMA weights, discriminator warmup, teacher-model
    distillation, and latent masking for noise robustness.
    """

    def __init__(
        self,
        autoencoder: AudioAutoencoder,
        lr: float = 1e-4,
        warmup_steps: int = 0,
        encoder_freeze_on_warmup: bool = False,
        sample_rate=48000,
        loss_config: dict = None,
        optimizer_configs: dict = None,
        use_ema: bool = True,
        ema_copy = None,
        force_input_mono = False,
        latent_mask_ratio = 0.0,
        teacher_model: AudioAutoencoder = None
    ):
        super().__init__()

        # Two optimizers (gen/disc) stepped by hand in training_step.
        self.automatic_optimization = False

        self.autoencoder = autoencoder

        # "Warmed up" gates the adversarial losses (see training_step).
        self.warmed_up = False
        self.warmup_steps = warmup_steps
        self.encoder_freeze_on_warmup = encoder_freeze_on_warmup
        self.lr = lr

        self.force_input_mono = force_input_mono

        self.teacher_model = teacher_model

        if optimizer_configs is None:
            # Default: AdamW for both networks at the same learning rate.
            optimizer_configs = {
                "autoencoder": {
                    "optimizer": {
                        "type": "AdamW",
                        "config": {
                            "lr": lr,
                            "betas": (.8, .99)
                        }
                    }
                },
                "discriminator": {
                    "optimizer": {
                        "type": "AdamW",
                        "config": {
                            "lr": lr,
                            "betas": (.8, .99)
                        }
                    }
                }
            }

        self.optimizer_configs = optimizer_configs

        if loss_config is None:
            # Default multi-resolution STFT setup: 75% overlap at each scale.
            scales = [2048, 1024, 512, 256, 128, 64, 32]
            hop_sizes = []
            win_lengths = []
            overlap = 0.75
            for s in scales:
                hop_sizes.append(int(s * (1 - overlap)))
                win_lengths.append(s)

            loss_config = {
                "discriminator": {
                    "type": "encodec",
                    "config": {
                        "n_ffts": scales,
                        "hop_lengths": hop_sizes,
                        "win_lengths": win_lengths,
                        "filters": 32
                    },
                    "weights": {
                        "adversarial": 0.1,
                        "feature_matching": 5.0,
                    }
                },
                "spectral": {
                    "type": "mrstft",
                    "config": {
                        "fft_sizes": scales,
                        "hop_sizes": hop_sizes,
                        "win_lengths": win_lengths,
                        "perceptual_weighting": True
                    },
                    "weights": {
                        "mrstft": 1.0,
                    }
                },
                "time": {
                    "type": "l1",
                    "config": {},
                    "weights": {
                        "l1": 0.0,
                    }
                }
            }

        self.loss_config = loss_config

        # Spectral reconstruction loss
        stft_loss_args = loss_config['spectral']['config']

        if self.autoencoder.out_channels == 2:
            # Stereo: sum/difference loss plus a per-channel L/R loss.
            self.sdstft = auraloss.freq.SumAndDifferenceSTFTLoss(sample_rate=sample_rate, **stft_loss_args)
            self.lrstft = auraloss.freq.MultiResolutionSTFTLoss(sample_rate=sample_rate, **stft_loss_args)
        else:
            self.sdstft = auraloss.freq.MultiResolutionSTFTLoss(sample_rate=sample_rate, **stft_loss_args)

        # Discriminator
        # NOTE(review): an unrecognized discriminator type leaves
        # self.discriminator unset — confirm whether it should raise instead.
        if loss_config['discriminator']['type'] == 'oobleck':
            self.discriminator = OobleckDiscriminator(**loss_config['discriminator']['config'])
        elif loss_config['discriminator']['type'] == 'encodec':
            self.discriminator = EncodecDiscriminator(in_channels=self.autoencoder.out_channels, **loss_config['discriminator']['config'])
        elif loss_config['discriminator']['type'] == 'dac':
            self.discriminator = DACGANLoss(channels=self.autoencoder.out_channels, sample_rate=sample_rate, **loss_config['discriminator']['config'])

        self.gen_loss_modules = []

        # Adversarial and feature matching losses
        self.gen_loss_modules += [
            ValueLoss(key='loss_adv', weight=self.loss_config['discriminator']['weights']['adversarial'], name='loss_adv'),
            ValueLoss(key='feature_matching_distance', weight=self.loss_config['discriminator']['weights']['feature_matching'], name='feature_matching'),
        ]

        if self.teacher_model is not None:
            # Distillation losses
            stft_loss_weight = self.loss_config['spectral']['weights']['mrstft'] * 0.25
            self.gen_loss_modules += [
                AuralossLoss(self.sdstft, 'reals', 'decoded', name='mrstft_loss', weight=stft_loss_weight), # Reconstruction loss
                AuralossLoss(self.sdstft, 'decoded', 'teacher_decoded', name='mrstft_loss_distill', weight=stft_loss_weight), # Distilled model's decoder is compatible with teacher's decoder
                AuralossLoss(self.sdstft, 'reals', 'own_latents_teacher_decoded', name='mrstft_loss_own_latents_teacher', weight=stft_loss_weight), # Distilled model's encoder is compatible with teacher's decoder
                AuralossLoss(self.sdstft, 'reals', 'teacher_latents_own_decoded', name='mrstft_loss_teacher_latents_own', weight=stft_loss_weight) # Teacher's encoder is compatible with distilled model's decoder
            ]
        else:
            # Reconstruction loss
            self.gen_loss_modules += [
                AuralossLoss(self.sdstft, 'reals', 'decoded', name='mrstft_loss', weight=self.loss_config['spectral']['weights']['mrstft']),
            ]

            if self.autoencoder.out_channels == 2:
                # Add left and right channel reconstruction losses in addition to the sum and difference
                self.gen_loss_modules += [
                    AuralossLoss(self.lrstft, 'reals_left', 'decoded_left', name='stft_loss_left', weight=self.loss_config['spectral']['weights']['mrstft']/2),
                    AuralossLoss(self.lrstft, 'reals_right', 'decoded_right', name='stft_loss_right', weight=self.loss_config['spectral']['weights']['mrstft']/2),
                ]
                # NOTE(review): this adds a second 'mrstft_loss' term for the
                # stereo non-distilled path, effectively doubling its weight —
                # possibly intentional, but worth confirming.
                self.gen_loss_modules += [
                    AuralossLoss(self.sdstft, 'reals', 'decoded', name='mrstft_loss', weight=self.loss_config['spectral']['weights']['mrstft']),
                ]

        if self.loss_config['time']['weights']['l1'] > 0.0:
            self.gen_loss_modules.append(L1Loss(key_a='reals', key_b='decoded', weight=self.loss_config['time']['weights']['l1'], name='l1_time_loss'))

        # Bottleneck-specific regularizers (KL, VQ commitment, MMD, ...).
        if self.autoencoder.bottleneck is not None:
            self.gen_loss_modules += create_loss_modules_from_bottleneck(self.autoencoder.bottleneck, self.loss_config)

        self.losses_gen = MultiLoss(self.gen_loss_modules)

        self.disc_loss_modules = [
            ValueLoss(key='loss_dis', weight=1.0, name='discriminator_loss'),
        ]

        self.losses_disc = MultiLoss(self.disc_loss_modules)

        # Set up EMA for model weights
        self.autoencoder_ema = None
        self.use_ema = use_ema
        if self.use_ema:
            self.autoencoder_ema = EMA(
                self.autoencoder,
                ema_model=ema_copy,
                beta=0.9999,
                power=3/4,
                update_every=1,
                update_after_step=1
            )

        self.latent_mask_ratio = latent_mask_ratio

    def configure_optimizers(self):
        """Build gen/disc optimizers (and schedulers, when both configured)."""
        opt_gen = create_optimizer_from_config(self.optimizer_configs['autoencoder']['optimizer'], self.autoencoder.parameters())
        opt_disc = create_optimizer_from_config(self.optimizer_configs['discriminator']['optimizer'], self.discriminator.parameters())

        # Schedulers are only returned when BOTH sides define one, so the
        # optimizer/scheduler lists stay aligned.
        if "scheduler" in self.optimizer_configs['autoencoder'] and "scheduler" in self.optimizer_configs['discriminator']:
            sched_gen = create_scheduler_from_config(self.optimizer_configs['autoencoder']['scheduler'], opt_gen)
            sched_disc = create_scheduler_from_config(self.optimizer_configs['discriminator']['scheduler'], opt_disc)
            return [opt_gen, opt_disc], [sched_gen, sched_disc]

        return [opt_gen, opt_disc]

    def training_step(self, batch, batch_idx):
        """One manual-optimization step, alternating gen/disc by global_step parity."""
        reals, _ = batch

        # Remove extra dimension added by WebDataset
        if reals.ndim == 4 and reals.shape[0] == 1:
            reals = reals[0]

        # Adversarial losses switch on once warmup_steps have elapsed.
        if self.global_step >= self.warmup_steps:
            self.warmed_up = True

        loss_info = {}

        loss_info["reals"] = reals

        encoder_input = reals

        # Optionally downmix to mono before encoding (decoder output stays stereo).
        if self.force_input_mono and encoder_input.shape[1] > 1:
            encoder_input = encoder_input.mean(dim=1, keepdim=True)

        loss_info["encoder_input"] = encoder_input

        data_std = encoder_input.std()

        if self.warmed_up and self.encoder_freeze_on_warmup:
            # Freeze the encoder after warmup: no gradients through encode().
            with torch.no_grad():
                latents, encoder_info = self.autoencoder.encode(encoder_input, return_info=True)
        else:
            latents, encoder_info = self.autoencoder.encode(encoder_input, return_info=True)

        loss_info["latents"] = latents

        loss_info.update(encoder_info)

        # Encode with teacher model for distillation
        if self.teacher_model is not None:
            with torch.no_grad():
                teacher_latents = self.teacher_model.encode(encoder_input, return_info=False)
                loss_info['teacher_latents'] = teacher_latents

        # Optionally mask out some latents for noise resistance
        if self.latent_mask_ratio > 0.0:
            mask = torch.rand_like(latents) < self.latent_mask_ratio
            latents = torch.where(mask, torch.zeros_like(latents), latents)

        decoded = self.autoencoder.decode(latents)

        loss_info["decoded"] = decoded

        # Per-channel views used by the left/right STFT losses.
        if self.autoencoder.out_channels == 2:
            loss_info["decoded_left"] = decoded[:, 0:1, :]
            loss_info["decoded_right"] = decoded[:, 1:2, :]
            loss_info["reals_left"] = reals[:, 0:1, :]
            loss_info["reals_right"] = reals[:, 1:2, :]

        # Distillation
        if self.teacher_model is not None:
            with torch.no_grad():
                teacher_decoded = self.teacher_model.decode(teacher_latents)
                own_latents_teacher_decoded = self.teacher_model.decode(latents) #Distilled model's latents decoded by teacher
                teacher_latents_own_decoded = self.autoencoder.decode(teacher_latents) #Teacher's latents decoded by distilled model

                loss_info['teacher_decoded'] = teacher_decoded
                loss_info['own_latents_teacher_decoded'] = own_latents_teacher_decoded
                loss_info['teacher_latents_own_decoded'] = teacher_latents_own_decoded

        if self.warmed_up:
            loss_dis, loss_adv, feature_matching_distance = self.discriminator.loss(reals, decoded)
        else:
            # Before warmup the adversarial terms are zeroed (same device/dtype
            # as the data via .to(reals)).
            loss_dis = torch.tensor(0.).to(reals)
            loss_adv = torch.tensor(0.).to(reals)
            feature_matching_distance = torch.tensor(0.).to(reals)

        loss_info["loss_dis"] = loss_dis
        loss_info["loss_adv"] = loss_adv
        loss_info["feature_matching_distance"] = feature_matching_distance

        opt_gen, opt_disc = self.optimizers()

        lr_schedulers = self.lr_schedulers()

        sched_gen = None
        sched_disc = None

        if lr_schedulers is not None:
            sched_gen, sched_disc = lr_schedulers

        # Train the discriminator
        # (odd global steps, and only once adversarial training is active)
        if self.global_step % 2 and self.warmed_up:
            loss, losses = self.losses_disc(loss_info)

            log_dict = {
                'train/disc_lr': opt_disc.param_groups[0]['lr']
            }

            opt_disc.zero_grad()
            self.manual_backward(loss)
            opt_disc.step()

            if sched_disc is not None:
                # sched step every step
                sched_disc.step()

        # Train the generator
        else:
            loss, losses = self.losses_gen(loss_info)

            if self.use_ema:
                self.autoencoder_ema.update()

            opt_gen.zero_grad()
            self.manual_backward(loss)
            opt_gen.step()

            if sched_gen is not None:
                # scheduler step every step
                sched_gen.step()

            log_dict = {
                'train/loss': loss.detach(),
                'train/latent_std': latents.std().detach(),
                'train/data_std': data_std.detach(),
                'train/gen_lr': opt_gen.param_groups[0]['lr']
            }

        # Log the individual loss terms from whichever branch ran.
        for loss_name, loss_value in losses.items():
            log_dict[f'train/{loss_name}'] = loss_value.detach()

        self.log_dict(log_dict, prog_bar=True, on_step=True)

        return loss

    def export_model(self, path, use_safetensors=False):
        """Save the (EMA, when available) autoencoder weights to *path*."""
        if self.autoencoder_ema is not None:
            model = self.autoencoder_ema.ema_model
        else:
            model = self.autoencoder

        if use_safetensors:
            save_model(model, path)
        else:
            torch.save({"state_dict": model.state_dict()}, path)
class DiffusionUncondTrainingWrapper(pl.LightningModule):
    '''
    Wrapper for training an unconditional audio diffusion model (like Dance Diffusion).

    Trains with the v-objective: the model predicts
    v = noise * alpha - x * sigma for continuous timesteps drawn from a
    Sobol sequence, and EMA weights are maintained for export.
    '''
    def __init__(
        self,
        model: DiffusionModelWrapper,
        lr: float = 1e-4
    ):
        super().__init__()

        self.diffusion = model

        # EMA copy of the inner model, updated every optimizer step.
        self.diffusion_ema = EMA(
            self.diffusion.model,
            beta=0.9999,
            power=3/4,
            update_every=1,
            update_after_step=1
        )

        self.lr = lr

        # Low-discrepancy sampler for the scalar diffusion timestep.
        self.rng = torch.quasirandom.SobolEngine(1, scramble=True)

        loss_modules = [
            MSELoss("v",
                    "targets",
                    weight=1.0,
                    name="mse_loss"
            )
        ]

        self.losses = MultiLoss(loss_modules)

    def configure_optimizers(self):
        return optim.Adam([*self.diffusion.parameters()], lr=self.lr)

    def training_step(self, batch, batch_idx):
        """One v-objective diffusion training step."""
        reals = batch[0]

        # Remove the extra batch dimension added by WebDataset batching.
        if reals.ndim == 4 and reals.shape[0] == 1:
            reals = reals[0]

        # Draw uniformly distributed continuous timesteps
        t = self.rng.draw(reals.shape[0])[:, 0].to(self.device)

        # Calculate the noise schedule parameters for those timesteps
        alphas, sigmas = get_alphas_sigmas(t)

        diffusion_input = reals

        loss_info = {}

        loss_info["audio_reals"] = diffusion_input

        # Diffuse in latent space when a pretransform (e.g. an autoencoder)
        # is attached; gradients flow through only if enable_grad is set.
        if self.diffusion.pretransform is not None:
            with torch.set_grad_enabled(self.diffusion.pretransform.enable_grad):
                diffusion_input = self.diffusion.pretransform.encode(diffusion_input)

        loss_info["reals"] = diffusion_input

        # Combine the ground truth data and the noise
        alphas = alphas[:, None, None]
        sigmas = sigmas[:, None, None]
        noise = torch.randn_like(diffusion_input)
        noised_inputs = diffusion_input * alphas + noise * sigmas
        # v-objective target.
        targets = noise * alphas - diffusion_input * sigmas

        with torch.cuda.amp.autocast():
            v = self.diffusion(noised_inputs, t)

            loss_info.update({
                "v": v,
                "targets": targets
            })

            loss, losses = self.losses(loss_info)

        log_dict = {
            'train/loss': loss.detach(),
            'train/std_data': diffusion_input.std(),
        }

        for loss_name, loss_value in losses.items():
            log_dict[f"train/{loss_name}"] = loss_value.detach()

        self.log_dict(log_dict, prog_bar=True, on_step=True)
        return loss

    def on_before_zero_grad(self, *args, **kwargs):
        # Lightning hook: refresh the EMA weights after each optimizer step.
        self.diffusion_ema.update()

    def export_model(self, path, use_safetensors=False):
        """Swap in the EMA weights and save the wrapped model to *path*."""
        self.diffusion.model = self.diffusion_ema.ema_model

        if use_safetensors:
            save_file(self.diffusion.state_dict(), path)
        else:
            torch.save({"state_dict": self.diffusion.state_dict()}, path)
class DiffusionCondTrainingWrapper(pl.LightningModule):
    '''
    Wrapper for training a conditional audio diffusion model.

    Trains with an MSE loss on the v-prediction
    (v = alpha * noise - sigma * x), optionally masked by the dataset's
    padding masks, and optionally augmented with multi-resolution STFT
    reconstruction losses on the decoded audio.
    '''
    def __init__(
            self,
            model: ConditionedDiffusionModelWrapper,
            lr: float = None,
            causal_dropout: float = 0.0,
            mask_padding: bool = False,
            mask_padding_dropout: float = 0.0,
            use_ema: bool = True,
            log_loss_info: bool = False,
            optimizer_configs: dict = None,
            use_reconstruction_loss: bool = False
    ):
        '''
        Args:
            model: conditioned diffusion model to train
            lr: learning rate (ignored when optimizer_configs is given)
            causal_dropout: probability of running a step with causal=True
            mask_padding: mask the MSE loss with the dataset padding masks
            mask_padding_dropout: probability of dropping the padding mask for
                a step, so the model also learns to generate silence padding
            use_ema: keep an EMA copy of the weights for export
            log_loss_info: additionally log sigma-bucketed loss statistics
            optimizer_configs: full optimizer/scheduler configuration dict
            use_reconstruction_loss: add audio-domain STFT losses
        '''
        super().__init__()

        self.diffusion = model

        if use_ema:
            # EMA copy of the inner model, used for export
            self.diffusion_ema = EMA(
                self.diffusion.model,
                beta=0.9999,
                power=3/4,
                update_every=1,
                update_after_step=1,
                include_online_model=False
            )
        else:
            self.diffusion_ema = None

        self.mask_padding = mask_padding
        self.mask_padding_dropout = mask_padding_dropout

        # Low-discrepancy sampler for timesteps: covers [0, 1) more evenly
        # than independent uniform draws
        self.rng = torch.quasirandom.SobolEngine(1, scramble=True)

        self.causal_dropout = causal_dropout

        self.loss_modules = [
            MSELoss("v",
                    "targets",
                    weight=1.0,
                    mask_key="padding_mask" if self.mask_padding else None,
                    name="mse_loss"
            )
        ]

        self.use_reconstruction_loss = use_reconstruction_loss

        if use_reconstruction_loss:
            # Multi-scale STFT settings shared by all reconstruction losses
            scales = [2048, 1024, 512, 256, 128, 64, 32]
            hop_sizes = []
            win_lengths = []
            overlap = 0.75
            for s in scales:
                hop_sizes.append(int(s * (1 - overlap)))
                win_lengths.append(s)

            sample_rate = model.sample_rate

            stft_loss_args = {
                "fft_sizes": scales,
                "hop_sizes": hop_sizes,
                "win_lengths": win_lengths,
                "perceptual_weighting": True
            }

            out_channels = model.io_channels

            if model.pretransform is not None:
                out_channels = model.pretransform.io_channels

            self.audio_out_channels = out_channels

            if self.audio_out_channels == 2:
                self.sdstft = auraloss.freq.SumAndDifferenceSTFTLoss(sample_rate=sample_rate, **stft_loss_args)
                self.lrstft = auraloss.freq.MultiResolutionSTFTLoss(sample_rate=sample_rate, **stft_loss_args)

                # Add left and right channel reconstruction losses in addition to the sum and difference
                self.loss_modules += [
                    AuralossLoss(self.lrstft, 'audio_reals_left', 'pred_left', name='stft_loss_left', weight=0.05),
                    AuralossLoss(self.lrstft, 'audio_reals_right', 'pred_right', name='stft_loss_right', weight=0.05),
                ]
            else:
                self.sdstft = auraloss.freq.MultiResolutionSTFTLoss(sample_rate=sample_rate, **stft_loss_args)

            self.loss_modules.append(
                AuralossLoss(self.sdstft, 'audio_reals', 'audio_pred', name='mrstft_loss', weight=0.1), # Reconstruction loss
            )

        self.losses = MultiLoss(self.loss_modules)

        self.log_loss_info = log_loss_info

        assert lr is not None or optimizer_configs is not None, "Must specify either lr or optimizer_configs in training config"

        if optimizer_configs is None:
            # Fall back to a bare Adam config built from the scalar lr
            optimizer_configs = {
                "diffusion": {
                    "optimizer": {
                        "type": "Adam",
                        "config": {
                            "lr": lr
                        }
                    }
                }
            }
        else:
            if lr is not None:
                print(f"WARNING: learning_rate and optimizer_configs both specified in config. Ignoring learning_rate and using optimizer_configs.")

        self.optimizer_configs = optimizer_configs

    def configure_optimizers(self):
        """Create the diffusion optimizer (and optional per-step scheduler) from config."""
        diffusion_opt_config = self.optimizer_configs['diffusion']
        opt_diff = create_optimizer_from_config(diffusion_opt_config['optimizer'], self.diffusion.parameters())

        if "scheduler" in diffusion_opt_config:
            sched_diff = create_scheduler_from_config(diffusion_opt_config['scheduler'], opt_diff)
            sched_diff_config = {
                "scheduler": sched_diff,
                "interval": "step"
            }
            return [opt_diff], [sched_diff_config]

        return [opt_diff]

    def training_step(self, batch, batch_idx):
        """Run one conditional diffusion training step and return the scalar loss."""
        reals, metadata = batch

        p = Profiler()

        # Collapse an accidental extra leading batch dimension of size 1
        if reals.ndim == 4 and reals.shape[0] == 1:
            reals = reals[0]

        loss_info = {}

        # Draw uniformly distributed continuous timesteps
        t = self.rng.draw(reals.shape[0])[:, 0].to(self.device)

        # Replace 1% of t with ones to ensure training on terminal SNR
        t = torch.where(torch.rand_like(t) < 0.01, torch.ones_like(t), t)

        # Calculate the noise schedule parameters for those timesteps
        alphas, sigmas = get_alphas_sigmas(t)

        diffusion_input = reals
        loss_info["audio_reals"] = diffusion_input

        p.tick("setup")

        with torch.cuda.amp.autocast():
            conditioning = self.diffusion.conditioner(metadata, self.device)

        # If mask_padding is on, randomly drop the padding masks to allow for learning silence padding
        use_padding_mask = self.mask_padding and random.random() > self.mask_padding_dropout

        # Create batch tensor of attention masks from the "mask" field of the metadata array
        if use_padding_mask:
            padding_masks = torch.stack([md["padding_mask"][0] for md in metadata], dim=0).to(self.device) # Shape (batch_size, sequence_length)

        p.tick("conditioning")

        if self.diffusion.pretransform is not None:
            self.diffusion.pretransform.to(self.device)

            # BUGFIX: the original used `with A() and B():`, which only enters
            # one of the two context managers (`and` returns a single operand),
            # silently skipping autocast. List both managers instead.
            with torch.cuda.amp.autocast(), torch.set_grad_enabled(self.diffusion.pretransform.enable_grad):
                diffusion_input = self.diffusion.pretransform.encode(diffusion_input)
                p.tick("pretransform")

                # If mask_padding is on, interpolate the padding masks to the size of the pretransformed input
                if use_padding_mask:
                    padding_masks = F.interpolate(padding_masks.unsqueeze(1).float(), size=diffusion_input.shape[2], mode="nearest").squeeze(1).bool()

        # Combine the ground truth data and the noise
        alphas = alphas[:, None, None]
        sigmas = sigmas[:, None, None]
        noise = torch.randn_like(diffusion_input)
        noised_inputs = diffusion_input * alphas + noise * sigmas
        targets = noise * alphas - diffusion_input * sigmas  # v-objective target

        p.tick("noise")

        extra_args = {}

        if self.causal_dropout > 0.0:
            extra_args["causal"] = random.random() < self.causal_dropout

        if use_padding_mask:
            extra_args["mask"] = padding_masks

        with torch.cuda.amp.autocast():
            p.tick("amp")
            v = self.diffusion(noised_inputs, t, cond=conditioning, cfg_dropout_prob = 0.1, **extra_args)
            p.tick("diffusion")

            loss_info.update({
                "v": v,
                "targets": targets,
                "padding_mask": padding_masks if use_padding_mask else None,
            })

            if self.use_reconstruction_loss:
                # Recover x0 from the v prediction and decode to audio for STFT losses
                pred = noised_inputs * alphas - v * sigmas

                loss_info["pred"] = pred

                if self.diffusion.pretransform is not None:
                    pred = self.diffusion.pretransform.decode(pred)
                    loss_info["audio_pred"] = pred

                if self.audio_out_channels == 2:
                    loss_info["pred_left"] = pred[:, 0:1, :]
                    loss_info["pred_right"] = pred[:, 1:2, :]
                    loss_info["audio_reals_left"] = loss_info["audio_reals"][:, 0:1, :]
                    loss_info["audio_reals_right"] = loss_info["audio_reals"][:, 1:2, :]

            loss, losses = self.losses(loss_info)

            p.tick("loss")

            if self.log_loss_info:
                # Loss debugging logs
                num_loss_buckets = 10
                bucket_size = 1 / num_loss_buckets
                loss_all = F.mse_loss(v, targets, reduction="none")

                sigmas = rearrange(self.all_gather(sigmas), "w b c n -> (w b) c n").squeeze()

                # gather loss_all across all GPUs
                loss_all = rearrange(self.all_gather(loss_all), "w b c n -> (w b) c n")

                # Bucket loss values based on corresponding sigma values, bucketing sigma values by bucket_size
                loss_all = torch.stack([loss_all[(sigmas >= i) & (sigmas < i + bucket_size)].mean() for i in torch.arange(0, 1, bucket_size).to(self.device)])

                # Log bucketed losses with corresponding sigma bucket values, if it's not NaN
                debug_log_dict = {
                    f"model/loss_all_{i/num_loss_buckets:.1f}": loss_all[i].detach() for i in range(num_loss_buckets) if not torch.isnan(loss_all[i])
                }

                self.log_dict(debug_log_dict)

        log_dict = {
            'train/loss': loss.detach(),
            'train/std_data': diffusion_input.std(),
            'train/lr': self.trainer.optimizers[0].param_groups[0]['lr']
        }

        for loss_name, loss_value in losses.items():
            log_dict[f"train/{loss_name}"] = loss_value.detach()

        self.log_dict(log_dict, prog_bar=True, on_step=True)
        p.tick("log")
        #print(f"Profiler: {p}")
        return loss

    def on_before_zero_grad(self, *args, **kwargs):
        """Lightning hook: update the EMA copy after each optimizer step."""
        if self.diffusion_ema is not None:
            self.diffusion_ema.update()

    def export_model(self, path, use_safetensors=False):
        """Export the (EMA, if available) model weights to ``path``."""
        if self.diffusion_ema is not None:
            self.diffusion.model = self.diffusion_ema.ema_model

        if use_safetensors:
            save_file(self.diffusion.state_dict(), path)
        else:
            torch.save({"state_dict": self.diffusion.state_dict()}, path)
class DiffusionCondInpaintTrainingWrapper(pl.LightningModule):
    '''
    Wrapper for training a conditional audio diffusion model for inpainting.

    In addition to the usual conditioning, a randomly masked copy of the
    input ("inpaint_masked_input") and the mask itself ("inpaint_mask") are
    passed to the model so it learns to fill in the masked regions.
    '''
    def __init__(
            self,
            model: ConditionedDiffusionModelWrapper,
            lr: float = 1e-4,
            max_mask_segments = 10
    ):
        '''
        Args:
            model: conditioned diffusion model to train
            lr: Adam learning rate
            max_mask_segments: maximum number of masked segments per example
        '''
        super().__init__()

        self.diffusion = model

        # EMA copy of the inner model, used for export
        self.diffusion_ema = EMA(
            self.diffusion.model,
            beta=0.9999,
            power=3/4,
            update_every=1,
            update_after_step=1,
            include_online_model=False
        )

        self.lr = lr
        self.max_mask_segments = max_mask_segments

        # Low-discrepancy sampler for timesteps
        self.rng = torch.quasirandom.SobolEngine(1, scramble=True)

        self.loss_modules = [
            MSELoss("v",
                    "targets",
                    weight=1.0,
                    name="mse_loss"
            )
        ]

        self.losses = MultiLoss(self.loss_modules)

    def configure_optimizers(self):
        """Adam over all diffusion parameters at the configured learning rate."""
        return optim.Adam([*self.diffusion.parameters()], lr=self.lr)

    def random_mask(self, sequence, max_mask_length):
        """Zero out random spans of ``sequence`` per batch element.

        Picks one of three mask styles per element: multiple random segments,
        a full mask, or a causal (suffix) mask. Returns the masked sequence
        and the binary mask (1 = keep, 0 = masked).
        """
        b, _, sequence_length = sequence.size()

        # Create a mask tensor for each batch element
        masks = []

        for i in range(b):
            mask_type = random.randint(0, 2)

            if mask_type == 0: # Random mask with multiple segments
                num_segments = random.randint(1, self.max_mask_segments)
                max_segment_length = max_mask_length // num_segments

                # BUGFIX: random.sample raises ValueError if asked for more
                # distinct lengths than the population holds (short sequences
                # with many segments); clamp the draw count to the population.
                num_segments = min(num_segments, max_segment_length)
                segment_lengths = random.sample(range(1, max_segment_length + 1), num_segments)

                mask = torch.ones((1, 1, sequence_length))
                for length in segment_lengths:
                    mask_start = random.randint(0, sequence_length - length)
                    mask[:, :, mask_start:mask_start + length] = 0

            elif mask_type == 1: # Full mask
                mask = torch.zeros((1, 1, sequence_length))

            elif mask_type == 2: # Causal mask
                mask = torch.ones((1, 1, sequence_length))
                mask_length = random.randint(1, max_mask_length)
                mask[:, :, -mask_length:] = 0

            mask = mask.to(sequence.device)
            masks.append(mask)

        # Concatenate the mask tensors into a single tensor
        mask = torch.cat(masks, dim=0).to(sequence.device)

        # Apply the mask to the sequence tensor for each batch element
        masked_sequence = sequence * mask

        return masked_sequence, mask

    def training_step(self, batch, batch_idx):
        """Run one inpainting diffusion training step and return the scalar loss."""
        reals, metadata = batch

        p = Profiler()

        # Collapse an accidental extra leading batch dimension of size 1
        if reals.ndim == 4 and reals.shape[0] == 1:
            reals = reals[0]

        # Draw uniformly distributed continuous timesteps
        t = self.rng.draw(reals.shape[0])[:, 0].to(self.device)

        # Calculate the noise schedule parameters for those timesteps
        alphas, sigmas = get_alphas_sigmas(t)

        diffusion_input = reals

        p.tick("setup")

        with torch.cuda.amp.autocast():
            conditioning = self.diffusion.conditioner(metadata, self.device)

        p.tick("conditioning")

        if self.diffusion.pretransform is not None:
            self.diffusion.pretransform.to(self.device)

            # BUGFIX: the original used `with A() and B():`, which only enters
            # one of the two context managers; list both managers instead.
            with torch.cuda.amp.autocast(), torch.set_grad_enabled(self.diffusion.pretransform.enable_grad):
                diffusion_input = self.diffusion.pretransform.encode(diffusion_input)
                p.tick("pretransform")

        # Max mask size is the full sequence length
        max_mask_length = diffusion_input.shape[2]

        # Create a mask of random length for a random slice of the input
        masked_input, mask = self.random_mask(diffusion_input, max_mask_length)

        conditioning['inpaint_mask'] = [mask]
        conditioning['inpaint_masked_input'] = [masked_input]

        # Combine the ground truth data and the noise
        alphas = alphas[:, None, None]
        sigmas = sigmas[:, None, None]
        noise = torch.randn_like(diffusion_input)
        noised_inputs = diffusion_input * alphas + noise * sigmas
        targets = noise * alphas - diffusion_input * sigmas  # v-objective target

        p.tick("noise")

        with torch.cuda.amp.autocast():
            p.tick("amp")
            v = self.diffusion(noised_inputs, t, cond=conditioning, cfg_dropout_prob = 0.1)
            p.tick("diffusion")

        loss_info = {
            "v": v,
            "targets": targets
        }

        loss, losses = self.losses(loss_info)

        log_dict = {
            'train/loss': loss.detach(),
            'train/std_data': diffusion_input.std(),
        }

        for loss_name, loss_value in losses.items():
            log_dict[f"train/{loss_name}"] = loss_value.detach()

        self.log_dict(log_dict, prog_bar=True, on_step=True)
        p.tick("log")
        #print(f"Profiler: {p}")
        return loss

    def on_before_zero_grad(self, *args, **kwargs):
        """Lightning hook: update the EMA copy after each optimizer step."""
        self.diffusion_ema.update()

    def export_model(self, path, use_safetensors=True):
        """Export the EMA model weights to ``path``.

        ``use_safetensors`` defaults to True to preserve the original
        behavior (safetensors-only export) while matching the signature of
        the sibling wrappers.
        """
        self.diffusion.model = self.diffusion_ema.ema_model

        if use_safetensors:
            save_file(self.diffusion.state_dict(), path)
        else:
            torch.save({"state_dict": self.diffusion.state_dict()}, path)
class DiffusionAutoencoderTrainingWrapper(pl.LightningModule):
    '''
    Wrapper for training a diffusion autoencoder.

    The encoder produces latents that condition a diffusion decoder via
    input concatenation; training uses a v-objective MSE loss, optional
    bottleneck losses, and an optional audio-domain STFT reconstruction loss.
    '''
    def __init__(
        self,
        model: DiffusionAutoencoder,
        lr: float = 1e-4,
        ema_copy = None,
        use_reconstruction_loss: bool = False
    ):
        super().__init__()

        self.diffae = model

        # EMA copy of the full autoencoder, used for export
        self.diffae_ema = EMA(
            self.diffae,
            ema_model=ema_copy,
            beta=0.9999,
            power=3/4,
            update_every=1,
            update_after_step=1,
            include_online_model=False
        )

        self.lr = lr

        # Low-discrepancy sampler for timesteps: covers [0, 1) more evenly
        # than independent uniform draws
        self.rng = torch.quasirandom.SobolEngine(1, scramble=True)

        loss_modules = [
            MSELoss("v",
                    "targets",
                    weight=1.0,
                    name="mse_loss"
            )
        ]

        if model.bottleneck is not None:
            # TODO: Use loss config for configurable bottleneck weights and reconstruction losses
            loss_modules += create_loss_modules_from_bottleneck(model.bottleneck, {})

        self.use_reconstruction_loss = use_reconstruction_loss

        if use_reconstruction_loss:
            # Multi-scale STFT settings shared by the reconstruction losses
            scales = [2048, 1024, 512, 256, 128, 64, 32]
            hop_sizes = []
            win_lengths = []
            overlap = 0.75
            for s in scales:
                hop_sizes.append(int(s * (1 - overlap)))
                win_lengths.append(s)

            sample_rate = model.sample_rate

            stft_loss_args = {
                "fft_sizes": scales,
                "hop_sizes": hop_sizes,
                "win_lengths": win_lengths,
                "perceptual_weighting": True
            }

            out_channels = model.out_channels

            # When a pretransform is present, decoded audio has the
            # pretransform's channel count rather than the model's
            if model.pretransform is not None:
                out_channels = model.pretransform.io_channels

            # Sum/difference loss for stereo, plain multi-resolution otherwise
            if out_channels == 2:
                self.sdstft = auraloss.freq.SumAndDifferenceSTFTLoss(sample_rate=sample_rate, **stft_loss_args)
            else:
                self.sdstft = auraloss.freq.MultiResolutionSTFTLoss(sample_rate=sample_rate, **stft_loss_args)

            loss_modules.append(
                AuralossLoss(self.sdstft, 'audio_reals', 'audio_pred', name='mrstft_loss', weight=0.1), # Reconstruction loss
            )

        self.losses = MultiLoss(loss_modules)

    def configure_optimizers(self):
        """Adam over all autoencoder parameters at the configured learning rate."""
        return optim.Adam([*self.diffae.parameters()], lr=self.lr)

    def training_step(self, batch, batch_idx):
        """Run one diffusion-autoencoder training step and return the scalar loss."""
        reals = batch[0]

        # Collapse an accidental extra leading batch dimension of size 1
        if reals.ndim == 4 and reals.shape[0] == 1:
            reals = reals[0]

        loss_info = {}

        loss_info["audio_reals"] = reals

        # Work in the pretransform's latent space if one is configured;
        # the pretransform itself is not trained here
        if self.diffae.pretransform is not None:
            with torch.no_grad():
                reals = self.diffae.pretransform.encode(reals)

        loss_info["reals"] = reals

        #Encode reals, skipping the pretransform since it was already applied
        latents, encoder_info = self.diffae.encode(reals, return_info=True, skip_pretransform=True)

        loss_info["latents"] = latents
        # encoder_info may carry bottleneck terms consumed by the loss modules
        loss_info.update(encoder_info)

        if self.diffae.decoder is not None:
            latents = self.diffae.decoder(latents)

        # Upsample latents to match diffusion length
        if latents.shape[2] != reals.shape[2]:
            latents = F.interpolate(latents, size=reals.shape[2], mode='nearest')

        loss_info["latents_upsampled"] = latents

        # Draw uniformly distributed continuous timesteps
        t = self.rng.draw(reals.shape[0])[:, 0].to(self.device)

        # Calculate the noise schedule parameters for those timesteps
        alphas, sigmas = get_alphas_sigmas(t)

        # Combine the ground truth data and the noise
        alphas = alphas[:, None, None]   # broadcast over (channels, time)
        sigmas = sigmas[:, None, None]
        noise = torch.randn_like(reals)
        noised_reals = reals * alphas + noise * sigmas
        targets = noise * alphas - reals * sigmas  # v-objective target

        with torch.cuda.amp.autocast():
            # Latents condition the diffusion model via input concatenation
            v = self.diffae.diffusion(noised_reals, t, input_concat_cond=latents)

            loss_info.update({
                "v": v,
                "targets": targets
            })

            if self.use_reconstruction_loss:
                # Recover x0 from the v prediction and decode to audio
                pred = noised_reals * alphas - v * sigmas

                loss_info["pred"] = pred

                if self.diffae.pretransform is not None:
                    pred = self.diffae.pretransform.decode(pred)
                    loss_info["audio_pred"] = pred

            loss, losses = self.losses(loss_info)

        log_dict = {
            'train/loss': loss.detach(),
            'train/std_data': reals.std(),
            'train/latent_std': latents.std(),
        }

        for loss_name, loss_value in losses.items():
            log_dict[f"train/{loss_name}"] = loss_value.detach()

        self.log_dict(log_dict, prog_bar=True, on_step=True)
        return loss

    def on_before_zero_grad(self, *args, **kwargs):
        """Lightning hook: update the EMA copy after each optimizer step."""
        self.diffae_ema.update()

    def export_model(self, path, use_safetensors=False):
        """Export the EMA autoencoder weights to ``path``."""
        model = self.diffae_ema.ema_model

        if use_safetensors:
            save_file(model.state_dict(), path)
        else:
            torch.save({"state_dict": model.state_dict()}, path)
class DiffusionPriorTrainingWrapper(pl.LightningModule):
    '''
    Wrapper for training a diffusion prior for inverse problems
    Prior types:
        mono_stereo: The prior is conditioned on a mono version of the audio to generate a stereo version
    '''
    def __init__(
        self,
        model: ConditionedDiffusionModelWrapper,
        lr: float = 1e-4,
        ema_copy = None,
        prior_type: PriorType = PriorType.MonoToStereo,
        use_reconstruction_loss: bool = False,
        log_loss_info: bool = False,
    ):
        '''
        Args:
            model: conditioned diffusion model to train
            lr: Adam learning rate
            ema_copy: optional pre-built model to hold the EMA weights
            prior_type: which inverse problem to condition on
            use_reconstruction_loss: add audio-domain STFT losses
            log_loss_info: additionally log sigma-bucketed loss statistics
        '''
        super().__init__()

        self.diffusion = model

        # EMA copy of the model, kept for potential export
        self.diffusion_ema = EMA(
            self.diffusion,
            ema_model=ema_copy,
            beta=0.9999,
            power=3/4,
            update_every=1,
            update_after_step=1,
            include_online_model=False
        )

        self.lr = lr

        # Low-discrepancy sampler for timesteps
        self.rng = torch.quasirandom.SobolEngine(1, scramble=True)

        self.log_loss_info = log_loss_info

        # BUGFIX: this list was previously a local `loss_modules` while the
        # reconstruction-loss branch below mutated the (nonexistent)
        # attribute `self.loss_modules`, raising AttributeError whenever
        # use_reconstruction_loss was enabled. Store it on self up front and
        # use it consistently.
        self.loss_modules = [
            MSELoss("v",
                    "targets",
                    weight=1.0,
                    name="mse_loss"
            )
        ]

        self.use_reconstruction_loss = use_reconstruction_loss

        if use_reconstruction_loss:
            # Multi-scale STFT settings shared by all reconstruction losses
            scales = [2048, 1024, 512, 256, 128, 64, 32]
            hop_sizes = []
            win_lengths = []
            overlap = 0.75
            for s in scales:
                hop_sizes.append(int(s * (1 - overlap)))
                win_lengths.append(s)

            sample_rate = model.sample_rate

            stft_loss_args = {
                "fft_sizes": scales,
                "hop_sizes": hop_sizes,
                "win_lengths": win_lengths,
                "perceptual_weighting": True
            }

            out_channels = model.io_channels

            # Channel count in the audio domain, used both here and when
            # splitting the decoded prediction in training_step
            self.audio_out_channels = out_channels

            if model.pretransform is not None:
                out_channels = model.pretransform.io_channels

            if self.audio_out_channels == 2:
                self.sdstft = auraloss.freq.SumAndDifferenceSTFTLoss(sample_rate=sample_rate, **stft_loss_args)
                self.lrstft = auraloss.freq.MultiResolutionSTFTLoss(sample_rate=sample_rate, **stft_loss_args)

                # Add left and right channel reconstruction losses in addition to the sum and difference
                self.loss_modules += [
                    AuralossLoss(self.lrstft, 'audio_reals_left', 'pred_left', name='stft_loss_left', weight=0.05),
                    AuralossLoss(self.lrstft, 'audio_reals_right', 'pred_right', name='stft_loss_right', weight=0.05),
                ]
            else:
                self.sdstft = auraloss.freq.MultiResolutionSTFTLoss(sample_rate=sample_rate, **stft_loss_args)

            self.loss_modules.append(
                AuralossLoss(self.sdstft, 'audio_reals', 'audio_pred', name='mrstft_loss', weight=0.1), # Reconstruction loss
            )

        self.losses = MultiLoss(self.loss_modules)

        self.prior_type = prior_type

    def configure_optimizers(self):
        """Adam over all diffusion parameters at the configured learning rate."""
        return optim.Adam([*self.diffusion.parameters()], lr=self.lr)

    def training_step(self, batch, batch_idx):
        """Run one diffusion-prior training step and return the scalar loss."""
        reals, metadata = batch

        # Collapse an accidental extra leading batch dimension of size 1
        if reals.ndim == 4 and reals.shape[0] == 1:
            reals = reals[0]

        loss_info = {}

        loss_info["audio_reals"] = reals

        # Build the conditioning "source" signal for the chosen inverse problem
        if self.prior_type == PriorType.MonoToStereo:
            source = reals.mean(dim=1, keepdim=True).repeat(1, reals.shape[1], 1).to(self.device)
            loss_info["audio_reals_mono"] = source
        elif self.prior_type == PriorType.SourceSeparation:
            source = create_source_mixture(reals)
            loss_info["audio_mixture"] = source
        else:
            raise ValueError(f"Unknown prior type {self.prior_type}")

        # Encode both target and source into the pretransform latent space
        if self.diffusion.pretransform is not None:
            with torch.no_grad():
                reals = self.diffusion.pretransform.encode(reals)

                if self.prior_type in [PriorType.MonoToStereo, PriorType.SourceSeparation]:
                    source = self.diffusion.pretransform.encode(source)

        if self.diffusion.conditioner is not None:
            with torch.cuda.amp.autocast():
                conditioning = self.diffusion.conditioner(metadata, self.device)
        else:
            conditioning = {}

        loss_info["reals"] = reals

        # Draw uniformly distributed continuous timesteps
        t = self.rng.draw(reals.shape[0])[:, 0].to(self.device)

        # Calculate the noise schedule parameters for those timesteps
        alphas, sigmas = get_alphas_sigmas(t)

        # Combine the ground truth data and the noise
        alphas = alphas[:, None, None]
        sigmas = sigmas[:, None, None]
        noise = torch.randn_like(reals)
        noised_reals = reals * alphas + noise * sigmas
        targets = noise * alphas - reals * sigmas  # v-objective target

        with torch.cuda.amp.autocast():
            conditioning['source'] = [source]

            v = self.diffusion(noised_reals, t, cond=conditioning, cfg_dropout_prob = 0.1)

            loss_info.update({
                "v": v,
                "targets": targets
            })

            if self.use_reconstruction_loss:
                # Recover x0 from the v prediction and decode to audio
                pred = noised_reals * alphas - v * sigmas

                loss_info["pred"] = pred

                if self.diffusion.pretransform is not None:
                    pred = self.diffusion.pretransform.decode(pred)
                    loss_info["audio_pred"] = pred

                if self.audio_out_channels == 2:
                    loss_info["pred_left"] = pred[:, 0:1, :]
                    loss_info["pred_right"] = pred[:, 1:2, :]
                    loss_info["audio_reals_left"] = loss_info["audio_reals"][:, 0:1, :]
                    loss_info["audio_reals_right"] = loss_info["audio_reals"][:, 1:2, :]

            loss, losses = self.losses(loss_info)

            if self.log_loss_info:
                # Loss debugging logs
                num_loss_buckets = 10
                bucket_size = 1 / num_loss_buckets
                loss_all = F.mse_loss(v, targets, reduction="none")

                sigmas = rearrange(self.all_gather(sigmas), "w b c n -> (w b) c n").squeeze()

                # gather loss_all across all GPUs
                loss_all = rearrange(self.all_gather(loss_all), "w b c n -> (w b) c n")

                # Bucket loss values based on corresponding sigma values, bucketing sigma values by bucket_size
                loss_all = torch.stack([loss_all[(sigmas >= i) & (sigmas < i + bucket_size)].mean() for i in torch.arange(0, 1, bucket_size).to(self.device)])

                # Log bucketed losses with corresponding sigma bucket values, if it's not NaN
                debug_log_dict = {
                    f"model/loss_all_{i/num_loss_buckets:.1f}": loss_all[i].detach() for i in range(num_loss_buckets) if not torch.isnan(loss_all[i])
                }

                self.log_dict(debug_log_dict)

        log_dict = {
            'train/loss': loss.detach(),
            'train/std_data': reals.std()
        }

        for loss_name, loss_value in losses.items():
            log_dict[f"train/{loss_name}"] = loss_value.detach()

        self.log_dict(log_dict, prog_bar=True, on_step=True)
        return loss

    def on_before_zero_grad(self, *args, **kwargs):
        """Lightning hook: update the EMA copy after each optimizer step."""
        self.diffusion_ema.update()

    def export_model(self, path, use_safetensors=False):
        """Export the online (non-EMA) model weights to ``path``."""
        #model = self.diffusion_ema.ema_model
        model = self.diffusion

        if use_safetensors:
            save_file(model.state_dict(), path)
        else:
            torch.save({"state_dict": model.state_dict()}, path)
class PriorType(Enum):
    """Inverse problems a diffusion prior can be trained for."""

    MonoToStereo = 1      # generate stereo audio conditioned on a mono mixdown
    SourceSeparation = 2  # generate individual sources conditioned on a mixture
class MusicGenTrainingWrapper(pl.LightningModule):
    """Wrapper for fine-tuning a MusicGen language model.

    The compression model (audio tokenizer) is frozen; only the LM is
    trained, with per-codebook cross-entropy on the token predictions.
    """
    def __init__(self, musicgen_model, lr = 1e-4, ema_copy=None):
        super().__init__()

        self.musicgen_model: MusicGen = musicgen_model

        # Freeze the audio tokenizer; only the LM is trained
        self.musicgen_model.compression_model.requires_grad_(False)

        self.lm = self.musicgen_model.lm

        self.lm.to(torch.float32).train().requires_grad_(True)

        self.lm_ema = EMA(self.lm, ema_model=ema_copy, beta=0.99, update_every=10)

        # NOTE(review): this dropout module appears unused — training_step
        # uses self.lm.cfg_dropout instead; verify before removing
        self.cfg_dropout = ClassifierFreeGuidanceDropout(0.1)

        self.lr = lr

    def configure_optimizers(self):
        """AdamW over the LM parameters with MusicGen's published hyperparameters."""
        optimizer = optim.AdamW([*self.lm.parameters()], lr=self.lr, betas=(0.9, 0.95), weight_decay=0.1)

        return optimizer

    # Copied and modified from https://github.com/facebookresearch/audiocraft/blob/main/audiocraft/solvers/musicgen.py under MIT license
    # License can be found in LICENSES/LICENSE_META.txt
    def _compute_cross_entropy(
        self, logits: torch.Tensor, targets: torch.Tensor, mask: torch.Tensor
    ) -> tp.Tuple[torch.Tensor, tp.List[torch.Tensor]]:
        """Compute cross entropy between multi-codebook targets and model's logits.
        The cross entropy is computed per codebook to provide codebook-level cross entropy.
        Valid timesteps for each of the codebook are pulled from the mask, where invalid
        timesteps are set to 0.

        Args:
            logits (torch.Tensor): Model's logits of shape [B, K, T, card].
            targets (torch.Tensor): Target codes, of shape [B, K, T].
            mask (torch.Tensor): Mask for valid target codes, of shape [B, K, T].
        Returns:
            ce (torch.Tensor): Cross entropy averaged over the codebooks
            ce_per_codebook (list of torch.Tensor): Cross entropy per codebook (detached).
        """
        B, K, T = targets.shape
        assert logits.shape[:-1] == targets.shape
        assert mask.shape == targets.shape
        ce = torch.zeros([], device=targets.device)
        ce_per_codebook: tp.List[torch.Tensor] = []
        for k in range(K):
            logits_k = logits[:, k, ...].contiguous().view(-1, logits.size(-1))  # [B x T, card]
            targets_k = targets[:, k, ...].contiguous().view(-1)  # [B x T]
            mask_k = mask[:, k, ...].contiguous().view(-1)  # [B x T]
            # Boolean-mask out padded/invalid positions before the CE
            ce_targets = targets_k[mask_k]
            ce_logits = logits_k[mask_k]
            q_ce = F.cross_entropy(ce_logits, ce_targets)
            ce += q_ce
            ce_per_codebook.append(q_ce.detach())
        # average cross entropy across codebooks
        ce = ce / K
        return ce, ce_per_codebook

    def training_step(self, batch, batch_idx):
        """Run one LM fine-tuning step and return the cross-entropy loss."""
        reals, metadata = batch

        # Collapse an accidental extra leading batch dimension of size 1
        if reals.ndim == 4 and reals.shape[0] == 1:
            reals = reals[0]

        # Convert reals to mono if necessary
        if self.musicgen_model.audio_channels == 1:
            reals = reals.mean(dim=1, keepdim=True)

        # Make sure all submodules are on the right device and in the right mode
        self.musicgen_model.compression_model.to(self.device).eval()
        self.lm.to(self.device).train()
        self.lm.condition_provider.to(self.device).eval()

        self.lm.condition_provider.conditioners["description"].device = self.device
        self.lm.condition_provider.conditioners["description"].t5.to(self.device).eval()

        with torch.cuda.amp.autocast():
            # Tokenize the audio with the frozen compression model
            codes, _ = self.musicgen_model.compression_model.encode(reals) # [b, k, t]

            # Build text conditioning from prompts (truncated to 512 chars),
            # then apply the LM's CFG and attribute dropouts
            attributes = [ConditioningAttributes(text={'description': md["prompt"][0][:512]}) for md in metadata]
            attributes = self.lm.cfg_dropout(attributes)
            attributes = self.lm.att_dropout(attributes)
            tokenized = self.lm.condition_provider.tokenize(attributes)

            with torch.cuda.amp.autocast(enabled=False):
                condition_tensors = self.lm.condition_provider(tokenized)

            lm_output = self.lm.compute_predictions(
                codes=codes,
                conditions = [],
                condition_tensors = condition_tensors,
            )

            logits = lm_output.logits # [b, k, t, c]
            logits_mask = lm_output.mask # [b, k, t]

            cross_entropy, cross_entropy_per_codebook = self._compute_cross_entropy(logits, codes, logits_mask)

            loss = cross_entropy

        log_dict = {
            'train/loss': loss.detach(),
            'train/cross_entropy': cross_entropy.detach(),
            'train/perplexity': torch.exp(cross_entropy).detach(),
        }

        # Per-codebook cross-entropy/perplexity, 1-indexed to match audiocraft
        for k, ce_q in enumerate(cross_entropy_per_codebook):
            log_dict[f'cross_entropy_q{k + 1}'] = ce_q
            log_dict[f'perplexity_q{k + 1}'] = torch.exp(ce_q)

        self.log_dict(log_dict, prog_bar=True, on_step=True)
        return loss

    def on_before_zero_grad(self, *args, **kwargs):
        """Lightning hook: update the LM's EMA copy after each optimizer step."""
        self.lm_ema.update()

    def export_model(self, path):
        """Swap the EMA LM into the MusicGen model and save a torch checkpoint."""
        self.musicgen_model.lm = self.lm_ema.ema_model

        export_state_dict = {"state_dict": self.musicgen_model.state_dict()}

        torch.save(export_state_dict, path)
class AudioLanguageModelTrainingWrapper(pl.LightningModule):
    """Wrapper for training an audio language model over pretransform tokens.

    The pretransform (tokenizer) is frozen; the LM is trained with
    per-codebook cross-entropy, masked by the dataset padding masks.
    """
    def __init__(
            self,
            model: AudioLanguageModelWrapper,
            lr = 1e-4,
            use_ema=False,
            ema_copy=None,
            optimizer_configs: dict = None,
    ):
        super().__init__()

        self.model = model

        # The tokenizer is frozen; only the LM is trained
        self.model.pretransform.requires_grad_(False)

        self.model_ema = None
        if use_ema:
            self.model_ema = EMA(self.model, ema_model=ema_copy, beta=0.99, update_every=10)

        assert lr is not None or optimizer_configs is not None, "Must specify either lr or optimizer_configs in training config"

        if optimizer_configs is None:
            # Fall back to an AdamW config built from the scalar lr
            optimizer_configs = {
                "lm": {
                    "optimizer": {
                        "type": "AdamW",
                        "config": {
                            "lr": lr,
                            "betas": (0.9, 0.95),
                            "weight_decay": 0.1
                        }
                    }
                }
            }
        else:
            if lr is not None:
                print(f"WARNING: learning_rate and optimizer_configs both specified in config. Ignoring learning_rate and using optimizer_configs.")

        self.optimizer_configs = optimizer_configs

    def configure_optimizers(self):
        """Create the LM optimizer (and optional per-step scheduler) from config."""
        lm_opt_config = self.optimizer_configs['lm']
        opt_lm = create_optimizer_from_config(lm_opt_config['optimizer'], self.model.parameters())

        if "scheduler" in lm_opt_config:
            sched_lm = create_scheduler_from_config(lm_opt_config['scheduler'], opt_lm)
            sched_lm_config = {
                "scheduler": sched_lm,
                "interval": "step"
            }
            return [opt_lm], [sched_lm_config]

        return [opt_lm]

    # Copied and modified from https://github.com/facebookresearch/audiocraft/blob/main/audiocraft/solvers/musicgen.py under MIT license
    # License can be found in LICENSES/LICENSE_META.txt
    def _compute_cross_entropy(
        self, logits: torch.Tensor, targets: torch.Tensor, mask: torch.Tensor
    ) -> tp.Tuple[torch.Tensor, tp.List[torch.Tensor]]:
        """Compute cross entropy between multi-codebook targets and model's logits.
        The cross entropy is computed per codebook to provide codebook-level cross entropy.
        Valid timesteps for each of the codebook are pulled from the mask, where invalid
        timesteps are set to 0.

        Args:
            logits (torch.Tensor): Model's logits of shape [B, K, T, card].
            targets (torch.Tensor): Target codes, of shape [B, K, T].
            mask (torch.Tensor): Mask for valid target codes, of shape [B, K, T].
        Returns:
            ce (torch.Tensor): Cross entropy averaged over the codebooks
            ce_per_codebook (list of torch.Tensor): Cross entropy per codebook (detached).
        """
        B, K, T = targets.shape
        assert logits.shape[:-1] == targets.shape
        assert mask.shape == targets.shape
        ce = torch.zeros([], device=targets.device)
        ce_per_codebook: tp.List[torch.Tensor] = []
        for k in range(K):
            logits_k = logits[:, k, ...].contiguous().view(-1, logits.size(-1))  # [B x T, card]
            targets_k = targets[:, k, ...].contiguous().view(-1)  # [B x T]
            mask_k = mask[:, k, ...].contiguous().view(-1)  # [B x T]
            # Boolean-mask out padded/invalid positions before the CE
            ce_targets = targets_k[mask_k]
            ce_logits = logits_k[mask_k]
            q_ce = F.cross_entropy(ce_logits, ce_targets)
            ce += q_ce
            ce_per_codebook.append(q_ce.detach())
        # average cross entropy across codebooks
        ce = ce / K
        return ce, ce_per_codebook

    def training_step(self, batch, batch_idx):
        """Run one LM training step and return the cross-entropy loss."""
        reals, metadata = batch

        # Collapse an accidental extra leading batch dimension of size 1
        if reals.ndim == 4 and reals.shape[0] == 1:
            reals = reals[0]

        codes = self.model.pretransform.tokenize(reals)

        padding_masks = torch.stack([md["padding_mask"][0] for md in metadata], dim=0).to(self.device) # Shape (batch_size, sequence_length)

        # Interpolate padding masks to the same length as the codes
        padding_masks = F.interpolate(padding_masks.unsqueeze(1).float(), size=codes.shape[2], mode='nearest').bool()

        condition_tensors = None

        # If the model is conditioned, get the conditioning tensors
        if self.model.conditioner is not None:
            condition_tensors = self.model.conditioner(metadata, self.device)

        lm_output = self.model.compute_logits(codes, condition_tensors=condition_tensors, cfg_dropout_prob=0.1)

        logits = lm_output.logits # [b, k, t, c]
        logits_mask = lm_output.mask # [b, k, t]

        # Only count positions that are valid for both the LM and the padding
        logits_mask = logits_mask & padding_masks

        cross_entropy, cross_entropy_per_codebook = self._compute_cross_entropy(logits, codes, logits_mask)

        loss = cross_entropy

        log_dict = {
            'train/loss': loss.detach(),
            'train/cross_entropy': cross_entropy.detach(),
            'train/perplexity': torch.exp(cross_entropy).detach(),
            'train/lr': self.trainer.optimizers[0].param_groups[0]['lr']
        }

        # Per-codebook cross-entropy/perplexity, 1-indexed to match audiocraft
        for k, ce_q in enumerate(cross_entropy_per_codebook):
            log_dict[f'cross_entropy_q{k + 1}'] = ce_q
            log_dict[f'perplexity_q{k + 1}'] = torch.exp(ce_q)

        self.log_dict(log_dict, prog_bar=True, on_step=True)
        return loss

    def on_before_zero_grad(self, *args, **kwargs):
        """Lightning hook: update the EMA copy (if enabled) after each optimizer step."""
        if self.model_ema is not None:
            self.model_ema.update()

    def export_model(self, path, use_safetensors=False):
        """Export the (EMA, if available) model weights to ``path``."""
        model = self.model_ema.ema_model if self.model_ema is not None else self.model

        if use_safetensors:
            save_file(model.state_dict(), path)
        else:
            torch.save({"state_dict": model.state_dict()}, path)
def create_training_wrapper_from_config(model_config, model):
model_type = model_config.get('model_type', None)
assert model_type is not None, 'model_type must be specified in model config'
training_config = model_config.get('training', None)
assert training_config is not None, 'training config must be specified in model config'
if model_type == 'autoencoder':
from .autoencoders import AutoencoderTrainingWrapper
ema_copy = None
if training_config.get("use_ema", False):
ema_copy = create_model_from_config(model_config)
ema_copy = create_model_from_config(model_config) # I don't know why this needs to be called twice but it broke when I called it once
# Copy each weight to the ema copy
for name, param in model.state_dict().items():
if isinstance(param, Parameter):
# backwards compatibility for serialized parameters
param = param.data
ema_copy.state_dict()[name].copy_(param)
use_ema = training_config.get("use_ema", False)
latent_mask_ratio = training_config.get("latent_mask_ratio", 0.0)
teacher_model = training_config.get("teacher_model", None)
if teacher_model is not None:
teacher_model = create_model_from_config(teacher_model)
teacher_model = teacher_model.eval().requires_grad_(False)
teacher_model_ckpt = training_config.get("teacher_model_ckpt", None)
if teacher_model_ckpt is not None:
teacher_model.load_state_dict(torch.load(teacher_model_ckpt)["state_dict"])
else:
raise ValueError("teacher_model_ckpt must be specified if teacher_model is specified")
return AutoencoderTrainingWrapper(
model,
lr=training_config["learning_rate"],
warmup_steps=training_config.get("warmup_steps", 0),
encoder_freeze_on_warmup=training_config.get("encoder_freeze_on_warmup", False),
sample_rate=model_config["sample_rate"],
loss_config=training_config.get("loss_configs", None),
optimizer_configs=training_config.get("optimizer_configs", None),
use_ema=use_ema,
ema_copy=ema_copy if use_ema else None,
force_input_mono=training_config.get("force_input_mono", False),
latent_mask_ratio=latent_mask_ratio,
teacher_model=teacher_model
)
elif model_type == 'diffusion_uncond':
from .diffusion import DiffusionUncondTrainingWrapper
return DiffusionUncondTrainingWrapper(
model,
lr=training_config["learning_rate"],
)
elif model_type == 'diffusion_cond':
from .diffusion import DiffusionCondTrainingWrapper
return DiffusionCondTrainingWrapper(
model,
lr=training_config.get("learning_rate", None),
causal_dropout=training_config.get("causal_dropout", 0.0),
mask_padding=training_config.get("mask_padding", False),
mask_padding_dropout=training_config.get("mask_padding_dropout", 0.0),
use_ema = training_config.get("use_ema", True),
log_loss_info=training_config.get("log_loss_info", False),
optimizer_configs=training_config.get("optimizer_configs", None),
use_reconstruction_loss=training_config.get("use_reconstruction_loss", False),
)
elif model_type == 'diffusion_prior':
from .diffusion import DiffusionPriorTrainingWrapper
from ..models.diffusion_prior import PriorType
ema_copy = create_model_from_config(model_config)
# Copy each weight to the ema copy
for name, param in model.state_dict().items():
if isinstance(param, Parameter):
# backwards compatibility for serialized parameters
param = param.data
ema_copy.state_dict()[name].copy_(param)
prior_type = training_config.get("prior_type", "mono_stereo")
if prior_type == "mono_stereo":
prior_type_enum = PriorType.MonoToStereo
elif prior_type == "source_separation":
prior_type_enum = PriorType.SourceSeparation
else:
raise ValueError(f"Unknown prior type: {prior_type}")
return DiffusionPriorTrainingWrapper(
model,
lr=training_config["learning_rate"],
ema_copy=ema_copy,
prior_type=prior_type_enum,
log_loss_info=training_config.get("log_loss_info", False),
use_reconstruction_loss=training_config.get("use_reconstruction_loss", False),
)
elif model_type == 'diffusion_cond_inpaint':
from .diffusion import DiffusionCondInpaintTrainingWrapper
return DiffusionCondInpaintTrainingWrapper(
model,
lr=training_config["learning_rate"]
)
elif model_type == 'diffusion_autoencoder':
from .diffusion import DiffusionAutoencoderTrainingWrapper
ema_copy = create_model_from_config(model_config)
# Copy each weight to the ema copy
for name, param in model.state_dict().items():
if isinstance(param, Parameter):
# backwards compatibility for serialized parameters
param = param.data
ema_copy.state_dict()[name].copy_(param)
return DiffusionAutoencoderTrainingWrapper(
model,
ema_copy=ema_copy,
lr=training_config["learning_rate"],
use_reconstruction_loss=training_config.get("use_reconstruction_loss", False)
)
elif model_type == 'musicgen':
from .musicgen import MusicGenTrainingWrapper
ema_copy = create_model_from_config(model_config).lm
for name, param in model.lm.state_dict().items():
if isinstance(param, Parameter):
# backwards compatibility for serialized parameters
param = param.data
ema_copy.state_dict()[name].copy_(param)
return MusicGenTrainingWrapper(
model,
ema_copy=ema_copy,
lr=training_config["learning_rate"]
)
elif model_type == 'lm':
from .lm import AudioLanguageModelTrainingWrapper
ema_copy = create_model_from_config(model_config)
for name, param in model.state_dict().items():
if isinstance(param, Parameter):
# backwards compatibility for serialized parameters
param = param.data
ema_copy.state_dict()[name].copy_(param)
return AudioLanguageModelTrainingWrapper(
model,
ema_copy=ema_copy,
lr=training_config.get("learning_rate", None),
use_ema=training_config.get("use_ema", False),
optimizer_configs=training_config.get("optimizer_configs", None),
)
else:
raise NotImplementedError(f'Unknown model type: {model_type}') | null |
import torch
from torch.nn import Parameter
from ..models.factory import create_model_from_config
class AutoencoderDemoCallback(pl.Callback):
    """Periodically reconstructs a demo batch through the autoencoder and logs
    the audio, latents, and spectrograms to wandb."""

    def __init__(
        self,
        demo_dl,
        demo_every=2000,
        sample_size=65536,
        sample_rate=48000
    ):
        """
        Args:
            demo_dl: dataloader yielding (audio, metadata) demo batches.
            demo_every: run a demo every N training steps.
            sample_size: demo clip length in audio samples.
            sample_rate: audio sample rate in Hz.
        """
        super().__init__()
        self.demo_every = demo_every
        self.demo_samples = sample_size
        self.demo_dl = iter(demo_dl)
        self.sample_rate = sample_rate
        self.last_demo_step = -1

    def on_train_batch_end(self, trainer, module, outputs, batch, batch_idx):
        # Run at most once per demo_every steps, and never twice at the same step.
        if (trainer.global_step - 1) % self.demo_every != 0 or self.last_demo_step == trainer.global_step:
            return

        self.last_demo_step = trainer.global_step

        module.eval()

        try:
            demo_reals, _ = next(self.demo_dl)

            # Remove extra dimension added by WebDataset
            if demo_reals.ndim == 4 and demo_reals.shape[0] == 1:
                demo_reals = demo_reals[0]

            encoder_input = demo_reals
            encoder_input = encoder_input.to(module.device)

            if module.force_input_mono:
                encoder_input = encoder_input.mean(dim=1, keepdim=True)

            demo_reals = demo_reals.to(module.device)

            with torch.no_grad():
                # Prefer the EMA weights for demos when EMA is enabled.
                if module.use_ema:
                    latents = module.autoencoder_ema.ema_model.encode(encoder_input)
                    fakes = module.autoencoder_ema.ema_model.decode(latents)
                else:
                    latents = module.autoencoder.encode(encoder_input)
                    fakes = module.autoencoder.decode(latents)

            # Interleave reals and fakes so each real clip is followed by its reconstruction
            reals_fakes = rearrange([demo_reals, fakes], 'i b d n -> (b i) d n')

            # Put the demos together into one long track
            reals_fakes = rearrange(reals_fakes, 'b d n -> d (b n)')

            log_dict = {}

            filename = f'recon_{trainer.global_step:08}.wav'
            # Convert float audio in [-1, 1] to 16-bit PCM for saving
            reals_fakes = reals_fakes.to(torch.float32).clamp(-1, 1).mul(32767).to(torch.int16).cpu()
            torchaudio.save(filename, reals_fakes, self.sample_rate)

            log_dict[f'recon'] = wandb.Audio(filename,
                                             sample_rate=self.sample_rate,
                                             caption=f'Reconstructed')

            log_dict[f'embeddings_3dpca'] = pca_point_cloud(latents)
            log_dict[f'embeddings_spec'] = wandb.Image(tokens_spectrogram_image(latents))

            log_dict[f'recon_melspec_left'] = wandb.Image(audio_spectrogram_image(reals_fakes))

            trainer.logger.experiment.log(log_dict)
        except Exception as e:
            print(f'{type(e).__name__}: {e}')
            # Bare raise re-raises with the original traceback intact
            # (`raise e` would restart the traceback here).
            raise
        finally:
            module.train()
class DiffusionUncondDemoCallback(pl.Callback):
    """Periodically samples the unconditional diffusion model and logs demo audio
    and spectrograms to wandb. Failures are logged and swallowed (best-effort)."""

    def __init__(self,
                 demo_every=2000,   # run a demo every N training steps
                 num_demos=8,       # number of clips sampled per demo
                 demo_steps=250,    # diffusion sampling steps
                 sample_rate=48000  # audio sample rate in Hz
                 ):
        super().__init__()

        self.demo_every = demo_every
        self.num_demos = num_demos
        self.demo_steps = demo_steps
        self.sample_rate = sample_rate
        self.last_demo_step = -1

    def on_train_batch_end(self, trainer, module, outputs, batch, batch_idx):
        # Run at most once per demo_every steps, and never twice at the same step.
        if (trainer.global_step - 1) % self.demo_every != 0 or self.last_demo_step == trainer.global_step:
            return

        self.last_demo_step = trainer.global_step

        demo_samples = module.diffusion.sample_size

        # When sampling in a pretransform's latent space, the length is in latent
        # frames rather than raw audio samples.
        if module.diffusion.pretransform is not None:
            demo_samples = demo_samples // module.diffusion.pretransform.downsampling_ratio

        noise = torch.randn([self.num_demos, module.diffusion.io_channels, demo_samples]).to(module.device)

        try:
            with torch.cuda.amp.autocast():
                fakes = sample(module.diffusion_ema, noise, self.demo_steps, 0)

                if module.diffusion.pretransform is not None:
                    fakes = module.diffusion.pretransform.decode(fakes)

            # Put the demos together
            fakes = rearrange(fakes, 'b d n -> d (b n)')

            log_dict = {}

            filename = f'demo_{trainer.global_step:08}.wav'
            # Peak-normalize, then convert to 16-bit PCM for saving
            fakes = fakes.to(torch.float32).div(torch.max(torch.abs(fakes))).mul(32767).to(torch.int16).cpu()
            torchaudio.save(filename, fakes, self.sample_rate)

            log_dict[f'demo'] = wandb.Audio(filename,
                                            sample_rate=self.sample_rate,
                                            caption=f'Reconstructed')

            log_dict[f'demo_melspec_left'] = wandb.Image(audio_spectrogram_image(fakes))

            trainer.logger.experiment.log(log_dict)

            del fakes

        except Exception as e:
            # Deliberate best-effort: a failed demo must not crash training.
            print(f'{type(e).__name__}: {e}')
        finally:
            gc.collect()
            torch.cuda.empty_cache()
class DiffusionCondDemoCallback(pl.Callback):
    """Periodically generates conditioned diffusion demos at several CFG scales
    and logs the audio and spectrograms to wandb."""

    def __init__(self,
                 demo_every=2000,
                 num_demos=8,
                 sample_size=65536,
                 demo_steps=250,
                 sample_rate=48000,
                 demo_conditioning: tp.Optional[tp.Dict[str, tp.Any]] = {},
                 demo_cfg_scales: tp.Optional[tp.List[int]] = [3, 5, 7],
                 demo_cond_from_batch: bool = False,
                 display_audio_cond: bool = False
                 ):
        # NOTE: the mutable defaults ({} and [3, 5, 7]) are never mutated here,
        # so they are kept for interface compatibility.
        super().__init__()

        self.demo_every = demo_every
        self.num_demos = num_demos
        self.demo_samples = sample_size
        self.demo_steps = demo_steps
        self.sample_rate = sample_rate
        self.last_demo_step = -1
        self.demo_conditioning = demo_conditioning
        self.demo_cfg_scales = demo_cfg_scales

        # If true, the callback will use the metadata from the batch to generate the demo conditioning
        self.demo_cond_from_batch = demo_cond_from_batch

        # If true, the callback will display the audio conditioning
        self.display_audio_cond = display_audio_cond

    def on_train_batch_end(self, trainer, module: DiffusionCondTrainingWrapper, outputs, batch, batch_idx):
        # Run at most once per demo_every steps, and never twice at the same step.
        if (trainer.global_step - 1) % self.demo_every != 0 or self.last_demo_step == trainer.global_step:
            return

        module.eval()

        print(f"Generating demo")
        self.last_demo_step = trainer.global_step

        demo_samples = self.demo_samples

        demo_cond = self.demo_conditioning

        if self.demo_cond_from_batch:
            # Get metadata from the batch
            demo_cond = batch[1][:self.num_demos]

        # When sampling in latent space, the length is in latent frames
        if module.diffusion.pretransform is not None:
            demo_samples = demo_samples // module.diffusion.pretransform.downsampling_ratio

        noise = torch.randn([self.num_demos, module.diffusion.io_channels, demo_samples]).to(module.device)

        # BUG FIX: the original `except Exception as e: raise e` was a no-op that
        # restarted the traceback at the handler; a plain try/finally keeps the
        # cleanup while letting errors propagate with their original traceback.
        try:
            print("Getting conditioning")
            with torch.cuda.amp.autocast():
                conditioning = module.diffusion.conditioner(demo_cond, module.device)

            cond_inputs = module.diffusion.get_conditioning_inputs(conditioning)

            log_dict = {}

            if self.display_audio_cond:
                audio_inputs = torch.cat([cond["audio"] for cond in demo_cond], dim=0)
                audio_inputs = rearrange(audio_inputs, 'b d n -> d (b n)')

                filename = f'demo_audio_cond_{trainer.global_step:08}.wav'
                audio_inputs = audio_inputs.to(torch.float32).mul(32767).to(torch.int16).cpu()
                torchaudio.save(filename, audio_inputs, self.sample_rate)
                log_dict[f'demo_audio_cond'] = wandb.Audio(filename, sample_rate=self.sample_rate, caption="Audio conditioning")
                log_dict[f"demo_audio_cond_melspec_left"] = wandb.Image(audio_spectrogram_image(audio_inputs))
                trainer.logger.experiment.log(log_dict)

            for cfg_scale in self.demo_cfg_scales:

                print(f"Generating demo for cfg scale {cfg_scale}")

                with torch.cuda.amp.autocast():
                    # Prefer the EMA weights for demos when available
                    model = module.diffusion_ema.model if module.diffusion_ema is not None else module.diffusion.model

                    fakes = sample(model, noise, self.demo_steps, 0, **cond_inputs, cfg_scale=cfg_scale, batch_cfg=True)

                    if module.diffusion.pretransform is not None:
                        fakes = module.diffusion.pretransform.decode(fakes)

                # Put the demos together
                fakes = rearrange(fakes, 'b d n -> d (b n)')

                log_dict = {}

                filename = f'demo_cfg_{cfg_scale}_{trainer.global_step:08}.wav'
                # Peak-normalize, then convert to 16-bit PCM for saving
                fakes = fakes.to(torch.float32).div(torch.max(torch.abs(fakes))).mul(32767).to(torch.int16).cpu()
                torchaudio.save(filename, fakes, self.sample_rate)

                log_dict[f'demo_cfg_{cfg_scale}'] = wandb.Audio(filename,
                                                                sample_rate=self.sample_rate,
                                                                caption=f'Reconstructed')

                log_dict[f'demo_melspec_left_cfg_{cfg_scale}'] = wandb.Image(audio_spectrogram_image(fakes))

                trainer.logger.experiment.log(log_dict)

                del fakes

        finally:
            gc.collect()
            torch.cuda.empty_cache()
            module.train()
class DiffusionCondInpaintDemoCallback(pl.Callback):
    """Periodically runs inpainting demos: randomly masks real audio, has the
    conditioned diffusion model fill the gap, and logs results to wandb."""

    def __init__(
        self,
        demo_dl,
        demo_every=2000,
        demo_steps=250,
        sample_size=65536,
        sample_rate=48000,
        demo_cfg_scales: tp.Optional[tp.List[int]] = [3, 5, 7]
    ):
        """
        Args:
            demo_dl: dataloader yielding (audio, metadata) demo batches.
            demo_every: run a demo every N training steps.
            demo_steps: diffusion sampling steps.
            sample_size: demo clip length in audio samples.
            sample_rate: audio sample rate in Hz.
            demo_cfg_scales: CFG scales to generate demos at.
        """
        super().__init__()
        self.demo_every = demo_every
        self.demo_steps = demo_steps
        self.demo_samples = sample_size
        self.demo_dl = iter(demo_dl)
        self.sample_rate = sample_rate
        self.demo_cfg_scales = demo_cfg_scales
        self.last_demo_step = -1

    def on_train_batch_end(self, trainer, module: DiffusionCondTrainingWrapper, outputs, batch, batch_idx):
        # Run at most once per demo_every steps, and never twice at the same step.
        if (trainer.global_step - 1) % self.demo_every != 0 or self.last_demo_step == trainer.global_step:
            return

        self.last_demo_step = trainer.global_step

        try:
            log_dict = {}

            demo_reals, metadata = next(self.demo_dl)

            # Remove extra dimension added by WebDataset
            if demo_reals.ndim == 4 and demo_reals.shape[0] == 1:
                demo_reals = demo_reals[0]

            demo_reals = demo_reals.to(module.device)

            # Log the real audio
            log_dict[f'demo_reals_melspec_left'] = wandb.Image(audio_spectrogram_image(rearrange(demo_reals, "b d n -> d (b n)").mul(32767).to(torch.int16).cpu()))

            if module.diffusion.pretransform is not None:
                module.diffusion.pretransform.to(module.device)
                with torch.cuda.amp.autocast():
                    demo_reals = module.diffusion.pretransform.encode(demo_reals)

            demo_samples = demo_reals.shape[2]

            # Get conditioning
            conditioning = module.diffusion.conditioner(metadata, module.device)

            masked_input, mask = module.random_mask(demo_reals, demo_reals.shape[2])

            conditioning['inpaint_mask'] = [mask]
            conditioning['inpaint_masked_input'] = [masked_input]

            if module.diffusion.pretransform is not None:
                # Latent-space input: visualize tokens instead of an audio spectrogram
                log_dict[f'demo_masked_input'] = wandb.Image(tokens_spectrogram_image(masked_input.cpu()))
            else:
                log_dict[f'demo_masked_input'] = wandb.Image(audio_spectrogram_image(rearrange(masked_input, "b c t -> c (b t)").mul(32767).to(torch.int16).cpu()))

            cond_inputs = module.diffusion.get_conditioning_inputs(conditioning)

            noise = torch.randn([demo_reals.shape[0], module.diffusion.io_channels, demo_samples]).to(module.device)

            trainer.logger.experiment.log(log_dict)

            for cfg_scale in self.demo_cfg_scales:
                print(f"Generating demo for cfg scale {cfg_scale}")
                fakes = sample(module.diffusion_ema.model, noise, self.demo_steps, 0, **cond_inputs, cfg_scale=cfg_scale, batch_cfg=True)

                if module.diffusion.pretransform is not None:
                    with torch.cuda.amp.autocast():
                        fakes = module.diffusion.pretransform.decode(fakes)

                # Put the demos together
                fakes = rearrange(fakes, 'b d n -> d (b n)')

                log_dict = {}

                filename = f'demo_cfg_{cfg_scale}_{trainer.global_step:08}.wav'
                # Peak-normalize, then convert to 16-bit PCM for saving
                fakes = fakes.to(torch.float32).div(torch.max(torch.abs(fakes))).mul(32767).to(torch.int16).cpu()
                torchaudio.save(filename, fakes, self.sample_rate)

                log_dict[f'demo_cfg_{cfg_scale}'] = wandb.Audio(filename,
                                                                sample_rate=self.sample_rate,
                                                                caption=f'Reconstructed')

                log_dict[f'demo_melspec_left_cfg_{cfg_scale}'] = wandb.Image(audio_spectrogram_image(fakes))

                trainer.logger.experiment.log(log_dict)
        except Exception as e:
            print(f'{type(e).__name__}: {e}')
            # Bare raise keeps the original traceback (`raise e` would restart it here)
            raise
class DiffusionAutoencoderDemoCallback(pl.Callback):
    """Periodically reconstructs demo audio through the diffusion autoencoder
    (EMA weights) and logs audio, latents, and spectrograms to wandb."""

    def __init__(
        self,
        demo_dl,
        demo_every=2000,
        demo_steps=250,
        sample_size=65536,
        sample_rate=48000
    ):
        """
        Args:
            demo_dl: dataloader yielding (audio, metadata) demo batches.
            demo_every: run a demo every N training steps.
            demo_steps: diffusion decoding steps.
            sample_size: demo clip length in audio samples.
            sample_rate: audio sample rate in Hz.
        """
        super().__init__()
        self.demo_every = demo_every
        self.demo_steps = demo_steps
        self.demo_samples = sample_size
        self.demo_dl = iter(demo_dl)
        self.sample_rate = sample_rate
        self.last_demo_step = -1

    def on_train_batch_end(self, trainer, module: DiffusionAutoencoderTrainingWrapper, outputs, batch, batch_idx):
        # Run at most once per demo_every steps, and never twice at the same step.
        if (trainer.global_step - 1) % self.demo_every != 0 or self.last_demo_step == trainer.global_step:
            return

        self.last_demo_step = trainer.global_step

        demo_reals, _ = next(self.demo_dl)

        # Remove extra dimension added by WebDataset
        if demo_reals.ndim == 4 and demo_reals.shape[0] == 1:
            demo_reals = demo_reals[0]

        encoder_input = demo_reals
        encoder_input = encoder_input.to(module.device)

        demo_reals = demo_reals.to(module.device)

        # BUG FIX: the original used `with torch.no_grad() and torch.cuda.amp.autocast():`,
        # which evaluates `no_grad()` for truthiness, discards it, and enters only the
        # autocast manager — so gradients were tracked during the demo. Enter both.
        with torch.no_grad(), torch.cuda.amp.autocast():
            latents = module.diffae_ema.ema_model.encode(encoder_input).float()
            fakes = module.diffae_ema.ema_model.decode(latents, steps=self.demo_steps)

        # Interleave reals and fakes so each real clip is followed by its reconstruction
        reals_fakes = rearrange([demo_reals, fakes], 'i b d n -> (b i) d n')

        # Put the demos together
        reals_fakes = rearrange(reals_fakes, 'b d n -> d (b n)')

        log_dict = {}

        filename = f'recon_{trainer.global_step:08}.wav'
        # Peak-normalize, then convert to 16-bit PCM for saving
        reals_fakes = reals_fakes.to(torch.float32).div(torch.max(torch.abs(reals_fakes))).mul(32767).to(torch.int16).cpu()
        torchaudio.save(filename, reals_fakes, self.sample_rate)

        log_dict[f'recon'] = wandb.Audio(filename,
                                         sample_rate=self.sample_rate,
                                         caption=f'Reconstructed')

        log_dict[f'embeddings_3dpca'] = pca_point_cloud(latents)
        log_dict[f'embeddings_spec'] = wandb.Image(tokens_spectrogram_image(latents))

        log_dict[f'recon_melspec_left'] = wandb.Image(audio_spectrogram_image(reals_fakes))

        if module.diffae_ema.ema_model.pretransform is not None:
            # Also log a first-stage (pretransform-only) round trip for comparison.
            # Same BUG FIX as above: enter both context managers.
            with torch.no_grad(), torch.cuda.amp.autocast():
                initial_latents = module.diffae_ema.ema_model.pretransform.encode(encoder_input)
                first_stage_fakes = module.diffae_ema.ema_model.pretransform.decode(initial_latents)
                first_stage_fakes = rearrange(first_stage_fakes, 'b d n -> d (b n)')
                first_stage_fakes = first_stage_fakes.to(torch.float32).mul(32767).to(torch.int16).cpu()
                first_stage_filename = f'first_stage_{trainer.global_step:08}.wav'
                torchaudio.save(first_stage_filename, first_stage_fakes, self.sample_rate)

                log_dict[f'first_stage_latents'] = wandb.Image(tokens_spectrogram_image(initial_latents))

                log_dict[f'first_stage'] = wandb.Audio(first_stage_filename,
                                                       sample_rate=self.sample_rate,
                                                       caption=f'First Stage Reconstructed')

                log_dict[f'first_stage_melspec_left'] = wandb.Image(audio_spectrogram_image(first_stage_fakes))

        trainer.logger.experiment.log(log_dict)
class DiffusionPriorDemoCallback(pl.Callback):
    """Periodically runs the diffusion prior (e.g. mono-to-stereo or source
    separation) on demo audio and logs reconstructions and sources to wandb."""

    def __init__(
        self,
        demo_dl,
        demo_every=2000,
        demo_steps=250,
        sample_size=65536,
        sample_rate=48000
    ):
        """
        Args:
            demo_dl: dataloader yielding (audio, metadata) demo batches.
            demo_every: run a demo every N training steps.
            demo_steps: diffusion sampling steps.
            sample_size: demo clip length in audio samples.
            sample_rate: audio sample rate in Hz.
        """
        super().__init__()
        self.demo_every = demo_every
        self.demo_steps = demo_steps
        self.demo_samples = sample_size
        self.demo_dl = iter(demo_dl)
        self.sample_rate = sample_rate
        self.last_demo_step = -1

    def on_train_batch_end(self, trainer, module: DiffusionAutoencoderTrainingWrapper, outputs, batch, batch_idx):
        # Run at most once per demo_every steps, and never twice at the same step.
        if (trainer.global_step - 1) % self.demo_every != 0 or self.last_demo_step == trainer.global_step:
            return

        self.last_demo_step = trainer.global_step

        demo_reals, metadata = next(self.demo_dl)

        # Remove extra dimension added by WebDataset
        if demo_reals.ndim == 4 and demo_reals.shape[0] == 1:
            demo_reals = demo_reals[0]

        demo_reals = demo_reals.to(module.device)

        encoder_input = demo_reals

        if module.diffusion.conditioner is not None:
            with torch.cuda.amp.autocast():
                conditioning_tensors = module.diffusion.conditioner(metadata, module.device)
        else:
            conditioning_tensors = {}

        # BUG FIX: the original used `with torch.no_grad() and torch.cuda.amp.autocast():`,
        # which only enters the autocast manager (`no_grad()` is evaluated for truthiness
        # and discarded), so gradients were tracked during sampling. Enter both.
        with torch.no_grad(), torch.cuda.amp.autocast():
            # NOTE(review): `source` is only bound in these two branches; a MonoToStereo
            # prior fed mono input (shape[1] == 1) would hit a NameError below — confirm
            # callers guarantee multi-channel input for that prior type.
            if module.prior_type == PriorType.MonoToStereo and encoder_input.shape[1] > 1:
                source = encoder_input.mean(dim=1, keepdim=True).repeat(1, encoder_input.shape[1], 1).to(module.device)
            elif module.prior_type == PriorType.SourceSeparation:
                source = create_source_mixture(encoder_input)

            if module.diffusion.pretransform is not None:
                encoder_input = module.diffusion.pretransform.encode(encoder_input)
                source_input = module.diffusion.pretransform.encode(source)
            else:
                source_input = source

            conditioning_tensors['source'] = [source_input]

            fakes = sample(module.diffusion_ema.model, torch.randn_like(encoder_input), self.demo_steps, 0, cond=conditioning_tensors)

            if module.diffusion.pretransform is not None:
                fakes = module.diffusion.pretransform.decode(fakes)

        # Interleave reals and fakes so each real clip is followed by its output
        reals_fakes = rearrange([demo_reals, fakes], 'i b d n -> (b i) d n')

        # Put the demos together
        reals_fakes = rearrange(reals_fakes, 'b d n -> d (b n)')

        log_dict = {}

        filename = f'recon_{trainer.global_step:08}.wav'
        # Peak-normalize, then convert to 16-bit PCM for saving
        reals_fakes = reals_fakes.to(torch.float32).div(torch.max(torch.abs(reals_fakes))).mul(32767).to(torch.int16).cpu()
        torchaudio.save(filename, reals_fakes, self.sample_rate)

        log_dict[f'recon'] = wandb.Audio(filename,
                                         sample_rate=self.sample_rate,
                                         caption=f'Reconstructed')

        log_dict[f'recon_melspec_left'] = wandb.Image(audio_spectrogram_image(reals_fakes))

        # Log the source
        filename = f'source_{trainer.global_step:08}.wav'
        source = rearrange(source, 'b d n -> d (b n)')
        source = source.to(torch.float32).mul(32767).to(torch.int16).cpu()
        torchaudio.save(filename, source, self.sample_rate)

        log_dict[f'source'] = wandb.Audio(filename,
                                          sample_rate=self.sample_rate,
                                          caption=f'Source')

        log_dict[f'source_melspec_left'] = wandb.Image(audio_spectrogram_image(source))

        trainer.logger.experiment.log(log_dict)
class MusicGenDemoCallback(pl.Callback):
    """Periodically generates MusicGen demos from fixed text prompts at several
    CFG scales and logs the audio and spectrograms to wandb."""

    def __init__(self,
                 demo_every=2000,
                 num_demos=8,
                 sample_size=65536,
                 sample_rate=48000,
                 demo_conditioning: tp.Optional[tp.Dict[str, tp.Any]] = None,
                 demo_cfg_scales: tp.Optional[tp.List[int]] = [3, 5, 7],
                 **kwargs
                 ):
        super().__init__()

        self.demo_every = demo_every
        self.num_demos = num_demos
        self.demo_samples = sample_size
        self.sample_rate = sample_rate
        self.last_demo_step = -1
        self.demo_conditioning = demo_conditioning
        self.demo_cfg_scales = demo_cfg_scales

    def on_train_batch_end(self, trainer, module: MusicGenTrainingWrapper, outputs, batch, batch_idx):
        # Run at most once per demo_every steps, and never twice at the same step.
        if (trainer.global_step - 1) % self.demo_every != 0 or self.last_demo_step == trainer.global_step:
            return

        module.eval()

        print(f"Generating demo")
        self.last_demo_step = trainer.global_step

        # MusicGen's generation API takes a duration in seconds
        demo_length_sec = self.demo_samples // self.sample_rate

        # BUG FIX: the original `except Exception as e: raise e` was a no-op that
        # restarted the traceback at the handler; plain try/finally keeps the cleanup
        # while letting errors propagate with their original traceback.
        try:
            print("Getting conditioning")

            # Truncate prompts to keep the text-conditioner input bounded
            prompts = [md["prompt"][:512] for md in self.demo_conditioning]

            for cfg_scale in self.demo_cfg_scales:

                module.musicgen_model.set_generation_params(duration=demo_length_sec, cfg_coef=cfg_scale)

                with torch.cuda.amp.autocast():
                    print(f"Generating demo for cfg scale {cfg_scale}")
                    fakes = module.musicgen_model.generate(prompts, progress=True)

                # Put the demos together
                fakes = rearrange(fakes, 'b d n -> d (b n)')

                log_dict = {}

                filename = f'demo_cfg_{cfg_scale}_{trainer.global_step:08}.wav'
                # Clamp to [-1, 1] and convert to 16-bit PCM for saving
                fakes = fakes.to(torch.float32).clamp(-1, 1).mul(32767).to(torch.int16).cpu()
                torchaudio.save(filename, fakes, self.sample_rate)

                log_dict[f'demo_cfg_{cfg_scale}'] = wandb.Audio(filename,
                                                                sample_rate=self.sample_rate,
                                                                caption=f'Reconstructed')

                log_dict[f'demo_melspec_left_cfg_{cfg_scale}'] = wandb.Image(audio_spectrogram_image(fakes))

                trainer.logger.experiment.log(log_dict)

        finally:
            # Always restore training mode and release demo GPU memory
            gc.collect()
            torch.cuda.empty_cache()
            module.train()
class AudioLanguageModelDemoCallback(pl.Callback):
    """Periodically generates audio language-model demos from fixed conditioning
    at several CFG scales and logs the audio and spectrograms to wandb."""

    def __init__(self,
                 demo_every=2000,
                 num_demos=8,
                 sample_size=65536,
                 sample_rate=48000,
                 demo_conditioning: tp.Optional[tp.Dict[str, tp.Any]] = None,
                 demo_cfg_scales: tp.Optional[tp.List[int]] = [3, 5, 7],
                 **kwargs
                 ):
        super().__init__()

        self.demo_every = demo_every
        self.num_demos = num_demos
        self.demo_samples = sample_size
        self.sample_rate = sample_rate
        self.last_demo_step = -1
        self.demo_conditioning = demo_conditioning
        self.demo_cfg_scales = demo_cfg_scales

    def on_train_batch_end(self, trainer, module: AudioLanguageModelTrainingWrapper, outputs, batch, batch_idx):
        # Run at most once per demo_every steps, and never twice at the same step.
        if (trainer.global_step - 1) % self.demo_every != 0 or self.last_demo_step == trainer.global_step:
            return

        module.eval()

        print(f"Generating demo")
        self.last_demo_step = trainer.global_step

        # Convert the demo length from audio samples to pretransform tokens
        demo_length_tokens = self.demo_samples // module.model.pretransform.downsampling_ratio

        # BUG FIX: the original `except Exception as e: raise e` was a no-op that
        # restarted the traceback at the handler; plain try/finally keeps the cleanup
        # while letting errors propagate with their original traceback.
        try:
            print("Getting conditioning")

            for cfg_scale in self.demo_cfg_scales:

                # NOTE(review): EMA weights are deliberately not used for demos here
                # (module.model_ema exists but the non-EMA model is sampled) — confirm intended.
                model = module.model

                print(f"Generating demo for cfg scale {cfg_scale}")
                fakes = model.generate_audio(
                    batch_size=self.num_demos,
                    max_gen_len=demo_length_tokens,
                    conditioning=self.demo_conditioning,
                    cfg_scale=cfg_scale,
                    temp=1.0,
                    top_p=0.95
                )

                # Put the demos together
                fakes = rearrange(fakes, 'b d n -> d (b n)')

                log_dict = {}

                filename = f'demo_cfg_{cfg_scale}_{trainer.global_step:08}.wav'
                # Clamp to [-1, 1] and convert to 16-bit PCM for saving
                fakes = fakes.clamp(-1, 1).mul(32766).to(torch.int16).cpu()
                torchaudio.save(filename, fakes, self.sample_rate)

                log_dict[f'demo_cfg_{cfg_scale}'] = wandb.Audio(filename,
                                                                sample_rate=self.sample_rate,
                                                                caption=f'Reconstructed')

                log_dict[f'demo_melspec_left_cfg_{cfg_scale}'] = wandb.Image(audio_spectrogram_image(fakes))

                trainer.logger.experiment.log(log_dict)

        finally:
            # Always restore training mode and release demo GPU memory
            gc.collect()
            torch.cuda.empty_cache()
            module.train()
def create_demo_callback_from_config(model_config, **kwargs):
    """Instantiate the demo callback matching model_config['model_type'].

    Reads defaults from model_config['training']['demo']; extra keyword
    arguments (e.g. a demo dataloader) are forwarded to callbacks that accept
    them. Raises NotImplementedError for unknown model types.
    """
    model_type = model_config.get('model_type', None)
    assert model_type is not None, 'model_type must be specified in model config'

    training_config = model_config.get('training', None)
    assert training_config is not None, 'training config must be specified in model config'

    # Per-callback settings live under training.demo; missing keys fall back to
    # defaults below except where a key is required (plain [] indexing).
    demo_config = training_config.get("demo", {})

    if model_type == 'autoencoder':
        from .autoencoders import AutoencoderDemoCallback
        return AutoencoderDemoCallback(
            demo_every=demo_config.get("demo_every", 2000),
            sample_size=model_config["sample_size"],
            sample_rate=model_config["sample_rate"],
            **kwargs
        )
    elif model_type == 'diffusion_uncond':
        from .diffusion import DiffusionUncondDemoCallback
        return DiffusionUncondDemoCallback(
            demo_every=demo_config.get("demo_every", 2000),
            demo_steps=demo_config.get("demo_steps", 250),
            sample_rate=model_config["sample_rate"]
        )
    elif model_type == "diffusion_autoencoder":
        from .diffusion import DiffusionAutoencoderDemoCallback
        return DiffusionAutoencoderDemoCallback(
            demo_every=demo_config.get("demo_every", 2000),
            demo_steps=demo_config.get("demo_steps", 250),
            sample_size=model_config["sample_size"],
            sample_rate=model_config["sample_rate"],
            **kwargs
        )
    elif model_type == "diffusion_prior":
        from .diffusion import DiffusionPriorDemoCallback
        return DiffusionPriorDemoCallback(
            demo_every=demo_config.get("demo_every", 2000),
            demo_steps=demo_config.get("demo_steps", 250),
            sample_size=model_config["sample_size"],
            sample_rate=model_config["sample_rate"],
            **kwargs
        )
    elif model_type == "diffusion_cond":
        from .diffusion import DiffusionCondDemoCallback
        # NOTE: num_demos and demo_cfg_scales are required keys for this model type
        return DiffusionCondDemoCallback(
            demo_every=demo_config.get("demo_every", 2000),
            sample_size=model_config["sample_size"],
            sample_rate=model_config["sample_rate"],
            demo_steps=demo_config.get("demo_steps", 250),
            num_demos=demo_config["num_demos"],
            demo_cfg_scales=demo_config["demo_cfg_scales"],
            demo_conditioning=demo_config.get("demo_cond", {}),
            demo_cond_from_batch=demo_config.get("demo_cond_from_batch", False),
            display_audio_cond=demo_config.get("display_audio_cond", False),
        )
    elif model_type == "diffusion_cond_inpaint":
        from .diffusion import DiffusionCondInpaintDemoCallback
        return DiffusionCondInpaintDemoCallback(
            demo_every=demo_config.get("demo_every", 2000),
            sample_size=model_config["sample_size"],
            sample_rate=model_config["sample_rate"],
            demo_steps=demo_config.get("demo_steps", 250),
            demo_cfg_scales=demo_config["demo_cfg_scales"],
            **kwargs
        )
    elif model_type == "musicgen":
        from .musicgen import MusicGenDemoCallback
        return MusicGenDemoCallback(
            demo_every=demo_config.get("demo_every", 2000),
            sample_size=model_config["sample_size"],
            sample_rate=model_config["sample_rate"],
            demo_cfg_scales=demo_config["demo_cfg_scales"],
            demo_conditioning=demo_config["demo_cond"],
            **kwargs
        )
    elif model_type == "lm":
        from .lm import AudioLanguageModelDemoCallback
        return AudioLanguageModelDemoCallback(
            demo_every=demo_config.get("demo_every", 2000),
            sample_size=model_config["sample_size"],
            sample_rate=model_config["sample_rate"],
            demo_cfg_scales=demo_config.get("demo_cfg_scales", [1]),
            demo_conditioning=demo_config.get("demo_cond", None),
            num_demos=demo_config.get("num_demos", 8),
            **kwargs
        )
    else:
        raise NotImplementedError(f'Unknown model type: {model_type}')
def get_custom_metadata(info, audio):
    """Build conditioning metadata for a sample.

    The sample's relative path serves as its text prompt; the audio itself
    is not used.
    """
    prompt = info["relpath"]
    return {"prompt": prompt}