import os
import sys
import pdb

# Must be set before any EGL-backed GL context is created by downstream imports.
os.environ["PYOPENGL_PLATFORM"] = "egl"

import cv2
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D  # noqa: F401  (side effect: registers the 3d projection)

import torch
from pytorch3d.structures import Meshes
from pytorch3d.renderer import (
    PointLights,
    DirectionalLights,
    PerspectiveCameras,
    Materials,
    SoftPhongShader,
    RasterizationSettings,
    MeshRenderer,
    MeshRendererWithFragments,
    MeshRasterizer,
    TexturesVertex,
)


def vis_keypoints_with_skeleton(img, kps, kps_lines, kp_thresh=0.4, alpha=1):
    """Draw 2D keypoints and their skeleton edges onto an image.

    Args:
        img: HxWx3 uint8 BGR image (OpenCV convention).
        kps: array of shape (3, K) — row 0 is x, row 1 is y, row 2 is a
            confidence score per keypoint.
        kps_lines: iterable of (i, j) index pairs defining skeleton edges.
        kp_thresh: minimum confidence for a keypoint to be drawn.
        alpha: blend weight of the drawing over the original image.

    Returns:
        The input image alpha-blended with the drawn skeleton (new array).
    """
    # Convert plt's 0-1 RGBA rainbow colors to 0-255 BGR tuples for OpenCV.
    cmap = plt.get_cmap("rainbow")
    colors = [cmap(i) for i in np.linspace(0, 1, len(kps_lines) + 2)]
    colors = [(c[2] * 255, c[1] * 255, c[0] * 255) for c in colors]

    # Draw on a copy so the result can be alpha-blended with the original.
    kp_mask = np.copy(img)

    for l in range(len(kps_lines)):
        i1 = kps_lines[l][0]
        i2 = kps_lines[l][1]
        # Plain ints: recent OpenCV versions reject numpy scalar coordinates.
        p1 = int(kps[0, i1]), int(kps[1, i1])
        p2 = int(kps[0, i2]), int(kps[1, i2])
        # Edge only when both endpoints are confident; dots independently.
        if kps[2, i1] > kp_thresh and kps[2, i2] > kp_thresh:
            cv2.line(
                kp_mask, p1, p2, color=colors[l], thickness=2, lineType=cv2.LINE_AA
            )
        if kps[2, i1] > kp_thresh:
            cv2.circle(
                kp_mask,
                p1,
                radius=3,
                color=colors[l],
                thickness=-1,
                lineType=cv2.LINE_AA,
            )
        if kps[2, i2] > kp_thresh:
            cv2.circle(
                kp_mask,
                p2,
                radius=3,
                color=colors[l],
                thickness=-1,
                lineType=cv2.LINE_AA,
            )

    # Blend the drawn overlay with the original image.
    return cv2.addWeighted(img, 1.0 - alpha, kp_mask, alpha, 0)


def vis_keypoints(img, kps, alpha=1):
    """Draw 2D keypoints (dots only, no skeleton) onto an image.

    Args:
        img: HxWx3 uint8 BGR image (OpenCV convention).
        kps: array-like of shape (K, 2+) — kps[i][0] is x, kps[i][1] is y.
        alpha: blend weight of the drawing over the original image.

    Returns:
        The input image alpha-blended with the drawn keypoints (new array).
    """
    # Convert plt's 0-1 RGBA rainbow colors to 0-255 BGR tuples for OpenCV.
    cmap = plt.get_cmap("rainbow")
    colors = [cmap(i) for i in np.linspace(0, 1, len(kps) + 2)]
    colors = [(c[2] * 255, c[1] * 255, c[0] * 255) for c in colors]

    # Draw on a copy so the result can be alpha-blended with the original.
    kp_mask = np.copy(img)

    for i in range(len(kps)):
        # Plain ints: recent OpenCV versions reject numpy scalar coordinates.
        p = int(kps[i][0]), int(kps[i][1])
        cv2.circle(
            kp_mask, p, radius=3, color=colors[i], thickness=-1, lineType=cv2.LINE_AA
        )

    # Blend the drawn overlay with the original image.
    return cv2.addWeighted(img, 1.0 - alpha, kp_mask, alpha, 0)


def render_mesh(mesh, face, cam_param, bkg, blend_ratio=1.0, return_bg_mask=False):
    """Render a mesh over a background image with PyTorch3D (CUDA required).

    Args:
        mesh: (V, 3) float tensor of vertex positions in camera coordinates.
        face: (F, 3) integer array of triangle vertex indices.
        cam_param: dict with "focal" (fx, fy) and "princpt" (cx, cy) tensors,
            in pixel units (screen-space intrinsics, not NDC).
        bkg: HxW x3 background image; its size defines the render resolution.
        blend_ratio: weight of the rendered shading over the background inside
            the mesh silhouette.
        return_bg_mask: if True, also return the background mask.

    Returns:
        Rendered HxWx3 float image in 0-255 range; if return_bg_mask is True,
        a tuple (render, is_bkg) where is_bkg is 1.0 at background pixels.
    """
    mesh = mesh.cuda()[None, :, :]
    face = torch.LongTensor(face.astype(np.int64)).cuda()[None, :, :]
    cam_param = {k: v.cuda()[None, :] for k, v in cam_param.items()}
    render_shape = (bkg.shape[0], bkg.shape[1])  # height, width

    batch_size, vertex_num = mesh.shape[:2]
    textures = TexturesVertex(
        verts_features=torch.ones((batch_size, vertex_num, 3)).float().cuda()
    )
    # Reverse x- and y-axis following the PyTorch3D axis convention.
    mesh = torch.stack((-mesh[:, :, 0], -mesh[:, :, 1], mesh[:, :, 2]), 2)
    mesh = Meshes(mesh, face, textures)

    cameras = PerspectiveCameras(
        focal_length=cam_param["focal"],
        principal_point=cam_param["princpt"],
        device="cuda",
        in_ndc=False,  # intrinsics are given in screen (pixel) space
        image_size=torch.LongTensor(render_shape).cuda().view(1, 2),
    )
    raster_settings = RasterizationSettings(
        image_size=render_shape, blur_radius=0.0, faces_per_pixel=1, bin_size=0
    )
    rasterizer = MeshRasterizer(cameras=cameras, raster_settings=raster_settings).cuda()
    lights = PointLights(device="cuda")
    shader = SoftPhongShader(device="cuda", cameras=cameras, lights=lights)
    # No specular component: flat, diffuse-only shading.
    materials = Materials(
        device="cuda", specular_color=[[0.0, 0.0, 0.0]], shininess=0.0
    )

    # Render (fragments are kept so the zbuf can be used for masking).
    with torch.no_grad():
        renderer = MeshRendererWithFragments(rasterizer=rasterizer, shader=shader)
        images, fragments = renderer(mesh, materials=materials)

    # Background masking: zbuf <= 0 means no face covered that pixel.
    is_bkg = (fragments.zbuf <= 0).float().cpu().numpy()[0]
    render = images[0, :, :, :3].cpu().numpy()

    # Blend foreground shading with the (0-1 scaled) background, then paste
    # back over the original 0-255 background outside the mesh silhouette.
    fg = render * blend_ratio + bkg / 255 * (1 - blend_ratio)
    render = fg * (1 - is_bkg) * 255 + bkg * is_bkg
    if return_bg_mask:
        return render, is_bkg
    return render