# NOTE: the three lines below are Hugging Face web-page residue captured when
# this file was scraped/uploaded — commented out so the module parses.
# savoji's picture
# Add files using upload-large-folder tool
# b24c748 verified
import torch
import torch.nn as nn
from torch.nn import functional as F
import open3d as o3d
import trimesh
import copy
import time
import cv2
import logging
import numpy as np
import pytorch3d
from pytorch3d.io import load_objs_as_meshes, load_obj
from pytorch3d.structures import Meshes
from pytorch3d.vis.plotly_vis import AxisArgs, plot_batch_individually, plot_scene
from pytorch3d.vis.texture_vis import texturesuv_image_matplotlib
from pytorch3d.renderer import (
PerspectiveCameras,
PointLights,
RasterizationSettings,
MeshRenderer,
MeshRasterizer,
SoftPhongShader,
)
from transformers import AutoTokenizer, AutoImageProcessor, AutoModel
from transformers import AutoProcessor, CLIPVisionModelWithProjection
from transformers import CLIPProcessor, CLIPModel
from sklearn.metrics.pairwise import cosine_similarity
import ssl
import os
# WARNING(review): the two lines below globally disable TLS certificate
# verification for every HTTPS request made by this process (presumably to
# work around SSL errors when downloading pretrained models). This is
# insecure — prefer installing proper CA certificates or scoping the
# workaround to the failing download only.
os.environ['CURL_CA_BUNDLE'] = ''
ssl._create_default_https_context = ssl._create_unverified_context
# Project-local CUDA ops for farthest-point sampling / index gathering.
from pointnet2_utils import (
    gather_operation,
    furthest_point_sample,
)
class LayerNorm2d(nn.Module):
    """LayerNorm over the channel dimension of NCHW tensors.

    Each spatial location is normalized across its channels, then scaled and
    shifted by learned per-channel affine parameters.
    """

    def __init__(self, num_channels: int, eps: float = 1e-6) -> None:
        super().__init__()
        self.weight = nn.Parameter(torch.ones(num_channels))
        self.bias = nn.Parameter(torch.zeros(num_channels))
        self.eps = eps

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Normalize ``x`` (B, C, H, W) across dim 1 and apply the affine."""
        mean = x.mean(dim=1, keepdim=True)
        centered = x - mean
        var = centered.pow(2).mean(dim=1, keepdim=True)
        normed = centered / torch.sqrt(var + self.eps)
        return self.weight[:, None, None] * normed + self.bias[:, None, None]
def interpolate_pos_embed(model, checkpoint_model):
    """Resize a checkpoint's ``pos_embed`` in place to match ``model``'s patch grid.

    Extra leading tokens (e.g. class / distillation tokens) are carried over
    unchanged; the positional tokens are treated as a square grid and
    bicubically interpolated to the model's grid size. ``checkpoint_model``
    is mutated; nothing is returned.
    """
    if 'pos_embed' not in checkpoint_model:
        return
    ckpt_pos = checkpoint_model['pos_embed']
    embed_dim = ckpt_pos.shape[-1]
    n_patches = model.patch_embed.num_patches
    n_extra = model.pos_embed.shape[-2] - n_patches
    # side lengths of the (assumed square) source and target patch grids
    src_side = int((ckpt_pos.shape[-2] - n_extra) ** 0.5)
    dst_side = int(n_patches ** 0.5)
    if src_side == dst_side:
        return
    print("Position interpolate from %dx%d to %dx%d" % (src_side, src_side, dst_side, dst_side))
    extra_tokens = ckpt_pos[:, :n_extra]
    grid = ckpt_pos[:, n_extra:].reshape(-1, src_side, src_side, embed_dim)
    grid = grid.permute(0, 3, 1, 2)  # NHWC -> NCHW for interpolate
    grid = F.interpolate(grid, size=(dst_side, dst_side), mode='bicubic', align_corners=False)
    grid = grid.permute(0, 2, 3, 1).flatten(1, 2)
    checkpoint_model['pos_embed'] = torch.cat((extra_tokens, grid), dim=1)
def sample_pts_feats(pts, feats, npoint=2048, return_index=False):
    """Downsample a point cloud and its features via farthest-point sampling.

    Args:
        pts: (B, N, 3) point coordinates.
        feats: (B, N, C) per-point features.
        npoint: number of points to keep.
        return_index: when True, also return the sampled indices.
    Returns:
        (pts, feats) with N reduced to npoint, plus sample_idx if requested.
    """
    sample_idx = furthest_point_sample(pts, npoint)
    # gather_operation expects channel-first (B, C, N) layout
    sampled_pts = gather_operation(pts.transpose(1, 2).contiguous(), sample_idx)
    sampled_pts = sampled_pts.transpose(1, 2).contiguous()
    sampled_feats = gather_operation(feats.transpose(1, 2).contiguous(), sample_idx)
    sampled_feats = sampled_feats.transpose(1, 2).contiguous()
    if return_index:
        return sampled_pts, sampled_feats, sample_idx
    return sampled_pts, sampled_feats
def get_chosen_pixel_feats(img, choose):
    """Gather per-pixel features at the chosen flattened-pixel indices.

    Args:
        img: (B, C, H, W) feature map, or (B, C, N) already-flattened features.
        choose: (B, M) long indices into the flattened spatial dimension.
    Returns:
        (B, M, C) gathered features.
    Raises:
        ValueError: if ``img`` is neither 3-D nor 4-D.
    """
    shape = img.size()
    if len(shape) == 3:
        # BUG FIX: the original left B/C unbound here (bare `pass`), so any
        # 3-D input crashed with NameError at the repeat() call below.
        B, C, _ = shape
    elif len(shape) == 4:
        B, C, H, W = shape
        img = img.reshape(B, C, H * W)
    else:
        raise ValueError("img must be 3-D (B,C,N) or 4-D (B,C,H,W), got %d-D" % len(shape))
    # replicate the indices across channels so gather picks whole feature columns
    choose = choose.unsqueeze(1).repeat(1, C, 1)
    x = torch.gather(img, 2, choose).contiguous()
    return x.transpose(1, 2).contiguous()
def pairwise_distance(
    x: torch.Tensor, y: torch.Tensor, normalized: bool = False, channel_first: bool = False
) -> torch.Tensor:
    r"""Squared pairwise distances between two (batched) point sets.

    Args:
        x (Tensor): (*, N, C), or (*, C, N) when ``channel_first`` is True.
        y (Tensor): (*, M, C), or (*, C, M) when ``channel_first`` is True.
        normalized (bool): when the points are unit vectors, ``||a-b||^2``
            simplifies to ``2 - 2<a, b>``.
        channel_first (bool): points are stored channel-first.
    Returns:
        Tensor: (*, N, M) squared distances, clamped to be non-negative.
    """
    if channel_first:
        cross = torch.matmul(x.transpose(-1, -2), y)
        sum_dim = -2
    else:
        cross = torch.matmul(x, y.transpose(-1, -2))
        sum_dim = -1
    if normalized:
        sq_dist = 2.0 - 2.0 * cross
    else:
        # expand ||a-b||^2 = ||a||^2 - 2<a,b> + ||b||^2
        xx = torch.sum(x ** 2, dim=sum_dim).unsqueeze(-1)
        yy = torch.sum(y ** 2, dim=sum_dim).unsqueeze(-2)
        sq_dist = xx - 2 * cross + yy
    return sq_dist.clamp(min=0.0)
def compute_feature_similarity(feat1, feat2, type='cosine', temp=1.0, normalize_feat=True):
    r"""Temperature-scaled similarity matrix between two feature sets.

    Args:
        feat1 (Tensor): (B, N, C)
        feat2 (Tensor): (B, M, C)
        type (str): 'cosine' (dot product) or 'L2' (Euclidean distance).
        temp (float): temperature divisor applied to the result.
        normalize_feat (bool): L2-normalize features along C first.
    Returns:
        Tensor: (B, N, M) similarity (or distance, for 'L2') matrix.
    """
    if normalize_feat:
        feat1 = F.normalize(feat1, p=2, dim=2)
        feat2 = F.normalize(feat2, p=2, dim=2)
    if type == 'cosine':
        sim = torch.bmm(feat1, feat2.transpose(1, 2))
    elif type == 'L2':
        sim = torch.sqrt(pairwise_distance(feat1, feat2))
    else:
        assert False
    return sim / temp
def compute_triangle_normals(pts):
    """Return the magnitude of each triangle's (unnormalized) normal vector.

    NOTE(review): despite the name, this returns |cross(e1, e2)| — i.e. twice
    each triangle's area — not the normal vectors themselves.

    Args:
        pts: (1, M, 3, 3) tensor of M triangles, each given by three vertices.
    Returns:
        (1, M, 1) tensor of normal magnitudes.
    """
    triangles = pts.squeeze(0)
    edge1 = triangles[:, 1] - triangles[:, 0]
    edge2 = triangles[:, 2] - triangles[:, 0]
    normals = torch.cross(edge1, edge2, dim=1)
    magnitudes = torch.norm(normals, dim=1, keepdim=True)
    return magnitudes.unsqueeze(0)
def aug_pose_noise(gt_r, gt_t,
                   std_rots=(15, 10, 5, 1.25, 1),
                   max_rot=45,
                   sel_std_trans=(0.2, 0.2, 0.2),
                   max_trans=0.8):
    """Add random rotation / translation noise to ground-truth poses.

    FIX: the default arguments were mutable lists; changed to tuples with the
    same values (they were never mutated, but mutable defaults are a hazard).

    Args:
        gt_r: (B, 3, 3) ground-truth rotations.
        gt_t: (B, 3) ground-truth translations.
        std_rots: candidate rotation-noise stddevs (degrees); one is picked
            uniformly at random per call.
        max_rot: clamp for the per-axis rotation noise (degrees).
        sel_std_trans: per-axis translation-noise stddev.
        max_trans: clamp for the per-axis translation noise.
    Returns:
        (rand_rot, rand_trans): detached noisy rotations (B, 3, 3) and
        translations (B, 3); the z translation is clamped positive so the
        object stays in front of the camera.
    """
    B = gt_r.size(0)
    device = gt_r.device
    # sample per-axis noise angles (degrees) at a randomly chosen noise level
    std_rot = np.random.choice(std_rots)
    angles = torch.normal(mean=0, std=std_rot, size=(B, 3)).to(device=device)
    angles = angles.clamp(min=-max_rot, max=max_rot)
    ones = gt_r.new(B, 1, 1).zero_() + 1
    zeros = gt_r.new(B, 1, 1).zero_()
    # rotation about z by angles[:, 0]
    a1 = angles[:,0].reshape(B, 1, 1) * np.pi / 180.0
    a1 = torch.cat(
        [torch.cat([torch.cos(a1), -torch.sin(a1), zeros], dim=2),
        torch.cat([torch.sin(a1), torch.cos(a1), zeros], dim=2),
        torch.cat([zeros, zeros, ones], dim=2)], dim=1
    )
    # rotation about x by angles[:, 1]
    a2 = angles[:,1].reshape(B, 1, 1) * np.pi / 180.0
    a2 = torch.cat(
        [torch.cat([ones, zeros, zeros], dim=2),
        torch.cat([zeros, torch.cos(a2), -torch.sin(a2)], dim=2),
        torch.cat([zeros, torch.sin(a2), torch.cos(a2)], dim=2)], dim=1
    )
    # rotation about y by angles[:, 2]
    a3 = angles[:,2].reshape(B, 1, 1) * np.pi / 180.0
    a3 = torch.cat(
        [torch.cat([torch.cos(a3), zeros, torch.sin(a3)], dim=2),
        torch.cat([zeros, ones, zeros], dim=2),
        torch.cat([-torch.sin(a3), zeros, torch.cos(a3)], dim=2)], dim=1
    )
    rand_rot = a1 @ a2 @ a3
    # per-axis Gaussian translation noise, clamped
    rand_trans = torch.normal(
        mean=torch.zeros([B, 3]).to(device),
        std=torch.tensor(sel_std_trans, device=device).view(1, 3),
    )
    rand_trans = torch.clamp(rand_trans, min=-max_trans, max=max_trans)
    rand_rot = gt_r @ rand_rot
    rand_trans = gt_t + rand_trans
    rand_trans[:,2] = torch.clamp(rand_trans[:,2], min=1e-6)  # keep z positive
    return rand_rot.detach(), rand_trans.detach()
def compute_coarse_Rt(
    end_points,
    atten,
    pts1,
    pts2,
    depth,
    radius,
    mask,
    bbox,
    model_pts=None,
    n_proposal1=6000,
    n_proposal2=300,
):
    """Coarse pose estimation from a point-matching attention matrix.

    Samples correspondence triplets proportionally to the soft-assignment
    scores, fits `n_proposal1` rigid-pose hypotheses with weighted SVD, keeps
    the `n_proposal2` hypotheses with the lowest alignment residual, and
    returns the single hypothesis with the best inlier score.

    Cleanup notes (this revision):
    - Removed several hundred lines of commented-out experiments and two
      dead ``if 0:`` debug-visualization blocks.
    - Removed live-but-unused debug computations (CAD-center depth lookup,
      open3d mesh sampling) whose only consumers were the dead code.
    - BUG FIX: the original ended with ``return pred_R, pred_t, _`` where
      ``_`` was the leftover binding from ``B, N1, _ = pts1.size()`` (i.e. the
      integer 3); the third slot now returns None explicitly, matching the
      commented-out alternatives and compute_fine_Rt's convention.

    Args:
        end_points: batch dict (kept for interface compatibility; no longer
            read after the debug-code removal).
        atten: (B, N1+1, N2+1) matching logits with a background row/column.
        pts1: (B, N1, 3) observed points (radius-normalized).
        pts2: (B, N2, 3) model points.
        depth, radius, mask, bbox: kept for interface compatibility.
        model_pts: optional (B, M, 3) model points for scoring; defaults to pts2.
        n_proposal1: number of sampled pose hypotheses.
        n_proposal2: number of hypotheses kept after the residual filter.
    Returns:
        (pred_R, pred_t, None): (B, 3, 3) rotation and (B, 3) translation.
    """
    WSVD = WeightedProcrustes()
    B, N1, _ = pts1.size()
    N2 = pts2.size(1)
    device = pts1.device

    # Soft assignment: product of row-wise and column-wise softmax; the first
    # row/column act as a background bin.
    pred_score = torch.softmax(atten, dim=2) * torch.softmax(atten, dim=1)
    pred_label1 = torch.max(pred_score[:, 1:, :], dim=2)[1]
    pred_label2 = torch.max(pred_score[:, :, 1:], dim=1)[1]
    weights1 = (pred_label1 > 0).float()  # pts1 points matched to a real model point
    weights2 = (pred_label2 > 0).float()
    pred_score = pred_score[:, 1:, 1:].contiguous()
    pred_score = pred_score * weights1.unsqueeze(2) * weights2.unsqueeze(1)
    pred_score = pred_score.reshape(B, N1 * N2) ** 1.5  # sharpen the distribution

    # Sample 3 correspondences per hypothesis via inverse-CDF sampling on the
    # flattened score matrix.
    cumsum_weights = torch.cumsum(pred_score, dim=1)
    cumsum_weights /= (cumsum_weights[:, -1].unsqueeze(1).contiguous() + 1e-8)
    idx = torch.searchsorted(cumsum_weights, torch.rand(B, n_proposal1 * 3, device=device))
    idx1, idx2 = idx.div(N2, rounding_mode='floor'), idx % N2
    idx1 = torch.clamp(idx1, max=N1 - 1).unsqueeze(2).repeat(1, 1, 3)
    idx2 = torch.clamp(idx2, max=N2 - 1).unsqueeze(2).repeat(1, 1, 3)
    p1 = torch.gather(pts1, 1, idx1).reshape(B, n_proposal1, 3, 3).reshape(B * n_proposal1, 3, 3)
    p2 = torch.gather(pts2, 1, idx2).reshape(B, n_proposal1, 3, 3).reshape(B * n_proposal1, 3, 3)

    # Fit one rigid transform per sampled triplet (model -> camera direction).
    pred_rs, pred_ts = WSVD(p2, p1, None)
    pred_rs = pred_rs.reshape(B, n_proposal1, 3, 3)
    pred_ts = pred_ts.reshape(B, n_proposal1, 1, 3)
    p1 = p1.reshape(B, n_proposal1, 3, 3)
    p2 = p2.reshape(B, n_proposal1, 3, 3)

    # Residual of each hypothesis on its own triplet (original SAM-6D criterion).
    dis = torch.norm((p1 - pred_ts) @ pred_rs - p2, dim=3).mean(2)

    # Keep the n_proposal2 hypotheses with the lowest residual.
    idx = torch.topk(dis, n_proposal2, dim=1, largest=False)[1].sort()[0].squeeze(0)
    pred_rs = torch.gather(pred_rs, 1, idx.reshape(B, idx.shape[0], 1, 1).repeat(1, 1, 3, 3))
    pred_ts = torch.gather(pred_ts, 1, idx.reshape(B, idx.shape[0], 1, 1).repeat(1, 1, 1, 3))

    # Score each surviving hypothesis by how tightly all matched observed
    # points land on the model point cloud after the inverse transform.
    transformed_pts = (pts1.unsqueeze(1) - pred_ts) @ pred_rs
    transformed_pts = transformed_pts.reshape(B * idx.shape[0], -1, 3)
    if model_pts is None:
        model_pts = pts2
    expand_model_pts = model_pts.unsqueeze(1).repeat(1, idx.shape[0], 1, 1).reshape(B * idx.shape[0], -1, 3)
    dis = torch.sqrt(pairwise_distance(transformed_pts, expand_model_pts))
    dis = dis.min(2)[0].reshape(B, idx.shape[0], -1)
    scores = weights1.unsqueeze(1).sum(2) / ((dis * weights1.unsqueeze(1)).sum(2) + 1e-8)

    # Select the single best-scoring hypothesis.
    best = torch.sort(scores, descending=True)[1][0, 0]
    pred_R = torch.gather(pred_rs, 1, best.reshape(B, 1, 1, 1).repeat(1, 1, 3, 3)).squeeze(1)
    pred_t = torch.gather(pred_ts, 1, best.reshape(B, 1, 1, 1).repeat(1, 1, 1, 3)).squeeze(2).squeeze(1)
    return pred_R, pred_t, None
def compute_fine_Rt(
    atten,
    pts1,
    pts2,
    radius,
    end_points,
    model_pts=None,
    dis_thres=0.15
):
    """Fine pose estimation: dense soft assignment + one weighted SVD fit.

    Cleanup notes (this revision): removed a dead ``if 0:`` depth-filter
    block and ~70 lines of commented-out edge/depth-refinement experiments;
    fixed the ``assginment`` typos in local names. Live behavior unchanged.

    Args:
        atten: (B, N1+1, N2+1) matching logits with a background row/column.
        pts1: (B, N1, 3) observed points.
        pts2: (B, N2, 3) model points.
        radius, end_points: kept for interface compatibility (only consumed
            by the removed debug code).
        model_pts: optional model points used for scoring; defaults to pts2.
        dis_thres: inlier distance threshold for the pose score.
    Returns:
        (pred_R, pred_t, pose_score, None)
    """
    if model_pts is None:
        model_pts = pts2

    # Soft assignment with a background bin, masked to mutually-matched points.
    WSVD = WeightedProcrustes(weight_thresh=0.0)
    assignment_mat = torch.softmax(atten, dim=2) * torch.softmax(atten, dim=1)
    label1 = torch.max(assignment_mat[:, 1:, :], dim=2)[1]
    label2 = torch.max(assignment_mat[:, :, 1:], dim=1)[1]
    assignment_mat = assignment_mat[:, 1:, 1:] * (label1 > 0).float().unsqueeze(2) * (label2 > 0).float().unsqueeze(1)

    # Expected model point per observed point, plus a per-point confidence,
    # then a single weighted-Procrustes fit.
    normalized_assignment_mat = assignment_mat / (assignment_mat.sum(2, keepdim=True) + 1e-6)
    pred_pts = normalized_assignment_mat @ pts2
    assignment_score = assignment_mat.sum(2)
    pred_R, pred_t = WSVD(pred_pts, pts1, assignment_score)

    # Pose score: inlier ratio among matched points, down-weighted by the
    # overall fraction of matched points.
    pred_pts = (pts1 - pred_t.unsqueeze(1)) @ pred_R
    dis = torch.sqrt(pairwise_distance(pred_pts, model_pts)).min(2)[0]
    mask = (label1 > 0).float()
    pose_score = (dis < dis_thres).float()
    pose_score = (pose_score * mask).sum(1) / (mask.sum(1) + 1e-8)
    pose_score = pose_score * mask.mean(1)
    return pred_R, pred_t, pose_score, None
def compute_embedding(image):
    """Embed a single RGB image into a unit-normalized CLIP feature vector.

    Args:
        image: one image in any format accepted by ``CLIPProcessor``
            (PIL image or HxWxC numpy array) — TODO confirm caller format.

    Returns:
        torch.Tensor of shape (1, D): L2-normalized CLIP image embedding,
        detached from the autograd graph, on CPU.
    """
    # NOTE(review): the original body contained an unreachable DINOv2 branch
    # guarded by ``if 0:`` and an unused ``CLIPModel`` load; both removed.
    # Models are re-downloaded/loaded on every call — hoist to module level
    # (or cache) if this function lands on a hot path.
    clip_name = "openai/clip-vit-base-patch32"
    vision_model = CLIPVisionModelWithProjection.from_pretrained(clip_name)
    processor = CLIPProcessor.from_pretrained(clip_name)
    inputs = processor(images=[image], return_tensors="pt", padding=True)
    inputs = inputs.to('cpu')
    with torch.no_grad():  # inference only — identical values, less memory
        image_outputs = vision_model(**inputs)
    img_feats = image_outputs.image_embeds.view(1, -1)
    # L2-normalize so cosine similarity between embeddings reduces to a dot product
    img_feats = img_feats / img_feats.norm(p=2, dim=-1, keepdim=True)
    return img_feats.detach()
def backproject_points(xy_points, depth_image, K):
    """Back-project 2-D pixel coordinates into 3-D camera-frame points.

    Each pixel (u, v) is rounded to the nearest integer, its depth z is read
    from ``depth_image[v, u]``, and the 3-D point is ``z * K^{-1} @ [u, v, 1]``.

    Args:
        xy_points: iterable of (u, v) pixel coordinates; floats are rounded
            to the nearest pixel before sampling the depth map.
        depth_image: 2-D array indexed row-major as depth_image[v, u].
        K: (3, 3) camera intrinsic matrix.

    Returns:
        (N, 3) numpy array of camera-frame points. An empty input yields a
        consistent (0, 3) array (the old per-point loop returned shape (0,)).
    """
    xy = np.asarray(xy_points, dtype=np.float64).reshape(-1, 2)
    if xy.shape[0] == 0:
        return np.zeros((0, 3))
    depth = np.asarray(depth_image)
    # np.rint matches Python 3 round() (banker's rounding), preserving the
    # original int(round(...)) pixel selection.
    uv = np.rint(xy).astype(int)
    z = depth[uv[:, 1], uv[:, 0]]  # depth map is indexed [row=v, col=u]
    pixels_homog = np.column_stack([uv.astype(np.float64), np.ones(len(uv))])  # (N, 3)
    K_inv = np.linalg.inv(K)
    # Row-wise K^{-1} @ [u, v, 1]^T, then scale each ray by its depth.
    rays = pixels_homog @ K_inv.T
    return rays * z[:, None]
def weighted_procrustes(
    src_points,
    ref_points,
    weights=None,
    weight_thresh=0.0,
    eps=1e-5,
    return_transform=False,
    src_centroid=None,
    ref_centroid=None,
):
    r"""Compute rigid transformation from `src_points` to `ref_points` using weighted SVD.
    Modified from [PointDSC](https://github.com/XuyangBai/PointDSC/blob/master/models/common.py).
    Args:
        src_points: torch.Tensor (B, N, 3) or (N, 3)
        ref_points: torch.Tensor (B, N, 3) or (N, 3)
        weights: torch.Tensor (B, N) or (N,) (default: None)
        weight_thresh: float (default: 0.); weights below it are zeroed out
        eps: float (default: 1e-5); guards the weight normalization
        return_transform: bool (default: False)
        src_centroid: optional precomputed source centroid (B, 1, 3) or (B, 3)
        ref_centroid: optional precomputed reference centroid (B, 1, 3) or (B, 3)
    Returns:
        R: torch.Tensor (B, 3, 3) or (3, 3)
        t: torch.Tensor (B, 3) or (3,)
        transform: torch.Tensor (B, 4, 4) or (4, 4) (only if return_transform)
    """
    # Promote unbatched (N, 3) inputs to a batch of one; squeeze back on return.
    if src_points.ndim == 2:
        src_points = src_points.unsqueeze(0)
        ref_points = ref_points.unsqueeze(0)
        if weights is not None:
            weights = weights.unsqueeze(0)
        squeeze_first = True
    else:
        squeeze_first = False
    batch_size = src_points.shape[0]
    if weights is None:
        weights = torch.ones_like(src_points[:, :, 0])
    # Zero out low-confidence correspondences, then normalize to sum to ~1.
    weights = torch.where(torch.lt(weights, weight_thresh), torch.zeros_like(weights), weights)
    weights = weights / (torch.sum(weights, dim=1, keepdim=True) + eps)
    weights = weights.unsqueeze(2)  # (B, N, 1)
    if src_centroid is None:
        src_centroid = torch.sum(src_points * weights, dim=1, keepdim=True)  # (B, 1, 3)
    elif len(src_centroid.size()) == 2:
        src_centroid = src_centroid.unsqueeze(1)
    src_points_centered = src_points - src_centroid  # (B, N, 3)
    if ref_centroid is None:
        ref_centroid = torch.sum(ref_points * weights, dim=1, keepdim=True)  # (B, 1, 3)
    elif len(ref_centroid.size()) == 2:
        ref_centroid = ref_centroid.unsqueeze(1)
    ref_points_centered = ref_points - ref_centroid  # (B, N, 3)
    # Weighted cross-covariance; SVD gives the optimal rotation (Kabsch).
    H = src_points_centered.permute(0, 2, 1) @ (weights * ref_points_centered)
    # NOTE(review): torch.svd is deprecated in favor of torch.linalg.svd, but
    # the two differ in V-vs-Vh convention; kept as-is to preserve behavior.
    U, _, V = torch.svd(H)
    Ut, V = U.transpose(1, 2), V
    eye = torch.eye(3).unsqueeze(0).repeat(batch_size, 1, 1).to(src_points.device)
    # Flip the last singular direction when det < 0 to avoid reflections.
    eye[:, -1, -1] = torch.sign(torch.det(V @ Ut))
    R = V @ eye @ Ut
    t = ref_centroid.permute(0, 2, 1) - R @ src_centroid.permute(0, 2, 1)
    t = t.squeeze(2)
    if return_transform:
        # BUG FIX: was hard-coded .cuda(), which crashed on CPU-only inputs;
        # allocate on the same device as the input points instead.
        transform = torch.eye(4).unsqueeze(0).repeat(batch_size, 1, 1).to(src_points.device)
        transform[:, :3, :3] = R
        transform[:, :3, 3] = t
        if squeeze_first:
            transform = transform.squeeze(0)
        return transform
    else:
        if squeeze_first:
            R = R.squeeze(0)
            t = t.squeeze(0)
        return R, t
class WeightedProcrustes(nn.Module):
    """nn.Module wrapper around :func:`weighted_procrustes`.

    Stores the weight threshold, numerical epsilon and output format once at
    construction time and forwards them on every call.
    """

    def __init__(self, weight_thresh=0.5, eps=1e-5, return_transform=False):
        super().__init__()
        self.weight_thresh = weight_thresh
        self.eps = eps
        self.return_transform = return_transform

    def forward(self, src_points, tgt_points, weights=None, src_centroid=None, ref_centroid=None):
        # Delegate to the functional implementation with the stored settings.
        options = dict(
            weights=weights,
            weight_thresh=self.weight_thresh,
            eps=self.eps,
            return_transform=self.return_transform,
            src_centroid=src_centroid,
            ref_centroid=ref_centroid,
        )
        return weighted_procrustes(src_points, tgt_points, **options)