# AnyBox / project / grasp_box / submodules / SAM6D / pose_estimator.py
# Uploaded by savoji using the upload-large-folder tool (commit b24c748, verified).
import os
import sys
import numpy as np
import importlib
import torch
import cv2
import trimesh
import yaml
import copy
import numpy as np
import torchvision.transforms as transforms
from PIL import Image
import open3d as o3d
ROOT_DIR = "/home/kyber/charles/project/grasp_box/submodules/SAM6D"
sys.path.append(os.path.join(ROOT_DIR, 'provider'))
sys.path.append(os.path.join(ROOT_DIR, 'utils'))
# sys.path.append(os.path.join(ROOT_DIR, 'config'))
sys.path.append(os.path.join(ROOT_DIR, 'model'))
sys.path.append(os.path.join(ROOT_DIR, 'model', 'pointnet2'))
from data_utils import (
load_im,
get_bbox,
get_point_cloud_from_depth,
get_resize_rgb_choose,
trimesh_to_open3d
)
from draw_utils import draw_detections
from submodules.SAM6D.config.config import Config
# ImageNet-standard preprocessing applied to every RGB crop fed to the network
# (ToTensor scales to [0, 1]; Normalize uses the usual ImageNet mean/std).
rgb_transform = transforms.Compose([transforms.ToTensor(),
                                    transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                                         std=[0.229, 0.224, 0.225])])
def visualize(rgb, pred_rot, pred_trans, model_points, K, save_path):
    """Render the predicted pose(s) onto *rgb* and return the overlay image.

    Args:
        rgb: HxWx3 input image.
        pred_rot: Predicted rotation matrix/matrices.
        pred_trans: Predicted translation(s).
        model_points: Sampled CAD model points projected for drawing.
        K: Camera intrinsics used for projection.
        save_path: Unused; kept for backward compatibility with callers.

    Returns:
        HxWx3 uint8 numpy array with the detections drawn in red.
    """
    img = draw_detections(rgb, pred_rot, pred_trans, model_points, K, color=(255, 0, 0))
    # Round-trip through PIL to guarantee a contiguous uint8 RGB array.
    # (The original also built a side-by-side concat of rgb and the overlay,
    # but never returned or saved it — that dead code has been removed.)
    img = Image.fromarray(np.uint8(img))
    return np.array(img)
class SAM6DPoseEstimator:
    """6-DoF object pose estimator built on the SAM-6D pose-estimation model.

    On construction it loads every CAD mesh listed in the CAD database,
    samples model points, loads pre-rendered template views, and extracts
    template features once.  ``inference`` then predicts rotation and
    translation for a masked object in a single RGB-D frame.
    """

    def __init__(
        self,
        config_path: str,
        K: np.ndarray,
        model_cfg_path: str,
        vis: bool
    ):
        """Initialize the pose estimator with the given configuration file.

        Args:
            config_path: Path to a YAML file with a top-level ``cad_database``
                mapping object names to mesh file paths.  When ``None``, a
                built-in single-entry database is used.
            K: 3x3 camera intrinsics matrix.
            model_cfg_path: Path to the model config read by ``Config.fromfile``.
            vis: When True, ``inference`` also renders a pose visualization.
        """
        if config_path is not None:
            with open(config_path, "r") as file:
                self.config = yaml.safe_load(file)
            self.cad_database = self.config["cad_database"]
        else:
            # BUGFIX: this was assigned to the misspelled attribute
            # `cad_databset`, leaving `self.cad_database` undefined on this
            # path and crashing later in load_all_mesh().
            self.cad_database = {"box": '/home/kyber/charles/project/ManiSkill3/src/maniskill2_benchmark/msx-envs/src/msx_envs/assets/object/box_01/box_01.gltf'}
        self.cfg = Config.fromfile(model_cfg_path)
        self.cfg.exp_name = "test"
        self.cfg.model_name = "pose_estimation_model"
        self.cfg.log_dir = "log"
        self.cfg.output_dir = "log"
        self.cfg.det_score_thresh = 0.2
        self.K = K
        self.vis = vis
        MODEL = importlib.import_module(self.cfg.model_name)
        self.model = MODEL.Net(self.cfg.model)
        self.model = self.model.cuda()
        self.model.eval()
        # TODO(review): checkpoint path is machine-specific; move into config.
        self.model.load_state_dict(torch.load("/home/kyber/charles/project/grasp_box/weights/SAM_6D/sam-6d-pem-base.pth", weights_only=True)['model'])
        self.cad_cache = {}
        self.load_all_mesh()

    def _get_template(self, obj, tem_index=1, scale=None):
        """Load one pre-rendered template view (rgb / mask / xyz) for *obj*.

        Templates are expected next to the object's mesh file as
        ``rgb_<i>.png`` / ``mask_<i>.png`` / ``xyz_<i>.npy``.

        Args:
            obj: Key into ``self.cad_database``.
            tem_index: Template view index on disk.
            scale: Optional (sx, sy, sz) applied to the template point cloud.

        Returns:
            Tuple (normalized rgb crop tensor, resized-pixel choice indices,
            sampled xyz points).
        """
        path = os.path.dirname(self.cad_database[obj])
        rgb_path = os.path.join(path, 'rgb_'+str(tem_index)+'.png')
        mask_path = os.path.join(path, 'mask_'+str(tem_index)+'.png')
        xyz_path = os.path.join(path, 'xyz_'+str(tem_index)+'.npy')

        rgb = load_im(rgb_path).astype(np.uint8)
        xyz = np.load(xyz_path).astype(np.float32)
        mask = load_im(mask_path).astype(np.uint8) == 255

        if scale is not None:
            # Rescale only the foreground xyz values; background stays at -1.
            H, W = mask.shape
            ys, xs = np.where(mask == 1)
            y1, y2 = ys.min(), ys.max()
            x1, x2 = xs.min(), xs.max()
            xyz_crop = xyz[y1:y2+1, x1:x2+1]
            xyz_flat = xyz_crop.reshape(-1, 3)
            xyz_flat[:, 0] *= scale[0]
            xyz_flat[:, 1] *= scale[1]
            xyz_flat[:, 2] *= scale[2]
            xyz_recover = np.full((H, W, 3), (-1, -1, -1), dtype=np.float32)
            xyz_recover[y1:y2+1, x1:x2+1] = xyz_flat.reshape(y2-y1+1, x2-x1+1, 3)
            xyz = xyz_recover

        bbox = get_bbox(mask)
        y1, y2, x1, x2 = bbox
        mask = mask[y1:y2, x1:x2]

        # Channel flip (RGB<->BGR) to match the order the model expects
        # — presumably mirrors the training pipeline; confirm with load_im.
        rgb = rgb[:, :, ::-1][y1:y2, x1:x2, :]
        if self.cfg.test_dataset.rgb_mask_flag:
            rgb = rgb * (mask[:, :, None] > 0).astype(np.uint8)

        rgb = cv2.resize(rgb, (self.cfg.test_dataset.img_size, self.cfg.test_dataset.img_size), interpolation=cv2.INTER_LINEAR)
        rgb = rgb_transform(np.array(rgb))

        choose = (mask > 0).astype(np.float32).flatten().nonzero()[0]
        if len(choose) <= self.cfg.test_dataset.n_sample_template_point:
            # Too few foreground pixels: sample with replacement.
            choose_idx = np.random.choice(np.arange(len(choose)), self.cfg.test_dataset.n_sample_template_point)
        else:
            choose_idx = np.random.choice(np.arange(len(choose)), self.cfg.test_dataset.n_sample_template_point, replace=False)
        choose = choose[choose_idx]
        xyz = xyz[y1:y2, x1:x2, :].reshape((-1, 3))[choose, :]

        rgb_choose = get_resize_rgb_choose(choose, [y1, y2, x1, x2], self.cfg.test_dataset.img_size)
        return rgb, rgb_choose, xyz

    def get_templates(self, obj, scale=None):
        """Load ``n_template_view`` evenly spaced template views for *obj*.

        Returns:
            (all_tem, all_tem_pts, all_tem_choose): parallel lists of CUDA
            tensors, one entry per selected template view.
        """
        n_template_view = self.cfg.test_dataset.n_template_view
        all_tem = []
        all_tem_choose = []
        all_tem_pts = []

        total_nView = 42  # number of pre-rendered views available on disk
        for v in range(n_template_view):
            i = int(total_nView / n_template_view * v)
            tem, tem_choose, tem_pts = self._get_template(obj, i, scale)
            all_tem.append(torch.FloatTensor(tem).unsqueeze(0).cuda())
            all_tem_choose.append(torch.IntTensor(tem_choose).long().unsqueeze(0).cuda())
            all_tem_pts.append(torch.FloatTensor(tem_pts).unsqueeze(0).cuda())
        return all_tem, all_tem_pts, all_tem_choose

    def load_all_mesh(self, scale=None, rescale_obj=None):
        """Populate ``self.cad_cache`` with meshes, model points and template features.

        When *rescale_obj* is given, only that object's already-cached mesh is
        rescaled by *scale* and stored under the ``'tmp'`` cache key;
        otherwise every object in the CAD database is loaded from disk.
        """
        if rescale_obj is not None:
            mid_x, mid_y, mid_z = scale
            scale_matrix = np.eye(4)
            scale_matrix[0, 0] = mid_x
            scale_matrix[1, 1] = mid_y
            scale_matrix[2, 2] = mid_z
            mesh = self.cad_cache[rescale_obj]['mesh']
            mesh_s = copy.deepcopy(mesh)
            mesh_s.apply_transform(scale_matrix)
            model_points = mesh_s.sample(self.cfg.test_dataset.n_sample_model_point).astype(np.float32)
            radius = np.max(np.linalg.norm(model_points, axis=1))
            all_tem, all_tem_pts, all_tem_choose = self.get_templates(rescale_obj, scale)
            with torch.no_grad():
                all_tem_pts, all_tem_feat = self.model.feature_extraction.get_obj_feats(all_tem, all_tem_pts, all_tem_choose)
            self.all_mesh_dict[rescale_obj] = mesh_s
            self.cad_cache['tmp'] = {
                'radius': radius,
                'mesh': mesh_s,
                'model_points': model_points,
                'all_tem_pts': all_tem_pts,
                'all_tem_feat': all_tem_feat,
            }
            return

        all_mesh_dict = {}
        for obj in self.cad_database:
            print(obj, self.cad_database[obj])
            try:
                mesh = trimesh.load(self.cad_database[obj], force='mesh')
                all_mesh_dict[obj] = mesh
            except Exception as exc:
                # BUGFIX: was a bare `except:` with an uninformative message.
                # Fail fast (as before) but say which mesh is missing and why.
                print(f"missing mesh for '{obj}' at {self.cad_database[obj]}: {exc}")
                exit()
            model_points = mesh.sample(self.cfg.test_dataset.n_sample_model_point).astype(np.float32)
            radius = np.max(np.linalg.norm(model_points, axis=1))
            all_tem, all_tem_pts, all_tem_choose = self.get_templates(obj)
            with torch.no_grad():
                all_tem_pts, all_tem_feat = self.model.feature_extraction.get_obj_feats(all_tem, all_tem_pts, all_tem_choose)
            self.cad_cache[obj] = {
                'radius': radius,
                'mesh': mesh,
                'model_points': model_points,
                'all_tem_pts': all_tem_pts,
                'all_tem_feat': all_tem_feat,
            }
        self.all_mesh_dict = all_mesh_dict
        print("Pose Estimator S6 load meshes:", self.all_mesh_dict.keys())

    def get_data(self, rgb, mask, depth, obj, scale):
        """Build the model input dict for one RGB-D observation of *obj*.

        Args:
            rgb: HxWx3 image.
            mask: HxW object mask.
            depth: HxW depth image; divided by 1000 here
                (assumes millimeters in, meters out — TODO confirm).
            obj: Object key into the CAD cache.
            scale: Optional (sx, sy, sz); triggers an on-the-fly mesh rescale
                cached under ``'tmp'``.

        Returns:
            Dict of tensors consumed by the model, or ``None`` when the mask
            contains no valid depth points.
        """
        if scale is not None:
            self.load_all_mesh(scale, rescale_obj=obj)
        rgb_ori = rgb.copy()
        if scale is not None:
            obj_mesh = self.cad_cache['tmp']
        else:
            obj_mesh = self.cad_cache[obj]

        depth = depth.astype(np.float64)
        depth /= 1000
        whole_pts = get_point_cloud_from_depth(depth, self.K)

        mask_ori = mask.copy()
        # Keep only masked pixels that also have valid (positive) depth.
        mask = np.logical_and(mask > 0, depth > 0)
        bbox = get_bbox(mask)
        y1, y2, x1, x2 = bbox
        mask = mask[y1:y2, x1:x2]
        choose = mask.astype(np.float32).flatten().nonzero()[0]

        cloud = whole_pts.copy()[y1:y2, x1:x2, :].reshape(-1, 3)[choose, :]
        center = np.mean(cloud, axis=0)
        tmp_cloud = cloud - center[None, :]
        # Drop outliers farther than 1.2x the model radius from the centroid.
        flag = np.linalg.norm(tmp_cloud, axis=1) < obj_mesh['radius'] * 1.2
        choose = choose[flag]
        cloud = cloud[flag]

        obs_pts = self.cfg.test_dataset.n_sample_observed_point
        if len(choose) <= obs_pts:
            try:
                choose_idx = np.random.choice(np.arange(len(choose)), obs_pts)
            except ValueError:
                # np.random.choice raises when `choose` is empty: no valid points.
                return None
        else:
            choose_idx = np.random.choice(np.arange(len(choose)), obs_pts, replace=False)
        choose = choose[choose_idx]
        cloud = cloud[choose_idx]

        img_size = self.cfg.test_dataset.img_size
        # Crop to the bbox and flip channel order, matching _get_template.
        rgb = rgb.copy()[y1:y2, x1:x2, :][:, :, ::-1]
        if self.cfg.test_dataset.rgb_mask_flag:
            rgb = rgb * (mask[:, :, None] > 0).astype(np.uint8)
        rgb = cv2.resize(rgb, (img_size, img_size), interpolation=cv2.INTER_LINEAR)
        rgb = rgb_transform(np.array(rgb))
        rgb_choose = get_resize_rgb_choose(choose, [y1, y2, x1, x2], img_size)

        n_try = 1
        ret_dict = {
            'pts': torch.FloatTensor(np.array([cloud]*n_try)).cuda(),
            'rgb': torch.unsqueeze(rgb, 0).repeat(n_try, 1, 1, 1).cuda(),
            'rgb_choose': torch.IntTensor(np.array([rgb_choose]*n_try)).long().cuda(),
            'model': torch.FloatTensor(obj_mesh['model_points']).unsqueeze(0).repeat(n_try, 1, 1).cuda(),
            'K': torch.FloatTensor(self.K).unsqueeze(0).repeat(n_try, 1, 1).cuda(),
            'dense_po': obj_mesh['all_tem_pts'].repeat(n_try, 1, 1),
            'dense_fo': obj_mesh['all_tem_feat'].repeat(n_try, 1, 1),
            "mesh": obj_mesh['mesh'],
            "depth": torch.FloatTensor(depth).cuda(),
            'mask': torch.FloatTensor(mask).cuda(),
            'bbox': torch.FloatTensor([y1, y2, x1, x2]),
            'rgb_ori': torch.FloatTensor(rgb_ori).cuda(),
            'mask_ori': torch.FloatTensor(mask_ori).cuda(),
        }
        return ret_dict

    def inference(self, rgb, mask, depth, obj, scale=None):
        """Predict the 6-DoF pose of *obj* in one RGB-D frame.

        Returns:
            Tuple (pose_scores, pred_rot, pred_trans, vis, points);
            (None, None, None, None, None) when no valid input data exists.
        """
        ret_dict = self.get_data(rgb, mask, depth, obj, scale)
        if ret_dict is None:  # BUGFIX: was `== None`
            return None, None, None, None, None
        # NOTE(review): forward pass is not wrapped in torch.no_grad();
        # left as-is in case a caller relies on gradients — confirm.
        out, points = self.model(ret_dict)
        if out['pred_pose_score'] is not None:  # BUGFIX: was `!= None`
            pose_scores = out['pred_pose_score'].detach().cpu().numpy()
        else:
            pose_scores = None
        pred_rot = out['pred_R'].detach().cpu().numpy()
        pred_trans = out['pred_t'].detach().cpu().numpy()
        vis = rgb.copy()
        if self.vis:
            # A rescaled mesh is cached under 'tmp'; otherwise use the object's
            # own cache entry.  (The two duplicated call branches were merged.)
            cache_key = 'tmp' if scale is not None else obj
            vis = visualize(rgb, pred_rot, pred_trans, self.cad_cache[cache_key]["model_points"], ret_dict["K"].cpu(), f"SAM6D_{obj}.png")
        return pose_scores, pred_rot, pred_trans, vis, points