import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from regress_module import VisProcess, VisTR, VisRes
from torchvision import transforms
import matplotlib.pyplot as plt
import sys

sys.path.append('/home/kejianshi/Desktop/Surgical_Robot/science_robotics/stateregress_back/utils')
from general_utils import AttrDict

sys.path.append('/home/kejianshi/Desktop/Surgical_Robot/science_robotics/ar_surrol/surrol/tasks')
from depth_anything.dpt import DepthAnything
from depth_anything.util.transform import Resize, NormalizeImage, PrepareForNet
class vismodel(nn.Module):
    def __init__(self, opts):
        super().__init__()
        self.opts = opts
        self.device = opts.device

        self.img_size = self.opts.img_size
        self.obj_num = 1  # number of objects whose positions are regressed
        self.v_processor = VisRes()
        # Only load the Depth Anything model when depth is not precomputed.
        if not self.opts.use_exist_depth:
            self._load_dam()
|
    def _load_dam(self):
        # Depth Anything ships 'vits', 'vitb' and 'vitl' encoders; 'vitb' is used here.
        encoder = 'vitb'
        self.depth_anything = DepthAnything.from_pretrained(
            'LiheYoung/depth_anything_{:}14'.format(encoder)).eval()
    def _get_depth_with_dam(self, img):
        '''
        input: rgb image tensor of shape Bx3xHxW (spatial dims must be multiples of 14)
        output: per-image min-max normalized depth of shape BxHxW, values in [0, 1]
        '''
        with torch.no_grad():
            depth = self.depth_anything(img)

        depth = F.interpolate(depth[None], self.img_size, mode='bilinear', align_corners=False)[0]
        # Min-max normalize each depth map to [0, 1].
        depth_min = torch.amin(depth, dim=(1, 2), keepdim=True)
        depth_max = torch.amax(depth, dim=(1, 2), keepdim=True)
        depth = (depth - depth_min) / (depth_max - depth_min)

        return depth
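    # --- Added sketch (not part of the original pipeline) --------------------
    # The Resize/NormalizeImage/PrepareForNet transforms are imported above but
    # never used; this is a minimal preprocessing sketch showing how a raw HxWx3
    # image in [0, 1] could be prepared for _get_depth_with_dam, following the
    # upstream Depth Anything recipe. The 518x518 target size is the upstream
    # default and is an assumption here, not a value taken from this codebase.
    def _preprocess_for_dam(self, image):
        from torchvision.transforms import Compose  # local import keeps the sketch self-contained
        transform = Compose([
            Resize(
                width=518,
                height=518,
                resize_target=False,
                keep_aspect_ratio=True,
                ensure_multiple_of=14,  # DPT backbones need 14-multiple spatial dims
                resize_method='lower_bound',
            ),
            NormalizeImage(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
            PrepareForNet(),
        ])
        image = transform({'image': image})['image']  # HxWx3 -> 3xH'xW' float32
        return torch.from_numpy(image).unsqueeze(0).to(self.device)  # add batch dim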
    def normalize_angles(self, x):
        # Wrap angles to [-pi, pi].
        return np.arctan2(np.sin(x), np.cos(x))
    def get_action(self, state, noise=False, v_action=False):
        # Fall back to the base policy's get_action when the vision branch is off.
        # (to_torch, depth_norm, g_norm, o_norm, actor and regress_rbt_state are
        # expected to be provided by the policy class this module is mixed into.)
        if not v_action:
            return super().get_action(state, noise)

        self.v_processor.eval()

        rgb = self.to_torch(state['depth']).unsqueeze(0)  # image frame fed to depth estimation

        depth = self._get_depth_with_dam(rgb)[0]
        depth_norm = self.depth_norm.normalize(
            depth.reshape(-1, 256 * 256), device=self.device).reshape(1, 256, 256)
        seg = self.to_torch(state['seg'])
        seg_d = torch.cat((seg, depth_norm))  # stack segmentation and depth channels

        inputs = seg_d.unsqueeze(0).float().to(self.device)

        with torch.no_grad():
            v_output = self.v_processor(inputs).squeeze()

        o, g = state['observation'], state['desired_goal']
        g = self.g_norm.normalize(g)
        g_norm = torch.tensor(g).float().to(self.device)

        if not self.regress_rbt_state:
            # The first 7 entries of the raw observation are the robot state.
            robot_state = torch.tensor(o[:7]).to(self.device)

            # v_processor predicts object positions relative to the end-effector;
            # convert them back to absolute positions.
            rel_pos = v_output[:3 * self.obj_num]
            new_pos = robot_state[:3] + rel_pos[:3]

            if self.obj_num > 1:
                for i in range(1, self.obj_num):
                    pos = robot_state[:3] + rel_pos[i * 3:(i + 1) * 3]
                    new_pos = torch.cat((new_pos, pos))

            waypoint_pos_rot = v_output[3 * self.obj_num:]

            o_new = torch.cat((robot_state, new_pos))
            o_new = torch.cat((o_new, rel_pos))
            o_new = torch.cat((o_new, waypoint_pos_rot))
            o_norm = self.o_norm.normalize(o_new, device=self.device)
        else:
            o_norm = self.o_norm.normalize(v_output, device=self.device)
        o_norm = torch.tensor(o_norm).float().to(self.device)

        input_tensor = torch.cat((o_norm, g_norm), dim=0).to(torch.float32)

        action = self.actor(input_tensor).cpu().data.numpy().flatten()

        self.v_processor.train()
        return action
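    # For reference, the observation assembled above (regress_rbt_state == False)
    # is laid out as:
    #   o_new = [ robot_state (7) | absolute object pos (3 * obj_num)
    #             | relative object pos (3 * obj_num) | waypoint pos/rot (rest) ]
    # and is concatenated with the normalized desired goal g_norm before the actor.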
|
    def forward(self, seg, v_gt):
        '''
        seg: BxHxW segmentation masks; v_gt: ground-truth vision targets.
        Returns (metrics, pos_loss) for training the vision processor.
        '''
        seg_d = torch.unsqueeze(seg, 1)  # add channel dim: Bx1xHxW

        output = self.v_processor(seg_d)

        # Position loss on the first 3 * obj_num regressed values.
        pos_loss = F.mse_loss(output[:, :3 * self.obj_num], v_gt[:, :3 * self.obj_num])

        metrics = AttrDict(
            v_pos=pos_loss.item(),
        )

        return metrics, pos_loss
    def get_obs(self, seg, rgb):
        '''
        Predict the vision-based observation from segmentation masks.
        (rgb is currently unused.)
        '''
        seg_d = torch.unsqueeze(seg, 1)  # Bx1xHxW
        output = self.v_processor(seg_d)

        return output
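

# --- Added smoke test (not in the original file) -----------------------------
# A minimal sketch of how vismodel might be exercised, assuming an AttrDict-style
# opts exposing the fields read in __init__. Shapes are illustrative only: the
# 518x518 dummy image satisfies Depth Anything's 14-multiple size constraint, and
# VisRes is assumed to emit at least 3 * obj_num values per sample.
if __name__ == '__main__':
    opts = AttrDict(
        device='cuda' if torch.cuda.is_available() else 'cpu',
        img_size=(256, 256),
        use_exist_depth=False,
    )
    model = vismodel(opts).to(opts.device)

    dummy_rgb = torch.rand(1, 3, 518, 518, device=opts.device)
    depth = model._get_depth_with_dam(dummy_rgb)
    print('depth:', depth.shape, float(depth.min()), float(depth.max()))

    # One training-style forward pass with random segmentation masks and targets.
    seg = torch.rand(2, 256, 256, device=opts.device)
    v_gt = torch.rand(2, 3 * model.obj_num, device=opts.device)
    metrics, loss = model(seg, v_gt)
    print('v_pos loss:', metrics.v_pos)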