update
Browse files

- app.py  +33  -14
- lib/models/decoder/decoder_hamer_style.py  +1  -9
- lib/models/model.py  +2  -2
- lib/utils/vis_utils.py  +4  -4
app.py
CHANGED
@@ -1,5 +1,6 @@
 import os
 import cv2
+import uuid
 import torch
 import numpy as np
 import gradio as gr
@@ -24,7 +25,7 @@ from mediapipe.tasks.python import BaseOptions
 # Configuration
 BACKBONE = 'hamer'
 EXPERIMENT_DIR = 'experiments_demo_image'
-EXAMPLE_DIR = '/home/user/app/asset/example_images'
+EXAMPLE_DIR = '/home/user/app/asset/example_images'
 
 # Setup
 device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
@@ -44,16 +45,17 @@ checkpoint = torch.load(checkpoint_path, map_location=device)
 model.load_state_dict(checkpoint['state_dict'])
 model.eval()
 
-# Load HandLandmarker
+# Load Mediapipe HandLandmarker
 base_options = BaseOptions(model_asset_path=cfg.MODEL.hand_landmarker_path)
 hand_options = vision.HandLandmarkerOptions(base_options=base_options, num_hands=2)
 detector = vision.HandLandmarker.create_from_options(hand_options)
 
+# Inference function
 def process_image(pil_img: Image.Image):
     orig_img = np.array(pil_img.convert("RGB"))
     mp_image = mp.Image(image_format=mp.ImageFormat.SRGB, data=orig_img.copy())
     detection_result = detector.detect(mp_image)
-
+    _, right_hand_bbox = draw_landmarks_on_image(orig_img.copy(), detection_result)
 
     if right_hand_bbox is None:
         return None, "No hand detected."
@@ -61,6 +63,7 @@ def process_image(pil_img: Image.Image):
     crop_img, _, _, _, _, _ = augmentation_contact(
         orig_img.copy(), right_hand_bbox, 'test', enforce_flip=False)
 
+    # Prepare input tensor
    if BACKBONE in ['handoccnet'] or 'resnet' in cfg.MODEL.backbone_type or 'hrnet' in cfg.MODEL.backbone_type:
         img_tensor = transforms.ToTensor()(crop_img.astype(np.float32) / 255.0)
     elif BACKBONE in ['hamer'] or 'vit' in cfg.MODEL.backbone_type:
@@ -70,29 +73,45 @@
     else:
         raise NotImplementedError(f"Unsupported backbone: {BACKBONE}")
 
+    # Inference
     with torch.no_grad():
         outputs = model({'input': {'image': img_tensor[None].to(device)}}, mode="test")
 
+    # Postprocess contact mask
     eval_thres = get_contact_thres(BACKBONE)
     contact_mask = (outputs['contact_out'][0] > eval_thres).detach().cpu().numpy()
     contact_mask = remove_small_contact_components(contact_mask, faces=mano.watertight_face['right'], min_size=20)
-    mesh_path = contact_renderer.export_contact_mesh(contact_mask)
-    return mesh_path
 
+    # Export .glb mesh
+    output_path = f"/tmp/contact_mesh_{uuid.uuid4().hex}.glb"
+    mesh_path = contact_renderer.export_contact_mesh(contact_mask, output_path=output_path)
 
-
+    return mesh_path, "Success"
+
+# Gradio UI
 description = '''
 ### HACO: Learning Dense Hand Contact Estimation from Imbalanced Data
-
-Upload an image of a hand-object interaction scene, and HACO will predict and visualize contact regions on the hand.
+Upload an image of a hand-object interaction. HACO will predict contact regions and return a colored 3D hand mesh.
 '''
 
-
-
-input_image = gr.Image(label="Input Image", type="pil")
-output_model = gr.Model3D(label="Contact Mesh")
-run_button = gr.Button("Run HACO")
+with gr.Blocks(title="HACO Image Demo") as demo:
+    gr.Markdown(description)
 
-
+    with gr.Row():
+        input_image = gr.Image(label="Input Image", type="pil")
+        output_model = gr.Model3D(label="Predicted Contact Mesh")
+        output_status = gr.Textbox(label="Status")
+
+    run_button = gr.Button("Run HACO")
+    run_button.click(fn=process_image, inputs=input_image, outputs=[output_model, output_status])
+
+    # Example image list
+    example_list = [
+        [os.path.join(EXAMPLE_DIR, f)] for f in [
+            "holding_cup1.jpg", "holding_cup2.jpg", "holding_hammer1.jpg",
+            "holding_scissors2.jpg", "squidgame_demo2.png", "touching_wall1.jpg", "using_pen1.jpg"
+        ]
+    ]
+    gr.Examples(examples=example_list, inputs=[input_image], label="Example Images")
 
 demo.launch()
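With `process_image` now returning a `(mesh_path, status)` pair, both Gradio outputs are fed from a single call. A quick way to sanity-check the pipeline outside the UI is a minimal driver like the sketch below; it is not part of the commit, and it assumes the demo environment is already set up so that `EXAMPLE_DIR`, `process_image`, and the bundled example images resolve.

# Hypothetical smoke test for process_image(); not part of this commit.
import os
from PIL import Image

example_path = os.path.join(EXAMPLE_DIR, "holding_cup1.jpg")   # one of the bundled examples
mesh_path, status = process_image(Image.open(example_path))
print(status)      # "Success", or "No hand detected." when no right hand is found
print(mesh_path)   # e.g. /tmp/contact_mesh_<hex>.glb, the file gr.Model3D renders

The uuid-suffixed filename presumably exists to keep concurrent requests from overwriting each other's /tmp exports.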
lib/models/decoder/decoder_hamer_style.py
CHANGED
@@ -17,10 +17,6 @@ from lib.core.config import cfg
 from lib.utils.human_models import mano
 
 
-V_regressor_336 = np.load(cfg.MODEL.V_regressor_336_path)
-V_regressor_84 = np.load(cfg.MODEL.V_regressor_84_path)
-
-
 # This function is from HaMeR (https://github.com/geopavlakos/hamer).
 def exists(val):
     return val is not None
@@ -629,9 +625,5 @@ class ContactTransformerDecoderHead(nn.Module):
         pred_contact = self.deccontact(token_out) + pred_contact
         # pred_contact = pred_contact.sigmoid()
 
-        # Joint contact
-        pred_joint_contact = (torch.tensor(mano.joint_regressor, dtype=torch.float32, device=device) @ pred_contact.T).T
-        pred_mesh_contact_336 = (torch.tensor(V_regressor_336, dtype=torch.float32, device=device) @ pred_contact.T).T
-        pred_mesh_contact_84 = (torch.tensor(V_regressor_84, dtype=torch.float32, device=device) @ pred_contact.T).T
 
-        return pred_contact
+        return pred_contact
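The deleted lines loaded two vertex regressors at import time and, in the head's forward pass, projected the per-vertex contact predictions to joint-level and coarsened mesh-level contact, but none of those projections were returned; only `pred_contact` is. Removing them drops dead compute from every forward call. For reference, the pattern they implemented is one matrix product per granularity, as in this sketch (shapes are assumptions: `pred_contact` is (B, V) over V hand vertices, `regressor` a fixed (K, V) matrix):

import torch

def regress_contact(pred_contact: torch.Tensor, regressor: torch.Tensor) -> torch.Tensor:
    # (K, V) @ (V, B) -> (K, B), then transpose back to (B, K),
    # mirroring the removed `(regressor @ pred_contact.T).T` lines.
    return (regressor @ pred_contact.T).T

For example, `regress_contact(pred_contact, torch.tensor(mano.joint_regressor, dtype=torch.float32))` reproduces the removed joint-contact line.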
lib/models/model.py
CHANGED
@@ -24,9 +24,9 @@ class HACO(nn.Module):
         image = F.interpolate(image, size=(224, 224), mode='bilinear', align_corners=False)
 
         img_feat = self.backbone(image)
-        contact_out
+        contact_out = self.decoder(img_feat)
 
-        return dict(contact_out=contact_out
+        return dict(contact_out=contact_out)
 
 
 
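As rendered above, the removed lines were incomplete statements: `contact_out` had no decoder call on its right-hand side, and the returned `dict(...)` was missing its closing parenthesis. The sketch below mirrors the repaired forward with dummy submodules to show the data flow; the `forward(batch, mode)` signature and the `batch['input']['image']` access are assumptions inferred from the call site in app.py, and the 778-dim output is only a placeholder (the standard MANO vertex count).

import torch
import torch.nn as nn
import torch.nn.functional as F

class HACOSketch(nn.Module):
    """Abridged stand-in for HACO with placeholder backbone/decoder (assumed interfaces)."""
    def __init__(self):
        super().__init__()
        self.backbone = nn.Conv2d(3, 8, kernel_size=3, padding=1)       # dummy backbone
        self.decoder = nn.Sequential(nn.Flatten(), nn.LazyLinear(778))  # dummy contact decoder

    def forward(self, batch, mode="test"):
        image = batch['input']['image']
        image = F.interpolate(image, size=(224, 224), mode='bilinear', align_corners=False)
        img_feat = self.backbone(image)
        contact_out = self.decoder(img_feat)   # the statement this commit completes
        return dict(contact_out=contact_out)   # parenthesis now balanced

outputs = HACOSketch()({'input': {'image': torch.rand(1, 3, 256, 256)}}, mode="test")
assert outputs['contact_out'].shape == (1, 778)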
lib/utils/vis_utils.py
CHANGED
@@ -1,6 +1,7 @@
 import os
 import cv2
 import torch
+import uuid
 import trimesh
 import numpy as np
 import matplotlib.cm as cm
@@ -24,13 +25,12 @@ class ContactRenderer:
         mano_rest_out = mano.layer['right'](betas=torch.zeros((1, 10)), hand_pose=torch.from_numpy(hand_pose[None, 3:]).float(), global_orient=torch.zeros((1, 3)), transl=torch.zeros((1, 3)))
         self.hand_model_mano = trimesh.Trimesh(mano_rest_out.vertices[0], mano.watertight_face['right'])
 
-    def export_contact_mesh(self, contact_mask, output_path=
+    def export_contact_mesh(self, contact_mask, output_path=None):
         vis_contact = contact_mask == 1.0
-        # Apply default color
         self.hand_model_mano.visual.vertex_colors = np.tile(self.default_mesh_color, (self.hand_model_mano.vertices.shape[0], 1))
-        # Apply contact color
         self.hand_model_mano.visual.vertex_colors[vis_contact] = self.contact_mesh_color
-
+        if output_path is None:
+            output_path = f"/tmp/contact_mesh_{uuid.uuid4().hex}.glb"
         self.hand_model_mano.export(output_path)
         return output_path
 
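With the `output_path=None` default plus the uuid fallback, callers can omit the path and get a unique .glb under /tmp (avoiding collisions between concurrent demo requests), while an explicit path still takes precedence, as app.py now uses. A short usage sketch; it assumes `renderer` is a `ContactRenderer` already constructed as elsewhere in the repo, since the constructor call is outside this diff:

import numpy as np

# renderer = ContactRenderer(...)  # construction not shown in this diff
n_verts = renderer.hand_model_mano.vertices.shape[0]
contact_mask = np.zeros(n_verts)
contact_mask[:50] = 1.0   # toy mask: flag the first 50 vertices as in contact

auto_path = renderer.export_contact_mesh(contact_mask)   # /tmp/contact_mesh_<hex>.glb
named_path = renderer.export_contact_mesh(contact_mask, output_path="hand_contact.glb")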