# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.

import logging
from typing import Optional

import numpy as np
import torch
import torch.nn as nn
import trimesh
from actionmesh.render.cameras import get_uniform_camera
from actionmesh.render.renderer import Renderer
from actionmesh.render.utils import resample_list, save_multiview_video_grid
from PIL import Image
from pytorch3d.renderer import TexturesVertex
from pytorch3d.structures import Meshes
from tqdm import tqdm

logger = logging.getLogger(__name__)


def array_to_img(X: torch.Tensor) -> Image.Image:
    """Convert a (B, C, H, W) tensor (values assumed in [0, 1]) to a PIL image of the first batch element."""
    # Broadcast single-channel inputs (e.g. masks) to 3 channels
    if X.shape[1] == 1:
        X = X.repeat(1, 3, 1, 1)
    X = X.permute(0, 2, 3, 1).cpu().numpy()
    X = (X * 255).astype(np.uint8)
    return Image.fromarray(X[0])


def image_and_mask_to_rgba(image: Image.Image, mask: Image.Image) -> Image.Image:
    """
    Combine an RGB image and an L-mode mask into a single RGBA image,
    using the mask as the alpha channel.
    """
    if image.mode != "RGB":
        image = image.convert("RGB")
    if mask.mode != "L":
        mask = mask.convert("L")
    if mask.size != image.size:
        mask = mask.resize(image.size, resample=Image.NEAREST)
    rgba_image = image.copy()
    rgba_image.putalpha(mask)
    return rgba_image
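
# Illustrative usage (the file names below are hypothetical):
#   rgb = Image.open("render.png")                  # RGB render of one frame
#   silhouette = Image.open("mask.png")             # L-mode foreground mask
#   rgba = image_and_mask_to_rgba(rgb, silhouette)  # background becomes transparent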


def trimesh_to_pytorch3d(mesh: trimesh.Trimesh, device: str = "cpu") -> Meshes:
    """Convert a trimesh mesh to a pytorch3d Meshes object with a uniform blue vertex texture."""
    # trimesh exposes vertices/faces as TrackedArray views; pass through np.asarray first
    verts = torch.from_numpy(np.asarray(mesh.vertices)).float().to(device)
    faces = torch.from_numpy(np.asarray(mesh.faces)).long().to(device)
    # -- Blue texture
    blue_color = torch.tensor([0.0, 0.0, 1.0]).to(device)
    vertex_colors = [blue_color.expand(verts.shape[0], -1)]
    texture = TexturesVertex(vertex_colors)
    return Meshes(verts=[verts], faces=[faces], textures=texture)
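
# Illustrative conversion of a simple trimesh primitive (assumes a CUDA device is available):
#   box = trimesh.creation.box(extents=(1.0, 1.0, 1.0))
#   box_p3d = trimesh_to_pytorch3d(box, device="cuda")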


class ActionMeshVisualizer(nn.Module):
    """Visualizer that renders the meshes produced by the pipeline."""

def __init__(
self,
image_size: int = 256,
bg_color: tuple[float, float, float] = (1.0, 1.0, 1.0),
        cameras: tuple[str, ...] = ("U000", "U004", "U008"),
):
super().__init__()
self.image_size = image_size
# -- Initialize renderer
self.renderer = Renderer(
img_size=image_size,
background_color=bg_color,
)
# -- Initialize cameras
self.cameras = {
k: v for (k, v) in get_uniform_camera(distance=3.0).items() if k in cameras
}

    @torch.no_grad()
def render(
self,
meshes: list[trimesh.Trimesh],
device: str,
output_dir: str,
input_frames: Optional[list[Image.Image]],
):
"""Render modalities from an output"""
n_cameras = len(self.cameras)
n_frames = len(meshes)
# If input frames are given, adapt to the number of meshes
if input_frames is not None:
input_frames = resample_list(input_frames, n_frames)
self.renderer = self.renderer.to(device)
global_predictions = []
if input_frames is not None:
global_predictions.append(
[{"mask": frame, "normal": frame} for frame in input_frames]
)
pbar = tqdm(total=n_cameras * n_frames, desc="Rendering")
        for view_id, camera in self.cameras.items():
            # Move the camera to the target device once per view
            camera = camera.to(device)
            predictions = []
            for mesh in meshes:
                pbar.set_postfix(
                    camera=view_id, frame=f"{len(predictions)+1}/{n_frames}"
                )
                mesh_p3d = trimesh_to_pytorch3d(mesh, device=device)
(
image_render,
mask_render,
normal_render,
) = self.renderer(
meshes=mesh_p3d,
cameras=camera,
return_normals=True,
)
preds = {
"mask": array_to_img(mask_render),
"normal": array_to_img(normal_render),
}
preds = {
k: image_and_mask_to_rgba(preds[k], preds["mask"]) for k in preds
}
predictions.append(preds)
pbar.update(1)
global_predictions.append(predictions)
pbar.close()
saved_files = save_multiview_video_grid(
global_predictions,
output_dir,
modalities=["normal"],
            n_cols=(n_cameras + 1) if input_frames is not None else n_cameras,
image_size=self.image_size,
)
for filepath in saved_files:
logger.info(f"Saved render: {filepath}")
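

if __name__ == "__main__":
    # Minimal usage sketch (illustrative only): the mesh paths and output directory
    # below are hypothetical and a CUDA device is assumed; adapt them to your data.
    logging.basicConfig(level=logging.INFO)

    mesh_paths = ["frame_000.obj", "frame_001.obj"]  # hypothetical per-frame meshes
    meshes = [trimesh.load(p, force="mesh") for p in mesh_paths]

    visualizer = ActionMeshVisualizer(image_size=256)
    visualizer.render(
        meshes=meshes,
        device="cuda",
        output_dir="renders",  # hypothetical output directory
        input_frames=None,
    )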