update
README.md DELETED
@@ -1,13 +0,0 @@
----
-title: Pose3D
-emoji: 🦀
-colorFrom: gray
-colorTo: red
-sdk: gradio
-sdk_version: 3.16.2
-app_file: app.py
-pinned: false
-license: mit
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py CHANGED
@@ -1,149 +1,20 @@
-import
+import os
 import gradio as gr
 import numpy as np
-from
-from pytorch3d.renderer import TexturesVertex as Textures
-from pytorch3d.structures import Meshes
-import torch
-
-mesh_paths = {
-    "Aeroplane": "CAD_selected/aeroplane.off",
-    "Bicycle": "CAD_selected/bicycle.off",
-    "Boat": "CAD_selected/boat.off",
-    "Bottle": "CAD_selected/bottle.off",
-    "Bus": "CAD_selected/bus.off",
-    "Car": "CAD_selected/car.off",
-    "Chair": "CAD_selected/chair.off",
-    "Diningtable": "CAD_selected/diningtable.off",
-    "Motorbike": "CAD_selected/motorbike.off",
-    "Sofa": "CAD_selected/sofa.off",
-    "Train": "CAD_selected/train.off",
-    "Tvmonitor": "CAD_selected/tvmonitor.off",
-}
-
-
-def rotation_theta(theta, device_=None):
-    # cos -sin 0
-    # sin cos 0
-    # 0 0 1
-    if type(theta) == float:
-        if device_ is None:
-            device_ = 'cpu'
-        theta = torch.ones((1, 1, 1)).to(device_) * theta
-    else:
-        if device_ is None:
-            device_ = theta.device
-        theta = theta.view(-1, 1, 1)
-
-    mul_ = torch.Tensor([[1, 0, 0, 0, 1, 0, 0, 0, 0], [0, -1, 0, 1, 0, 0, 0, 0, 0]]).view(1, 2, 9).to(device_)
-    bia_ = torch.Tensor([0] * 8 + [1]).view(1, 1, 9).to(device_)
-
-    # [n, 1, 2]
-    cos_sin = torch.cat((torch.cos(theta), torch.sin(theta)), dim=2).to(device_)
-
-    # [n, 1, 2] @ [1, 2, 9] + [1, 1, 9] => [n, 1, 9] => [n, 3, 3]
-    trans = torch.matmul(cos_sin, mul_) + bia_
-    trans = trans.view(-1, 3, 3)
-
-    return trans
-
-
-def campos_to_R_T(campos, theta, device='cpu', at=((0, 0, 0),), up=((0, 1, 0), )):
-    R = look_at_rotation(campos, at=at, device=device, up=up)  # (n, 3, 3)
-    R = torch.bmm(R, rotation_theta(theta, device_=device))
-    T = -torch.bmm(R.transpose(1, 2), campos.unsqueeze(2))[:, :, 0]  # (1, 3)
-    return R, T
-
-
-def load_off(off_file_name, to_torch=False):
-    file_handle = open(off_file_name)
-
-    file_list = file_handle.readlines()
-    n_points = int(file_list[1].split(' ')[0])
-    all_strings = ''.join(file_list[2:2 + n_points])
-    array_ = np.fromstring(all_strings, dtype=np.float32, sep='\n')
-
-    all_strings = ''.join(file_list[2 + n_points:])
-    array_int = np.fromstring(all_strings, dtype=np.int32, sep='\n')
-
-    array_ = array_.reshape((-1, 3))
-
-    if not to_torch:
-        return array_, array_int.reshape((-1, 4))[:, 1::]
-    else:
-        return torch.from_numpy(array_), torch.from_numpy(array_int.reshape((-1, 4))[:, 1::])
-
-
-def pre_process_mesh_pascal(verts):
-    verts = torch.cat((verts[:, 0:1], verts[:, 2:3], -verts[:, 1:2]), dim=1)
-    return verts
+from PIL import Image
 
 
 def render(azimuth, elevation, theta, dist, category, unit):
-
-    elevation = float(elevation)
-    theta = float(theta)
-    dist = float(dist)
-
-    h, w = 256, 256
-    render_image_size = max(h, w)
-    crop_size = (256, 256)
-    device = 'cpu'
-
-    cameras = PerspectiveCameras(focal_length=12.0, device=device)
-    raster_settings = RasterizationSettings(
-        image_size=render_image_size,
-        blur_radius=0.0,
-        faces_per_pixel=1,
-        bin_size=0
-    )
-    raster_settings1 = RasterizationSettings(
-        image_size=render_image_size // 8,
-        blur_radius=0.0,
-        faces_per_pixel=1,
-        bin_size=0
-    )
-    rasterizer = MeshRasterizer(
-        cameras=cameras,
-        raster_settings=raster_settings1
-    )
-    lights = PointLights(device=device, location=((2.0, 2.0, -2.0),))
-    phong_renderer = MeshRenderer(
-        rasterizer=MeshRasterizer(
-            cameras=cameras,
-            raster_settings=raster_settings
-        ),
-        shader=HardPhongShader(device=device, lights=lights, cameras=cameras)
-    )
+    img_id = np.random.randint(0, 10000)
 
-
-    x3d = x3d * 1.0
-    verts = torch.from_numpy(x3d).to(device)
-    verts = pre_process_mesh_pascal(verts)
-    faces = torch.from_numpy(xface).to(device)
-    verts_rgb = torch.ones_like(verts)[None]
-    # verts_rgb = torch.ones_like(verts)[None] * torch.Tensor(color).view(1, 1, 3).to(verts.device)
-    textures = Textures(verts_rgb.to(device))
-    meshes = Meshes(verts=[verts], faces=[faces], textures=textures)
-    # meshes = Meshes(verts=[verts], faces=[faces])
+    os.system(f'python render.py --azimuth {azimuth} --elevation {elevation} --theta {theta} --dist {dist} --category {category} --unit {unit} --img_id {img_id}')
 
-
-
-    image = phong_renderer(meshes_world=meshes.clone(), R=R, T=T)
-    image = image[:, ..., :3]
-    box_ = bbt.box_by_shape(crop_size, (render_image_size // 2,) * 2)
-    bbox = box_.bbox
-    image = image[:, bbox[0][0]:bbox[0][1], bbox[1][0]:bbox[1][1], :]
-    image = torch.squeeze(image).detach().cpu().numpy()
-    image = np.array((image / image.max()) * 255).astype(np.uint8)
+    img = Image.open(f'{img_id:05d}.png')
+    os.system(f'rm {img_id:05d}.png')
 
-
-    dx = int(-cx + w/2)
-    dy = int(-cy + h/2)
-    image_pad = np.pad(image, ((abs(dy), abs(dy)), (abs(dx), abs(dx)), (0, 0)), mode='edge')
-    image = image_pad[dy+abs(dy):dy+abs(dy)+image.shape[0], dx+abs(dx):dx+abs(dx)+image.shape[1]]
+    return np.array(img)
 
-
+os.system('sh setup.sh')
 
 
 with gr.Blocks() as demo:
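Note: the rewritten app.py keeps only the Gradio layer. Each request draws a random img_id, shells out to the new render.py (which does all the pytorch3d work and writes a PNG named after that id), then reads the PNG back and deletes it. Below is a minimal sketch of the same round-trip using subprocess.run in place of os.system; the flag names match the diff, while the error check and the pathlib-based cleanup are illustrative assumptions, not part of the Space.

```python
import subprocess
from pathlib import Path

import numpy as np
from PIL import Image


def render_via_subprocess(azimuth, elevation, theta, dist, category, unit):
    # Random id so concurrent requests write to different files,
    # mirroring the np.random.randint(0, 10000) call in app.py.
    img_id = np.random.randint(0, 10000)
    out = Path(f'{img_id:05d}.png')

    # Same CLI contract as render.py's argparse block in this diff.
    subprocess.run(
        ['python', 'render.py',
         '--azimuth', str(azimuth), '--elevation', str(elevation),
         '--theta', str(theta), '--dist', str(dist),
         '--category', category, '--unit', unit,
         '--img_id', str(img_id)],
        check=True,  # assumption: surface renderer failures instead of letting Image.open crash
    )

    try:
        return np.array(Image.open(out))
    finally:
        out.unlink(missing_ok=True)  # stands in for the os.system(f'rm ...') cleanup
```

Running the renderer in a subprocess also means pytorch3d, installed at startup by setup.sh, is never imported into the long-lived Gradio process.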
render.py ADDED
@@ -0,0 +1,164 @@
+import argparse
+import BboxTools as bbt
+import gradio as gr
+import numpy as np
+from PIL import Image
+from pytorch3d.renderer import RasterizationSettings, PerspectiveCameras, MeshRasterizer, MeshRenderer, HardPhongShader, BlendParams, camera_position_from_spherical_angles, look_at_rotation, PointLights
+from pytorch3d.renderer import TexturesVertex as Textures
+from pytorch3d.structures import Meshes
+import torch
+
+mesh_paths = {
+    "Aeroplane": "CAD_selected/aeroplane.off",
+    "Bicycle": "CAD_selected/bicycle.off",
+    "Boat": "CAD_selected/boat.off",
+    "Bottle": "CAD_selected/bottle.off",
+    "Bus": "CAD_selected/bus.off",
+    "Car": "CAD_selected/car.off",
+    "Chair": "CAD_selected/chair.off",
+    "Diningtable": "CAD_selected/diningtable.off",
+    "Motorbike": "CAD_selected/motorbike.off",
+    "Sofa": "CAD_selected/sofa.off",
+    "Train": "CAD_selected/train.off",
+    "Tvmonitor": "CAD_selected/tvmonitor.off",
+}
+
+
+def parse_args():
+    parser = argparse.ArgumentParser(description='Render off')
+    parser.add_argument('--azimuth', type=float)
+    parser.add_argument('--elevation', type=float)
+    parser.add_argument('--theta', type=float)
+    parser.add_argument('--dist', type=float)
+    parser.add_argument('--category', type=str)
+    parser.add_argument('--unit', type=str)
+    parser.add_argument('--img_id', type=int)
+    return parser.parse_args()
+
+
+def rotation_theta(theta, device_=None):
+    # cos -sin 0
+    # sin cos 0
+    # 0 0 1
+    if type(theta) == float:
+        if device_ is None:
+            device_ = 'cpu'
+        theta = torch.ones((1, 1, 1)).to(device_) * theta
+    else:
+        if device_ is None:
+            device_ = theta.device
+        theta = theta.view(-1, 1, 1)
+
+    mul_ = torch.Tensor([[1, 0, 0, 0, 1, 0, 0, 0, 0], [0, -1, 0, 1, 0, 0, 0, 0, 0]]).view(1, 2, 9).to(device_)
+    bia_ = torch.Tensor([0] * 8 + [1]).view(1, 1, 9).to(device_)
+
+    # [n, 1, 2]
+    cos_sin = torch.cat((torch.cos(theta), torch.sin(theta)), dim=2).to(device_)
+
+    # [n, 1, 2] @ [1, 2, 9] + [1, 1, 9] => [n, 1, 9] => [n, 3, 3]
+    trans = torch.matmul(cos_sin, mul_) + bia_
+    trans = trans.view(-1, 3, 3)
+
+    return trans
+
+
+def campos_to_R_T(campos, theta, device='cpu', at=((0, 0, 0),), up=((0, 1, 0), )):
+    R = look_at_rotation(campos, at=at, device=device, up=up)  # (n, 3, 3)
+    R = torch.bmm(R, rotation_theta(theta, device_=device))
+    T = -torch.bmm(R.transpose(1, 2), campos.unsqueeze(2))[:, :, 0]  # (1, 3)
+    return R, T
+
+
+def load_off(off_file_name, to_torch=False):
+    file_handle = open(off_file_name)
+
+    file_list = file_handle.readlines()
+    n_points = int(file_list[1].split(' ')[0])
+    all_strings = ''.join(file_list[2:2 + n_points])
+    array_ = np.fromstring(all_strings, dtype=np.float32, sep='\n')
+
+    all_strings = ''.join(file_list[2 + n_points:])
+    array_int = np.fromstring(all_strings, dtype=np.int32, sep='\n')
+
+    array_ = array_.reshape((-1, 3))
+
+    if not to_torch:
+        return array_, array_int.reshape((-1, 4))[:, 1::]
+    else:
+        return torch.from_numpy(array_), torch.from_numpy(array_int.reshape((-1, 4))[:, 1::])
+
+
+def pre_process_mesh_pascal(verts):
+    verts = torch.cat((verts[:, 0:1], verts[:, 2:3], -verts[:, 1:2]), dim=1)
+    return verts
+
+
+def render(azimuth, elevation, theta, dist, category, unit, img_id):
+    azimuth = float(azimuth)
+    elevation = float(elevation)
+    theta = float(theta)
+    dist = float(dist)
+
+    h, w = 256, 256
+    render_image_size = max(h, w)
+    crop_size = (256, 256)
+    device = 'cpu'
+
+    cameras = PerspectiveCameras(focal_length=12.0, device=device)
+    raster_settings = RasterizationSettings(
+        image_size=render_image_size,
+        blur_radius=0.0,
+        faces_per_pixel=1,
+        bin_size=0
+    )
+    raster_settings1 = RasterizationSettings(
+        image_size=render_image_size // 8,
+        blur_radius=0.0,
+        faces_per_pixel=1,
+        bin_size=0
+    )
+    rasterizer = MeshRasterizer(
+        cameras=cameras,
+        raster_settings=raster_settings1
+    )
+    lights = PointLights(device=device, location=((2.0, 2.0, -2.0),))
+    phong_renderer = MeshRenderer(
+        rasterizer=MeshRasterizer(
+            cameras=cameras,
+            raster_settings=raster_settings
+        ),
+        shader=HardPhongShader(device=device, lights=lights, cameras=cameras)
+    )
+
+    x3d, xface = load_off(mesh_paths[category])
+    x3d = x3d * 1.0
+    verts = torch.from_numpy(x3d).to(device)
+    verts = pre_process_mesh_pascal(verts)
+    faces = torch.from_numpy(xface).to(device)
+    verts_rgb = torch.ones_like(verts)[None]
+    # verts_rgb = torch.ones_like(verts)[None] * torch.Tensor(color).view(1, 1, 3).to(verts.device)
+    textures = Textures(verts_rgb.to(device))
+    meshes = Meshes(verts=[verts], faces=[faces], textures=textures)
+    # meshes = Meshes(verts=[verts], faces=[faces])
+
+    C = camera_position_from_spherical_angles(dist, elevation, azimuth, degrees=(unit=='Degree'), device=device)
+    R, T = campos_to_R_T(C, theta, device=device)
+    image = phong_renderer(meshes_world=meshes.clone(), R=R, T=T)
+    image = image[:, ..., :3]
+    box_ = bbt.box_by_shape(crop_size, (render_image_size // 2,) * 2)
+    bbox = box_.bbox
+    image = image[:, bbox[0][0]:bbox[0][1], bbox[1][0]:bbox[1][1], :]
+    image = torch.squeeze(image).detach().cpu().numpy()
+    image = np.array((image / image.max()) * 255).astype(np.uint8)
+
+    cx, cy = (128, 128)
+    dx = int(-cx + w/2)
+    dy = int(-cy + h/2)
+    image_pad = np.pad(image, ((abs(dy), abs(dy)), (abs(dx), abs(dx)), (0, 0)), mode='edge')
+    image = image_pad[dy+abs(dy):dy+abs(dy)+image.shape[0], dx+abs(dx):dx+abs(dx)+image.shape[1]]
+    Image.fromarray(image).save(f'{img_id:05d}.png')
+
+
+if __name__ == '__main__':
+    args = parse_args()
+    render(args.azimuth, args.elevation, args.theta, args.dist, args.category, args.unit, args.img_id)
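Note: load_off above assumes a plain ASCII OFF mesh: line 0 is the OFF keyword, line 1 starts with the vertex count, then one vertex per line, then one triangle per line prefixed by its vertex count 3, which is why the face array is reshaped to (-1, 4) and the first column dropped. A toy file in that layout, parsed with the same (deprecated but still functional) np.fromstring calls; the file contents are illustrative:

```python
import numpy as np

# One triangle in the layout load_off expects:
# "OFF", "<n_verts> <n_faces> <n_edges>", vertex lines, then "3 i j k" face lines.
off_text = "OFF\n3 1 0\n0.0 0.0 0.0\n1.0 0.0 0.0\n0.0 1.0 0.0\n3 0 1 2\n"
lines = off_text.splitlines(keepends=True)

n_points = int(lines[1].split(' ')[0])  # 3 vertices
verts = np.fromstring(''.join(lines[2:2 + n_points]), dtype=np.float32, sep='\n').reshape((-1, 3))
faces = np.fromstring(''.join(lines[2 + n_points:]), dtype=np.int32, sep='\n').reshape((-1, 4))[:, 1:]

assert verts.shape == (3, 3)  # xyz per vertex
assert faces.shape == (1, 3)  # vertex indices per triangle, leading "3" stripped
```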
setup.sh ADDED
@@ -0,0 +1 @@
+pip install git+https://github.com/facebookresearch/pytorch3d.git@stable
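One non-obvious piece of render.py is rotation_theta, which assembles the in-plane rotation [[cos, -sin, 0], [sin, cos, 0], [0, 0, 1]] by multiplying the [n, 1, 2] cos/sin vector against a fixed [1, 2, 9] mask and adding a bias for the constant 1, rather than indexing into a matrix. A quick sanity check against the closed form; importing render assumes pytorch3d is already installed, since render.py pulls it in at import time:

```python
import math

import torch

from render import rotation_theta  # render.py from this diff

theta = 0.3
R = rotation_theta(theta)[0]  # a float input yields a batch with one 3x3 matrix
expected = torch.tensor([
    [math.cos(theta), -math.sin(theta), 0.0],
    [math.sin(theta),  math.cos(theta), 0.0],
    [0.0,              0.0,             1.0],
])
assert torch.allclose(R, expected, atol=1e-6)
```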