| | import numpy as np |
| | import imageio |
| | import os |
| | import argparse |
| | from tqdm import tqdm |
| | from .renderer import get_renderer |
| |
|
| |
|
def get_rotation(theta=np.pi / 3):
    """Return a 3x3 numpy rotation matrix for a rotation of ``theta``
    radians about the Y (up) axis."""
    import torch
    import mGPT.utils.rotation_conversions as geometry

    up_axis = torch.tensor([0, 1, 0], dtype=torch.float)
    rotation = geometry.axis_angle_to_matrix(theta * up_axis)
    return rotation.numpy()
| |
|
| |
|
def render_video(meshes,
                 key,
                 action,
                 renderer,
                 savepath,
                 backgrounds,
                 cam_pose=None,
                 cams=(0.75, 0.75, 0, 0.10),
                 color=(0.11, 0.53, 0.8)):
    """Render a sequence of meshes to an mp4 file at ``savepath``.

    Args:
        meshes: array-like of per-frame vertex arrays; the whole sequence is
            re-centered on the first frame's centroid before rendering.
        key: sequence label. Any key other than "real"/"ntf"/"side" is
            interpreted as an integer index and mapped to an interpolated
            mesh color (0..6 scale).
        action: action index; used only in the progress-bar description.
        renderer: object exposing ``render(background, mesh, cam, color=...,
            cam_pose=...)``.
        savepath: output .mp4 path.
        backgrounds: a single HxWx3 image shared by every frame, or a 4-D
            stack of per-frame backgrounds (then ``cams`` must be per-frame
            as well).
        cam_pose: camera pose forwarded to the renderer. Defaults to None so
            callers that pass no pose (e.g. ``main`` in this file, which
            calls with six positional arguments) do not raise TypeError.
            NOTE(review): assumes ``renderer.render`` accepts
            ``cam_pose=None`` — confirm against the renderer implementation.
        cams: camera parameters (one tuple, or indexable per-frame when
            ``backgrounds`` is 4-D).
        color: base RGB mesh color; overridden for numeric keys.
    """
    # Numeric keys pick a color by linear interpolation between two fixed
    # RGB endpoints; w grows with the key on a 0..6 scale.
    if key not in ["real", "ntf", "side"]:
        w = int(key) / 6.0
        color = (1 - w) * np.array([0.75, 0.13, 0.7]) + w * np.array(
            [0.12, 0.7, 0.14])

    # Center the whole sequence on the first frame's mean vertex position.
    meshes = meshes - meshes[0].mean(axis=0)
    imgs = []
    idx = 0
    for mesh in tqdm(meshes, desc=f"Visualize {key}, action {action}"):
        # 3-D backgrounds are shared across frames; a 4-D stack supplies one
        # background (and one cam) per frame.
        if len(backgrounds.shape) == 3:
            background = backgrounds
            cam = cams
        elif len(backgrounds.shape) == 4:
            background = backgrounds[idx]
            cam = cams[idx]
            idx += 1

        img = renderer.render(background,
                              mesh,
                              cam,
                              color=color,
                              cam_pose=cam_pose)
        imgs.append(img)

    imgs = np.array(imgs)

    # Write all rendered frames as a 30 fps mp4.
    writer = imageio.get_writer(savepath, fps=30)
    for cimg in imgs:
        writer.append_data(cimg)
    writer.close()
| |
|
| |
|
def main():
    """CLI entry point: load a .npy of mesh sequences and render each one
    to an mp4 under a folder named after the input file.

    The loaded array is expected to hold either exactly three stacked
    outputs (visualization, generation, reconstruction) or an arbitrary
    number of generations indexed 0..N-1.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("filename")
    opt = parser.parse_args()
    filename = opt.filename
    # Output folder: input path with its extension stripped.
    savefolder = os.path.splitext(filename)[0]
    os.makedirs(savefolder, exist_ok=True)

    output = np.load(filename)

    if output.shape[0] == 3:
        # Fixed 3-way layout: unpack into named entries.
        visualization, generation, reconstruction = output
        output = {
            "visualization": visualization,
            "generation": generation,
            "reconstruction": reconstruction
        }
    else:
        # Generic layout: one "generation_<i>" entry per leading index.
        output = {
            f"generation_{key}": output[key]
            for key in range(len(output))
        }

    width = 1024
    height = 1024

    # Plain black background shared by every frame.
    background = np.zeros((height, width, 3))
    renderer = get_renderer(width, height)

    # Trim each generation to its expected frame count. Guarded on key
    # presence: the 3-output branch above produces different keys, and
    # indexing "generation_3" unconditionally raised KeyError in that case.
    if "generation_3" in output:
        if output["generation_3"].shape[-1] == 100:
            output["generation_0"] = output["generation_0"][:, :, :, :40]
            output["generation_1"] = output["generation_1"][:, :, :, :60]
            output["generation_2"] = output["generation_2"][:, :, :, :80]
            output["generation_3"] = output["generation_3"][:, :, :, :100]
        elif output["generation_3"].shape[-1] == 160:
            print("160 mode")
            output["generation_0"] = output["generation_0"][:, :, :, :100]
            output["generation_1"] = output["generation_1"][:, :, :, :120]
            output["generation_2"] = output["generation_2"][:, :, :, :140]
            output["generation_3"] = output["generation_3"][:, :, :, :160]

    # Render every sample of every entry; videos are named
    # "action<i>_<key>.mp4" inside the save folder.
    for key in output:
        vidmeshes = output[key]
        for action in range(len(vidmeshes)):
            meshes = vidmeshes[action].transpose(2, 0, 1)
            path = os.path.join(savefolder,
                                "action{}_{}.mp4".format(action, key))
            render_video(meshes, key, action, renderer, path, background)
| |
|
| |
|
# Script entry point: render every mesh sequence in the given .npy file.
if __name__ == "__main__":
    main()
| |
|