# Spaces: Running on Zero
from typing import List, Optional, Union
import tempfile

import matplotlib.cm as cm
import mediapy
import numpy as np
import PIL.Image
import torch
def save_video(
    video_frames: Union[List[np.ndarray], List[PIL.Image.Image]],
    output_video_path: Optional[str] = None,
    fps: int = 10,
    crf: int = 18,
) -> str:
    """Encode a sequence of frames to an MP4 file and return its path.

    Args:
        video_frames: Frames as float numpy arrays (assumed in [0, 1] --
            TODO confirm against callers) or as PIL images.
        output_video_path: Destination path; a persistent temporary file is
            created when omitted.
        fps: Output frame rate.
        crf: x264 constant rate factor (lower = higher quality).

    Returns:
        Path of the written video file.

    Raises:
        ValueError: If ``video_frames`` is empty.
    """
    if not video_frames:
        raise ValueError("video_frames must contain at least one frame")
    if output_video_path is None:
        # delete=False keeps the file on disk after the handle is closed.
        # The original NamedTemporaryFile(...).name left the file owned by a
        # garbage-collectable object, so the path could be deleted before
        # (or while) mediapy wrote to it.
        with tempfile.NamedTemporaryFile(suffix=".mp4", delete=False) as tmp:
            output_video_path = tmp.name
    if isinstance(video_frames[0], np.ndarray):
        # Clip before casting: plain .astype(np.uint8) wraps around for
        # values outside [0, 1] instead of saturating.
        video_frames = [
            np.clip(frame * 255, 0, 255).astype(np.uint8) for frame in video_frames
        ]
    elif isinstance(video_frames[0], PIL.Image.Image):
        video_frames = [np.array(frame) for frame in video_frames]
    mediapy.write_video(output_video_path, video_frames, fps=fps, crf=crf)
    return output_video_path
class ColorMapper:
    """Maps scalar values (e.g. depth) to RGB colors via a matplotlib colormap."""

    def __init__(self, colormap: str = "inferno"):
        # N x 3 lookup table of RGB values in [0, 1] (256 entries for the
        # listed colormaps such as "inferno").
        self.colormap = torch.tensor(cm.get_cmap(colormap).colors)

    def apply(self, image: torch.Tensor, v_min=None, v_max=None) -> torch.Tensor:
        """Color-map a tensor of scalar values.

        Args:
            image: Tensor of arbitrary shape holding scalar values.
            v_min: Lower bound for normalization; defaults to ``image.min()``.
            v_max: Upper bound for normalization; defaults to ``image.max()``.

        Returns:
            Tensor of shape ``image.shape + (3,)`` with RGB values in [0, 1].
        """
        if v_min is None:
            v_min = image.min()
        if v_max is None:
            v_max = image.max()
        if v_max <= v_min:
            # Degenerate range (constant image): the original divided by zero,
            # producing NaN indices; map everything to the colormap's low end.
            indices = torch.zeros_like(image, dtype=torch.long)
        else:
            normalized = (image - v_min) / (v_max - v_min)
            # Clamp so values outside [v_min, v_max] hit the table's ends
            # instead of raising an out-of-bounds indexing error.
            indices = (normalized * 255).long().clamp(0, 255)
        return self.colormap[indices]
def vis_sequence_depth(depths: np.ndarray, v_min=None, v_max=None):
    """Colorize a sequence of depth maps with the default colormap.

    Args:
        depths: Array of depth values (typically T x H x W -- TODO confirm).
        v_min: Normalization lower bound; defaults to the global minimum.
        v_max: Normalization upper bound; defaults to the global maximum.

    Returns:
        Colorized depths as a numpy array (one RGB triple per input value).
    """
    lo = depths.min() if v_min is None else v_min
    hi = depths.max() if v_max is None else v_max
    colored = ColorMapper().apply(torch.tensor(depths), v_min=lo, v_max=hi)
    return colored.numpy()