from os.path import dirname, join
import webdataset as wds
from PIL import Image
import io
import json
import numpy as np
import argparse
import matplotlib
import matplotlib.pyplot as plt
import os

# Optional video backends: dump_video() prefers imageio and falls back to OpenCV.
try:
    import imageio
    HAS_IMAGEIO = True
except ImportError:
    HAS_IMAGEIO = False

try:
    import cv2
    HAS_CV2 = True
except ImportError:
    HAS_CV2 = False
def dump_video(image_seq, output_path, fps=30, codec='libx264', quality=8):
    """
    Dump a sequence of PIL Images to a video file.

    Args:
        image_seq: List of PIL Images
        output_path: Output video file path (e.g., 'output.mp4')
        fps: Frames per second (default: 30)
        codec: Video codec (default: 'libx264')
        quality: Video quality, 0-10, higher is better (default: 8)
    """
    if not image_seq:
        print("Warning: Empty image sequence, skipping video dump")
        return

    if HAS_IMAGEIO:
        # Convert PIL Images to numpy arrays for imageio.
        frames = [np.array(img) for img in image_seq]

        try:
            # Some imageio versions reject one or more of these keyword arguments,
            # hence the fallbacks below.
            imageio.mimwrite(output_path, frames, fps=fps, codec=codec, quality=quality)
        except (AttributeError, TypeError):
            try:
                writer = imageio.get_writer(output_path, fps=fps, codec=codec)
                for frame in frames:
                    writer.append_data(frame)
                writer.close()
            except Exception:
                # Last resort: let imageio pick the codec itself.
                writer = imageio.get_writer(output_path, fps=fps)
                for frame in frames:
                    writer.append_data(frame)
                writer.close()
        print(f"Video saved to {output_path} using imageio")

    elif HAS_CV2:
        first_img = image_seq[0]
        width, height = first_img.size  # PIL size is (width, height)

        # OpenCV FourCC codes are exactly four characters; fall back to 'mp4v'
        # when the requested codec name (e.g. 'libx264') does not fit.
        fourcc = cv2.VideoWriter_fourcc(*(codec if len(codec) == 4 else 'mp4v'))
        out = cv2.VideoWriter(output_path, fourcc, fps, (width, height))

        for img in image_seq:
            # OpenCV expects BGR channel order.
            img_array = np.array(img)
            if img_array.ndim == 3:
                if img_array.shape[2] == 3:
                    img_array = cv2.cvtColor(img_array, cv2.COLOR_RGB2BGR)
                elif img_array.shape[2] == 4:
                    img_array = cv2.cvtColor(img_array, cv2.COLOR_RGBA2BGR)
            out.write(img_array)

        out.release()
        print(f"Video saved to {output_path} using OpenCV")

    else:
        raise ImportError(
            "Neither imageio nor cv2 is available. Please install one: "
            "pip install imageio[ffmpeg] or pip install opencv-python"
        )
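
# A minimal usage sketch for dump_video (hypothetical frames, not part of the
# dataset handled below); assumes imageio with an ffmpeg plugin or OpenCV is
# installed. Kept as a function that is never called, so script behaviour is unchanged.
def _demo_dump_video(path='demo.mp4'):
    ramp = [Image.fromarray(np.full((64, 64, 3), 8 * i, dtype=np.uint8)) for i in range(32)]
    dump_video(ramp, path, fps=16)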
def parse_depth_16bit(depth_image: Image.Image, max_depth: float) -> np.ndarray:
    """Parse 16-bit depth image back to actual depth values."""
    depth_array = np.array(depth_image, dtype=np.uint16)

    depth_normalized = depth_array.astype(np.float32) / 65535.0
    actual_depth = depth_normalized * max_depth

    return actual_depth
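
# For reference, the matching encoder presumably looks like the sketch below; this is
# an assumption inferred from parse_depth_16bit about how depth.png / depth.json are
# written, not something confirmed by the dataset itself.
def _encode_depth_16bit_sketch(depth: np.ndarray):
    max_depth = float(depth.max())
    scaled = np.clip(depth / max_depth, 0.0, 1.0) if max_depth > 0 else np.zeros_like(depth)
    depth_image = Image.fromarray((scaled * 65535.0).astype(np.uint16))
    return depth_image, {'max_depth': max_depth}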
def colorize_depth_map(depth, mask=None, reverse_color=False):
    """Colorize a depth map in [0, 1] with the Spectral colormap and return a PIL Image.

    `mask` is accepted but currently unused.
    """
    cm = matplotlib.colormaps["Spectral"]

    if reverse_color:
        img_colored_np = cm(1 - depth, bytes=False)[:, :, 0:3]
    else:
        img_colored_np = cm(depth, bytes=False)[:, :, 0:3]

    depth_colored = (img_colored_np * 255).astype(np.uint8)

    depth_colored_img = Image.fromarray(depth_colored)
    return depth_colored_img
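
# A small sanity check for colorize_depth_map (synthetic gradient, hypothetical
# output path); handy for eyeballing the colormap direction. Never called by the script.
def _demo_colorize_depth(path='depth_ramp.png'):
    ramp = np.tile(np.linspace(0.0, 1.0, 256, dtype=np.float32), (64, 1))
    colorize_depth_map(ramp).save(path)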
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--data_path', type=str, default='data/TransPhy3D/parametric_train/training/0_materials.000000.tar')
    parser.add_argument('--output_path', type=str, default='output')
    args = parser.parse_args()

    os.makedirs(args.output_path, exist_ok=True)

    dataset = wds.WebDataset(args.data_path)

    depth_seq_raw = []
    depth_max_values = []
    normal_seq = []
    rgbs_seq = []
    meta_info = []

    # Collect RGB, normal, and depth frames (plus per-frame metadata) from the tar shard.
    for sample in dataset:
        depth_img = None
        max_depth = None
        for key, value in sample.items():
            # Skip non-payload entries such as '__key__'.
            if not isinstance(value, (bytes, bytearray)):
                continue

            if key == 'depth.png':
                # 16-bit depth stored as PNG; decoded to metric depth below.
                depth_img = Image.open(io.BytesIO(value))
            elif key == 'depth.json':
                depth_info = json.loads(value)
                max_depth = depth_info.get('max_depth', None)
            elif key == 'normal.png':
                normal_seq.append(Image.open(io.BytesIO(value)))
            elif key == 'image.png':
                rgbs_seq.append(Image.open(io.BytesIO(value)))
            elif key.endswith('.json'):
                meta_info.append(json.loads(value))

        if depth_img is not None:
            depth_seq_raw.append(depth_img)
            depth_max_values.append(max_depth)

    # Recover metric depth, then re-normalize to [0, 1] for colormapping.
    depth_seq_vis = []
    for depth_img, max_depth in zip(depth_seq_raw, depth_max_values):
        if max_depth is not None:
            actual_depth = parse_depth_16bit(depth_img, max_depth)
            depth_normalized = actual_depth / max_depth
            depth_seq_vis.append(colorize_depth_map(depth_normalized))
        else:
            # No max_depth metadata: fall back to a grayscale preview.
            depth_seq_vis.append(depth_img.convert('L'))

    dump_video(rgbs_seq, join(args.output_path, 'output_rgb.mp4'), fps=30)
    dump_video(normal_seq, join(args.output_path, 'output_normal.mp4'), fps=30)
    dump_video(depth_seq_vis, join(args.output_path, 'output_depth.mp4'), fps=30)