| import io |
| import json |
| import numpy as np |
| import torch |
| import webdataset as wds |
| from PIL import Image |
| from concurrent.futures import ThreadPoolExecutor |
| import os |
| from huggingface_hub import HfApi |
|
|
| |
| os.environ["HF_HUB_ENABLE_HF_TRANSFER"] = "1" |
|
|
class CARLAVideoLoader:
    """Streams CARLA Stage2 video clips from WebDataset tar shards on HF Hub.

    Shards pack consecutive per-frame samples; this loader groups every
    ``frames_per_scene`` consecutive frames into one video clip and decodes
    each clip's frames in parallel with a thread pool.
    """

    def __init__(
        self,
        hf_repo="mkxdxd/carla-dataset",
        town="Town01",
        actor_type="pedestrian",
        frames_per_scene=93,
        batch_size=1,
        num_workers=4,
        decode_workers=32,
        shuffle=True
    ):
        """
        Custom video loader for CARLA Stage2 dataset in WebDataset format.
        Optimized for 1-tar-N-scenes format with parallel decoding.

        Args:
            hf_repo: Hugging Face dataset repo id that holds the tar shards.
            town: CARLA town subdirectory to stream (e.g. "Town01").
            actor_type: actor subdirectory (e.g. "pedestrian").
            frames_per_scene: consecutive frames grouped into one clip.
            batch_size: clips per batch yielded by the DataLoader.
            num_workers: DataLoader worker processes (0 = in-process loading).
            decode_workers: threads used to decode one clip's frames.
            shuffle: whether to shuffle shard order.

        Raises:
            ValueError: if no matching ``.tar`` shards exist in the repo.
        """
        self.hf_repo = hf_repo
        self.town = town
        self.actor_type = actor_type
        self.frames_per_scene = frames_per_scene
        self.batch_size = batch_size
        self.num_workers = num_workers
        self.decode_workers = decode_workers
        self.shuffle = shuffle

        # Discover every tar shard under the requested town/actor split.
        api = HfApi()
        all_files = api.list_repo_files(repo_id=hf_repo, repo_type="dataset")

        prefix = f"{town}/{actor_type}/"
        self.tar_urls = [
            f"https://huggingface.co/datasets/{hf_repo}/resolve/main/{f}"
            for f in all_files
            if f.startswith(prefix) and f.endswith(".tar")
        ]

        if not self.tar_urls:
            raise ValueError(f"No tar files found for {town}/{actor_type} in {hf_repo}")

        print(f"✅ Found {len(self.tar_urls)} shards for {town}/{actor_type}")

        # Keep payloads as raw bytes through the pipeline; decoding happens
        # per-clip in _frames_to_video_sequence so it can run multithreaded.
        self.dataset = (
            wds.WebDataset(self.tar_urls, shardshuffle=self.shuffle, handler=wds.warn_and_continue)
            .to_tuple("__key__", "rgb.png", "depth.npy", "camera.json", "metadata.json", handler=wds.warn_and_continue)
            .map(lambda x: (
                x[0],
                {
                    "rgb.png": x[1],
                    "depth.npy": x[2],
                    "camera.json": x[3],
                    "metadata.json": x[4],
                }
            ), handler=wds.warn_and_continue)
            .compose(self._frames_to_video_sequence)
        )

        # prefetch_factor / persistent_workers are only valid when
        # num_workers > 0; passing them with 0 workers raises in PyTorch.
        mp_kwargs = (
            {"prefetch_factor": 2, "persistent_workers": True}
            if self.num_workers > 0
            else {}
        )
        self.dataloader = torch.utils.data.DataLoader(
            self.dataset,
            batch_size=self.batch_size,
            num_workers=self.num_workers,
            pin_memory=True,
            collate_fn=self._collate_fn,
            **mp_kwargs,
        )

    def _decode_frame(self, sample):
        """Decode one frame's raw bytes.

        Args:
            sample: dict with raw bytes under "rgb.png", "depth.npy",
                "camera.json" and "metadata.json".

        Returns:
            Tuple ``(rgb, depth, camera, metadata)``: rgb is a (C, H, W)
            uint8 tensor, depth a tensor loaded from the .npy payload,
            camera/metadata the parsed JSON objects.
        """
        rgb = Image.open(io.BytesIO(sample["rgb.png"])).convert("RGB")
        rgb = torch.from_numpy(np.array(rgb)).permute(2, 0, 1)  # HWC -> CHW

        depth = torch.from_numpy(np.load(io.BytesIO(sample["depth.npy"])))

        camera = json.loads(sample["camera.json"])
        metadata = json.loads(sample["metadata.json"])

        return rgb, depth, camera, metadata

    def _frames_to_video_sequence(self, it):
        """WebDataset 'compose' stage: group consecutive frames into clips.

        Yields one dict per complete clip of ``frames_per_scene`` frames.
        A trailing partial clip (fewer frames left in the stream) is
        silently dropped, matching the original grouping-by-count design.
        """
        buffer = []
        # One pool for the whole stream — the previous implementation spun
        # up and tore down a ThreadPoolExecutor for every single scene,
        # paying thread start-up cost per clip.
        with ThreadPoolExecutor(max_workers=self.decode_workers) as executor:
            for key, sample in it:
                buffer.append(sample)

                if len(buffer) < self.frames_per_scene:
                    continue

                decoded = list(executor.map(self._decode_frame, buffer))

                rgbs = torch.stack([d[0] for d in decoded])
                depths = torch.stack([d[1] for d in decoded])
                cameras = [d[2] for d in decoded]
                metas = [d[3] for d in decoded]

                # Keys look like "<scene>_<frame>"; strip the frame suffix
                # of the clip's last frame to recover the scene id.
                base_key = key.rsplit("_", 1)[0]

                yield {
                    "video": rgbs,
                    "depth": depths,
                    "camera": cameras,
                    "metadata": metas,
                    "scene_id": base_key
                }
                buffer = []

    def _collate_fn(self, batch):
        """
        Handle batching.
        If batch_size > 1, tensors are stacked, metadata remains as lists.
        Returns an empty dict for an empty batch.
        """
        if not batch:
            return {}

        return {
            "video": torch.stack([b["video"] for b in batch]),
            "depth": torch.stack([b["depth"] for b in batch]),
            "scene_id": [b["scene_id"] for b in batch],
            "camera": [b["camera"] for b in batch],
            "metadata": [b["metadata"] for b in batch],
        }

    def __iter__(self):
        """Iterate batches from the underlying DataLoader."""
        return iter(self.dataloader)
|
|
| |
if __name__ == "__main__":
    import time

    # Smoke test: pull a handful of batches off the stream and report
    # tensor shapes plus total wall-clock time.
    print("🚀 Initializing CARLA Video Loader...")
    video_loader = CARLAVideoLoader(
        hf_repo="mkxdxd/carla-dataset",
        town="Town01",
        actor_type="pedestrian",
        batch_size=2,
        num_workers=4,
        decode_workers=16
    )

    print("🎬 Starting stream...")
    t_start = time.time()

    for batch_idx, video_batch in enumerate(video_loader):
        print(f"\n📦 Batch {batch_idx + 1}")
        print(f" - Video Shape: {video_batch['video'].shape}")
        print(f" - Depth Shape: {video_batch['depth'].shape}")
        print(f" - Scenes: {video_batch['scene_id']}")

        # Peek at the metadata of the first frame of the first clip.
        first_frame_meta = video_batch['metadata'][0][0]
        print(f" - Meta (B0, T0): {first_frame_meta['town']}, frame {first_frame_meta['frame_id']}")

        # Stop after three batches — this is only a sanity check.
        if batch_idx == 2:
            break

    elapsed = time.time() - t_start
    print(f"\n✅ Test finished in {elapsed:.2f}s")
|
|