import io
import json
import numpy as np
import torch
import webdataset as wds
from PIL import Image
from concurrent.futures import ThreadPoolExecutor
import os
from huggingface_hub import HfApi
# 🚀 Speed up HF transfers
os.environ["HF_HUB_ENABLE_HF_TRANSFER"] = "1"
class CARLAVideoLoader:
    """Streams CARLA Stage2 scenes from HuggingFace WebDataset tar shards as videos.

    Individual frame samples (rgb/depth/camera/metadata) are grouped into
    fixed-length video sequences and served through a PyTorch DataLoader.
    """

    def __init__(
        self,
        hf_repo="mkxdxd/carla-dataset",
        town="Town01",
        actor_type="pedestrian",
        frames_per_scene=93,
        batch_size=1,
        num_workers=4,
        decode_workers=32,
        shuffle=True
    ):
        """
        Custom video loader for CARLA Stage2 dataset in WebDataset format.
        Optimized for 1-tar-N-scenes format with parallel decoding.

        Args:
            hf_repo: HuggingFace dataset repo id to stream shards from.
            town: CARLA town name; first component of the shard path prefix.
            actor_type: actor sub-folder (e.g. "pedestrian"); second component.
            frames_per_scene: consecutive frames grouped into one video sample.
            batch_size: videos per batch produced by the DataLoader.
            num_workers: DataLoader worker processes (0 = load in main process).
            decode_workers: threads used to decode one scene's frames in parallel.
            shuffle: whether to shuffle shard order.

        Raises:
            ValueError: if no ``.tar`` shards exist under ``{town}/{actor_type}/``.
        """
        self.hf_repo = hf_repo
        self.town = town
        self.actor_type = actor_type
        self.frames_per_scene = frames_per_scene
        self.batch_size = batch_size
        self.num_workers = num_workers
        self.decode_workers = decode_workers
        self.shuffle = shuffle

        # 1. Get tar URLs from HuggingFace Hub
        self.tar_urls = self._discover_shards()
        print(f"✅ Found {len(self.tar_urls)} shards for {town}/{actor_type}")

        # 2. Build WebDataset pipeline
        self.dataset = self._build_pipeline()

        # 3. Create DataLoader
        self.dataloader = self._build_dataloader()

    def _discover_shards(self):
        """Return resolve-URLs of all .tar shards under town/actor_type in the repo."""
        api = HfApi()
        all_files = api.list_repo_files(repo_id=self.hf_repo, repo_type="dataset")
        prefix = f"{self.town}/{self.actor_type}/"
        urls = [
            f"https://huggingface.co/datasets/{self.hf_repo}/resolve/main/{f}"
            for f in all_files
            if f.startswith(prefix) and f.endswith(".tar")
        ]
        if not urls:
            raise ValueError(f"No tar files found for {self.town}/{self.actor_type} in {self.hf_repo}")
        return urls

    def _build_pipeline(self):
        """Build the WebDataset pipeline: raw tar samples -> per-scene video dicts."""
        return (
            wds.WebDataset(self.tar_urls, shardshuffle=self.shuffle, handler=wds.warn_and_continue)
            .to_tuple("__key__", "rgb.png", "depth.npy", "camera.json", "metadata.json", handler=wds.warn_and_continue)
            # Re-pack the tuple as (key, {component_name: raw_bytes}) so that
            # _decode_frame can address components by name.
            .map(lambda x: (
                x[0],
                {
                    "rgb.png": x[1],
                    "depth.npy": x[2],
                    "camera.json": x[3],
                    "metadata.json": x[4],
                }
            ), handler=wds.warn_and_continue)
            .compose(self._frames_to_video_sequence)
        )

    def _build_dataloader(self):
        """Create the DataLoader over the scene-level IterableDataset."""
        extra = {}
        if self.num_workers > 0:
            # prefetch_factor / persistent_workers are only valid with workers;
            # passing them with num_workers=0 raises ValueError in PyTorch.
            extra.update(prefetch_factor=2, persistent_workers=True)
        return torch.utils.data.DataLoader(
            self.dataset,
            batch_size=self.batch_size,
            num_workers=self.num_workers,
            pin_memory=True,
            # Our compose stage yields dicts with mixed tensor/list values,
            # so batching is handled by a custom collate.
            collate_fn=self._collate_fn,
            **extra,
        )

    def _decode_frame(self, sample):
        """Decode one frame's raw bytes into (rgb, depth, camera, metadata)."""
        # RGB: png -> PIL -> Tensor (3, H, W)
        rgb = Image.open(io.BytesIO(sample["rgb.png"])).convert("RGB")
        rgb = torch.from_numpy(np.array(rgb)).permute(2, 0, 1)
        # Depth: npy -> Tensor (H, W)
        depth = np.load(io.BytesIO(sample["depth.npy"]))
        depth = torch.from_numpy(depth)
        # JSONs
        camera = json.loads(sample["camera.json"])
        metadata = json.loads(sample["metadata.json"])
        return rgb, depth, camera, metadata

    def _frames_to_video_sequence(self, it):
        """WebDataset 'compose' function to group individual frames into full videos.

        Assumes frames of one scene arrive consecutively in the tar stream and
        that every scene contains exactly ``frames_per_scene`` frames — the
        grouping is purely count-based.
        """
        buffer = []
        # One executor for the lifetime of the stream instead of one per scene.
        with ThreadPoolExecutor(max_workers=self.decode_workers) as executor:
            for key, sample in it:
                buffer.append(sample)
                if len(buffer) < self.frames_per_scene:
                    continue
                # 🚀 Parallel decoding for high performance
                decoded = list(executor.map(self._decode_frame, buffer))
                # Unpack and stack
                rgbs = torch.stack([d[0] for d in decoded])    # (T, 3, H, W)
                depths = torch.stack([d[1] for d in decoded])  # (T, H, W)
                cameras = [d[2] for d in decoded]
                metas = [d[3] for d in decoded]
                # key example: Town01/pedestrian/scene_0001_000
                # base_key will be scene_0001
                base_key = key.rsplit("_", 1)[0]
                yield {
                    "video": rgbs,       # (93, 3, 704, 1280)
                    "depth": depths,     # (93, 704, 1280)
                    "camera": cameras,   # list of 93 dicts
                    "metadata": metas,   # list of 93 dicts
                    "scene_id": base_key
                }
                buffer = []
        if buffer:
            # Trailing incomplete scene (truncated shard / frame-count mismatch):
            # it is dropped as before, but no longer silently.
            print(f"⚠️ Dropping {len(buffer)} leftover frames (incomplete scene)")

    def _collate_fn(self, batch):
        """
        Handle batching.
        If batch_size > 1, tensors are stacked, metadata remains as lists.
        Returns {} for an empty batch.
        """
        if len(batch) == 0:
            return {}
        res = {
            "video": torch.stack([b["video"] for b in batch]),  # (B, T, 3, H, W)
            "depth": torch.stack([b["depth"] for b in batch]),  # (B, T, H, W)
            "scene_id": [b["scene_id"] for b in batch],
            "camera": [b["camera"] for b in batch],      # (B, T, dict)
            "metadata": [b["metadata"] for b in batch],  # (B, T, dict)
        }
        return res

    def __iter__(self):
        """Iterate batches straight from the underlying DataLoader."""
        return iter(self.dataloader)
# --- Example Usage ---
if __name__ == "__main__":
    import time

    print("🚀 Initializing CARLA Video Loader...")
    video_loader = CARLAVideoLoader(
        hf_repo="mkxdxd/carla-dataset",
        town="Town01",
        actor_type="pedestrian",
        batch_size=2,      # two videos per batch
        num_workers=4,     # DataLoader worker processes
        decode_workers=16  # decode threads per scene
    )

    print("🎬 Starting stream...")
    started_at = time.time()
    for batch_idx, batch in enumerate(loader := video_loader):
        print(f"\n📦 Batch {batch_idx+1}")
        print(f" - Video Shape: {batch['video'].shape}")  # (B, 93, 3, 704, 1280)
        print(f" - Depth Shape: {batch['depth'].shape}")  # (B, 93, 704, 1280)
        print(f" - Scenes: {batch['scene_id']}")

        # Peek at the metadata of frame 0 of the first video in the batch.
        first_frame_meta = batch['metadata'][0][0]
        print(f" - Meta (B0, T0): {first_frame_meta['town']}, frame {first_frame_meta['frame_id']}")

        # Only sample a few batches for the smoke test.
        if batch_idx >= 2:
            break

    elapsed = time.time() - started_at
    print(f"\n✅ Test finished in {elapsed:.2f}s")