import argparse
import io
import json
from pathlib import Path
from PIL import Image
import webdataset as wds
def decode_sample(sample: dict, num_frames: int = 8) -> dict:
    """Decode one raw WebDataset sample into PIL frames plus metadata.

    Args:
        sample: Raw sample dict mapping WebDataset extension keys
            (``"jpg-00"`` .. ``"jpg-NN"``, ``"json"``, ``"__key__"``) to bytes.
        num_frames: Number of ``"jpg-NN"`` frame entries to decode.
            Defaults to 8, matching the original hard-coded shard layout,
            so existing ``.map(decode_sample)`` callers are unaffected.

    Returns:
        Dict with ``"id"`` and ``"label"`` pulled from the JSON metadata,
        a list of RGB PIL images under ``"frames"``, and the sample's
        original ``"__key__"``.

    Raises:
        KeyError: If an expected frame or metadata key is missing.
    """
    frames = [
        Image.open(io.BytesIO(sample[f"jpg-{i:02d}"])).convert("RGB")
        for i in range(num_frames)
    ]
    metadata = json.loads(sample["json"])
    return {
        "id": metadata["id"],
        "label": metadata["label"],
        "frames": frames,
        "__key__": sample["__key__"],
    }
def parse_args() -> argparse.Namespace:
    """Build and run the CLI parser for the shard-loading script."""
    cli = argparse.ArgumentParser(description="Load Camera Motion WebDataset shards.")
    cli.add_argument(
        "--dataset-root",
        required=True,
        type=Path,
        help="Folder containing the staged HF dataset files.",
    )
    cli.add_argument("--split", default="train", choices=["train", "val", "test"])
    cli.add_argument("--limit", default=3, type=int, help="Number of samples to print.")
    return cli.parse_args()
def main() -> None:
    """Locate shards for the chosen split and print summaries of a few samples."""
    args = parse_args()
    shard_list = sorted(
        str(shard) for shard in args.dataset_root.glob(f"{args.split}-*.tar")
    )
    if not shard_list:
        raise SystemExit(f"No shards found for split {args.split} in {args.dataset_root}")
    # shardshuffle=False keeps shard order deterministic for inspection.
    pipeline = wds.WebDataset(shard_list, shardshuffle=False).map(decode_sample)
    for position, record in enumerate(pipeline):
        summary = {
            "index": position,
            "id": record["id"],
            "label": record["label"],
            "num_frames": len(record["frames"]),
            "frame_size": record["frames"][0].size,
            "webdataset_key": record["__key__"],
        }
        print(json.dumps(summary, indent=2))
        if position + 1 >= args.limit:
            break


if __name__ == "__main__":
    main()