Datasets:
Update README.md
Browse files
README.md
CHANGED
|
@@ -6,4 +6,40 @@ configs:
|
|
- config_name: clips
  data_files: "clips/*.tar"
- config_name: frames
  data_files: "frames/*.tar"
---
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
  data_files: "clips/*.tar"
- config_name: frames
  data_files: "frames/*.tar"
---

## Example usage for clips

### Decoding the raw binary video data and JSON metadata
|
| 12 |
+
```python
|
| 13 |
+
import webdataset as wds
|
| 14 |
+
from huggingface_hub import HfFileSystem, get_token, hf_hub_url
|
| 15 |
+
import json
|
| 16 |
+
import io
|
| 17 |
+
import torch
|
| 18 |
+
import av
|
| 19 |
+
import numpy as np
|
| 20 |
+
from torch.utils.data import DataLoader
|
| 21 |
+
|
| 22 |
+
fs = HfFileSystem()
|
| 23 |
+
files = [fs.resolve_path(path) for path in fs.glob("hf://datasets/niranjangaurav17/grounding-YT-dataset/clips/*.tar")]
|
| 24 |
+
urls = [hf_hub_url(file.repo_id, file.path_in_repo, repo_type="dataset") for file in files]
|
| 25 |
+
urls = f"pipe: curl -s -L -H 'Authorization:Bearer {get_token()}' {'::'.join(urls)}"
|
| 26 |
+
|
| 27 |
+
def load_video(video_bytes):
|
| 28 |
+
container = av.open(io.BytesIO(video_bytes))
|
| 29 |
+
frames = []
|
| 30 |
+
for frame in container.decode(video=0):
|
| 31 |
+
img = frame.to_ndarray(format="rgb24")
|
| 32 |
+
frames.append(img)
|
| 33 |
+
video_tensor = torch.from_numpy(np.stack(frames))
|
| 34 |
+
return video_tensor #[T, H, W, C]
|
| 35 |
+
|
| 36 |
+
def load_json(json_bytes):
|
| 37 |
+
"""Decode JSON metadata"""
|
| 38 |
+
return json.loads(json_bytes.decode("utf-8"))
|
| 39 |
+
|
| 40 |
+
dataset = (
|
| 41 |
+
wds.WebDataset(urls,)
|
| 42 |
+
.shuffle(100)
|
| 43 |
+
.to_tuple("mp4", "json")
|
| 44 |
+
.map_tuple(load_video, load_json)
|
| 45 |
+
)
|