fengyee commited on
Commit
a2a6f2e
·
verified ·
1 Parent(s): c0aae01

Add files using upload-large-folder tool

Browse files
.gitattributes CHANGED
@@ -1,60 +1,2 @@
1
- *.7z filter=lfs diff=lfs merge=lfs -text
2
- *.arrow filter=lfs diff=lfs merge=lfs -text
3
- *.avro filter=lfs diff=lfs merge=lfs -text
4
- *.bin filter=lfs diff=lfs merge=lfs -text
5
- *.bz2 filter=lfs diff=lfs merge=lfs -text
6
- *.ckpt filter=lfs diff=lfs merge=lfs -text
7
- *.ftz filter=lfs diff=lfs merge=lfs -text
8
- *.gz filter=lfs diff=lfs merge=lfs -text
9
- *.h5 filter=lfs diff=lfs merge=lfs -text
10
- *.joblib filter=lfs diff=lfs merge=lfs -text
11
- *.lfs.* filter=lfs diff=lfs merge=lfs -text
12
- *.lz4 filter=lfs diff=lfs merge=lfs -text
13
- *.mds filter=lfs diff=lfs merge=lfs -text
14
- *.mlmodel filter=lfs diff=lfs merge=lfs -text
15
- *.model filter=lfs diff=lfs merge=lfs -text
16
- *.msgpack filter=lfs diff=lfs merge=lfs -text
17
- *.npy filter=lfs diff=lfs merge=lfs -text
18
- *.npz filter=lfs diff=lfs merge=lfs -text
19
- *.onnx filter=lfs diff=lfs merge=lfs -text
20
- *.ot filter=lfs diff=lfs merge=lfs -text
21
- *.parquet filter=lfs diff=lfs merge=lfs -text
22
- *.pb filter=lfs diff=lfs merge=lfs -text
23
- *.pickle filter=lfs diff=lfs merge=lfs -text
24
- *.pkl filter=lfs diff=lfs merge=lfs -text
25
- *.pt filter=lfs diff=lfs merge=lfs -text
26
- *.pth filter=lfs diff=lfs merge=lfs -text
27
- *.rar filter=lfs diff=lfs merge=lfs -text
28
- *.safetensors filter=lfs diff=lfs merge=lfs -text
29
- saved_model/**/* filter=lfs diff=lfs merge=lfs -text
30
- *.tar.* filter=lfs diff=lfs merge=lfs -text
31
  *.tar filter=lfs diff=lfs merge=lfs -text
32
- *.tflite filter=lfs diff=lfs merge=lfs -text
33
- *.tgz filter=lfs diff=lfs merge=lfs -text
34
- *.wasm filter=lfs diff=lfs merge=lfs -text
35
- *.xz filter=lfs diff=lfs merge=lfs -text
36
- *.zip filter=lfs diff=lfs merge=lfs -text
37
- *.zst filter=lfs diff=lfs merge=lfs -text
38
- *tfevents* filter=lfs diff=lfs merge=lfs -text
39
- # Audio files - uncompressed
40
- *.pcm filter=lfs diff=lfs merge=lfs -text
41
- *.sam filter=lfs diff=lfs merge=lfs -text
42
- *.raw filter=lfs diff=lfs merge=lfs -text
43
- # Audio files - compressed
44
- *.aac filter=lfs diff=lfs merge=lfs -text
45
- *.flac filter=lfs diff=lfs merge=lfs -text
46
- *.mp3 filter=lfs diff=lfs merge=lfs -text
47
- *.ogg filter=lfs diff=lfs merge=lfs -text
48
- *.wav filter=lfs diff=lfs merge=lfs -text
49
- # Image files - uncompressed
50
- *.bmp filter=lfs diff=lfs merge=lfs -text
51
- *.gif filter=lfs diff=lfs merge=lfs -text
52
- *.png filter=lfs diff=lfs merge=lfs -text
53
- *.tiff filter=lfs diff=lfs merge=lfs -text
54
- # Image files - compressed
55
- *.jpg filter=lfs diff=lfs merge=lfs -text
56
- *.jpeg filter=lfs diff=lfs merge=lfs -text
57
- *.webp filter=lfs diff=lfs merge=lfs -text
58
- # Video files - compressed
59
- *.mp4 filter=lfs diff=lfs merge=lfs -text
60
- *.webm filter=lfs diff=lfs merge=lfs -text
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  *.tar filter=lfs diff=lfs merge=lfs -text
2
+ *.jsonl filter=lfs diff=lfs merge=lfs -text
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
README.md ADDED
@@ -0,0 +1,154 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ pretty_name: Camera Motion Dataset and Benchmark
3
+ tags:
4
+ - video
5
+ - webdataset
6
+ - benchmark
7
+ - camera-motion
8
+ ---
9
+
10
+ # Camera Motion Dataset and Benchmark
11
+
12
+ This repository packages the dataset and benchmark release for our paper "Geometry-Guided Camera Motion Understanding in VideoLLMs" as a Hugging Face dataset repo.
13
+
14
+ It contains:
15
+
16
+ - WebDataset tar shards for `train`, `val`, and `test`
17
+ - Split manifests (`train.json`, `val.json`, `test.json`) that map source clips to sample ids
18
+ - A multiple-choice VQA benchmark (`cam-motion.jsonl`)
19
+ - Small example scripts under `examples/`
20
+
21
+ ## Files
22
+
23
+ | Path | Description |
24
+ | --- | --- |
25
+ | `train-*.tar` | Training split WebDataset shards |
26
+ | `val-*.tar` | Validation split WebDataset shards |
27
+ | `test-*.tar` | Test split WebDataset shards |
28
+ | `train.json` | Training manifest with `video_path`, `clip_index`, `label`, and `id` |
29
+ | `val.json` | Validation manifest with the same schema |
30
+ | `test.json` | Test manifest with the same schema |
31
+ | `cam-motion.jsonl` | Multiple-choice benchmark for camera-motion understanding |
32
+ | `examples/load_webdataset.py` | Minimal WebDataset loader example |
33
+ | `examples/inspect_tar.py` | Prints the internal structure of one tar shard |
34
+ | `examples/read_benchmark.py` | Reads benchmark rows and maps them back to manifests |
35
+
36
+ ## Dataset Format
37
+
38
+ Each WebDataset sample stores 8 RGB frames resized to `560 x 560` and a JSON metadata record:
39
+
40
+ ```text
41
+ <sample_id>.jpg-00
42
+ <sample_id>.jpg-01
43
+ ...
44
+ <sample_id>.jpg-07
45
+ <sample_id>.json
46
+ ```
47
+
48
+ The JSON sidecar has the form:
49
+
50
+ ```json
51
+ {
52
+ "label": "dolly in, truck right",
53
+ "id": "00ae5c50b2b54704ad833996c20c055d"
54
+ }
55
+ ```
56
+
57
+ The manifests provide the lookup back to source provenance:
58
+
59
+ ```json
60
+ {
61
+ "video_path": "path/to/source/video.mp4",
62
+ "clip_index": 2,
63
+ "label": "dolly in, truck right",
64
+ "id": "00ae5c50b2b54704ad833996c20c055d"
65
+ }
66
+ ```
67
+
68
+ `cam-motion.jsonl` contains multiple-choice benchmark rows like:
69
+
70
+ ```json
71
+ {
72
+ "video": "path/to/source/video.mp4",
73
+ "clip_index": 2,
74
+ "conversations": [
75
+ {
76
+ "from": "human",
77
+ "value": "<video>\nIdentify the camera motion depicted in the video using standard cinematographic terminology.\nOptions:\n(A) ...\n(B) ...\n(C) ...\n(D) ..."
78
+ },
79
+ {
80
+ "from": "gpt",
81
+ "value": "Answer: (A) ..."
82
+ }
83
+ ]
84
+ }
85
+ ```
86
+
87
+ The `video` field in the benchmark is a provenance string copied from dataset construction. After release, it should be matched against `video_path` in the manifest files rather than treated as a valid local path.
88
+
89
+ ## Current Split Layout
90
+
91
+ - Manifest rows: `train=9819`, `val=1227`, `test=1228` (12274 total)
92
+ - Materialized shard samples: match the manifests exactly (0 missing)
93
+ - Benchmark rows: `12274` (one row per unique clip)
94
+
95
+ All splits are deduplicated and non-overlapping.
96
+
97
+ The example and validation scripts in this repo show how to inspect these counts locally before publishing.
98
+
99
+ ## Usage
100
+
101
+ Download the dataset repo locally, then point `webdataset` at the shard files:
102
+
103
+ ```python
104
+ import io
105
+ import json
106
+ from pathlib import Path
107
+
108
+ from PIL import Image
109
+ import webdataset as wds
110
+
111
+ dataset_root = Path("camera-motion-dataset-and-benchmark")
112
+ shards = sorted(str(p) for p in dataset_root.glob("train-*.tar"))
113
+
114
+ def decode_sample(sample):
115
+ frames = [
116
+ Image.open(io.BytesIO(sample[f"jpg-{i:02d}"])).convert("RGB")
117
+ for i in range(8)
118
+ ]
119
+ meta = json.loads(sample["json"])
120
+ return {
121
+ "id": meta["id"],
122
+ "label": meta["label"],
123
+ "frames": frames,
124
+ }
125
+
126
+ dataset = wds.WebDataset(shards, shardshuffle=False).map(decode_sample)
127
+ sample = next(iter(dataset))
128
+ print(sample["id"], sample["label"], len(sample["frames"]))
129
+ ```
130
+
131
+ To inspect the benchmark:
132
+
133
+ ```bash
134
+ python examples/read_benchmark.py --dataset-root . --show 3
135
+ ```
136
+
137
+ To inspect one tar shard:
138
+
139
+ ```bash
140
+ python examples/inspect_tar.py --tar train-000000.tar
141
+ ```
142
+
143
+ ## Suggested Hugging Face Workflow
144
+
145
+ 1. Stage a clean dataset repo from the source project using the local helper scripts.
146
+ 2. If you first rebuild corrected manifests, point `stage_release.py` at that dataset directory using the `--dataset-dir` flag.
147
+ 3. Create a private dataset repo first.
148
+ 4. Upload the staged folder.
149
+ 5. Inspect the dataset card, files, and example scripts on the Hub.
150
+ 6. Make the repo public only after the release audit looks correct.
151
+
152
+ ## Citation
153
+
154
+ If you publish this dataset, replace this section with your project citation.
cam-motion.jsonl ADDED
The diff for this file is too large to render. See raw diff
 
examples/__pycache__/inspect_tar.cpython-39.pyc ADDED
Binary file (1.72 kB). View file
 
examples/__pycache__/load_webdataset.cpython-39.pyc ADDED
Binary file (2.17 kB). View file
 
examples/__pycache__/read_benchmark.cpython-39.pyc ADDED
Binary file (1.87 kB). View file
 
examples/inspect_tar.py ADDED
@@ -0,0 +1,50 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import argparse
2
+ import io
3
+ import json
4
+ import tarfile
5
+ from pathlib import Path
6
+
7
+ from PIL import Image
8
+
9
+
10
+ def parse_args() -> argparse.Namespace:
11
+ parser = argparse.ArgumentParser(description="Inspect one Camera Motion tar shard.")
12
+ parser.add_argument("--tar", type=Path, required=True, help="Path to a tar shard.")
13
+ parser.add_argument("--limit", type=int, default=2, help="Number of sample ids to inspect.")
14
+ return parser.parse_args()
15
+
16
+
17
+ def main() -> None:
18
+ args = parse_args()
19
+
20
+ with tarfile.open(args.tar, "r") as tar:
21
+ members = tar.getnames()
22
+ sample_ids = []
23
+ for name in members:
24
+ if name.endswith(".json"):
25
+ sample_ids.append(name.rsplit(".", 1)[0])
26
+ if len(sample_ids) >= args.limit:
27
+ break
28
+
29
+ print(json.dumps({"tar": str(args.tar), "num_members": len(members), "sample_ids": sample_ids}, indent=2))
30
+
31
+ for sample_id in sample_ids:
32
+ metadata = json.loads(tar.extractfile(f"{sample_id}.json").read().decode("utf-8"))
33
+ frame_members = [f"{sample_id}.jpg-{i:02d}" for i in range(8)]
34
+ first_frame = Image.open(io.BytesIO(tar.extractfile(frame_members[0]).read()))
35
+ print(
36
+ json.dumps(
37
+ {
38
+ "sample_id": sample_id,
39
+ "frame_members": frame_members,
40
+ "metadata": metadata,
41
+ "first_frame_size": first_frame.size,
42
+ "first_frame_mode": first_frame.mode,
43
+ },
44
+ indent=2,
45
+ )
46
+ )
47
+
48
+
49
+ if __name__ == "__main__":
50
+ main()
examples/load_webdataset.py ADDED
@@ -0,0 +1,58 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import argparse
2
+ import io
3
+ import json
4
+ from pathlib import Path
5
+
6
+ from PIL import Image
7
+ import webdataset as wds
8
+
9
+
10
+ def decode_sample(sample: dict) -> dict:
11
+ frames = [
12
+ Image.open(io.BytesIO(sample[f"jpg-{i:02d}"])).convert("RGB")
13
+ for i in range(8)
14
+ ]
15
+ metadata = json.loads(sample["json"])
16
+ return {
17
+ "id": metadata["id"],
18
+ "label": metadata["label"],
19
+ "frames": frames,
20
+ "__key__": sample["__key__"],
21
+ }
22
+
23
+
24
+ def parse_args() -> argparse.Namespace:
25
+ parser = argparse.ArgumentParser(description="Load Camera Motion WebDataset shards.")
26
+ parser.add_argument("--dataset-root", type=Path, required=True, help="Folder containing the staged HF dataset files.")
27
+ parser.add_argument("--split", choices=["train", "val", "test"], default="train")
28
+ parser.add_argument("--limit", type=int, default=3, help="Number of samples to print.")
29
+ return parser.parse_args()
30
+
31
+
32
+ def main() -> None:
33
+ args = parse_args()
34
+ shards = sorted(str(path) for path in args.dataset_root.glob(f"{args.split}-*.tar"))
35
+ if not shards:
36
+ raise SystemExit(f"No shards found for split {args.split} in {args.dataset_root}")
37
+
38
+ dataset = wds.WebDataset(shards, shardshuffle=False).map(decode_sample)
39
+ for index, sample in enumerate(dataset):
40
+ print(
41
+ json.dumps(
42
+ {
43
+ "index": index,
44
+ "id": sample["id"],
45
+ "label": sample["label"],
46
+ "num_frames": len(sample["frames"]),
47
+ "frame_size": sample["frames"][0].size,
48
+ "webdataset_key": sample["__key__"],
49
+ },
50
+ indent=2,
51
+ )
52
+ )
53
+ if index + 1 >= args.limit:
54
+ break
55
+
56
+
57
+ if __name__ == "__main__":
58
+ main()
examples/read_benchmark.py ADDED
@@ -0,0 +1,57 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import argparse
2
+ import json
3
+ from collections import defaultdict
4
+ from pathlib import Path
5
+
6
+
7
+ def build_manifest_lookup(dataset_root: Path) -> dict[tuple[str, int], list[dict]]:
8
+ lookup: dict[tuple[str, int], list[dict]] = defaultdict(list)
9
+ for split in ["train", "val", "test"]:
10
+ with open(dataset_root / f"{split}.json", "r") as f:
11
+ rows = json.load(f)
12
+ for row in rows:
13
+ lookup[(row["video_path"], row["clip_index"])].append(
14
+ {
15
+ "split": split,
16
+ "id": row["id"],
17
+ "label": row["label"],
18
+ }
19
+ )
20
+ return lookup
21
+
22
+
23
+ def parse_args() -> argparse.Namespace:
24
+ parser = argparse.ArgumentParser(description="Inspect Camera Motion benchmark rows.")
25
+ parser.add_argument("--dataset-root", type=Path, required=True, help="Folder containing the staged HF dataset files.")
26
+ parser.add_argument("--show", type=int, default=3, help="Number of benchmark rows to print.")
27
+ return parser.parse_args()
28
+
29
+
30
+ def main() -> None:
31
+ args = parse_args()
32
+ benchmark_path = args.dataset_root / "cam-motion.jsonl"
33
+ lookup = build_manifest_lookup(args.dataset_root)
34
+
35
+ with open(benchmark_path, "r") as f:
36
+ for index, line in enumerate(f):
37
+ row = json.loads(line)
38
+ matches = lookup.get((row["video"], row["clip_index"]), [])
39
+ print(
40
+ json.dumps(
41
+ {
42
+ "index": index,
43
+ "video": row["video"],
44
+ "clip_index": row["clip_index"],
45
+ "question": row["conversations"][0]["value"],
46
+ "answer": row["conversations"][1]["value"],
47
+ "manifest_matches": matches,
48
+ },
49
+ indent=2,
50
+ )
51
+ )
52
+ if index + 1 >= args.show:
53
+ break
54
+
55
+
56
+ if __name__ == "__main__":
57
+ main()
test-000000.tar ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:56966c98cee7fadd63457b9c43e2d9c7bac69ebfe090866c077c67d132a0040f
3
+ size 929689600
test-000001.tar ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7afa63f4043551d9392d5aa3b056e7c59cd6bd241fea8d9ccb3af942ab7aaff1
3
+ size 213032960
test.json ADDED
The diff for this file is too large to render. See raw diff
 
train-000000.tar ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e2dfdd1cb428a58622441808597d39e3ae220945d41190acbadd556779ceb11e
3
+ size 933560320
train-000001.tar ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:93cf34532d19d715ab92d5354ee053e300596879efaa8e825a4f482111fe80c5
3
+ size 942428160
train-000002.tar ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2d9962b969d217ef565d97daa955ea75320ccd9c7f88eb2424c8d956e5267e77
3
+ size 940902400
train-000003.tar ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4ab0387defbbfba55525cb139d51ba2fb0fc07f3885b94994edaeed8b5b0b14b
3
+ size 928317440
train-000004.tar ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2e816b5714b64b980b747d87ceb63ae904d36ce5256d267acafff6d63e183bf7
3
+ size 931307520
train-000005.tar ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:76d5131be21d61941d6746cd30c83ff95e9397cbef12e21c2adb815e9d14fa51
3
+ size 933365760
train-000006.tar ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:cbff1931e8540c8d93ffc62e7061f5e34d37c059d1be19511962639062afc3b0
3
+ size 943452160
train-000007.tar ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:342dca7428a0d85101b91ea2157bab5d33e04861e76963c21319701670dbaea0
3
+ size 936325120
train-000008.tar ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b7d561faad2b2911b97002addb8a002e7b45fc7bec7c22b481b77d9d8827b01e
3
+ size 946053120
train-000009.tar ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d4993acc1018e95a494899267d38d1caa953880f76c9dc74930cdbce22734002
3
+ size 770007040
train.json ADDED
The diff for this file is too large to render. See raw diff
 
val-000000.tar ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1b99de77bb032b74cb6f2d04f86f7a957ce981de58656c8fdb3d5937935fa301
3
+ size 944506880
val-000001.tar ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6a5e1e85740fcd1046f73f9a6d344b510abb1dc83f59e097673d4e1cf9c70d46
3
+ size 211097600
val.json ADDED
The diff for this file is too large to render. See raw diff