# omnifall / prepare_oops_videos.py
# simplexsigil: Added OF-ITW download logic (commit b1da356)
#!/usr/bin/env python3
"""Prepare OOPS videos for OF-ItW (OmniFall In-the-Wild).
Streams the OOPS dataset archive and extracts only the 818 videos used in
OF-ItW, renamed to match the OF-ItW path convention. By default, the archive
is streamed directly from the OOPS website (~45GB) without writing it to disk.
Only the output videos (~2.6GB) are saved.
Usage:
# Stream from the web (no local archive needed):
python prepare_oops_videos.py --output_dir /path/to/oops_prepared
# Use an already-downloaded archive:
python prepare_oops_videos.py --output_dir /path/to/oops_prepared \
--oops_archive /path/to/video_and_anns.tar.gz
# Then load with the dataset builder:
ds = load_dataset("simplexsigil2/omnifall", "of-itw",
include_video=True,
oops_video_dir="/path/to/oops_prepared",
trust_remote_code=True)
"""
import argparse
import csv
import os
import subprocess
import tarfile
# Public download URL for the full OOPS archive (~45GB, videos + annotations).
OOPS_URL = "https://oops.cs.columbia.edu/data/video_and_anns.tar.gz"
# Directory containing this script; the mapping CSV is shipped alongside it.
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
# CSV with columns "oops_path" and "itw_path" linking archive members to outputs.
MAPPING_FILE = os.path.join(SCRIPT_DIR, "data_files", "oops_video_mapping.csv")
def load_mapping():
    """Read the OOPS-to-ITW filename mapping shipped with the repo.

    Returns:
        dict: maps each OOPS-internal video path ("oops_path" column)
        to its OF-ItW output path ("itw_path" column).

    Raises:
        FileNotFoundError: if the mapping CSV is not next to this script.
    """
    if not os.path.exists(MAPPING_FILE):
        raise FileNotFoundError(
            f"Mapping file not found: {MAPPING_FILE}\n"
            "Make sure you run this script from the OmniFall dataset directory."
        )
    with open(MAPPING_FILE) as handle:
        rows = csv.DictReader(handle)
        return {entry["oops_path"]: entry["itw_path"] for entry in rows}
def _to_stdout_cmd(source, member):
"""Build command to extract a single tar member to stdout."""
if source.startswith("http://") or source.startswith("https://"):
return f'curl -sL "{source}" | tar -xzf - --to-stdout "{member}"', True
elif source.endswith(".tar.gz") or source.endswith(".tgz"):
return ["tar", "-xzf", source, "--to-stdout", member], False
else:
return ["tar", "-xf", source, "--to-stdout", member], False
def extract_videos(source, mapping, output_dir):
    """Stream through the OOPS archive and extract matching videos.

    The archive has a nested structure: the outer tar contains
    oops_dataset/video.tar.gz, which contains the actual video files.
    We pipe the inner tar.gz to stdout and selectively extract only
    the videos listed in `mapping` (OOPS path -> OF-ItW relative path).

    Args:
        source: URL or local path of the OOPS archive.
        mapping: dict of archive member name -> relative output path.
        output_dir: directory the relative output paths are joined onto.

    Returns:
        int: number of videos actually written.
    """
    total = len(mapping)
    print(f"Extracting {total} videos from OOPS archive...")
    if source.startswith("http"):
        print("(Streaming ~45GB from web, no local disk space needed)")
        print("(This may take 30-60 minutes depending on connection speed)")
    else:
        print("(Reading from local archive)")
    os.makedirs(os.path.join(output_dir, "falls"), exist_ok=True)
    found = 0
    remaining = set(mapping.keys())
    cmd, use_shell = _to_stdout_cmd(source, "oops_dataset/video.tar.gz")
    # FIX: stderr is inherited (not PIPE) on purpose. The old code used
    # stderr=subprocess.PIPE but never read it, so a chatty curl/tar could
    # fill the pipe buffer and deadlock while we block on stdout.
    proc = subprocess.Popen(cmd, shell=use_shell, stdout=subprocess.PIPE)
    try:
        # "r|gz" = streaming (non-seekable) mode, required for piped input.
        with tarfile.open(fileobj=proc.stdout, mode="r|gz") as tar:
            for member in tar:
                if not remaining:
                    break  # everything found; stop reading the stream early
                if member.name not in remaining:
                    continue
                out_path = os.path.join(output_dir, mapping[member.name])
                # FIX: create this file's own parent directory, so mapping
                # targets outside "falls/" also work.
                os.makedirs(os.path.dirname(out_path), exist_ok=True)
                f = tar.extractfile(member)
                if f is not None:
                    with f, open(out_path, "wb") as out_f:
                        # Copy in 1 MiB chunks to keep memory bounded.
                        while True:
                            chunk = f.read(1024 * 1024)
                            if not chunk:
                                break
                            out_f.write(chunk)
                    # FIX: only count members that were actually written
                    # (extractfile returns None for non-regular members).
                    found += 1
                remaining.discard(member.name)
                if found % 50 == 0:
                    print(f"  Extracted {found}/{total} videos...")
    finally:
        proc.stdout.close()
        proc.wait()
    # Surface pipeline failures (e.g. curl network error, corrupt archive).
    # A non-zero exit is expected when we closed the pipe early after finding
    # everything, so only warn when videos are still missing.
    if remaining and proc.returncode:
        print(f"WARNING: extraction command exited with code {proc.returncode}")
    print(f"Extracted {found}/{total} videos.")
    if remaining:
        print(f"WARNING: {len(remaining)} videos not found in archive:")
        for p in sorted(remaining)[:10]:
            print(f"  {p}")
        if len(remaining) > 10:
            print(f"  ... and {len(remaining) - 10} more")
    return found
def main():
    """CLI entry point: parse arguments, resolve the source, run extraction."""
    parser = argparse.ArgumentParser(
        description="Prepare OOPS videos for OF-ItW.",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog=__doc__,
    )
    parser.add_argument(
        "--output_dir", required=True,
        help="Directory to place the prepared videos (will contain falls/*.mp4).",
    )
    parser.add_argument(
        "--oops_archive", default=None,
        help="Path to already-downloaded video_and_anns.tar.gz (or .tar). "
             "If not provided, streams directly from the OOPS website.",
    )
    args = parser.parse_args()

    output_dir = os.path.abspath(args.output_dir)
    os.makedirs(output_dir, exist_ok=True)

    # The pre-computed mapping ships with the repo; it tells us which
    # archive members to keep and what to rename them to.
    print("Loading OOPS-to-ITW video mapping...")
    mapping = load_mapping()
    print(f"  {len(mapping)} videos to extract.")

    # Prefer a local archive when one is given; otherwise stream from the web.
    if args.oops_archive:
        source = os.path.abspath(args.oops_archive)
        if not os.path.exists(source):
            raise FileNotFoundError(f"Archive not found: {source}")
    else:
        source = OOPS_URL
    print(f"Source: {source}")

    # Single streaming pass over the archive.
    found = extract_videos(source, mapping, output_dir)

    # Summary plus a copy-pasteable loading snippet.
    print()
    print("=" * 60)
    print("Preparation complete!")
    print(f"  Output directory: {output_dir}")
    print(f"  Videos extracted: {found}/{len(mapping)}")
    print()
    print("To load OF-ItW with videos:")
    print()
    print("  from datasets import load_dataset")
    print('  ds = load_dataset("simplexsigil2/omnifall", "of-itw",')
    print('                    include_video=True,')
    print(f'                    oops_video_dir="{output_dir}",')
    print('                    trust_remote_code=True)')


if __name__ == "__main__":
    main()