File size: 4,614 Bytes
138362e |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 |
#!/usr/bin/env python3
"""
Download VLM-Gym inference dataset from Hugging Face Hub.
Usage:
# Download everything
python load_from_hf.py --output_dir ./inference_dataset
# Download only test sets (no large assets)
python load_from_hf.py --output_dir ./inference_dataset --subset test_sets
# Download specific difficulty
python load_from_hf.py --output_dir ./inference_dataset --subset easy
Examples:
from load_from_hf import download_dataset, get_dataset_path
# Download and get path
dataset_path = download_dataset()
# Use in your code
test_set_easy = dataset_path / "test_set_easy"
"""
import argparse
from pathlib import Path
from typing import Optional, List
from huggingface_hub import snapshot_download, hf_hub_download
# Hugging Face Hub repository that hosts the VLM-Gym inference dataset.
REPO_ID = "VisGym/inference-dataset"
# Define subsets for selective downloading.
# Maps each subset name (the valid --subset choices) to the repository
# folders it pulls; "all" is the union of every folder listed below.
SUBSETS = {
    "test_sets": ["test_set_easy", "test_set_hard"],
    "initial_states": ["initial_states_easy", "initial_states_hard"],
    "easy": ["test_set_easy", "initial_states_easy"],
    "hard": ["test_set_hard", "initial_states_hard"],
    # The large asset files live in their own folder.
    "partial_datasets": ["partial_datasets"],
    "all": [
        "test_set_easy",
        "test_set_hard",
        "initial_states_easy",
        "initial_states_hard",
        "partial_datasets",
    ],
}
def download_dataset(
    output_dir: Optional[str] = None,
    subset: str = "all",
    repo_id: str = REPO_ID,
    token: Optional[str] = None,
) -> Path:
    """
    Download VLM-Gym inference dataset from Hugging Face Hub.

    Args:
        output_dir: Directory to download to. If None, uses HF cache.
        subset: Which subset to download. Options:
            - "all": Everything (default)
            - "test_sets": Only test_set_easy and test_set_hard
            - "initial_states": Only initial_states_easy and initial_states_hard
            - "easy": Only easy difficulty (test_set + initial_states)
            - "hard": Only hard difficulty (test_set + initial_states)
            - "partial_datasets": Only the large asset files
        repo_id: Hugging Face repository ID
        token: Optional HF token for private repos

    Returns:
        Path to the downloaded dataset directory

    Raises:
        ValueError: If ``subset`` is not one of the keys of ``SUBSETS``.
    """
    # Fail fast on an unknown subset name before touching the network.
    if subset not in SUBSETS:
        raise ValueError(f"Unknown subset: {subset}. Choose from: {list(SUBSETS.keys())}")

    wanted = SUBSETS[subset]
    # snapshot_download filters files with glob patterns; "<folder>/**"
    # keeps the entire tree under each requested folder.
    patterns = [folder + "/**" for folder in wanted]

    print(f"Downloading subset '{subset}' from {repo_id}...")
    print(f"Folders: {wanted}")

    downloaded_dir = snapshot_download(
        repo_id=repo_id,
        repo_type="dataset",
        local_dir=output_dir,
        allow_patterns=patterns,
        token=token,
    )
    print(f"Downloaded to: {downloaded_dir}")
    return Path(downloaded_dir)
def get_dataset_path(
    output_dir: Optional[str] = None,
    subset: str = "all",
    repo_id: str = REPO_ID,
    token: Optional[str] = None,
) -> Path:
    """
    Get path to dataset, downloading if necessary.

    This is a convenience wrapper that downloads the dataset if not present
    and returns the path.
    """
    # Forward everything by keyword so a future signature reorder in
    # download_dataset cannot silently scramble the arguments.
    return download_dataset(
        output_dir=output_dir,
        subset=subset,
        repo_id=repo_id,
        token=token,
    )
def list_available_subsets():
    """Print each subset name together with the folders it contains."""
    print("Available subsets:")
    # Iterate keys directly; SUBSETS preserves its definition order.
    for subset_name in SUBSETS:
        folder_list = ", ".join(SUBSETS[subset_name])
        print(f" {subset_name}: {folder_list}")
def main():
    """Command-line entry point: parse arguments, then list or download."""
    parser = argparse.ArgumentParser(
        description="Download VLM-Gym inference dataset from Hugging Face Hub"
    )
    parser.add_argument(
        "--output_dir", type=str, default=None,
        help="Output directory (default: HF cache)",
    )
    parser.add_argument(
        "--subset", type=str, default="all", choices=list(SUBSETS.keys()),
        help="Which subset to download",
    )
    parser.add_argument(
        "--repo_id", type=str, default=REPO_ID,
        help="Hugging Face repository ID",
    )
    parser.add_argument(
        "--token", type=str, default=None,
        help="Hugging Face token (for private repos)",
    )
    parser.add_argument(
        "--list-subsets", action="store_true",
        help="List available subsets and exit",
    )
    args = parser.parse_args()

    # --list-subsets is informational only; skip the download entirely.
    if args.list_subsets:
        list_available_subsets()
        return

    download_dataset(
        output_dir=args.output_dir,
        subset=args.subset,
        repo_id=args.repo_id,
        token=args.token,
    )


if __name__ == "__main__":
    main()
|