#!/usr/bin/env python3
"""
Upload VR Scene Evaluation Dataset to Hugging Face Hub

This script uploads a YOLO-format dataset to the Hugging Face Hub as a dataset
repository.

For very large datasets, you can also use the command-line approach:
1. Install huggingface-hub: pip install huggingface-hub
2. Log in: huggingface-cli login
3. Upload: huggingface-cli upload-large-folder <username>/<repo-name> /path/to/dataset --repo-type=dataset

This Python script provides more control and better error handling.
"""
import os
import shutil
import tempfile

import yaml
from huggingface_hub import HfApi, create_repo, login
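# Third-party requirements: pip install huggingface-hub pyyaml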
# Configuration
DATASET_NAME = "DISCOVR" # Change this to your desired dataset name
HF_USERNAME = None # Will be set after login
DATASET_PATH = "/home/daniel/_datasets/post-2/aggregate"
REPO_TYPE = "dataset"
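# Note: DATASET_PATH is machine-specific; point it at the root of your local
# YOLO export (the directory containing data.yaml and the split folders).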

def load_dataset_config():
    """Load the dataset configuration from data.yaml"""
    with open(os.path.join(DATASET_PATH, "data.yaml"), 'r') as f:
        config = yaml.safe_load(f)
    return config
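
# For reference, a typical Ultralytics/YOLO data.yaml looks like the sketch
# below. Only the 'nc' and 'names' keys are read by this script; the values
# shown are illustrative, not taken from the actual dataset:
#
#   train: ../train/images
#   val: ../valid/images
#   test: ../test/images
#   nc: 3
#   names: ['headset', 'controller', 'base_station']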

def create_dataset_card(config):
    """Create a README.md file for the dataset"""
    class_names = config['names']
    num_classes = config['nc']
    readme_content = f"""---
license: cc-by-4.0
task_categories:
- object-detection
language:
- en
tags:
- computer-vision
- object-detection
- yolo
- virtual-reality
- vr
- scene-evaluation
size_categories:
- 1K<n<10K
---

# VR Scene Evaluation Dataset

## Dataset Description

This dataset contains {num_classes} object classes for VR scene evaluation, formatted for YOLO object detection models.

### Classes ({num_classes} total):

{chr(10).join([f"- {i}: {name}" for i, name in enumerate(class_names)])}

## Dataset Structure

```
├── train/
│   ├── images/
│   └── labels/
├── valid/
│   ├── images/
│   └── labels/
├── test/
│   ├── images/
│   └── labels/
└── data.yaml
```

## Usage

### With YOLOv8

```python
from ultralytics import YOLO

# Load a model
model = YOLO('yolov8n.pt')

# Train the model
results = model.train(data='path/to/data.yaml', epochs=100, imgsz=640)
```

### With Hugging Face Datasets

```python
from datasets import load_dataset

dataset = load_dataset("{HF_USERNAME}/{DATASET_NAME}")
```

## License

This dataset is licensed under CC BY 4.0.

## Citation

```
@dataset{{vr_scene_evaluation,
  title={{VR Scene Evaluation Dataset}},
  year={{2025}},
  publisher={{Hugging Face}},
  version={{1.0}},
}}
```

## Original Source

This dataset was originally sourced from Roboflow:
- Workspace: my-workspace-zhz1m
- Project: vr-scene-evaluation-o1hbg
- Version: 6
- URL: https://universe.roboflow.com/my-workspace-zhz1m/vr-scene-evaluation-o1hbg/dataset/6
"""
    return readme_content
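
# Note: the YAML front matter at the top of README.md (license, tags, size
# categories) is what the Hub uses to render and index the dataset card.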

def prepare_upload_directory():
    """Prepare a clean directory for upload"""
    upload_dir = tempfile.mkdtemp()

    # Copy essential files
    files_to_copy = [
        "data.yaml",
        "README.dataset.txt",
        "README.roboflow.txt"
    ]
    for file in files_to_copy:
        src = os.path.join(DATASET_PATH, file)
        if os.path.exists(src):
            shutil.copy2(src, upload_dir)

    # Copy train, valid, test directories
    for split in ["train", "valid", "test"]:
        src_dir = os.path.join(DATASET_PATH, split)
        if os.path.exists(src_dir):
            dst_dir = os.path.join(upload_dir, split)
            shutil.copytree(src_dir, dst_dir)

    return upload_dir
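
# Design note: staging into a temp directory keeps stray files out of the
# upload, at the cost of temporarily doubling the dataset's disk footprint.
# An alternative (untested here) is to upload DATASET_PATH directly and rely
# on allow_patterns/ignore_patterns to filter what gets uploaded.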

def main():
    global HF_USERNAME

    print("=== Hugging Face Dataset Upload Script ===")
    print(f"Dataset path: {DATASET_PATH}")
    print(f"Dataset name: {DATASET_NAME}")

    # Load dataset config
    try:
        config = load_dataset_config()
        print(f"✓ Loaded dataset config: {config['nc']} classes")
    except Exception as e:
        print(f"✗ Error loading dataset config: {e}")
        return

    # Login to Hugging Face
    print("\n1. Logging into Hugging Face...")
    print("You need a Hugging Face account and access token.")
    print("Get your token from: https://huggingface.co/settings/tokens")
    try:
        login()
        api = HfApi()
        user_info = api.whoami()
        HF_USERNAME = user_info['name']
        print(f"✓ Logged in as: {HF_USERNAME}")
    except Exception as e:
        print(f"✗ Login failed: {e}")
        print("Make sure you have a valid token and internet connection.")
        return

    # Create repository
    repo_id = f"{HF_USERNAME}/{DATASET_NAME}"
    print(f"\n2. Creating repository: {repo_id}")
    try:
        create_repo(
            repo_id=repo_id,
            repo_type=REPO_TYPE,
            private=False,  # Set to True if you want a private repo
            exist_ok=True
        )
        print("✓ Repository created/verified")
    except Exception as e:
        print(f"✗ Error creating repository: {e}")
        return

    # Prepare upload directory
    print("\n3. Preparing files for upload...")
    try:
        upload_dir = prepare_upload_directory()
        print(f"✓ Files prepared in: {upload_dir}")

        # Create README.md
        readme_content = create_dataset_card(config)
        with open(os.path.join(upload_dir, "README.md"), 'w') as f:
            f.write(readme_content)
        print("✓ Dataset card created")
    except Exception as e:
        print(f"✗ Error preparing files: {e}")
        return

    # Upload to Hugging Face
    print(f"\n4. Uploading to {repo_id}...")
    print("This may take a while depending on dataset size...")
    print("Using upload_large_folder for better handling of large datasets...")
    try:
        # upload_large_folder is resumable and commits directly to the main
        # branch; unlike upload_folder, it does not accept a create_pr argument
        api.upload_large_folder(
            folder_path=upload_dir,
            repo_id=repo_id,
            repo_type=REPO_TYPE,
            num_workers=4,  # Use multiple workers for faster upload
            allow_patterns=["**/*"],  # Upload all files
            ignore_patterns=[".git/**", "**/.DS_Store", "**/__pycache__/**"]  # Ignore system files
        )
        print("✓ Upload completed successfully!")
        print("\n🎉 Your dataset is now available at:")
        print(f"https://huggingface.co/datasets/{repo_id}")
    except Exception as e:
        print(f"✗ Upload failed: {e}")
        print("If the upload failed due to size, you can try:")
        print("1. Reducing the number of workers (num_workers parameter)")
        print("2. Using the command line: huggingface-cli upload-large-folder")
        print("3. Splitting the dataset into smaller chunks")
        return
    finally:
        # Clean up
        shutil.rmtree(upload_dir)
        print("✓ Temporary files cleaned up")


if __name__ == "__main__":
    main()