"""Utilities for downloading the UCF-Crime feature dataset from the Hugging Face Hub and serving it through PyTorch dataloaders."""
from huggingface_hub import hf_hub_download
import h5py
import torch
import numpy as np
from torch.utils.data import Dataset, DataLoader
def download_and_load_dataset(repo_id, filename="ucf_crime_features_labeled.h5"):
    """Fetch the dataset's HDF5 file from the Hugging Face Hub.

    Args:
        repo_id: Hub dataset repository id (e.g. "user/dataset-name").
        filename: Name of the HDF5 file inside the repository.

    Returns:
        Local filesystem path of the (cached) downloaded file.
    """
    return hf_hub_download(repo_id=repo_id, filename=filename, repo_type="dataset")
class UCFCrimeDataset(Dataset):
    """Dataset class for loading UCF-Crime features with temporal annotations.

    The HDF5 layout is ``category/video_name`` groups, each holding
    ``features`` and ``labels`` datasets plus ``split``/``duration`` attrs
    (assumed from the access pattern below — TODO confirm against the file).

    The h5py handle is opened lazily, once per process: a handle created in
    ``__init__`` cannot be shared with ``DataLoader`` worker processes and is
    unpicklable under the ``spawn``/``forkserver`` start methods, so holding
    one for the dataset's lifetime breaks ``num_workers > 0``.
    """

    def __init__(self, hdf5_path, split=None, transform=None):
        """Index the videos in *hdf5_path*, optionally keeping one split.

        Args:
            hdf5_path: Path to the HDF5 feature file.
            split: If given, keep only videos whose 'split' attribute matches
                (case-insensitive); otherwise keep every video.
            transform: Optional callable applied to the feature tensor.
        """
        self.hdf5_path = hdf5_path
        self.transform = transform
        self.hdf5_file = None  # opened lazily per process; see _file()

        # Build the "category/video" index with a short-lived handle so no
        # open file descriptor is captured before workers fork/spawn.
        self.video_paths = []
        with h5py.File(hdf5_path, 'r') as f:
            for category_name in f.keys():
                for video_name in f[category_name].keys():
                    video_path = f"{category_name}/{video_name}"
                    if split is not None:
                        video_split = f[video_path].attrs.get('split', 'Unknown')
                        # h5py may return attrs as bytes; normalize first.
                        if isinstance(video_split, bytes):
                            video_split = video_split.decode('utf-8')
                        if video_split.lower() != split.lower():
                            continue
                    self.video_paths.append(video_path)
        if split is not None:
            print(f"Loaded {len(self.video_paths)} videos for split: {split}")

    def _file(self):
        """Return this process's HDF5 handle, opening it on first use."""
        if self.hdf5_file is None:
            self.hdf5_file = h5py.File(self.hdf5_path, 'r')
        return self.hdf5_file

    def __getstate__(self):
        # Drop the unpicklable h5py handle so the dataset can be shipped to
        # DataLoader workers; each worker reopens lazily via _file().
        state = self.__dict__.copy()
        state['hdf5_file'] = None
        return state

    def __len__(self):
        return len(self.video_paths)

    def __getitem__(self, idx):
        """Return one video as a dict of tensors plus metadata."""
        video_path = self.video_paths[idx]
        video_group = self._file()[video_path]

        # Materialize the HDF5 datasets, then convert to float tensors.
        features = torch.from_numpy(np.array(video_group['features'])).float()
        labels = torch.from_numpy(np.array(video_group['labels'])).float()
        if self.transform:
            features = self.transform(features)

        # Metadata attributes (bytes-normalized like in __init__).
        duration = video_group.attrs.get('duration', 0.0)
        split = video_group.attrs.get('split', 'Unknown')
        if isinstance(split, bytes):
            split = split.decode('utf-8')

        return {
            'video_id': video_path,
            'features': features,
            'labels': labels,
            'duration': duration,
            'split': split
        }

    def close(self):
        """Close this process's HDF5 handle, if one was opened."""
        if self.hdf5_file is not None:
            self.hdf5_file.close()
            self.hdf5_file = None
def create_dataloaders_from_huggingface(repo_id, batch_size=16, num_workers=2):
    """Download dataset from Hugging Face and create dataloaders.

    Args:
        repo_id: Hub dataset repository id.
        batch_size: Batch size shared by all three loaders.
        num_workers: Worker-process count shared by all three loaders.

    Returns:
        Tuple of (train_loader, val_loader, test_loader, hdf5_path).
    """
    # Fetch (or reuse the cached) HDF5 file.
    print(f"Downloading dataset from {repo_id}...")
    hdf5_path = download_and_load_dataset(repo_id)
    print(f"✓ Dataset downloaded to: {hdf5_path}")

    # One dataset per split.
    print("\nCreating datasets...")
    datasets = {name: UCFCrimeDataset(hdf5_path, split=name)
                for name in ('Train', 'Val', 'Test')}

    def _loader(dataset, shuffle):
        # Shared DataLoader settings; only shuffling differs per split.
        return DataLoader(
            dataset,
            batch_size=batch_size,
            shuffle=shuffle,
            num_workers=num_workers,
            pin_memory=True
        )

    train_loader = _loader(datasets['Train'], True)
    val_loader = _loader(datasets['Val'], False)
    test_loader = _loader(datasets['Test'], False)

    # Summary banner.
    bar = '=' * 60
    total = sum(len(ds) for ds in datasets.values())
    print(f"\n{bar}")
    print("Dataset Statistics:")
    print(f"{bar}")
    print(f" Training set: {len(datasets['Train']):>4} videos")
    print(f" Validation set: {len(datasets['Val']):>4} videos")
    print(f" Test set: {len(datasets['Test']):>4} videos")
    print(f" Total: {total:>4} videos")
    print(f"{bar}")

    return train_loader, val_loader, test_loader, hdf5_path
if __name__ == "__main__":
    repo_id = "Rahima411/ucf-anomaly-detection-mapped"

    # Build the three dataloaders from the Hub dataset.
    train_loader, val_loader, test_loader, hdf5_path = (
        create_dataloaders_from_huggingface(
            repo_id=repo_id,
            batch_size=16,
            num_workers=2
        )
    )

    # Smoke-test: inspect the first batch of each split.
    print("\nLoading Data...")
    print("-" * 60)
    for split_name, loader in (("Train", train_loader),
                               ("Val", val_loader),
                               ("Test", test_loader)):
        print(f"\n{split_name} set - First batch:")
        for batch in loader:
            print(f" Batch size: {len(batch['video_id'])} videos")
            print(f" Features shape: {batch['features'].shape}")
            print(f" Labels shape: {batch['labels'].shape}")
            print(f" Sample video IDs: {batch['video_id'][:3]}")

            # Per-batch anomaly-frame ratio (labels == 1 marks anomalies).
            labels_np = batch['labels'].numpy()
            total_frames = labels_np.size
            num_anomaly_frames = (labels_np == 1).sum()
            anomaly_pct = 100 * num_anomaly_frames / total_frames if total_frames > 0 else 0
            print(f" Anomaly frames: {num_anomaly_frames:,} / {total_frames:,} ({anomaly_pct:.2f}%)")
            break  # Only show first batch