# scripts/utils.py — ImageNet-100 parquet inspection utilities
# (exported snapshot: 11,048 bytes, revision 4271e91)
"""
Utilities for ImageNet-100 parquet data inspection and debugging.
This module provides functions to inspect the structure and content of the
ImageNet-100 parquet dataset files.
Usage:
# Debug the parquet file structure
from scripts.utils import debug_structure
debug_structure()
# Check image sizes in the dataset
from scripts.utils import check_image_sizes
check_image_sizes()
# Analyze memory usage
from scripts.utils import analyze_memory_usage
analyze_memory_usage()
"""
import pandas as pd
from PIL import Image
import io
from pathlib import Path
import sys
import torch
from torch.utils.data import DataLoader
from torchvision import transforms
def debug_structure(data_dir: str = "data",
                    filename: str = "train-00000-of-00017.parquet") -> None:
    """
    Debug and inspect the parquet data structure.

    Loads one parquet shard and prints detailed information about the data
    structure: column names, per-column types of the first row, and how the
    image payload is stored (dict of bytes/path, raw bytes, or other).

    Args:
        data_dir (str): Path to the directory containing parquet files.
            Defaults to "data".
        filename (str): Name of the parquet shard to inspect. Defaults to
            the first train shard, matching the original behavior.

    Returns:
        None

    Raises:
        FileNotFoundError: If the requested parquet file does not exist.

    Example:
        >>> debug_structure()
        DataFrame shape: (7453, 2)
        Columns: ['image', 'label']
        First row data types:
        image: <class 'dict'>
        label: <class 'numpy.int64'>
        Image data type: <class 'dict'>
        Image dict keys: ['bytes', 'path']
        bytes: <class 'bytes'> - b'\x89PNG\r\n\x1a\n...
        path: <class 'NoneType'> - None...
    """
    parquet_file = Path(data_dir) / filename
    if not parquet_file.exists():
        raise FileNotFoundError(f"Parquet file not found: {parquet_file}")
    df = pd.read_parquet(parquet_file)
    print(f"DataFrame shape: {df.shape}")
    print(f"Columns: {list(df.columns)}")
    # Inspect the first sample's per-column Python types.
    first_row = df.iloc[0]
    print("\nFirst row data types:")
    for col in df.columns:
        print(f"  {col}: {type(first_row[col])}")
    # Drill into the image column: HuggingFace-style parquet stores images
    # as a dict with 'bytes' and 'path' keys, but handle raw bytes too.
    image_data = first_row['image']
    print(f"\nImage data type: {type(image_data)}")
    if isinstance(image_data, dict):
        print(f"Image dict keys: {list(image_data.keys())}")
        for key, value in image_data.items():
            # Truncate values: 'bytes' can be a whole encoded image.
            print(f"  {key}: {type(value)} - {str(value)[:100]}...")
    elif isinstance(image_data, bytes):
        print(f"Image bytes length: {len(image_data)}")
    else:
        print(f"Image data: {str(image_data)[:200]}...")
def check_image_sizes(data_dir: str = "data", num_samples: int = 10) -> None:
    """
    Check actual image sizes in the parquet data.

    Inspects a sample of images from both the train and validation splits
    to determine their original dimensions before any resizing.

    Args:
        data_dir (str): Path to the directory containing parquet files.
            Defaults to "data".
        num_samples (int): Number of images to check from each file.
            Defaults to 10.

    Returns:
        None

    Example:
        >>> check_image_sizes()
        === train-00000-of-00017.parquet ===
        Sample image sizes: [(213, 160), (160, 243), (160, 213), ...]
        Unique sizes found: [(160, 213), (213, 160), (241, 160), ...]
        Multiple sizes found!
    """
    data_path = Path(data_dir)
    # Check one shard from each split.
    files_to_check = [
        "train-00000-of-00017.parquet",
        "validation-00000-of-00001.parquet",
    ]
    for filename in files_to_check:
        file_path = data_path / filename
        if not file_path.exists():
            # Bug fix: these messages previously printed a literal
            # "(unknown)" placeholder instead of the actual filename.
            print(f"Warning: {filename} not found, skipping...")
            continue
        print(f"\n=== {filename} ===")
        df = pd.read_parquet(file_path)
        sizes = []
        for i in range(min(num_samples, len(df))):
            try:
                # Images are stored as dicts with encoded bytes under 'bytes'.
                image_bytes = df.iloc[i]['image']['bytes']
                image = Image.open(io.BytesIO(image_bytes))
                sizes.append(image.size)
            except Exception as e:
                # Best-effort: report the broken sample and keep scanning.
                print(f"Error processing image {i}: {e}")
                continue
        print(f"Sample image sizes: {sizes}")
        unique_sizes = list(set(sizes))
        print(f"Unique sizes found: {unique_sizes}")
        if len(unique_sizes) == 1:
            print(
                f"All checked images are {unique_sizes[0][0]}x{unique_sizes[0][1]}")
        else:
            print("Multiple sizes found!")
def analyze_memory_usage(data_dir: str = "data", batch_size: int = 32,
                         num_batches: int = 5) -> None:
    """
    Analyze actual PyTorch tensor memory usage from dataloader.

    Loads real batches through a PyTorch dataloader and measures actual
    tensor memory usage for more accurate training memory estimates. Any
    failure (missing data files, missing sibling module) is caught and
    reported rather than raised, since this is a diagnostic tool.

    Args:
        data_dir (str): Path to directory containing parquet files.
            Defaults to "data".
        batch_size (int): Batch size to test with. Defaults to 32.
        num_batches (int): Number of batches to sample. Defaults to 5.

    Returns:
        None

    Example:
        >>> analyze_memory_usage()
        === PyTorch Memory Usage Analysis ===
        Loading ImageNet100Parquet dataset...
        === Batch Analysis ===
        Analyzing 5 batches of size 32...
        Batch 1: 13.2 MB (tensors: 2, samples: 32)
        Batch 2: 13.1 MB (tensors: 2, samples: 32)
        ...
        === Memory Estimates ===
        Per batch average: 13.1 MB
        Per sample average: 0.41 MB
        Estimated total memory: 52.5 GB
    """
    print("=== PyTorch Memory Usage Analysis ===")
    try:
        # Make the sibling dataloader script importable regardless of CWD.
        import os
        sys.path.append(os.path.dirname(__file__))
        from pytorch_dataloader import ImageNet100Parquet

        transform = transforms.Compose([
            transforms.Resize((224, 224)),
            transforms.ToTensor(),
        ])
        print("Loading ImageNet100Parquet dataset...")
        dataset = ImageNet100Parquet(data_dir, "train", transform)
        dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=False)
        print(f"Dataset size: {len(dataset):,} samples")
        print(f"Analyzing {num_batches} batches of size {batch_size}...\n")
        total_samples_analyzed = 0
        total_memory_per_batch = 0
        batch_memory_usages = []
        print("=== Batch Analysis ===")
        for batch_idx, (images, labels) in enumerate(dataloader):
            if batch_idx >= num_batches:
                break
            # element_size() * numel() gives the exact tensor byte footprint.
            image_memory = images.element_size() * images.numel()
            label_memory = labels.element_size() * labels.numel()
            batch_memory = image_memory + label_memory
            batch_memory_mb = batch_memory / (1024**2)
            batch_memory_usages.append(batch_memory_mb)
            total_samples_analyzed += images.size(0)
            total_memory_per_batch += batch_memory_mb
            print(f"Batch {batch_idx + 1}: {batch_memory_mb:.1f} MB "
                  f"(tensors: {images.dim() + labels.dim()}, samples: {images.size(0)})")
            # Release tensors before fetching the next batch to keep the
            # measurement itself from inflating peak memory.
            del images, labels
            if torch.cuda.is_available():
                torch.cuda.empty_cache()
        if not batch_memory_usages:
            print("No batches analyzed!")
            return
        avg_batch_memory = sum(batch_memory_usages) / len(batch_memory_usages)
        avg_sample_memory = avg_batch_memory / batch_size
        estimated_total_batches = len(dataset) / batch_size
        # MB if the whole train split were resident at once.
        estimated_total_memory = avg_batch_memory * estimated_total_batches
        estimated_total_gb = estimated_total_memory / 1024
        print(f"\n=== Memory Estimates ===")
        print(f"Per batch average: {avg_batch_memory:.1f} MB")
        print(f"Per sample average: {avg_sample_memory:.2f} MB")
        print(f"Dataset samples: {len(dataset):,}")
        print(f"Estimated total batches: {estimated_total_batches:.0f}")
        print(f"Estimated total memory: {estimated_total_memory:.1f} MB "
              f"({estimated_total_gb:.1f} GB)")
        # Also analyze validation (first batch only — enough for a per-sample
        # estimate since all images are resized to 224x224).
        print(f"\n=== Validation Dataset ===")
        try:
            val_dataset = ImageNet100Parquet(data_dir, "validation", transform)
            val_dataloader = DataLoader(
                val_dataset, batch_size=batch_size, shuffle=False)
            val_samples = 0
            val_memory_total = 0
            for images, labels in val_dataloader:
                image_memory = images.element_size() * images.numel()
                label_memory = labels.element_size() * labels.numel()
                val_memory_total += image_memory + label_memory
                val_samples += images.size(0)
                break  # Just analyze first batch for validation
            val_avg_memory = (val_memory_total / val_samples) / \
                (1024**2)  # Convert to MB
            val_total_memory = val_avg_memory * len(val_dataset)
            print(f"Validation samples: {len(val_dataset):,}")
            print(f"Validation per sample: {val_avg_memory:.2f} MB")
            print(f"Validation total memory: {val_total_memory:.1f} MB "
                  f"({val_total_memory / 1024:.1f} GB)")
        except Exception as e:
            print(f"Error analyzing validation: {e}")
        print(f"\n=== Memory Impact Assessment ===")
        # Bug fix: the original compared the MB figure against 16/8 "GB"
        # thresholds, so the high-memory warning fired for almost any
        # dataset. Compare in GB as the thresholds intend.
        if estimated_total_gb > 16:  # 16GB threshold
            print("⚠️  WARNING: High memory usage detected!")
            print("   This implementation may crash systems with <32GB RAM")
            print("   Consider reducing batch size or implementing gradient accumulation")
        elif estimated_total_gb > 8:
            print("⚡ CAUTION: Moderate memory usage")
            print("   May be slow on systems with <16GB RAM")
        else:
            print("✅ Memory usage is reasonable for most systems")
    except Exception as e:
        # Boundary-level catch: this is a diagnostic utility, so report and
        # return instead of crashing the caller.
        print(f"Error during PyTorch memory analysis: {e}")
        print("Make sure dataset files exist and are accessible.")
if __name__ == "__main__":
    # Run utility functions when executed as a script.
    #
    # Usage:
    #     python scripts/utils.py          # Run all three utilities
    #     python scripts/utils.py debug    # Run debug_structure only
    #     python scripts/utils.py sizes    # Run check_image_sizes only
    #     python scripts/utils.py memory   # Run analyze_memory_usage only
    #
    # (sys is already imported at module level; the previous redundant
    # local import was removed.)
    if len(sys.argv) > 1:
        command = sys.argv[1]
        if command == "debug":
            debug_structure()
        elif command == "sizes":
            check_image_sizes()
        elif command == "memory":
            analyze_memory_usage()
        else:
            print("Usage: python utils.py [debug|sizes|memory]")
    else:
        # No argument: run everything in inspection order.
        debug_structure()
        check_image_sizes()
        analyze_memory_usage()
# end of file