# basavyr — refactor code to be compatible for pytorch (commit 4271e91)
"""
Utilities for ImageNet-100 parquet data inspection and debugging.
This module provides functions to inspect the structure and content of the
ImageNet-100 parquet dataset files.
Usage:
# Debug the parquet file structure
from scripts.utils import debug_structure
debug_structure()
# Check image sizes in the dataset
from scripts.utils import check_image_sizes
check_image_sizes()
# Analyze memory usage
from scripts.utils import analyze_memory_usage
analyze_memory_usage()
"""
import pandas as pd
from PIL import Image
import io
from pathlib import Path
import sys
import torch
from torch.utils.data import DataLoader
from torchvision import transforms
def debug_structure(data_dir: str = "data") -> None:
    """
    Inspect and print the structure of a sample parquet file.

    Loads the first training parquet shard and reports the DataFrame
    shape, the column names, the Python type of each value in the first
    row, and how the image payload is stored (a dict with bytes/path,
    raw bytes, or something else).

    Args:
        data_dir (str): Path to the directory containing parquet files.
            Defaults to "data".

    Returns:
        None

    Raises:
        FileNotFoundError: If the expected parquet shard is missing.

    Example:
        >>> debug_structure()
        DataFrame shape: (7453, 2)
        Columns: ['image', 'label']
        First row data types:
        image: <class 'dict'>
        label: <class 'numpy.int64'>
        Image data type: <class 'dict'>
        Image dict keys: ['bytes', 'path']
        bytes: <class 'bytes'> - b'\x89PNG\r\n\x1a\n...
        path: <class 'NoneType'> - None...
    """
    parquet_file = Path(data_dir) / "train-00000-of-00017.parquet"
    if not parquet_file.exists():
        raise FileNotFoundError(f"Parquet file not found: {parquet_file}")

    df = pd.read_parquet(parquet_file)
    print(f"DataFrame shape: {df.shape}")
    print(f"Columns: {list(df.columns)}")

    # Report the Python type of every column value in the first row.
    sample = df.iloc[0]
    print("\nFirst row data types:")
    for column in df.columns:
        print(f" {column}: {type(sample[column])}")

    # Drill into how the image payload is encoded.
    image_data = sample['image']
    print(f"\nImage data type: {type(image_data)}")
    if isinstance(image_data, dict):
        print(f"Image dict keys: {list(image_data.keys())}")
        for key, value in image_data.items():
            print(f" {key}: {type(value)} - {str(value)[:100]}...")
    elif isinstance(image_data, bytes):
        print(f"Image bytes length: {len(image_data)}")
    else:
        print(f"Image data: {str(image_data)[:200]}...")
def check_image_sizes(data_dir: str = "data", num_samples: int = 10) -> None:
    """
    Check actual image sizes in the parquet data.

    Inspects a sample of images from both the train and validation
    splits to determine their original dimensions before any resizing.

    Args:
        data_dir (str): Path to the directory containing parquet files.
            Defaults to "data".
        num_samples (int): Number of images to check from each file.
            Defaults to 10.

    Returns:
        None

    Example:
        >>> check_image_sizes()
        === train-00000-of-00017.parquet ===
        Sample image sizes: [(213, 160), (160, 243), (160, 213), ...]
        Unique sizes found: [(160, 213), (213, 160), (241, 160), ...]
        Multiple sizes found!
    """
    data_path = Path(data_dir)
    # Check one shard from each split.
    files_to_check = [
        "train-00000-of-00017.parquet",
        "validation-00000-of-00001.parquet",
    ]
    for filename in files_to_check:
        file_path = data_path / filename
        if not file_path.exists():
            # Bug fix: the message previously printed a literal placeholder
            # instead of interpolating the shard name.
            print(f"Warning: {filename} not found, skipping...")
            continue
        # Bug fix: header now names the shard actually being inspected.
        print(f"\n=== {filename} ===")
        df = pd.read_parquet(file_path)
        sizes = []
        for i in range(min(num_samples, len(df))):
            try:
                # Each image cell is a dict holding the raw encoded bytes.
                image_bytes = df.iloc[i]['image']['bytes']
                image = Image.open(io.BytesIO(image_bytes))
                sizes.append(image.size)
            except Exception as e:
                # Best-effort: report and keep scanning remaining samples.
                print(f"Error processing image {i}: {e}")
                continue
        print(f"Sample image sizes: {sizes}")
        unique_sizes = list(set(sizes))
        print(f"Unique sizes found: {unique_sizes}")
        if len(unique_sizes) == 1:
            print(
                f"All checked images are {unique_sizes[0][0]}x{unique_sizes[0][1]}")
        else:
            print("Multiple sizes found!")
def analyze_memory_usage(data_dir: str = "data", batch_size: int = 32,
                         num_batches: int = 5) -> None:
    """
    Analyze actual PyTorch tensor memory usage from dataloader.

    This function loads real batches through a PyTorch dataloader and
    measures actual tensor memory usage for more accurate training
    memory estimates.

    Args:
        data_dir (str): Path to directory containing parquet files.
            Defaults to "data".
        batch_size (int): Batch size to test with. Defaults to 32.
        num_batches (int): Number of batches to sample. Defaults to 5.

    Returns:
        None

    Notes:
        All failures (missing dataloader module, missing data files,
        etc.) are caught and reported rather than raised, so this is
        safe to call as a best-effort diagnostic.

    Example:
        >>> analyze_memory_usage()
        === PyTorch Memory Usage Analysis ===
        Loading ImageNet100Parquet dataset...
        === Batch Analysis ===
        Analyzing 5 batches of size 32...
        Batch 1: 13.2 MB (tensors: 2, samples: 32)
        ...
        === Memory Estimates ===
        Per batch average: 13.1 MB
        Per sample average: 0.41 MB
        Estimated total memory: 52.5 GB
    """
    print("=== PyTorch Memory Usage Analysis ===")
    try:
        # The project dataloader lives next to this script; make its
        # directory importable regardless of the current working dir.
        # (sys is already imported at module level; no local re-import.)
        import os
        sys.path.append(os.path.dirname(__file__))
        from pytorch_dataloader import ImageNet100Parquet

        transform = transforms.Compose([
            transforms.Resize((224, 224)),
            transforms.ToTensor(),
        ])
        print("Loading ImageNet100Parquet dataset...")
        dataset = ImageNet100Parquet(data_dir, "train", transform)
        dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=False)
        print(f"Dataset size: {len(dataset):,} samples")
        print(f"Analyzing {num_batches} batches of size {batch_size}...\n")

        batch_memory_usages = []
        print("=== Batch Analysis ===")
        for batch_idx, (images, labels) in enumerate(dataloader):
            if batch_idx >= num_batches:
                break
            # element_size() * numel() is the exact tensor payload in bytes.
            image_memory = images.element_size() * images.numel()
            label_memory = labels.element_size() * labels.numel()
            batch_memory_mb = (image_memory + label_memory) / (1024**2)
            batch_memory_usages.append(batch_memory_mb)
            print(f"Batch {batch_idx + 1}: {batch_memory_mb:.1f} MB "
                  f"(tensors: {images.dim() + labels.dim()}, samples: {images.size(0)})")
            # Release batch tensors promptly; clear the CUDA cache only
            # when a GPU is actually present.
            del images, labels
            if torch.cuda.is_available():
                torch.cuda.empty_cache()

        if not batch_memory_usages:
            print("No batches analyzed!")
            return

        avg_batch_memory = sum(batch_memory_usages) / len(batch_memory_usages)
        avg_sample_memory = avg_batch_memory / batch_size
        estimated_total_batches = len(dataset) / batch_size
        # NOTE: estimated_total_memory is in MB.
        estimated_total_memory = avg_batch_memory * estimated_total_batches
        print(f"\n=== Memory Estimates ===")
        print(f"Per batch average: {avg_batch_memory:.1f} MB")
        print(f"Per sample average: {avg_sample_memory:.2f} MB")
        print(f"Dataset samples: {len(dataset):,}")
        print(f"Estimated total batches: {estimated_total_batches:.0f}")
        print(f"Estimated total memory: {estimated_total_memory:.1f} MB "
              f"({estimated_total_memory / 1024:.1f} GB)")

        # Also analyze the first validation batch for a per-sample estimate.
        print(f"\n=== Validation Dataset ===")
        try:
            val_dataset = ImageNet100Parquet(data_dir, "validation", transform)
            val_dataloader = DataLoader(
                val_dataset, batch_size=batch_size, shuffle=False)
            val_samples = 0
            val_memory_total = 0
            for images, labels in val_dataloader:
                val_memory_total += (images.element_size() * images.numel()
                                     + labels.element_size() * labels.numel())
                val_samples += images.size(0)
                break  # Just analyze first batch for validation
            val_avg_memory = (val_memory_total / val_samples) / \
                (1024**2)  # Convert to MB
            val_total_memory = val_avg_memory * len(val_dataset)
            print(f"Validation samples: {len(val_dataset):,}")
            print(f"Validation per sample: {val_avg_memory:.2f} MB")
            print(f"Validation total memory: {val_total_memory:.1f} MB "
                  f"({val_total_memory / 1024:.1f} GB)")
        except Exception as e:
            print(f"Error analyzing validation: {e}")

        print(f"\n=== Memory Impact Assessment ===")
        # Bug fix: estimated_total_memory is in MB, but the thresholds are
        # 16 GB / 8 GB — the old code compared MB against 16, so the high
        # memory warning fired for virtually any dataset. Compare in GB.
        estimated_total_gb = estimated_total_memory / 1024
        if estimated_total_gb > 16:  # 16 GB threshold
            print("⚠️ WARNING: High memory usage detected!")
            print(" This implementation may crash systems with <32GB RAM")
            print(" Consider reducing batch size or implementing gradient accumulation")
        elif estimated_total_gb > 8:
            print("⚡ CAUTION: Moderate memory usage")
            print(" May be slow on systems with <16GB RAM")
        else:
            print("✅ Memory usage is reasonable for most systems")
    except Exception as e:
        print(f"Error during PyTorch memory analysis: {e}")
        print("Make sure dataset files exist and are accessible.")
if __name__ == "__main__":
    # Command-line entry point.
    #
    # Usage:
    #     python scripts/utils.py           # Run all utilities
    #     python scripts/utils.py debug     # Run debug_structure only
    #     python scripts/utils.py sizes     # Run check_image_sizes only
    #     python scripts/utils.py memory    # Run analyze_memory_usage only
    import sys

    commands = {
        "debug": debug_structure,
        "sizes": check_image_sizes,
        "memory": analyze_memory_usage,
    }
    if len(sys.argv) > 1:
        handler = commands.get(sys.argv[1])
        if handler is not None:
            handler()
        else:
            print("Usage: python utils.py [debug|sizes|memory]")
    else:
        debug_structure()
        check_image_sizes()
        analyze_memory_usage()