|
|
import torch |
|
|
import pandas as pd |
|
|
from torch.utils.data import Dataset, DataLoader |
|
|
from PIL import Image |
|
|
import io |
|
|
from pathlib import Path |
|
|
from torchvision import transforms |
|
|
from typing import Optional, Callable, Tuple, Union |
|
|
|
|
|
|
|
|
class ImageNet100Parquet(Dataset):
    """ImageNet-100 dataset backed by parquet shards on disk.

    Expects files named ``{split}-*.parquet`` inside ``data_dir``. Each row
    must provide an ``image`` column (a mapping holding raw encoded bytes
    under the key ``'bytes'``) and an integer ``label`` column — the
    Hugging Face parquet export layout. TODO confirm the schema against the
    actual files; it is assumed from the access pattern below.
    """

    def __init__(self, data_dir: str, split: str = "train",
                 transform: Optional[Callable] = None) -> None:
        """
        Initialize ImageNet-100 dataset from parquet files.

        Args:
            data_dir: Path to directory containing parquet files.
            split: Dataset split - either "train" or "validation".
            transform: Optional torchvision transforms to apply to images.

        Raises:
            FileNotFoundError: If data directory doesn't exist.
            ValueError: If the split name is invalid, or no parquet files
                are found for the specified split.
            RuntimeError: If the parquet files exist but cannot be read.
        """
        self.data_dir = Path(data_dir)
        self.transform = transform

        if not self.data_dir.exists():
            raise FileNotFoundError(
                f"Data directory not found: {self.data_dir}")

        valid_splits = ["train", "validation"]
        if split not in valid_splits:
            raise ValueError(
                f"Invalid split '{split}'. Must be one of: {valid_splits}")
        # Stored after validation so the attribute is always a valid split.
        self.split = split

        # Sorted so shard order (and therefore row order) is deterministic
        # across runs and filesystems.
        parquet_files = sorted(self.data_dir.glob(f"{split}-*.parquet"))
        if not parquet_files:
            raise ValueError(
                f"No parquet files found for split '{split}' in {self.data_dir}")

        try:
            self.data = pd.concat([pd.read_parquet(f)
                                   for f in parquet_files], ignore_index=True)
        except Exception as e:
            # Chain the cause so the underlying parquet error stays visible.
            raise RuntimeError(f"Failed to load parquet files: {e}") from e

    def __len__(self) -> int:
        """Return the total number of samples in the dataset."""
        return len(self.data)

    def __getitem__(self, idx: int) -> "Tuple[Union[Image.Image, torch.Tensor], torch.Tensor]":
        """
        Get a sample from the dataset at the given index.

        Args:
            idx: Index of the sample to retrieve. Negative indices are
                supported with the usual Python sequence semantics.

        Returns:
            Tuple of (image, label) where image is a PIL Image or transformed
            tensor, and label is a torch tensor with dtype long.

        Raises:
            IndexError: If idx is out of range.
            RuntimeError: If image decoding fails.
        """
        n = len(self.data)
        # Reject anything outside [-n, n). The previous one-sided check let
        # far-negative indices reach iloc and surface as RuntimeError,
        # contradicting the documented IndexError contract.
        if not -n <= idx < n:
            raise IndexError(
                f"Index {idx} out of range for dataset of size {n}")
        if idx < 0:
            idx += n

        row = self.data.iloc[idx]

        # Keep the try narrow: only the image extraction/decode is wrapped,
        # so transform errors propagate with their real type instead of
        # being masked as "Failed to load sample".
        try:
            image_bytes = row['image']['bytes']
            image = Image.open(io.BytesIO(image_bytes)).convert('RGB')
        except Exception as e:
            raise RuntimeError(
                f"Failed to load sample at index {idx}: {e}") from e

        label = torch.tensor(row['label'], dtype=torch.long)

        # `is not None` rather than truthiness: a falsy-but-valid callable
        # would otherwise be silently skipped.
        if self.transform is not None:
            image = self.transform(image)

        return image, label
|
|
|
|
|
|
|
|
def _preview_batches(loader: DataLoader, name: str, max_batches: int = 10) -> None:
    """Print tensor shapes for batches from *loader* until the cap is hit.

    Args:
        loader: DataLoader to iterate.
        name: Loader label used in the banner ("train" / "test").
        max_batches: Index at which iteration stops; preserves the original
            script's behavior of printing up to max_batches + 1 batches.
    """
    print(f"\nTesting {name} loader iteration...")
    for batch_idx, (x, y_true) in enumerate(loader):
        print(f"Batch shape: {x.shape}, Labels shape: {y_true.shape}")
        if batch_idx >= max_batches:
            break


def main(data_dir: str = "../data") -> None:
    """Smoke-test the ImageNet100Parquet dataset through a DataLoader.

    Builds train/validation datasets with a basic resize+tensor transform,
    reports their sizes, and iterates a handful of batches from each loader.

    Args:
        data_dir: Directory holding the parquet shards. Defaults to the
            previously hard-coded location, so existing callers are
            unaffected.
    """
    transform = transforms.Compose([
        transforms.Resize((224, 224)),
        transforms.ToTensor(),
    ])

    train_dataset = ImageNet100Parquet(data_dir, "train", transform)
    test_dataset = ImageNet100Parquet(data_dir, "validation", transform)

    print(f"Train dataset size: {len(train_dataset)}")
    print(f"Test dataset size: {len(test_dataset)}")

    train_loader = DataLoader(train_dataset, batch_size=8, shuffle=True)
    test_loader = DataLoader(test_dataset, batch_size=8, shuffle=False)

    # The two preview loops were duplicated verbatim; factored into a helper.
    _preview_batches(train_loader, "train")
    _preview_batches(test_loader, "test")
|
|
|
|
|
|
|
|
# Entry-point guard: run the demo only when executed as a script,
# not when this module is imported for its Dataset class.
if __name__ == "__main__":
    main()
|
|
|