File size: 4,291 Bytes
a15bf28
 
 
 
 
 
 
4271e91
a15bf28
 
 
fe2aefa
4271e91
 
 
fe2aefa
4271e91
 
 
 
fe2aefa
4271e91
 
 
 
a15bf28
 
 
4271e91
 
fe2aefa
 
 
4271e91
 
 
fe2aefa
 
4271e91
a15bf28
 
4271e91
fe2aefa
 
 
4271e91
 
 
 
 
 
 
 
a15bf28
 
4271e91
 
 
fe2aefa
4271e91
 
fe2aefa
4271e91
 
 
fe2aefa
4271e91
 
 
 
 
fe2aefa
 
 
4271e91
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
fe2aefa
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
a15bf28
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
import torch
import pandas as pd
from torch.utils.data import Dataset, DataLoader
from PIL import Image
import io
from pathlib import Path
from torchvision import transforms
from typing import Optional, Callable, Tuple, Union


class ImageNet100Parquet(Dataset):
    """ImageNet-100 dataset backed by parquet shards.

    Each parquet row is expected to hold an 'image' column containing a
    dict with raw encoded bytes under 'bytes', plus an integer 'label'
    column.  # NOTE(review): schema inferred from __getitem__ usage — confirm
    """

    def __init__(self, data_dir: str, split: str = "train",
                 transform: Optional[Callable] = None) -> None:
        """
        Initialize ImageNet-100 dataset from parquet files.

        Args:
            data_dir: Path to directory containing parquet files.
            split: Dataset split - either "train" or "validation".
            transform: Optional torchvision transforms to apply to images.

        Raises:
            FileNotFoundError: If data directory doesn't exist.
            ValueError: If the split name is invalid, or no parquet files
                are found for the specified split.
            RuntimeError: If the parquet files cannot be read.
        """
        self.data_dir = Path(data_dir)
        self.transform = transform

        # Validate data directory
        if not self.data_dir.exists():
            raise FileNotFoundError(
                f"Data directory not found: {self.data_dir}")

        # Validate split
        valid_splits = ["train", "validation"]
        if split not in valid_splits:
            raise ValueError(
                f"Invalid split '{split}'. Must be one of: {valid_splits}")

        # Shards are globbed as "<split>-*.parquet"; sorting keeps sample
        # order deterministic across runs/filesystems.
        parquet_files = sorted(self.data_dir.glob(f"{split}-*.parquet"))
        if not parquet_files:
            raise ValueError(
                f"No parquet files found for split '{split}' in {self.data_dir}")

        try:
            self.data = pd.concat([pd.read_parquet(f)
                                  for f in parquet_files], ignore_index=True)
        except Exception as e:
            # Chain the original exception so the root cause stays visible.
            raise RuntimeError(f"Failed to load parquet files: {e}") from e

    def __len__(self) -> int:
        """Return the total number of samples in the dataset."""
        return len(self.data)

    def __getitem__(self, idx: int) -> "Tuple[Union[Image.Image, torch.Tensor], torch.Tensor]":
        """
        Get a sample from the dataset at the given index.

        Args:
            idx: Index of the sample to retrieve. Negative indices are
                supported with the usual Python sequence semantics.

        Returns:
            Tuple of (image, label) where image is a PIL Image or transformed
            tensor, and label is a torch tensor with dtype long.

        Raises:
            IndexError: If idx is out of range.
            RuntimeError: If image decoding fails.
        """
        # Bounds-check both directions; the original check only caught
        # idx >= len, so large negative indices fell through to iloc and
        # surfaced as RuntimeError instead of IndexError.
        n = len(self.data)
        if not -n <= idx < n:
            raise IndexError(
                f"Index {idx} out of range for dataset of size {n}")
        if idx < 0:
            idx += n

        try:
            row = self.data.iloc[idx]

            # Decode image from bytes (stored in dict)
            image_bytes = row['image']['bytes']
            image = Image.open(io.BytesIO(image_bytes)).convert('RGB')
            label = torch.tensor(row['label'], dtype=torch.long)

            if self.transform is not None:
                image = self.transform(image)

            return image, label
        except Exception as e:
            # Preserve the cause chain for easier debugging of bad rows.
            raise RuntimeError(
                f"Failed to load sample at index {idx}: {e}") from e


def main(data_dir: str = "../data", max_batches: int = 11) -> None:
    """Smoke-test the ImageNet-100 parquet datasets and dataloaders.

    Builds train/validation datasets, wraps them in DataLoaders, and prints
    batch/label shapes for a handful of batches from each.

    Args:
        data_dir: Directory holding the parquet shards. Defaults to the
            original hard-coded "../data".
        max_batches: Number of batches to print per loader. Defaults to 11,
            matching the original manual-counter loop's behavior.
    """
    # Define transforms
    transform = transforms.Compose([
        transforms.Resize((224, 224)),
        transforms.ToTensor(),
    ])

    # Create datasets
    train_dataset = ImageNet100Parquet(data_dir, "train", transform)
    test_dataset = ImageNet100Parquet(data_dir, "validation", transform)

    print(f"Train dataset size: {len(train_dataset)}")
    print(f"Test dataset size: {len(test_dataset)}")

    # Create dataloaders
    train_loader = DataLoader(train_dataset, batch_size=8, shuffle=True)
    test_loader = DataLoader(test_dataset, batch_size=8, shuffle=False)

    # Test iteration (same preview logic for both loaders; previously
    # duplicated with a hand-rolled counter)
    print("\nTesting train loader iteration...")
    _preview_batches(train_loader, max_batches)

    print("\nTesting test loader iteration...")
    _preview_batches(test_loader, max_batches)


def _preview_batches(loader: DataLoader, max_batches: int) -> None:
    """Print image/label batch shapes for up to ``max_batches`` batches."""
    for batch_idx, (x, y_true) in enumerate(loader):
        print(f"Batch shape: {x.shape}, Labels shape: {y_true.shape}")
        if batch_idx + 1 >= max_batches:
            break


# Run the demo only when executed as a script, not on import.
if __name__ == "__main__":
    main()