import torch
from torchvision import datasets, transforms
from torch.utils.data import DataLoader
def create_dataloaders(batch_size=32, *, num_workers=0, data_root="./data"):
    """Create train/val/test DataLoaders for the Flowers102 dataset.

    Downloads the dataset into ``data_root`` on first use. Training data
    gets standard augmentation (random rotation, resized crop, horizontal
    flip); validation and test data get a deterministic resize + center crop
    so evaluation is reproducible. All splits are normalized with the
    ImageNet mean/std, matching torchvision models pretrained on ImageNet.

    Args:
        batch_size: Samples per batch for every loader. Defaults to 32.
        num_workers: Subprocesses used by each DataLoader for data loading.
            0 (the default, and the previous hard-coded behavior) loads in
            the main process.
        data_root: Directory where the dataset is stored/downloaded.
            Defaults to "./data" (the previous hard-coded path).

    Returns:
        Tuple of (train_loader, val_loader, test_loader).
    """
    # Input size expected by common ImageNet-pretrained backbones.
    IMG_SIZE = 224
    # ImageNet channel statistics — required when using pretrained weights.
    IMG_MEAN = [0.485, 0.456, 0.406]
    IMG_STD = [0.229, 0.224, 0.225]

    # Training transforms: augmentation to reduce overfitting on the
    # relatively small Flowers102 training split.
    train_transforms = transforms.Compose(
        [
            transforms.RandomRotation(30),
            transforms.RandomResizedCrop(IMG_SIZE),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize(IMG_MEAN, IMG_STD),
        ]
    )

    # Eval transforms: deterministic resize-then-center-crop (no augmentation)
    # so validation/test metrics are stable across runs.
    test_transforms = transforms.Compose(
        [
            transforms.Resize(256),
            transforms.CenterCrop(IMG_SIZE),
            transforms.ToTensor(),
            transforms.Normalize(IMG_MEAN, IMG_STD),
        ]
    )

    # Download (if needed) and wrap each split with its transform pipeline.
    train_dataset = datasets.Flowers102(
        root=data_root, split="train", download=True, transform=train_transforms
    )
    val_dataset = datasets.Flowers102(
        root=data_root, split="val", download=True, transform=test_transforms
    )
    test_dataset = datasets.Flowers102(
        root=data_root, split="test", download=True, transform=test_transforms
    )

    # Shuffle only the training set; eval order stays fixed for reproducibility.
    train_loader = DataLoader(
        train_dataset, batch_size=batch_size, shuffle=True, num_workers=num_workers
    )
    val_loader = DataLoader(
        val_dataset, batch_size=batch_size, shuffle=False, num_workers=num_workers
    )
    test_loader = DataLoader(
        test_dataset, batch_size=batch_size, shuffle=False, num_workers=num_workers
    )
    return train_loader, val_loader, test_loader
|