File size: 2,901 Bytes
4ec6f12
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
# Dataset creation for images extracted from the videos, applying the required preprocessing.
from torch.utils.data import DataLoader
from torchvision.io import read_image
from torch.utils.data import Dataset
from torchvision.transforms import v2
from torchvision import transforms
from torchvision import datasets
from PIL import Image
import pandas as pd
import idx2numpy, os
import torch

# Dataset creation,
# Loading from a custom dataset
# Side length (in pixels) every image is resized to before entering the model.
IMAGE_DIMS = 224

# Preprocessing pipeline applied to each training image (C, H, W uint8 tensor in,
# normalized float32 tensor out).
# NOTE: the Normalize statistics below (~0.1307 / 0.3081) are the standard MNIST
# mean/std, which are defined for pixel values scaled to [0, 1]. The uint8
# (0-255) tensors produced by read_image therefore must be scaled during the
# dtype conversion — hence scale=True on ToDtype.
normal_transforms = v2.Compose([
    v2.Resize(size=(IMAGE_DIMS, IMAGE_DIMS)),
    # scale=True divides uint8 input by 255 so Normalize sees [0, 1] values.
    v2.ToDtype(torch.float32, scale=True),
    # Mild augmentation: random rotation within +/-15 degrees.
    v2.RandomRotation(degrees=(-15, 15)),
    # Single-channel stats broadcast across all channels of an RGB tensor.
    transforms.Normalize((0.13066047430038452,), (0.30810782313346863,)),
])




class CustomImageDataset(Dataset):
    """Map-style dataset reading images listed in a CSV annotations file.

    Inherits from torch.utils.data.Dataset and implements the required
    ``__init__``, ``__len__``, and ``__getitem__`` protocol.

    The CSV is expected to hold the image filename in column 0 and the
    label in column 1 (that is how ``__getitem__`` consumes it).

    Args:
        annotations_file: Path to the CSV of (filename, label) rows.
        img_dir: Directory the filenames in the CSV are relative to.
        transform: Optional callable applied to the image tensor.
        target_transform: Optional callable applied to the label.
    """

    def __init__(self, annotations_file, img_dir, transform=None, target_transform=None):
        self.img_labels = pd.read_csv(annotations_file)
        self.img_dir = img_dir
        self.transform = transform
        self.target_transform = target_transform

    def __len__(self):
        # One row in the annotations CSV per sample.
        return len(self.img_labels)

    def __getitem__(self, idx):
        """Return the (image, label) pair at row ``idx`` of the CSV."""
        img_path = os.path.join(self.img_dir, self.img_labels.iloc[idx, 0])
        image = read_image(img_path)
        # Normalize the channel count to 3-channel RGB *in memory*. The
        # previous code re-encoded and overwrote the file on disk on every
        # access, which is slow, lossy (repeated JPEG re-compression), and
        # unsafe with multiple DataLoader workers.
        if image.shape[0] == 1:        # grayscale -> replicate to 3 channels
            image = image.repeat(3, 1, 1)
        elif image.shape[0] == 4:      # RGBA -> drop the alpha channel
            image = image[:3]
        label = self.img_labels.iloc[idx, 1]
        if self.transform:
            image = self.transform(image)
        if self.target_transform:
            label = self.target_transform(label)
        return image, label


# Full labelled dataset: images live under train/, annotations in labels.csv.
train_data = CustomImageDataset("./dataset/root/labels.csv", "./dataset/root/train/", transform=normal_transforms)

# Hold out 20% of the samples for evaluation.
train_size = int(len(train_data) * 0.8)
test_size = len(train_data) - train_size
train_dataset, test_dataset = torch.utils.data.random_split(train_data, [train_size, test_size])

# Batched iterators over each split; only the training split is shuffled.
train_loader = DataLoader(train_dataset, batch_size=32, shuffle=True)
test_loader = DataLoader(test_dataset, batch_size=32, shuffle=False)

print("Data loader and Test Loaders are ready to be used.")


# Create first stage labels,
# movement stage labels
# final stage labels.