File size: 3,252 Bytes
23206d2 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 |
#!/usr/bin/env python3
"""
This script is used for splitting an image dataset into training, validation, and test sets.
Expected input format:
```
dataset_name
βββ class_1
β βββ image.jpg
β βββ image.png
β βββ ...
βββ class_2
β βββ image.jpg
β βββ ...
βββ ...
```
Output format:
```
dataset_name
βββ train
β βββ class_1
β β βββ image.jpg
β β βββ ...
β βββ class_2
β β βββ image.jpg
β β βββ ...
βββ val
β βββ class_1
β β βββ image.jpg
β β βββ ...
βββ test
β βββ class_1
β β βββ image.jpg
β β βββ ...
```
"""
import argparse
import os
import random
import shutil
import torch
import torchvision
from sklearn.model_selection import train_test_split
def make_dataset_splits(args: argparse.Namespace) -> None:
    """Split an ImageFolder-style dataset into stratified train/val/test dirs.

    Reads ``args.dataset_dir`` (one subdirectory per class), then copies each
    image into ``args.output_dir/{train,test,val}/<class_name>/``.

    Args:
        args: Parsed CLI namespace. Required attributes: ``dataset_dir``,
            ``output_dir``, ``seed``. Optional ``test_size`` and ``val_size``
            fractions default to 0.1 each, matching the previously
            hard-coded behavior (final split ~81% / 9% / 10%).
    """
    random.seed(args.seed)
    torch.manual_seed(args.seed)

    # Read fractions defensively so namespaces built without the new CLI
    # flags keep the original 10% test / 10%-of-train val behavior.
    test_size = getattr(args, 'test_size', 0.1)
    val_size = getattr(args, 'val_size', 0.1)

    dataset = torchvision.datasets.ImageFolder(
        root=args.dataset_dir,
    )
    print(f'Total images found: {len(dataset)}')

    # Class label per sample, computed once and reused for both splits.
    labels = [target for _, target in dataset.samples]

    # First carve out the test set, then split val out of the remainder.
    # Stratify on labels so every split preserves the class proportions.
    train_indices, test_indices = train_test_split(
        range(len(dataset)),
        test_size=test_size,
        random_state=args.seed,
        stratify=labels,
    )
    train_indices, val_indices = train_test_split(
        train_indices,
        test_size=val_size,
        random_state=args.seed,
        stratify=[labels[i] for i in train_indices],
    )
    print(
        f'Train size: {len(train_indices)}, '
        f'Test size: {len(test_indices)}, '
        f'Val size: {len(val_indices)}'
    )

    # Create the split/class directory tree, then copy files into place.
    os.makedirs(args.output_dir, exist_ok=True)
    split_names = ['train', 'test', 'val']
    for split, indices in zip(
        split_names, [train_indices, test_indices, val_indices], strict=True
    ):
        split_dir = os.path.join(args.output_dir, split)
        os.makedirs(split_dir, exist_ok=True)
        for class_name in dataset.classes:
            os.makedirs(os.path.join(split_dir, class_name), exist_ok=True)
        for idx in indices:
            src_path, label = dataset.samples[idx]
            class_name = dataset.classes[label]
            # NOTE(review): assumes basenames are unique within a class.
            # ImageFolder recurses into nested subdirectories, where
            # duplicate basenames would silently overwrite — confirm.
            dst_path = os.path.join(split_dir, class_name, os.path.basename(src_path))
            shutil.copyfile(src_path, dst_path)
def main() -> None:
    """Parse CLI arguments and run the dataset splitter."""
    parser = argparse.ArgumentParser(
        description='Make dataset splits',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )
    parser.add_argument(
        '--seed',
        type=int,
        help='Random seed',
        default=42,
    )
    parser.add_argument(
        '--dataset_dir',
        type=str,
        required=True,
        help='Path to the dataset directory',
    )
    parser.add_argument(
        '--output_dir',
        type=str,
        required=True,
        help='Path to the output directory',
    )
    # Defaults reproduce the historical hard-coded 0.1/0.1 split fractions.
    parser.add_argument(
        '--test_size',
        type=float,
        default=0.1,
        help='Fraction of the whole dataset reserved for the test split',
    )
    parser.add_argument(
        '--val_size',
        type=float,
        default=0.1,
        help='Fraction of the remaining train pool reserved for validation',
    )
    args = parser.parse_args()
    make_dataset_splits(args)
# Run the splitter only when executed as a script (not on import).
if __name__ == '__main__':
    main()
|