Update train.py
Browse files
train.py
CHANGED
|
@@ -28,14 +28,15 @@ def get_transforms():
|
|
| 28 |
return albu.Compose([albu.RandomCrop(512, 512, always_apply = True), albu.HorizontalFlip(p = 0.5)], p = 1.)
|
| 29 |
|
| 30 |
def get_dataloaders(data_path, transforms, batch_size, fine_tuning, mult_number):
|
| 31 |
-
train_dataset = TrainDataset(data_path, transforms, mult_number)
|
| 32 |
-
train_dataloader = torch.utils.data.DataLoader(train_dataset, batch_size
|
| 33 |
|
| 34 |
if fine_tuning:
|
| 35 |
-
finetuning_dataset = FineTuningDataset(data_path, transforms)
|
| 36 |
-
finetuning_dataloader = torch.utils.data.DataLoader(finetuning_dataset, batch_size
|
| 37 |
-
|
| 38 |
-
|
|
|
|
| 39 |
|
| 40 |
def get_models(device):
|
| 41 |
generator = Generator()
|
|
|
|
| 28 |
return albu.Compose([albu.RandomCrop(512, 512, always_apply = True), albu.HorizontalFlip(p = 0.5)], p = 1.)
|
| 29 |
|
| 30 |
def get_dataloaders(data_path, transforms, batch_size, fine_tuning, mult_number):
    """Construct the dataloader(s) used for training.

    Args:
        data_path: root directory handed to the dataset constructors.
        transforms: augmentation pipeline applied by the datasets.
        batch_size: batch size for every dataloader built here.
        fine_tuning: when truthy, also build a fine-tuning dataloader.
        mult_number: multiplier count forwarded to the datasets.

    Returns:
        A ``(train_dataloader, finetuning_dataloader)`` pair; the second
        element is ``None`` unless ``fine_tuning`` is truthy.
    """
    train_loader = torch.utils.data.DataLoader(
        TrainDataset(data_path, transforms, mults_amount=mult_number),
        batch_size=batch_size,
        shuffle=True,
    )

    # Guard clause: without fine-tuning there is only the training loader.
    if not fine_tuning:
        return train_loader, None

    # NOTE(review): keyword is `mult_amount` here but `mults_amount` for
    # TrainDataset — presumably the two dataset classes really do spell the
    # parameter differently; verify against their definitions.
    finetune_loader = torch.utils.data.DataLoader(
        FineTuningDataset(data_path, transforms, mult_amount=mult_number),
        batch_size=batch_size,
        shuffle=True,
    )
    return train_loader, finetune_loader
|
| 40 |
|
| 41 |
def get_models(device):
|
| 42 |
generator = Generator()
|