richardschattner committed on
Commit
c62c87b
·
verified ·
1 Parent(s): 9e99f58

Upload 4 files

Browse files
Files changed (4) hide show
  1. data.py +20 -0
  2. model.py +26 -0
  3. model_4epochs_90acc.pth +3 -0
  4. train.py +92 -0
data.py ADDED
@@ -0,0 +1,20 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from torch.utils.data import DataLoader
2
+ import torchvision
3
+
4
+ #get the correct transform for the effnet_b2 model
5
+ weights = torchvision.models.EfficientNet_B2_Weights.DEFAULT
6
+ transform = weights.transforms()
7
+
8
+ #create test/train datasets and dataloaders
9
+ train_dir = "intel_image/seg_train"
10
+ test_dir = "intel_image/seg_test"
11
+
12
+ train_data = torchvision.datasets.ImageFolder(root = train_dir, transform = transform)
13
+ test_data = torchvision.datasets.ImageFolder(root = test_dir, transform = transform)
14
+
15
+ train_loader = DataLoader(train_data, shuffle = True, batch_size = 32)
16
+ test_loader = DataLoader(test_data, shuffle = False, batch_size = 32)
17
+
18
+ def create_dataloaders():
19
+ """Returns: Training and test dataloaders """
20
+ return train_loader, test_loader
model.py ADDED
@@ -0,0 +1,26 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ import torchvision
3
+ from torch import nn
4
+
5
+ def create_model(num_classes = 6, seed = 1):
6
+ """Create an instance of the effnet_b2 model, freezes all layers and changes the classifier head.
7
+
8
+ Returns: The model and its data transform
9
+ """
10
+ #get pretrained model and its transform
11
+ weights = torchvision.models.EfficientNet_B2_Weights.DEFAULT
12
+ model = torchvision.models.efficientnet_b2(weights = weights)
13
+ transform = weights.transforms()
14
+
15
+ #freeze all layers
16
+ for param in model.parameters():
17
+ param.requires_grad = False
18
+
19
+ #create a new classifier head with 6 output classes
20
+ classifier = nn.Sequential(nn.Dropout(p = 0.2, inplace = True),
21
+ nn.Linear(in_features = 1408, out_features = num_classes))
22
+
23
+ #replace old classifier head with newly created one
24
+ model.classifier = classifier
25
+
26
+ return model, transform
model_4epochs_90acc.pth ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9e8c832d58b318e27fde63d94666d60075c2b29cd0a14480a96af2a88244c112
3
+ size 31289978
train.py ADDED
@@ -0,0 +1,92 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from tqdm.auto import tqdm
2
+ import torch
3
+ from torch import nn
4
+
5
+ from data.py import create_dataloaders
6
+
7
+ #get the train/test dataloaders from data.py
8
+ train_loader, test_loader = create_dataloaders()
9
+
10
+ #define an accuracy function
11
+ def accuracy_fn(y_true, y_pred):
12
+ correct = torch.eq(y_true, y_pred).sum().item()
13
+ acc = (correct / len(y_pred)) * 100
14
+ return acc
15
+
16
+ #instantiate loss function and optimizer
17
+ loss_fn = nn.CrossEntropyLoss()
18
+ optimizer = torch.optim.Adam(model.parameters(), lr = 1e-3)
19
+
20
+
21
+ #create a function for a training step
22
+ def train_step(model):
23
+ train_loss, train_accuracy = 0, 0
24
+ model.train()
25
+
26
+ for batch, (x,y) in enumerate(train_loader):
27
+ #get predictions
28
+ y_logits = model(x)
29
+ y_pred = y_logits.argmax(dim = 1)
30
+
31
+ #calculate loss
32
+ loss = loss_fn(y_logits, y)
33
+ train_loss += loss.item()
34
+ train_accuracy += accuracy_fn(y, y_pred)
35
+
36
+ #update model
37
+ optimizer.zero_grad()
38
+ loss.backward()
39
+ optimizer.step()
40
+
41
+ #divide test loss and accuracy by length of dataloader
42
+ train_loss /= len(train_loader)
43
+ train_accuracy /= len(train_loader)
44
+
45
+ #return train loss and accuracy
46
+ return train_loss, train_accuracy
47
+
48
+ #create a function to test the model
49
+ def test_step(model):
50
+ test_loss, test_accuracy = 0, 0
51
+
52
+ model.eval()
53
+ with torch.inference_mode():
54
+ for batch, (x,y) in enumerate(test_loader):
55
+ y_logits = model(x)
56
+ y_pred = y_logits.argmax(dim = 1)
57
+
58
+ loss = loss_fn(y_logits, y)
59
+ test_loss += loss.item()
60
+ test_accuracy += accuracy_fn(y, y_pred)
61
+
62
+ #divide test loss and accuracy by length of dataloader
63
+ test_loss /= len(test_loader)
64
+ test_accuracy /= len(test_loader)
65
+
66
+ #return test loss and accuracy
67
+ return test_loss, test_accuracy
68
+
69
+ def train(model, epochs):
70
+ """Trains a model for a given number of epochs
71
+
72
+ Args: model and epochs
73
+ Returns: The trained model and a dictionary of train/test loss and train/test accuracy for each epoch.
74
+ """
75
+ #create an empty list of train/test metrics
76
+ train_loss, test_loss, train_acc, test_acc = [], [], [], []
77
+ for epoch in tqdm(range(epochs)):
78
+ #train step and save the loss and accuracy
79
+ new_train_loss, new_train_acc = train_step(model)
80
+ train_loss.append(new_train_loss)
81
+ train_acc.append(new_train_acc)
82
+
83
+ #test step and save the loss and accuracy
84
+ new_test_loss, new_test_acc = test_step(model)
85
+ test_loss.append(new_test_loss)
86
+ test_acc.append(new_test_acc)
87
+
88
+ #put the metrics in a dictionary
89
+ metrics = {"train_loss": train_loss, "test_loss" : test_loss,
90
+ "train_acc": train_acc, "test_acc": test_acc}
91
+
92
+ return model, metrics