Uploaded the remaining python files
Browse files- model.py +66 -0
- requirements.txt +6 -0
- utils.py +122 -0
model.py
ADDED
|
@@ -0,0 +1,66 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
|
| 2 |
+
# Bug fix: `import torch` and `from torch import nn` were commented out,
# but this module uses nn.Module, torch.stack, etc. It only worked by
# accident through the names re-exported by `from utils import *`.
# Import them explicitly so the module stands on its own.
import torch
from torch import nn
import torch.nn.functional as F
import torchvision
from torchvision.models import resnet50, ResNet50_Weights

from utils import *
from data_setup import classes
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
class ImageClassificationBase(nn.Module):
    """Shared training/validation hooks for image classifiers.

    Subclasses only implement ``forward``; the ``fit_one_cycle`` /
    ``evaluation`` helpers in utils.py drive these hooks.
    """

    def training_step(self, batch):
        """Run one training batch and return ``(loss, accuracy)``."""
        images, labels = batch
        logits = self(images)
        loss = F.cross_entropy(logits, labels)
        return loss, accuracy(logits, labels)

    def validation_step(self, batch):
        """Run one validation batch; loss is detached for safe aggregation."""
        images, labels = batch
        logits = self(images)
        loss = F.cross_entropy(logits, labels)
        return {'val_loss': loss.detach(), 'val_acc': accuracy(logits, labels)}

    def validation_end_epoch(self, results):
        """Average per-batch validation metrics into epoch-level scalars."""
        epoch_loss = torch.stack([r['val_loss'] for r in results]).mean()
        epoch_acc = torch.stack([r['val_acc'] for r in results]).mean()
        return {'val_loss': epoch_loss.item(), 'val_acc': epoch_acc.item()}

    def epoch_end(self, epoch, result):
        """Log the metrics collected for one epoch."""
        print(f"Epoch {epoch+1}: train_loss: {result['train_losses']:.4f}, train_acc: {result['train_acc']:.4f}, \
            val_loss: {result['val_loss']:.4f}, val_acc: {result['val_acc']:.4f} ")
| 44 |
+
|
| 45 |
+
class FlowerClassificationModel(ImageClassificationBase):
    """ResNet-50 transfer-learning classifier.

    The backbone is frozen; only the replaced final fully-connected head
    (``self.network.fc``) remains trainable.

    Args:
        num_classes: number of output units of the new classifier head.
        pretrained: if True, load ImageNet weights for the backbone.
    """

    def __init__(self, num_classes, pretrained=True):
        super().__init__()

        # Bug fix: the original compared version strings lexicographically
        # ('0.9.0' >= '0.13.0' is True), picking the wrong API branch.
        # Compare (major, minor) as integers instead.
        tv_version = tuple(
            int(part) for part in torchvision.__version__.split('+')[0].split('.')[:2]
        )

        # 1. Get the base model (pretrained weights when requested).
        # Bug fix: removed the duplicated `self.network = self.network =`
        # assignment, and added the missing pretrained=False branch (the
        # original never assigned self.network in that case, so forward()
        # raised AttributeError).
        if pretrained:
            if tv_version >= (0, 13):
                self.network = resnet50(weights=ResNet50_Weights.DEFAULT)
            else:
                self.network = torchvision.models.resnet50(pretrained=True)
        else:
            if tv_version >= (0, 13):
                self.network = resnet50(weights=None)
            else:
                self.network = torchvision.models.resnet50(pretrained=False)

        # Freeze the backbone so only the new head (added below) trains.
        for param in self.network.parameters():
            param.requires_grad = False

        # Replace last layer. Read in_features from the model instead of
        # hard-coding 2048 — robust to backbone variants.
        self.network.fc = nn.Linear(in_features=self.network.fc.in_features,
                                    out_features=num_classes,  # same number of output units as our number of classes
                                    bias=True)

    def forward(self, xb):
        """Forward a batch through the (partially frozen) backbone."""
        return self.network(xb)
|
requirements.txt
ADDED
|
@@ -0,0 +1,6 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
pandas
|
| 2 |
+
torch
|
| 3 |
+
torchvision
|
| 4 |
+
gradio
|
| 5 |
+
albumentations
|
| 6 |
+
numpy
matplotlib
|
utils.py
ADDED
|
@@ -0,0 +1,122 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import torch
import torch.nn as nn
import numpy as np
# Bug fix: was `import matplotlib as plt` — the plotting helpers below call
# plt.plot/plt.imshow, which live in matplotlib.pyplot, not the top-level
# matplotlib package (that import made every plot call an AttributeError).
import matplotlib.pyplot as plt
from torch.utils.data import DataLoader, TensorDataset
|
| 6 |
+
|
| 7 |
+
def show_example(img,label):
    """Print a sample's class name and render its CHW tensor as an image.

    NOTE(review): `classes` is never defined or imported in utils.py — this
    function only works when the caller's namespace supplies it (model.py
    does `from data_setup import classes`); calling it from this module
    raises NameError. Consider importing `classes` here or passing it in.
    """
    print('Label: ', classes[label], '('+str(label)+')')
    # permute CHW -> HWC, the layout matplotlib expects
    plt.imshow(img.permute(1, 2, 0))
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
def denormalize(images, means, stds):
    """Undo per-channel normalisation on a batch of images.

    `means` and `stds` are 3-element sequences; they are reshaped to
    (1, 3, 1, 1) so they broadcast over an (N, 3, H, W) batch.
    """
    channel_shape = (1, 3, 1, 1)
    mean_t = torch.tensor(means).reshape(channel_shape)
    std_t = torch.tensor(stds).reshape(channel_shape)
    return images * std_t + mean_t
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
def accuracy(out, labels):
    """Fraction of rows of `out` whose argmax equals `labels`, as a 0-dim tensor."""
    preds = out.argmax(dim=1)
    frac = (preds == labels).sum().item() / len(preds)
    return torch.tensor(frac)
|
| 22 |
+
|
| 23 |
+
@torch.inference_mode()
def evaluation(model, val_loader):
    """Run a full validation pass and return the aggregated epoch metrics."""
    model.eval()  # switch off dropout / batch-norm updates
    batch_results = []
    for batch in val_loader:
        batch_results.append(model.validation_step(batch))
    return model.validation_end_epoch(batch_results)
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
def to_device(data, device):
    """Recursively move a tensor (or tuple/list of tensors) onto `device`.

    Note: containers come back as lists (tuples included), matching the
    original helper's behaviour.
    """
    if not isinstance(data, (tuple, list)):
        return data.to(device, non_blocking=True)
    return [to_device(item, device) for item in data]
|
| 35 |
+
|
| 36 |
+
class DeviceDataLoader:
    """Wrap a DataLoader so every batch is moved to `device` on the fly.

    Bug fix: the original subclassed ``DataLoader`` but never called
    ``DataLoader.__init__``, so every inherited attribute and method
    (``dataset``, ``batch_size``, sampler handling, ...) was left in a
    broken state. This wrapper only ever delegates to the loader it
    wraps, so plain composition is both correct and simpler; the public
    interface (construction, iteration, len) is unchanged.
    """

    def __init__(self, dl, device):
        self.dl = dl          # the underlying, already-constructed loader
        self.device = device  # target device for every batch

    def __iter__(self):
        """Yield a batch of data after moving it to device"""
        for batch in self.dl:
            yield to_device(batch, self.device)

    def __len__(self):
        """Number of batches"""
        return len(self.dl)
|
| 49 |
+
|
| 50 |
+
|
| 51 |
+
def get_lr(optimizer):
    """Return the learning rate of the optimizer's first param group (None if there are none)."""
    groups = optimizer.param_groups
    return groups[0]['lr'] if groups else None
|
| 54 |
+
|
| 55 |
+
def fit_one_cycle(epochs, max_lr, model, train_loader, val_loader,
                  weight_decay=0, grad_clip=None, opt_func=torch.optim.SGD):
    """Train `model` with the one-cycle LR policy and return per-epoch history.

    Args:
        epochs: number of passes over `train_loader`.
        max_lr: peak learning rate for the OneCycle schedule.
        model: an ImageClassificationBase-style model (training_step /
            epoch_end hooks); assumed already on the right device.
        train_loader, val_loader: batch iterators (e.g. DeviceDataLoader).
        weight_decay: L2 penalty passed to the optimizer.
        grad_clip: if truthy, clip gradient values to [-grad_clip, grad_clip].
        opt_func: optimizer constructor (default: torch.optim.SGD).

    Returns:
        List of per-epoch result dicts: validation metrics plus
        'train_losses', 'train_acc' and the per-batch 'lrs'.
    """
    torch.cuda.empty_cache()
    history = []

    # Set up custom optimizer with weight decay
    optimizer = opt_func(model.parameters(), max_lr, weight_decay=weight_decay)

    # Set up one-cycle learning rate scheduler (stepped once per batch)
    sched = torch.optim.lr_scheduler.OneCycleLR(optimizer, max_lr, epochs=epochs,
                                                steps_per_epoch=len(train_loader))

    for epoch in range(epochs):
        # Training Phase
        model.train()
        train_losses = []
        train_acc = []
        lrs = []
        for batch in train_loader:
            loss, acc = model.training_step(batch)
            # Bug fix: store a detached copy. Keeping the live loss tensor
            # retained every batch's autograd graph for the whole epoch,
            # ballooning memory; the mean/.item() at epoch end is identical.
            train_losses.append(loss.detach())
            train_acc.append(acc)
            loss.backward()

            # Gradient clipping
            if grad_clip:
                nn.utils.clip_grad_value_(model.parameters(), grad_clip)

            optimizer.step()
            optimizer.zero_grad()

            # Record & update learning rate
            lrs.append(get_lr(optimizer))
            sched.step()

        # Validation phase
        result = evaluation(model, val_loader)
        result['train_losses'] = torch.stack(train_losses).mean().item()
        result['train_acc'] = torch.stack(train_acc).mean().item()
        result['lrs'] = lrs
        model.epoch_end(epoch, result)
        history.append(result)
    return history
|
| 98 |
+
|
| 99 |
+
|
| 100 |
+
def plot_accuracies(history):
    """Plot validation vs. training accuracy per epoch from a fit history.

    The first history entry is skipped for the training curve, matching
    the original behaviour (entry 0 may lack train metrics).
    """
    val_accs = [entry['val_acc'] for entry in history]
    train_accs = [entry['train_acc'] for entry in history[1:]]
    plt.plot(val_accs, '-rx')
    plt.plot(train_accs, '-bx')
    plt.xlabel('epoch')
    plt.ylabel('accuracy')
    plt.legend(['Validation', 'Training'])
    plt.title('Accuracy vs. No. of epochs');
|
| 107 |
+
|
| 108 |
+
def plot_losses(history):
    """Plot validation vs. training loss per epoch from a fit history.

    The first history entry is skipped for the training curve, matching
    the original behaviour (entry 0 may lack train metrics).
    """
    val_losses = [entry['val_loss'] for entry in history]
    train_losses = [entry['train_losses'] for entry in history[1:]]
    plt.plot(val_losses, '-rx')
    plt.plot(train_losses, '-bx')
    plt.xlabel('epoch')
    plt.ylabel('loss')
    plt.legend(['Validation', 'Training'])
    plt.title('Loss vs. No. of epochs');
|
| 115 |
+
|
| 116 |
+
|
| 117 |
+
def plot_lrs(history):
    """Plot the per-batch learning-rate schedule recorded during training."""
    per_epoch = [entry.get('lrs', []) for entry in history]
    lrs = np.concatenate(per_epoch)
    plt.plot(lrs)
    plt.xlabel('Batch no.')
    plt.ylabel('Learning rate')
    plt.title('Learning Rate vs. Batch no.');
|