Commit e2f7ccb
1 Parent(s): abe0995
code for fashionmnist
- .pytest_cache/models/MNISTnet_state_dict.pt +3 -0
- .pytest_cache/models/convmodel.py +25 -0
- .pytest_cache/pyt_project/data_setup.py +61 -0
- .pytest_cache/pyt_project/engine.py +190 -0
- .pytest_cache/pyt_project/model_builder.py +54 -0
- .pytest_cache/pyt_project/train.py +61 -0
- .pytest_cache/pyt_project/utils.py +33 -0
- .pytest_cache/v/cache/stepwise +1 -0
- app.py +31 -43
.pytest_cache/models/MNISTnet_state_dict.pt
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:9f523ead7fb14f805b61b79aa976cefbc3994af63d2dd2f986692acb178f217a
size 83328
.pytest_cache/models/convmodel.py
ADDED
@@ -0,0 +1,25 @@
import torch
from torch import nn

class MNISTnet(nn.Module):
    def __init__(self, input_channels, num_labels, hidden_layers):
        super().__init__()
        self.block_one = nn.Sequential(
            nn.Conv2d(in_channels=input_channels, out_channels=hidden_layers, kernel_size=3, stride=1, padding='same'),
            nn.ReLU(),
        )
        self.block_two = nn.Sequential(
            nn.Conv2d(in_channels=hidden_layers, out_channels=num_labels, kernel_size=3, stride=1, padding='same'),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2, stride=2)
        )
        self.classifier = nn.Sequential(
            nn.Flatten(),
            nn.Linear(in_features=num_labels*14*14, out_features=10, bias=True)
        )

    def forward(self, x):
        x = self.block_one(x)
        x = self.block_two(x)
        x = self.classifier(x)
        return x
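A minimal sanity-check sketch, not part of the commit: both convolutions use padding='same', so a 1x28x28 FashionMNIST image keeps its spatial size until MaxPool2d halves it to 14x14, which is where the classifier's num_labels*14*14 in_features comes from. The constructor arguments match the call in app.py below; the batch size is an illustrative assumption.

import torch
from models.convmodel import MNISTnet  # import path as used in app.py; assumes models/ is importable

model = MNISTnet(input_channels=1, num_labels=10, hidden_layers=5)
dummy_batch = torch.randn(8, 1, 28, 28)   # N, C, H, W for FashionMNIST-sized images
logits = model(dummy_batch)               # conv blocks -> 10x14x14 -> flatten (1960) -> linear
print(logits.shape)                       # torch.Size([8, 10])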
.pytest_cache/pyt_project/data_setup.py
ADDED
@@ -0,0 +1,61 @@
import os

from torch.utils.data import DataLoader
from torchvision import datasets, transforms

NUM_WORKERS = os.cpu_count()

def create_dataloaders(
    train_dir: str,
    test_dir: str,
    transform: transforms.Compose,
    batch_size: int,
    num_workers: int = NUM_WORKERS
):
    """Creates training and testing DataLoaders.

    Takes in a training directory and testing directory path and turns
    them into PyTorch Datasets and then into PyTorch DataLoaders.

    Args:
      train_dir: Path to training directory.
      test_dir: Path to testing directory.
      transform: torchvision transforms to perform on training and testing data.
      batch_size: Number of samples per batch in each of the DataLoaders.
      num_workers: An integer for number of workers per DataLoader.

    Returns:
      A tuple of (train_dataloader, test_dataloader, class_names).
      Where class_names is a list of the target classes.
      Example usage:
        train_dataloader, test_dataloader, class_names = \
          create_dataloaders(train_dir=path/to/train_dir,
                             test_dir=path/to/test_dir,
                             transform=some_transform,
                             batch_size=32,
                             num_workers=4)
    """
    # Use ImageFolder to create dataset(s)
    train_data = datasets.ImageFolder(train_dir, transform=transform)
    test_data = datasets.ImageFolder(test_dir, transform=transform)

    # Get class names
    class_names = train_data.classes

    # Turn images into data loaders
    train_dataloader = DataLoader(
        train_data,
        batch_size=batch_size,
        shuffle=True,
        num_workers=num_workers,
        pin_memory=True,
    )
    test_dataloader = DataLoader(
        test_data,
        batch_size=batch_size,
        shuffle=False,
        num_workers=num_workers,
        pin_memory=True,
    )

    return train_dataloader, test_dataloader, class_names
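Because create_dataloaders relies on torchvision's ImageFolder, it assumes one sub-directory per class under each of train_dir and test_dir, and those folder names become class_names. A small hedged sketch of that assumption, using the pizza_steak_sushi path from train.py below (the exact class folders are illustrative):

from pathlib import Path

# ImageFolder expects: <train_dir>/<class_name>/<image files>
for class_dir in sorted(Path("data/pizza_steak_sushi/train").iterdir()):
    print(class_dir.name)   # e.g. pizza, steak, sushi -> returned as class_names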
.pytest_cache/pyt_project/engine.py
ADDED
@@ -0,0 +1,190 @@
from typing import Dict, List, Tuple

import torch

from tqdm.auto import tqdm

def train_step(model: torch.nn.Module,
               dataloader: torch.utils.data.DataLoader,
               loss_fn: torch.nn.Module,
               optimizer: torch.optim.Optimizer,
               device: torch.device) -> Tuple[float, float]:
    """Trains a PyTorch model for a single epoch.

    Turns a target PyTorch model to training mode and then
    runs through all of the required training steps (forward
    pass, loss calculation, optimizer step).

    Args:
      model: A PyTorch model to be trained.
      dataloader: A DataLoader instance for the model to be trained on.
      loss_fn: A PyTorch loss function to minimize.
      optimizer: A PyTorch optimizer to help minimize the loss function.
      device: A target device to compute on (e.g. "cuda" or "cpu").

    Returns:
      A tuple of training loss and training accuracy metrics.
      In the form (train_loss, train_accuracy). For example:

      (0.1112, 0.8743)
    """
    # Put model in train mode
    model.train()

    # Setup train loss and train accuracy values
    train_loss, train_acc = 0, 0

    # Loop through data loader data batches
    for batch, (X, y) in enumerate(dataloader):
        # Send data to target device
        X, y = X.to(device), y.to(device)

        # 1. Forward pass
        y_pred = model(X)

        # 2. Calculate and accumulate loss
        loss = loss_fn(y_pred, y)
        train_loss += loss.item()

        # 3. Optimizer zero grad
        optimizer.zero_grad()

        # 4. Loss backward
        loss.backward()

        # 5. Optimizer step
        optimizer.step()

        # Calculate and accumulate accuracy metric across all batches
        y_pred_class = torch.argmax(torch.softmax(y_pred, dim=1), dim=1)
        train_acc += (y_pred_class == y).sum().item() / len(y_pred)

    # Adjust metrics to get average loss and accuracy per batch
    train_loss = train_loss / len(dataloader)
    train_acc = train_acc / len(dataloader)
    return train_loss, train_acc

def test_step(model: torch.nn.Module,
              dataloader: torch.utils.data.DataLoader,
              loss_fn: torch.nn.Module,
              device: torch.device) -> Tuple[float, float]:
    """Tests a PyTorch model for a single epoch.

    Turns a target PyTorch model to "eval" mode and then performs
    a forward pass on a testing dataset.

    Args:
      model: A PyTorch model to be tested.
      dataloader: A DataLoader instance for the model to be tested on.
      loss_fn: A PyTorch loss function to calculate loss on the test data.
      device: A target device to compute on (e.g. "cuda" or "cpu").

    Returns:
      A tuple of testing loss and testing accuracy metrics.
      In the form (test_loss, test_accuracy). For example:

      (0.0223, 0.8985)
    """
    # Put model in eval mode
    model.eval()

    # Setup test loss and test accuracy values
    test_loss, test_acc = 0, 0

    # Turn on inference context manager
    with torch.inference_mode():
        # Loop through DataLoader batches
        for batch, (X, y) in enumerate(dataloader):
            # Send data to target device
            X, y = X.to(device), y.to(device)

            # 1. Forward pass
            test_pred_logits = model(X)

            # 2. Calculate and accumulate loss
            loss = loss_fn(test_pred_logits, y)
            test_loss += loss.item()

            # Calculate and accumulate accuracy
            test_pred_labels = test_pred_logits.argmax(dim=1)
            test_acc += ((test_pred_labels == y).sum().item() / len(test_pred_labels))

    # Adjust metrics to get average loss and accuracy per batch
    test_loss = test_loss / len(dataloader)
    test_acc = test_acc / len(dataloader)
    return test_loss, test_acc

def train(model: torch.nn.Module,
          train_dataloader: torch.utils.data.DataLoader,
          test_dataloader: torch.utils.data.DataLoader,
          optimizer: torch.optim.Optimizer,
          loss_fn: torch.nn.Module,
          epochs: int,
          device: torch.device) -> Dict[str, List[float]]:
    """Trains and tests a PyTorch model.

    Passes a target PyTorch model through train_step() and test_step()
    functions for a number of epochs, training and testing the model
    in the same epoch loop.

    Calculates, prints and stores evaluation metrics throughout.

    Args:
      model: A PyTorch model to be trained and tested.
      train_dataloader: A DataLoader instance for the model to be trained on.
      test_dataloader: A DataLoader instance for the model to be tested on.
      optimizer: A PyTorch optimizer to help minimize the loss function.
      loss_fn: A PyTorch loss function to calculate loss on both datasets.
      epochs: An integer indicating how many epochs to train for.
      device: A target device to compute on (e.g. "cuda" or "cpu").

    Returns:
      A dictionary of training and testing loss as well as training and
      testing accuracy metrics. Each metric has a value in a list for
      each epoch.
      In the form: {train_loss: [...],
                    train_acc: [...],
                    test_loss: [...],
                    test_acc: [...]}
      For example if training for epochs=2:
                   {train_loss: [2.0616, 1.0537],
                    train_acc: [0.3945, 0.3945],
                    test_loss: [1.2641, 1.5706],
                    test_acc: [0.3400, 0.2973]}
    """
    # Create empty results dictionary
    results = {"train_loss": [],
               "train_acc": [],
               "test_loss": [],
               "test_acc": []
               }

    # Loop through training and testing steps for a number of epochs
    for epoch in tqdm(range(epochs)):
        train_loss, train_acc = train_step(model=model,
                                           dataloader=train_dataloader,
                                           loss_fn=loss_fn,
                                           optimizer=optimizer,
                                           device=device)
        test_loss, test_acc = test_step(model=model,
                                        dataloader=test_dataloader,
                                        loss_fn=loss_fn,
                                        device=device)

        # Print out what's happening
        print(
            f"Epoch: {epoch+1} | "
            f"train_loss: {train_loss:.4f} | "
            f"train_acc: {train_acc:.4f} | "
            f"test_loss: {test_loss:.4f} | "
            f"test_acc: {test_acc:.4f}"
        )

        # Update results dictionary
        results["train_loss"].append(train_loss)
        results["train_acc"].append(train_acc)
        results["test_loss"].append(test_loss)
        results["test_acc"].append(test_acc)

    # Return the filled results at the end of the epochs
    return results
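A small hedged sketch, not part of the commit, of consuming the dictionary train() returns; the values are copied from the docstring example above, and in practice they would come from results = engine.train(...) as in train.py below.

# Illustrative values taken from the train() docstring example
results = {"train_loss": [2.0616, 1.0537],
           "train_acc": [0.3945, 0.3945],
           "test_loss": [1.2641, 1.5706],
           "test_acc": [0.3400, 0.2973]}

# Find the epoch with the best test accuracy (each list holds one value per epoch)
best_epoch = max(range(len(results["test_acc"])), key=lambda i: results["test_acc"][i])
print(f"Best test_acc {results['test_acc'][best_epoch]:.4f} at epoch {best_epoch + 1}")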
.pytest_cache/pyt_project/model_builder.py
ADDED
@@ -0,0 +1,54 @@
import torch

from torch import nn

class TinyVGG(nn.Module):
    """Creates the TinyVGG architecture.

    Replicates the TinyVGG architecture from the CNN explainer website in PyTorch.
    See the original architecture here: https://poloclub.github.io/cnn-explainer/

    Args:
      input_shape: An integer indicating number of input channels.
      hidden_units: An integer indicating number of hidden units between layers.
      output_shape: An integer indicating number of output units.
    """
    def __init__(self, input_shape: int, hidden_units: int, output_shape: int) -> None:
        super().__init__()
        self.conv_block_1 = nn.Sequential(
            nn.Conv2d(in_channels=input_shape,
                      out_channels=hidden_units,
                      kernel_size=3,
                      stride=1,
                      padding=0),
            nn.ReLU(),
            nn.Conv2d(in_channels=hidden_units,
                      out_channels=hidden_units,
                      kernel_size=3,
                      stride=1,
                      padding=0),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2,
                         stride=2)
        )
        self.conv_block_2 = nn.Sequential(
            nn.Conv2d(hidden_units, hidden_units, kernel_size=3, padding=0),
            nn.ReLU(),
            nn.Conv2d(hidden_units, hidden_units, kernel_size=3, padding=0),
            nn.ReLU(),
            nn.MaxPool2d(2)
        )
        self.classifier = nn.Sequential(
            nn.Flatten(),
            # Where did this in_features shape come from?
            # It's because each layer of our network compresses and changes the shape of our input data.
            nn.Linear(in_features=hidden_units*13*13,
                      out_features=output_shape)
        )

    def forward(self, x: torch.Tensor):
        x = self.conv_block_1(x)
        x = self.conv_block_2(x)
        x = self.classifier(x)
        return x
        # return self.classifier(self.conv_block_2(self.conv_block_1(x))) # <- leverage the benefits of operator fusion
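To answer the in_features question in the classifier above: with the 64x64 resize used in train.py, each padding=0, kernel_size=3 convolution shrinks the spatial size by 2 and each MaxPool2d halves it, landing on 13x13. A hedged verification sketch, not part of the commit; output_shape=3 stands in for len(class_names) with the three pizza/steak/sushi classes, and the batch size is illustrative.

import torch
from model_builder import TinyVGG  # assumes this module is on the path, as in train.py

model = TinyVGG(input_shape=3, hidden_units=10, output_shape=3)
x = torch.randn(1, 3, 64, 64)               # matches transforms.Resize((64, 64)) in train.py
# conv_block_1: 64 -> 62 -> 60 -> maxpool -> 30
# conv_block_2: 30 -> 28 -> 26 -> maxpool -> 13
print(model.conv_block_2(model.conv_block_1(x)).shape)  # torch.Size([1, 10, 13, 13])
print(model(x).shape)                                   # torch.Size([1, 3])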
.pytest_cache/pyt_project/train.py
ADDED
@@ -0,0 +1,61 @@
import os

import torch

from torchvision import transforms

import data_setup, engine, model_builder, utils


# Setup hyperparameters
NUM_EPOCHS = 5
BATCH_SIZE = 32
HIDDEN_UNITS = 10
LEARNING_RATE = 0.001

# Setup directories
train_dir = "data/pizza_steak_sushi/train"
test_dir = "data/pizza_steak_sushi/test"

# Setup target device
device = "cuda" if torch.cuda.is_available() else "cpu"

# Create transforms
data_transform = transforms.Compose([
    transforms.Resize((64, 64)),
    transforms.ToTensor()
])

# Create DataLoaders with help from data_setup.py
train_dataloader, test_dataloader, class_names = data_setup.create_dataloaders(
    train_dir=train_dir,
    test_dir=test_dir,
    transform=data_transform,
    batch_size=BATCH_SIZE
)

# Create model with help from model_builder.py
model = model_builder.TinyVGG(
    input_shape=3,
    hidden_units=HIDDEN_UNITS,
    output_shape=len(class_names)
).to(device)

# Set loss and optimizer
loss_fn = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(),
                             lr=LEARNING_RATE)

# Start training with help from engine.py
engine.train(model=model,
             train_dataloader=train_dataloader,
             test_dataloader=test_dataloader,
             loss_fn=loss_fn,
             optimizer=optimizer,
             epochs=NUM_EPOCHS,
             device=device)

# Save the model with help from utils.py
utils.save_model(model=model,
                 target_dir="models",
                 model_name="05_going_modular_script_mode_tinyvgg_model.pth")
.pytest_cache/pyt_project/utils.py
ADDED
@@ -0,0 +1,33 @@
from pathlib import Path

import torch

def save_model(model: torch.nn.Module,
               target_dir: str,
               model_name: str):
    """Saves a PyTorch model to a target directory.

    Args:
      model: A target PyTorch model to save.
      target_dir: A directory for saving the model to.
      model_name: A filename for the saved model. Should include
        either ".pth" or ".pt" as the file extension.

    Example usage:
      save_model(model=model_0,
                 target_dir="models",
                 model_name="05_going_modular_tinyvgg_model.pth")
    """
    # Create target directory
    target_dir_path = Path(target_dir)
    target_dir_path.mkdir(parents=True,
                          exist_ok=True)

    # Create model save path
    assert model_name.endswith(".pth") or model_name.endswith(".pt"), "model_name should end with '.pt' or '.pth'"
    model_save_path = target_dir_path / model_name

    # Save the model state_dict()
    print(f"[INFO] Saving model to: {model_save_path}")
    torch.save(obj=model.state_dict(),
               f=model_save_path)
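save_model only writes the state_dict, so loading it back requires re-instantiating the architecture first. A hedged sketch, not part of the commit, reusing TinyVGG with the same hyperparameters as train.py; output_shape=3 is assumed from the three pizza/steak/sushi classes.

import torch
from model_builder import TinyVGG  # assumes the same module layout as train.py

loaded_model = TinyVGG(input_shape=3, hidden_units=10, output_shape=3)  # must match the saved architecture
state_dict = torch.load("models/05_going_modular_script_mode_tinyvgg_model.pth")
loaded_model.load_state_dict(state_dict)
loaded_model.eval()   # switch to inference behaviour before making predictions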
.pytest_cache/v/cache/stepwise
ADDED
@@ -0,0 +1 @@
[]
app.py
CHANGED
@@ -1,54 +1,42 @@
 import gradio as gr
 import torch
 from torchvision import models, transforms
+import torchvision
 from PIL import Image
+import numpy as np
 import requests
-
-
-
-#
-
-#
-
-
-#
-#
-
-
-
-
-#
-
-
-
-
-
-
-
-
-
-
-# with torch.no_grad():
-#     output = model(input_batch)
-
-# # Get the predicted class index
-# _, predicted_idx = torch.max(output, 1)
-
-# # Get the predicted label
-# predicted_label = labels[predicted_idx.item()]
-
-# return predicted_label
-
-# Gradio UI components
-image_input = gr.Image()
-output_label = gr.Textbox()
+from models.convmodel import MNISTnet
+from pathlib import Path
+
+# Function to perform image classification
+def classify_image(image):
+    # imdata = np.asarray(Image.open(image_path))
+    alltransforms = torchvision.transforms.Compose([torchvision.transforms.ToTensor()])
+    tensor_image = alltransforms(image)
+    # bring it to the shape the model expects: N, C, H, W
+    # print(tensor_image.shape)
+    model_input_tensor_image = tensor_image.unsqueeze(dim=0)
+
+    # initialize the model
+    loaded_model = MNISTnet(input_channels=1, num_labels=10, hidden_layers=5).eval()
+    # load the state dict values
+    model_state_dict_path = Path("models")
+    loaded_model.load_state_dict(torch.load(model_state_dict_path / "MNISTnet_state_dict.pt"))
+    # make the prediction
+    with torch.inference_mode():
+        predicted_idx = loaded_model(model_input_tensor_image).argmax(dim=1)
+    label_mapping = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat',
+                     'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']
+    predicted_label = label_mapping[predicted_idx.item()]
+    # print(predicted_label)
+    return predicted_label
 
 # Gradio interface
 iface = gr.Interface(
-    fn=
-    inputs=
-    outputs=
-
+    fn=classify_image,
+    inputs=gr.Image(type="pil"),
+    outputs=gr.Label(num_top_classes=10),
+    examples=["lion.jpg", "cheetah.jpg"]
 )
 
 # Launch the Gradio app
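A hedged local smoke test for classify_image, not part of the commit: it assumes it runs in the same module as the function above, that the state dict is available at models/MNISTnet_state_dict.pt, and that FashionMNIST may be downloaded to a "data" folder (an illustrative path). Without a transform, FashionMNIST yields PIL images, matching the gr.Image(type="pil") input.

from torchvision import datasets

test_data = datasets.FashionMNIST(root="data", train=False, download=True)  # PIL images, no transform
pil_image, true_idx = test_data[0]
print("predicted:", classify_image(pil_image), "| actual:", test_data.classes[true_idx])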