# ERA_Assignment12 / network.py
import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim.lr_scheduler import OneCycleLR
from pytorch_lightning import LightningModule
from torchmetrics import Accuracy

from models import custom_resnet

class LitResnet(LightningModule):
    """LightningModule wrapping the custom ResNet defined in models/custom_resnet.py."""

    def __init__(self, num_classes=10, lr=0.05):
        super().__init__()
        self.save_hyperparameters()
        self.model = custom_resnet.Net()
        self.criterion = nn.CrossEntropyLoss()
        self.BATCH_SIZE = 512
        self.torchmetrics_accuracy = Accuracy(task="multiclass", num_classes=self.hparams.num_classes)

    def forward(self, x):
        return self.model(x)

    def training_step(self, batch, batch_idx):
        x, y = batch
        y_pred = self(x)
        loss = self.criterion(y_pred, y)
        acc = self.torchmetrics_accuracy(y_pred, y)
        self.log("train_loss", loss, prog_bar=True, on_step=False, on_epoch=True)
        self.log("train_acc", acc, prog_bar=True, on_step=False, on_epoch=True)
        return loss

    def evaluate(self, batch, stage=None):
        # Shared evaluation logic for the validation and test loops.
        x, y = batch
        y_pred = self(x)
        loss = self.criterion(y_pred, y)
        acc = self.torchmetrics_accuracy(y_pred, y)
        if stage:
            self.log(f"{stage}_loss", loss, prog_bar=True)
            self.log(f"{stage}_acc", acc, prog_bar=True)

    def test_step(self, batch, batch_idx):
        self.evaluate(batch, "test")

    def validation_step(self, batch, batch_idx):
        self.evaluate(batch, "val")
    def configure_optimizers(self):
        optimizer = optim.Adam(self.parameters(), lr=self.hparams.lr, weight_decay=1e-4)
        # The original referenced an undefined `train_loader` for steps_per_epoch;
        # Lightning's estimated_stepping_batches supplies the total step count instead.
        scheduler = OneCycleLR(
            optimizer,
            max_lr=5.38e-02,  # LR-finder result; alternatively self.hparams.lr
            total_steps=int(self.trainer.estimated_stepping_batches),
            pct_start=5 / self.trainer.max_epochs,
            div_factor=100,
            three_phase=False,
        )
        # OneCycleLR must advance every batch, so schedule with interval="step".
        return {
            "optimizer": optimizer,
            "lr_scheduler": {"scheduler": scheduler, "interval": "step"},
        }
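

# --- LR-range-test sketch (illustrative, not part of the original module) ----
# The hard-coded max_lr=5.38e-2 above was presumably found with torch_lr_finder,
# which the upstream file imported. This helper is a hedged reconstruction of
# that search; end_lr, num_iter, and the device choice are assumptions.
def find_max_lr(train_loader, device=None):
    from torch_lr_finder import LRFinder

    device = device or ("cuda" if torch.cuda.is_available() else "cpu")
    net = custom_resnet.Net()
    optimizer = optim.Adam(net.parameters(), lr=1e-4, weight_decay=1e-4)
    lr_finder = LRFinder(net, optimizer, nn.CrossEntropyLoss(), device=device)
    lr_finder.range_test(train_loader, end_lr=10, num_iter=200)
    lr_finder.plot()   # inspect the loss-vs-LR curve; pick the steepest-descent LR
    lr_finder.reset()  # restore the model and optimizer to their initial state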
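

# --- Usage sketch (illustrative, not part of the original module) ------------
# A minimal example of training LitResnet on CIFAR-10. The batch size, epoch
# count, and normalization statistics below are assumptions, not repo settings.
if __name__ == "__main__":
    from torch.utils.data import DataLoader
    from torchvision import datasets, transforms
    from pytorch_lightning import Trainer, seed_everything

    seed_everything(42)

    transform = transforms.Compose([
        transforms.ToTensor(),
        # Commonly cited CIFAR-10 channel statistics (assumed here).
        transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2470, 0.2435, 0.2616)),
    ])
    train_ds = datasets.CIFAR10("./data", train=True, download=True, transform=transform)
    val_ds = datasets.CIFAR10("./data", train=False, download=True, transform=transform)
    train_loader = DataLoader(train_ds, batch_size=512, shuffle=True, num_workers=2)
    val_loader = DataLoader(val_ds, batch_size=512, num_workers=2)

    model = LitResnet(num_classes=10, lr=0.05)
    trainer = Trainer(max_epochs=24, accelerator="auto", devices=1)
    trainer.fit(model, train_loader, val_loader)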