import functools
import torch
import torch.nn as nn
from networks.resnet import resnet50
from networks.base_model import BaseModel, init_weights

class Trainer(BaseModel):

    def name(self):
        return 'Trainer'

    def __init__(self, opt):
        super(Trainer, self).__init__(opt)

        # Training from scratch: random-init ResNet-50 with a single-logit head.
        if self.isTrain and not opt.continue_train:
            self.model = resnet50(pretrained=False, num_classes=1)

        # Testing or resuming: build the same architecture; weights are loaded
        # separately (see the commented-out load_networks call below).
        if not self.isTrain or opt.continue_train:
            self.model = resnet50(num_classes=1)

        if self.isTrain:
            # Binary real/fake classification on raw logits.
            self.loss_fn = nn.BCEWithLogitsLoss()
            # initialize optimizers
            if opt.optim == 'adam':
                self.optimizer = torch.optim.Adam(
                    filter(lambda p: p.requires_grad, self.model.parameters()),
                    lr=opt.lr, betas=(opt.beta1, 0.999))
            elif opt.optim == 'sgd':
                self.optimizer = torch.optim.SGD(
                    filter(lambda p: p.requires_grad, self.model.parameters()),
                    lr=opt.lr, momentum=0.0, weight_decay=0)
            else:
                raise ValueError("optim should be [adam, sgd]")

        # if not self.isTrain or opt.continue_train:
        #     self.load_networks(opt.epoch)
        # self.model.to(opt.gpu_ids[0])
        self.model.to(opt.device)

    def adjust_learning_rate(self, min_lr=1e-6):
        # Multiply every param group's lr by 0.9; abort (return False) as soon
        # as any group falls below min_lr, signalling the caller to stop.
        for param_group in self.optimizer.param_groups:
            param_group['lr'] *= 0.9
            if param_group['lr'] < min_lr:
                return False

        self.lr = param_group['lr']
        print('*' * 25)
        print(f'Changing lr from {param_group["lr"] / 0.9} to {param_group["lr"]}')
        print('*' * 25)
        return True
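
    # Worked example of the schedule above (an inference from the code, not a
    # documented guarantee): each call multiplies lr by 0.9, so after n calls
    # lr_n = lr_0 * 0.9**n. Starting from lr_0 = 1e-4, lr drops below the
    # default min_lr = 1e-6 once 0.9**n < 1e-2, i.e. on the
    # ceil(log(1e-2) / log(0.9)) = 44th call, at which point the method
    # returns False and the caller can stop training.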

    def set_input(self, input):
        # input is a (images, labels) pair from the dataloader.
        self.input = input[0].to(self.device)
        self.label = input[1].to(self.device).float()

    def forward(self):
        self.output = self.model(self.input)

    def get_loss(self):
        # Squeeze the (N, 1) logits to (N,) to match the label shape.
        return self.loss_fn(self.output.squeeze(1), self.label)

    def optimize_parameters(self):
        # One full step: forward pass, loss, backprop, parameter update.
        self.forward()
        self.loss = self.get_loss()
        self.optimizer.zero_grad()
        self.loss.backward()
        self.optimizer.step()
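

# ---------------------------------------------------------------------------
# Usage sketch (illustrative, not part of the original module): how a caller
# might drive this Trainer. `opt` is a hypothetical options object; the
# attribute names below (isTrain, continue_train, optim, lr, beta1, device)
# are assumptions inferred from their use in __init__ above, and `dataloader`
# is assumed to yield (images, labels) batches.
# ---------------------------------------------------------------------------
# from argparse import Namespace
#
# opt = Namespace(isTrain=True, continue_train=False, optim='adam',
#                 lr=1e-4, beta1=0.9, device='cuda:0')
# trainer = Trainer(opt)
# for batch in dataloader:
#     trainer.set_input(batch)
#     trainer.optimize_parameters()
# if not trainer.adjust_learning_rate():
#     print('lr below minimum; stopping early')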