# NOTE(review): the four lines that were here ("text", "stringlengths", "1",
# "93.6k") are dataset-export column metadata, not part of the source file —
# converted to this comment so they no longer read as stray identifiers.
# NOTE(review): indentation was lost in extraction and the enclosing training
# method/loop begins before this chunk — statements are kept byte-identical,
# only comments are added.
# Forward pass and loss for the current mini-batch.
outputs = expConfig.net(inputs)
loss = expConfig.loss(outputs, labels)
# Drop references to the batch tensors early to lower peak GPU memory
# before backward() allocates gradient buffers.
del inputs, outputs, labels
loss.backward()
#update params
# Gradient accumulation: only step/zero the optimizer every
# VIRTUAL_BATCHSIZE iterations, or on the last batch of the epoch so the
# leftover accumulated gradients are not discarded.
if i == len(self.trainDataLoader) - 1 or i % expConfig.VIRTUAL_BATCHSIZE == (expConfig.VIRTUAL_BATCHSIZE - 1):
expConfig.optimizer.step()
expConfig.optimizer.zero_grad()
#logging every K iterations
# .item() detaches the scalar so running_loss does not keep the graph alive.
running_loss += loss.item()
del loss
if expConfig.LOG_EVERY_K_ITERATIONS > 0:
# Report the averaged loss every K-th iteration, then reset the accumulator.
if i % expConfig.LOG_EVERY_K_ITERATIONS == (expConfig.LOG_EVERY_K_ITERATIONS - 1):
print('[%d, %5d] loss: %.3f' % (epoch, i + 1, running_loss / expConfig.LOG_EVERY_K_ITERATIONS))
if expConfig.LOG_MEMORY_EVERY_K_ITERATIONS: self.logMemoryUsage()
running_loss = 0.0
#logging at end of epoch
if expConfig.LOG_MEMORY_EVERY_EPOCH: self.logMemoryUsage()
if expConfig.LOG_EPOCH_TIME:
print("Time for epoch: {:.2f}s".format(time.time() - startTime))
if expConfig.LOG_LR_EVERY_EPOCH:
# An optimizer may hold several param groups; print each group's lr.
for param_group in expConfig.optimizer.param_groups:
print("Current lr: {:.6f}".format(param_group['lr']))
#validation at end of epoch
# Runs validate() on epochs K-1, 2K-1, ... i.e. every K-th epoch (0-indexed).
if epoch % expConfig.VALIDATE_EVERY_K_EPOCHS == expConfig.VALIDATE_EVERY_K_EPOCHS - 1:
self.validate(epoch)
#take lr scheduler step
# NOTE(review): "lr_sheudler" is a misspelling of "lr_scheduler", but it is
# the attribute name the experiment configs actually expose (see the hasattr
# check), so it cannot be renamed here without updating every config.
if hasattr(expConfig, "lr_sheudler"):
# ReduceLROnPlateau requires a metric to monitor; self.movingAvg is
# presumably a moving average of the validation score — TODO confirm
# where it is updated (not visible in this chunk).
if isinstance(expConfig.lr_sheudler, optim.lr_scheduler.ReduceLROnPlateau):
expConfig.lr_sheudler.step(self.movingAvg)
else:
expConfig.lr_sheudler.step()
#save model
if expConfig.SAVE_CHECKPOINTS:
self.saveToDisk(epoch)
epoch = epoch + 1
#print best mean dice
print("Best mean dice: {:.4f} at epoch {}".format(self.bestMeanDice, self.bestMeanDiceEpoch))
# NOTE(review): indentation was lost in extraction and this method's body
# continues past the visible chunk — statements are kept byte-identical,
# only a docstring and comments are added.
def validate(self, epoch):
"""Evaluate the network on the validation set for the given epoch.

Accumulates per-region Dice, sensitivity, and specificity for the three
tumor regions (WT/TC/ET — presumably the BraTS whole-tumor / tumor-core /
enhancing-tumor regions, given the bratsUtils helpers), plus Hausdorff
distances on the epochs where that logging is enabled.  The metric
aggregation and reporting continue beyond this chunk.
"""
#set net up for inference
self.expConfig.net.eval()
expConfig = self.expConfig
# Hausdorff distance is costly, so it is only logged every K-th epoch;
# a value of 0 disables it entirely.
hausdorffEnabled = (expConfig.LOG_HAUSDORFF_EVERY_K_EPOCHS > 0)
logHausdorff = hausdorffEnabled and epoch % expConfig.LOG_HAUSDORFF_EVERY_K_EPOCHS == (expConfig.LOG_HAUSDORFF_EVERY_K_EPOCHS - 1)
startTime = time.time()
# No gradients are needed for evaluation.
with torch.no_grad():
# Per-batch metric accumulators, one list per tumor region.
diceWT, diceTC, diceET = [], [], []
sensWT, sensTC, sensET = [], [], []
specWT, specTC, specET = [], [], []
hdWT, hdTC, hdET = [], [], []
#buckets = np.zeros(5)
for i, data in enumerate(self.valDataLoader):
# feed inputs through neural net
# The middle element of each sample is unused here.
inputs, _, labels = data
inputs, labels = inputs.to(self.device), labels.to(self.device)
outputs = expConfig.net(inputs)
if expConfig.TRAIN_ORIGINAL_CLASSES:
# The net predicts the original label classes: take the argmax to
# get hard labels, then derive the region masks via bratsUtils.
outputsOriginal5 = outputs
outputs = torch.argmax(outputs, 1)
#hist, _ = np.histogram(outputs.cpu().numpy(), 5, (0, 4))
#buckets = buckets + hist
wt = bratsUtils.getWTMask(outputs)
tc = bratsUtils.getTCMask(outputs)
et = bratsUtils.getETMask(outputs)
# Ground-truth masks come from the argmax of the one-hot labels.
labels = torch.argmax(labels, 1)
wtMask = bratsUtils.getWTMask(labels)
tcMask = bratsUtils.getTCMask(labels)
etMask = bratsUtils.getETMask(labels)
else:
#separate outputs channelwise
# Outputs/labels carry one channel per region; view() squeezes the
# singleton channel dim.  The s[0..4] indexing assumes 5-D
# (batch, 3, D, H, W) tensors — TODO confirm against the loader.
wt, tc, et = outputs.chunk(3, dim=1)
s = wt.shape
wt = wt.view(s[0], s[2], s[3], s[4])
tc = tc.view(s[0], s[2], s[3], s[4])
et = et.view(s[0], s[2], s[3], s[4])
wtMask, tcMask, etMask = labels.chunk(3, dim=1)
s = wtMask.shape
wtMask = wtMask.view(s[0], s[2], s[3], s[4])
tcMask = tcMask.view(s[0], s[2], s[3], s[4])
etMask = etMask.view(s[0], s[2], s[3], s[4])