text
stringlengths
1
93.6k
#TODO: add special evaluation metrics for original 5
#NOTE(review): indentation of this chunk was lost in extraction; the per-sample
#appends below presumably run once per validation sample inside a loop whose
#header is outside this chunk — confirm nesting against the original file.

#get dice metrics
diceWT.append(bratsUtils.dice(wt, wtMask))
diceTC.append(bratsUtils.dice(tc, tcMask))
diceET.append(bratsUtils.dice(et, etMask))

#get sensitivity metrics
sensWT.append(bratsUtils.sensitivity(wt, wtMask))
sensTC.append(bratsUtils.sensitivity(tc, tcMask))
sensET.append(bratsUtils.sensitivity(et, etMask))

#get specificity metrics
specWT.append(bratsUtils.specificity(wt, wtMask))
specTC.append(bratsUtils.specificity(tc, tcMask))
specET.append(bratsUtils.specificity(et, etMask))

#get hausdorff distance (opt-in via logHausdorff)
if logHausdorff:
    lists = [hdWT, hdTC, hdET]
    results = [wt, tc, et]
    masks = [wtMask, tcMask, etMask]
    for i in range(3):
        hd95 = bratsUtils.getHd95(results[i], masks[i])
        #ignore edge cases in which no distance could be calculated;
        #getHd95 signals those with a negative value
        if hd95 >= 0:
            lists[i].append(hd95)

#calculate mean dice scores and track the best epoch seen so far
meanDiceWT = np.mean(diceWT)
meanDiceTC = np.mean(diceTC)
meanDiceET = np.mean(diceET)
meanDice = np.mean([meanDiceWT, meanDiceTC, meanDiceET])
if meanDice > self.bestMeanDice:
    self.bestMeanDice = meanDice
    self.bestMeanDiceEpoch = epoch

#update moving avg
self._updateMovingAvg(meanDice, epoch)

#print metrics
print("------ Validation epoch {} ------".format(epoch))
print("Dice WT: {:.4f} TC: {:.4f} ET: {:.4f} Mean: {:.4f} MovingAvg: {:.4f}".format(meanDiceWT, meanDiceTC, meanDiceET, meanDice, self.movingAvg))
print("Sensitivity WT: {:.4f} TC: {:.4f} ET: {:.4f}".format(np.mean(sensWT), np.mean(sensTC), np.mean(sensET)))
print("Specificity WT: {:.4f} TC: {:.4f} ET: {:.4f}".format(np.mean(specWT), np.mean(specTC), np.mean(specET)))
if logHausdorff:
    print("Hausdorff WT: {:6.2f} TC: {:6.2f} ET: {:6.2f}".format(np.mean(hdWT), np.mean(hdTC), np.mean(hdET)))

#log metrics to the experiment tracker, if one is attached
if self.experiment is not None:
    self.experiment.log_metrics({"wt": meanDiceWT, "tc": meanDiceTC, "et": meanDiceET, "mean": meanDice, "movingAvg": self.movingAvg}, "dice", epoch)
    self.experiment.log_metrics({"wt": np.mean(sensWT), "tc": np.mean(sensTC), "et": np.mean(sensET)}, "sensitivity", epoch)
    self.experiment.log_metrics({"wt": np.mean(specWT), "tc": np.mean(specTC), "et": np.mean(specET)}, "specificity", epoch)
    if logHausdorff:
        #BUGFIX: key was "tc:" (stray colon inside the string), inconsistent
        #with every other log_metrics call — TC hausdorff was logged under a
        #malformed key
        self.experiment.log_metrics({"wt": np.mean(hdWT), "tc": np.mean(hdTC), "et": np.mean(hdET)}, "hausdorff", epoch)

#log validation time
if expConfig.LOG_VALIDATION_TIME:
    print("Time for validation: {:.2f}s".format(time.time() - startTime))
print("--------------------------------")
def logMemoryUsage(self, additionalString=""):
    """Print peak and current CUDA memory allocation in megabytes.

    Does nothing when no CUDA device is available. ``additionalString``
    is prepended verbatim to the printed line (e.g. a phase label).
    """
    if not torch.cuda.is_available():
        return
    mb = 1024 * 1024
    peak = torch.cuda.max_memory_allocated() / mb
    current = torch.cuda.memory_allocated() / mb
    print(additionalString + "Memory {:.0f}Mb max, {:.0f}Mb current".format(peak, current))
def saveToDisk(self, epoch):
    """Write a training checkpoint for ``epoch`` to disk.

    Saves network and optimizer state plus the tracked dice statistics
    (best mean dice, moving average and their epochs) to
    ``<checkpointsBasePathSave><expConfig.id>/e_<epoch>.pt``.
    """
    expConfig = self.expConfig
    #gather things to save
    saveDict = {
        "net_state_dict": expConfig.net.state_dict(),
        "optimizer_state_dict": expConfig.optimizer.state_dict(),
        "epoch": epoch,
        "bestMeanDice": self.bestMeanDice,
        "bestMeanDiceEpoch": self.bestMeanDiceEpoch,
        "movingAvg": self.movingAvg,
        "bestMovingAvgEpoch": self.bestMovingAvgEpoch,
        "bestMovingAvg": self.bestMovingAvg,
    }
    #NOTE: "lr_sheudler" is a (misspelled) attribute name used by the config
    #objects of this project; the key spelling must stay unchanged so that
    #existing checkpoints and loadFromDisk remain compatible
    if hasattr(expConfig, "lr_sheudler"):
        saveDict["lr_sheudler_state_dict"] = expConfig.lr_sheudler.state_dict()
    #save dict; exist_ok avoids the race between an existence check and the
    #directory creation (the old check-then-makedirs pair could raise if the
    #directory appeared in between)
    basePath = self.checkpointsBasePathSave + "{}".format(expConfig.id)
    os.makedirs(basePath, exist_ok=True)
    path = basePath + "/e_{}.pt".format(epoch)
    torch.save(saveDict, path)
#NOTE(review): indentation of this chunk was lost in extraction — the lines
#below belong inside the def; the method also continues past the end of this
#chunk, so only comments are added here.
def loadFromDisk(self, id, epoch):
#Restore a checkpoint written by saveToDisk: network weights first, then the
#optimizer state.
path = self._getCheckpointPathLoad(id, epoch)
checkpoint = torch.load(path)
self.expConfig.net.load_state_dict(checkpoint["net_state_dict"])
#load optimizer: hack necessary because load_state_dict has bugs (See https://github.com/pytorch/pytorch/issues/2830#issuecomment-336194949)
self.expConfig.optimizer.load_state_dict(checkpoint["optimizer_state_dict"])
#walk every tensor held in the optimizer state; the loop body lies beyond this
#chunk — presumably it moves the tensors to the active device, confirm in the
#full file
for state in self.expConfig.optimizer.state.values():
for k, v in state.items():