text
stringlengths 1
93.6k
|
|---|
expConfig = self.expConfig
|
print('==== VALIDATING ALL CHECKPOINTS ====')
|
print(self.expConfig.EXPERIMENT_NAME)
|
print("ID: {}".format(expConfig.id))
|
print("RESTORE ID {}".format(expConfig.RESTORE_ID))
|
print('====================================')
|
for epoch in range(self.startFromEpoch, self.expConfig.EPOCHS):
|
self.loadFromDisk(expConfig.RESTORE_ID, epoch)
|
self.validate(epoch)
|
#print best mean dice
|
print("Best mean dice: {:.4f} at epoch {}".format(self.bestMeanDice, self.bestMeanDiceEpoch))
|
def makePredictions(self):
    """Run the restored network over the challenge validation set and save
    one segmentation volume (NIfTI) per patient.

    The model weights are expected to be already loaded from disk by the
    constructor, identified by ``expConfig.RESTORE_ID`` /
    ``expConfig.RESTORE_EPOCH``. Each prediction is written to
    ``<predictionsBasePath>/<RESTORE_ID>_e<RESTORE_EPOCH>/<pid>.nii.gz``.

    Raises:
        AssertionError: if RESTORE_ID or RESTORE_EPOCH is missing from the
            experiment config (note: stripped under ``python -O``).
    """
    expConfig = self.expConfig

    assert(hasattr(expConfig, "RESTORE_ID"))
    assert(hasattr(expConfig, "RESTORE_EPOCH"))
    # Renamed from `id`/`epoch`: `id` shadowed the builtin id().
    restore_id = expConfig.RESTORE_ID
    restore_epoch = expConfig.RESTORE_EPOCH

    print('============ PREDICTING ============')
    print(self.expConfig.EXPERIMENT_NAME)
    print("ID: {}".format(expConfig.id))
    print("RESTORE ID {}".format(expConfig.RESTORE_ID))
    print("RESTORE EPOCH {}".format(expConfig.RESTORE_EPOCH))
    print('====================================')

    basePath = os.path.join(self.predictionsBasePath, "{}_e{}".format(restore_id, restore_epoch))
    # exist_ok avoids the racy exists()-then-makedirs() check-then-act.
    os.makedirs(basePath, exist_ok=True)

    with torch.no_grad():
        for i, data in enumerate(self.challengeValDataLoader):
            inputs, pids, xOffset, yOffset, zOffset = data
            print("processing {}".format(pids[0]))
            inputs = inputs.to(self.device)

            # Predict labels and embed the (possibly cropped) network output
            # into the full 240x240x155 volume at its original offset.
            outputs = expConfig.net(inputs)
            outputs = outputs[:, :, :, :, :155]
            s = outputs.shape
            fullsize = outputs.new_zeros((s[0], s[1], 240, 240, 155))
            # Trim any overhang past the volume bounds. The slice assignment
            # below clamps its end indices at 240/155, so the trimmed output
            # still matches the target region exactly.
            if xOffset + s[2] > 240:
                outputs = outputs[:, :, :240 - xOffset, :, :]
            if yOffset + s[3] > 240:
                outputs = outputs[:, :, :, :240 - yOffset, :]
            if zOffset + s[4] > 155:
                outputs = outputs[:, :, :, :, :155 - zOffset]
            fullsize[:, :, xOffset:xOffset + s[2], yOffset:yOffset + s[3], zOffset:zOffset + s[4]] = outputs

            # Binarize the three output channels. Names suggest BraTS regions
            # (whole tumor, tumor core, enhancing tumor) -- confirm upstream.
            wt, tc, et = fullsize.chunk(3, dim=1)
            s = fullsize.shape
            wt = (wt > 0.5).view(s[2], s[3], s[4])
            tc = (tc > 0.5).view(s[2], s[3], s[4])
            et = (et > 0.5).view(s[2], s[3], s[4])

            # Write labels; later assignments overwrite earlier ones, so
            # precedence is et(4) over tc(1) over wt(2).
            result = fullsize.new_zeros((s[2], s[3], s[4]), dtype=torch.uint8)
            result[wt] = 2
            result[tc] = 1
            result[et] = 4

            npResult = result.cpu().numpy()
            path = os.path.join(basePath, "{}.nii.gz".format(pids[0]))
            utils.save_nii(path, npResult, None, None)

    print("Done :)")
|
def train(self):
|
expConfig = self.expConfig
|
expConfig.optimizer.zero_grad()
|
print('======= RUNNING EXPERIMENT =======')
|
print(self.expConfig.EXPERIMENT_NAME)
|
print("ID: {}".format(expConfig.id))
|
print('==================================')
|
# for epoch in range(self.startFromEpoch, self.expConfig.EPOCHS):
|
epoch = self.startFromEpoch
|
while epoch < self.expConfig.EPOCHS and epoch <= self.bestMovingAvgEpoch + self.EARLY_STOPPING_AFTER_EPOCHS:
|
running_loss = 0.0
|
startTime = time.time()
|
# set net up training
|
self.expConfig.net.train()
|
for i, data in enumerate(self.trainDataLoader):
|
#load data
|
inputs, pid, labels = data
|
inputs, labels = inputs.to(self.device), labels.to(self.device)
|
#forward and backward pass
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.