repo
stringlengths 2
99
| file
stringlengths 13
225
| code
stringlengths 0
18.3M
| file_length
int64 0
18.3M
| avg_line_length
float64 0
1.36M
| max_line_length
int64 0
4.26M
| extension_type
stringclasses 1
value |
|---|---|---|---|---|---|---|
Tangent-Bundle-Neural-Networks
|
Tangent-Bundle-Neural-Networks-main/Journal_repo/mainWindPrediction.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@author: Claudio Battiloro
"""
import warnings
#warnings.filterwarnings("ignore") to suppress warnings
import sys
import pytorch_lightning as pl
from pytorch_lightning.callbacks.early_stopping import EarlyStopping
import torch
from architecture import RTNN, RMNN
device = torch.device("cuda" if torch.cuda.is_available() else torch.device("cpu"))
import numpy as np
from utils import get_laplacians, project_data, topk
from data_util import WindPrediction
from tensorboard import program
import webbrowser
import numpy.ma as ma
import pickle as pkl
# Set Seeds
np.random.seed(0)
pl.seed_everything(0)
# Custom activation function: Identity activation
class linear_act(torch.nn.Module):
    """Identity activation: returns its input unchanged.

    Used as a drop-in "no nonlinearity" module when a purely linear
    (filter-only) architecture variant is requested.
    """

    def __init__(self):
        super().__init__()

    def forward(self, x):
        # Identity map — no transformation applied.
        return x
# --- Experiment configuration for the wind-prediction benchmark ---
# NOTE: indentation of this dump is flattened; statements below are top-level script code.
# Open Tensorboard
open_tb = 0
# Select Architecture
# First CLI argument chooses the model family: 'tnn', 'ftnn', 'mnn', 'fmnn' or 'rnn'.
tnn_or_mnn = sys.argv[1]
#%% Data Importing
# Train
with open('/home/claudio/Desktop/Tangent-Bundle-Neural-Networks-main/Code_for_Journal/TNNs/Journal_repo/data/windfields/data2016.pkl', 'rb') as file:
data_all = pkl.load(file)
#Test
# NOTE(review): this path is missing a '/' between 'windfields' and 'data2017.pkl'
# (the train path above uses 'windfields/data2016.pkl') — likely a typo; confirm the file exists.
with open('/home/claudio/Desktop/Tangent-Bundle-Neural-Networks-main/Code_for_Journal/TNNs/Journal_repo/data/windfieldsdata2017.pkl', 'rb') as file:
data_all_test = pkl.load(file)
# Crop the data (the whole year will be slow)
how_many_days = 250 #250 ok
data_all = data_all[:how_many_days,:,:]
data_all_test = data_all_test[:how_many_days,:,:]
# Normalize the coordinates by the nominal earth radius to avoid numerical instability and
R = 6356.8
data_all[:,:,:3] = data_all[:,:,:3]/R
data_all_test[:,:,:3] = data_all_test[:,:,:3]/R
# Scale the data for numerical stability
# NOTE(review): the train split is divided by its range only (the min subtraction is
# commented out) while the test split below is min-max normalized — train/test are
# scaled inconsistently; confirm this is intentional.
data_all[:,:,3:] = data_all[:,:,3:]/(np.max(data_all[:,:,3:])-np.min(data_all[:,:,3:])) #-np.min(data_all[:,:,3:]))
data_all_test[:,:,3:] = (data_all_test[:,:,3:]-np.min(data_all_test[:,:,3:]))/(np.max(data_all_test[:,:,3:])-np.min(data_all_test[:,:,3:]))
n_max = data_all.shape[1]
p = 3 # Ambient Space Dimension
d = 2 # Manifold Dimension
# MonteCarlo Simulation Parameters
outer_num_rel = 8
num_avg_samples_coll = [100, 200,300, 400] # 1st Sampling: to reduce the initial dimensionality -> let us assume that the complete dataset is the complete manifold
time_window_coll = [20,50,80] # 2nd Sampling: the actual mask
# Architecture Parameters
in_features = int((data_all.shape[2]-p)/d) if tnn_or_mnn == 'tnn' or tnn_or_mnn == 'ftnn' else data_all.shape[2]-p # The last number is the output features. The lenght is the number of layers
n_layers = 3
in_features = [in_features]*n_layers
dense = []
# NOTE(review): `dense` appears unused in the rest of this script.
lr = 1e-3
# Linear (identity) activation for the "filter" variants, Tanh otherwise.
if tnn_or_mnn == "fmnn" or tnn_or_mnn == "ftnn":
sigma = linear_act()
else:
sigma = torch.nn.Tanh()
kappa = [2]*n_layers
num_epochs = 70
batch_size_ = 1
loss_function = torch.nn.MSELoss(reduction = 'sum')
weight_decay = 1e-3
# Logging Parameters
string = "Wind_Prediction" # Experiment Name
save_dir_ = '/home/claudio/Desktop/Tangent-Bundle-Neural-Networks-main/Code_for_Journal/TNNs/results' # Saving Directory
# Sheaf Laplacian Parameters
epsilon_pca = .8#.2#n**(-2/(true_d+1))# n^{-2/(d+1)}
gamma = .8
epsilon = .5
# NOTE(review): open_tb is assigned 0 twice (also near the top of this section).
open_tb = 0 # Opens TensorBoard in the default browser
tracking_address = '/home/claudio/Desktop/Tangent-Bundle-Neural-Networks-main/Code_for_Journal/TNNs/results/'+string # TB Tracking Folder
# Monte-Carlo sweep: for each target point count and time window, run
# `outer_num_rel` random sub-samplings, train a model per run, and aggregate
# the best validation MSE over runs (dropping NaNs and the 2 worst runs).
for num_avg_samples in num_avg_samples_coll:
print()
print("Testing with average number of points: "+str(num_avg_samples))
print()
# 1st Sampling (to reduce the initial dimensionality -> let us assume that the complete dataset is the complete manifold)
p_samp = num_avg_samples/n_max
for time_window in time_window_coll:
print()
print("Testing with Time Window: "+str(time_window))
print()
min_mse = np.zeros((outer_num_rel,))
# 1st Sampling
for outer_rel in range(outer_num_rel):
# Bernoulli mask over the n_max spatial points; expected size = num_avg_samples.
sampling_set = np.random.binomial(1, p_samp, n_max)>0
data = data_all[:,sampling_set,-2:]
data_test = data_all_test[:,sampling_set,-2:]
coord = data_all[0,sampling_set,:3]
n = coord.shape[0]
# Build the (sheaf/cloud) Laplacian and, for tangent-bundle variants, project
# the wind vectors onto the estimated local tangent spaces.
if tnn_or_mnn == "tnn" or tnn_or_mnn == "ftnn":
Delta_n_numpy, S,W,O_i_collection, d_hat, B_i_collection = get_laplacians(coord,epsilon,epsilon_pca,gamma, tnn_or_mnn)
data_proj = np.array([project_data(data[el,:,:], O_i_collection) for el in range(data.shape[0])])
data_proj_test = np.array([project_data(data_test[el,:,:], O_i_collection) for el in range(data_test.shape[0])])
if tnn_or_mnn == "mnn" or tnn_or_mnn == "fmnn":
Delta_n_numpy = get_laplacians(coord,epsilon,epsilon_pca,gamma, tnn_or_mnn)
data_proj = data
data_proj_test = data_test
if tnn_or_mnn == "rnn":
# Plain RNN baseline: identity "Laplacian", raw data.
Delta_n_numpy = np.eye(n)
data_proj = data
data_proj_test = data_test
# Normalize Laplacians
#[lambdas,_] = np.linalg.eigh(Delta_n_numpy)
#Delta_n_numpy = Delta_n_numpy/np.max(np.real(lambdas))
# One (shared) Laplacian per layer.
Delta_n = len(in_features)*[torch.from_numpy(Delta_n_numpy)]
data_torch = WindPrediction(data_proj,time_window,device)
data_torch_val = WindPrediction(data_proj_test,time_window,device)
hparams ={'in_features': in_features,\
'L': Delta_n,\
'lr': lr,\
'weight_decay': weight_decay,\
'sigma': sigma,\
'kappa': kappa,\
'time_window': time_window,\
'loss_function': loss_function,\
'device': device}
if tnn_or_mnn == "tnn" or tnn_or_mnn == "ftnn":
net = RTNN(**hparams).to(device)
else:
net = RMNN(**hparams).to(device)
train_loader = \
torch.utils.data.DataLoader(
data_torch, batch_size=batch_size_, batch_sampler=None, shuffle=True, num_workers=0)
val_loader =\
torch.utils.data.DataLoader(
data_torch_val, batch_size=how_many_days-2*time_window, batch_sampler=None, shuffle=False, num_workers=0)
logger = pl.loggers.TensorBoardLogger(name=string, save_dir=save_dir_)
early_stop_callback = EarlyStopping(monitor="test_mse", min_delta=1e-6, patience=5, verbose=False, mode="min")
# NOTE(review): accelerator='gpu' is hard-coded even though `device` falls back
# to CPU above — on a CPU-only machine this Trainer construction will fail; confirm.
trainer = pl.Trainer(max_epochs=num_epochs,logger = logger, log_every_n_steps= 1,
accelerator='gpu', devices=1, auto_select_gpus=False, callbacks=[early_stop_callback])#,check_val_every_n_epoch=int(num_epochs/10)
trainer.fit(net, train_loader,val_loader)
min_mse[outer_rel] = net.min_mse_val
min_mse = min_mse[~np.isnan(min_mse)] # Removes eventual corrupted runs (divergent, outliers, etc...)
#min_mse = min_mse[min_mse < 1.5]
# Mask out the two worst (largest) MSE runs before averaging.
to_delete = topk(min_mse,2)
mask = np.logical_or(min_mse == to_delete[0], min_mse == to_delete[1])
min_mse = ma.masked_array(min_mse, mask = mask)
# Merge results into the per-architecture pickle; the bare `except:` clauses below
# treat any failure as "file/key missing" — NOTE(review): they would also hide
# genuine I/O or unpickling errors.
try:
with open('/home/claudio/Desktop/Tangent-Bundle-Neural-Networks-main/Code_for_Journal/TNNs/results/'+string+'/res_'+tnn_or_mnn+'.pkl', 'rb') as file:
mse_dic = pkl.load(file)
print("Results file already exisisting... Updating!")
try:
tmp = mse_dic["avg_points"+str(num_avg_samples)]
tmp["time_window"+str(time_window)] = {"avg_mse":min_mse.mean(),"std_mse": min_mse.std(), "complete_coll": min_mse}
mse_dic["avg_points"+str(num_avg_samples)] = tmp
except:
mse_dic["avg_points"+str(num_avg_samples)] = {"time_window"+str(time_window):{"avg_mse":min_mse.mean(),"std_mse":min_mse.std(), "complete_coll": min_mse}}
with open('/home/claudio/Desktop/Tangent-Bundle-Neural-Networks-main/Code_for_Journal/TNNs/results/'+string+'/res_'+tnn_or_mnn+'.pkl', 'wb') as file:
pkl.dump(mse_dic, file)
print("Updated!")
except:
print("Results file not found... Creating!")
mse_dic = {"avg_points"+str(num_avg_samples):{"time_window"+str(time_window):{"avg_mse":min_mse.mean(),"std_mse":min_mse.std(), "complete_coll": min_mse}}}
with open('/home/claudio/Desktop/Tangent-Bundle-Neural-Networks-main/Code_for_Journal/TNNs/results/'+string+'/res_'+tnn_or_mnn+'.pkl', 'wb') as file:
pkl.dump(mse_dic, file)
print(mse_dic)
# Tensor Board Monitoring
if open_tb:
tb = program.TensorBoard()
tb.configure(argv=[None, '--logdir', tracking_address])
url = tb.launch()
# NOTE(review): message says "Tensorflow" but this launches TensorBoard.
print(f"Tensorflow listening on {url}")
webbrowser.open_new(url)
input("Press Enter to Exit")
| 9,072
| 47.518717
| 191
|
py
|
AP-BSN
|
AP-BSN-master/test.py
|
import argparse, os
import torch
from src.util.config_parse import ConfigParser
from src.trainer import get_trainer_class
def main():
    """Entry point for testing: parse CLI options, build the config, run test."""
    parser = argparse.ArgumentParser()
    parser.add_argument('-s', '--session_name', default=None, type=str)
    parser.add_argument('-c', '--config', default=None, type=str)
    parser.add_argument('-e', '--ckpt_epoch', default=0, type=int)
    parser.add_argument('-g', '--gpu', default=None, type=str)
    parser.add_argument('--pretrained', default=None, type=str)
    parser.add_argument('--thread', default=4, type=int)
    parser.add_argument('--self_en', action='store_true')
    parser.add_argument('--test_img', default=None, type=str)
    parser.add_argument('--test_dir', default=None, type=str)
    args = parser.parse_args()

    assert args.config is not None, 'config file path is needed'
    if args.session_name is None:
        # default the session name to the config file name
        args.session_name = args.config

    cfg = ConfigParser(args)

    # device setting: restrict visible GPUs before any CUDA initialization
    if cfg['gpu'] is not None:
        os.environ['CUDA_VISIBLE_DEVICES'] = cfg['gpu']

    # look up the registered trainer class and run the test phase
    trainer_cls = get_trainer_class(cfg['trainer'])
    trainer_cls(cfg).test()

if __name__ == '__main__':
    main()
| 1,335
| 30.069767
| 78
|
py
|
AP-BSN
|
AP-BSN-master/prep.py
|
import argparse, os
import multiprocessing as mp
from importlib import import_module
from src.datahandler import get_dataset_class
def main():
    """Crop every image of a dataset into patches using a process pool."""
    parser = argparse.ArgumentParser()
    parser.add_argument('-d', '--dataset', default='', type=str)
    parser.add_argument('-s', '--patch_size', default=512, type=int)
    parser.add_argument('-o', '--overlap', default=128, type=int)
    parser.add_argument('-p', '--process', default=8, type=int)
    opts = parser.parse_args()

    assert opts.dataset != '', 'dataset name is needed'
    dataset = get_dataset_class(opts.dataset)()

    # probe one sample to see which image kinds this dataset provides
    sample = dataset.__getitem__(0)
    flag_c = 'clean' in sample
    flag_n = 'real_noisy' in sample

    # fan out the per-image prep_save calls over a process pool
    pool = mp.Pool(opts.process)
    job_args = [
        [data_idx, opts.patch_size, opts.overlap, flag_c, False, flag_n]
        for data_idx in range(dataset.__len__())
    ]
    pool.starmap(dataset.prep_save, job_args)

if __name__ == '__main__':
    main()
| 1,023
| 27.444444
| 121
|
py
|
AP-BSN
|
AP-BSN-master/train.py
|
import argparse, os
from importlib import import_module
import torch
from src.util.config_parse import ConfigParser
from src.trainer import get_trainer_class
def main():
    """Entry point for training: parse CLI options, build the config, train."""
    parser = argparse.ArgumentParser()
    parser.add_argument('-s', '--session_name', default=None, type=str)
    parser.add_argument('-c', '--config', default=None, type=str)
    parser.add_argument('-r', '--resume', action='store_true')
    parser.add_argument('-g', '--gpu', default=None, type=str)
    parser.add_argument('--thread', default=4, type=int)
    args = parser.parse_args()

    assert args.config is not None, 'config file path is needed'
    if args.session_name is None:
        # default the session name to the config file name
        args.session_name = args.config

    cfg = ConfigParser(args)

    # device setting: restrict visible GPUs before any CUDA initialization
    if cfg['gpu'] is not None:
        os.environ['CUDA_VISIBLE_DEVICES'] = cfg['gpu']

    # look up the registered trainer class and run the training phase
    trainer_cls = get_trainer_class(cfg['trainer'])
    trainer_cls(cfg).train()

if __name__ == '__main__':
    main()
| 1,089
| 26.25
| 78
|
py
|
AP-BSN
|
AP-BSN-master/src/trainer/base.py
|
import os
import math
import time, datetime
import cv2
import numpy as np
import torch
from torch import nn
from torch import optim
import torch.autograd as autograd
from torch.utils.tensorboard import SummaryWriter
from torch.utils.data import DataLoader
from ..util.dnd_submission.bundle_submissions import bundle_submissions_srgb
from ..util.dnd_submission.dnd_denoise import denoise_srgb
from ..util.dnd_submission.pytorch_wrapper import pytorch_denoiser
from ..loss import Loss
from ..datahandler import get_dataset_class
from ..util.file_manager import FileManager
from ..util.logger import Logger
from ..util.util import human_format, np2tensor, rot_hflip_img, psnr, ssim, tensor2np, imread_tensor
from ..util.util import pixel_shuffle_down_sampling, pixel_shuffle_up_sampling
# Fixed width (in characters) of the status tag printed at the head of log lines.
status_len = 13
class BaseTrainer():
'''
Base trainer class to implement other trainer classes.
The functions below must be implemented in each concrete trainer subclass.
'''
def test(self):
raise NotImplementedError('define this function for each trainer')
def validation(self):
raise NotImplementedError('define this function for each trainer')
def _set_module(self):
# must return a dict mapping model name -> module.
raise NotImplementedError('define this function for each trainer')
def _set_optimizer(self):
# must return a dict of optimizers keyed by the corresponding model name.
raise NotImplementedError('define this function for each trainer')
def _forward_fn(self, module, loss, data):
# forward with model, loss function and data.
# return output of loss function.
raise NotImplementedError('define this function for each trainer')
#----------------------------#
# Train/Test functions #
#----------------------------#
def __init__(self, cfg):
# Store the session name and per-phase sub-configs; create file manager/logger.
self.session_name = cfg['session_name']
self.checkpoint_folder = 'checkpoint'
# get file manager and logger class
self.file_manager = FileManager(self.session_name)
self.logger = Logger()
# keep both the full config and convenient handles to its sections
self.cfg = cfg
self.train_cfg = cfg['training']
self.val_cfg = cfg['validation']
self.test_cfg = cfg['test']
self.ckpt_cfg = cfg['checkpoint']
def train(self):
# Top-level training loop: setup, optional warmup, then epoch loop and teardown.
# initializing
self._before_train()
# warmup (only when starting from scratch, i.e. epoch 1)
if self.epoch == 1 and self.train_cfg['warmup']:
self._warmup()
# training
for self.epoch in range(self.epoch, self.max_epoch+1):
self._before_epoch()
self._run_epoch()
self._after_epoch()
self._after_train()
def _warmup(self):
# Linear learning-rate warmup for at most one epoch's worth of iterations.
self._set_status('warmup')
# make dataloader iterable.
self.train_dataloader_iter = {}
for key in self.train_dataloader:
self.train_dataloader_iter[key] = iter(self.train_dataloader[key])
warmup_iter = self.train_cfg['warmup_iter']
if warmup_iter > self.max_iter:
# cap warmup at one epoch of iterations
self.logger.info('currently warmup support 1 epoch as maximum. warmup iter is replaced to 1 epoch iteration. %d -> %d' \
% (warmup_iter, self.max_iter))
warmup_iter = self.max_iter
for self.iter in range(1, warmup_iter+1):
self._adjust_warmup_lr(warmup_iter)
self._before_step()
self._run_step()
self._after_step()
def _before_test(self, dataset_load):
# Prepare modules, checkpoint, dataloader, and denoiser wrappers for testing.
# initialing
self.module = self._set_module()
self._set_status('test')
# load checkpoint file (-1 means "latest"; a pretrained path overrides the local ckpt)
ckpt_epoch = self._find_last_epoch() if self.cfg['ckpt_epoch'] == -1 else self.cfg['ckpt_epoch']
ckpt_name = self.cfg['pretrained'] if self.cfg['pretrained'] is not None else None
self.load_checkpoint(ckpt_epoch, name=ckpt_name)
self.epoch = self.cfg['ckpt_epoch'] # for print or saving file name.
# test dataset loader (skipped for single-image / directory inference)
if dataset_load:
self.test_dataloader = self._set_dataloader(self.test_cfg, batch_size=1, shuffle=False, num_workers=self.cfg['thread'])
# wrapping and device setting
if self.cfg['gpu'] != 'None':
# model to GPU
self.model = {key: nn.DataParallel(self.module[key]).cuda() for key in self.module}
else:
self.model = {key: nn.DataParallel(self.module[key]) for key in self.module}
# evaluation mode and set status
self._eval_mode()
self._set_status('test %03d'%self.epoch)
# start message
self.logger.highlight(self.logger.get_start_msg())
# set denoiser
self._set_denoiser()
# wrapping denoiser w/ self_ensemble
if self.cfg['self_en']:
# (warning) self_ensemble cannot be applied with multi-input model
denoiser_fn = self.denoiser
self.denoiser = lambda *input_data: self.self_ensemble(denoiser_fn, *input_data)
# wrapping denoiser w/ crop test (tile the input to limit memory use)
if 'crop' in self.cfg['test']:
# (warning) self_ensemble cannot be applied with multi-input model
denoiser_fn = self.denoiser
self.denoiser = lambda *input_data: self.crop_test(denoiser_fn, *input_data, size=self.cfg['test']['crop'])
def _before_train(self):
    """Set up modules, dataloaders, loss, optimizers, logger and tensorboard
    before the training loop starts (also handles resume from checkpoint)."""
    # cudnn
    torch.backends.cudnn.benchmark = False

    # initializing
    self.module = self._set_module()

    # training dataset loader
    self.train_dataloader = self._set_dataloader(self.train_cfg, batch_size=self.train_cfg['batch_size'], shuffle=True, num_workers=self.cfg['thread'])

    # validation dataset loader
    if self.val_cfg['val']:
        self.val_dataloader = self._set_dataloader(self.val_cfg, batch_size=1, shuffle=False, num_workers=self.cfg['thread'])

    # other configuration
    self.max_epoch = self.train_cfg['max_epoch']
    self.epoch = self.start_epoch = 1
    # base number of iterations comes from the dataset registered as 'dataset'
    max_len = self.train_dataloader['dataset'].dataset.__len__()
    self.max_iter = math.ceil(max_len / self.train_cfg['batch_size'])

    self.loss = Loss(self.train_cfg['loss'], self.train_cfg['tmp_info'])
    self.loss_dict = {'count': 0}
    self.tmp_info = {}
    self.loss_log = []

    # set optimizer
    self.optimizer = self._set_optimizer()
    for opt in self.optimizer.values():
        opt.zero_grad(set_to_none=True)

    # resume
    if self.cfg["resume"]:
        # find and load the last checkpoint, continue from the next epoch
        load_epoch = self._find_last_epoch()
        self.load_checkpoint(load_epoch)
        self.epoch = load_epoch + 1
        # logger initialization (append to the existing log file)
        self.logger = Logger((self.max_epoch, self.max_iter), log_dir=self.file_manager.get_dir(''), log_file_option='a')
    else:
        # logger initialization (start a fresh log file)
        self.logger = Logger((self.max_epoch, self.max_iter), log_dir=self.file_manager.get_dir(''), log_file_option='w')

    # tensorboard
    tboard_time = datetime.datetime.now().strftime('%m-%d-%H-%M')
    self.tboard = SummaryWriter(log_dir=self.file_manager.get_dir('tboard/%s'%tboard_time))

    # wrapping and device setting
    if self.cfg['gpu'] != 'None':
        # model to GPU
        self.model = {key: nn.DataParallel(self.module[key]).cuda() for key in self.module}
        # optimizer state to GPU as well.
        # BUGFIX(naming): the loop variable was previously named `optim`,
        # shadowing the `torch.optim` module imported at file top.
        for optimizer in self.optimizer.values():
            for state in optimizer.state.values():
                for k, v in state.items():
                    if isinstance(v, torch.Tensor):
                        state[k] = v.cuda()
    else:
        self.model = {key: nn.DataParallel(self.module[key]) for key in self.module}

    # start message
    self.logger.info(self.summary())
    self.logger.start((self.epoch - 1, 0))
    self.logger.highlight(self.logger.get_start_msg())
def _after_train(self):
# finish message
self.logger.highlight(self.logger.get_finish_msg())
def _before_epoch(self):
# Refresh per-epoch iterators and switch models to train mode.
self._set_status('epoch %03d/%03d'%(self.epoch, self.max_epoch))
# make dataloader iterable.
self.train_dataloader_iter = {}
for key in self.train_dataloader:
self.train_dataloader_iter[key] = iter(self.train_dataloader[key])
# model training mode
self._train_mode()
def _run_epoch(self):
# One full pass: max_iter optimization steps.
for self.iter in range(1, self.max_iter+1):
self._before_step()
self._run_step()
self._after_step()
def _after_epoch(self):
# save checkpoint at the configured interval (once past start_epoch)
if self.epoch >= self.ckpt_cfg['start_epoch']:
if (self.epoch-self.ckpt_cfg['start_epoch'])%self.ckpt_cfg['interval_epoch'] == 0:
self.save_checkpoint()
# validation at the configured interval
if self.val_cfg['val']:
if self.epoch >= self.val_cfg['start_epoch'] and self.val_cfg['val']:
if (self.epoch-self.val_cfg['start_epoch']) % self.val_cfg['interval_epoch'] == 0:
self._eval_mode()
self._set_status('val %03d'%self.epoch)
self.validation()
def _before_step(self):
pass
def _run_step(self):
# One optimization step: fetch a batch from every registered dataloader,
# forward, sum losses, backward, step and zero every optimizer, then
# accumulate scalar loss / tmp_info values for periodic logging.
# get data (data should be dictionary of Tensors)
data = {}
for key in self.train_dataloader_iter:
data[key] = next(self.train_dataloader_iter[key])
# to device
if self.cfg['gpu'] != 'None':
for dataset_key in data:
for key in data[dataset_key]:
data[dataset_key][key] = data[dataset_key][key].cuda()
# forward, cal losses, backward)
losses, tmp_info = self._forward_fn(self.model, self.loss, data)
# mean over DataParallel replicas / batch
losses = {key: losses[key].mean() for key in losses}
tmp_info = {key: tmp_info[key].mean() for key in tmp_info}
# backward
total_loss = sum(v for v in losses.values())
total_loss.backward()
# optimizer step
for opt in self.optimizer.values():
opt.step()
# zero grad
for opt in self.optimizer.values():
opt.zero_grad(set_to_none=True)
# save losses and tmp_info (running sums; divided by 'count' when printed)
for key in losses:
if key != 'count':
if key in self.loss_dict:
self.loss_dict[key] += float(losses[key])
else:
self.loss_dict[key] = float(losses[key])
for key in tmp_info:
if key in self.tmp_info:
self.tmp_info[key] += float(tmp_info[key])
else:
self.tmp_info[key] = float(tmp_info[key])
self.loss_dict['count'] += 1
def _after_step(self):
# adjust learning rate
self._adjust_lr()
# print loss at the logging interval and at the last iteration of the epoch
if (self.iter%self.cfg['log']['interval_iter']==0 and self.iter!=0) or (self.iter == self.max_iter):
self.print_loss()
# print progress
self.logger.print_prog_msg((self.epoch-1, self.iter-1))
def test_dataloader_process(self, dataloader, add_con=0., floor=False, img_save=True, img_save_path=None, info=True):
'''
do test or evaluation process for each dataloader
include following steps:
1. denoise image
2. calculate PSNR & SSIM
3. (optional) save denoised image
Args:
dataloader : dataloader to be tested.
add_con : add constant to denoised image.
floor : floor denoised image. (default range is [0, 255])
img_save : whether to save denoised and clean images.
img_save_path (optional) : path to save denoised images.
info (optional) : whether to print info.
Returns:
psnr : total PSNR score of dataloader results or None (if clean image is not available)
ssim : total SSIM score of dataloader results or None (if clean image is not available)
'''
# make directory
self.file_manager.make_dir(img_save_path)
# test start
psnr_sum = 0.
ssim_sum = 0.
count = 0
for idx, data in enumerate(dataloader):
# to device
if self.cfg['gpu'] != 'None':
for key in data:
data[key] = data[key].cuda()
# forward: gather the model inputs named in cfg['model_input'], then denoise
input_data = [data[arg] for arg in self.cfg['model_input']]
denoised_image = self.denoiser(*input_data)
# add constant and floor (if floor is on)
denoised_image += add_con
if floor: denoised_image = torch.floor(denoised_image)
# evaluation (only possible when a clean reference is available)
if 'clean' in data:
psnr_value = psnr(denoised_image, data['clean'])
ssim_value = ssim(denoised_image, data['clean'])
psnr_sum += psnr_value
ssim_sum += ssim_value
count += 1
# image save
if img_save:
# to cpu
if 'clean' in data:
clean_img = data['clean'].squeeze(0).cpu()
# pick whichever noisy variant this model consumes
if 'real_noisy' in self.cfg['model_input']: noisy_img = data['real_noisy']
elif 'syn_noisy' in self.cfg['model_input']: noisy_img = data['syn_noisy']
elif 'noisy' in self.cfg['model_input']: noisy_img = data['noisy']
else: noisy_img = None
if noisy_img is not None: noisy_img = noisy_img.squeeze(0).cpu()
denoi_img = denoised_image.squeeze(0).cpu()
# write psnr value on file name
denoi_name = '%04d_DN_%.2f'%(idx, psnr_value) if 'clean' in data else '%04d_DN'%idx
# imwrite
if 'clean' in data: self.file_manager.save_img_tensor(img_save_path, '%04d_CL'%idx, clean_img)
if noisy_img is not None: self.file_manager.save_img_tensor(img_save_path, '%04d_N'%idx, noisy_img)
self.file_manager.save_img_tensor(img_save_path, denoi_name, denoi_img)
# procedure log msg
if info:
if 'clean' in data:
self.logger.note('[%s] testing... %04d/%04d. PSNR : %.2f dB'%(self.status, idx, dataloader.__len__(), psnr_value), end='\r')
else:
self.logger.note('[%s] testing... %04d/%04d.'%(self.status, idx, dataloader.__len__()), end='\r')
# final log msg
if count > 0:
self.logger.val('[%s] Done! PSNR : %.2f dB, SSIM : %.3f'%(self.status, psnr_sum/count, ssim_sum/count))
else:
self.logger.val('[%s] Done!'%self.status)
# return averages (None, None when no clean references were seen)
if count == 0:
return None, None
else:
return psnr_sum/count, ssim_sum/count
def test_img(self, image_dir, save_dir='./'):
'''
Inference a single image: load, denoise, post-process, save as <name>_DN.png.
'''
# load image (BGR uint8 -> float tensor with batch dim)
noisy = np2tensor(cv2.imread(image_dir))
noisy = noisy.unsqueeze(0).float()
# to device
if self.cfg['gpu'] != 'None':
noisy = noisy.cuda()
# forward
denoised = self.denoiser(noisy)
# post-process (same add-constant / floor policy as dataset testing)
denoised += self.test_cfg['add_con']
if self.test_cfg['floor']: denoised = torch.floor(denoised)
# save image
denoised = tensor2np(denoised)
denoised = denoised.squeeze(0)
name = image_dir.split('/')[-1].split('.')[0]
cv2.imwrite(os.path.join(save_dir, name+'_DN.png'), denoised)
# print message
self.logger.note('[%s] saved : %s'%(self.status, os.path.join(save_dir, name+'_DN.png')))
def test_dir(self, direc):
'''
Inference all images in the directory; results go to <direc>/results.
'''
# NOTE(review): makedirs is called once per file inside the loop; harmless
# (exist_ok=True) but could be hoisted out.
for ff in [f for f in os.listdir(direc) if os.path.isfile(os.path.join(direc, f))]:
os.makedirs(os.path.join(direc, 'results'), exist_ok=True)
self.test_img(os.path.join(direc, ff), os.path.join(direc, 'results'))
def test_DND(self, img_save_path):
'''
Benchmarking DND dataset: denoise each crop via the submission toolkit and
bundle the .mat files for upload.
'''
# make directories for .mat & image saving
self.file_manager.make_dir(img_save_path)
self.file_manager.make_dir(img_save_path + '/mat')
if self.test_cfg['save_image']: self.file_manager.make_dir(img_save_path + '/img')
def wrap_denoiser(Inoisy, nlf, idx, kidx):
# DND provides images in [0,1]; scale to [0,255] for the model.
noisy = 255 * torch.from_numpy(Inoisy)
# to device
if self.cfg['gpu'] != 'None':
noisy = noisy.cuda()
noisy = autograd.Variable(noisy)
# processing: HWC -> CHW, dataset pre-processing, add batch dim
noisy = noisy.permute(2,0,1)
noisy = self.test_dataloader['dataset'].dataset._pre_processing({'real_noisy': noisy})['real_noisy']
noisy = noisy.view(1,noisy.shape[0], noisy.shape[1], noisy.shape[2])
denoised = self.denoiser(noisy)
denoised += self.test_cfg['add_con']
if self.test_cfg['floor']: denoised = torch.floor(denoised)
denoised = denoised[0,...].cpu().numpy()
denoised = np.transpose(denoised, [1,2,0])
# image save
# NOTE(review): the `and False` below permanently disables this branch —
# looks like a leftover debug switch; confirm whether saving should be re-enabled.
if self.test_cfg['save_image'] and False:
self.file_manager.save_img_numpy(img_save_path+'/img', '%02d_%02d_N'%(idx, kidx), 255*Inoisy)
self.file_manager.save_img_numpy(img_save_path+'/img', '%02d_%02d_DN'%(idx, kidx), denoised)
# scale back to DND's [0,1] range
return denoised / 255
denoise_srgb(wrap_denoiser, './dataset/DND/dnd_2017', self.file_manager.get_dir(img_save_path+'/mat'))
bundle_submissions_srgb(self.file_manager.get_dir(img_save_path+'/mat'))
# info
self.logger.val('[%s] Done!'%self.status)
def _set_denoiser(self):
# Prefer the model's dedicated `denoise` method; otherwise call the module itself.
if hasattr(self.model['denoiser'].module, 'denoise'):
self.denoiser = self.model['denoiser'].module.denoise
else:
self.denoiser = self.model['denoiser'].module
@torch.no_grad()
def crop_test(self, fn, x, size=512, overlap=0):
'''
crop test image and inference due to memory problem
Tiles x into size×size crops (stepping size-overlap), denoises each crop,
and stitches results back, discarding `overlap` pixels at each non-border edge.
'''
b,c,h,w = x.shape
denoised = torch.zeros_like(x)
for i in range(0,h,size-overlap):
for j in range(0,w,size-overlap):
end_i = min(i+size, h)
end_j = min(j+size, w)
x_crop = x[...,i:end_i,j:end_j]
denoised_crop = fn(x_crop)
# skip the overlapping margin except at the top/left image border
start_i = overlap if i != 0 else 0
start_j = overlap if j != 0 else 0
denoised[..., i+start_i:end_i, j+start_j:end_j] = denoised_crop[..., start_i:, start_j:]
return denoised
@torch.no_grad()
def self_ensemble(self, fn, x):
'''
Geometry self-ensemble function
Note that in this function there is no gradient calculation.
Args:
fn : denoiser function
x : input image
Return:
result : self-ensembled image
'''
result = torch.zeros_like(x)
# average fn over all 8 dihedral transforms (4 rotations × optional h-flip),
# undoing each transform before accumulating
for i in range(8):
tmp = fn(rot_hflip_img(x, rot_times=i%4, hflip=i//4))
tmp = rot_hflip_img(tmp, rot_times=4-i%4)
result += rot_hflip_img(tmp, hflip=i//4)
return result / 8
#----------------------------#
# Utility functions #
#----------------------------#
def print_loss(self):
# Log the running-average losses (accumulated in _run_step), a 100-entry
# moving average, and mirror everything to tensorboard; then reset counters.
temporal_loss = 0.
for key in self.loss_dict:
if key != 'count':
temporal_loss += self.loss_dict[key]/self.loss_dict['count']
self.loss_log += [temporal_loss]
if len(self.loss_log) > 100: self.loss_log.pop(0)
# print status and learning rate
loss_out_str = '[%s] %04d/%04d, lr:%s ∣ '%(self.status, self.iter, self.max_iter, "{:.1e}".format(self._get_current_lr()))
global_iter = (self.epoch-1)*self.max_iter + self.iter
# print losses
avg_loss = np.mean(self.loss_log)
loss_out_str += 'avg_100 : %.3f ∣ '%(avg_loss)
self.tboard.add_scalar('loss/avg_100', avg_loss, global_iter)
for key in self.loss_dict:
if key != 'count':
loss = self.loss_dict[key]/self.loss_dict['count']
loss_out_str += '%s : %.3f ∣ '%(key, loss)
self.tboard.add_scalar('loss/%s'%key, loss, global_iter)
self.loss_dict[key] = 0.
# print temporal information
if len(self.tmp_info) > 0:
loss_out_str += '\t['
for key in self.tmp_info:
loss_out_str += ' %s : %.2f'%(key, self.tmp_info[key]/self.loss_dict['count'])
self.tmp_info[key] = 0.
loss_out_str += ' ]'
# reset
self.loss_dict['count'] = 0
self.logger.info(loss_out_str)
def save_checkpoint(self):
# Persist epoch, all model weights and all optimizer states in one file.
checkpoint_name = self._checkpoint_name(self.epoch)
torch.save({'epoch': self.epoch,
'model_weight': {key:self.model[key].module.state_dict() for key in self.model},
'optimizer_weight': {key:self.optimizer[key].state_dict() for key in self.optimizer}},
os.path.join(self.file_manager.get_dir(self.checkpoint_folder), checkpoint_name))
def load_checkpoint(self, load_epoch=0, name=None):
# Load a checkpoint either from the session's local checkpoint folder
# (by epoch number) or, when `name` is given, from the global ./ckpt folder.
if name is None:
# if scratch, return
if load_epoch == 0: return
# load from local checkpoint folder
file_name = os.path.join(self.file_manager.get_dir(self.checkpoint_folder), self._checkpoint_name(load_epoch))
else:
# load from global checkpoint folder
file_name = os.path.join('./ckpt', name)
# check file exist
assert os.path.isfile(file_name), 'there is no checkpoint: %s'%file_name
# load checkpoint (epoch, model_weight, optimizer_weight)
saved_checkpoint = torch.load(file_name)
self.epoch = saved_checkpoint['epoch']
for key in self.module:
self.module[key].load_state_dict(saved_checkpoint['model_weight'][key])
# optimizer may not exist yet (e.g. when loading for testing)
if hasattr(self, 'optimizer'):
for key in self.optimizer:
self.optimizer[key].load_state_dict(saved_checkpoint['optimizer_weight'][key])
# print message
self.logger.note('[%s] model loaded : %s'%(self.status, file_name))
def _checkpoint_name(self, epoch):
# e.g. "<session>_007.pth"
return self.session_name + '_%03d'%epoch + '.pth'
def _find_last_epoch(self):
# Scan the checkpoint folder and return the highest saved epoch number.
checkpoint_list = os.listdir(self.file_manager.get_dir(self.checkpoint_folder))
epochs = [int(ckpt.replace('%s_'%self.session_name, '').replace('.pth', '')) for ckpt in checkpoint_list]
assert len(epochs) > 0, 'There is no resumable checkpoint on session %s.'%self.session_name
return max(epochs)
def _get_current_lr(self):
# Return the lr of the first param group of the first optimizer
# (the nested loops return on their first iteration).
for first_optim in self.optimizer.values():
for param_group in first_optim.param_groups:
return param_group['lr']
def _set_dataloader(self, dataset_cfg, batch_size, shuffle, num_workers):
# Build one DataLoader per dataset entry; a bare (non-dict) entry is
# registered under the default key 'dataset'.
dataloader = {}
dataset_dict = dataset_cfg['dataset']
if not isinstance(dataset_dict, dict):
dataset_dict = {'dataset': dataset_dict}
for key in dataset_dict:
# per-dataset constructor kwargs live under '<key>_args' in the config
args = dataset_cfg[key + '_args']
dataset = get_dataset_class(dataset_dict[key])(**args)
dataloader[key] = DataLoader(dataset=dataset, batch_size=batch_size, shuffle=shuffle, num_workers=num_workers, pin_memory=False)
return dataloader
def _set_one_optimizer(self, opt, parameters, lr):
lr = float(self.train_cfg['init_lr'])
if opt['type'] == 'SGD':
return optim.SGD(parameters, lr=lr, momentum=float(opt['SGD']['momentum']), weight_decay=float(opt['SGD']['weight_decay']))
elif opt['type'] == 'Adam':
return optim.Adam(parameters, lr=lr, betas=opt['Adam']['betas'])
elif opt['type'] == 'AdamW':
return optim.Adam(parameters, lr=lr, betas=opt['AdamW']['betas'], weight_decay=float(opt['AdamW']['weight_decay']))
else:
raise RuntimeError('ambiguious optimizer type: {}'.format(opt['type']))
def _adjust_lr(self):
# Per-iteration learning-rate scheduling; called from _after_step.
sched = self.train_cfg['scheduler']
if sched['type'] == 'step':
'''
step decreasing scheduler
Args:
step_size: step size(epoch) to decay the learning rate
gamma: decay rate
'''
# decay once at the end of every step_size-th epoch
if self.iter == self.max_iter:
args = sched['step']
if self.epoch % args['step_size'] == 0:
for optimizer in self.optimizer.values():
lr_before = optimizer.param_groups[0]['lr']
for param_group in optimizer.param_groups:
param_group["lr"] = lr_before * float(args['gamma'])
elif sched['type'] == 'linear':
'''
linear decreasing scheduler
Args:
step_size: step size(epoch) to decrease the learning rate
gamma: decay rate for reset learning rate
'''
args = sched['linear']
# reset_lr is the cycle's starting lr: init_lr * gamma^(completed cycles)
if not hasattr(self, 'reset_lr'):
self.reset_lr = float(self.train_cfg['init_lr']) * float(args['gamma'])**((self.epoch-1)//args['step_size'])
# reset lr to initial value at the end of each step_size-epoch cycle
if self.epoch % args['step_size'] == 0 and self.iter == self.max_iter:
self.reset_lr = float(self.train_cfg['init_lr']) * float(args['gamma'])**(self.epoch//args['step_size'])
for optimizer in self.optimizer.values():
for param_group in optimizer.param_groups:
param_group["lr"] = self.reset_lr
# linear decaying within the cycle (ratio goes 0 -> 1 across step_size epochs)
else:
ratio = ((self.epoch + (self.iter)/self.max_iter - 1) % args['step_size']) / args['step_size']
curr_lr = (1-ratio) * self.reset_lr
for optimizer in self.optimizer.values():
for param_group in optimizer.param_groups:
param_group["lr"] = curr_lr
else:
raise RuntimeError('ambiguious scheduler type: {}'.format(sched['type']))
def _adjust_warmup_lr(self, warmup_iter):
init_lr = float(self.train_cfg['init_lr'])
warmup_lr = init_lr * self.iter / warmup_iter
for optimizer in self.optimizer.values():
for param_group in optimizer.param_groups:
param_group["lr"] = warmup_lr
def _train_mode(self):
for key in self.model:
self.model[key].train()
def _eval_mode(self):
for key in self.model:
self.model[key].eval()
def _set_status(self, status: str):
    """Store a fixed-width status string in self.status.

    A two-word status is split around the center column (first word
    right-justified, second left-justified); anything else is centered.
    Width is the module-level constant `status_len`.
    """
    assert len(status) <= status_len, 'status string cannot exceed %d characters, (now %d)'%(status_len, len(status))
    words = status.split(' ')
    if len(words) == 2:
        left, right = words
        self.status = left.rjust(status_len//2) + ' ' + right.ljust(status_len//2)
    else:
        pad = status_len - len(status)
        self.status = ' ' * (pad//2) + status + ' ' * ((pad+1)//2)
def summary(self):
    """Return a human-readable summary of all modules: architecture text
    and parameter counts, framed by horizontal rules."""
    parts = ['-'*100 + '\n']
    # model
    for mod_name, mod in self.module.items():
        # get parameter number
        n_params = sum(p.numel() for p in mod.parameters())
        # get information about architecture and parameter number
        parts.append('[%s] paramters: %s -->'%(mod_name, human_format(n_params)) + '\n')
        parts.append(str(mod) + '\n\n')
    # optim
    # Hardware
    parts.append('-'*100 + '\n')
    return ''.join(parts)
| 27,989
| 37.767313
| 155
|
py
|
AP-BSN
|
AP-BSN-master/src/trainer/__init__.py
|
import os
from importlib import import_module
# registry mapping lowercase trainer class name -> trainer class
trainer_class_dict = {}

def regist_trainer(trainer):
    """Class decorator: register a trainer class under its lowercase name.

    Raises AssertionError on duplicate registration; returns the class
    unchanged so it can be used as a plain decorator.
    """
    key = trainer.__name__.lower()
    assert key not in trainer_class_dict, 'there is already registered dataset: %s in trainer_dict.' % key
    trainer_class_dict[key] = trainer
    return trainer
def get_trainer_class(trainer_name: str):
    """Look up a registered trainer class by (case-insensitive) name.

    Raises KeyError if the name was never registered.
    """
    return trainer_class_dict[trainer_name.lower()]
# import all python files in trainer folder
# Importing each sibling module executes its @regist_trainer decorators,
# which populates trainer_class_dict as a side effect.
for module in os.listdir(os.path.dirname(__file__)):
    if module == '__init__.py' or module[-3:] != '.py':
        continue
    import_module('src.trainer.{}'.format(module[:-3]))
# avoid leaking the loop variable as a public module attribute
del module
| 702
| 28.291667
| 124
|
py
|
AP-BSN
|
AP-BSN-master/src/trainer/trainer.py
|
import os
import datetime
import torch
from . import regist_trainer
from .base import BaseTrainer
from ..model import get_model_class
@regist_trainer
class Trainer(BaseTrainer):
    """Concrete trainer: dispatches testing/validation flows and wires the
    denoiser model, its optimizer, and the forward/loss computation on top
    of BaseTrainer's training loop."""

    def __init__(self, cfg):
        super().__init__(cfg)

    @torch.no_grad()
    def test(self):
        ''' initialization test setting '''
        # initialization
        # only load the full test dataset when neither a single image nor a
        # directory was requested on the command line
        dataset_load = (self.cfg['test_img'] is None) and (self.cfg['test_dir'] is None)
        self._before_test(dataset_load=dataset_load)
        # set image save path
        # probe up to 60 timestamp suffixes to find an unused results folder
        for i in range(60):
            test_time = datetime.datetime.now().strftime('%m-%d-%H-%M') + '-%02d'%i
            img_save_path = 'img/test_%s_%03d_%s' % (self.cfg['test']['dataset'], self.epoch, test_time)
            if not self.file_manager.is_dir_exist(img_save_path): break
        # -- [ TEST Single Image ] -- #
        if self.cfg['test_img'] is not None:
            self.test_img(self.cfg['test_img'])
            exit()
        # -- [ TEST Image Directory ] -- #
        elif self.cfg['test_dir'] is not None:
            self.test_dir(self.cfg['test_dir'])
            exit()
        # -- [ TEST DND Benchmark ] -- #
        elif self.test_cfg['dataset'] == 'DND_benchmark':
            self.test_DND(img_save_path)
            exit()
        # -- [ Test Normal Dataset ] -- #
        else:
            psnr, ssim = self.test_dataloader_process(dataloader=self.test_dataloader['dataset'],
                                                      add_con=0. if not 'add_con' in self.test_cfg else self.test_cfg['add_con'],
                                                      floor=False if not 'floor' in self.test_cfg else self.test_cfg['floor'],
                                                      img_save_path=img_save_path,
                                                      img_save=self.test_cfg['save_image'])
            # print out result as filename
            if psnr is not None and ssim is not None:
                with open(os.path.join(self.file_manager.get_dir(img_save_path), '_psnr-%.2f_ssim-%.3f.result'%(psnr, ssim)), 'w') as f:
                    f.write('PSNR: %f\nSSIM: %f'%(psnr, ssim))

    @torch.no_grad()
    def validation(self):
        """Run validation on the current denoiser and save result images."""
        # set denoiser
        self._set_denoiser()
        # make directories for image saving
        img_save_path = 'img/val_%03d' % self.epoch
        self.file_manager.make_dir(img_save_path)
        # validation
        # NOTE(review): psnr/ssim are computed but not used here; metrics are
        # presumably logged inside test_dataloader_process — confirm.
        psnr, ssim = self.test_dataloader_process(dataloader=self.val_dataloader['dataset'],
                                                  add_con=0. if not 'add_con' in self.val_cfg else self.val_cfg['add_con'],
                                                  floor=False if not 'floor' in self.val_cfg else self.val_cfg['floor'],
                                                  img_save_path=img_save_path,
                                                  img_save=self.val_cfg['save_image'])

    def _set_module(self):
        """Instantiate the denoiser network from cfg['model'] (type + kwargs)."""
        module = {}
        if self.cfg['model']['kwargs'] is None:
            module['denoiser'] = get_model_class(self.cfg['model']['type'])()
        else:
            module['denoiser'] = get_model_class(self.cfg['model']['type'])(**self.cfg['model']['kwargs'])
        return module

    def _set_optimizer(self):
        """Create one optimizer per module, all sharing the configured init_lr."""
        optimizer = {}
        for key in self.module:
            optimizer[key] = self._set_one_optimizer(opt=self.train_cfg['optimizer'],
                                                     parameters=self.module[key].parameters(),
                                                     lr=float(self.train_cfg['init_lr']))
        return optimizer

    def _forward_fn(self, module, loss, data):
        """Single training step: run the denoiser on the configured inputs and
        evaluate the loss.

        Returns:
            (losses, tmp_info) as produced by the loss callable.
        """
        # forward
        input_data = [data['dataset'][arg] for arg in self.cfg['model_input']]
        denoised_img = module['denoiser'](*input_data)
        model_output = {'recon': denoised_img}
        # get losses
        # ratio = fraction of total training completed, used by ratio-scheduled losses
        losses, tmp_info = loss(input_data, model_output, data['dataset'], module, \
            ratio=(self.epoch-1 + (self.iter-1)/self.max_iter)/self.max_epoch)
        return losses, tmp_info
| 4,319
| 44
| 184
|
py
|
AP-BSN
|
AP-BSN-master/src/util/logger.py
|
import threading
import datetime, os
from .progress_msg import ProgressMsg
# from .chart import LossChart
class Logger(ProgressMsg):
    """Leveled console/file logger with optional colored output, built on top
    of ProgressMsg for progress reporting."""

    def __init__(self, max_iter:tuple=None, log_dir:str=None, log_file_option:str='w', log_lvl:str='note', log_file_lvl:str='info', log_include_time:bool=True):
        '''
        Args:
            session_name (str)
            max_iter (tuple) : max iteration for progress
            log_dir (str) : if None, no file out for logging
            log_file_option (str) : 'w' or 'a'
            log_lvl (str) : 'debug' < 'note' < 'info' < 'highlight' < 'val'
            log_include_time (bool)
        '''
        # ordered severity levels; index position is the numeric level
        self.lvl_list = ['debug', 'note', 'info', 'highlight', 'val']
        # per-level ANSI color (None = uncolored)
        self.lvl_color = [bcolors.FAIL, None, None, bcolors.WARNING, bcolors.OKGREEN]
        assert log_file_option in ['w', 'a']
        assert log_lvl in self.lvl_list
        assert log_file_lvl in self.lvl_list
        # init progress message class
        ProgressMsg.__init__(self, max_iter)
        # log setting
        self.log_dir = log_dir
        self.log_lvl = self.lvl_list.index(log_lvl)
        self.log_file_lvl = self.lvl_list.index(log_file_lvl)
        self.log_include_time = log_include_time
        # init logging
        # NOTE: file handles stay open for the logger's lifetime; they are
        # flushed on every write but never explicitly closed here.
        if self.log_dir is not None:
            logfile_time = datetime.datetime.now().strftime('%m-%d-%H-%M')
            self.log_file = open(os.path.join(log_dir, 'log_%s.log'%logfile_time), log_file_option)
            self.val_file = open(os.path.join(log_dir, 'validation_%s.log'%logfile_time), log_file_option)

    def _print(self, txt, lvl_n, end):
        """Print txt if its level passes the console threshold, and mirror it
        to the log file if it passes the file threshold."""
        txt = str(txt)
        if self.log_lvl <= lvl_n:
            # '\033[K' clears the rest of the line (erases progress messages)
            if self.lvl_color[lvl_n] is not None:
                print('\033[K'+ self.lvl_color[lvl_n] + txt + bcolors.ENDC, end=end)
            else:
                print('\033[K'+txt, end=end)
        if self.log_file_lvl <= lvl_n:
            self.write_file(txt)

    # convenience wrappers, one per severity level
    def debug(self, txt, end=None):
        self._print(txt, self.lvl_list.index('debug'), end)
    def note(self, txt, end=None):
        self._print(txt, self.lvl_list.index('note'), end)
    def info(self, txt, end=None):
        self._print(txt, self.lvl_list.index('info'), end)
    def highlight(self, txt, end=None):
        self._print(txt, self.lvl_list.index('highlight'), end)

    def val(self, txt, end=None):
        """Validation-level message; additionally appended to the separate
        validation log file when file logging is enabled."""
        self._print(txt, self.lvl_list.index('val'), end)
        if self.log_dir is not None:
            self.val_file.write(txt+'\n')
            self.val_file.flush()

    def write_file(self, txt):
        """Append one line to the main log file (with optional timestamp)."""
        if self.log_dir is not None:
            if self.log_include_time:
                time = datetime.datetime.now().strftime('%H:%M:%S')
                txt = "[%s] "%time + txt
            self.log_file.write(txt+'\n')
            self.log_file.flush()

    def clear_screen(self):
        """Clear the terminal (Windows 'cls', POSIX 'clear')."""
        if os.name == 'nt':
            os.system('cls')
        else:
            os.system('clear')
# https://stackoverflow.com/questions/287871/how-to-print-colored-text-in-python
class bcolors:
    """ANSI escape sequences for colored/styled terminal output.

    Prefix text with a color code and terminate with ENDC to reset.
    """
    HEADER = '\033[95m'
    OKBLUE = '\033[94m'
    OKCYAN = '\033[96m'
    OKGREEN = '\033[92m'
    WARNING = '\033[93m'
    FAIL = '\033[91m'
    ENDC = '\033[0m'        # reset all attributes
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'
| 3,293
| 34.042553
| 160
|
py
|
AP-BSN
|
AP-BSN-master/src/util/progress_msg.py
|
import time
import datetime
import sys
class ProgressMsg():
    """Console progress reporter for nested iteration (e.g. (epoch, batch)).

    Progress is the mixed-radix fraction of current_iter over max_iter;
    elapsed/remaining/total times are extrapolated from it.
    """

    def __init__(self, max_iter, min_time_interval=0.1):
        '''
        Args:
            max_iter : (max_epoch, max_data_length, ...)
            min_time_interval (second)
        '''
        self.max_iter = max_iter
        self.min_time_interval = min_time_interval
        self.start_time = time.time()
        self.progress_time = self.start_time

    def start(self, start_iter):
        """Mark the starting position (supports resuming mid-run)."""
        assert len(self.max_iter) == len(start_iter), 'start_iter should have same length with max variable.'
        self.start_iter = start_iter
        self.current_iter = start_iter
        self.start_time = time.time()
        self.progress_time = self.start_time

    def calculate_progress(self, current_iter):
        """Return (percent_done, elapsed_str, remain_str, total_str).

        Percent is computed relative to start_iter so resumed runs report
        progress of the remaining work only.
        """
        self.progress_time = time.time()
        assert len(self.max_iter) == len(current_iter), 'current should have same length with max variable.'
        for i in range(len(self.max_iter)):
            assert current_iter[i] <= self.max_iter[i], 'current value should be less than max value.'
        # fold the nested counters into a single fraction, innermost first
        start_per = 0
        for i in reversed(range(len(self.max_iter))):
            start_per += self.start_iter[i]
            start_per /= self.max_iter[i]
        start_per *= 100
        pg_per = 0
        for i in reversed(range(len(self.max_iter))):
            pg_per += current_iter[i]
            pg_per /= self.max_iter[i]
        pg_per *= 100
        # rescale so the resumed-from point counts as 0%
        pg_per = (pg_per-start_per) / (100-start_per) * 100
        if pg_per != 0:
            elapsed = time.time() - self.start_time
            total = 100*elapsed/pg_per
            remain = total - elapsed
            elapsed_str = str(datetime.timedelta(seconds=int(elapsed)))
            remain_str = str(datetime.timedelta(seconds=int(remain)))
            total_str = str(datetime.timedelta(seconds=int(total)))
        else:
            # no progress yet: remaining/total time cannot be extrapolated
            elapsed = time.time() - self.start_time
            elapsed_str = str(datetime.timedelta(seconds=int(elapsed)))
            remain_str = 'INF'
            total_str = 'INF'
        return pg_per, elapsed_str, remain_str, total_str

    def print_prog_msg(self, current_iter):
        """Print a throttled, carriage-return progress line; returns the text
        (tabs stripped) when printed, else None."""
        if time.time() - self.progress_time >= self.min_time_interval:
            pg_per, elapsed_str, remain_str, total_str = self.calculate_progress(current_iter)
            txt = '\033[K>>> progress : %.2f%%, elapsed: %s, remaining: %s, total: %s \t\t\t\t\t' % (pg_per, elapsed_str, remain_str, total_str)
            print(txt, end='\r')
            return txt.replace('\t', '')
        return

    def get_start_msg(self):
        return 'Start >>>'

    def get_finish_msg(self):
        """Return a finish banner including the total elapsed wall time."""
        total = time.time() - self.start_time
        total_str = str(datetime.timedelta(seconds=int(total)))
        txt = 'Finish >>> (total elapsed time : %s)' % total_str
        return txt
if __name__ == '__main__':
    # manual smoke-test / demo of ProgressMsg; not exercised by the training code
    import logging
    logging.basicConfig(
        format='%(message)s',
        level=logging.INFO,
        handlers=[logging.StreamHandler()]
    )
    pp = ProgressMsg((10,10))
    ss = (0, 0)
    pp.start(ss)
    print(pp.__class__.__name__)
    # NOTE(review): the innermost k loop only slows the demo down (k is unused),
    # and with 0.5s sleeps the full demo takes ~8 minutes
    for i in range(0, 10):
        for j in range(10):
            for k in range(10):
                time.sleep(0.5)
                pp.print_prog_msg((i, j))
                logging.info('ttt')
| 3,364
| 30.448598
| 144
|
py
|
AP-BSN
|
AP-BSN-master/src/util/summary_logging.py
|
import time
from torch.utils.tensorboard import SummaryWriter
import numpy as np
class LossWriter(SummaryWriter):
    """TensorBoard SummaryWriter specialized for scalar losses.

    Defaults to a timestamped directory under ./logs/tensorboard/ when no
    log_dir is given.
    """

    def __init__(self, log_dir=None, comment=''):
        # FIX: identity check (`is None`) instead of equality (`== None`)
        if log_dir is None:
            log_dir = './logs/tensorboard/' + time.strftime('%Y-%m-%d--%H-%M-%S', time.localtime(time.time()))
        super(LossWriter, self).__init__(log_dir=log_dir, comment=comment)

    def write_loss(self, loss_name, scalar, n_iter):
        """Log one scalar value under the 'Loss/' namespace at step n_iter."""
        self.add_scalar('Loss/'+loss_name, scalar, n_iter)
if __name__=='__main__':
    # manual smoke test: write 100 random scalars to a fresh event file
    testwriter = LossWriter()
    for n_iter in range(100):
        # BUG FIX: write_loss takes (loss_name, scalar, n_iter); the original
        # call omitted the name and raised TypeError at runtime.
        testwriter.write_loss('test_loss', np.random.random(), n_iter)
| 640
| 28.136364
| 110
|
py
|
AP-BSN
|
AP-BSN-master/src/util/config_parse.py
|
import yaml, os
class ConfigParser:
    """Load a YAML config named by args.config from ./conf, merge in the
    command-line arguments, and normalize the string 'None' to Python None.

    Supports dict-style read access: cfg['key'].
    """

    def __init__(self, args):
        # load model configuration from ./conf/<name>.yaml
        cfg_file = os.path.join('conf', args.config + '.yaml')
        with open(cfg_file) as f:
            self.config = yaml.load(f, Loader=yaml.FullLoader)
        # command-line arguments override / extend the file configuration
        for arg_name, arg_value in vars(args).items():
            self.config[arg_name] = arg_value
        # replace 'None' strings by real None values (recursively)
        self.convert_None(self.config)

    def __getitem__(self, name):
        return self.config[name]

    def convert_None(self, d):
        """Recursively replace the literal string 'None' with None in-place."""
        for key, value in d.items():
            if value == 'None':
                d[key] = None
            elif isinstance(value, dict):
                self.convert_None(value)
if __name__ == "__main__":
import argparse
args = argparse.ArgumentParser()
args.add_argument('-c', '--config', default=None, type=str)
args.add_argument('-d', '--device', default=None, type=str)
args.add_argument('-r', '--resume', action='store_true')
args = args.parse_args()
args.config = "./conf/resnet_cfg.yaml"
cp = ConfigParser(args)
| 1,087
| 26.897436
| 63
|
py
|
AP-BSN
|
AP-BSN-master/src/util/util.py
|
from math import exp
import cv2
import numpy as np
import torch
import torch.nn.functional as F
from skimage.metrics import peak_signal_noise_ratio, structural_similarity
def np2tensor(n: np.ndarray):
    '''
    transform numpy array (image) to torch Tensor
    BGR -> RGB
    (h,w,c) -> (c,h,w)

    Gray input (h,w) becomes a single-channel tensor (1,h,w).
    Raises RuntimeError for any other dimensionality.
    '''
    # gray
    if len(n.shape) == 2:
        # BUG FIX: the original applied a 3-axis transpose to a 2-D array,
        # which raises ValueError; a gray image only needs a channel axis.
        return torch.from_numpy(np.ascontiguousarray(n[np.newaxis, :, :]))
    # RGB -> BGR
    elif len(n.shape) == 3:
        return torch.from_numpy(np.ascontiguousarray(np.transpose(np.flip(n, axis=2), (2,0,1))))
    else:
        raise RuntimeError('wrong numpy dimensions : %s'%(n.shape,))
def tensor2np(t: torch.Tensor):
    '''
    transform torch Tensor to numpy having opencv image form.
    RGB -> BGR
    (c,h,w) -> (h,w,c)

    Gray input (h,w) is returned unchanged in shape; 4-D input is treated
    as a batch (b,c,h,w) -> (b,h,w,c). Raises RuntimeError otherwise.
    '''
    t = t.cpu().detach()
    # gray
    if len(t.shape) == 2:
        # BUG FIX: the original permuted 3 dims of a 2-D tensor (an error);
        # a gray image needs no channel reordering.
        return t.numpy()
    # RGB -> BGR
    elif len(t.shape) == 3:
        return np.flip(t.permute(1,2,0).numpy(), axis=2)
    # image batch
    elif len(t.shape) == 4:
        return np.flip(t.permute(0,2,3,1).numpy(), axis=3)
    else:
        raise RuntimeError('wrong tensor dimensions : %s'%(t.shape,))
def imwrite_tensor(t, name='test.png'):
    # Save a (c,h,w) tensor as an image file in the current directory,
    # converting to OpenCV's (h,w,BGR) layout via tensor2np.
    cv2.imwrite('./%s'%name, tensor2np(t.cpu()))
def imread_tensor(name='test'):
    # Load an image file from the current directory as a (c,h,w) RGB tensor.
    # NOTE(review): default 'test' has no extension, unlike imwrite_tensor's
    # 'test.png' default — confirm intended pairing.
    return np2tensor(cv2.imread('./%s'%name))
def rot_hflip_img(img: torch.Tensor, rot_times: int = 0, hflip: int = 0):
    '''
    rotate '90 x times degree' & horizontal flip image
    (shape of img: b,c,h,w or c,h,w)

    Each (rotation, flip) combination maps to a fixed flip/transpose recipe
    on the spatial axes; the input tensor itself is never modified.
    '''
    # offset of the first spatial axis: 1 for (c,h,w), 2 for (b,c,h,w)
    d = 0 if len(img.shape) == 3 else 1
    r = rot_times % 4
    if hflip % 2 == 0:
        # rotation only
        if r == 0:
            return img
        if r == 1:
            return img.flip(d+1).transpose(d+1, d+2)
        if r == 2:
            return img.flip(d+2).flip(d+1)
        return img.flip(d+2).transpose(d+1, d+2)
    # horizontal flip combined with rotation
    if r == 0:
        return img.flip(d+2)
    if r == 1:
        return img.flip(d+1).flip(d+2).transpose(d+1, d+2)
    if r == 2:
        return img.flip(d+1)
    return img.transpose(d+1, d+2)
def pixel_shuffle_down_sampling(x:torch.Tensor, f:int, pad:int=0, pad_value:float=0.):
    '''
    pixel-shuffle down-sampling (PD) from "When AWGN-denoiser meets real-world noise." (AAAI 2019)
    Args:
        x (Tensor) : input tensor
        f (int) : factor of PD
        pad (int) : number of pad between each down-sampled images
        pad_value (float) : padding value
    Return:
        pd_x (Tensor) : down-shuffled image tensor with pad or not
    '''
    # single image tensor
    if len(x.shape) == 3:
        c,w,h = x.shape
        # (c,w,h) -> (c*f*f, w/f, h/f): each of the f*f sub-images becomes a channel
        unshuffled = F.pixel_unshuffle(x, f)
        # optional border of pad_value around every sub-image
        if pad != 0: unshuffled = F.pad(unshuffled, (pad, pad, pad, pad), value=pad_value)
        # re-tile the f*f sub-images into one mosaic of the padded size
        return unshuffled.view(c,f,f,w//f+2*pad,h//f+2*pad).permute(0,1,3,2,4).reshape(c, w+2*f*pad, h+2*f*pad)
    # batched image tensor
    else:
        b,c,w,h = x.shape
        unshuffled = F.pixel_unshuffle(x, f)
        if pad != 0: unshuffled = F.pad(unshuffled, (pad, pad, pad, pad), value=pad_value)
        return unshuffled.view(b,c,f,f,w//f+2*pad,h//f+2*pad).permute(0,1,2,4,3,5).reshape(b,c,w+2*f*pad, h+2*f*pad)
def pixel_shuffle_up_sampling(x:torch.Tensor, f:int, pad:int=0):
    '''
    inverse of pixel-shuffle down-sampling (PD)
    see more details about PD in pixel_shuffle_down_sampling()
    Args:
        x (Tensor) : input tensor
        f (int) : factor of PD
        pad (int) : number of pad will be removed
    '''
    # single image tensor
    if len(x.shape) == 3:
        c,w,h = x.shape
        # split the mosaic back into f*f sub-images stacked on the channel axis
        before_shuffle = x.view(c,f,w//f,f,h//f).permute(0,1,3,2,4).reshape(c*f*f,w//f,h//f)
        # strip the per-sub-image border added during down-sampling
        if pad != 0: before_shuffle = before_shuffle[..., pad:-pad, pad:-pad]
        return F.pixel_shuffle(before_shuffle, f)
    # batched image tensor
    else:
        b,c,w,h = x.shape
        before_shuffle = x.view(b,c,f,w//f,f,h//f).permute(0,1,2,4,3,5).reshape(b,c*f*f,w//f,h//f)
        if pad != 0: before_shuffle = before_shuffle[..., pad:-pad, pad:-pad]
        return F.pixel_shuffle(before_shuffle, f)
def human_format(num):
    """Format a number with one decimal and a metric suffix, e.g. 1500 -> '1.5K'."""
    suffixes = ['', 'K', 'M', 'G', 'T', 'P']
    scale = 0
    while abs(num) >= 1000:
        num /= 1000.0
        scale += 1
    return '%.1f%s' % (num, suffixes[scale])
def psnr(img1, img2):
    '''
    image value range : [0 - 255]
    clipping for model output

    Accepts torch tensors or numpy arrays; a leading batch dimension is
    stripped (first item only) before comparison.
    '''
    prepared = []
    for img in (img1, img2):
        # drop batch dim, keep first image only
        if len(img.shape) == 4:
            img = img[0]
        # tensor -> numpy (and RGB -> BGR) conversion
        if isinstance(img, torch.Tensor):
            img = tensor2np(img)
        # clip model output into the valid 8-bit range
        prepared.append(np.clip(img, 0, 255))
    return peak_signal_noise_ratio(prepared[0], prepared[1], data_range=255)
def ssim(img1, img2):
    '''
    image value range : [0 - 255]
    clipping for model output

    Accepts torch tensors or numpy arrays; a leading batch dimension is
    stripped (first item only) before comparison.
    '''
    prepared = []
    for img in (img1, img2):
        # drop batch dim, keep first image only
        if len(img.shape) == 4:
            img = img[0]
        # tensor -> numpy (and RGB -> BGR) conversion
        if isinstance(img, torch.Tensor):
            img = tensor2np(img)
        # clip model output into the valid 8-bit range
        prepared.append(np.clip(img, 0, 255))
    return structural_similarity(prepared[0], prepared[1], multichannel=True, data_range=255)
def get_gaussian_2d_filter(window_size, sigma, channel=1, device=torch.device('cpu')):
    '''
    return 2d gaussian filter window as tensor form
    Arg:
        window_size : filter window size
        sigma : standard deviation
        channel : number of channels the filter is expanded to (depthwise form)
        device : device the filter is created on

    Returns a normalized (channel, 1, window_size, window_size) tensor.
    '''
    # vectorized 1-D gaussian (replaces the per-element Python loop; same
    # formula: exp(-(x - window_size//2)^2 / (2*sigma^2)))
    x = torch.arange(window_size, device=device, dtype=torch.float32)
    gauss = torch.exp(-(x - window_size//2)**2 / float(2*sigma**2))
    gauss = gauss.unsqueeze(1)
    # outer product -> separable 2-D kernel, normalized to sum to 1
    filter2d = gauss.mm(gauss.t()).float()
    filter2d = (filter2d/filter2d.sum()).unsqueeze(0).unsqueeze(0)
    return filter2d.expand(channel, 1, window_size, window_size)
def get_mean_2d_filter(window_size, channel=1, device=torch.device('cpu')):
    '''
    return 2d mean filter as tensor form
    Args:
        window_size : filter window size
        channel : number of channels the filter is expanded to (depthwise form)
        device : device the filter is created on
    '''
    kernel = torch.ones((window_size, window_size), device=device)
    # normalize to sum to 1, then add (out_ch, in_ch) axes
    kernel = (kernel / kernel.sum()).unsqueeze(0).unsqueeze(0)
    return kernel.expand(channel, 1, window_size, window_size)
def mean_conv2d(x, window_size=None, window=None, filter_type='gau', sigma=None, keep_sigma=False, padd=True):
    '''
    color channel-wise 2d mean or gaussian convolution
    Args:
        x : input image, (c,h,w) or (b,c,h,w)
        window_size : filter window size (required when window is None)
        window : pre-built (channel,1,k,k) filter; overrides window_size/filter_type
        filter_type(opt) : 'gau' or 'mean'
        sigma : standard deviation of gaussian filter (default (window_size-1)/6)
        keep_sigma : if True, renormalize output so noise std is preserved
        padd : if True, reflect-pad so the output keeps the input spatial size
    Returns the filtered image with the same rank as the input.
    '''
    # work on a 4-D batch internally
    b_x = x.unsqueeze(0) if len(x.shape) == 3 else x
    if window is None:
        if sigma is None: sigma = (window_size-1)/6
        if filter_type == 'gau':
            window = get_gaussian_2d_filter(window_size, sigma=sigma, channel=b_x.shape[1], device=x.device)
        else:
            window = get_mean_2d_filter(window_size, channel=b_x.shape[1], device=x.device)
    else:
        window_size = window.shape[-1]
    if padd:
        pl = (window_size-1)//2
        b_x = F.pad(b_x, (pl,pl,pl,pl), 'reflect')
    # groups = channels -> depthwise (per-channel) convolution
    m_b_x = F.conv2d(b_x, window, groups=b_x.shape[1])
    if keep_sigma:
        # divide by the filter's l2 norm so i.i.d. noise keeps its std
        m_b_x /= (window**2).sum().sqrt()
    if len(x.shape) == 4:
        return m_b_x
    elif len(x.shape) == 3:
        return m_b_x.squeeze(0)
    else:
        raise ValueError('input image shape is not correct')
| 7,961
| 31.765432
| 132
|
py
|
AP-BSN
|
AP-BSN-master/src/util/file_manager.py
|
import os
import cv2
import numpy as np
import torch
from .util import tensor2np
class FileManager:
    """Manage the per-session output directory tree under ./output and
    provide image-saving helpers."""

    def __init__(self, session_name:str):
        self.output_folder = "./output"
        if not os.path.isdir(self.output_folder):
            os.makedirs(self.output_folder)
            print("[WARNING] output folder is not exist, create new one")
        # init session
        self.session_name = session_name
        os.makedirs(os.path.join(self.output_folder, self.session_name), exist_ok=True)
        # mkdir standard sub-folders used elsewhere in the pipeline
        for directory in ['checkpoint', 'img', 'tboard']:
            self.make_dir(directory)

    def is_dir_exist(self, dir_name:str) -> bool:
        # True if ./output/<session_name>/<dir_name> exists
        return os.path.isdir(os.path.join(self.output_folder, self.session_name, dir_name))

    def make_dir(self, dir_name:str) -> None:
        # create ./output/<session_name>/<dir_name> (idempotent)
        os.makedirs(os.path.join(self.output_folder, self.session_name, dir_name), exist_ok=True)

    def get_dir(self, dir_name:str) -> str:
        # -> './output/<session_name>/dir_name'
        return os.path.join(self.output_folder, self.session_name, dir_name)

    def save_img_tensor(self, dir_name:str, file_name:str, img:torch.Tensor, ext='png'):
        """Save a (c,h,w) tensor image; converts to OpenCV layout first."""
        self.save_img_numpy(dir_name, file_name, tensor2np(img), ext)

    def save_img_numpy(self, dir_name:str, file_name:str, img:np.ndarray, ext='png'):
        """Save an (h,w,c) numpy image; single-channel images are squeezed."""
        file_dir_name = os.path.join(self.get_dir(dir_name), '%s.%s'%(file_name, ext))
        if np.shape(img)[2] == 1:
            cv2.imwrite(file_dir_name, np.squeeze(img, 2))
        else:
            cv2.imwrite(file_dir_name, img)
| 1,563
| 35.372093
| 98
|
py
|
AP-BSN
|
AP-BSN-master/src/util/dnd_submission/pytorch_wrapper.py
|
# Author: Tobias Plötz, TU Darmstadt (tobias.ploetz@visinf.tu-darmstadt.de)
# This file is part of the implementation as described in the CVPR 2017 paper:
# Tobias Plötz and Stefan Roth, Benchmarking Denoising Algorithms with Real Photographs.
# Please see the file LICENSE.txt for the license governing this code.
import numpy as np
import torch
from torch.autograd import Variable
def pytorch_denoiser(denoiser, use_cuda):
    """Wrap a torch denoiser into the numpy (h,w,c)-in / (h,w,c)-out interface
    expected by the DND benchmark harness.

    Args:
        denoiser: callable taking a (1,c,h,w) tensor and nlf dict.
        use_cuda (bool): move the input onto the GPU before denoising.
    """
    def wrap_denoiser(Inoisy, nlf):
        noisy = torch.from_numpy(Inoisy)

        if len(noisy.shape) > 2:
            # BUG FIX: (h,w,c) -> (1,c,h,w) needs a transpose; the original
            # used .view(1,c,h,w), which reinterprets memory in-place and
            # scrambles the image content.
            noisy = noisy.permute(2, 0, 1).unsqueeze(0)
        else:
            # gray image: just add batch and channel axes
            noisy = noisy.view(1, 1, noisy.shape[0], noisy.shape[1])

        if use_cuda:
            noisy = noisy.cuda()
        noisy = Variable(noisy)
        denoised = denoiser(noisy, nlf)

        # (1,c,h,w) tensor -> (h,w,c) numpy
        denoised = denoised[0,...].cpu().numpy()
        denoised = np.transpose(denoised, [1,2,0])

        return denoised
    return wrap_denoiser
| 984
| 31.833333
| 89
|
py
|
AP-BSN
|
AP-BSN-master/src/util/dnd_submission/bundle_submissions.py
|
# Author: Tobias Plötz, TU Darmstadt (tobias.ploetz@visinf.tu-darmstadt.de)
# This file is part of the implementation as described in the CVPR 2017 paper:
# Tobias Plötz and Stefan Roth, Benchmarking Denoising Algorithms with Real Photographs.
# Please see the file LICENSE.txt for the license governing this code.
import numpy as np
import scipy.io as sio
import os
import h5py
def bundle_submissions_raw(submission_folder):
    '''
    Bundles submission data for raw denoising

    submission_folder Folder where denoised images reside

    Output is written to <submission_folder>/bundled/. Please submit
    the content of this folder.
    '''
    out_folder = os.path.join(submission_folder, "bundled/")
    # create output folder (replaces the original bare try/except: pass)
    os.makedirs(out_folder, exist_ok=True)
    israw = True
    eval_version = "1.0"

    # 50 benchmark images, 20 crops each
    for i in range(50):
        # BUG FIX: np.object was removed in NumPy 1.24; plain `object` is the
        # supported spelling for an object-dtype (ragged) array.
        Idenoised = np.zeros((20,), dtype=object)
        for bb in range(20):
            filename = '%04d_%02d.mat'%(i+1,bb+1)
            s = sio.loadmat(os.path.join(submission_folder,filename))
            Idenoised_crop = s["Idenoised_crop"]
            Idenoised[bb] = Idenoised_crop
        filename = '%04d.mat'%(i+1)
        sio.savemat(os.path.join(out_folder, filename),
                    {"Idenoised": Idenoised,
                     "israw": israw,
                     "eval_version": eval_version},
                    )
def bundle_submissions_srgb(submission_folder):
    '''
    Bundles submission data for sRGB denoising

    submission_folder Folder where denoised images reside

    Output is written to <submission_folder>/bundled/. Please submit
    the content of this folder.
    '''
    out_folder = os.path.join(submission_folder, "bundled/")
    # create output folder (replaces the original bare try/except: pass)
    os.makedirs(out_folder, exist_ok=True)
    israw = False
    eval_version = "1.0"

    # 50 benchmark images, 20 crops each
    for i in range(50):
        # BUG FIX: np.object was removed in NumPy 1.24; plain `object` is the
        # supported spelling for an object-dtype (ragged) array.
        Idenoised = np.zeros((20,), dtype=object)
        for bb in range(20):
            filename = '%04d_%02d.mat'%(i+1,bb+1)
            s = sio.loadmat(os.path.join(submission_folder,filename))
            Idenoised_crop = s["Idenoised_crop"]
            Idenoised[bb] = Idenoised_crop
        filename = '%04d.mat'%(i+1)
        sio.savemat(os.path.join(out_folder, filename),
                    {"Idenoised": Idenoised,
                     "israw": israw,
                     "eval_version": eval_version},
                    )
| 2,380
| 31.616438
| 89
|
py
|
AP-BSN
|
AP-BSN-master/src/util/dnd_submission/dnd_denoise.py
|
# Author: Tobias Plötz, TU Darmstadt (tobias.ploetz@visinf.tu-darmstadt.de)
# This file is part of the implementation as described in the CVPR 2017 paper:
# Tobias Plötz and Stefan Roth, Benchmarking Denoising Algorithms with Real Photographs.
# Please see the file LICENSE.txt for the license governing this code.
import numpy as np
import scipy.io as sio
import os
import h5py
def load_nlf(info, img_id):
    """Read the noise-level-function parameters for image img_id from the
    DND info structure; returns {'a': ..., 'b': ...}."""
    nlf_h5 = info[info["nlf"][0][img_id]]
    return {"a": nlf_h5["a"][0][0], "b": nlf_h5["b"][0][0]}
def load_sigma_raw(info, img_id, bb, yy, xx):
    """Read the raw-domain noise std for image img_id, crop bb, Bayer
    position (yy, xx) from the DND info structure."""
    return info[info["sigma_raw"][0][img_id]][xx, yy, bb]
def load_sigma_srgb(info, img_id, bb):
    """Read the sRGB-domain noise std for image img_id, crop bb from the
    DND info structure."""
    return info[info["sigma_srgb"][0][img_id]][0, bb]
def denoise_raw(denoiser, data_folder, out_folder):
    '''
    Utility function for denoising all bounding boxes in all raw images of
    the DND dataset.

    denoiser      Function handle
                  It is called as Idenoised = denoiser(Inoisy, nlf) where Inoisy is the noisy image patch
                  and nlf is a dictionary containing the parameters of the noise level
                  function (nlf["a"], nlf["b"]) and a mean noise strength (nlf["sigma"])
    data_folder   Folder where the DND dataset resides
    out_folder    Folder where denoised output should be written to
    '''
    try:
        os.makedirs(out_folder)
    except:pass

    # load info
    infos = h5py.File(os.path.join(data_folder, 'info.mat'), 'r')
    info = infos['info']
    bb = info['boundingboxes']
    print('info loaded\n')
    # process data: 50 images x 20 crops each
    for i in range(50):
        filename = os.path.join(data_folder, 'images_raw', '%04d.mat'%(i+1))
        img = h5py.File(filename, 'r')
        # transpose: MATLAB (column-major) -> numpy (row-major) layout
        Inoisy = np.float32(np.array(img['Inoisy']).T)
        # bounding box
        ref = bb[0][i]
        boxes = np.array(info[ref]).T
        for k in range(20):
            # 1-based inclusive MATLAB box -> 0-based half-open python slice
            idx = [int(boxes[k,0]-1),int(boxes[k,2]),int(boxes[k,1]-1),int(boxes[k,3])]
            Inoisy_crop = Inoisy[idx[0]:idx[1],idx[2]:idx[3]].copy()
            Idenoised_crop = Inoisy_crop.copy()
            H = Inoisy_crop.shape[0]
            W = Inoisy_crop.shape[1]
            nlf = load_nlf(info, i)
            # denoise each of the 4 Bayer sub-planes with its own sigma
            for yy in range(2):
                for xx in range(2):
                    nlf["sigma"] = load_sigma_raw(info, i, k, yy, xx)
                    Inoisy_crop_c = Inoisy_crop[yy:H:2,xx:W:2].copy()
                    Idenoised_crop_c = denoiser(Inoisy_crop_c, nlf)
                    Idenoised_crop[yy:H:2,xx:W:2] = Idenoised_crop_c
            # save denoised data
            Idenoised_crop = np.float32(Idenoised_crop)
            save_file = os.path.join(out_folder, '%04d_%02d.mat'%(i+1,k+1))
            sio.savemat(save_file, {'Idenoised_crop': Idenoised_crop})
            print('%s crop %d/%d' % (filename, k+1, 20))
        print('[%d/%d] %s done\n' % (i+1, 50, filename))
def denoise_srgb(denoiser, data_folder, out_folder):
    '''
    Utility function for denoising all bounding boxes in all sRGB images of
    the DND dataset.

    denoiser      Function handle
                  It is called as Idenoised = denoiser(Inoisy, nlf) where Inoisy is the noisy image patch
                  and nlf is a dictionary containing the mean noise strength (nlf["sigma"])
    data_folder   Folder where the DND dataset resides
    out_folder    Folder where denoised output should be written to
    '''
    try:
        os.makedirs(out_folder)
    except:pass

    print('model loaded\n')
    # load info
    infos = h5py.File(os.path.join(data_folder, 'info.mat'), 'r')
    info = infos['info']
    bb = info['boundingboxes']
    print('info loaded\n')
    # process data: 50 images x 20 crops each
    for i in range(50):
        filename = os.path.join(data_folder, 'images_srgb', '%04d.mat'%(i+1))
        img = h5py.File(filename, 'r')
        # transpose: MATLAB (column-major) -> numpy (row-major) layout
        Inoisy = np.float32(np.array(img['InoisySRGB']).T)
        # bounding box
        ref = bb[0][i]
        boxes = np.array(info[ref]).T
        for k in range(20):
            # 1-based inclusive MATLAB box -> 0-based half-open python slice
            idx = [int(boxes[k,0]-1),int(boxes[k,2]),int(boxes[k,1]-1),int(boxes[k,3])]
            Inoisy_crop = Inoisy[idx[0]:idx[1],idx[2]:idx[3],:].copy()
            H = Inoisy_crop.shape[0]
            W = Inoisy_crop.shape[1]
            nlf = load_nlf(info, i)
            nlf["sigma"] = load_sigma_srgb(info, i, k)
            # NOTE(review): denoiser is invoked with 4 args (crop, nlf, i, k)
            # here, not the 2 documented above — confirm the expected handle
            # signature with callers.
            Idenoised_crop = denoiser(Inoisy_crop, nlf, i, k)
            # for yy in range(2):
            #     for xx in range(2):
            #         nlf["sigma"] = load_sigma_srgb(info, i, k)
            #         Idenoised_crop = denoiser(Inoisy_crop, nlf, i, k)
            # save denoised data
            Idenoised_crop = np.float32(Idenoised_crop)
            save_file = os.path.join(out_folder, '%04d_%02d.mat'%(i+1,k+1))
            sio.savemat(save_file, {'Idenoised_crop': Idenoised_crop})
            print('%s crop %d/%d' % (filename, k+1, 20))
        print('[%d/%d] %s done\n' % (i+1, 50, filename))
| 5,052
| 39.103175
| 106
|
py
|
AP-BSN
|
AP-BSN-master/src/datahandler/custom.py
|
import os
import h5py
from src.datahandler.denoise_dataset import DenoiseDataSet
from . import regist_dataset
@regist_dataset
class CustomSample(DenoiseDataSet):
    """Template dataset class: copy and fill in _scan/_load_data to plug a
    custom dataset into the DenoiseDataSet framework."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def _scan(self):
        """Collect all image paths into self.img_paths."""
        # check if the dataset exists
        dataset_path = os.path.join('WRITE_YOUR_DATASET_DIRECTORY')
        assert os.path.exists(dataset_path), 'There is no dataset %s'%dataset_path

        # WRITE YOUR CODE FOR SCANNING DATA
        # example:
        for root, _, files in os.walk(dataset_path):
            for file_name in files:
                self.img_paths.append(os.path.join(root, file_name))

    def _load_data(self, data_idx):
        """Load one sample (as a dict) for the given index."""
        # WRITE YOUR CODE FOR LOADING DATA FROM DATA INDEX
        # example:
        file_name = self.img_paths[data_idx]
        noisy_img = self._load_img(os.path.join(self.dataset_path, 'RN' , file_name))
        clean_img = self._load_img(os.path.join(self.dataset_path, 'CL' , file_name))
        return {'clean': clean_img, 'real_noisy': noisy_img} # paired dataset
        # return {'real_noisy': noisy_img} # only noisy image dataset
| 1,163
| 33.235294
| 85
|
py
|
AP-BSN
|
AP-BSN-master/src/datahandler/DND.py
|
import os
import torch
import h5py
from src.datahandler.denoise_dataset import DenoiseDataSet
from . import regist_dataset
@regist_dataset
class DND(DenoiseDataSet):
    """DND benchmark sRGB images loaded directly from the original .mat files
    (real noisy images only — DND publishes no ground truth)."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def _scan(self):
        dataset_path = os.path.join(self.dataset_dir, 'DND/dnd_2017/images_srgb')
        assert os.path.exists(dataset_path), 'There is no dataset %s'%dataset_path
        for root, _, files in os.walk(dataset_path):
            for file_name in files:
                self.img_paths.append(os.path.join(root, file_name))

    def _load_data(self, data_idx):
        # each .mat holds a single HDF5 dataset with values in [0,1];
        # scale to the framework's [0,255] range
        with h5py.File(self.img_paths[data_idx], 'r') as img_file:
            noisy_img = img_file[list(img_file.keys())[0]][()]*255.
        return {'real_noisy': torch.from_numpy(noisy_img)}
@regist_dataset
class prep_DND(DenoiseDataSet):
    '''
    dataset class for prepared DND dataset which is cropped with overlap.
    [using size 512x512 with 128 overlapping]
    '''
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def _scan(self):
        self.dataset_path = os.path.join(self.dataset_dir, 'prep/DND_s512_o128')
        assert os.path.exists(self.dataset_path), 'There is no dataset %s'%self.dataset_path
        # NOTE(review): img_paths is reassigned on every walk iteration, so only
        # the last visited directory's file list survives — works only if 'RN'
        # has no subdirectories; confirm.
        for root, _, files in os.walk(os.path.join(self.dataset_path, 'RN')):
            self.img_paths = files

    def _load_data(self, data_idx):
        file_name = self.img_paths[data_idx]
        noisy_img = self._load_img(os.path.join(self.dataset_path, 'RN' , file_name))
        return {'real_noisy': noisy_img} #'instances': instance }
@regist_dataset
class DND_benchmark(DenoiseDataSet):
    '''
    dumpy dataset class for DND benchmark
    DND benchmarking code is implemented in the "trainer" directly
    '''
    # placeholder: registered only so the config can name it; yields no data
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def _scan(self):
        pass

    def _load_data(self, data_idx):
        pass
| 1,988
| 31.080645
| 92
|
py
|
AP-BSN
|
AP-BSN-master/src/datahandler/denoise_dataset.py
|
import random, os
import cv2
import numpy as np
from scipy.io import savemat
import torch
from torch.utils.data import Dataset
from ..util.util import rot_hflip_img, tensor2np, np2tensor, mean_conv2d
class DenoiseDataSet(Dataset):
def __init__(self, add_noise:str=None, crop_size:list=None, aug:list=None, n_repeat:int=1, n_data:int=None, ratio_data:float=None) -> None:
    '''
    Base denoising dataset class for various dataset.
    to build custom dataset class, below functions must be implemented in the inherited class. (or see other dataset class already implemented.)
        - self._scan(self) : scan image data & save its paths. (saved to self.img_paths)
        - self._load_data(self, data_idx) : load single paired data from idx as a form of dictionary.
    Args:
        add_noise (str) : configuration of additive noise to synthesize noisy image. (see _add_noise() for more details.)
        crop_size (list) : crop size, e.g. [W] or [H, W] and no crop if None
        aug (list) : list of data augmentations (see _augmentation() for more details.)
        n_repeat (int) : number of repeat for each data.
        n_data (int) : number of data to be used. (default: None = all data)
        ratio_data (float) : ratio of data to be used. (activated when n_data=None, default: None = all data)
    '''
    self.dataset_dir = './dataset'
    if not os.path.isdir(self.dataset_dir):
        raise Exception('dataset directory is not exist')

    # parse additive noise argument
    self.add_noise_type, self.add_noise_opt, self.add_noise_clamp = self._parse_add_noise(add_noise)

    # set parameters for dataset.
    self.crop_size = crop_size
    self.aug = aug
    self.n_repeat = n_repeat

    # scan all data and fill in self.img_paths
    self.img_paths = []
    self._scan()
    # sort for a deterministic order, but only when entries are plain
    # sortable scalars/strings (subclasses may store other objects)
    if len(self.img_paths) > 0:
        if self.img_paths[0].__class__.__name__ in ['int', 'str', 'float']:
            self.img_paths.sort()

    # set data amount (explicit count > ratio > everything)
    if n_data is not None: self.n_data = n_data
    elif ratio_data is not None: self.n_data = int(ratio_data * len(self.img_paths))
    else: self.n_data = len(self.img_paths)
def __len__(self):
return self.n_data * self.n_repeat
def __getitem__(self, idx):
'''
final dictionary shape of data:
{'clean', 'syn_noisy', 'real_noisy', 'noisy (any of real[first priority] and syn)', etc}
'''
# calculate data index
data_idx = idx % self.n_data
# load data
data = self._load_data(data_idx)
# pre-processing (currently only crop)
data = self._pre_processing(data)
# synthesize additive noise
if self.add_noise_type is not None:
if 'clean' in data:
syn_noisy_img, nlf = self._add_noise(data['clean'], self.add_noise_type, self.add_noise_opt, self.add_noise_clamp)
data['syn_noisy'] = syn_noisy_img
data['nlf'] = nlf
elif 'real_noisy' in data:
syn_noisy_img, nlf = self._add_noise(data['real_noisy'], self.add_noise_type, self.add_noise_opt, self.add_noise_clamp)
data['syn_noisy'] = syn_noisy_img
data['nlf'] = nlf
else:
raise RuntimeError('there is no clean or real image to synthesize. (synthetic noise type: %s)'%self.add_noise_type)
# data augmentation
if self.aug is not None:
data = self._augmentation(data, self.aug)
# add general label 'noisy' to use any of real_noisy or syn_noisy (real first)
if 'real_noisy' in data or 'syn_noisy' in data:
data['noisy'] = data['real_noisy'] if 'real_noisy' in data else data['syn_noisy']
return data
def _scan(self):
raise NotImplementedError
# TODO fill in self.img_paths (include path from project directory)
def _load_data(self, data_idx):
raise NotImplementedError
# TODO load possible data as dictionary
# dictionary key list :
# 'clean' : clean image without noise (gt or anything).
# 'real_noisy' : real noisy image or already synthesized noisy image.
# 'instances' : any other information of capturing situation.
#----------------------------#
# Image handling functions #
#----------------------------#
def _load_img(self, img_name, as_gray=False):
img = cv2.imread(img_name, 1)
assert img is not None, "failure on loading image - %s"%img_name
return self._load_img_from_np(img, as_gray, RGBflip=True)
def _load_img_from_np(self, img, as_gray=False, RGBflip=False):
# if color
if len(img.shape) != 2:
if as_gray:
# follows definition of sRBG in terms of the CIE 1931 linear luminance.
# because calculation opencv color conversion and imread grayscale mode is a bit different.
# https://en.wikipedia.org/wiki/Grayscale
img = np.average(img, axis=2, weights=[0.0722, 0.7152, 0.2126])
img = np.expand_dims(img, axis=0)
else:
if RGBflip:
img = np.flip(img, axis=2)
img = np.transpose(img, (2,0,1))
# if gray
else:
img = np.expand_dims(img, axis=0)
return torch.from_numpy(np.ascontiguousarray(img).astype(np.float32))
def _pre_processing(self, data):
# get a patch from image data
if self.crop_size != None:
data = self._get_patch(self.crop_size, data)
return data
def _get_patch(self, crop_size, data, rnd=True):
# check all image size is same
if 'clean' in data and 'real_noisy' in data:
assert data['clean'].shape[1] == data['clean'].shape[1] and data['real_noisy'].shape[2] == data['real_noisy'].shape[2], \
'img shape should be same. (%d, %d) != (%d, %d)' % (data['clean'].shape[1], data['clean'].shape[1], data['real_noisy'].shape[2], data['real_noisy'].shape[2])
# get image shape and select random crop location
if 'clean' in data:
max_x = data['clean'].shape[2] - crop_size[0]
max_y = data['clean'].shape[1] - crop_size[1]
else:
max_x = data['real_noisy'].shape[2] - crop_size[0]
max_y = data['real_noisy'].shape[1] - crop_size[1]
assert max_x >= 0
assert max_y >= 0
if rnd and max_x>0 and max_y>0:
x = np.random.randint(0, max_x)
y = np.random.randint(0, max_y)
else:
x, y = 0, 0
# crop
if 'clean' in data:
data['clean'] = data['clean'][:, y:y+crop_size[1], x:x+crop_size[0]]
if 'real_noisy' in data:
data['real_noisy'] = data['real_noisy'][:, y:y+crop_size[1], x:x+crop_size[0]]
return data
def normalize_data(self, data, cuda=False):
# for all image
for key in data:
if self._is_image_tensor(data[key]):
data[key] = self.normalize(data[key], cuda)
return data
def inverse_normalize_data(self, data, cuda=False):
# for all image
for key in data:
# is image
if self._is_image_tensor(data[key]):
data[key] = self.inverse_normalize(data[key], cuda)
return data
def normalize(self, img, cuda=False):
if img.shape[0] == 1:
stds = self.gray_stds
means = self.gray_means
elif img.shape[0] == 3:
stds = self.color_stds
means = self.color_means
else:
raise RuntimeError('undefined image channel length : %d'%img.shape[0])
if cuda:
means, stds = means.cuda(), stds.cuda()
return (img-means) / stds
def inverse_normalize(self, img, cuda=False):
if img.shape[0] == 1:
stds = self.gray_stds
means = self.gray_means
elif img.shape[0] == 3:
stds = self.color_stds
means = self.color_means
else:
raise RuntimeError('undefined image channel length : %d'%img.shape[0])
if cuda:
means, stds = means.cuda(), stds.cuda()
return (img*stds) + means
def _parse_add_noise(self, add_noise_str:str):
'''
noise_type-opt0:opt1:opt2-clamp
'''
if add_noise_str == 'bypass':
return 'bypass', None, None
elif add_noise_str != None:
add_noise_type = add_noise_str.split('-')[0]
add_noise_opt = [float(v) for v in add_noise_str.split('-')[1].split(':')]
add_noise_clamp = len(add_noise_str.split('-'))>2 and add_noise_str.split('-')[2] == 'clamp'
return add_noise_type, add_noise_opt, add_noise_clamp
else:
return None, None, None
def _add_noise(self, clean_img:torch.Tensor, add_noise_type:str, opt:list, clamp:bool=False) -> torch.Tensor:
'''
add various noise to clean image.
Args:
clean_img (Tensor) : clean image to synthesize on
add_noise_type : below types are available
opt (list) : args for synthsize noise
clamp (bool) : optional, clamp noisy image into [0,255]
Return:
synthesized_img
Noise_types
- bypass : bypass clean image
- uni : uniform distribution noise from -opt[0] ~ opt[0]
- gau : gaussian distribution noise with zero-mean & opt[0] variance
- gau_blind : blind gaussian distribution with zero-mean, variance is uniformly selected from opt[0] ~ opt[1]
- struc_gau : structured gaussian noise. gaussian filter is applied to above gaussian noise. opt[0] is variance of gaussian, opt[1] is window size and opt[2] is sigma of gaussian filter.
- het_gau : heteroscedastic gaussian noise with indep weight:opt[0], dep weight:opt[1]
'''
nlf = None
if add_noise_type == 'bypass':
# bypass clean image
synthesized_img = clean_img
elif add_noise_type == 'uni':
# add uniform noise
synthesized_img = clean_img + 2*opt[0] * torch.rand(clean_img.shape) - opt[0]
elif add_noise_type == 'gau':
# add AWGN
nlf = opt[0]
synthesized_img = clean_img + torch.normal(mean=0., std=nlf, size=clean_img.shape)
elif add_noise_type == 'gau_blind':
# add blind gaussian noise
nlf = random.uniform(opt[0], opt[1])
synthesized_img = clean_img + torch.normal(mean=0., std=nlf, size=clean_img.shape)
elif add_noise_type == 'struc_gau':
# add structured gaussian noise (used in the paper "Noiser2Noise": https://arxiv.org/pdf/1910.11908.pdf)
nlf = opt[0]
gau_noise = torch.normal(mean=0., std=opt[0], size=clean_img.shape)
struc_gau = mean_conv2d(gau_noise, window_size=int(opt[1]), sigma=opt[2], keep_sigma=True)
synthesized_img = clean_img + struc_gau
elif add_noise_type == 'het_gau':
# add heteroscedastic guassian noise
het_gau_std = (clean_img * (opt[0]**2) + torch.ones(clean_img.shape) * (opt[1]**2)).sqrt()
nlf = het_gau_std
synthesized_img = clean_img + torch.normal(mean=0., std=nlf)
else:
raise RuntimeError('undefined additive noise type : %s'%add_noise_type)
if clamp:
synthesized_img = torch.clamp(synthesized_img, 0, 255)
return synthesized_img, nlf
def _augmentation(self, data:dict, aug:list):
'''
Parsing augmentation list and apply it to the data images.
'''
# parsign augmentation
rot, hflip = 0, 0
for aug_name in aug:
# aug : random rotation
if aug_name == 'rot':
rot = random.randint(0,3)
# aug : random flip
elif aug_name == 'hflip':
hflip = random.randint(0,1)
else:
raise RuntimeError('undefined augmentation option : %s'%aug_name)
# for every data(only image), apply rotation and flipping augmentation.
for key in data:
if self._is_image_tensor(data[key]):
# random rotation and flip
if rot != 0 or hflip != 0:
data[key] = rot_hflip_img(data[key], rot, hflip)
return data
#----------------------------#
# Image saving functions #
#----------------------------#
def save_all_image(self, dir, clean=False, syn_noisy=False, real_noisy=False):
for idx in range(len(self.img_paths)):
data = self.__getitem__(idx)
if clean and 'clean' in data:
cv2.imwrite(os.path.join(dir, '%04d_CL.png'%idx), tensor2np(data['clean']))
if syn_noisy and 'syn_noisy' in data:
cv2.imwrite(os.path.join(dir, '%04d_SN.png'%idx), tensor2np(data['syn_noisy']))
if real_noisy and 'real_noisy' in data:
cv2.imwrite(os.path.join(dir, '%04d_RN.png'%idx), tensor2np(data['real_noisy']))
print('image %04d saved!'%idx)
def prep_save(self, img_idx:int, img_size:int, overlap:int, clean:bool=False, syn_noisy:bool=False, real_noisy:bool=False):
'''
cropping am image into mini-size patches for efficient training.
Args:
img_idx (int) : index of image
img_size (int) : size of image
overlap (int) : overlap between patches
clean (bool) : save clean image (default: False)
syn_noisy (bool) : save synthesized noisy image (default: False)
real_noisy (bool) : save real noisy image (default: False)
'''
d_name = '%s_s%d_o%d'%(self.__class__.__name__, img_size, overlap)
os.makedirs(os.path.join(self.dataset_dir, 'prep', d_name), exist_ok=True)
assert overlap < img_size
stride = img_size - overlap
if clean:
clean_dir = os.path.join(self.dataset_dir, 'prep', d_name, 'CL')
os.makedirs(clean_dir, exist_ok=True)
if syn_noisy:
syn_noisy_dir = os.path.join(self.dataset_dir, 'prep', d_name, 'SN')
os.makedirs(syn_noisy_dir, exist_ok=True)
if real_noisy:
real_noisy_dir = os.path.join(self.dataset_dir, 'prep', d_name, 'RN')
os.makedirs(real_noisy_dir, exist_ok=True)
data = self.__getitem__(img_idx)
c,h,w = data['clean'].shape if 'clean' in data else data['real_noisy'].shape
for h_idx in range((h-img_size)//stride + 1):
for w_idx in range((w-img_size+1)//stride + 1):
hl, hr = h_idx*stride, h_idx*stride+img_size
wl, wr = w_idx*stride, w_idx*stride+img_size
if clean: cv2.imwrite(os.path.join(clean_dir, '%d_%d_%d.png'%(img_idx, h_idx, w_idx)), tensor2np(data['clean'][:,hl:hr,wl:wr]))
if syn_noisy: cv2.imwrite(os.path.join(syn_noisy_dir, '%d_%d_%d.png'%(img_idx, h_idx, w_idx)), tensor2np(data['syn_noisy'][:,hl:hr,wl:wr]))
if real_noisy: cv2.imwrite(os.path.join(real_noisy_dir, '%d_%d_%d.png'%(img_idx, h_idx, w_idx)), tensor2np(data['real_noisy'][:,hl:hr,wl:wr]))
print('Cropped image %d / %d'%(img_idx, self.__len__()))
#----------------------------#
# etc #
#----------------------------#
def _is_image_tensor(self, x):
'''
return input tensor has image shape. (include batched image)
'''
if isinstance(x, torch.Tensor):
if len(x.shape) == 3 or len(x.shape) == 4:
if x.dtype != torch.bool:
return True
return False
class ReturnMergedDataset():
    '''Factory wrapper: remembers a list of dataset names and instantiates a MergedDataset when called.'''
    def __init__(self, d_list):
        self.d_list = d_list

    def __call__(self, *args, **kwargs):
        # forward all construction arguments to the merged dataset
        return MergedDataset(self.d_list, *args, **kwargs)
class MergedDataset(Dataset):
    def __init__(self, d_list, *args, **kwargs):
        '''
        Merged denoising dataset used when multiple datasets are combined.
        Every dataset name in d_list is instantiated with the same (*args, **kwargs).
        see more details of DenoiseDataSet
        '''
        # BUGFIX: the datahandler package exposes get_dataset_class (see datahandler/__init__.py);
        # the previously imported get_dataset_object does not exist and raised ImportError here.
        from ..datahandler import get_dataset_class
        self.dataset_list = []
        for d in d_list:
            self.dataset_list.append(get_dataset_class(d)(*args, **kwargs))
        # keep only the data keys that every merged dataset can provide
        self.data_contents_flags = {'clean':True, 'noisy':True, 'real_noisy':True}
        self.dataset_length = []
        for d in self.dataset_list:
            self.dataset_length.append(d.__len__())
            data_sample = d.__getitem__(0)
            for key in self.data_contents_flags.keys():
                if not key in data_sample:
                    self.data_contents_flags[key] = False

    def __len__(self):
        return sum(self.dataset_length)

    def __getitem__(self, idx):
        # walk the datasets in order until idx falls inside one of them
        t_idx = idx
        for d_idx, d in enumerate(self.dataset_list):
            if t_idx < self.dataset_length[d_idx]:
                data = d.__getitem__(t_idx)
                # return only keys available in every merged dataset
                return_data = {}
                for key in self.data_contents_flags.keys():
                    if self.data_contents_flags[key]:
                        return_data[key] = data[key]
                return return_data
            t_idx -= self.dataset_length[d_idx]
        raise RuntimeError('index of merged dataset contains some bugs, total length %d, requiring idx %d'%(self.__len__(), idx))
| 17,823
| 41.539379
| 198
|
py
|
AP-BSN
|
AP-BSN-master/src/datahandler/SIDD.py
|
import os
import scipy.io
import numpy as np
from src.datahandler.denoise_dataset import DenoiseDataSet
from . import regist_dataset
@regist_dataset
class SIDD(DenoiseDataSet):
    '''
    SIDD datatset class using original images.
    '''
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def _scan(self):
        root = os.path.join(self.dataset_dir, 'SIDD/SIDD_Medium_Srgb/Data')
        assert os.path.exists(root), 'There is no dataset %s'%root
        # scan all image path & info in dataset path
        for folder_name in os.listdir(root):
            # parse folder name of each shot
            meta = self._parse_folder_name(folder_name)
            # each shot folder holds two clean/noisy pairs: indices 010 and 011
            for pair_id in ('010', '011'):
                self.img_paths.append({
                    'instances': meta,
                    'clean_img_path': os.path.join(root, folder_name, '%s_GT_SRGB_%s.PNG'%(meta['scene_instance_number'], pair_id)),
                    'noisy_img_path': os.path.join(root, folder_name, '%s_NOISY_SRGB_%s.PNG'%(meta['scene_instance_number'], pair_id)),
                })

    def _load_data(self, data_idx):
        info = self.img_paths[data_idx]
        return {'clean': self._load_img(info['clean_img_path']),
                'real_noisy': self._load_img(info['noisy_img_path']),
                'instances': info['instances']}

    def _parse_folder_name(self, name):
        # folder name format: instance_scene_camera_ISO_shutter_temperature_brightness
        keys = ('scene_instance_number', 'scene_number', 'smartphone_camera_code',
                'ISO_speed', 'shutter_speed', 'illuminant_temperature',
                'illuminant_brightness_code')
        tokens = name.split('_')
        return {k: tokens[i] for i, k in enumerate(keys)}
@regist_dataset
class prep_SIDD(DenoiseDataSet):
    '''
    dataset class for prepared SIDD dataset which is cropped with overlap.
    [using size 512x512 with 128 overlapping]
    '''
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def _scan(self):
        self.dataset_path = os.path.join(self.dataset_dir, 'prep/SIDD_s512_o128')
        assert os.path.exists(self.dataset_path), 'There is no dataset %s'%self.dataset_path
        # file names are identical between RN/ and CL/, so scanning RN/ is enough
        for _, _, filenames in os.walk(os.path.join(self.dataset_path, 'RN')):
            self.img_paths = filenames

    def _load_data(self, data_idx):
        fname = self.img_paths[data_idx]
        noisy = self._load_img(os.path.join(self.dataset_path, 'RN', fname))
        clean = self._load_img(os.path.join(self.dataset_path, 'CL', fname))
        return {'clean': clean, 'real_noisy': noisy}
@regist_dataset
class SIDD_val(DenoiseDataSet):
    '''
    SIDD validation dataset class
    '''
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def _scan(self):
        dataset_path = os.path.join(self.dataset_dir, 'SIDD')
        assert os.path.exists(dataset_path), 'There is no dataset %s'%dataset_path
        clean_mat = os.path.join(dataset_path, 'ValidationGtBlocksSrgb.mat')
        noisy_mat = os.path.join(dataset_path, 'ValidationNoisyBlocksSrgb.mat')
        self.clean_patches = np.array(scipy.io.loadmat(clean_mat, appendmat=False)['ValidationGtBlocksSrgb'])
        self.noisy_patches = np.array(scipy.io.loadmat(noisy_mat, appendmat=False)['ValidationNoisyBlocksSrgb'])
        # __len__ relies on len(img_paths): 1280 = 40 images x 32 patches each
        self.img_paths = [None] * 1280

    def _load_data(self, data_idx):
        # flat index -> (image, patch-within-image)
        img_id, patch_id = divmod(data_idx, 32)
        clean = self._load_img_from_np(self.clean_patches[img_id, patch_id, :].astype(float))
        noisy = self._load_img_from_np(self.noisy_patches[img_id, patch_id, :].astype(float))
        return {'clean': clean, 'real_noisy': noisy}
@regist_dataset
class SIDD_benchmark(DenoiseDataSet):
    '''
    SIDD benchmark dataset class
    '''
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def _scan(self):
        dataset_path = os.path.join(self.dataset_dir, 'SIDD')
        assert os.path.exists(dataset_path), 'There is no dataset %s'%dataset_path
        mat_path = os.path.join(dataset_path, 'BenchmarkNoisyBlocksSrgb.mat')
        self.noisy_patches = np.array(scipy.io.loadmat(mat_path, appendmat=False)['BenchmarkNoisyBlocksSrgb'])
        # __len__ relies on len(img_paths): 1280 = 40 images x 32 patches each
        self.img_paths = [None] * 1280

    def _load_data(self, data_idx):
        # flat index -> (image, patch-within-image); no ground truth exists for the benchmark
        img_id, patch_id = divmod(data_idx, 32)
        noisy = self._load_img_from_np(self.noisy_patches[img_id, patch_id, :].astype(float))
        return {'real_noisy': noisy}
@regist_dataset
class prep_SIDD_benchmark(DenoiseDataSet):
    '''Prepared SIDD benchmark dataset: 256x256 noisy patches, no overlap, no ground truth.'''
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def _scan(self):
        self.dataset_path = os.path.join(self.dataset_dir, 'prep/SIDD_benchmark_s256_o0')
        assert os.path.exists(self.dataset_path), 'There is no dataset %s'%self.dataset_path
        for _, _, filenames in os.walk(os.path.join(self.dataset_path, 'RN')):
            self.img_paths = filenames

    def _load_data(self, data_idx):
        fname = self.img_paths[data_idx]
        return {'real_noisy': self._load_img(os.path.join(self.dataset_path, 'RN', fname))}
| 6,414
| 37.413174
| 139
|
py
|
AP-BSN
|
AP-BSN-master/src/datahandler/NIND.py
|
import os
from src.datahandler.denoise_dataset import DenoiseDataSet
from . import regist_dataset
@regist_dataset
class NIND(DenoiseDataSet):
    '''
    NIND datatset class using original images.
    '''
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def _scan(self):
        dataset_path = os.path.join(self.dataset_dir, 'NIND')
        assert os.path.exists(dataset_path), 'There is no dataset %s'%dataset_path
        # scan all image path & info in dataset path
        for folder_name in os.listdir(dataset_path):
            for dirpath, _, filenames in os.walk(os.path.join(dataset_path, folder_name)):
                ordered = sorted(filenames, key=self.ISO_sortkey)
                if not ordered:
                    continue
                # the lowest-ISO shot serves as the clean reference for the scene
                clean_name = ordered[0]
                for noisy_name in ordered[1:]:
                    self.img_paths.append({
                        'instances': self._parse_filename(noisy_name),
                        'noisy_img_path': os.path.join(dirpath, noisy_name),
                        'clean_img_path': os.path.join(dirpath, clean_name),
                    })

    def _load_data(self, data_idx):
        info = self.img_paths[data_idx]
        return {'clean': self._load_img(info['clean_img_path']),
                'real_noisy': self._load_img(info['noisy_img_path'])}

    def _parse_filename(self, name):
        # file name format: NIND_Scene_ISO.ext
        tokens = name.split('.')[0].split('_')
        return {'scene': tokens[1], 'ISO': tokens[2]}

    def ISO_sortkey(self, name):
        '''Map an ISO code embedded in the file name to a sortable integer.'''
        code = name.split('ISO')[1].split('.')[0]
        if 'H' in code:
            # extended "H" ISO codes sort above all numeric values
            h_map = {'H1': 10000, 'H2': 20000, 'H3': 30000, 'H4': 40000}
            if code not in h_map:
                raise RuntimeError('%s'%code)
            return h_map[code]
        if '-' in code:
            parts = code.split('-')
            return int(parts[0]) + int(parts[1])
        return int(code)
@regist_dataset
class prep_NIND(DenoiseDataSet):
    '''
    dataset class for prepared NIND dataset which is cropped with overlap.
    [using size 512x512 with 128 overlapping]
    '''
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def _scan(self):
        self.dataset_path = os.path.join(self.dataset_dir, 'prep/NIND_s512_o128')
        assert os.path.exists(self.dataset_path), 'There is no dataset %s'%self.dataset_path
        # file names are identical between RN/ and CL/, so scanning RN/ is enough
        for _, _, filenames in os.walk(os.path.join(self.dataset_path, 'RN')):
            self.img_paths = filenames

    def _load_data(self, data_idx):
        fname = self.img_paths[data_idx]
        noisy = self._load_img(os.path.join(self.dataset_path, 'RN', fname))
        clean = self._load_img(os.path.join(self.dataset_path, 'CL', fname))
        return {'clean': clean, 'real_noisy': noisy}
| 3,354
| 35.075269
| 120
|
py
|
AP-BSN
|
AP-BSN-master/src/datahandler/__init__.py
|
import os
from importlib import import_module
from .denoise_dataset import ReturnMergedDataset
# global registry: lowercase dataset name -> dataset class
dataset_class_dict = {}

def regist_dataset(dataset_class):
    '''Class decorator: register a dataset class under its lowercase name.'''
    key = dataset_class.__name__.lower()
    assert not key in dataset_class_dict, 'there is already registered dataset: %s in dataset_class_dict.' % key
    dataset_class_dict[key] = dataset_class
    return dataset_class

def get_dataset_class(dataset_name):
    '''Look up a registered dataset class; "a+b" returns a merged-dataset factory.'''
    dataset_name = dataset_name.lower()
    # Case of using multiple dataset
    if '+' in dataset_name:
        merge_data_list = dataset_name.replace(' ', '').split('+')
        return ReturnMergedDataset(merge_data_list)
    # Single dataset
    return dataset_class_dict[dataset_name]
# import every python module in this datahandler package so that each
# @regist_dataset-decorated class registers itself as an import side effect
for module in os.listdir(os.path.dirname(__file__)):
    if module == '__init__.py' or module[-3:] != '.py':
        continue
    import_module('src.datahandler.{}'.format(module[:-3]))
del module
| 1,076
| 29.771429
| 130
|
py
|
AP-BSN
|
AP-BSN-master/src/loss/recon.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from . import regist_loss
eps = 1e-6
# ============================ #
# Reconstruction loss #
# ============================ #
@regist_loss
class L1():
    '''Supervised L1 loss between the reconstruction and the clean target.'''
    def __call__(self, input_data, model_output, data, module):
        recon = model_output['recon']
        return F.l1_loss(recon, data['clean'])
@regist_loss
class L2():
    '''Supervised L2 (MSE) loss between the reconstruction and the clean target.'''
    def __call__(self, input_data, model_output, data, module):
        recon = model_output['recon']
        return F.mse_loss(recon, data['clean'])
| 563
| 20.692308
| 63
|
py
|
AP-BSN
|
AP-BSN-master/src/loss/__init__.py
|
import os
from importlib import import_module
import torch
import torch.nn as nn
# global registry: loss name (case-sensitive, as written in the loss string) -> loss class
loss_class_dict = {}

def regist_loss(loss_class):
    '''Class decorator: register a loss class under its exact class name.'''
    name = loss_class.__name__
    assert not name in loss_class_dict, 'there is already registered loss name: %s in loss_class_dict.' % name
    loss_class_dict[name] = loss_class
    return loss_class
'''
## default format of loss ##
@regist_loss
class ():
def __call__(self, input_data, model_output, data, model):
## example of loss: L1 loss ##
@regist_loss
class L1():
def __call__(self, input_data, model_output, data, module):
output = model_output['recon']
return F.l1_loss(output, data['clean'])
'''
# import every python module in this loss package so that each
# @regist_loss-decorated class registers itself as an import side effect
for module in os.listdir(os.path.dirname(__file__)):
    if module == '__init__.py' or module[-3:] != '.py':
        continue
    import_module('src.loss.{}'.format(module[:-3]))
del module
class Loss(nn.Module):
    '''
    Composite training loss built from a specification string such as
    "1*L1 + r0.5*self_L2": each term is "weight*name", where a leading 'r'
    in the weight marks the term as ratio-scaled (weight grows with training
    progress via the `ratio` argument of forward()).
    '''
    def __init__(self, loss_string, tmp_info=[]):
        # NOTE(review): tmp_info uses a mutable default; it is only iterated,
        # never mutated, so this is harmless in practice.
        super().__init__()
        loss_string = loss_string.replace(' ', '')
        # parse loss string
        self.loss_list = []
        for single_loss in loss_string.split('+'):
            # each term looks like "<weight>*<name>", e.g. "1*L1" or "r0.5*L2"
            weight, name = single_loss.split('*')
            ratio = True if 'r' in weight else False
            weight = float(weight.replace('r', ''))
            if name in loss_class_dict:
                self.loss_list.append({ 'name': name,
                                        'weight': float(weight),
                                        'func': loss_class_dict[name](),
                                        'ratio': ratio})
            else:
                raise RuntimeError('undefined loss term: {}'.format(name))
        # parse temporal information string (logged-only metrics, no gradient)
        self.tmp_info_list = []
        for name in tmp_info:
            if name in loss_class_dict:
                self.tmp_info_list.append({ 'name': name,
                                            'func': loss_class_dict[name]()})
            else:
                raise RuntimeError('undefined loss term: {}'.format(name))
    def forward(self, input_data, model_output, data, module, loss_name=None, change_name=None, ratio=1.0):
        '''
        forward all loss and return as dict format.
        Args
            input_data   : input of the network (also in the data)
            model_output : output of the network
            data         : entire batch of data
            module       : dictionary of modules (for another network forward)
            loss_name    : (optional) choose specific loss with name
            change_name  : (optional) replace name of chosen loss
            ratio        : (optional) percentage of learning procedure for increase weight during training
        Return
            losses       : dictionary of loss
        '''
        loss_arg = (input_data, model_output, data, module)
        # calculate only specific loss 'loss_name' and change its name to 'change_name'
        if loss_name is not None:
            for single_loss in self.loss_list:
                if loss_name == single_loss['name']:
                    loss = single_loss['weight'] * single_loss['func'](*loss_arg)
                    if single_loss['ratio']: loss *= ratio
                    if change_name is not None:
                        return {change_name: loss}
                    return {single_loss['name']: loss}
            raise RuntimeError('there is no such loss in training losses: {}'.format(loss_name))
        # normal case: calculate all training losses at one time
        losses = {}
        for single_loss in self.loss_list:
            losses[single_loss['name']] = single_loss['weight'] * single_loss['func'](*loss_arg)
            if single_loss['ratio']: losses[single_loss['name']] *= ratio
        # calculate temporal information
        tmp_info = {}
        for single_tmp_info in self.tmp_info_list:
            # don't need gradient
            with torch.no_grad():
                tmp_info[single_tmp_info['name']] = single_tmp_info['func'](*loss_arg)
        return losses, tmp_info
| 4,173
| 36.267857
| 120
|
py
|
AP-BSN
|
AP-BSN-master/src/loss/recon_self.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from . import regist_loss
eps = 1e-6
# ============================ #
# Self-reconstruction loss #
# ============================ #
@regist_loss
class self_L1():
    '''Self-supervised L1 loss: reconstruction vs. the noisy target (synthetic first, else real).'''
    def __call__(self, input_data, model_output, data, module):
        if 'syn_noisy' in data:
            target = data['syn_noisy']
        else:
            target = data['real_noisy']
        return F.l1_loss(model_output['recon'], target)
@regist_loss
class self_L2():
    '''Self-supervised L2 (MSE) loss: reconstruction vs. the noisy target (synthetic first, else real).'''
    def __call__(self, input_data, model_output, data, module):
        if 'syn_noisy' in data:
            target = data['syn_noisy']
        else:
            target = data['real_noisy']
        return F.mse_loss(model_output['recon'], target)
| 750
| 24.896552
| 87
|
py
|
AP-BSN
|
AP-BSN-master/src/model/DBSNl.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from . import regist_model
@regist_model
class DBSNl(nn.Module):
    '''
    Dilated Blind-Spot Network (cutomized light version)

    self-implemented version of the network from "Unpaired Learning of Deep Image Denoising (ECCV 2020)"
    and several modificaions are included.
    see our supple for more details.
    '''
    def __init__(self, in_ch=3, out_ch=3, base_ch=128, num_module=9):
        '''
        Args:
            in_ch      : number of input channel
            out_ch     : number of output channel
            base_ch    : number of base channel
            num_module : number of modules in the network
        '''
        super().__init__()
        assert base_ch%2 == 0, "base channel should be divided with 2"

        # 1x1 stem lifting the input into the feature space
        self.head = nn.Sequential(
            nn.Conv2d(in_ch, base_ch, kernel_size=1),
            nn.ReLU(inplace=True),
        )

        # two parallel branches with different blind-spot strides
        self.branch1 = DC_branchl(2, base_ch, num_module)
        self.branch2 = DC_branchl(3, base_ch, num_module)

        # 1x1 fusion head mapping concatenated branch features to the output
        self.tail = nn.Sequential(
            nn.Conv2d(base_ch*2, base_ch, kernel_size=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(base_ch, base_ch//2, kernel_size=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(base_ch//2, base_ch//2, kernel_size=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(base_ch//2, out_ch, kernel_size=1),
        )

    def forward(self, x):
        feat = self.head(x)
        merged = torch.cat([self.branch1(feat), self.branch2(feat)], dim=1)
        return self.tail(merged)

    def _initialize_weights(self):
        # Liyong version
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                # n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, (2 / (9.0 * 64)) ** 0.5)
class DC_branchl(nn.Module):
    '''One dilated-convolution branch of DBSNl with the given blind-spot stride.'''
    def __init__(self, stride, in_ch, num_module):
        super().__init__()
        # masked conv creates the blind spot, followed by two 1x1 mixing convs
        layers = [
            CentralMaskedConv2d(in_ch, in_ch, kernel_size=2*stride-1, stride=1, padding=stride-1),
            nn.ReLU(inplace=True),
            nn.Conv2d(in_ch, in_ch, kernel_size=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(in_ch, in_ch, kernel_size=1),
            nn.ReLU(inplace=True),
        ]
        # num_module residual dilated-conv blocks, then a final 1x1 + ReLU
        layers.extend(DCl(stride, in_ch) for _ in range(num_module))
        layers.append(nn.Conv2d(in_ch, in_ch, kernel_size=1))
        layers.append(nn.ReLU(inplace=True))
        self.body = nn.Sequential(*layers)

    def forward(self, x):
        return self.body(x)
class DCl(nn.Module):
    '''Residual block: 3x3 dilated conv -> ReLU -> 1x1 conv, added back to the input.'''
    def __init__(self, stride, in_ch):
        super().__init__()
        self.body = nn.Sequential(
            nn.Conv2d(in_ch, in_ch, kernel_size=3, stride=1, padding=stride, dilation=stride),
            nn.ReLU(inplace=True),
            nn.Conv2d(in_ch, in_ch, kernel_size=1),
        )

    def forward(self, x):
        # residual connection keeps the channel count and spatial size unchanged
        return x + self.body(x)
class CentralMaskedConv2d(nn.Conv2d):
    '''
    Conv2d whose central kernel tap is forced to zero (blind-spot convolution):
    the output at a pixel never sees that pixel's own input value.
    '''
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.register_buffer('mask', self.weight.data.clone())
        _, _, kH, kW = self.weight.size()
        self.mask.fill_(1)
        # BUGFIX: the width index previously reused kH//2 (kH twice), which masked the
        # wrong position for non-square kernels; use kW//2 for the width axis.
        self.mask[:, :, kH//2, kW//2] = 0

    def forward(self, x):
        # re-apply the mask every forward so optimizer updates cannot revive the center tap
        self.weight.data *= self.mask
        return super().forward(x)
| 3,510
| 30.630631
| 104
|
py
|
AP-BSN
|
AP-BSN-master/src/model/__init__.py
|
import os
from importlib import import_module
# global registry: lowercase model name -> model class
model_class_dict = {}

def regist_model(model_class):
    '''Class decorator: register a model class under its lowercase name.'''
    key = model_class.__name__.lower()
    assert not key in model_class_dict, 'there is already registered model: %s in model_class_dict.' % key
    model_class_dict[key] = model_class
    return model_class

def get_model_class(model_name:str):
    '''Look up a registered model class by (case-insensitive) name.'''
    return model_class_dict[model_name.lower()]
# import every python module in this model package so that each
# @regist_model-decorated class registers itself as an import side effect
for module in os.listdir(os.path.dirname(__file__)):
    if module == '__init__.py' or module[-3:] != '.py':
        continue
    import_module('src.model.{}'.format(module[:-3]))
del module
| 687
| 28.913043
| 120
|
py
|
AP-BSN
|
AP-BSN-master/src/model/APBSN.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from ..util.util import pixel_shuffle_down_sampling, pixel_shuffle_up_sampling
from . import regist_model
from .DBSNl import DBSNl
@regist_model
class APBSN(nn.Module):
    '''
    Asymmetric PD Blind-Spot Network (AP-BSN)
    '''
    def __init__(self, pd_a=5, pd_b=2, pd_pad=2, R3=True, R3_T=8, R3_p=0.16,
                    bsn='DBSNl', in_ch=3, bsn_base_ch=128, bsn_num_module=9):
        '''
        Args:
            pd_a           : 'PD stride factor' during training
            pd_b           : 'PD stride factor' during inference
            pd_pad         : pad size between sub-images by PD process
            R3             : flag of 'Random Replacing Refinement'
            R3_T           : number of masks for R3
            R3_p           : probability of R3
            bsn            : blind-spot network type
            in_ch          : number of input image channel
            bsn_base_ch    : number of bsn base channel
            bsn_num_module : number of module
        '''
        super().__init__()
        # network hyper-parameters
        self.pd_a    = pd_a
        self.pd_b    = pd_b
        self.pd_pad  = pd_pad
        self.R3      = R3
        self.R3_T    = R3_T
        self.R3_p    = R3_p
        # define network
        if bsn == 'DBSNl':
            self.bsn = DBSNl(in_ch, in_ch, bsn_base_ch, bsn_num_module)
        else:
            raise NotImplementedError('bsn %s is not implemented'%bsn)
    def forward(self, img, pd=None):
        '''
        Foward function includes sequence of PD, BSN and inverse PD processes.
        Note that denoise() function is used during inference time (for differenct pd factor and R3).
        '''
        # default pd factor is training factor (a)
        if pd is None: pd = self.pd_a
        # do PD
        if pd > 1:
            pd_img = pixel_shuffle_down_sampling(img, f=pd, pad=self.pd_pad)
        else:
            # pd == 1: no shuffle, only pad so the BSN sees the same border context
            p = self.pd_pad
            pd_img = F.pad(img, (p,p,p,p))
        # forward blind-spot network
        pd_img_denoised = self.bsn(pd_img)
        # do inverse PD
        if pd > 1:
            img_pd_bsn = pixel_shuffle_up_sampling(pd_img_denoised, f=pd, pad=self.pd_pad)
        else:
            # strip the padding added above
            p = self.pd_pad
            img_pd_bsn = pd_img_denoised[:,:,p:-p,p:-p]
        return img_pd_bsn
    def denoise(self, x):
        '''
        Denoising process for inference.
        Pads x so H and W are divisible by pd_b, runs PD-BSN, then optionally
        applies Random Replacing Refinement (R3); returns a tensor cropped back
        to the original H x W.
        '''
        b,c,h,w = x.shape
        # pad images for PD process
        if h % self.pd_b != 0:
            x = F.pad(x, (0, 0, 0, self.pd_b - h%self.pd_b), mode='constant', value=0)
        if w % self.pd_b != 0:
            x = F.pad(x, (0, self.pd_b - w%self.pd_b, 0, 0), mode='constant', value=0)
        # forward PD-BSN process with inference pd factor
        img_pd_bsn = self.forward(img=x, pd=self.pd_b)
        # Random Replacing Refinement
        if not self.R3:
            ''' Directly return the result (w/o R3) '''
            return img_pd_bsn[:,:,:h,:w]
        else:
            # average R3_T passes; in each, a random R3_p fraction of pixels is
            # replaced with the original noisy values before re-running the BSN
            denoised = torch.empty(*(x.shape), self.R3_T, device=x.device)
            for t in range(self.R3_T):
                indice = torch.rand_like(x)
                mask = indice < self.R3_p
                tmp_input = torch.clone(img_pd_bsn).detach()
                tmp_input[mask] = x[mask]
                p = self.pd_pad
                tmp_input = F.pad(tmp_input, (p,p,p,p), mode='reflect')
                if self.pd_pad == 0:
                    denoised[..., t] = self.bsn(tmp_input)
                else:
                    denoised[..., t] = self.bsn(tmp_input)[:,:,p:-p,p:-p]
            # NOTE(review): the R3 branch returns the full padded H x W (not
            # cropped to [:h,:w] like the non-R3 branch) — confirm callers expect this.
            return torch.mean(denoised, dim=-1)
'''
elif self.R3 == 'PD-refinement':
s = 2
denoised = torch.empty(*(x.shape), s**2, device=x.device)
for i in range(s):
for j in range(s):
tmp_input = torch.clone(x_mean).detach()
tmp_input[:,:,i::s,j::s] = x[:,:,i::s,j::s]
p = self.pd_pad
tmp_input = F.pad(tmp_input, (p,p,p,p), mode='reflect')
if self.pd_pad == 0:
denoised[..., i*s+j] = self.bsn(tmp_input)
else:
denoised[..., i*s+j] = self.bsn(tmp_input)[:,:,p:-p,p:-p]
return_denoised = torch.mean(denoised, dim=-1)
else:
raise RuntimeError('post-processing type not supported')
'''
| 4,559
| 34.625
| 101
|
py
|
CoTr
|
CoTr-main/nnUNet/setup.py
|
from setuptools import setup, find_namespace_packages

# Package metadata and console entry points for the nnU-Net framework.
setup(name='nnunet',
      packages=find_namespace_packages(include=["nnunet", "nnunet.*"]),
      version='1.6.6',
      description='nnU-Net. Framework for out-of-the box biomedical image segmentation.',
      url='https://github.com/MIC-DKFZ/nnUNet',
      author='Division of Medical Image Computing, German Cancer Research Center',
      author_email='f.isensee@dkfz-heidelberg.de',
      license='Apache License Version 2.0, January 2004',
      install_requires=[
          "tqdm",
          "dicom2nifti",
          "scikit-image>=0.14",
          "medpy",
          "scipy",
          "batchgenerators>=0.21",
          "numpy",
          # BUGFIX: the PyPI distribution name is 'scikit-learn'; the 'sklearn'
          # package is a deprecated dummy that breaks installation.
          "scikit-learn",
          "SimpleITK",
          "pandas",
          "requests",
          "nibabel", 'tifffile'
      ],
      entry_points={
          'console_scripts': [
              'nnUNet_convert_decathlon_task = nnunet.experiment_planning.nnUNet_convert_decathlon_task:main',
              'nnUNet_plan_and_preprocess = nnunet.experiment_planning.nnUNet_plan_and_preprocess:main',
              'nnUNet_train = nnunet.run.run_training:main',
              'nnUNet_train_DP = nnunet.run.run_training_DP:main',
              'nnUNet_train_DDP = nnunet.run.run_training_DDP:main',
              'nnUNet_predict = nnunet.inference.predict_simple:main',
              'nnUNet_ensemble = nnunet.inference.ensemble_predictions:main',
              'nnUNet_find_best_configuration = nnunet.evaluation.model_selection.figure_out_what_to_submit:main',
              'nnUNet_print_available_pretrained_models = nnunet.inference.pretrained_models.download_pretrained_model:print_available_pretrained_models',
              'nnUNet_print_pretrained_model_info = nnunet.inference.pretrained_models.download_pretrained_model:print_pretrained_model_requirements',
              'nnUNet_download_pretrained_model = nnunet.inference.pretrained_models.download_pretrained_model:download_by_name',
              'nnUNet_download_pretrained_model_by_url = nnunet.inference.pretrained_models.download_pretrained_model:download_by_url',
              'nnUNet_determine_postprocessing = nnunet.postprocessing.consolidate_postprocessing_simple:main',
              'nnUNet_export_model_to_zip = nnunet.inference.pretrained_models.collect_pretrained_models:export_entry_point',
              'nnUNet_install_pretrained_model_from_zip = nnunet.inference.pretrained_models.download_pretrained_model:install_from_zip_entry_point',
              'nnUNet_change_trainer_class = nnunet.inference.change_trainer:main',
              'nnUNet_evaluate_folder = nnunet.evaluation.evaluator:nnunet_evaluate_folder'
          ],
      },
      keywords=['deep learning', 'image segmentation', 'medical image analysis',
                'medical image segmentation', 'nnU-Net', 'nnunet']
      )
| 2,887
| 57.938776
| 154
|
py
|
CoTr
|
CoTr-main/nnUNet/tests/test_steps_for_sliding_window_prediction.py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import unittest2
import numpy as np
from nnunet.network_architecture.neural_network import SegmentationNetwork
class TestSlidingWindow(unittest.TestCase):
    """
    Sanity checks for SegmentationNetwork._compute_steps_for_sliding_window.

    BUGFIX: derive from unittest.TestCase instead of unittest2.TestCase. This
    module is executed via unittest.main(), whose loader collects subclasses of
    unittest.TestCase; the unittest2 base could leave these tests undiscovered.
    """

    def setUp(self) -> None:
        pass

    def _verify_steps(self, steps, patch_size, image_size, step_size):
        """Assert that `steps` tiles the image: starts at 0, ends flush with the
        image border, leaves no gaps, and respects the requested step size."""
        debug_information = 'steps= %s\nimage_size= %s\npatch_size= %s\nstep_size= %0.4f' % (str(steps),
                                                                                            str(image_size),
                                                                                            str(patch_size), step_size)
        target_step_sizes_in_voxels = [i * step_size for i in patch_size]

        # this code is copied from the current implementation. Not ideal, but I don't know how else to get the
        # expected num_steps
        num_steps = [int(np.ceil((i - k) / j)) + 1 for i, j, k in zip(image_size, target_step_sizes_in_voxels,
                                                                      patch_size)]

        self.assertTrue(all([len(i) == num_steps[j] for j, i in enumerate(steps)]),
                        'steps do not match expected num_steps %s. \nDebug: %s' % (str(num_steps), debug_information))

        for dim in range(len(steps)):
            # first step must start at 0
            self.assertTrue(steps[dim][0] == 0)

            # last step + patch size must equal to image size
            self.assertTrue(steps[dim][-1] + patch_size[dim] == image_size[dim], 'not the whole image is covered. '
                                                                                 '\nDebug: %s' % debug_information)

            # there cannot be gaps between adjacent predictions
            self.assertTrue(all([steps[dim][i + 1] <= steps[dim][i] + patch_size[dim] for i in
                                 range(num_steps[dim] - 1)]), 'steps are not overlapping or touching. dim: %d, steps:'
                                                              ' %s, image_size: %s, patch_size: %s, step_size: '
                                                              '%0.4f' % (
                                dim, str(steps[dim]), str(image_size[dim]), str(patch_size[dim]), step_size))

            # two successive steps cannot be further apart than target_step_sizes_in_voxels
            self.assertTrue(all([steps[dim][i] + np.ceil(target_step_sizes_in_voxels[dim]) >= steps[dim][i + 1] for i
                                 in range(num_steps[dim] - 1)]),
                            'consecutive steps are too far apart. Steps: %s, dim: %d. \nDebug: %s' %
                            (str(steps[dim]), dim, debug_information))

    def test_same_image_and_patch_size_3d(self):
        """When patch == image, there must be exactly one step per axis (3D)."""
        image_size = (24, 845, 321)
        patch_size = (24, 845, 321)
        # this should always return steps=[[0],[0],[0]] no matter what step_size we choose
        expected_result = [[0], [0], [0]]

        step_size = 1
        steps = SegmentationNetwork._compute_steps_for_sliding_window(patch_size, image_size, step_size)
        self.assertTrue(steps == expected_result)

        step_size = 0.125
        steps = SegmentationNetwork._compute_steps_for_sliding_window(patch_size, image_size, step_size)
        self.assertTrue(steps == expected_result)

        step_size = 0.5
        steps = SegmentationNetwork._compute_steps_for_sliding_window(patch_size, image_size, step_size)
        self.assertTrue(steps == expected_result)

    def test_same_image_and_patch_size_2d(self):
        """When patch == image, there must be exactly one step per axis (2D)."""
        image_size = (123, 143)
        patch_size = (123, 143)
        # this should always return steps=[[0],[0]] no matter what step_size we choose
        expected_result = [[0], [0]]

        step_size = 1
        steps = SegmentationNetwork._compute_steps_for_sliding_window(patch_size, image_size, step_size)
        self.assertTrue(steps == expected_result)

        step_size = 0.125
        steps = SegmentationNetwork._compute_steps_for_sliding_window(patch_size, image_size, step_size)
        self.assertTrue(steps == expected_result)

        step_size = 0.5
        steps = SegmentationNetwork._compute_steps_for_sliding_window(patch_size, image_size, step_size)
        self.assertTrue(steps == expected_result)

    def test_some_manually_verified_combinations(self):
        """Pin exact step lists for hand-checked (image, patch, step_size) cases."""
        image_size = (128, 260)
        patch_size = (64, 130)
        step_size = 0.5
        steps = SegmentationNetwork._compute_steps_for_sliding_window(patch_size, image_size, step_size)
        self.assertTrue(steps == [[0, 32, 64], [0, 65, 130]])

        step_size = 0.85
        steps = SegmentationNetwork._compute_steps_for_sliding_window(patch_size, image_size, step_size)
        self.assertTrue(steps == [[0, 32, 64], [0, 65, 130]])

        step_size = 1
        steps = SegmentationNetwork._compute_steps_for_sliding_window(patch_size, image_size, step_size)
        self.assertTrue(steps == [[0, 64], [0, 130]])

        # an example from task02
        image_size = (146, 176, 148)
        patch_size = (128, 128, 128)
        step_size = 0.5
        steps = SegmentationNetwork._compute_steps_for_sliding_window(patch_size, image_size, step_size)
        self.assertTrue(steps == [[0, 18], [0, 48], [0, 20]])

        # heart
        image_size = (130, 320, 244)
        patch_size = (80, 192, 160)
        step_size = 0.5
        steps = SegmentationNetwork._compute_steps_for_sliding_window(patch_size, image_size, step_size)
        self.assertTrue(steps == [[0, 25, 50], [0, 64, 128], [0, 42, 84]])

        step_size = 0.75
        steps = SegmentationNetwork._compute_steps_for_sliding_window(patch_size, image_size, step_size)
        self.assertTrue(steps == [[0, 50], [0, 128], [0, 84]])

        # liver
        image_size = (424, 456, 456)
        patch_size = (128, 128, 128)
        step_size = 0.5
        steps = SegmentationNetwork._compute_steps_for_sliding_window(patch_size, image_size, step_size)
        self.assertTrue(steps == [[0, 59, 118, 178, 237, 296],
                                  [0, 55, 109, 164, 219, 273, 328],
                                  [0, 55, 109, 164, 219, 273, 328]]
                        )

        # hippo
        image_size = (40, 56, 40)
        patch_size = (40, 56, 40)
        step_size = 0.5
        steps = SegmentationNetwork._compute_steps_for_sliding_window(patch_size, image_size, step_size)
        self.assertTrue(steps == [[0],
                                  [0],
                                  [0]]
                        )

        # hepaticvessel
        image_size = (94, 308, 308)
        patch_size = (64, 192, 192)
        step_size = 0.5
        steps = SegmentationNetwork._compute_steps_for_sliding_window(patch_size, image_size, step_size)
        self.assertTrue(steps == [[0, 30],
                                  [0, 58, 116],
                                  [0, 58, 116]]
                        )

    def test_loads_of_combinations(self):
        """
        We now take a large number of random combinations and perform sanity checks
        :return:
        """
        for _ in range(5000):
            dim = np.random.choice((2, 3))
            patch_size = tuple(np.random.randint(16, 1024, dim))
            image_size = tuple(np.random.randint(i / 2, i * 10) for i in patch_size)
            # images must be at least as large as the patch in every dimension
            image_size = tuple(max(image_size[i], patch_size[i]) for i in range(len(image_size)))
            step_size = np.random.uniform(0.01, 1)
            steps = SegmentationNetwork._compute_steps_for_sliding_window(patch_size, image_size, step_size)
            self._verify_steps(steps, patch_size, image_size, step_size)
if __name__ == '__main__':
    # Run the sliding-window step tests when executed as a script.
    unittest.main()
| 8,445
| 44.408602
| 120
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/configuration.py
|
import os

# Default number of worker processes used throughout nnU-Net; can be overridden
# via the nnUNet_def_n_proc environment variable.
default_num_threads = int(os.environ.get('nnUNet_def_n_proc', 8))

# Spacing-anisotropy ratio above which the low-resolution axis is resampled
# separately (with NN)
RESAMPLING_SEPARATE_Z_ANISO_THRESHOLD = 3
| 257
| 50.6
| 116
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/__init__.py
|
from __future__ import absolute_import

# Citation banner printed on every `import nnunet`.
print("\n\nPlease cite the following paper when using nnUNet:\n\nIsensee, F., Jaeger, P.F., Kohl, S.A.A. et al. "
      "\"nnU-Net: a self-configuring method for deep learning-based biomedical image segmentation.\" "
      "Nat Methods (2020). https://doi.org/10.1038/s41592-020-01008-z\n\n")
print("If you have questions or suggestions, feel free to open an issue at https://github.com/MIC-DKFZ/nnUNet\n")

# Re-export all subpackages at the package top level.
from . import *
| 462
| 65.142857
| 113
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/paths.py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from batchgenerators.utilities.file_and_folder_operations import maybe_mkdir_p, join
# do not modify these unless you know what you are doing
my_output_identifier = "nnUNet"
default_plans_identifier = "nnUNetPlansv2.1"
default_data_identifier = 'nnUNet'
default_trainer = "nnUNetTrainerV2"
default_cascade_trainer = "nnUNetTrainerV2CascadeFullRes"

"""
PLEASE READ paths.md FOR INFORMATION TO HOW TO SET THIS UP
"""

# Resolve the three nnU-Net base folders from the environment. os.environ.get
# replaces the previous `x if key in os.environ.keys() else None` pattern.
base = os.environ.get('nnUNet_raw_data_base')
preprocessing_output_dir = os.environ.get('nnUNet_preprocessed')
network_training_output_dir_base = os.environ.get('RESULTS_FOLDER')

if base is not None:
    nnUNet_raw_data = join(base, "nnUNet_raw_data")
    nnUNet_cropped_data = join(base, "nnUNet_cropped_data")
    maybe_mkdir_p(nnUNet_raw_data)
    maybe_mkdir_p(nnUNet_cropped_data)
else:
    print("nnUNet_raw_data_base is not defined and nnU-Net can only be used on data for which preprocessed files "
          "are already present on your system. nnU-Net cannot be used for experiment planning and preprocessing like "
          "this. If this is not intended, please read nnunet/paths.md for information on how to set this up properly.")
    nnUNet_cropped_data = nnUNet_raw_data = None

if preprocessing_output_dir is not None:
    maybe_mkdir_p(preprocessing_output_dir)
else:
    # BUGFIX: corrected the typo "nnunet/pathy.md" -> "nnunet/paths.md" so the
    # message points at the real documentation file.
    print("nnUNet_preprocessed is not defined and nnU-Net can not be used for preprocessing "
          "or training. If this is not intended, please read nnunet/paths.md for information on how to set this up.")
    preprocessing_output_dir = None

if network_training_output_dir_base is not None:
    network_training_output_dir = join(network_training_output_dir_base, my_output_identifier)
    maybe_mkdir_p(network_training_output_dir)
else:
    print("RESULTS_FOLDER is not defined and nnU-Net cannot be used for training or "
          "inference. If this is not intended behavior, please read nnunet/paths.md for information on how to set this "
          "up")
    network_training_output_dir = None
| 2,879
| 47.813559
| 128
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/postprocessing/consolidate_postprocessing_simple.py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from nnunet.postprocessing.consolidate_postprocessing import consolidate_folds
from nnunet.utilities.folder_names import get_output_folder_name
from nnunet.utilities.task_name_id_conversion import convert_id_to_task_name
from nnunet.paths import default_cascade_trainer, default_trainer, default_plans_identifier
def main():
    """Command-line entry point: determine the postprocessing for a trained model.

    Useful for when the best configuration (2d, 3d_fullres etc) was selected
    manually rather than via nnUNet_find_best_configuration.
    """
    argparser = argparse.ArgumentParser(usage="Used to determine the postprocessing for a trained model. Useful for "
                                              "when the best configuration (2d, 3d_fullres etc) was selected manually.")
    argparser.add_argument("-m", type=str, required=True, help="U-Net model (2d, 3d_lowres, 3d_fullres or "
                                                               "3d_cascade_fullres)")
    argparser.add_argument("-t", type=str, required=True, help="Task name or id")
    argparser.add_argument("-tr", type=str, required=False, default=None,
                           help="nnUNetTrainer class. Default: %s, unless 3d_cascade_fullres "
                                "(then it's %s)" % (default_trainer, default_cascade_trainer))
    argparser.add_argument("-pl", type=str, required=False, default=default_plans_identifier,
                           help="Plans name, Default=%s" % default_plans_identifier)
    argparser.add_argument("-val", type=str, required=False, default="validation_raw",
                           help="Validation folder name. Default: validation_raw")

    args = argparser.parse_args()
    model = args.m
    task = args.t
    trainer = args.tr
    plans = args.pl
    val = args.val

    # Numeric task ids are translated into the full "TaskXXX_name" form.
    if not task.startswith("Task"):
        task_id = int(task)
        task = convert_id_to_task_name(task_id)

    if trainer is None:
        # CONSISTENCY FIX: use the shared constants imported from nnunet.paths
        # instead of re-hard-coding the trainer class names (the help text above
        # already promises these defaults).
        if model == "3d_cascade_fullres":
            trainer = default_cascade_trainer
        else:
            trainer = default_trainer

    folder = get_output_folder_name(model, task, trainer, plans, None)
    consolidate_folds(folder, val)


if __name__ == "__main__":
    main()
| 2,729
| 43.754098
| 119
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/postprocessing/consolidate_postprocessing.py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import shutil
from typing import Tuple
from batchgenerators.utilities.file_and_folder_operations import *
from nnunet.configuration import default_num_threads
from nnunet.evaluation.evaluator import aggregate_scores
from nnunet.postprocessing.connected_components import determine_postprocessing
import argparse
def collect_cv_niftis(cv_folder: str, output_folder: str, validation_folder_name: str = 'validation_raw',
                      folds: tuple = (0, 1, 2, 3, 4)):
    """Copy the validation niftis of all requested folds into one folder.

    :param cv_folder: experiment folder containing fold_0, fold_1, ... subfolders
    :param output_folder: destination folder (created if necessary)
    :param validation_folder_name: name of the per-fold validation subfolder
    :param folds: fold ids to collect
    """
    folders_folds = [join(cv_folder, "fold_%d" % i) for i in folds]

    assert all([isdir(i) for i in folders_folds]), "some folds are missing"

    # now for each fold, read the postprocessing json. this will tell us what the name of the validation folder is
    validation_raw_folders = [join(cv_folder, "fold_%d" % i, validation_folder_name) for i in folds]

    # now copy all raw niftis into cv_niftis_raw
    maybe_mkdir_p(output_folder)
    # BUGFIX: iterate the folder list directly. The previous code indexed the
    # list with the fold *id* (validation_raw_folders[f]), which selects the
    # wrong folder or raises IndexError for non-contiguous folds such as (2, 3, 4).
    for fold_folder in validation_raw_folders:
        niftis = subfiles(fold_folder, suffix=".nii.gz")
        for n in niftis:
            shutil.copy(n, output_folder)
def consolidate_folds(output_folder_base, validation_folder_name: str = 'validation_raw',
                      advanced_postprocessing: bool = False, folds: Tuple[int] = (0, 1, 2, 3, 4)):
    """
    Used to determine the postprocessing for an experiment after all five folds have been completed. In the validation
    of each fold, the postprocessing can only be determined on the cases within that fold. This can result in different
    postprocessing decisions for different folds. In the end, we can only decide for one postprocessing per experiment,
    so we have to rerun it.

    :param folds: fold ids that make up the cross-validation
    :param advanced_postprocessing: passed through to determine_postprocessing
    :param output_folder_base: experiment output folder (fold_0, fold_1, etc must be subfolders of the given folder)
    :param validation_folder_name: dont use this
    :return:
    """
    output_folder_raw = join(output_folder_base, "cv_niftis_raw")
    output_folder_gt = join(output_folder_base, "gt_niftis")
    collect_cv_niftis(output_folder_base, output_folder_raw, validation_folder_name,
                      folds)

    num_niftis_gt = len(subfiles(join(output_folder_base, "gt_niftis")))
    # count niftis in there
    num_niftis = len(subfiles(output_folder_raw))
    if num_niftis != num_niftis_gt:
        shutil.rmtree(output_folder_raw)
        # BUGFIX: corrected the error message typo "If does not seem" -> "It does not seem"
        raise AssertionError("It does not seem like you trained all the folds! Train all folds first!")

    # load a summary file so that we can know what class labels to expect
    summary_fold0 = load_json(join(output_folder_base, "fold_0", validation_folder_name, "summary.json"))['results'][
        'mean']
    classes = [int(i) for i in summary_fold0.keys()]
    niftis = subfiles(output_folder_raw, join=False, suffix=".nii.gz")

    test_pred_pairs = [(join(output_folder_gt, i), join(output_folder_raw, i)) for i in niftis]

    # determine_postprocessing needs a summary.json file in the folder where the raw predictions are. We could compute
    # that from the summary files of the five folds but I am feeling lazy today
    aggregate_scores(test_pred_pairs, labels=classes, json_output_file=join(output_folder_raw, "summary.json"),
                     num_threads=default_num_threads)

    determine_postprocessing(output_folder_base, output_folder_gt, 'cv_niftis_raw',
                             final_subf_name="cv_niftis_postprocessed", processes=default_num_threads,
                             advanced_postprocessing=advanced_postprocessing)
    # determine_postprocessing will create a postprocessing.json file that can be used for inference
if __name__ == "__main__":
    # Stand-alone entry point: consolidate the per-fold postprocessing decisions
    # of a finished cross-validation into a single one.
    argparser = argparse.ArgumentParser()
    argparser.add_argument("-f", type=str, required=True, help="experiment output folder (fold_0, fold_1, "
                                                               "etc must be subfolders of the given folder)")
    args = argparser.parse_args()
    folder = args.f
    consolidate_folds(folder)
| 4,711
| 48.6
| 121
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/postprocessing/consolidate_all_for_paper.py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nnunet.utilities.folder_names import get_output_folder_name
def get_datasets():
    """Return the mapping from task name to the nnU-Net configurations run for it."""
    return {
        "Task01_BrainTumour": ("3d_fullres", "2d"),
        "Task02_Heart": ("3d_fullres", "2d",),
        "Task03_Liver": ("3d_cascade_fullres", "3d_fullres", "3d_lowres", "2d"),
        "Task04_Hippocampus": ("3d_fullres", "2d",),
        "Task05_Prostate": ("3d_fullres", "2d",),
        "Task06_Lung": ("3d_cascade_fullres", "3d_fullres", "3d_lowres", "2d"),
        "Task07_Pancreas": ("3d_cascade_fullres", "3d_fullres", "3d_lowres", "2d"),
        "Task08_HepaticVessel": ("3d_cascade_fullres", "3d_fullres", "3d_lowres", "2d"),
        "Task09_Spleen": ("3d_cascade_fullres", "3d_fullres", "3d_lowres", "2d"),
        "Task10_Colon": ("3d_cascade_fullres", "3d_fullres", "3d_lowres", "2d"),
        "Task48_KiTS_clean": ("3d_cascade_fullres", "3d_lowres", "3d_fullres", "2d"),
        "Task27_ACDC": ("3d_fullres", "2d",),
        "Task24_Promise": ("3d_fullres", "2d",),
        "Task35_ISBILesionSegmentation": ("3d_fullres", "2d",),
        "Task38_CHAOS_Task_3_5_Variant2": ("3d_fullres", "2d",),
        "Task29_LITS": ("3d_cascade_fullres", "3d_lowres", "2d", "3d_fullres",),
        "Task17_AbdominalOrganSegmentation": ("3d_cascade_fullres", "3d_lowres", "2d", "3d_fullres",),
        "Task55_SegTHOR": ("3d_cascade_fullres", "3d_lowres", "3d_fullres", "2d",),
        "Task56_VerSe": ("3d_cascade_fullres", "3d_lowres", "3d_fullres", "2d",),
    }
def get_commands(configurations, regular_trainer="nnUNetTrainerV2", cascade_trainer="nnUNetTrainerV2CascadeFullRes",
                 plans="nnUNetPlansv2.1"):
    """Print one bsub command per (task, model) pair, assigning jobs round-robin
    over the GPU node pool.

    :param configurations: dict mapping task name -> iterable of model identifiers
    :param regular_trainer: trainer class for all non-cascade models
    :param cascade_trainer: trainer class for '3d_cascade_fullres'
    :param plans: plans identifier
    """
    # BUGFIX: use '%02d' (zero-padded) instead of '%02.0d'. When an explicit
    # precision is given the zero flag is ignored, so '%02.0d' % 1 yields
    # 'hdf18-gpu 1' (space-padded) rather than the intended 'hdf18-gpu01'.
    node_pool = ["hdf18-gpu%02d" % i for i in range(1, 21)] + \
                ["hdf19-gpu%02d" % i for i in range(1, 8)] + \
                ["hdf19-gpu%02d" % i for i in range(11, 16)]
    ctr = 0
    for task in configurations:
        models = configurations[task]
        for m in models:
            if m == "3d_cascade_fullres":
                trainer = cascade_trainer
            else:
                trainer = regular_trainer

            folder = get_output_folder_name(m, task, trainer, plans, overwrite_training_output_dir="/datasets/datasets_fabian/results/nnUNet")
            node = node_pool[ctr % len(node_pool)]
            print("bsub -m %s -q gputest -L /bin/bash \"source ~/.bashrc && python postprocessing/"
                  "consolidate_postprocessing.py -f" % node, folder, "\"")
            ctr += 1
| 3,162
| 50.016129
| 157
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/postprocessing/connected_components.py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ast
from copy import deepcopy
from multiprocessing.pool import Pool
import numpy as np
from nnunet.configuration import default_num_threads
from nnunet.evaluation.evaluator import aggregate_scores
from scipy.ndimage import label
import SimpleITK as sitk
from nnunet.utilities.sitk_stuff import copy_geometry
from batchgenerators.utilities.file_and_folder_operations import *
import shutil
def load_remove_save(input_file: str, output_file: str, for_which_classes: list,
                     minimum_valid_object_size: dict = None):
    """Read a nifti, keep only the largest connected component per class, write the result.

    Non-largest objects are erased; if minimum_valid_object_size is given, only
    non-largest objects *smaller* than that threshold are erased (the previous
    comment stated the opposite direction). Keys in minimum_valid_object_size
    must match entries in for_which_classes.
    Returns (largest_removed, kept_size) as produced by
    remove_all_but_the_largest_connected_component.
    """
    img_in = sitk.ReadImage(input_file)
    img_npy = sitk.GetArrayFromImage(img_in)
    # physical volume of one voxel, used to express object sizes in real units
    volume_per_voxel = float(np.prod(img_in.GetSpacing(), dtype=np.float64))

    image, largest_removed, kept_size = remove_all_but_the_largest_connected_component(img_npy, for_which_classes,
                                                                                       volume_per_voxel,
                                                                                       minimum_valid_object_size)
    # print(input_file, "kept:", kept_size)
    # copy spacing/origin/direction from the input so geometry is preserved
    img_out_itk = sitk.GetImageFromArray(image)
    img_out_itk = copy_geometry(img_out_itk, img_in)
    sitk.WriteImage(img_out_itk, output_file)
    return largest_removed, kept_size
def remove_all_but_the_largest_connected_component(image: np.ndarray, for_which_classes: list, volume_per_voxel: float,
                                                   minimum_valid_object_size: dict = None):
    """
    For each requested class, keep the largest connected component and erase the
    smaller ones (the array is modified in place).

    :param image: label map, modified in place
    :param for_which_classes: list of class ids. An entry may itself be a
           list/tuple of ids, in which case those classes are treated as one joint
           foreground region (e.g. (1, 2) for LiTS). If None, all non-zero labels
           present in the image are processed.
    :param volume_per_voxel: physical volume of a single voxel; object sizes are
           expressed in these units
    :param minimum_valid_object_size: optional dict mapping class (or class tuple)
           to a size threshold; non-largest objects are only erased when they are
           *smaller* than this threshold
    :return: (image, largest_removed, kept_size) — the two dicts map each class to
             the size of the largest erased object (or None) and the size of the
             kept component (or None)
    """
    if for_which_classes is None:
        present_labels = np.unique(image)
        for_which_classes = present_labels[present_labels > 0]

    assert 0 not in for_which_classes, "cannot remove background"

    largest_removed = {}
    kept_size = {}
    for c in for_which_classes:
        if isinstance(c, (list, tuple)):
            c = tuple(c)  # lists are unhashable and cannot serve as dict keys
            mask = np.zeros_like(image, dtype=bool)
            for cl in c:
                mask[image == cl] = True
        else:
            mask = image == c

        # connected-component labelling of the class mask
        labeled, num_objects = label(mask.astype(int))

        # physical size of every component
        sizes = {oid: (labeled == oid).sum() * volume_per_voxel
                 for oid in range(1, num_objects + 1)}

        largest_removed[c] = None
        kept_size[c] = None

        if num_objects > 0:
            # the largest component always survives
            maximum_size = max(sizes.values())
            kept_size[c] = maximum_size

            for oid, size in sizes.items():
                if size == maximum_size:
                    continue
                # objects at least as large as the per-class threshold are spared
                if minimum_valid_object_size is not None and size >= minimum_valid_object_size[c]:
                    continue
                image[(labeled == oid) & mask] = 0
                if largest_removed[c] is None or size > largest_removed[c]:
                    largest_removed[c] = size
    return image, largest_removed, kept_size
def load_postprocessing(json_file):
    '''
    Load the part of a postprocessing json file that is needed for applying postprocessing.

    :param json_file: path to the postprocessing json file (the previous docstring
        incorrectly documented a ``pkl_file`` parameter)
    :return: (for_which_classes, min_valid_object_sizes); the latter is None when the
        file does not specify minimum valid object sizes
    '''
    a = load_json(json_file)
    # 'min_valid_object_sizes' is serialized via str() (tuple keys are not valid
    # json), hence the literal_eval round-trip; membership test directly on the
    # dict instead of the redundant `.keys()` call.
    if 'min_valid_object_sizes' in a:
        min_valid_object_sizes = ast.literal_eval(a['min_valid_object_sizes'])
    else:
        min_valid_object_sizes = None
    return a['for_which_classes'], min_valid_object_sizes
def determine_postprocessing(base, gt_labels_folder, raw_subfolder_name="validation_raw",
                             temp_folder="temp",
                             final_subf_name="validation_final", processes=default_num_threads,
                             dice_threshold=0, debug=False,
                             advanced_postprocessing=False,
                             pp_filename="postprocessing.json"):
    """
    Decide which connected-component postprocessing helps on the validation set
    and store the decision as a json file.

    The strategy is two-staged: first all foreground classes are merged and all
    but the largest foreground component is removed; if that improves Dice for
    at least one class and degrades none, it is kept. Then (if there is more
    than one class) the same keep-largest-component step is tried per class on
    top of the first decision. Final, postprocessed predictions are written to
    final_subf_name and the decision is saved to pp_filename.

    :param base: experiment output folder; all subfolders below are relative to it
    :param gt_labels_folder: subfolder of base with niftis of ground truth labels
    :param raw_subfolder_name: subfolder of base with niftis of predicted (non-postprocessed) segmentations
    :param temp_folder: used to store temporary data, will be deleted after we are done here undless debug=True
    :param final_subf_name: final results will be stored here (subfolder of base)
    :param processes: number of worker processes for the multiprocessing pool
    :param dice_threshold: only apply postprocessing if results is better than old_result+dice_threshold (can be used as eps)
    :param debug: if True then the temporary files will not be deleted
    :param advanced_postprocessing: if True, additionally derive minimum valid
        object sizes from the validation data and only remove objects smaller
        than those
    :param pp_filename: name of the json file the decision is written to
    :return: None (results are written to disk)
    """
    # lets see what classes are in the dataset
    classes = [int(i) for i in load_json(join(base, raw_subfolder_name, "summary.json"))['results']['mean'].keys() if
               int(i) != 0]
    folder_all_classes_as_fg = join(base, temp_folder + "_allClasses")
    folder_per_class = join(base, temp_folder + "_perClass")
    # start from a clean slate: remove leftovers from previous runs
    if isdir(folder_all_classes_as_fg):
        shutil.rmtree(folder_all_classes_as_fg)
    if isdir(folder_per_class):
        shutil.rmtree(folder_per_class)
    # multiprocessing rules
    p = Pool(processes)
    assert isfile(join(base, raw_subfolder_name, "summary.json")), "join(base, raw_subfolder_name) does not " \
                                                                   "contain a summary.json"
    # these are all the files we will be dealing with
    fnames = subfiles(join(base, raw_subfolder_name), suffix=".nii.gz", join=False)
    # make output and temp dir
    maybe_mkdir_p(folder_all_classes_as_fg)
    maybe_mkdir_p(folder_per_class)
    maybe_mkdir_p(join(base, final_subf_name))
    pp_results = {}
    pp_results['dc_per_class_raw'] = {}
    pp_results['dc_per_class_pp_all'] = {}  # dice scores after treating all foreground classes as one
    pp_results['dc_per_class_pp_per_class'] = {}  # dice scores after removing everything except larges cc
    # independently for each class after we already did dc_per_class_pp_all
    pp_results['for_which_classes'] = []
    pp_results['min_valid_object_sizes'] = {}
    validation_result_raw = load_json(join(base, raw_subfolder_name, "summary.json"))['results']
    pp_results['num_samples'] = len(validation_result_raw['all'])
    validation_result_raw = validation_result_raw['mean']
    if advanced_postprocessing:
        # first treat all foreground classes as one and remove all but the largest foreground connected component
        results = []
        for f in fnames:
            predicted_segmentation = join(base, raw_subfolder_name, f)
            # now remove all but the largest connected component for each class
            output_file = join(folder_all_classes_as_fg, f)
            results.append(p.starmap_async(load_remove_save, ((predicted_segmentation, output_file, (classes,)),)))
        results = [i.get() for i in results]
        # aggregate max_size_removed and min_size_kept
        # (load_remove_save presumably returns these per case — confirm in its definition)
        max_size_removed = {}
        min_size_kept = {}
        for tmp in results:
            mx_rem, min_kept = tmp[0]
            for k in mx_rem:
                if mx_rem[k] is not None:
                    if max_size_removed.get(k) is None:
                        max_size_removed[k] = mx_rem[k]
                    else:
                        max_size_removed[k] = max(max_size_removed[k], mx_rem[k])
            for k in min_kept:
                if min_kept[k] is not None:
                    if min_size_kept.get(k) is None:
                        min_size_kept[k] = min_kept[k]
                    else:
                        min_size_kept[k] = min(min_size_kept[k], min_kept[k])
        print("foreground vs background, smallest valid object size was", min_size_kept[tuple(classes)])
        print("removing only objects smaller than that...")
    else:
        min_size_kept = None
    # we need to rerun the step from above, now with the size constraint
    pred_gt_tuples = []
    results = []
    # first treat all foreground classes as one and remove all but the largest foreground connected component
    for f in fnames:
        predicted_segmentation = join(base, raw_subfolder_name, f)
        # now remove all but the largest connected component for each class
        output_file = join(folder_all_classes_as_fg, f)
        results.append(
            p.starmap_async(load_remove_save, ((predicted_segmentation, output_file, (classes,), min_size_kept),)))
        pred_gt_tuples.append([output_file, join(gt_labels_folder, f)])
    _ = [i.get() for i in results]
    # evaluate postprocessed predictions
    _ = aggregate_scores(pred_gt_tuples, labels=classes,
                         json_output_file=join(folder_all_classes_as_fg, "summary.json"),
                         json_author="Fabian", num_threads=processes)
    # now we need to figure out if doing this improved the dice scores. We will implement that defensively in so far
    # that if a single class got worse as a result we won't do this. We can change this in the future but right now I
    # prefer to do it this way
    validation_result_PP_test = load_json(join(folder_all_classes_as_fg, "summary.json"))['results']['mean']
    for c in classes:
        dc_raw = validation_result_raw[str(c)]['Dice']
        dc_pp = validation_result_PP_test[str(c)]['Dice']
        pp_results['dc_per_class_raw'][str(c)] = dc_raw
        pp_results['dc_per_class_pp_all'][str(c)] = dc_pp
    # true if new is better
    do_fg_cc = False
    comp = [pp_results['dc_per_class_pp_all'][str(cl)] > (pp_results['dc_per_class_raw'][str(cl)] + dice_threshold) for
            cl in classes]
    before = np.mean([pp_results['dc_per_class_raw'][str(cl)] for cl in classes])
    after = np.mean([pp_results['dc_per_class_pp_all'][str(cl)] for cl in classes])
    print("Foreground vs background")
    print("before:", before)
    print("after: ", after)
    if any(comp):
        # at least one class improved - yay!
        # now check if another got worse
        # true if new is worse
        any_worse = any(
            [pp_results['dc_per_class_pp_all'][str(cl)] < pp_results['dc_per_class_raw'][str(cl)] for cl in classes])
        if not any_worse:
            pp_results['for_which_classes'].append(classes)
            if min_size_kept is not None:
                pp_results['min_valid_object_sizes'].update(deepcopy(min_size_kept))
            do_fg_cc = True
            print("Removing all but the largest foreground region improved results!")
            print('for_which_classes', classes)
            print('min_valid_object_sizes', min_size_kept)
    else:
        # did not improve things - don't do it
        pass
    if len(classes) > 1:
        # now depending on whether we do remove all but the largest foreground connected component we define the source dir
        # for the next one to be the raw or the temp dir
        if do_fg_cc:
            source = folder_all_classes_as_fg
        else:
            source = join(base, raw_subfolder_name)
        if advanced_postprocessing:
            # now run this for each class separately
            results = []
            for f in fnames:
                predicted_segmentation = join(source, f)
                output_file = join(folder_per_class, f)
                results.append(p.starmap_async(load_remove_save, ((predicted_segmentation, output_file, classes),)))
            results = [i.get() for i in results]
            # aggregate max_size_removed and min_size_kept (same pattern as above,
            # but now per individual class rather than for the merged foreground)
            max_size_removed = {}
            min_size_kept = {}
            for tmp in results:
                mx_rem, min_kept = tmp[0]
                for k in mx_rem:
                    if mx_rem[k] is not None:
                        if max_size_removed.get(k) is None:
                            max_size_removed[k] = mx_rem[k]
                        else:
                            max_size_removed[k] = max(max_size_removed[k], mx_rem[k])
                for k in min_kept:
                    if min_kept[k] is not None:
                        if min_size_kept.get(k) is None:
                            min_size_kept[k] = min_kept[k]
                        else:
                            min_size_kept[k] = min(min_size_kept[k], min_kept[k])
            print("classes treated separately, smallest valid object sizes are")
            print(min_size_kept)
            print("removing only objects smaller than that")
        else:
            min_size_kept = None
        # rerun with the size thresholds from above
        pred_gt_tuples = []
        results = []
        for f in fnames:
            predicted_segmentation = join(source, f)
            output_file = join(folder_per_class, f)
            results.append(p.starmap_async(load_remove_save, ((predicted_segmentation, output_file, classes, min_size_kept),)))
            pred_gt_tuples.append([output_file, join(gt_labels_folder, f)])
        _ = [i.get() for i in results]
        # evaluate postprocessed predictions
        _ = aggregate_scores(pred_gt_tuples, labels=classes,
                             json_output_file=join(folder_per_class, "summary.json"),
                             json_author="Fabian", num_threads=processes)
        # compare against the result of the first stage (fg cc) if that was kept,
        # otherwise against the raw predictions
        if do_fg_cc:
            old_res = deepcopy(validation_result_PP_test)
        else:
            old_res = validation_result_raw
        # these are the new dice scores
        validation_result_PP_test = load_json(join(folder_per_class, "summary.json"))['results']['mean']
        for c in classes:
            dc_raw = old_res[str(c)]['Dice']
            dc_pp = validation_result_PP_test[str(c)]['Dice']
            pp_results['dc_per_class_pp_per_class'][str(c)] = dc_pp
            print(c)
            print("before:", dc_raw)
            print("after: ", dc_pp)
            if dc_pp > (dc_raw + dice_threshold):
                pp_results['for_which_classes'].append(int(c))
                if min_size_kept is not None:
                    pp_results['min_valid_object_sizes'].update({c: min_size_kept[c]})
                print("Removing all but the largest region for class %d improved results!" % c)
                print('min_valid_object_sizes', min_size_kept)
    else:
        print("Only one class present, no need to do each class separately as this is covered in fg vs bg")
    if not advanced_postprocessing:
        pp_results['min_valid_object_sizes'] = None
    print("done")
    print("for which classes:")
    print(pp_results['for_which_classes'])
    print("min_object_sizes")
    print(pp_results['min_valid_object_sizes'])
    pp_results['validation_raw'] = raw_subfolder_name
    pp_results['validation_final'] = final_subf_name
    # now that we have a proper for_which_classes, apply that
    pred_gt_tuples = []
    results = []
    for f in fnames:
        predicted_segmentation = join(base, raw_subfolder_name, f)
        # now remove all but the largest connected component for each class
        output_file = join(base, final_subf_name, f)
        results.append(p.starmap_async(load_remove_save, (
            (predicted_segmentation, output_file, pp_results['for_which_classes'],
             pp_results['min_valid_object_sizes']),)))
        pred_gt_tuples.append([output_file,
                               join(gt_labels_folder, f)])
    _ = [i.get() for i in results]
    # evaluate postprocessed predictions
    _ = aggregate_scores(pred_gt_tuples, labels=classes,
                         json_output_file=join(base, final_subf_name, "summary.json"),
                         json_author="Fabian", num_threads=processes)
    # the dict of sizes is stored as a string so it survives json round-trips;
    # load_postprocessing parses it back with ast.literal_eval
    pp_results['min_valid_object_sizes'] = str(pp_results['min_valid_object_sizes'])
    save_json(pp_results, join(base, pp_filename))
    # delete temp
    if not debug:
        shutil.rmtree(folder_per_class)
        shutil.rmtree(folder_all_classes_as_fg)
    p.close()
    p.join()
    print("done")
def apply_postprocessing_to_folder(input_folder: str, output_folder: str, for_which_classes: list,
                                   min_valid_object_size: dict = None, num_processes=8):
    """
    Run the keep-largest-connected-component postprocessing on every nifti in
    input_folder and write the results to output_folder.

    :param input_folder: folder with the segmentations to postprocess (.nii.gz)
    :param output_folder: folder the postprocessed segmentations are written to
    :param for_which_classes: classes (or tuples of classes) to postprocess
    :param min_valid_object_size: optional per-class minimum object size to keep
    :param num_processes: number of worker processes
    :return: None
    """
    maybe_mkdir_p(output_folder)
    filenames = subfiles(input_folder, suffix=".nii.gz", join=False)
    src_files = [join(input_folder, name) for name in filenames]
    dst_files = [join(output_folder, name) for name in filenames]
    classes_per_case = [for_which_classes] * len(src_files)
    sizes_per_case = [min_valid_object_size] * len(src_files)
    pool = Pool(num_processes)
    async_res = pool.starmap_async(load_remove_save,
                                   zip(src_files, dst_files, classes_per_case, sizes_per_case))
    _ = async_res.get()
    pool.close()
    pool.join()
if __name__ == "__main__":
    # example: merge liver (1) and tumor (2) into a single foreground region
    # and keep only its largest connected component
    input_folder = "/media/fabian/DKFZ/predictions_Fabian/Liver_and_LiverTumor"
    output_folder = "/media/fabian/DKFZ/predictions_Fabian/Liver_and_LiverTumor_postprocessed"
    apply_postprocessing_to_folder(input_folder, output_folder, [(1, 2), ])
| 19,122
| 43.575758
| 127
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/evaluation/add_dummy_task_with_mean_over_all_tasks.py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import numpy as np
from batchgenerators.utilities.file_and_folder_operations import subfiles
import os
from collections import OrderedDict
# Script: aggregate per-task mean scores of several experiments into one
# synthetic "Task999_ALL" json per experiment (global mean across tasks).
folder = "/home/fabian/drives/E132-Projekte/Projects/2018_MedicalDecathlon/Leaderboard"
# experiment names (as stored in the summary jsons) that we want to aggregate
task_descriptors = ['2D final 2',
                    '2D final, less pool, dc and topK, fold0',
                    '2D final pseudo3d 7, fold0',
                    '2D final, less pool, dc and ce, fold0',
                    '3D stage0 final 2, fold0',
                    '3D fullres final 2, fold0']
# these tasks have no lowres (stage0) variant; their fullres results are reused below
task_ids_with_no_stage0 = ["Task001_BrainTumour", "Task004_Hippocampus", "Task005_Prostate"]
# mean_scores[experiment_name][task_id] -> mean-of-means metric dict
mean_scores = OrderedDict()
for t in task_descriptors:
    mean_scores[t] = OrderedDict()
json_files = subfiles(folder, True, None, ".json", True)
json_files = [i for i in json_files if not i.split("/")[-1].startswith(".")]  # stupid mac
for j in json_files:
    with open(j, 'r') as f:
        res = json.load(f)
    task = res['task']
    # skip previously generated aggregate files
    if task != "Task999_ALL":
        name = res['name']
        if name in task_descriptors:
            if task not in list(mean_scores[name].keys()):
                mean_scores[name][task] = res['results']['mean']['mean']
            else:
                raise RuntimeError("duplicate task %s for description %s" % (task, name))
# fill the gaps: tasks without stage0 borrow the fullres result
for t in task_ids_with_no_stage0:
    mean_scores["3D stage0 final 2, fold0"][t] = mean_scores["3D fullres final 2, fold0"][t]
# union of all task ids seen across experiments
a = set()
for i in mean_scores.keys():
    a = a.union(list(mean_scores[i].keys()))
# write one *_globalMean.json per experiment; skip experiments missing a task
for i in mean_scores.keys():
    try:
        for t in list(a):
            assert t in mean_scores[i].keys(), "did not find task %s for experiment %s" % (t, i)
        new_res = OrderedDict()
        new_res['name'] = i
        new_res['author'] = "Fabian"
        new_res['task'] = "Task999_ALL"
        new_res['results'] = OrderedDict()
        new_res['results']['mean'] = OrderedDict()
        new_res['results']['mean']['mean'] = OrderedDict()
        tasks = list(mean_scores[i].keys())
        # metric names are taken from the first task; assumes all tasks share
        # the same metric set — TODO confirm
        metrics = mean_scores[i][tasks[0]].keys()
        for m in metrics:
            foreground_values = [mean_scores[i][n][m] for n in tasks]
            new_res['results']['mean']["mean"][m] = np.nanmean(foreground_values)
        output_fname = i.replace(" ", "_") + "_globalMean.json"
        with open(os.path.join(folder, output_fname), 'w') as f:
            json.dump(new_res, f)
    except AssertionError:
        print("could not process experiment %s" % i)
        print("did not find task %s for experiment %s" % (t, i))
| 3,246
| 40.628205
| 114
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/evaluation/add_mean_dice_to_json.py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import numpy as np
from batchgenerators.utilities.file_and_folder_operations import subfiles
from collections import OrderedDict
def foreground_mean(filename):
    """
    Add (or overwrite) a 'mean' entry in a summary.json that averages every
    metric over all foreground classes. The file is modified in place.

    Background (0), the ignore label (-1) and the 'all foreground'
    pseudo-class (99) are excluded from the average; the '99' entry is also
    removed from the file if present.

    :param filename: path to a summary.json as written by aggregate_scores
    """
    with open(filename, 'r') as f:
        res = json.load(f)
    # explicit dtype so an empty key list does not produce a float array
    class_ids = np.array([int(i) for i in res['results']['mean'].keys() if (i != 'mean')], dtype=int)
    class_ids = class_ids[class_ids != 0]
    class_ids = class_ids[class_ids != -1]
    class_ids = class_ids[class_ids != 99]
    # drop the 'all foreground' pseudo-class if present (pop with default
    # replaces the old get/pop two-step)
    res['results']['mean'].pop('99', None)
    if len(class_ids) == 0:
        # nothing to average over; previously this crashed with a KeyError
        return
    # derive metric names from the first foreground class instead of assuming
    # that a class '1' always exists (fixes KeyError for datasets without it)
    metrics = res['results']['mean'][str(class_ids[0])].keys()
    res['results']['mean']["mean"] = OrderedDict()
    for m in metrics:
        foreground_values = [res['results']['mean'][str(i)][m] for i in class_ids]
        # cast to plain float so json serialization never sees numpy scalars
        res['results']['mean']["mean"][m] = float(np.nanmean(foreground_values))
    with open(filename, 'w') as f:
        json.dump(res, f, indent=4, sort_keys=True)
def run_in_folder(folder):
    """
    Apply foreground_mean to every summary json in *folder*.

    Hidden files (mac artifacts) and previously generated *_globalMean.json
    files are skipped.
    """
    candidates = subfiles(folder, True, None, ".json", True)
    for path in candidates:
        basename = path.split("/")[-1]
        if basename.startswith(".") or path.endswith("_globalMean.json"):
            continue
        foreground_mean(path)
if __name__ == "__main__":
    # recompute the foreground mean for every summary json in this folder
    summary_dir = "/media/fabian/Results/nnUNetOutput_final/summary_jsons"
    run_in_folder(summary_dir)
| 2,024
| 37.942308
| 132
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/evaluation/region_based_evaluation.py
|
from copy import deepcopy
from multiprocessing.pool import Pool
from batchgenerators.utilities.file_and_folder_operations import *
from medpy import metric
import SimpleITK as sitk
import numpy as np
from nnunet.configuration import default_num_threads
from nnunet.postprocessing.consolidate_postprocessing import collect_cv_niftis
def get_brats_regions():
    """
    Region definitions for the BraTS data as converted in this repository,
    where the labels are 1, 2 and 3. Note that the original BraTS release
    uses a different labeling convention!

    :return: dict mapping region name -> tuple of labels forming the region
    """
    whole_tumor = (1, 2, 3)
    tumor_core = (2, 3)
    enhancing_tumor = (3,)
    return {
        "whole tumor": whole_tumor,
        "tumor core": tumor_core,
        "enhancing tumor": enhancing_tumor,
    }
def get_KiTS_regions():
    """
    Region definitions for KiTS (label 1: kidney, label 2: tumor).

    :return: dict mapping region name -> tuple of labels forming the region
    """
    kidney_and_tumor = (1, 2)
    tumor_only = (2,)
    return {"kidney incl tumor": kidney_and_tumor, "tumor": tumor_only}
def create_region_from_mask(mask, join_labels: tuple):
    """
    Build a binary mask that is 1 wherever *mask* carries any label from
    *join_labels* and 0 elsewhere.

    :param mask: integer label map (numpy array)
    :param join_labels: labels to merge into a single region
    :return: uint8 array of the same shape as mask
    """
    # np.isin replaces the explicit per-label loop of the original
    return np.isin(mask, join_labels).astype(np.uint8)
def evaluate_case(file_pred: str, file_gt: str, regions):
    """
    Compute the Dice score of prediction vs ground truth for each region.

    A region where both masks are empty yields NaN (Dice is undefined there).

    :param file_pred: path to the predicted segmentation (nifti)
    :param file_gt: path to the reference segmentation (nifti)
    :param regions: iterable of label tuples, one per region
    :return: list of Dice scores, one per region, in iteration order
    """
    gt_arr = sitk.GetArrayFromImage(sitk.ReadImage(file_gt))
    pred_arr = sitk.GetArrayFromImage(sitk.ReadImage(file_pred))
    dice_scores = []
    for region in regions:
        pred_mask = create_region_from_mask(pred_arr, region)
        gt_mask = create_region_from_mask(gt_arr, region)
        if np.sum(gt_mask) == 0 and np.sum(pred_mask) == 0:
            dice_scores.append(np.nan)
        else:
            dice_scores.append(metric.dc(pred_mask, gt_mask))
    return dice_scores
def evaluate_regions(folder_predicted: str, folder_gt: str, regions: dict, processes=default_num_threads):
    """
    Evaluate region-wise Dice for all predicted niftis against the ground
    truth and write a summary.csv into folder_predicted.

    :param folder_predicted: folder with predicted segmentations (.nii.gz)
    :param folder_gt: folder with ground truth segmentations (.nii.gz);
        filenames must match those in folder_predicted
    :param regions: dict mapping region name -> tuple of labels (see
        get_brats_regions / get_KiTS_regions)
    :param processes: number of worker processes
    :return: None (results are written to summary.csv)
    """
    region_names = list(regions.keys())
    files_in_pred = subfiles(folder_predicted, suffix='.nii.gz', join=False)
    files_in_gt = subfiles(folder_gt, suffix='.nii.gz', join=False)
    # every prediction must have a ground truth; missing predictions only warn
    have_no_gt = [i for i in files_in_pred if i not in files_in_gt]
    assert len(have_no_gt) == 0, "Some files in folder_predicted have not ground truth in folder_gt"
    have_no_pred = [i for i in files_in_gt if i not in files_in_pred]
    if len(have_no_pred) > 0:
        print("WARNING! Some files in folder_gt were not predicted (not present in folder_predicted)!")
    files_in_gt.sort()
    files_in_pred.sort()
    # run for all cases
    full_filenames_gt = [join(folder_gt, i) for i in files_in_pred]
    full_filenames_pred = [join(folder_predicted, i) for i in files_in_pred]
    p = Pool(processes)
    # res[i] is the list of per-region Dice scores for case i
    res = p.starmap(evaluate_case, zip(full_filenames_pred, full_filenames_gt, [list(regions.values())] * len(files_in_gt)))
    p.close()
    p.join()
    all_results = {r: [] for r in region_names}
    # write one row per case plus four summary rows (mean/median, with NaN
    # either propagated via nan-aware aggregation or counted as perfect score 1)
    with open(join(folder_predicted, 'summary.csv'), 'w') as f:
        f.write("casename")
        for r in region_names:
            f.write(",%s" % r)
        f.write("\n")
        for i in range(len(files_in_pred)):
            f.write(files_in_pred[i][:-7])  # strip the '.nii.gz' suffix
            result_here = res[i]
            for k, r in enumerate(region_names):
                dc = result_here[k]
                f.write(",%02.4f" % dc)
                all_results[r].append(dc)
            f.write("\n")
        f.write('mean')
        for r in region_names:
            f.write(",%02.4f" % np.nanmean(all_results[r]))
        f.write("\n")
        f.write('median')
        for r in region_names:
            f.write(",%02.4f" % np.nanmedian(all_results[r]))
        f.write("\n")
        f.write('mean (nan is 1)')
        for r in region_names:
            tmp = np.array(all_results[r])
            tmp[np.isnan(tmp)] = 1
            f.write(",%02.4f" % np.mean(tmp))
        f.write("\n")
        f.write('median (nan is 1)')
        for r in region_names:
            tmp = np.array(all_results[r])
            tmp[np.isnan(tmp)] = 1
            f.write(",%02.4f" % np.median(tmp))
        f.write("\n")
if __name__ == '__main__':
    # collect the cross-validation predictions into one folder, then score the
    # BraTS regions against the ground truth niftis
    collect_cv_niftis('./', './cv_niftis')
    evaluate_regions('./cv_niftis/', './gt_niftis/', get_brats_regions())
| 3,938
| 32.956897
| 124
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/evaluation/evaluator.py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import inspect
import json
import hashlib
from datetime import datetime
from multiprocessing.pool import Pool
import numpy as np
import pandas as pd
import SimpleITK as sitk
from nnunet.evaluation.metrics import ConfusionMatrix, ALL_METRICS
from batchgenerators.utilities.file_and_folder_operations import save_json, subfiles, join
from collections import OrderedDict
class Evaluator:
    """Object that holds test and reference segmentations with label information
    and computes a number of metrics on the two. 'labels' must either be an
    iterable of numeric values (or tuples thereof) or a dictionary with string
    names and numeric values.
    """

    # metrics computed on every evaluate() call (names index ALL_METRICS)
    default_metrics = [
        "False Positive Rate",
        "Dice",
        "Jaccard",
        "Precision",
        "Recall",
        "Accuracy",
        "False Omission Rate",
        "Negative Predictive Value",
        "False Negative Rate",
        "True Negative Rate",
        "False Discovery Rate",
        "Total Positives Test",
        "Total Positives Reference"
    ]

    # more expensive metrics, only computed when evaluate(advanced=True)
    default_advanced_metrics = [
        #"Hausdorff Distance",
        "Hausdorff Distance 95",
        #"Avg. Surface Distance",
        #"Avg. Symmetric Surface Distance"
    ]

    def __init__(self,
                 test=None,
                 reference=None,
                 labels=None,
                 metrics=None,
                 advanced_metrics=None,
                 nan_for_nonexisting=True):
        """
        :param test: test (predicted) segmentation array, or None
        :param reference: reference (ground truth) segmentation array, or None
        :param labels: labels to evaluate; see set_labels for accepted types.
            When None and both segmentations are given, labels are derived
            from the data.
        :param metrics: metric names to compute (default: default_metrics)
        :param advanced_metrics: advanced metric names (default:
            default_advanced_metrics)
        :param nan_for_nonexisting: passed through to the metric functions
        """
        self.test = None
        self.reference = None
        self.confusion_matrix = ConfusionMatrix()
        self.labels = None
        self.nan_for_nonexisting = nan_for_nonexisting
        self.result = None

        # copy the metric lists so instances never share (or mutate) the
        # class-level defaults
        self.metrics = []
        if metrics is None:
            for m in self.default_metrics:
                self.metrics.append(m)
        else:
            for m in metrics:
                self.metrics.append(m)

        self.advanced_metrics = []
        if advanced_metrics is None:
            for m in self.default_advanced_metrics:
                self.advanced_metrics.append(m)
        else:
            for m in advanced_metrics:
                self.advanced_metrics.append(m)

        self.set_reference(reference)
        self.set_test(test)
        if labels is not None:
            self.set_labels(labels)
        else:
            if test is not None and reference is not None:
                self.construct_labels()

    def set_test(self, test):
        """Set the test segmentation."""
        self.test = test

    def set_reference(self, reference):
        """Set the reference segmentation."""
        self.reference = reference

    def set_labels(self, labels):
        """Set the labels.
        :param labels= may be a dictionary (int->str), a set (of ints), a tuple (of ints) or a list (of ints). Labels
        will only have names if you pass a dictionary"""
        if isinstance(labels, dict):
            self.labels = collections.OrderedDict(labels)
        elif isinstance(labels, set):
            self.labels = list(labels)
        elif isinstance(labels, np.ndarray):
            self.labels = [i for i in labels]
        elif isinstance(labels, (list, tuple)):
            self.labels = labels
        else:
            raise TypeError("Can only handle dict, list, tuple, set & numpy array, but input is of type {}".format(type(labels)))

    def construct_labels(self):
        """Construct label set from unique entries in segmentations."""
        if self.test is None and self.reference is None:
            raise ValueError("No test or reference segmentations.")
        elif self.test is None:
            labels = np.unique(self.reference)
        else:
            labels = np.union1d(np.unique(self.test),
                                np.unique(self.reference))
        self.labels = list(map(lambda x: int(x), labels))

    def set_metrics(self, metrics):
        """Set evaluation metrics"""
        if isinstance(metrics, set):
            self.metrics = list(metrics)
        elif isinstance(metrics, (list, tuple, np.ndarray)):
            self.metrics = metrics
        else:
            raise TypeError("Can only handle list, tuple, set & numpy array, but input is of type {}".format(type(metrics)))

    def add_metric(self, metric):
        """Append a metric name if it is not already scheduled."""
        if metric not in self.metrics:
            self.metrics.append(metric)

    def evaluate(self, test=None, reference=None, advanced=False, **metric_kwargs):
        """Compute metrics for segmentations.

        :param test: optional test segmentation to set before evaluating
        :param reference: optional reference segmentation to set first
        :param advanced: if True, also compute self.advanced_metrics
        :param metric_kwargs: forwarded to every metric function
        :return: OrderedDict mapping str(label-or-name) -> {metric: value}
        """
        if test is not None:
            self.set_test(test)

        if reference is not None:
            self.set_reference(reference)

        if self.test is None or self.reference is None:
            raise ValueError("Need both test and reference segmentations.")

        if self.labels is None:
            self.construct_labels()

        self.metrics.sort()

        # get functions for evaluation
        # somewhat convoluted, but allows users to define additonal metrics
        # on the fly, e.g. inside an IPython console
        _funcs = {m: ALL_METRICS[m] for m in self.metrics + self.advanced_metrics}
        frames = inspect.getouterframes(inspect.currentframe())
        for metric in self.metrics:
            for f in frames:
                if metric in f[0].f_locals:
                    _funcs[metric] = f[0].f_locals[metric]
                    break
            else:
                if metric in _funcs:
                    continue
                else:
                    raise NotImplementedError(
                        "Metric {} not implemented.".format(metric))

        # get results
        self.result = OrderedDict()

        # FIX: use a copy of self.metrics. The previous code aliased the list,
        # so evaluate(advanced=True) permanently appended the advanced metrics
        # to self.metrics, duplicating them on every subsequent call.
        eval_metrics = list(self.metrics)
        if advanced:
            eval_metrics += self.advanced_metrics

        if isinstance(self.labels, dict):
            for label, name in self.labels.items():
                k = str(name)
                self.result[k] = OrderedDict()
                if not hasattr(label, "__iter__"):
                    self.confusion_matrix.set_test(self.test == label)
                    self.confusion_matrix.set_reference(self.reference == label)
                else:
                    # merge all labels of the tuple into one binary region
                    current_test = 0
                    current_reference = 0
                    for l in label:
                        current_test += (self.test == l)
                        current_reference += (self.reference == l)
                    self.confusion_matrix.set_test(current_test)
                    self.confusion_matrix.set_reference(current_reference)
                for metric in eval_metrics:
                    self.result[k][metric] = _funcs[metric](confusion_matrix=self.confusion_matrix,
                                                            nan_for_nonexisting=self.nan_for_nonexisting,
                                                            **metric_kwargs)
        else:
            for i, l in enumerate(self.labels):
                k = str(l)
                self.result[k] = OrderedDict()
                self.confusion_matrix.set_test(self.test == l)
                self.confusion_matrix.set_reference(self.reference == l)
                for metric in eval_metrics:
                    self.result[k][metric] = _funcs[metric](confusion_matrix=self.confusion_matrix,
                                                            nan_for_nonexisting=self.nan_for_nonexisting,
                                                            **metric_kwargs)

        return self.result

    def to_dict(self):
        """Return (computing if necessary) the result dict."""
        if self.result is None:
            self.evaluate()
        return self.result

    def to_array(self):
        """Return result as numpy array (labels x metrics)."""
        if self.result is None:
            # FIX: was `self.evaluate` (a bare attribute access), which never
            # actually ran the evaluation
            self.evaluate()

        result_metrics = sorted(self.result[list(self.result.keys())[0]].keys())

        a = np.zeros((len(self.labels), len(result_metrics)), dtype=np.float32)

        if isinstance(self.labels, dict):
            for i, label in enumerate(self.labels.keys()):
                for j, metric in enumerate(result_metrics):
                    # result keys are str(name) (see evaluate), so cast here
                    a[i][j] = self.result[str(self.labels[label])][metric]
        else:
            for i, label in enumerate(self.labels):
                for j, metric in enumerate(result_metrics):
                    # FIX: result keys are str(label); indexing with the raw
                    # (e.g. int) label raised KeyError
                    a[i][j] = self.result[str(label)][metric]

        return a

    def to_pandas(self):
        """Return result as pandas DataFrame."""
        a = self.to_array()

        if isinstance(self.labels, dict):
            labels = list(self.labels.values())
        else:
            labels = self.labels

        result_metrics = sorted(self.result[list(self.result.keys())[0]].keys())

        return pd.DataFrame(a, index=labels, columns=result_metrics)
class NiftiEvaluator(Evaluator):
    """Evaluator variant whose test/reference are given as nifti file paths."""

    def __init__(self, *args, **kwargs):
        # keep the sitk images around so evaluate() can read the voxel spacing
        self.test_nifti = None
        self.reference_nifti = None
        super(NiftiEvaluator, self).__init__(*args, **kwargs)

    def set_test(self, test):
        """Set the test segmentation."""
        if test is None:
            self.test_nifti = None
            super(NiftiEvaluator, self).set_test(test)
        else:
            self.test_nifti = sitk.ReadImage(test)
            super(NiftiEvaluator, self).set_test(sitk.GetArrayFromImage(self.test_nifti))

    def set_reference(self, reference):
        """Set the reference segmentation."""
        if reference is None:
            self.reference_nifti = None
            super(NiftiEvaluator, self).set_reference(reference)
        else:
            self.reference_nifti = sitk.ReadImage(reference)
            super(NiftiEvaluator, self).set_reference(sitk.GetArrayFromImage(self.reference_nifti))

    def evaluate(self, test=None, reference=None, voxel_spacing=None, **metric_kwargs):
        """Evaluate, defaulting the voxel spacing to that of the test image."""
        if voxel_spacing is None:
            voxel_spacing = np.array(self.test_nifti.GetSpacing())[::-1]
            metric_kwargs["voxel_spacing"] = voxel_spacing
        return super(NiftiEvaluator, self).evaluate(test, reference, **metric_kwargs)
def run_evaluation(args):
    """
    Worker function: evaluate a single (test, reference) pair.

    :param args: tuple (test, reference, evaluator, metric_kwargs)
    :return: the evaluator's score dict; when test/reference are file path
             strings they are recorded under the "test"/"reference" keys
    """
    test, ref, evaluator, metric_kwargs = args
    # evaluate
    evaluator.set_test(test)
    evaluator.set_reference(ref)
    if evaluator.labels is None:
        evaluator.construct_labels()
    scores = evaluator.evaluate(**metric_kwargs)
    if type(test) == str:
        scores["test"] = test
    if type(ref) == str:
        scores["reference"] = ref
    return scores
def aggregate_scores(test_ref_pairs,
                     evaluator=NiftiEvaluator,
                     labels=None,
                     nanmean=True,
                     json_output_file=None,
                     json_name="",
                     json_description="",
                     json_author="Fabian",
                     json_task="",
                     num_threads=2,
                     **metric_kwargs):
    """
    Evaluate many (test, reference) pairs in parallel and aggregate per-label
    metric means across all cases. test = predicted image.

    :param test_ref_pairs: iterable of (test, reference) pairs (file paths for
        the default NiftiEvaluator)
    :param evaluator: Evaluator instance or class (a class is instantiated)
    :param labels: must be a dict of int-> str or a list of int
    :param nanmean: if True, NaN metric values are ignored when averaging
    :param json_output_file: if given, the aggregated scores are written here
    :param json_name: stored in the json under 'name'
    :param json_description: stored in the json under 'description'
    :param json_author: stored in the json under 'author'
    :param json_task: stored in the json under 'task'
    :param num_threads: number of worker processes
    :param metric_kwargs: forwarded to every evaluate() call
    :return: dict with 'all' (list of per-case score dicts) and 'mean'
        (per-label metric means)
    """
    if type(evaluator) == type:
        evaluator = evaluator()

    if labels is not None:
        evaluator.set_labels(labels)

    all_scores = OrderedDict()
    all_scores["all"] = []
    all_scores["mean"] = OrderedDict()

    test = [i[0] for i in test_ref_pairs]
    ref = [i[1] for i in test_ref_pairs]
    # NOTE: the same evaluator instance is shared across workers; each Pool
    # worker gets its own pickled copy
    p = Pool(num_threads)
    all_res = p.map(run_evaluation, zip(test, ref, [evaluator]*len(ref), [metric_kwargs]*len(ref)))
    p.close()
    p.join()

    for i in range(len(all_res)):
        all_scores["all"].append(all_res[i])

        # append score list for mean
        for label, score_dict in all_res[i].items():
            # "test"/"reference" are the file paths added by run_evaluation,
            # not labels
            if label in ("test", "reference"):
                continue
            if label not in all_scores["mean"]:
                all_scores["mean"][label] = OrderedDict()
            for score, value in score_dict.items():
                if score not in all_scores["mean"][label]:
                    all_scores["mean"][label][score] = []
                all_scores["mean"][label][score].append(value)

    # collapse the collected per-case lists into scalar means
    for label in all_scores["mean"]:
        for score in all_scores["mean"][label]:
            if nanmean:
                all_scores["mean"][label][score] = float(np.nanmean(all_scores["mean"][label][score]))
            else:
                all_scores["mean"][label][score] = float(np.mean(all_scores["mean"][label][score]))

    # save to file if desired
    # we create a hopefully unique id by hashing the entire output dictionary
    if json_output_file is not None:
        json_dict = OrderedDict()
        json_dict["name"] = json_name
        json_dict["description"] = json_description
        timestamp = datetime.today()
        json_dict["timestamp"] = str(timestamp)
        json_dict["task"] = json_task
        json_dict["author"] = json_author
        json_dict["results"] = all_scores
        json_dict["id"] = hashlib.md5(json.dumps(json_dict).encode("utf-8")).hexdigest()[:12]
        save_json(json_dict, json_output_file)

    return all_scores
def aggregate_scores_for_experiment(score_file,
                                    labels=None,
                                    metrics=Evaluator.default_metrics,
                                    nanmean=True,
                                    json_output_file=None,
                                    json_name="",
                                    json_description="",
                                    json_author="Fabian",
                                    json_task=""):
    """
    Convert a numpy score array of shape (cases, labels, metrics) into the
    same json structure that aggregate_scores produces.

    :param score_file: path to a numpy file loadable with np.load
    :param labels: label names; defaults to stringified indices 0..n_labels-1
    :param metrics: metric names corresponding to the last array axis
    :param json_output_file: if given, the json dict is also written here
    :return: the assembled json dict
    """
    scores = np.load(score_file)
    scores_mean = scores.mean(0)
    if labels is None:
        labels = list(map(str, range(scores.shape[1])))

    per_case = []
    mean_entry = OrderedDict()
    for case_idx in range(scores.shape[0]):
        case_entry = OrderedDict()
        for label_idx, label in enumerate(labels):
            case_entry[label] = OrderedDict()
            mean_entry[label] = OrderedDict()
            for metric_idx, metric in enumerate(metrics):
                case_entry[label][metric] = float(scores[case_idx][label_idx][metric_idx])
                mean_entry[label][metric] = float(scores_mean[label_idx][metric_idx])
        per_case.append(case_entry)

    json_dict = OrderedDict()
    json_dict["name"] = json_name
    json_dict["description"] = json_description
    json_dict["timestamp"] = str(datetime.today())
    json_dict["task"] = json_task
    json_dict["author"] = json_author
    json_dict["results"] = {"all": per_case, "mean": mean_entry}
    json_dict["id"] = hashlib.md5(json.dumps(json_dict).encode("utf-8")).hexdigest()[:12]

    if json_output_file is not None:
        with open(json_output_file, "w") as fh:
            json.dump(json_dict, fh, indent=4, separators=(",", ": "))

    return json_dict
def evaluate_folder(folder_with_gts: str, folder_with_predictions: str, labels: tuple, **metric_kwargs):
    """
    Evaluate all predicted niftis against the matching ground truth niftis and
    write a summary.json into folder_with_predictions.

    :param folder_with_gts: folder where the ground truth segmentations are saved. Must be nifti files.
    :param folder_with_predictions: folder where the predicted segmentations are saved. Must be nifti files.
    :param labels: tuple of int with the labels in the dataset. For example (0, 1, 2, 3) for Task001_BrainTumour.
    :return: the aggregated score dict (also written to summary.json)
    """
    gt_files = subfiles(folder_with_gts, suffix=".nii.gz", join=False)
    pred_files = subfiles(folder_with_predictions, suffix=".nii.gz", join=False)
    assert all([name in pred_files for name in gt_files]), "files missing in folder_with_predictions"
    assert all([name in gt_files for name in pred_files]), "files missing in folder_with_gts"
    pairs = []
    for name in pred_files:
        pairs.append((join(folder_with_predictions, name), join(folder_with_gts, name)))
    return aggregate_scores(pairs, json_output_file=join(folder_with_predictions, "summary.json"),
                            num_threads=8, labels=labels, **metric_kwargs)
def nnunet_evaluate_folder():
    """Command line wrapper around evaluate_folder().

    Parses -ref / -pred / -l from sys.argv and evaluates the prediction folder
    against the reference folder.
    """
    import argparse
    # NOTE: the long string is passed positionally, so it lands in ArgumentParser's
    # first parameter (prog), exactly as in the original invocation.
    banner = ("Evaluates the segmentations located in the folder pred. Output of this script is "
              "a json file. At the very bottom of the json file is going to be a 'mean' "
              "entry with averages metrics across all cases")
    parser = argparse.ArgumentParser(banner)
    parser.add_argument('-ref', required=True, type=str,
                        help="Folder containing the reference segmentations in nifti format.")
    parser.add_argument('-pred', required=True, type=str,
                        help="Folder containing the predicted segmentations in nifti "
                             "format. File names must match between the folders!")
    parser.add_argument('-l', nargs='+', type=int, required=True,
                        help="List of label IDs (integer values) that should be evaluated. "
                             "Best practice is to use all int values present in the dataset, "
                             "so for example for LiTS the labels are 0: background, 1: "
                             "liver, 2: tumor. So this argument should be -l 1 2. You can "
                             "if you want also evaluate the background label (0) but in "
                             "this case that would not gie any useful information.")
    parsed = parser.parse_args()
    return evaluate_folder(parsed.ref, parsed.pred, parsed.l)
| 18,778
| 37.879917
| 129
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/evaluation/collect_results_files.py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import shutil
from batchgenerators.utilities.file_and_folder_operations import subdirs, subfiles
def crawl_and_copy(current_folder, out_folder, prefix="fabian_", suffix="ummary.json"):
    """
    Recursively walks all subfolders of current_folder and copies every file whose
    name ends with *suffix* into the flat folder out_folder, prepending a prefix
    that grows with the folder path. Only folders whose path contains "fold0"
    actually contribute files.
    :param current_folder: root of the recursive crawl
    :param out_folder: destination folder for the copied (renamed) files
    :param prefix: string prepended to each copied file name; extended at every recursion level
    :return: None
    """
    subfolders = subdirs(current_folder, join=False)
    matching = [name for name in subfiles(current_folder, join=False) if name.endswith(suffix)]
    # files are only harvested from fold-0 folders
    if "fold0" in current_folder:
        for name in matching:
            shutil.copy(os.path.join(current_folder, name),
                        os.path.join(out_folder, prefix + name))
    for sub in subfolders:
        # empty prefix means "keep the bare folder name"; otherwise separate levels with "__"
        extension = sub if prefix == "" else "__" + sub
        crawl_and_copy(os.path.join(current_folder, sub), out_folder, prefix=prefix + extension)
if __name__ == "__main__":
    # Harvest all summary jsons from the nnU-Net training output tree into one
    # flat "leaderboard" folder, then post-process each json with mean dice.
    from nnunet.paths import network_training_output_dir
    # NOTE(review): hard-coded author-specific output path — adjust before reuse
    output_folder = "/home/fabian/PhD/results/nnUNetV2/leaderboard"
    crawl_and_copy(network_training_output_dir, output_folder)
    from nnunet.evaluation.add_mean_dice_to_json import run_in_folder
    run_in_folder(output_folder)
| 1,969
| 39.204082
| 114
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/evaluation/surface_dice.py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from medpy.metric.binary import __surface_distances
def normalized_surface_dice(a: np.ndarray, b: np.ndarray, threshold: float, spacing: tuple = None, connectivity=1):
    """
    This implementation differs from the official surface dice implementation! These two are not comparable!!!!!
    The normalized surface dice is symmetric, so it should not matter whether a or b is the reference image
    This implementation natively supports 2D and 3D images. Whether other dimensions are supported depends on the
    __surface_distances implementation in medpy
    :param a: image 1, must have the same shape as b
    :param b: image 2, must have the same shape as a
    :param threshold: distances below this threshold will be counted as true positives. Threshold is in mm, not voxels!
    (if spacing = (1, 1(, 1)) then one voxel=1mm so the threshold is effectively in voxels)
    must be a tuple of len dimension(a)
    :param spacing: how many mm is one voxel in reality? Can be left at None, we then assume an isotropic spacing of 1mm
    :param connectivity: see scipy.ndimage.generate_binary_structure for more information. I suggest you leave that
    one alone
    :return:
    """
    shape_msg = "a and b must have the same shape. a.shape= %s, " \
                "b.shape= %s" % (str(a.shape), str(b.shape))
    assert all(i == j for i, j in zip(a.shape, b.shape)), shape_msg
    if spacing is None:
        # default: isotropic 1mm voxels
        spacing = tuple(1 for _ in range(len(a.shape)))
    dist_ab = __surface_distances(a, b, spacing, connectivity)
    dist_ba = __surface_distances(b, a, spacing, connectivity)
    n_a = len(dist_ab)
    n_b = len(dist_ba)
    # fraction of each surface lying within the tolerance of the other surface
    tp_a = np.sum(dist_ab <= threshold) / n_a
    tp_b = np.sum(dist_ba <= threshold) / n_b
    fp = np.sum(dist_ab > threshold) / n_a
    fn = np.sum(dist_ba > threshold) / n_b
    # 1e-8 just so that we don't get div by 0
    return (tp_a + tp_b) / (tp_a + tp_b + fp + fn + 1e-8)
| 2,686
| 45.327586
| 120
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/evaluation/metrics.py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from medpy import metric
def assert_shape(test, reference):
    """Assert that the two arrays have identical shapes."""
    mismatch_msg = "Shape mismatch: {} and {}".format(test.shape, reference.shape)
    assert test.shape == reference.shape, mismatch_msg
class ConfusionMatrix:
    """Lazily evaluated voxel-wise confusion matrix for a test/reference mask pair.

    Any non-zero entry counts as foreground. The tp/fp/tn/fn counts, the total
    voxel count and the emptiness/fullness flags are computed on first access
    through a ``get_*`` accessor and cached until a new test or reference array
    is set.
    """

    def __init__(self, test=None, reference=None):
        # initialise cache slots, then store the two segmentations
        # (each setter clears the cache again)
        self.tp = None
        self.fp = None
        self.tn = None
        self.fn = None
        self.size = None
        self.reference_empty = None
        self.reference_full = None
        self.test_empty = None
        self.test_full = None
        self.set_reference(reference)
        self.set_test(test)

    def set_test(self, test):
        """Store a new test segmentation and invalidate all cached statistics."""
        self.test = test
        self.reset()

    def set_reference(self, reference):
        """Store a new reference segmentation and invalidate all cached statistics."""
        self.reference = reference
        self.reset()

    def reset(self):
        """Clear every cached statistic; the next accessor call recomputes them."""
        for cached in ("tp", "fp", "tn", "fn", "size",
                       "test_empty", "test_full", "reference_empty", "reference_full"):
            setattr(self, cached, None)

    def compute(self):
        """Populate tp/fp/tn/fn, the voxel count and the emptiness/fullness flags."""
        if self.test is None or self.reference is None:
            raise ValueError("'test' and 'reference' must both be set to compute confusion matrix.")
        assert_shape(self.test, self.reference)
        test_fg = self.test != 0
        test_bg = self.test == 0
        ref_fg = self.reference != 0
        ref_bg = self.reference == 0
        self.tp = int((test_fg * ref_fg).sum())
        self.fp = int((test_fg * ref_bg).sum())
        self.tn = int((test_bg * ref_bg).sum())
        self.fn = int((test_bg * ref_fg).sum())
        # int64 accumulator avoids overflow for very large volumes
        self.size = int(np.prod(self.reference.shape, dtype=np.int64))
        self.test_empty = not np.any(self.test)
        self.test_full = np.all(self.test)
        self.reference_empty = not np.any(self.reference)
        self.reference_full = np.all(self.reference)

    def get_matrix(self):
        """Return (tp, fp, tn, fn), computing them first if necessary."""
        if any(count is None for count in (self.tp, self.fp, self.tn, self.fn)):
            self.compute()
        return self.tp, self.fp, self.tn, self.fn

    def get_size(self):
        """Return the total number of voxels, computing it if necessary."""
        if self.size is None:
            self.compute()
        return self.size

    def get_existence(self):
        """Return (test_empty, test_full, reference_empty, reference_full)."""
        if any(flag is None for flag in (self.test_empty, self.test_full,
                                         self.reference_empty, self.reference_full)):
            self.compute()
        return self.test_empty, self.test_full, self.reference_empty, self.reference_full
def dice(test=None, reference=None, confusion_matrix=None, nan_for_nonexisting=True, **kwargs):
    """Dice coefficient: 2TP / (2TP + FP + FN).

    Undefined when both masks are empty; returns NaN then (or 0. when
    nan_for_nonexisting is False).
    """
    cm = confusion_matrix if confusion_matrix is not None else ConfusionMatrix(test, reference)
    tp, fp, _, fn = cm.get_matrix()
    test_empty, _, reference_empty, _ = cm.get_existence()
    if test_empty and reference_empty:
        return float("NaN") if nan_for_nonexisting else 0.
    return float(2. * tp / (2 * tp + fp + fn))
def jaccard(test=None, reference=None, confusion_matrix=None, nan_for_nonexisting=True, **kwargs):
    """Jaccard index: TP / (TP + FP + FN).

    Undefined when both masks are empty; returns NaN then (or 0. when
    nan_for_nonexisting is False).
    """
    cm = confusion_matrix if confusion_matrix is not None else ConfusionMatrix(test, reference)
    tp, fp, _, fn = cm.get_matrix()
    test_empty, _, reference_empty, _ = cm.get_existence()
    if test_empty and reference_empty:
        return float("NaN") if nan_for_nonexisting else 0.
    return float(tp / (tp + fp + fn))
def precision(test=None, reference=None, confusion_matrix=None, nan_for_nonexisting=True, **kwargs):
    """Precision: TP / (TP + FP).

    Undefined when the test mask is empty; returns NaN then (or 0. when
    nan_for_nonexisting is False).
    """
    cm = confusion_matrix if confusion_matrix is not None else ConfusionMatrix(test, reference)
    tp, fp, _, _ = cm.get_matrix()
    test_empty, _, _, _ = cm.get_existence()
    if test_empty:
        return float("NaN") if nan_for_nonexisting else 0.
    return float(tp / (tp + fp))
def sensitivity(test=None, reference=None, confusion_matrix=None, nan_for_nonexisting=True, **kwargs):
    """Sensitivity (recall): TP / (TP + FN).

    Undefined when the reference mask is empty; returns NaN then (or 0. when
    nan_for_nonexisting is False).
    """
    cm = confusion_matrix if confusion_matrix is not None else ConfusionMatrix(test, reference)
    tp, _, _, fn = cm.get_matrix()
    _, _, reference_empty, _ = cm.get_existence()
    if reference_empty:
        return float("NaN") if nan_for_nonexisting else 0.
    return float(tp / (tp + fn))
def recall(test=None, reference=None, confusion_matrix=None, nan_for_nonexisting=True, **kwargs):
    """Recall: TP / (TP + FN) — alias of sensitivity."""
    result = sensitivity(test, reference, confusion_matrix, nan_for_nonexisting, **kwargs)
    return result
def specificity(test=None, reference=None, confusion_matrix=None, nan_for_nonexisting=True, **kwargs):
    """Specificity: TN / (TN + FP).

    Undefined when the reference mask is completely full (no true negatives
    possible); returns NaN then (or 0. when nan_for_nonexisting is False).
    """
    cm = confusion_matrix if confusion_matrix is not None else ConfusionMatrix(test, reference)
    _, fp, tn, _ = cm.get_matrix()
    _, _, _, reference_full = cm.get_existence()
    if reference_full:
        return float("NaN") if nan_for_nonexisting else 0.
    return float(tn / (tn + fp))
def accuracy(test=None, reference=None, confusion_matrix=None, **kwargs):
    """Accuracy: (TP + TN) / (TP + FP + FN + TN)."""
    cm = confusion_matrix if confusion_matrix is not None else ConfusionMatrix(test, reference)
    tp, fp, tn, fn = cm.get_matrix()
    correct = tp + tn
    total = tp + fp + tn + fn
    return float(correct / total)
def fscore(test=None, reference=None, confusion_matrix=None, nan_for_nonexisting=True, beta=1., **kwargs):
    """F-beta score: (1 + b^2) * TP / ((1 + b^2) * TP + b^2 * FN + FP).

    Computed from precision and recall. When both precision and recall are 0
    (tp == 0 while fp and fn are non-zero) the original expression divided by
    zero; by the usual convention the score is defined as 0 in that case.
    NaN results (from nan_for_nonexisting) still propagate through unchanged.
    """
    precision_ = precision(test, reference, confusion_matrix, nan_for_nonexisting)
    recall_ = recall(test, reference, confusion_matrix, nan_for_nonexisting)
    denom = (beta * beta * precision_) + recall_
    if denom == 0:
        # precision == recall == 0 -> F-score is 0 by convention instead of crashing
        return 0.
    return (1 + beta * beta) * precision_ * recall_ / denom
def false_positive_rate(test=None, reference=None, confusion_matrix=None, nan_for_nonexisting=True, **kwargs):
    """False positive rate: FP / (FP + TN) — complement of specificity."""
    spec = specificity(test, reference, confusion_matrix, nan_for_nonexisting)
    return 1 - spec
def false_omission_rate(test=None, reference=None, confusion_matrix=None, nan_for_nonexisting=True, **kwargs):
    """False omission rate: FN / (TN + FN).

    Undefined when the test mask is completely full (no negatives predicted);
    returns NaN then (or 0. when nan_for_nonexisting is False).
    """
    cm = confusion_matrix if confusion_matrix is not None else ConfusionMatrix(test, reference)
    _, _, tn, fn = cm.get_matrix()
    _, test_full, _, _ = cm.get_existence()
    if test_full:
        return float("NaN") if nan_for_nonexisting else 0.
    return float(fn / (fn + tn))
def false_negative_rate(test=None, reference=None, confusion_matrix=None, nan_for_nonexisting=True, **kwargs):
    """False negative rate: FN / (TP + FN) — complement of sensitivity."""
    sens = sensitivity(test, reference, confusion_matrix, nan_for_nonexisting)
    return 1 - sens
def true_negative_rate(test=None, reference=None, confusion_matrix=None, nan_for_nonexisting=True, **kwargs):
    """True negative rate: TN / (TN + FP) — alias of specificity."""
    result = specificity(test, reference, confusion_matrix, nan_for_nonexisting)
    return result
def false_discovery_rate(test=None, reference=None, confusion_matrix=None, nan_for_nonexisting=True, **kwargs):
    """False discovery rate: FP / (TP + FP) — complement of precision."""
    prec = precision(test, reference, confusion_matrix, nan_for_nonexisting)
    return 1 - prec
def negative_predictive_value(test=None, reference=None, confusion_matrix=None, nan_for_nonexisting=True, **kwargs):
    """Negative predictive value: TN / (TN + FN) — complement of the false omission rate."""
    fomr = false_omission_rate(test, reference, confusion_matrix, nan_for_nonexisting)
    return 1 - fomr
def total_positives_test(test=None, reference=None, confusion_matrix=None, **kwargs):
    """Number of positive voxels in the test segmentation: TP + FP."""
    cm = confusion_matrix if confusion_matrix is not None else ConfusionMatrix(test, reference)
    tp, fp, _, _ = cm.get_matrix()
    return tp + fp
def total_negatives_test(test=None, reference=None, confusion_matrix=None, **kwargs):
    """Number of negative voxels in the test segmentation: TN + FN."""
    cm = confusion_matrix if confusion_matrix is not None else ConfusionMatrix(test, reference)
    _, _, tn, fn = cm.get_matrix()
    return tn + fn
def total_positives_reference(test=None, reference=None, confusion_matrix=None, **kwargs):
    """Number of positive voxels in the reference segmentation: TP + FN."""
    cm = confusion_matrix if confusion_matrix is not None else ConfusionMatrix(test, reference)
    tp, _, _, fn = cm.get_matrix()
    return tp + fn
def total_negatives_reference(test=None, reference=None, confusion_matrix=None, **kwargs):
    """Number of negative voxels in the reference segmentation: TN + FP."""
    cm = confusion_matrix if confusion_matrix is not None else ConfusionMatrix(test, reference)
    _, fp, tn, _ = cm.get_matrix()
    return tn + fp
def hausdorff_distance(test=None, reference=None, confusion_matrix=None, nan_for_nonexisting=True, voxel_spacing=None, connectivity=1, **kwargs):
    """Hausdorff distance between the two mask surfaces (delegates to medpy).

    Undefined whenever either mask is completely empty or completely full
    (no surface exists); returns NaN then, or 0 when nan_for_nonexisting is False.
    """
    cm = confusion_matrix if confusion_matrix is not None else ConfusionMatrix(test, reference)
    test_empty, test_full, reference_empty, reference_full = cm.get_existence()
    if any((test_empty, test_full, reference_empty, reference_full)):
        return float("NaN") if nan_for_nonexisting else 0
    return metric.hd(cm.test, cm.reference, voxel_spacing, connectivity)
def hausdorff_distance_95(test=None, reference=None, confusion_matrix=None, nan_for_nonexisting=True, voxel_spacing=None, connectivity=1, **kwargs):
    """95th-percentile Hausdorff distance (delegates to medpy).

    Undefined whenever either mask is completely empty or completely full
    (no surface exists); returns NaN then, or 0 when nan_for_nonexisting is False.
    """
    cm = confusion_matrix if confusion_matrix is not None else ConfusionMatrix(test, reference)
    test_empty, test_full, reference_empty, reference_full = cm.get_existence()
    if any((test_empty, test_full, reference_empty, reference_full)):
        return float("NaN") if nan_for_nonexisting else 0
    return metric.hd95(cm.test, cm.reference, voxel_spacing, connectivity)
def avg_surface_distance(test=None, reference=None, confusion_matrix=None, nan_for_nonexisting=True, voxel_spacing=None, connectivity=1, **kwargs):
    """Average surface distance from test to reference (delegates to medpy).

    Undefined whenever either mask is completely empty or completely full
    (no surface exists); returns NaN then, or 0 when nan_for_nonexisting is False.
    """
    cm = confusion_matrix if confusion_matrix is not None else ConfusionMatrix(test, reference)
    test_empty, test_full, reference_empty, reference_full = cm.get_existence()
    if any((test_empty, test_full, reference_empty, reference_full)):
        return float("NaN") if nan_for_nonexisting else 0
    return metric.asd(cm.test, cm.reference, voxel_spacing, connectivity)
def avg_surface_distance_symmetric(test=None, reference=None, confusion_matrix=None, nan_for_nonexisting=True, voxel_spacing=None, connectivity=1, **kwargs):
    """Average symmetric surface distance (delegates to medpy).

    Undefined whenever either mask is completely empty or completely full
    (no surface exists); returns NaN then, or 0 when nan_for_nonexisting is False.
    """
    cm = confusion_matrix if confusion_matrix is not None else ConfusionMatrix(test, reference)
    test_empty, test_full, reference_empty, reference_full = cm.get_existence()
    if any((test_empty, test_full, reference_empty, reference_full)):
        return float("NaN") if nan_for_nonexisting else 0
    return metric.assd(cm.test, cm.reference, voxel_spacing, connectivity)
# Name -> implementation registry for every metric in this module.
# NOTE(review): the keys appear to be the public metric names used in result
# summaries, so they must stay stable; "total Negatives Reference" keeps its
# historical lower-case "t" for backward compatibility — confirm before renaming.
ALL_METRICS = {
    "False Positive Rate": false_positive_rate,
    "Dice": dice,
    "Jaccard": jaccard,
    "Hausdorff Distance": hausdorff_distance,
    "Hausdorff Distance 95": hausdorff_distance_95,
    "Precision": precision,
    "Recall": recall,
    "Avg. Symmetric Surface Distance": avg_surface_distance_symmetric,
    "Avg. Surface Distance": avg_surface_distance,
    "Accuracy": accuracy,
    "False Omission Rate": false_omission_rate,
    "Negative Predictive Value": negative_predictive_value,
    "False Negative Rate": false_negative_rate,
    "True Negative Rate": true_negative_rate,
    "False Discovery Rate": false_discovery_rate,
    "Total Positives Test": total_positives_test,
    "Total Negatives Test": total_negatives_test,
    "Total Positives Reference": total_positives_reference,
    "total Negatives Reference": total_negatives_reference
}
| 13,031
| 31.019656
| 157
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/evaluation/__init__.py
|
from __future__ import absolute_import
from . import *
| 54
| 26.5
| 38
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/evaluation/model_selection/rank_candidates_cascade.py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from batchgenerators.utilities.file_and_folder_operations import *
from nnunet.paths import network_training_output_dir
if __name__ == "__main__":
    # run collect_all_fold0_results_and_summarize_in_one_csv.py first
    summary_files_dir = join(network_training_output_dir, "summary_jsons_fold0_new")
    output_file = join(network_training_output_dir, "summary_cascade.csv")
    folds = (0, )
    # fold tuple rendered as a compact string, e.g. (0,) -> "0"; part of the summary-json file names
    folds_str = ""
    for f in folds:
        folds_str += str(f)
    # default plans identifier; trainers in overwrite_plans are looked up under other plans instead
    plans = "nnUNetPlansv2.1"
    overwrite_plans = {
        'nnUNetTrainerCascadeFullRes': ['nnUNetPlans'],
    }
    trainers = [
        'nnUNetTrainerCascadeFullRes',
        'nnUNetTrainerV2CascadeFullRes_EducatedGuess',
        'nnUNetTrainerV2CascadeFullRes_EducatedGuess2',
        'nnUNetTrainerV2CascadeFullRes_EducatedGuess3',
        'nnUNetTrainerV2CascadeFullRes_lowerLR',
        'nnUNetTrainerV2CascadeFullRes',
        'nnUNetTrainerV2CascadeFullRes_noConnComp',
        'nnUNetTrainerV2CascadeFullRes_shorter_lowerLR',
        'nnUNetTrainerV2CascadeFullRes_shorter',
        'nnUNetTrainerV2CascadeFullRes_smallerBinStrel',
        #'',
        #'',
        #'',
        #'',
        #'',
        #'',
    ]
    datasets = \
        {
            "Task003_Liver": ("3d_cascade_fullres", ),
            "Task006_Lung": ("3d_cascade_fullres", ),
            "Task007_Pancreas": ("3d_cascade_fullres", ),
            "Task008_HepaticVessel": ("3d_cascade_fullres", ),
            "Task009_Spleen": ("3d_cascade_fullres", ),
            "Task010_Colon": ("3d_cascade_fullres", ),
            "Task017_AbdominalOrganSegmentation": ("3d_cascade_fullres", ),
            #"Task029_LITS": ("3d_cascade_fullres", ),
            "Task048_KiTS_clean": ("3d_cascade_fullres", ),
            "Task055_SegTHOR": ("3d_cascade_fullres", ),
            "Task056_VerSe": ("3d_cascade_fullres", ),
            #"": ("3d_cascade_fullres", ),
        }
    # summary jsons may live in any of these three validation folder names; tried in order
    expected_validation_folder = "validation_raw"
    alternative_validation_folder = "validation"
    alternative_alternative_validation_folder = "validation_tiledTrue_doMirror_True"
    interested_in = "mean"
    result_per_dataset = {}
    for d in datasets:
        result_per_dataset[d] = {}
        for c in datasets[d]:
            result_per_dataset[d][c] = []
    valid_trainers = []
    all_trainers = []
    # write one CSV row per trainer/plans combination; "NA" marks missing summary jsons
    with open(output_file, 'w') as f:
        f.write("trainer,")
        for t in datasets.keys():
            s = t[4:7]
            for c in datasets[t]:
                s1 = s + "_" + c[3]
                f.write("%s," % s1)
        f.write("\n")
        for trainer in trainers:
            trainer_plans = [plans]
            if trainer in overwrite_plans.keys():
                trainer_plans = overwrite_plans[trainer]
            result_per_dataset_here = {}
            for d in datasets:
                result_per_dataset_here[d] = {}
            for p in trainer_plans:
                name = "%s__%s" % (trainer, p)
                all_present = True
                all_trainers.append(name)
                f.write("%s," % name)
                for dataset in datasets.keys():
                    for configuration in datasets[dataset]:
                        summary_file = join(summary_files_dir, "%s__%s__%s__%s__%s__%s.json" % (dataset, configuration, trainer, p, expected_validation_folder, folds_str))
                        if not isfile(summary_file):
                            summary_file = join(summary_files_dir, "%s__%s__%s__%s__%s__%s.json" % (dataset, configuration, trainer, p, alternative_validation_folder, folds_str))
                            if not isfile(summary_file):
                                summary_file = join(summary_files_dir, "%s__%s__%s__%s__%s__%s.json" % (
                                dataset, configuration, trainer, p, alternative_alternative_validation_folder, folds_str))
                            if not isfile(summary_file):
                                all_present = False
                                print(name, dataset, configuration, "has missing summary file")
                        if isfile(summary_file):
                            result = load_json(summary_file)['results'][interested_in]['mean']['Dice']
                            result_per_dataset_here[dataset][configuration] = result
                            f.write("%02.4f," % result)
                        else:
                            f.write("NA,")
                            result_per_dataset_here[dataset][configuration] = 0
                f.write("\n")
                if True:
                    valid_trainers.append(name)
                    for d in datasets:
                        for c in datasets[d]:
                            result_per_dataset[d][c].append(result_per_dataset_here[d][c])
    invalid_trainers = [i for i in all_trainers if i not in valid_trainers]
    num_valid = len(valid_trainers)
    num_datasets = len(datasets.keys())
    # create an array that is trainer x dataset. If more than one configuration is there then use the best metric across the two
    all_res = np.zeros((num_valid, num_datasets))
    for j, d in enumerate(datasets.keys()):
        ks = list(result_per_dataset[d].keys())
        tmp = result_per_dataset[d][ks[0]]
        for k in ks[1:]:
            for i in range(len(tmp)):
                tmp[i] = max(tmp[i], result_per_dataset[d][k][i])
        all_res[:, j] = tmp
    # rank trainers per dataset (rank 0 = best dice), then average the ranks across datasets
    ranks_arr = np.zeros_like(all_res)
    for d in range(ranks_arr.shape[1]):
        temp = np.argsort(all_res[:, d])[::-1] # inverse because we want the highest dice to be rank0
        ranks = np.empty_like(temp)
        ranks[temp] = np.arange(len(temp))
        ranks_arr[:, d] = ranks
    mn = np.mean(ranks_arr, 1)
    for i in np.argsort(mn):
        print(mn[i], valid_trainers[i])
    print()
    print(valid_trainers[np.argmin(mn)])
| 6,565
| 38.793939
| 178
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/evaluation/model_selection/rank_candidates.py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from batchgenerators.utilities.file_and_folder_operations import *
from nnunet.paths import network_training_output_dir
if __name__ == "__main__":
    # run collect_all_fold0_results_and_summarize_in_one_csv.py first
    summary_files_dir = join(network_training_output_dir, "summary_jsons_fold0_new")
    output_file = join(network_training_output_dir, "summary.csv")
    folds = (0, )
    # fold tuple rendered as a compact string, e.g. (0,) -> "0"; part of the summary-json file names
    folds_str = ""
    for f in folds:
        folds_str += str(f)
    # default plans identifier; trainers in overwrite_plans are looked up under other plans instead
    plans = "nnUNetPlans"
    overwrite_plans = {
        'nnUNetTrainerV2_2': ["nnUNetPlans", "nnUNetPlansisoPatchesInVoxels"], # r
        'nnUNetTrainerV2': ["nnUNetPlansnonCT", "nnUNetPlansCT2", "nnUNetPlansallConv3x3",
                            "nnUNetPlansfixedisoPatchesInVoxels", "nnUNetPlanstargetSpacingForAnisoAxis",
                            "nnUNetPlanspoolBasedOnSpacing", "nnUNetPlansfixedisoPatchesInmm", "nnUNetPlansv2.1"],
        'nnUNetTrainerV2_warmup': ["nnUNetPlans", "nnUNetPlansv2.1", "nnUNetPlansv2.1_big", "nnUNetPlansv2.1_verybig"],
        'nnUNetTrainerV2_cycleAtEnd': ["nnUNetPlansv2.1"],
        'nnUNetTrainerV2_cycleAtEnd2': ["nnUNetPlansv2.1"],
        'nnUNetTrainerV2_reduceMomentumDuringTraining': ["nnUNetPlansv2.1"],
        'nnUNetTrainerV2_graduallyTransitionFromCEToDice': ["nnUNetPlansv2.1"],
        'nnUNetTrainerV2_independentScalePerAxis': ["nnUNetPlansv2.1"],
        'nnUNetTrainerV2_Mish': ["nnUNetPlansv2.1"],
        'nnUNetTrainerV2_Ranger_lr3en4': ["nnUNetPlansv2.1"],
        'nnUNetTrainerV2_fp32': ["nnUNetPlansv2.1"],
        'nnUNetTrainerV2_GN': ["nnUNetPlansv2.1"],
        'nnUNetTrainerV2_momentum098': ["nnUNetPlans", "nnUNetPlansv2.1"],
        'nnUNetTrainerV2_momentum09': ["nnUNetPlansv2.1"],
        'nnUNetTrainerV2_DP': ["nnUNetPlansv2.1_verybig"],
        'nnUNetTrainerV2_DDP': ["nnUNetPlansv2.1_verybig"],
        'nnUNetTrainerV2_FRN': ["nnUNetPlansv2.1"],
        'nnUNetTrainerV2_resample33': ["nnUNetPlansv2.3"],
        'nnUNetTrainerV2_O2': ["nnUNetPlansv2.1"],
        'nnUNetTrainerV2_ResencUNet': ["nnUNetPlans_FabiansResUNet_v2.1"],
        'nnUNetTrainerV2_DA2': ["nnUNetPlansv2.1"],
        'nnUNetTrainerV2_allConv3x3': ["nnUNetPlansv2.1"],
        'nnUNetTrainerV2_ForceBD': ["nnUNetPlansv2.1"],
        'nnUNetTrainerV2_ForceSD': ["nnUNetPlansv2.1"],
        'nnUNetTrainerV2_LReLU_slope_2en1': ["nnUNetPlansv2.1"],
        'nnUNetTrainerV2_lReLU_convReLUIN': ["nnUNetPlansv2.1"],
        'nnUNetTrainerV2_ReLU': ["nnUNetPlansv2.1"],
        'nnUNetTrainerV2_ReLU_biasInSegOutput': ["nnUNetPlansv2.1"],
        'nnUNetTrainerV2_ReLU_convReLUIN': ["nnUNetPlansv2.1"],
        'nnUNetTrainerV2_lReLU_biasInSegOutput': ["nnUNetPlansv2.1"],
        #'nnUNetTrainerV2_Loss_MCC': ["nnUNetPlansv2.1"],
        #'nnUNetTrainerV2_Loss_MCCnoBG': ["nnUNetPlansv2.1"],
        'nnUNetTrainerV2_Loss_DicewithBG': ["nnUNetPlansv2.1"],
        'nnUNetTrainerV2_Loss_Dice_LR1en3': ["nnUNetPlansv2.1"],
        'nnUNetTrainerV2_Loss_Dice': ["nnUNetPlans", "nnUNetPlansv2.1"],
        'nnUNetTrainerV2_Loss_DicewithBG_LR1en3': ["nnUNetPlansv2.1"],
        # 'nnUNetTrainerV2_fp32': ["nnUNetPlansv2.1"],
        # 'nnUNetTrainerV2_fp32': ["nnUNetPlansv2.1"],
        # 'nnUNetTrainerV2_fp32': ["nnUNetPlansv2.1"],
        # 'nnUNetTrainerV2_fp32': ["nnUNetPlansv2.1"],
        # 'nnUNetTrainerV2_fp32': ["nnUNetPlansv2.1"],
    }
    trainers = ['nnUNetTrainer'] + ['nnUNetTrainerNewCandidate%d' % i for i in range(1, 28)] + [
        'nnUNetTrainerNewCandidate24_2',
        'nnUNetTrainerNewCandidate24_3',
        'nnUNetTrainerNewCandidate26_2',
        'nnUNetTrainerNewCandidate27_2',
        'nnUNetTrainerNewCandidate23_always3DDA',
        'nnUNetTrainerNewCandidate23_corrInit',
        'nnUNetTrainerNewCandidate23_noOversampling',
        'nnUNetTrainerNewCandidate23_softDS',
        'nnUNetTrainerNewCandidate23_softDS2',
        'nnUNetTrainerNewCandidate23_softDS3',
        'nnUNetTrainerNewCandidate23_softDS4',
        'nnUNetTrainerNewCandidate23_2_fp16',
        'nnUNetTrainerNewCandidate23_2',
        'nnUNetTrainerVer2',
        'nnUNetTrainerV2_2',
        'nnUNetTrainerV2_3',
        'nnUNetTrainerV2_3_CE_GDL',
        'nnUNetTrainerV2_3_dcTopk10',
        'nnUNetTrainerV2_3_dcTopk20',
        'nnUNetTrainerV2_3_fp16',
        'nnUNetTrainerV2_3_softDS4',
        'nnUNetTrainerV2_3_softDS4_clean',
        'nnUNetTrainerV2_3_softDS4_clean_improvedDA',
        'nnUNetTrainerV2_3_softDS4_clean_improvedDA_newElDef',
        'nnUNetTrainerV2_3_softDS4_radam',
        'nnUNetTrainerV2_3_softDS4_radam_lowerLR',
        'nnUNetTrainerV2_2_schedule',
        'nnUNetTrainerV2_2_schedule2',
        'nnUNetTrainerV2_2_clean',
        'nnUNetTrainerV2_2_clean_improvedDA_newElDef',
        'nnUNetTrainerV2_2_fixes', # running
        'nnUNetTrainerV2_BN', # running
        'nnUNetTrainerV2_noDeepSupervision', # running
        'nnUNetTrainerV2_softDeepSupervision', # running
        'nnUNetTrainerV2_noDataAugmentation', # running
        'nnUNetTrainerV2_Loss_CE', # running
        'nnUNetTrainerV2_Loss_CEGDL',
        'nnUNetTrainerV2_Loss_Dice',
        'nnUNetTrainerV2_Loss_DiceTopK10',
        'nnUNetTrainerV2_Loss_TopK10',
        'nnUNetTrainerV2_Adam', # running
        'nnUNetTrainerV2_Adam_nnUNetTrainerlr', # running
        'nnUNetTrainerV2_SGD_ReduceOnPlateau', # running
        'nnUNetTrainerV2_SGD_lr1en1', # running
        'nnUNetTrainerV2_SGD_lr1en3', # running
        'nnUNetTrainerV2_fixedNonlin', # running
        'nnUNetTrainerV2_GeLU', # running
        'nnUNetTrainerV2_3ConvPerStage',
        'nnUNetTrainerV2_NoNormalization',
        'nnUNetTrainerV2_Adam_ReduceOnPlateau',
        'nnUNetTrainerV2_fp16',
        'nnUNetTrainerV2', # see overwrite_plans
        'nnUNetTrainerV2_noMirroring',
        'nnUNetTrainerV2_momentum09',
        'nnUNetTrainerV2_momentum095',
        'nnUNetTrainerV2_momentum098',
        'nnUNetTrainerV2_warmup',
        'nnUNetTrainerV2_Loss_Dice_LR1en3',
        'nnUNetTrainerV2_NoNormalization_lr1en3',
        'nnUNetTrainerV2_Loss_Dice_squared',
        'nnUNetTrainerV2_newElDef',
        'nnUNetTrainerV2_fp32',
        'nnUNetTrainerV2_cycleAtEnd',
        'nnUNetTrainerV2_reduceMomentumDuringTraining',
        'nnUNetTrainerV2_graduallyTransitionFromCEToDice',
        'nnUNetTrainerV2_insaneDA',
        'nnUNetTrainerV2_independentScalePerAxis',
        'nnUNetTrainerV2_Mish',
        'nnUNetTrainerV2_Ranger_lr3en4',
        'nnUNetTrainerV2_cycleAtEnd2',
        'nnUNetTrainerV2_GN',
        'nnUNetTrainerV2_DP',
        'nnUNetTrainerV2_FRN',
        'nnUNetTrainerV2_resample33',
        'nnUNetTrainerV2_O2',
        'nnUNetTrainerV2_ResencUNet',
        'nnUNetTrainerV2_DA2',
        'nnUNetTrainerV2_allConv3x3',
        'nnUNetTrainerV2_ForceBD',
        'nnUNetTrainerV2_ForceSD',
        'nnUNetTrainerV2_ReLU',
        'nnUNetTrainerV2_LReLU_slope_2en1',
        'nnUNetTrainerV2_lReLU_convReLUIN',
        'nnUNetTrainerV2_ReLU_biasInSegOutput',
        'nnUNetTrainerV2_ReLU_convReLUIN',
        'nnUNetTrainerV2_lReLU_biasInSegOutput',
        'nnUNetTrainerV2_Loss_DicewithBG_LR1en3',
        #'nnUNetTrainerV2_Loss_MCCnoBG',
        'nnUNetTrainerV2_Loss_DicewithBG',
        # 'nnUNetTrainerV2_Loss_Dice_LR1en3',
        # 'nnUNetTrainerV2_Ranger_lr3en4',
        # 'nnUNetTrainerV2_Ranger_lr3en4',
        # 'nnUNetTrainerV2_Ranger_lr3en4',
        # 'nnUNetTrainerV2_Ranger_lr3en4',
        # 'nnUNetTrainerV2_Ranger_lr3en4',
        # 'nnUNetTrainerV2_Ranger_lr3en4',
        # 'nnUNetTrainerV2_Ranger_lr3en4',
        # 'nnUNetTrainerV2_Ranger_lr3en4',
        # 'nnUNetTrainerV2_Ranger_lr3en4',
        # 'nnUNetTrainerV2_Ranger_lr3en4',
        # 'nnUNetTrainerV2_Ranger_lr3en4',
        # 'nnUNetTrainerV2_Ranger_lr3en4',
        # 'nnUNetTrainerV2_Ranger_lr3en4',
    ]
    datasets = \
        {"Task001_BrainTumour": ("3d_fullres", ),
         "Task002_Heart": ("3d_fullres",),
         #"Task024_Promise": ("3d_fullres",),
         #"Task027_ACDC": ("3d_fullres",),
         "Task003_Liver": ("3d_fullres", "3d_lowres"),
         "Task004_Hippocampus": ("3d_fullres",),
         "Task005_Prostate": ("3d_fullres",),
         "Task006_Lung": ("3d_fullres", "3d_lowres"),
         "Task007_Pancreas": ("3d_fullres", "3d_lowres"),
         "Task008_HepaticVessel": ("3d_fullres", "3d_lowres"),
         "Task009_Spleen": ("3d_fullres", "3d_lowres"),
         "Task010_Colon": ("3d_fullres", "3d_lowres"),}
    # summary jsons may live in any of these three validation folder names; tried in order
    expected_validation_folder = "validation_raw"
    alternative_validation_folder = "validation"
    alternative_alternative_validation_folder = "validation_tiledTrue_doMirror_True"
    interested_in = "mean"
    result_per_dataset = {}
    for d in datasets:
        result_per_dataset[d] = {}
        for c in datasets[d]:
            result_per_dataset[d][c] = []
    valid_trainers = []
    all_trainers = []
    # write one CSV row per trainer/plans combination; "NA" marks missing summary jsons
    with open(output_file, 'w') as f:
        f.write("trainer,")
        for t in datasets.keys():
            s = t[4:7]
            for c in datasets[t]:
                s1 = s + "_" + c[3]
                f.write("%s," % s1)
        f.write("\n")
        for trainer in trainers:
            trainer_plans = [plans]
            if trainer in overwrite_plans.keys():
                trainer_plans = overwrite_plans[trainer]
            result_per_dataset_here = {}
            for d in datasets:
                result_per_dataset_here[d] = {}
            for p in trainer_plans:
                name = "%s__%s" % (trainer, p)
                all_present = True
                all_trainers.append(name)
                f.write("%s," % name)
                for dataset in datasets.keys():
                    for configuration in datasets[dataset]:
                        summary_file = join(summary_files_dir, "%s__%s__%s__%s__%s__%s.json" % (dataset, configuration, trainer, p, expected_validation_folder, folds_str))
                        if not isfile(summary_file):
                            summary_file = join(summary_files_dir, "%s__%s__%s__%s__%s__%s.json" % (dataset, configuration, trainer, p, alternative_validation_folder, folds_str))
                            if not isfile(summary_file):
                                summary_file = join(summary_files_dir, "%s__%s__%s__%s__%s__%s.json" % (
                                dataset, configuration, trainer, p, alternative_alternative_validation_folder, folds_str))
                            if not isfile(summary_file):
                                all_present = False
                                print(name, dataset, configuration, "has missing summary file")
                        if isfile(summary_file):
                            result = load_json(summary_file)['results'][interested_in]['mean']['Dice']
                            result_per_dataset_here[dataset][configuration] = result
                            f.write("%02.4f," % result)
                        else:
                            f.write("NA,")
                            result_per_dataset_here[dataset][configuration] = 0
                f.write("\n")
                if True:
                    valid_trainers.append(name)
                    for d in datasets:
                        for c in datasets[d]:
                            result_per_dataset[d][c].append(result_per_dataset_here[d][c])
    invalid_trainers = [i for i in all_trainers if i not in valid_trainers]
    num_valid = len(valid_trainers)
    num_datasets = len(datasets.keys())
    # create an array that is trainer x dataset. If more than one configuration is there then use the best metric across the two
    all_res = np.zeros((num_valid, num_datasets))
    for j, d in enumerate(datasets.keys()):
        ks = list(result_per_dataset[d].keys())
        tmp = result_per_dataset[d][ks[0]]
        for k in ks[1:]:
            for i in range(len(tmp)):
                tmp[i] = max(tmp[i], result_per_dataset[d][k][i])
        all_res[:, j] = tmp
    # rank trainers per dataset (rank 0 = best dice), then average the ranks across datasets
    ranks_arr = np.zeros_like(all_res)
    for d in range(ranks_arr.shape[1]):
        temp = np.argsort(all_res[:, d])[::-1] # inverse because we want the highest dice to be rank0
        ranks = np.empty_like(temp)
        ranks[temp] = np.arange(len(temp))
        ranks_arr[:, d] = ranks
    mn = np.mean(ranks_arr, 1)
    for i in np.argsort(mn):
        print(mn[i], valid_trainers[i])
    print()
    print(valid_trainers[np.argmin(mn)])
| 13,165
| 43.630508
| 178
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/evaluation/model_selection/rank_candidates_StructSeg.py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from batchgenerators.utilities.file_and_folder_operations import *
from nnunet.paths import network_training_output_dir
if __name__ == "__main__":
# run collect_all_fold0_results_and_summarize_in_one_csv.py first
summary_files_dir = join(network_training_output_dir, "summary_jsons_new")
output_file = join(network_training_output_dir, "summary_structseg_5folds.csv")
folds = (0, 1, 2, 3, 4)
folds_str = ""
for f in folds:
folds_str += str(f)
plans = "nnUNetPlans"
overwrite_plans = {
'nnUNetTrainerV2_2': ["nnUNetPlans", "nnUNetPlans_customClip"], # r
'nnUNetTrainerV2_2_noMirror': ["nnUNetPlans", "nnUNetPlans_customClip"], # r
'nnUNetTrainerV2_lessMomentum_noMirror': ["nnUNetPlans", "nnUNetPlans_customClip"], # r
'nnUNetTrainerV2_2_structSeg_noMirror': ["nnUNetPlans", "nnUNetPlans_customClip"], # r
'nnUNetTrainerV2_2_structSeg': ["nnUNetPlans", "nnUNetPlans_customClip"], # r
'nnUNetTrainerV2_lessMomentum_noMirror_structSeg': ["nnUNetPlans", "nnUNetPlans_customClip"], # r
'nnUNetTrainerV2_FabiansResUNet_structSet_NoMirror_leakyDecoder': ["nnUNetPlans", "nnUNetPlans_customClip"], # r
'nnUNetTrainerV2_FabiansResUNet_structSet_NoMirror': ["nnUNetPlans", "nnUNetPlans_customClip"], # r
'nnUNetTrainerV2_FabiansResUNet_structSet': ["nnUNetPlans", "nnUNetPlans_customClip"], # r
}
trainers = ['nnUNetTrainer'] + [
'nnUNetTrainerV2_2',
'nnUNetTrainerV2_lessMomentum_noMirror',
'nnUNetTrainerV2_2_noMirror',
'nnUNetTrainerV2_2_structSeg_noMirror',
'nnUNetTrainerV2_2_structSeg',
'nnUNetTrainerV2_lessMomentum_noMirror_structSeg',
'nnUNetTrainerV2_FabiansResUNet_structSet_NoMirror_leakyDecoder',
'nnUNetTrainerV2_FabiansResUNet_structSet_NoMirror',
'nnUNetTrainerV2_FabiansResUNet_structSet',
]
datasets = \
{"Task049_StructSeg2019_Task1_HaN_OAR": ("3d_fullres", "3d_lowres", "2d"),
"Task050_StructSeg2019_Task2_Naso_GTV": ("3d_fullres", "3d_lowres", "2d"),
"Task051_StructSeg2019_Task3_Thoracic_OAR": ("3d_fullres", "3d_lowres", "2d"),
"Task052_StructSeg2019_Task4_Lung_GTV": ("3d_fullres", "3d_lowres", "2d"),
}
expected_validation_folder = "validation_raw"
alternative_validation_folder = "validation"
alternative_alternative_validation_folder = "validation_tiledTrue_doMirror_True"
interested_in = "mean"
result_per_dataset = {}
for d in datasets:
result_per_dataset[d] = {}
for c in datasets[d]:
result_per_dataset[d][c] = []
valid_trainers = []
all_trainers = []
with open(output_file, 'w') as f:
f.write("trainer,")
for t in datasets.keys():
s = t[4:7]
for c in datasets[t]:
if len(c) > 3:
n = c[3]
else:
n = "2"
s1 = s + "_" + n
f.write("%s," % s1)
f.write("\n")
for trainer in trainers:
trainer_plans = [plans]
if trainer in overwrite_plans.keys():
trainer_plans = overwrite_plans[trainer]
result_per_dataset_here = {}
for d in datasets:
result_per_dataset_here[d] = {}
for p in trainer_plans:
name = "%s__%s" % (trainer, p)
all_present = True
all_trainers.append(name)
f.write("%s," % name)
for dataset in datasets.keys():
for configuration in datasets[dataset]:
summary_file = join(summary_files_dir, "%s__%s__%s__%s__%s__%s.json" % (dataset, configuration, trainer, p, expected_validation_folder, folds_str))
if not isfile(summary_file):
summary_file = join(summary_files_dir, "%s__%s__%s__%s__%s__%s.json" % (dataset, configuration, trainer, p, alternative_validation_folder, folds_str))
if not isfile(summary_file):
summary_file = join(summary_files_dir, "%s__%s__%s__%s__%s__%s.json" % (
dataset, configuration, trainer, p, alternative_alternative_validation_folder, folds_str))
if not isfile(summary_file):
all_present = False
print(name, dataset, configuration, "has missing summary file")
if isfile(summary_file):
result = load_json(summary_file)['results'][interested_in]['mean']['Dice']
result_per_dataset_here[dataset][configuration] = result
f.write("%02.4f," % result)
else:
f.write("NA,")
f.write("\n")
if all_present:
valid_trainers.append(name)
for d in datasets:
for c in datasets[d]:
result_per_dataset[d][c].append(result_per_dataset_here[d][c])
invalid_trainers = [i for i in all_trainers if i not in valid_trainers]
num_valid = len(valid_trainers)
num_datasets = len(datasets.keys())
# create an array that is trainer x dataset. If more than one configuration is there then use the best metric across the two
all_res = np.zeros((num_valid, num_datasets))
for j, d in enumerate(datasets.keys()):
ks = list(result_per_dataset[d].keys())
tmp = result_per_dataset[d][ks[0]]
for k in ks[1:]:
for i in range(len(tmp)):
tmp[i] = max(tmp[i], result_per_dataset[d][k][i])
all_res[:, j] = tmp
ranks_arr = np.zeros_like(all_res)
for d in range(ranks_arr.shape[1]):
temp = np.argsort(all_res[:, d])[::-1] # inverse because we want the highest dice to be rank0
ranks = np.empty_like(temp)
ranks[temp] = np.arange(len(temp))
ranks_arr[:, d] = ranks
mn = np.mean(ranks_arr, 1)
for i in np.argsort(mn):
print(mn[i], valid_trainers[i])
print()
print(valid_trainers[np.argmin(mn)])
| 6,992
| 42.70625
| 178
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/evaluation/model_selection/figure_out_what_to_submit.py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from itertools import combinations
import nnunet
from batchgenerators.utilities.file_and_folder_operations import *
from nnunet.evaluation.add_mean_dice_to_json import foreground_mean
from nnunet.evaluation.model_selection.ensemble import ensemble
from nnunet.paths import network_training_output_dir
import numpy as np
from subprocess import call
from nnunet.postprocessing.consolidate_postprocessing import consolidate_folds
from nnunet.utilities.folder_names import get_output_folder_name
from nnunet.paths import default_cascade_trainer, default_trainer, default_plans_identifier
def find_task_name(folder, task_id):
candidates = subdirs(folder, prefix="Task%03.0d_" % task_id, join=False)
assert len(candidates) > 0, "no candidate for Task id %d found in folder %s" % (task_id, folder)
assert len(candidates) == 1, "more than one candidate for Task id %d found in folder %s" % (task_id, folder)
return candidates[0]
def get_mean_foreground_dice(json_file):
results = load_json(json_file)
return get_foreground_mean(results)
def get_foreground_mean(results):
results_mean = results['results']['mean']
dice_scores = [results_mean[i]['Dice'] for i in results_mean.keys() if i != "0" and i != 'mean']
return np.mean(dice_scores)
def main():
import argparse
parser = argparse.ArgumentParser(usage="This is intended to identify the best model based on the five fold "
"cross-validation. Running this script requires all models to have been run "
"already. This script will summarize the results of the five folds of all "
"models in one json each for easy interpretability")
parser.add_argument("-m", '--models', nargs="+", required=False, default=['2d', '3d_lowres', '3d_fullres',
'3d_cascade_fullres'])
parser.add_argument("-t", '--task_ids', nargs="+", required=True)
parser.add_argument("-tr", type=str, required=False, default=default_trainer,
help="nnUNetTrainer class. Default: %s" % default_trainer)
parser.add_argument("-ctr", type=str, required=False, default=default_cascade_trainer,
help="nnUNetTrainer class for cascade model. Default: %s" % default_cascade_trainer)
parser.add_argument("-pl", type=str, required=False, default=default_plans_identifier,
help="plans name, Default: %s" % default_plans_identifier)
parser.add_argument('-f', '--folds', nargs='+', default=(0, 1, 2, 3, 4), help="use this if you have non-standard folds")
parser.add_argument("--strict", required=False, default=False, action="store_true",
help="set this flag if you want this script to crash of one of the models is missing")
args = parser.parse_args()
tasks = [int(i) for i in args.task_ids]
models = args.models
tr = args.tr
trc = args.ctr
strict = args.strict
pl = args.pl
folds = tuple(int(i) for i in args.folds)
validation_folder = "validation_raw"
# this script now acts independently from the summary jsons. That was unnecessary
id_task_mapping = {}
# for each task, run ensembling using all combinations of two models
for t in tasks:
# first collect pure model performance (postprocessed)
results = {}
all_results = {}
valid_models = []
for m in models:
try:
if m == "3d_cascade_fullres":
trainer = trc
else:
trainer = tr
if t not in id_task_mapping.keys():
task_name = find_task_name(get_output_folder_name(m), t)
id_task_mapping[t] = task_name
output_folder = get_output_folder_name(m, id_task_mapping[t], trainer, pl)
assert isdir(output_folder), "Output folder for model %s is missing, expected: %s" % (m, output_folder)
# we need a postprocessing_json for inference, so that must be present
postprocessing_json = join(output_folder, "postprocessing.json")
# we need cv_niftis_postprocessed to know the single model performance
cv_niftis_folder = join(output_folder, "cv_niftis_raw")
if not isfile(postprocessing_json) or not isdir(cv_niftis_folder):
print("running missing postprocessing for %s and model %s" % (id_task_mapping[t], m))
consolidate_folds(output_folder, folds=folds)
assert isfile(postprocessing_json), "Postprocessing json missing, expected: %s" % postprocessing_json
assert isdir(cv_niftis_folder), "Folder with niftis from CV missing, expected: %s" % cv_niftis_folder
# obtain mean foreground dice
summary_file = join(cv_niftis_folder, "summary.json")
results[m] = get_mean_foreground_dice(summary_file)
foreground_mean(summary_file)
all_results[m] = load_json(summary_file)['results']['mean']
valid_models.append(m)
except Exception as e:
if strict:
raise e
else:
print("WARNING!")
print(e)
# now run ensembling and add ensembling to results
print("\nFound the following valid models:\n", valid_models)
if len(valid_models) > 1:
for m1, m2 in combinations(valid_models, 2):
trainer_m1 = trc if m1 == "3d_cascade_fullres" else tr
trainer_m2 = trc if m2 == "3d_cascade_fullres" else tr
ensemble_name = "ensemble_" + m1 + "__" + trainer_m1 + "__" + pl + "--" + m2 + "__" + trainer_m2 + "__" + pl
output_folder_base = join(network_training_output_dir, "ensembles", id_task_mapping[t], ensemble_name)
maybe_mkdir_p(output_folder_base)
network1_folder = get_output_folder_name(m1, id_task_mapping[t], trainer_m1, pl)
network2_folder = get_output_folder_name(m2, id_task_mapping[t], trainer_m2, pl)
print("ensembling", network1_folder, network2_folder)
ensemble(network1_folder, network2_folder, output_folder_base, id_task_mapping[t], validation_folder, folds)
# ensembling will automatically do postprocessingget_foreground_mean
# now get result of ensemble
results[ensemble_name] = get_mean_foreground_dice(join(output_folder_base, "ensembled_raw", "summary.json"))
summary_file = join(output_folder_base, "ensembled_raw", "summary.json")
foreground_mean(summary_file)
all_results[ensemble_name] = load_json(summary_file)['results']['mean']
# now print all mean foreground dice and highlight the best
foreground_dices = list(results.values())
best = np.max(foreground_dices)
for k, v in results.items():
print(k, v)
predict_str = ""
best_model = None
for k, v in results.items():
if v == best:
print("%s submit model %s" % (id_task_mapping[t], k), v)
best_model = k
print("\nHere is how you should predict test cases. Run in sequential order and replace all input and output folder names with your personalized ones\n")
if k.startswith("ensemble"):
tmp = k[len("ensemble_"):]
model1, model2 = tmp.split("--")
m1, t1, pl1 = model1.split("__")
m2, t2, pl2 = model2.split("__")
predict_str += "nnUNet_predict -i FOLDER_WITH_TEST_CASES -o OUTPUT_FOLDER_MODEL1 -tr " + tr + " -ctr " + trc + " -m " + m1 + " -p " + pl + " -t " + \
id_task_mapping[t] + "\n"
predict_str += "nnUNet_predict -i FOLDER_WITH_TEST_CASES -o OUTPUT_FOLDER_MODEL2 -tr " + tr + " -ctr " + trc + " -m " + m2 + " -p " + pl + " -t " + \
id_task_mapping[t] + "\n"
predict_str += "nnUNet_ensemble -f OUTPUT_FOLDER_MODEL1 OUTPUT_FOLDER_MODEL2 -o OUTPUT_FOLDER -pp " + join(network_training_output_dir, "ensembles", id_task_mapping[t], k, "postprocessing.json") + "\n"
else:
predict_str += "nnUNet_predict -i FOLDER_WITH_TEST_CASES -o OUTPUT_FOLDER_MODEL1 -tr " + tr + " -ctr " + trc + " -m " + k + " -p " + pl + " -t " + \
id_task_mapping[t] + "\n"
print(predict_str)
summary_folder = join(network_training_output_dir, "ensembles", id_task_mapping[t])
maybe_mkdir_p(summary_folder)
with open(join(summary_folder, "prediction_commands.txt"), 'w') as f:
f.write(predict_str)
num_classes = len([i for i in all_results[best_model].keys() if i != 'mean'])
with open(join(summary_folder, "summary.csv"), 'w') as f:
f.write("model")
for c in range(1, num_classes):
f.write(",class%d" % c)
f.write(",average")
f.write("\n")
for m in all_results.keys():
f.write(m)
for c in range(1, num_classes):
f.write(",%01.4f" % all_results[m][str(c)]["Dice"])
f.write(",%01.4f" % all_results[m]['mean']["Dice"])
f.write("\n")
if __name__ == "__main__":
main()
| 10,376
| 50.371287
| 221
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/evaluation/model_selection/__init__.py
|
from __future__ import absolute_import
from . import *
| 54
| 26.5
| 38
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/evaluation/model_selection/summarize_results_with_plans.py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from batchgenerators.utilities.file_and_folder_operations import *
import os
from nnunet.evaluation.model_selection.summarize_results_in_one_json import summarize
from nnunet.paths import network_training_output_dir
import numpy as np
def list_to_string(l, delim=","):
st = "%03.3f" % l[0]
for i in l[1:]:
st += delim + "%03.3f" % i
return st
def write_plans_to_file(f, plans_file, stage=0, do_linebreak_at_end=True, override_name=None):
a = load_pickle(plans_file)
stages = list(a['plans_per_stage'].keys())
stages.sort()
patch_size_in_mm = [i * j for i, j in zip(a['plans_per_stage'][stages[stage]]['patch_size'],
a['plans_per_stage'][stages[stage]]['current_spacing'])]
median_patient_size_in_mm = [i * j for i, j in zip(a['plans_per_stage'][stages[stage]]['median_patient_size_in_voxels'],
a['plans_per_stage'][stages[stage]]['current_spacing'])]
if override_name is None:
f.write(plans_file.split("/")[-2] + "__" + plans_file.split("/")[-1])
else:
f.write(override_name)
f.write(";%d" % stage)
f.write(";%s" % str(a['plans_per_stage'][stages[stage]]['batch_size']))
f.write(";%s" % str(a['plans_per_stage'][stages[stage]]['num_pool_per_axis']))
f.write(";%s" % str(a['plans_per_stage'][stages[stage]]['patch_size']))
f.write(";%s" % list_to_string(patch_size_in_mm))
f.write(";%s" % str(a['plans_per_stage'][stages[stage]]['median_patient_size_in_voxels']))
f.write(";%s" % list_to_string(median_patient_size_in_mm))
f.write(";%s" % list_to_string(a['plans_per_stage'][stages[stage]]['current_spacing']))
f.write(";%s" % list_to_string(a['plans_per_stage'][stages[stage]]['original_spacing']))
f.write(";%s" % str(a['plans_per_stage'][stages[stage]]['pool_op_kernel_sizes']))
f.write(";%s" % str(a['plans_per_stage'][stages[stage]]['conv_kernel_sizes']))
if do_linebreak_at_end:
f.write("\n")
if __name__ == "__main__":
summarize((1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 24, 27), output_dir=join(network_training_output_dir, "summary_fold0"), folds=(0,))
base_dir = os.environ['RESULTS_FOLDER']
nnunets = ['nnUNetV2', 'nnUNetV2_zspacing']
task_ids = list(range(99))
with open("summary.csv", 'w') as f:
f.write("identifier;stage;batch_size;num_pool_per_axis;patch_size;patch_size(mm);median_patient_size_in_voxels;median_patient_size_in_mm;current_spacing;original_spacing;pool_op_kernel_sizes;conv_kernel_sizes;patient_dc;global_dc\n")
for i in task_ids:
for nnunet in nnunets:
try:
summary_folder = join(base_dir, nnunet, "summary_fold0")
if isdir(summary_folder):
summary_files = subfiles(summary_folder, join=False, prefix="Task%03.0d_" % i, suffix=".json", sort=True)
for s in summary_files:
tmp = s.split("__")
trainer = tmp[2]
expected_output_folder = join(base_dir, nnunet, tmp[1], tmp[0], tmp[2].split(".")[0])
name = tmp[0] + "__" + nnunet + "__" + tmp[1] + "__" + tmp[2].split(".")[0]
global_dice_json = join(base_dir, nnunet, tmp[1], tmp[0], tmp[2].split(".")[0], "fold_0", "validation_tiledTrue_doMirror_True", "global_dice.json")
if not isdir(expected_output_folder) or len(tmp) > 3:
if len(tmp) == 2:
continue
expected_output_folder = join(base_dir, nnunet, tmp[1], tmp[0], tmp[2] + "__" + tmp[3].split(".")[0])
name = tmp[0] + "__" + nnunet + "__" + tmp[1] + "__" + tmp[2] + "__" + tmp[3].split(".")[0]
global_dice_json = join(base_dir, nnunet, tmp[1], tmp[0], tmp[2] + "__" + tmp[3].split(".")[0], "fold_0", "validation_tiledTrue_doMirror_True", "global_dice.json")
assert isdir(expected_output_folder), "expected output dir not found"
plans_file = join(expected_output_folder, "plans.pkl")
assert isfile(plans_file)
plans = load_pickle(plans_file)
num_stages = len(plans['plans_per_stage'])
if num_stages > 1 and tmp[1] == "3d_fullres":
stage = 1
elif (num_stages == 1 and tmp[1] == "3d_fullres") or tmp[1] == "3d_lowres":
stage = 0
else:
print("skipping", s)
continue
g_dc = load_json(global_dice_json)
mn_glob_dc = np.mean(list(g_dc.values()))
write_plans_to_file(f, plans_file, stage, False, name)
# now read and add result to end of line
results = load_json(join(summary_folder, s))
mean_dc = results['results']['mean']['mean']['Dice']
f.write(";%03.3f" % mean_dc)
f.write(";%03.3f\n" % mn_glob_dc)
print(name, mean_dc)
except Exception as e:
print(e)
| 6,207
| 54.927928
| 241
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/evaluation/model_selection/collect_all_fold0_results_and_summarize_in_one_csv.py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nnunet.evaluation.model_selection.summarize_results_in_one_json import summarize2
from nnunet.paths import network_training_output_dir
from batchgenerators.utilities.file_and_folder_operations import *
if __name__ == "__main__":
summary_output_folder = join(network_training_output_dir, "summary_jsons_fold0_new")
maybe_mkdir_p(summary_output_folder)
summarize2(['all'], output_dir=summary_output_folder, folds=(0,))
results_csv = join(network_training_output_dir, "summary_fold0.csv")
summary_files = subfiles(summary_output_folder, suffix='.json', join=False)
with open(results_csv, 'w') as f:
for s in summary_files:
if s.find("ensemble") == -1:
task, network, trainer, plans, validation_folder, folds = s.split("__")
else:
n1, n2 = s.split("--")
n1 = n1[n1.find("ensemble_") + len("ensemble_") :]
task = s.split("__")[0]
network = "ensemble"
trainer = n1
plans = n2
validation_folder = "none"
folds = folds[:-len('.json')]
results = load_json(join(summary_output_folder, s))
results_mean = results['results']['mean']['mean']['Dice']
results_median = results['results']['median']['mean']['Dice']
f.write("%s,%s,%s,%s,%s,%02.4f,%02.4f\n" % (task,
network, trainer, validation_folder, plans, results_mean, results_median))
summary_output_folder = join(network_training_output_dir, "summary_jsons_new")
maybe_mkdir_p(summary_output_folder)
summarize2(['all'], output_dir=summary_output_folder)
results_csv = join(network_training_output_dir, "summary_allFolds.csv")
summary_files = subfiles(summary_output_folder, suffix='.json', join=False)
with open(results_csv, 'w') as f:
for s in summary_files:
if s.find("ensemble") == -1:
task, network, trainer, plans, validation_folder, folds = s.split("__")
else:
n1, n2 = s.split("--")
n1 = n1[n1.find("ensemble_") + len("ensemble_") :]
task = s.split("__")[0]
network = "ensemble"
trainer = n1
plans = n2
validation_folder = "none"
folds = folds[:-len('.json')]
results = load_json(join(summary_output_folder, s))
results_mean = results['results']['mean']['mean']['Dice']
results_median = results['results']['median']['mean']['Dice']
f.write("%s,%s,%s,%s,%s,%02.4f,%02.4f\n" % (task,
network, trainer, validation_folder, plans, results_mean, results_median))
| 3,483
| 46.081081
| 118
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/evaluation/model_selection/ensemble.py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from multiprocessing.pool import Pool
import shutil
import numpy as np
from nnunet.configuration import default_num_threads
from nnunet.evaluation.evaluator import aggregate_scores
from nnunet.inference.segmentation_export import save_segmentation_nifti_from_softmax
from batchgenerators.utilities.file_and_folder_operations import *
from nnunet.paths import network_training_output_dir, preprocessing_output_dir, default_plans_identifier
import argparse
from nnunet.postprocessing.connected_components import determine_postprocessing
def merge(args):
file1, file2, properties_file, out_file = args
if not isfile(out_file):
res1 = np.load(file1)['softmax']
res2 = np.load(file2)['softmax']
props = load_pickle(properties_file)
mn = np.mean((res1, res2), 0)
# Softmax probabilities are already at target spacing so this will not do any resampling (resampling parameters
# don't matter here)
save_segmentation_nifti_from_softmax(mn, out_file, props, 3, None, None, None, force_separate_z=None,
interpolation_order_z=0)
def ensemble(training_output_folder1, training_output_folder2, output_folder, task, validation_folder, folds):
print("\nEnsembling folders\n", training_output_folder1, "\n", training_output_folder2)
output_folder_base = output_folder
output_folder = join(output_folder_base, "ensembled_raw")
# only_keep_largest_connected_component is the same for all stages
dataset_directory = join(preprocessing_output_dir, task)
plans = load_pickle(join(training_output_folder1, "plans.pkl")) # we need this only for the labels
files1 = []
files2 = []
property_files = []
out_files = []
gt_segmentations = []
folder_with_gt_segs = join(dataset_directory, "gt_segmentations")
# in the correct shape and we need the original geometry to restore the niftis
for f in folds:
validation_folder_net1 = join(training_output_folder1, "fold_%d" % f, validation_folder)
validation_folder_net2 = join(training_output_folder2, "fold_%d" % f, validation_folder)
patient_identifiers1 = subfiles(validation_folder_net1, False, None, 'npz', True)
patient_identifiers2 = subfiles(validation_folder_net2, False, None, 'npz', True)
# we don't do postprocessing anymore so there should not be any of that noPostProcess
patient_identifiers1_nii = [i for i in subfiles(validation_folder_net1, False, None, suffix='nii.gz', sort=True) if not i.endswith("noPostProcess.nii.gz") and not i.endswith('_postprocessed.nii.gz')]
patient_identifiers2_nii = [i for i in subfiles(validation_folder_net2, False, None, suffix='nii.gz', sort=True) if not i.endswith("noPostProcess.nii.gz") and not i.endswith('_postprocessed.nii.gz')]
assert len(patient_identifiers1) == len(patient_identifiers1_nii), "npz seem to be missing. run validation with --npz"
assert len(patient_identifiers1) == len(patient_identifiers1_nii), "npz seem to be missing. run validation with --npz"
assert all([i[:-4] == j[:-7] for i, j in zip(patient_identifiers1, patient_identifiers1_nii)]), "npz seem to be missing. run validation with --npz"
assert all([i[:-4] == j[:-7] for i, j in zip(patient_identifiers2, patient_identifiers2_nii)]), "npz seem to be missing. run validation with --npz"
all_patient_identifiers = patient_identifiers1
for p in patient_identifiers2:
if p not in all_patient_identifiers:
all_patient_identifiers.append(p)
# assert these patients exist for both methods
assert all([isfile(join(validation_folder_net1, i)) for i in all_patient_identifiers])
assert all([isfile(join(validation_folder_net2, i)) for i in all_patient_identifiers])
maybe_mkdir_p(output_folder)
for p in all_patient_identifiers:
files1.append(join(validation_folder_net1, p))
files2.append(join(validation_folder_net2, p))
property_files.append(join(validation_folder_net1, p)[:-3] + "pkl")
out_files.append(join(output_folder, p[:-4] + ".nii.gz"))
gt_segmentations.append(join(folder_with_gt_segs, p[:-4] + ".nii.gz"))
p = Pool(default_num_threads)
p.map(merge, zip(files1, files2, property_files, out_files))
p.close()
p.join()
if not isfile(join(output_folder, "summary.json")) and len(out_files) > 0:
aggregate_scores(tuple(zip(out_files, gt_segmentations)), labels=plans['all_classes'],
json_output_file=join(output_folder, "summary.json"), json_task=task,
json_name=task + "__" + output_folder_base.split("/")[-1], num_threads=default_num_threads)
if not isfile(join(output_folder_base, "postprocessing.json")):
# now lets also look at postprocessing. We cannot just take what we determined in cross-validation and apply it
# here because things may have changed and may also be too inconsistent between the two networks
determine_postprocessing(output_folder_base, folder_with_gt_segs, "ensembled_raw", "temp",
"ensembled_postprocessed", default_num_threads, dice_threshold=0)
out_dir_all_json = join(network_training_output_dir, "summary_jsons")
json_out = load_json(join(output_folder_base, "ensembled_postprocessed", "summary.json"))
json_out["experiment_name"] = output_folder_base.split("/")[-1]
save_json(json_out, join(output_folder_base, "ensembled_postprocessed", "summary.json"))
maybe_mkdir_p(out_dir_all_json)
shutil.copy(join(output_folder_base, "ensembled_postprocessed", "summary.json"),
join(out_dir_all_json, "%s__%s.json" % (task, output_folder_base.split("/")[-1])))
if __name__ == "__main__":
parser = argparse.ArgumentParser(usage="This is intended to ensemble training images (from cross-validation) only. Use"
"inference/ensemble_predictions.py instead")
parser.add_argument("training_output_folder1")
parser.add_argument("training_output_folder2")
parser.add_argument("output_folder")
parser.add_argument("task") # we need to know this for gt_segmentations
parser.add_argument("validation_folder")
parser.add_argument("--folds", nargs='+', type=int, default=(0, 1, 2, 3, 4), required=False)
args = parser.parse_args()
training_output_folder1 = args.training_output_folder1
training_output_folder2 = args.training_output_folder2
ensemble(training_output_folder1, training_output_folder2, args.output_folder, args.task, args.validation_folder,
args.folds)
| 7,435
| 54.492537
| 207
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/evaluation/model_selection/summarize_results_in_one_json.py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import OrderedDict
from nnunet.evaluation.add_mean_dice_to_json import foreground_mean
from batchgenerators.utilities.file_and_folder_operations import *
from nnunet.paths import network_training_output_dir
import numpy as np
def summarize(tasks, models=('2d', '3d_lowres', '3d_fullres', '3d_cascade_fullres'),
output_dir=join(network_training_output_dir, "summary_jsons"), folds=(0, 1, 2, 3, 4)):
maybe_mkdir_p(output_dir)
if len(tasks) == 1 and tasks[0] == "all":
tasks = list(range(999))
else:
tasks = [int(i) for i in tasks]
for model in models:
for t in tasks:
t = int(t)
if not isdir(join(network_training_output_dir, model)):
continue
task_name = subfolders(join(network_training_output_dir, model), prefix="Task%03.0d" % t, join=False)
if len(task_name) != 1:
print("did not find unique output folder for network %s and task %s" % (model, t))
continue
task_name = task_name[0]
out_dir_task = join(network_training_output_dir, model, task_name)
model_trainers = subdirs(out_dir_task, join=False)
for trainer in model_trainers:
if trainer.startswith("fold"):
continue
out_dir = join(out_dir_task, trainer)
validation_folders = []
for fld in folds:
d = join(out_dir, "fold%d"%fld)
if not isdir(d):
d = join(out_dir, "fold_%d"%fld)
if not isdir(d):
break
validation_folders += subfolders(d, prefix="validation", join=False)
for v in validation_folders:
ok = True
metrics = OrderedDict()
for fld in folds:
d = join(out_dir, "fold%d"%fld)
if not isdir(d):
d = join(out_dir, "fold_%d"%fld)
if not isdir(d):
ok = False
break
validation_folder = join(d, v)
if not isfile(join(validation_folder, "summary.json")):
print("summary.json missing for net %s task %s fold %d" % (model, task_name, fld))
ok = False
break
metrics_tmp = load_json(join(validation_folder, "summary.json"))["results"]["mean"]
for l in metrics_tmp.keys():
if metrics.get(l) is None:
metrics[l] = OrderedDict()
for m in metrics_tmp[l].keys():
if metrics[l].get(m) is None:
metrics[l][m] = []
metrics[l][m].append(metrics_tmp[l][m])
if ok:
for l in metrics.keys():
for m in metrics[l].keys():
assert len(metrics[l][m]) == len(folds)
metrics[l][m] = np.mean(metrics[l][m])
json_out = OrderedDict()
json_out["results"] = OrderedDict()
json_out["results"]["mean"] = metrics
json_out["task"] = task_name
json_out["description"] = model + " " + task_name + " all folds summary"
json_out["name"] = model + " " + task_name + " all folds summary"
json_out["experiment_name"] = model
save_json(json_out, join(out_dir, "summary_allFolds__%s.json" % v))
save_json(json_out, join(output_dir, "%s__%s__%s__%s.json" % (task_name, model, trainer, v)))
foreground_mean(join(out_dir, "summary_allFolds__%s.json" % v))
foreground_mean(join(output_dir, "%s__%s__%s__%s.json" % (task_name, model, trainer, v)))
def summarize2(task_ids, models=('2d', '3d_lowres', '3d_fullres', '3d_cascade_fullres'),
               output_dir=join(network_training_output_dir, "summary_jsons"), folds=(0, 1, 2, 3, 4)):
    """Aggregate per-fold validation summaries into one combined json per
    (task, model, trainer, validation-run) tuple.

    Walks the trainer output folders, pools the per-case metrics from each
    fold's summary.json and writes a json holding the mean and median of every
    metric over all cases of all requested folds.

    :param task_ids: task ids, or ["all"] to scan task ids 0..998
    :param models: network configurations to consider
    :param output_dir: where the combined json files are written
    :param folds: folds that must all be present for a summary to be produced
    """
    maybe_mkdir_p(output_dir)
    if len(task_ids) == 1 and task_ids[0] == "all":
        task_ids = list(range(999))
    else:
        task_ids = [int(i) for i in task_ids]
    for model in models:
        for t in task_ids:
            if not isdir(join(network_training_output_dir, model)):
                continue
            # task folders are named TaskXXX_<name>; match on the zero-padded id
            task_name = subfolders(join(network_training_output_dir, model), prefix="Task%03.0d" % t, join=False)
            if len(task_name) != 1:
                print("did not find unique output folder for network %s and task %s" % (model, t))
                continue
            task_name = task_name[0]
            out_dir_task = join(network_training_output_dir, model, task_name)
            model_trainers = subdirs(out_dir_task, join=False)
            for trainer in model_trainers:
                # skip fold subfolders; only trainer-level folders are wanted here
                if trainer.startswith("fold"):
                    continue
                out_dir = join(out_dir_task, trainer)
                # collect the names of all validation runs present in any fold
                validation_folders = []
                for fld in folds:
                    fold_output_dir = join(out_dir, "fold_%d" % fld)
                    if not isdir(fold_output_dir):
                        continue
                    validation_folders += subfolders(fold_output_dir, prefix="validation", join=False)
                validation_folders = np.unique(validation_folders)
                for v in validation_folders:
                    ok = True  # set to False when any fold is incomplete for this validation run
                    metrics = OrderedDict()
                    metrics['mean'] = OrderedDict()
                    metrics['median'] = OrderedDict()
                    metrics['all'] = OrderedDict()  # per-label, per-metric list of per-case values
                    for fld in folds:
                        fold_output_dir = join(out_dir, "fold_%d" % fld)
                        if not isdir(fold_output_dir):
                            print("fold missing", model, task_name, trainer, fld)
                            ok = False
                            break
                        validation_folder = join(fold_output_dir, v)
                        if not isdir(validation_folder):
                            print("validation folder missing", model, task_name, trainer, fld, v)
                            ok = False
                            break
                        if not isfile(join(validation_folder, "summary.json")):
                            print("summary.json missing", model, task_name, trainer, fld, v)
                            ok = False
                            break
                        all_metrics = load_json(join(validation_folder, "summary.json"))["results"]
                        # we now need to get the mean and median metrics. We use the mean metrics just to get the
                        # names of computed metics, we ignore the precomputed mean and do it ourselfes again
                        mean_metrics = all_metrics["mean"]
                        all_labels = [i for i in list(mean_metrics.keys()) if i != "mean"]
                        # NOTE(review): this break does not set ok = False, so a summary with no
                        # labels in one fold may still be written — confirm this is intended
                        if len(all_labels) == 0:
                            print(v, fld)
                            break
                        all_metrics_names = list(mean_metrics[all_labels[0]].keys())
                        for l in all_labels:
                            # initialize the data structure, no values are copied yet
                            for k in ['mean', 'median', 'all']:
                                if metrics[k].get(l) is None:
                                    metrics[k][l] = OrderedDict()
                            for m in all_metrics_names:
                                if metrics['all'][l].get(m) is None:
                                    metrics['all'][l][m] = []
                        # pool this fold's per-case values into the accumulator
                        for entry in all_metrics['all']:
                            for l in all_labels:
                                for m in all_metrics_names:
                                    metrics['all'][l][m].append(entry[l][m])
                    # now compute mean and median over the pooled per-case values
                    for l in metrics['all'].keys():
                        for m in metrics['all'][l].keys():
                            metrics['mean'][l][m] = np.nanmean(metrics['all'][l][m])
                            metrics['median'][l][m] = np.nanmedian(metrics['all'][l][m])
                    if ok:
                        fold_string = ""
                        for f in folds:
                            fold_string += str(f)
                        json_out = OrderedDict()
                        json_out["results"] = OrderedDict()
                        json_out["results"]["mean"] = metrics['mean']
                        json_out["results"]["median"] = metrics['median']
                        json_out["task"] = task_name
                        json_out["description"] = model + " " + task_name + "summary folds" + str(folds)
                        json_out["name"] = model + " " + task_name + "summary folds" + str(folds)
                        json_out["experiment_name"] = model
                        save_json(json_out, join(output_dir, "%s__%s__%s__%s__%s.json" % (task_name, model, trainer, v, fold_string)))
                        # add the foreground-averaged "mean" entry in place
                        foreground_mean2(join(output_dir, "%s__%s__%s__%s__%s.json" % (task_name, model, trainer, v, fold_string)))
def foreground_mean2(filename):
    """Augment a summary json (written by summarize2) in place with a "mean"
    entry that averages every metric over all foreground labels (all labels
    except '0' and the existing 'mean' key), for both the mean and the median
    results. The file is rewritten with the added entries."""
    with open(filename, 'r') as fp:
        summary = json.load(fp)
    mean_block = summary['results']['mean']
    median_block = summary['results']['median']
    # foreground labels: everything except background ('0') and the aggregate key
    fg_ids = np.array([int(label) for label in mean_block.keys() if label != 'mean' and label != '0'])
    # label '1' is only used to discover which metrics were computed
    metric_keys = mean_block['1'].keys()
    mean_block["mean"] = OrderedDict()
    median_block["mean"] = OrderedDict()
    for metric in metric_keys:
        mean_block["mean"][metric] = np.nanmean([mean_block[str(label)][metric] for label in fg_ids])
        median_block["mean"][metric] = np.nanmean([median_block[str(label)][metric] for label in fg_ids])
    with open(filename, 'w') as fp:
        json.dump(summary, fp, indent=4, sort_keys=True)
if __name__ == "__main__":
    import argparse

    # CLI entry point: pool the per-fold cross-validation results of every
    # requested model/task into one json each (written to summary_jsons_new).
    arg_parser = argparse.ArgumentParser(usage="This is intended to identify the best model based on the five fold "
                                               "cross-validation. Running this script requires alle models to have been run "
                                               "already. This script will summarize the results of the five folds of all "
                                               "models in one json each for easy interpretability")
    arg_parser.add_argument("-t", '--task_ids', nargs="+", required=True, help="task id. can be 'all'")
    arg_parser.add_argument("-f", '--folds', nargs="+", required=False, type=int, default=[0, 1, 2, 3, 4])
    arg_parser.add_argument("-m", '--models', nargs="+", required=False,
                            default=['2d', '3d_lowres', '3d_fullres', '3d_cascade_fullres'])
    parsed = arg_parser.parse_args()
    summarize2(parsed.task_ids, parsed.models, folds=parsed.folds,
               output_dir=join(network_training_output_dir, "summary_jsons_new"))
| 12,141
| 50.232068
| 134
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/training/__init__.py
|
from __future__ import absolute_import
from . import *
| 54
| 26.5
| 38
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/training/model_restore.py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import nnunet
import torch
from batchgenerators.utilities.file_and_folder_operations import *
import importlib
import pkgutil
from nnunet.training.network_training.nnUNetTrainer import nnUNetTrainer
def recursive_find_python_class(folder, trainer_name, current_module):
    """Depth-first search for a class named *trainer_name* inside the package
    rooted at *folder* (importable as *current_module*).

    Plain modules at the current level are searched first; sub-packages are
    recursed into only if nothing was found. Returns the class object, or
    None if no module in the tree defines it.

    :param folder: single-element list with the package's filesystem path
    :param trainer_name: class name to look for
    :param current_module: dotted import path corresponding to *folder*
    """
    # pass 1: plain modules at this level
    for _, module_name, is_pkg in pkgutil.iter_modules(folder):
        if is_pkg:
            continue
        candidate = importlib.import_module(current_module + "." + module_name)
        if hasattr(candidate, trainer_name):
            return getattr(candidate, trainer_name)
    # pass 2: recurse into sub-packages
    for _, module_name, is_pkg in pkgutil.iter_modules(folder):
        if not is_pkg:
            continue
        found = recursive_find_python_class([join(folder[0], module_name)], trainer_name,
                                            current_module=current_module + "." + module_name)
        if found is not None:
            return found
    return None
def restore_model(pkl_file, checkpoint=None, train=False, fp16=None):
    """
    This is a utility function to load any nnUNet trainer from a pkl. It will recursively search
    nnunet.trainig.network_training for the file that contains the trainer and instantiate it with the arguments saved
    in the pkl file. If checkpoint is specified, it will furthermore load the checkpoint file in train/test mode
    (as specified by train).
    The pkl file required here is the one that will be saved automatically when calling nnUNetTrainer.save_checkpoint.
    :param pkl_file: path to the .model.pkl written alongside the checkpoint
    :param checkpoint: optional path to the .model checkpoint whose weights should be loaded
    :param train: whether the checkpoint is loaded in training mode (optimizer state etc.)
    :param fp16: if None then we take no action. If True/False we overwrite what the model has in its init
    :return: the instantiated (and optionally checkpoint-loaded) trainer
    """
    info = load_pickle(pkl_file)
    init = info['init']  # positional constructor arguments the trainer was created with
    name = info['name']  # class name of the trainer to locate
    search_in = join(nnunet.__path__[0], "training", "network_training")
    tr = recursive_find_python_class([search_in], name, current_module="nnunet.training.network_training")
    if tr is None:
        """
        Fabian only. This will trigger searching for trainer classes in other repositories as well
        """
        try:
            import meddec
            search_in = join(meddec.__path__[0], "model_training")
            tr = recursive_find_python_class([search_in], name, current_module="meddec.model_training")
        except ImportError:
            pass
    if tr is None:
        raise RuntimeError("Could not find the model trainer specified in checkpoint in nnunet.trainig.network_training. If it "
                           "is not located there, please move it or change the code of restore_model. Your model "
                           "trainer can be located in any directory within nnunet.trainig.network_training (search is recursive)."
                           "\nDebug info: \ncheckpoint file: %s\nName of trainer: %s " % (checkpoint, name))
    assert issubclass(tr, nnUNetTrainer), "The network trainer was found but is not a subclass of nnUNetTrainer. " \
                                          "Please make it so!"
    # this is now deprecated
    """if len(init) == 7:
        print("warning: this model seems to have been saved with a previous version of nnUNet. Attempting to load it "
              "anyways. Expect the unexpected.")
        print("manually editing init args...")
        init = [init[i] for i in range(len(init)) if i != 2]"""
    # ToDo Fabian make saves use kwargs, please...
    trainer = tr(*init)
    # We can hack fp16 overwriting into the trainer without changing the init arguments because nothing happens with
    # fp16 in the init, it just saves it to a member variable
    if fp16 is not None:
        trainer.fp16 = fp16
    trainer.process_plans(info['plans'])
    if checkpoint is not None:
        trainer.load_checkpoint(checkpoint, train)
    return trainer
def load_best_model_for_inference(folder):
    """Restore the trainer saved as 'model_best' inside *folder*, with its
    checkpoint loaded in inference (non-train) mode."""
    best_checkpoint = join(folder, "model_best.model")
    return restore_model(best_checkpoint + ".pkl", best_checkpoint, False)
def load_model_and_checkpoint_files(folder, folds=None, mixed_precision=None, checkpoint_name="model_best"):
    """
    used for if you need to ensemble the five models of a cross-validation. This will restore the model from the
    checkpoint in fold 0, load all parameters of the five folds in ram and return both. This will allow for fast
    switching between parameters (as opposed to loading them form disk each time).
    This is best used for inference and test prediction
    :param folder: trainer output folder containing the fold_X subfolders
    :param folds: None (auto-detect), an int, a list/tuple of ints, or a string (interpreted as "all")
    :param mixed_precision: if None then we take no action. If True/False we overwrite what the model has in its init
    :param checkpoint_name: base name of the checkpoint files to load
    :return: (trainer restored from the first fold, list of state dicts, one per fold)
    """
    if isinstance(folds, str):
        # NOTE(review): any string selects the "all" training, not just the literal "all" — confirm intended
        folds = [join(folder, "all")]
        assert isdir(folds[0]), "no output folder for fold %s found" % folds
    elif isinstance(folds, (list, tuple)):
        if len(folds) == 1 and folds[0] == "all":
            folds = [join(folder, "all")]
        else:
            folds = [join(folder, "fold_%d" % i) for i in folds]
        assert all([isdir(i) for i in folds]), "list of folds specified but not all output folders are present"
    elif isinstance(folds, int):
        fold_dir = join(folder, "fold_%d" % folds)
        # BUGFIX: format the message while `folds` is still the int. The original did
        # "%d" % folds after reassigning `folds` to a list, which raised a TypeError
        # instead of the intended AssertionError.
        assert isdir(fold_dir), "output folder missing for fold %d" % folds
        folds = [fold_dir]
    elif folds is None:
        print("folds is None so we will automatically look for output folders (not using \'all\'!)")
        folds = subfolders(folder, prefix="fold")
        print("found the following folds: ", folds)
    else:
        # BUGFIX: the original passed str(type(folds)) as a second argument to ValueError
        # instead of %-formatting it into the message
        raise ValueError("Unknown value for folds. Type: %s. Expected: list of int, int, str or None" % str(type(folds)))
    trainer = restore_model(join(folds[0], "%s.model.pkl" % checkpoint_name), fp16=mixed_precision)
    trainer.output_folder = folder
    trainer.output_folder_base = folder
    trainer.update_fold(0)
    trainer.initialize(False)
    all_best_model_files = [join(i, "%s.model" % checkpoint_name) for i in folds]
    print("using the following model files: ", all_best_model_files)
    # keep all parameter sets in RAM so the caller can swap them quickly while ensembling
    all_params = [torch.load(i, map_location=torch.device('cpu')) for i in all_best_model_files]
    return trainer, all_params
if __name__ == "__main__":
    # Ad-hoc smoke test (hard-coded local path): restore a trained model in inference mode.
    pkl = "/home/fabian/PhD/results/nnUNetV2/nnUNetV2_3D_fullres/Task004_Hippocampus/fold0/model_best.model.pkl"
    trainer = restore_model(pkl, pkl[:-4], train=False)
| 7,125
| 44.679487
| 149
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/training/dataloading/__init__.py
|
from __future__ import absolute_import
from . import *
| 54
| 26.5
| 38
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/training/dataloading/dataset_loading.py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import OrderedDict
from batchgenerators.augmentations.utils import random_crop_2D_image_batched, pad_nd_image
import numpy as np
from batchgenerators.dataloading import SlimDataLoaderBase
from multiprocessing import Pool
from nnunet.configuration import default_num_threads
from nnunet.paths import preprocessing_output_dir
from batchgenerators.utilities.file_and_folder_operations import *
def get_case_identifiers(folder):
    """Return the case identifiers in *folder*: basenames of all .npz files,
    excluding previous-stage segmentation files ('segFromPrevStage')."""
    identifiers = []
    for entry in os.listdir(folder):
        if entry.endswith("npz") and "segFromPrevStage" not in entry:
            identifiers.append(entry[:-4])
    return identifiers
def get_case_identifiers_from_raw_folder(folder):
    """Return the unique case identifiers among the .nii.gz files in *folder*.
    The last 12 characters ('_XXXX.nii.gz' modality suffix) are stripped and
    previous-stage segmentation files are ignored. Returns a sorted ndarray."""
    names = [entry[:-12] for entry in os.listdir(folder)
             if entry.endswith(".nii.gz") and "segFromPrevStage" not in entry]
    return np.unique(names)
def convert_to_npy(args):
    """Unpack one .npz archive to a sibling .npy file (no-op if it already exists).

    :param args: either a path to the npz file (key defaults to "data") or a
                 tuple (npz_file, key); the tuple form exists so this can be
                 used with Pool.map
    """
    if not isinstance(args, tuple):
        key = "data"
        npz_file = args
    else:
        npz_file, key = args
    if not isfile(npz_file[:-3] + "npy"):
        # BUGFIX: use the NpzFile as a context manager so its underlying zip file
        # handle is closed; the original left it open (leaks file descriptors when
        # called for many cases from worker processes)
        with np.load(npz_file) as npz:
            np.save(npz_file[:-3] + "npy", npz[key])
def save_as_npz(args):
    """Recompress one .npy file into a sibling .npz archive under *key*.

    :param args: either a path to the npy file (key defaults to "data") or a
                 tuple (npy_file, key), matching convert_to_npy
    """
    if isinstance(args, tuple):
        npy_file, key = args
    else:
        npy_file, key = args, "data"
    array = np.load(npy_file)
    np.savez_compressed(npy_file[:-3] + "npz", **{key: array})
def unpack_dataset(folder, threads=default_num_threads, key="data"):
    """Decompress every .npz file in *folder* to a .npy file (array stored
    under *key*) using a process pool. Unpacked files are much faster to read
    during training; use delete_npy(folder) to clean up afterwards.

    :param folder: folder containing the .npz files
    :param threads: number of worker processes
    :param key: array key inside the npz archives
    """
    archives = subfiles(folder, True, None, ".npz", True)
    workers = Pool(threads)
    workers.map(convert_to_npy, zip(archives, [key] * len(archives)))
    workers.close()
    workers.join()
def pack_dataset(folder, threads=default_num_threads, key="data"):
    """Inverse of unpack_dataset: recompress every .npy file in *folder* into
    a .npz archive (array stored under *key*) using a process pool."""
    arrays = subfiles(folder, True, None, ".npy", True)
    workers = Pool(threads)
    workers.map(save_as_npz, zip(arrays, [key] * len(arrays)))
    workers.close()
    workers.join()
def delete_npy(folder):
    """Remove the unpacked .npy file of every case in *folder* (cleanup for
    unpack_dataset); missing files are skipped."""
    for case in get_case_identifiers(folder):
        npy_path = join(folder, case + ".npy")
        if isfile(npy_path):
            os.remove(npy_path)
def load_dataset(folder, num_cases_properties_loading_threshold=1000):
    """Build an OrderedDict mapping case identifier -> file locations.

    The arrays themselves are not loaded, only the paths to the .npz/.pkl
    files. For small datasets (at most *num_cases_properties_loading_threshold*
    cases) the per-case properties pickles are loaded eagerly into the entries.
    """
    print('loading dataset')
    case_ids = get_case_identifiers(folder)
    case_ids.sort()
    dataset = OrderedDict()
    for case_id in case_ids:
        entry = OrderedDict()
        entry['data_file'] = join(folder, "%s.npz" % case_id)
        entry['properties_file'] = join(folder, "%s.pkl" % case_id)
        # NOTE(review): this condition can never be True on a freshly created
        # entry, so the _segs.npz path is never registered here — confirm whether
        # callers are expected to set 'seg_from_prev_stage_file' themselves
        if entry.get('seg_from_prev_stage_file') is not None:
            entry['seg_from_prev_stage_file'] = join(folder, "%s_segs.npz" % case_id)
        dataset[case_id] = entry
    if len(case_ids) <= num_cases_properties_loading_threshold:
        print('loading all case properties')
        for case_id in dataset.keys():
            dataset[case_id]['properties'] = load_pickle(dataset[case_id]['properties_file'])
    return dataset
def crop_2D_image_force_fg(img, crop_size, valid_voxels):
    """
    img must be [c, x, y]
    img[-1] must be the segmentation with segmentation>0 being foreground
    :param img: 3d array [c, x, y]
    :param crop_size: int or length-2 sequence, spatial size of the returned crop
    :param valid_voxels: (N, 2) array of voxel coordinates belonging to the selected class
    :return: cropped image of shape [c, crop_size[0], crop_size[1]]
    """
    assert len(valid_voxels.shape) == 2
    if type(crop_size) not in (tuple, list):
        crop_size = [crop_size] * (len(img.shape) - 1)
    else:
        assert len(crop_size) == (len(
            img.shape) - 1), "If you provide a list/tuple as center crop make sure it has the same len as your data has dims (3d)"
    # we need to find the center coords that we can crop to without exceeding the image border
    lb_x = crop_size[0] // 2
    ub_x = img.shape[1] - crop_size[0] // 2 - crop_size[0] % 2
    lb_y = crop_size[1] // 2
    ub_y = img.shape[2] - crop_size[1] // 2 - crop_size[1] % 2
    if len(valid_voxels) == 0:
        # no foreground voxels available: uniformly random center.
        # BUGFIX: np.random.random_integers was deprecated and removed from NumPy;
        # randint's upper bound is exclusive, hence the +1 to keep the same range.
        selected_center_voxel = (np.random.randint(lb_x, ub_x + 1),
                                 np.random.randint(lb_y, ub_y + 1))
    else:
        # BUGFIX: pick a random row (voxel). The original drew the index from
        # valid_voxels.shape[1] (the number of coordinates, always 2) instead of
        # shape[0] (the number of voxels), which both biased the selection and
        # could raise IndexError for a single-voxel class.
        selected_center_voxel = valid_voxels[np.random.choice(valid_voxels.shape[0]), :]
    selected_center_voxel = np.array(selected_center_voxel)
    for i in range(2):
        # clamp the center so the crop stays fully inside the image
        selected_center_voxel[i] = max(crop_size[i] // 2, selected_center_voxel[i])
        selected_center_voxel[i] = min(img.shape[i + 1] - crop_size[i] // 2 - crop_size[i] % 2,
                                       selected_center_voxel[i])
    result = img[:, (selected_center_voxel[0] - crop_size[0] // 2):(
            selected_center_voxel[0] + crop_size[0] // 2 + crop_size[0] % 2),
             (selected_center_voxel[1] - crop_size[1] // 2):(
                     selected_center_voxel[1] + crop_size[1] // 2 + crop_size[1] % 2)]
    return result
class DataLoader3D(SlimDataLoaderBase):
    def __init__(self, data, patch_size, final_patch_size, batch_size, has_prev_stage=False,
                 oversample_foreground_percent=0.0, memmap_mode="r", pad_mode="edge", pad_kwargs_data=None,
                 pad_sides=None):
        """
        This is the basic data loader for 3D networks. It uses preprocessed data as produced by my (Fabian) preprocessing.
        You can load the data with load_dataset(folder) where folder is the folder where the npz files are located. If there
        are only npz files present in that folder, the data loader will unpack them on the fly. This may take a while
        and increase CPU usage. Therefore, I advise you to call unpack_dataset(folder) first, which will unpack all npz
        to npy. Don't forget to call delete_npy(folder) after you are done with training?
        Why all the hassle? Well the decathlon dataset is huge. Using npy for everything will consume >1 TB and that is uncool
        given that I (Fabian) will have to store that permanently on /datasets and my local computer. With this strategy all
        data is stored in a compressed format (factor 10 smaller) and only unpacked when needed.
        :param data: get this with load_dataset(folder, stage=0). Plug the return value in here and you are g2g (good to go)
        :param patch_size: what patch size will this data loader return? it is common practice to first load larger
        patches so that a central crop after data augmentation can be done to reduce border artifacts. If unsure, use
        get_patch_size() from data_augmentation.default_data_augmentation
        :param final_patch_size: what will the patch finally be cropped to (after data augmentation)? this is the patch
        size that goes into your network. We need this here because we will pad patients in here so that patches at the
        border of patients are sampled properly
        :param batch_size:
        :param has_prev_stage: cascade only: also load the previous stage's segmentation for each case
        :param oversample_foreground_percent: fraction of each batch forced to contain at least some foreground
        :param memmap_mode: mmap mode used when reading unpacked .npy files
        :param pad_mode: np.pad mode used for the image channels (segmentations always use constant padding)
        :param pad_kwargs_data: extra kwargs forwarded to np.pad for the image channels
        :param pad_sides: optional additional padding applied on top of the patch/final-patch difference
        """
        super(DataLoader3D, self).__init__(data, batch_size, None)
        if pad_kwargs_data is None:
            pad_kwargs_data = OrderedDict()
        self.pad_kwargs_data = pad_kwargs_data
        self.pad_mode = pad_mode
        self.oversample_foreground_percent = oversample_foreground_percent
        self.final_patch_size = final_patch_size
        self.has_prev_stage = has_prev_stage
        self.patch_size = patch_size
        self.list_of_keys = list(self._data.keys())
        # need_to_pad denotes by how much we need to pad the data so that if we sample a patch of size final_patch_size
        # (which is what the network will get) these patches will also cover the border of the patients
        self.need_to_pad = (np.array(patch_size) - np.array(final_patch_size)).astype(int)
        if pad_sides is not None:
            if not isinstance(pad_sides, np.ndarray):
                pad_sides = np.array(pad_sides)
            self.need_to_pad += pad_sides
        self.memmap_mode = memmap_mode
        self.num_channels = None
        self.pad_sides = pad_sides
        self.data_shape, self.seg_shape = self.determine_shapes()

    def get_do_oversample(self, batch_idx):
        # the last oversample_foreground_percent fraction of each batch is forced to contain foreground
        return not batch_idx < round(self.batch_size * (1 - self.oversample_foreground_percent))

    def determine_shapes(self):
        """Peek at one case to derive the (batch, channel, *patch) shapes of the data and seg arrays."""
        if self.has_prev_stage:
            num_seg = 2  # ground truth + previous-stage segmentation
        else:
            num_seg = 1
        k = list(self._data.keys())[0]
        if isfile(self._data[k]['data_file'][:-4] + ".npy"):
            case_all_data = np.load(self._data[k]['data_file'][:-4] + ".npy", self.memmap_mode)
        else:
            case_all_data = np.load(self._data[k]['data_file'])['data']
        # last channel of the stored array is the segmentation
        num_color_channels = case_all_data.shape[0] - 1
        data_shape = (self.batch_size, num_color_channels, *self.patch_size)
        seg_shape = (self.batch_size, num_seg, *self.patch_size)
        return data_shape, seg_shape

    def generate_train_batch(self):
        """Sample batch_size cases (with replacement), crop/pad a patch from each and return the batch dict."""
        selected_keys = np.random.choice(self.list_of_keys, self.batch_size, True, None)
        data = np.zeros(self.data_shape, dtype=np.float32)
        seg = np.zeros(self.seg_shape, dtype=np.float32)
        case_properties = []
        for j, i in enumerate(selected_keys):
            # oversampling foreground will improve stability of model training, especially if many patches are empty
            # (Lung for example)
            if self.get_do_oversample(j):
                force_fg = True
            else:
                force_fg = False
            if 'properties' in self._data[i].keys():
                properties = self._data[i]['properties']
            else:
                properties = load_pickle(self._data[i]['properties_file'])
            case_properties.append(properties)
            # cases are stored as npz, but we require unpack_dataset to be run. This will decompress them into npy
            # which is much faster to access
            if isfile(self._data[i]['data_file'][:-4] + ".npy"):
                case_all_data = np.load(self._data[i]['data_file'][:-4] + ".npy", self.memmap_mode)
            else:
                case_all_data = np.load(self._data[i]['data_file'])['data']
            # If we are doing the cascade then we will also need to load the segmentation of the previous stage and
            # concatenate it. Here it will be concatenated to the segmentation because the augmentations need to be
            # applied to it in segmentation mode. Later in the data augmentation we move it from the segmentations to
            # the last channel of the data
            if self.has_prev_stage:
                if isfile(self._data[i]['seg_from_prev_stage_file'][:-4] + ".npy"):
                    segs_from_previous_stage = np.load(self._data[i]['seg_from_prev_stage_file'][:-4] + ".npy",
                                                       mmap_mode=self.memmap_mode)[None]
                else:
                    segs_from_previous_stage = np.load(self._data[i]['seg_from_prev_stage_file'])['data'][None]
                # we theoretically support several possible previous segmentations from which only one is sampled. But
                # in practice this feature was never used so it's always only one segmentation
                seg_key = np.random.choice(segs_from_previous_stage.shape[0])
                seg_from_previous_stage = segs_from_previous_stage[seg_key:seg_key + 1]
                assert all([i == j for i, j in zip(seg_from_previous_stage.shape[1:], case_all_data.shape[1:])]), \
                    "seg_from_previous_stage does not match the shape of case_all_data: %s vs %s" % \
                    (str(seg_from_previous_stage.shape[1:]), str(case_all_data.shape[1:]))
            else:
                seg_from_previous_stage = None
            # BUGFIX: take a copy. The original aliased self.need_to_pad, so the in-place
            # adjustment below permanently grew the loader's padding for every later batch.
            need_to_pad = self.need_to_pad.copy()
            for d in range(3):
                # if case_all_data.shape + need_to_pad is still < patch size we need to pad more! We pad on both sides
                # always
                if need_to_pad[d] + case_all_data.shape[d + 1] < self.patch_size[d]:
                    need_to_pad[d] = self.patch_size[d] - case_all_data.shape[d + 1]
            # we can now choose the bbox from -need_to_pad // 2 to shape - patch_size + need_to_pad // 2. Here we
            # define what the upper and lower bound can be to then sample from them with np.random.randint
            shape = case_all_data.shape[1:]
            lb_x = - need_to_pad[0] // 2
            ub_x = shape[0] + need_to_pad[0] // 2 + need_to_pad[0] % 2 - self.patch_size[0]
            lb_y = - need_to_pad[1] // 2
            ub_y = shape[1] + need_to_pad[1] // 2 + need_to_pad[1] % 2 - self.patch_size[1]
            lb_z = - need_to_pad[2] // 2
            ub_z = shape[2] + need_to_pad[2] // 2 + need_to_pad[2] % 2 - self.patch_size[2]
            # if not force_fg then we can just sample the bbox randomly from lb and ub. Else we need to make sure we get
            # at least one of the foreground classes in the patch
            if not force_fg:
                bbox_x_lb = np.random.randint(lb_x, ub_x + 1)
                bbox_y_lb = np.random.randint(lb_y, ub_y + 1)
                bbox_z_lb = np.random.randint(lb_z, ub_z + 1)
            else:
                # these values should have been precomputed
                if 'class_locations' not in properties.keys():
                    raise RuntimeError("Please rerun the preprocessing with the newest version of nnU-Net!")
                # this saves us a np.unique. Preprocessing already did that for all cases. Neat.
                foreground_classes = np.array(
                    [i for i in properties['class_locations'].keys() if len(properties['class_locations'][i]) != 0])
                foreground_classes = foreground_classes[foreground_classes > 0]
                if len(foreground_classes) == 0:
                    # this only happens if some image does not contain foreground voxels at all
                    selected_class = None
                    voxels_of_that_class = None
                    print('case does not contain any foreground classes', i)
                else:
                    selected_class = np.random.choice(foreground_classes)
                    voxels_of_that_class = properties['class_locations'][selected_class]
                if voxels_of_that_class is not None:
                    selected_voxel = voxels_of_that_class[np.random.choice(len(voxels_of_that_class))]
                    # selected voxel is center voxel. Subtract half the patch size to get lower bbox voxel.
                    # Make sure it is within the bounds of lb and ub
                    bbox_x_lb = max(lb_x, selected_voxel[0] - self.patch_size[0] // 2)
                    bbox_y_lb = max(lb_y, selected_voxel[1] - self.patch_size[1] // 2)
                    bbox_z_lb = max(lb_z, selected_voxel[2] - self.patch_size[2] // 2)
                else:
                    # If the image does not contain any foreground classes, we fall back to random cropping
                    bbox_x_lb = np.random.randint(lb_x, ub_x + 1)
                    bbox_y_lb = np.random.randint(lb_y, ub_y + 1)
                    bbox_z_lb = np.random.randint(lb_z, ub_z + 1)
            bbox_x_ub = bbox_x_lb + self.patch_size[0]
            bbox_y_ub = bbox_y_lb + self.patch_size[1]
            bbox_z_ub = bbox_z_lb + self.patch_size[2]
            # We first crop the data to the region of the bbox that actually lies within the data. This will result in a
            # smaller array which is then faster to pad. valid_bbox is just the coord that lied within the data cube.
            # It will be padded to match the patch size later
            valid_bbox_x_lb = max(0, bbox_x_lb)
            valid_bbox_x_ub = min(shape[0], bbox_x_ub)
            valid_bbox_y_lb = max(0, bbox_y_lb)
            valid_bbox_y_ub = min(shape[1], bbox_y_ub)
            valid_bbox_z_lb = max(0, bbox_z_lb)
            valid_bbox_z_ub = min(shape[2], bbox_z_ub)
            # seg is treated differently from seg_from_previous_stage because seg needs to be padded with -1 constant
            # whereas seg_from_previous_stage needs to be padded with 0s (we could also remove label -1 in the data
            # augmentation but this way it is less error prone)
            case_all_data = np.copy(case_all_data[:, valid_bbox_x_lb:valid_bbox_x_ub,
                                    valid_bbox_y_lb:valid_bbox_y_ub,
                                    valid_bbox_z_lb:valid_bbox_z_ub])
            if seg_from_previous_stage is not None:
                seg_from_previous_stage = seg_from_previous_stage[:, valid_bbox_x_lb:valid_bbox_x_ub,
                                          valid_bbox_y_lb:valid_bbox_y_ub,
                                          valid_bbox_z_lb:valid_bbox_z_ub]
            data[j] = np.pad(case_all_data[:-1], ((0, 0),
                                                  (-min(0, bbox_x_lb), max(bbox_x_ub - shape[0], 0)),
                                                  (-min(0, bbox_y_lb), max(bbox_y_ub - shape[1], 0)),
                                                  (-min(0, bbox_z_lb), max(bbox_z_ub - shape[2], 0))),
                             self.pad_mode, **self.pad_kwargs_data)
            seg[j, 0] = np.pad(case_all_data[-1:], ((0, 0),
                                                    (-min(0, bbox_x_lb), max(bbox_x_ub - shape[0], 0)),
                                                    (-min(0, bbox_y_lb), max(bbox_y_ub - shape[1], 0)),
                                                    (-min(0, bbox_z_lb), max(bbox_z_ub - shape[2], 0))),
                               'constant', **{'constant_values': -1})
            if seg_from_previous_stage is not None:
                seg[j, 1] = np.pad(seg_from_previous_stage, ((0, 0),
                                                             (-min(0, bbox_x_lb),
                                                              max(bbox_x_ub - shape[0], 0)),
                                                             (-min(0, bbox_y_lb),
                                                              max(bbox_y_ub - shape[1], 0)),
                                                             (-min(0, bbox_z_lb),
                                                              max(bbox_z_ub - shape[2], 0))),
                                   'constant', **{'constant_values': 0})
        return {'data': data, 'seg': seg, 'properties': case_properties, 'keys': selected_keys}
class DataLoader2D(SlimDataLoaderBase):
def __init__(self, data, patch_size, final_patch_size, batch_size, oversample_foreground_percent=0.0,
memmap_mode="r", pseudo_3d_slices=1, pad_mode="edge",
pad_kwargs_data=None, pad_sides=None):
"""
This is the basic data loader for 2D networks. It uses preprocessed data as produced by my (Fabian) preprocessing.
You can load the data with load_dataset(folder) where folder is the folder where the npz files are located. If there
are only npz files present in that folder, the data loader will unpack them on the fly. This may take a while
and increase CPU usage. Therefore, I advise you to call unpack_dataset(folder) first, which will unpack all npz
to npy. Don't forget to call delete_npy(folder) after you are done with training?
Why all the hassle? Well the decathlon dataset is huge. Using npy for everything will consume >1 TB and that is uncool
given that I (Fabian) will have to store that permanently on /datasets and my local computer. With htis strategy all
data is stored in a compressed format (factor 10 smaller) and only unpacked when needed.
:param data: get this with load_dataset(folder, stage=0). Plug the return value in here and you are g2g (good to go)
:param patch_size: what patch size will this data loader return? it is common practice to first load larger
patches so that a central crop after data augmentation can be done to reduce border artifacts. If unsure, use
get_patch_size() from data_augmentation.default_data_augmentation
:param final_patch_size: what will the patch finally be cropped to (after data augmentation)? this is the patch
size that goes into your network. We need this here because we will pad patients in here so that patches at the
border of patients are sampled properly
:param batch_size:
:param num_batches: how many batches will the data loader produce before stopping? None=endless
:param seed:
:param stage: ignore this (Fabian only)
:param transpose: ignore this
:param random: sample randomly; CAREFUL! non-random sampling requires batch_size=1, otherwise you will iterate batch_size times over the dataset
:param pseudo_3d_slices: 7 = 3 below and 3 above the center slice
"""
super(DataLoader2D, self).__init__(data, batch_size, None)
if pad_kwargs_data is None:
pad_kwargs_data = OrderedDict()
self.pad_kwargs_data = pad_kwargs_data
self.pad_mode = pad_mode
self.pseudo_3d_slices = pseudo_3d_slices
self.oversample_foreground_percent = oversample_foreground_percent
self.final_patch_size = final_patch_size
self.patch_size = patch_size
self.list_of_keys = list(self._data.keys())
self.need_to_pad = np.array(patch_size) - np.array(final_patch_size)
self.memmap_mode = memmap_mode
if pad_sides is not None:
if not isinstance(pad_sides, np.ndarray):
pad_sides = np.array(pad_sides)
self.need_to_pad += pad_sides
self.pad_sides = pad_sides
self.data_shape, self.seg_shape = self.determine_shapes()
def determine_shapes(self):
num_seg = 1
k = list(self._data.keys())[0]
if isfile(self._data[k]['data_file'][:-4] + ".npy"):
case_all_data = np.load(self._data[k]['data_file'][:-4] + ".npy", self.memmap_mode)
else:
case_all_data = np.load(self._data[k]['data_file'])['data']
num_color_channels = case_all_data.shape[0] - num_seg
data_shape = (self.batch_size, num_color_channels, *self.patch_size)
seg_shape = (self.batch_size, num_seg, *self.patch_size)
return data_shape, seg_shape
def get_do_oversample(self, batch_idx):
return not batch_idx < round(self.batch_size * (1 - self.oversample_foreground_percent))
def generate_train_batch(self):
    """Sample and return one 2D training batch.

    Returns a dict with:
      - 'data': float32 array (batch, color_channels, *patch_size)
      - 'seg':  float32 array (batch, 1, *patch_size); area padded beyond the
        image is labeled -1 so data augmentation can identify it
      - 'properties': list of per-case property dicts
      - 'keys': the sampled case identifiers
    """
    # sample with replacement so batch_size may exceed the dataset size
    selected_keys = np.random.choice(self.list_of_keys, self.batch_size, True, None)
    data = np.zeros(self.data_shape, dtype=np.float32)
    seg = np.zeros(self.seg_shape, dtype=np.float32)
    case_properties = []
    for j, i in enumerate(selected_keys):
        if 'properties' in self._data[i].keys():
            properties = self._data[i]['properties']
        else:
            properties = load_pickle(self._data[i]['properties_file'])
        case_properties.append(properties)
        # positions at the end of the batch are forced to contain foreground
        if self.get_do_oversample(j):
            force_fg = True
        else:
            force_fg = False
        if not isfile(self._data[i]['data_file'][:-4] + ".npy"):
            # lets hope you know what you're doing
            case_all_data = np.load(self._data[i]['data_file'][:-4] + ".npz")['data']
        else:
            case_all_data = np.load(self._data[i]['data_file'][:-4] + ".npy", self.memmap_mode)
        # this is for when there is just a 2d slice in case_all_data (2d support)
        if len(case_all_data.shape) == 3:
            case_all_data = case_all_data[:, None]
        # first select a slice. This can be either random (no force fg) or guaranteed to contain some class
        if not force_fg:
            random_slice = np.random.choice(case_all_data.shape[1])
            selected_class = None
        else:
            # these values should have been precomputed
            if 'class_locations' not in properties.keys():
                raise RuntimeError("Please rerun the preprocessing with the newest version of nnU-Net!")
            # classes > 0 that actually have voxel locations recorded
            foreground_classes = np.array(
                [i for i in properties['class_locations'].keys() if len(properties['class_locations'][i]) != 0])
            foreground_classes = foreground_classes[foreground_classes > 0]
            if len(foreground_classes) == 0:
                # fall back to random slice selection if the case has no foreground at all
                selected_class = None
                random_slice = np.random.choice(case_all_data.shape[1])
                print('case does not contain any foreground classes', i)
            else:
                selected_class = np.random.choice(foreground_classes)
                voxels_of_that_class = properties['class_locations'][selected_class]
                valid_slices = np.unique(voxels_of_that_class[:, 0])
                random_slice = np.random.choice(valid_slices)
                # keep only in-slice coordinates (drop the slice axis)
                voxels_of_that_class = voxels_of_that_class[voxels_of_that_class[:, 0] == random_slice]
                voxels_of_that_class = voxels_of_that_class[:, 1:]
        # now crop case_all_data to contain just the slice of interest. If we want additional slice above and
        # below the current slice, here is where we get them. We stack those as additional color channels
        if self.pseudo_3d_slices == 1:
            case_all_data = case_all_data[:, random_slice]
        else:
            # this is very deprecated and will probably not work anymore. If you intend to use this you need to
            # check this!
            mn = random_slice - (self.pseudo_3d_slices - 1) // 2
            mx = random_slice + (self.pseudo_3d_slices - 1) // 2 + 1
            valid_mn = max(mn, 0)
            valid_mx = min(mx, case_all_data.shape[1])
            case_all_seg = case_all_data[-1:]
            case_all_data = case_all_data[:-1]
            case_all_data = case_all_data[:, valid_mn:valid_mx]
            # only the center slice's segmentation is kept
            case_all_seg = case_all_seg[:, random_slice]
            need_to_pad_below = valid_mn - mn
            need_to_pad_above = mx - valid_mx
            if need_to_pad_below > 0:
                shp_for_pad = np.array(case_all_data.shape)
                shp_for_pad[1] = need_to_pad_below
                case_all_data = np.concatenate((np.zeros(shp_for_pad), case_all_data), 1)
            if need_to_pad_above > 0:
                shp_for_pad = np.array(case_all_data.shape)
                shp_for_pad[1] = need_to_pad_above
                case_all_data = np.concatenate((case_all_data, np.zeros(shp_for_pad)), 1)
            # fold the pseudo-3d slices into the channel axis
            case_all_data = case_all_data.reshape((-1, case_all_data.shape[-2], case_all_data.shape[-1]))
            case_all_data = np.concatenate((case_all_data, case_all_seg), 0)
        # case all data should now be (c, x, y)
        assert len(case_all_data.shape) == 3
        # we can now choose the bbox from -need_to_pad // 2 to shape - patch_size + need_to_pad // 2. Here we
        # define what the upper and lower bound can be to then sample form them with np.random.randint
        need_to_pad = self.need_to_pad
        for d in range(2):
            # if case_all_data.shape + need_to_pad is still < patch size we need to pad more! We pad on both sides
            # always
            if need_to_pad[d] + case_all_data.shape[d + 1] < self.patch_size[d]:
                need_to_pad[d] = self.patch_size[d] - case_all_data.shape[d + 1]
        shape = case_all_data.shape[1:]
        lb_x = - need_to_pad[0] // 2
        ub_x = shape[0] + need_to_pad[0] // 2 + need_to_pad[0] % 2 - self.patch_size[0]
        lb_y = - need_to_pad[1] // 2
        ub_y = shape[1] + need_to_pad[1] // 2 + need_to_pad[1] % 2 - self.patch_size[1]
        # if not force_fg then we can just sample the bbox randomly from lb and ub. Else we need to make sure we get
        # at least one of the foreground classes in the patch
        if not force_fg or selected_class is None:
            bbox_x_lb = np.random.randint(lb_x, ub_x + 1)
            bbox_y_lb = np.random.randint(lb_y, ub_y + 1)
        else:
            # this saves us a np.unique. Preprocessing already did that for all cases. Neat.
            selected_voxel = voxels_of_that_class[np.random.choice(len(voxels_of_that_class))]
            # selected voxel is center voxel. Subtract half the patch size to get lower bbox voxel.
            # Make sure it is within the bounds of lb and ub
            bbox_x_lb = max(lb_x, selected_voxel[0] - self.patch_size[0] // 2)
            bbox_y_lb = max(lb_y, selected_voxel[1] - self.patch_size[1] // 2)
        bbox_x_ub = bbox_x_lb + self.patch_size[0]
        bbox_y_ub = bbox_y_lb + self.patch_size[1]
        # whoever wrote this knew what he was doing (hint: it was me). We first crop the data to the region of the
        # bbox that actually lies within the data. This will result in a smaller array which is then faster to pad.
        # valid_bbox is just the coord that lied within the data cube. It will be padded to match the patch size
        # later
        valid_bbox_x_lb = max(0, bbox_x_lb)
        valid_bbox_x_ub = min(shape[0], bbox_x_ub)
        valid_bbox_y_lb = max(0, bbox_y_lb)
        valid_bbox_y_ub = min(shape[1], bbox_y_ub)
        # At this point you might ask yourself why we would treat seg differently from seg_from_previous_stage.
        # Why not just concatenate them here and forget about the if statements? Well that's because segneeds to
        # be padded with -1 constant whereas seg_from_previous_stage needs to be padded with 0s (we could also
        # remove label -1 in the data augmentation but this way it is less error prone)
        case_all_data = case_all_data[:, valid_bbox_x_lb:valid_bbox_x_ub,
                        valid_bbox_y_lb:valid_bbox_y_ub]
        case_all_data_donly = np.pad(case_all_data[:-1], ((0, 0),
                                                          (-min(0, bbox_x_lb), max(bbox_x_ub - shape[0], 0)),
                                                          (-min(0, bbox_y_lb), max(bbox_y_ub - shape[1], 0))),
                                     self.pad_mode, **self.pad_kwargs_data)
        case_all_data_segonly = np.pad(case_all_data[-1:], ((0, 0),
                                                            (-min(0, bbox_x_lb), max(bbox_x_ub - shape[0], 0)),
                                                            (-min(0, bbox_y_lb), max(bbox_y_ub - shape[1], 0))),
                                       'constant', **{'constant_values': -1})
        data[j] = case_all_data_donly
        seg[j] = case_all_data_segonly
    keys = selected_keys
    return {'data': data, 'seg': seg, 'properties': case_properties, "keys": keys}
if __name__ == "__main__":
    # Smoke test: load the preprocessed Task002_Heart dataset and instantiate
    # the 3D and 2D data loaders. Requires preprocessing_output_dir to exist.
    t = "Task002_Heart"
    p = join(preprocessing_output_dir, t, "stage1")
    dataset = load_dataset(p)
    with open(join(join(preprocessing_output_dir, t), "plans_stage1.pkl"), 'rb') as f:
        plans = pickle.load(f)
    # decompress .npz cases to .npy so the loaders can memmap them
    unpack_dataset(p)
    dl = DataLoader3D(dataset, (32, 32, 32), (32, 32, 32), 2, oversample_foreground_percent=0.33)
    # rebind with the patch size from the plans (first dl was a fixed-size sanity check)
    dl = DataLoader3D(dataset, np.array(plans['patch_size']).astype(int), np.array(plans['patch_size']).astype(int), 2,
                      oversample_foreground_percent=0.33)
    # 2D loader uses only the in-plane dimensions of the 3D patch size
    dl2d = DataLoader2D(dataset, (64, 64), np.array(plans['patch_size']).astype(int)[1:], 12,
                        oversample_foreground_percent=0.33)
| 33,735
| 54.486842
| 157
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/training/network_training/nnUNetTrainerV2_DDP.py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import shutil
from collections import OrderedDict
from multiprocessing import Pool
from time import sleep
from typing import Tuple
import numpy as np
import torch
import torch.distributed as dist
from torch.cuda.amp import autocast
from torch.nn.parallel import DistributedDataParallel as DDP
from batchgenerators.utilities.file_and_folder_operations import maybe_mkdir_p, join, subfiles, isfile, load_pickle, \
save_json
from nnunet.configuration import default_num_threads
from nnunet.evaluation.evaluator import aggregate_scores
from nnunet.inference.segmentation_export import save_segmentation_nifti_from_softmax
from nnunet.network_architecture.neural_network import SegmentationNetwork
from nnunet.postprocessing.connected_components import determine_postprocessing
from nnunet.training.data_augmentation.default_data_augmentation import get_moreDA_augmentation
from nnunet.training.dataloading.dataset_loading import unpack_dataset
from nnunet.training.loss_functions.crossentropy import RobustCrossEntropyLoss
from nnunet.training.loss_functions.dice_loss import get_tp_fp_fn_tn
from nnunet.training.network_training.nnUNetTrainer import nnUNetTrainer
from nnunet.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2
from nnunet.utilities.distributed import awesome_allgather_function
from nnunet.utilities.nd_softmax import softmax_helper
from nnunet.utilities.tensor_utilities import sum_tensor
from nnunet.utilities.to_torch import to_cuda, maybe_to_torch
from torch import nn, distributed
from torch.nn.utils import clip_grad_norm_
from torch.optim.lr_scheduler import _LRScheduler
class nnUNetTrainerV2_DDP(nnUNetTrainerV2):
    """nnUNetTrainerV2 variant for multi-GPU training via DistributedDataParallel.

    Each process trains on one GPU (selected by ``local_rank``). Batch size and
    foreground oversampling are split across ranks, dice statistics are
    all-gathered so batch dice is computed over the global batch, and
    checkpointing/logging/plotting are restricted to rank 0.
    """

    def __init__(self, plans_file, fold, local_rank, output_folder=None, dataset_directory=None, batch_dice=True,
                 stage=None,
                 unpack_data=True, deterministic=True, distribute_batch_size=False, fp16=False):
        """
        :param local_rank: rank of this process on the local node; also used as GPU id and RNG seed
        :param distribute_batch_size: if True, the configured batch size is split across ranks;
            otherwise every rank uses the full configured batch size (global batch = batch_size * world_size)
        """
        super().__init__(plans_file, fold, output_folder, dataset_directory, batch_dice, stage,
                         unpack_data, deterministic, fp16)
        self.init_args = (
            plans_file, fold, local_rank, output_folder, dataset_directory, batch_dice, stage, unpack_data,
            deterministic, distribute_batch_size, fp16)
        self.distribute_batch_size = distribute_batch_size
        # seed per rank so the ranks draw different training samples
        np.random.seed(local_rank)
        torch.manual_seed(local_rank)
        if torch.cuda.is_available():
            torch.cuda.manual_seed_all(local_rank)
        self.local_rank = local_rank
        if torch.cuda.is_available():
            torch.cuda.set_device(local_rank)
        dist.init_process_group(backend='nccl', init_method='env://')
        self.loss = None
        self.ce_loss = RobustCrossEntropyLoss()
        self.global_batch_size = None  # we need to know this to properly steer oversample

    def set_batch_size_and_oversample(self):
        """Compute this rank's batch size and oversample fraction.

        The foreground-forced positions live at the END of the global batch, so
        the per-rank oversample percentage depends on which slice of the global
        batch this rank covers.
        """
        batch_sizes = []
        oversample_percents = []
        world_size = dist.get_world_size()
        my_rank = dist.get_rank()
        if self.distribute_batch_size:
            self.global_batch_size = self.batch_size
        else:
            self.global_batch_size = self.batch_size * world_size
        batch_size_per_GPU = np.ceil(self.batch_size / world_size).astype(int)
        for rank in range(world_size):
            if self.distribute_batch_size:
                # last rank(s) may get a smaller share so the total matches batch_size
                if (rank + 1) * batch_size_per_GPU > self.batch_size:
                    batch_size = batch_size_per_GPU - ((rank + 1) * batch_size_per_GPU - self.batch_size)
                else:
                    batch_size = batch_size_per_GPU
            else:
                batch_size = self.batch_size
            batch_sizes.append(batch_size)
            # global sample-index range [sample_id_low, sample_id_high) covered by this rank
            sample_id_low = 0 if len(batch_sizes) == 0 else np.sum(batch_sizes[:-1])
            sample_id_high = np.sum(batch_sizes)
            if sample_id_high / self.global_batch_size < (1 - self.oversample_foreground_percent):
                oversample_percents.append(0.0)
            elif sample_id_low / self.global_batch_size > (1 - self.oversample_foreground_percent):
                oversample_percents.append(1.0)
            else:
                # rank straddles the oversampling boundary: force fg for the covered tail fraction
                percent_covered_by_this_rank = sample_id_high / self.global_batch_size - sample_id_low / self.global_batch_size
                oversample_percent_here = 1 - (((1 - self.oversample_foreground_percent) -
                                                sample_id_low / self.global_batch_size) / percent_covered_by_this_rank)
                oversample_percents.append(oversample_percent_here)
        print("worker", my_rank, "oversample", oversample_percents[my_rank])
        print("worker", my_rank, "batch_size", batch_sizes[my_rank])
        self.batch_size = batch_sizes[my_rank]
        self.oversample_foreground_percent = oversample_percents[my_rank]

    def save_checkpoint(self, fname, save_optimizer=True):
        # only rank 0 writes checkpoints to avoid concurrent writes
        if self.local_rank == 0:
            super().save_checkpoint(fname, save_optimizer)

    def plot_progress(self):
        # only rank 0 plots
        if self.local_rank == 0:
            super().plot_progress()

    def print_to_log_file(self, *args, also_print_to_console=True):
        # only rank 0 logs
        if self.local_rank == 0:
            super().print_to_log_file(*args, also_print_to_console=also_print_to_console)

    def process_plans(self, plans):
        super().process_plans(plans)
        # after plans are processed we know the configured batch size -> split it across ranks
        self.set_batch_size_and_oversample()

    def initialize(self, training=True, force_load_plans=False):
        """Set up data loaders, augmentation, loss weights, network, optimizer and
        wrap the network in DDP.

        :param training: if False, only the network/optimizer are built (inference mode)
        """
        if not self.was_initialized:
            maybe_mkdir_p(self.output_folder)
            if force_load_plans or (self.plans is None):
                self.load_plans_file()
            self.process_plans(self.plans)
            self.setup_DA_params()
            self.folder_with_preprocessed_data = join(self.dataset_directory, self.plans['data_identifier'] +
                                                      "_stage%d" % self.stage)
            if training:
                self.dl_tr, self.dl_val = self.get_basic_generators()
                if self.unpack_data:
                    if self.local_rank == 0:
                        print("unpacking dataset")
                        unpack_dataset(self.folder_with_preprocessed_data)
                        print("done")
                    # all ranks wait for rank 0 to finish unpacking
                    distributed.barrier()
                else:
                    print(
                        "INFO: Not unpacking data! Training may be slow due to that. Pray you are not using 2d or you "
                        "will wait all winter for your model to finish!")
                # setting weights for deep supervision losses
                net_numpool = len(self.net_num_pool_op_kernel_sizes)
                # we give each output a weight which decreases exponentially (division by 2) as the resolution decreases
                # this gives higher resolution outputs more weight in the loss
                weights = np.array([1 / (2 ** i) for i in range(net_numpool)])
                # the lowest-resolution output is not used. Normalize weights so that they sum to 1
                mask = np.array([True if i < net_numpool - 1 else False for i in range(net_numpool)])
                weights[~mask] = 0
                weights = weights / weights.sum()
                self.ds_loss_weights = weights
                # np.random.random_integers was removed from NumPy; randint with an
                # exclusive upper bound of 100000 yields the same [0, 99999] range
                seeds_train = np.random.randint(0, 100000, self.data_aug_params.get('num_threads'))
                seeds_val = np.random.randint(0, 100000, max(self.data_aug_params.get('num_threads') // 2, 1))
                print("seeds train", seeds_train)
                print("seeds_val", seeds_val)
                self.tr_gen, self.val_gen = get_moreDA_augmentation(self.dl_tr, self.dl_val,
                                                                    self.data_aug_params[
                                                                        'patch_size_for_spatialtransform'],
                                                                    self.data_aug_params,
                                                                    deep_supervision_scales=self.deep_supervision_scales,
                                                                    seeds_train=seeds_train,
                                                                    seeds_val=seeds_val,
                                                                    pin_memory=self.pin_memory)
                self.print_to_log_file("TRAINING KEYS:\n %s" % (str(self.dataset_tr.keys())),
                                       also_print_to_console=False)
                self.print_to_log_file("VALIDATION KEYS:\n %s" % (str(self.dataset_val.keys())),
                                       also_print_to_console=False)
            else:
                pass
            self.initialize_network()
            self.initialize_optimizer_and_scheduler()
            self.network = DDP(self.network, device_ids=[self.local_rank])
        else:
            self.print_to_log_file('self.was_initialized is True, not running self.initialize again')
        self.was_initialized = True

    def run_iteration(self, data_generator, do_backprop=True, run_online_evaluation=False):
        """Run one forward/backward pass; returns the detached scalar loss."""
        data_dict = next(data_generator)
        data = data_dict['data']
        target = data_dict['target']
        data = maybe_to_torch(data)
        target = maybe_to_torch(target)
        if torch.cuda.is_available():
            data = to_cuda(data, gpu_id=None)
            target = to_cuda(target, gpu_id=None)
        self.optimizer.zero_grad()
        if self.fp16:
            with autocast():
                output = self.network(data)
                del data
                l = self.compute_loss(output, target)
            if do_backprop:
                self.amp_grad_scaler.scale(l).backward()
                # unscale before clipping so the clip threshold applies to true gradients
                self.amp_grad_scaler.unscale_(self.optimizer)
                torch.nn.utils.clip_grad_norm_(self.network.parameters(), 12)
                self.amp_grad_scaler.step(self.optimizer)
                self.amp_grad_scaler.update()
        else:
            output = self.network(data)
            del data
            l = self.compute_loss(output, target)
            if do_backprop:
                l.backward()
                torch.nn.utils.clip_grad_norm_(self.network.parameters(), 12)
                self.optimizer.step()
        if run_online_evaluation:
            self.run_online_evaluation(output, target)
        del target
        return l.detach().cpu().numpy()

    def compute_loss(self, output, target):
        """Deep-supervised CE + soft dice loss.

        For batch dice, the dice nominator/denominator are all-gathered across
        ranks so the dice is computed over the global batch.
        """
        total_loss = None
        for i in range(len(output)):
            # Starting here it gets spicy!
            axes = tuple(range(2, len(output[i].size())))
            # network does not do softmax. We need to do softmax for dice
            output_softmax = softmax_helper(output[i])
            # get the tp, fp and fn terms we need
            tp, fp, fn, _ = get_tp_fp_fn_tn(output_softmax, target[i], axes, mask=None)
            # for dice, compute nominator and denominator so that we have to accumulate only 2 instead of 3 variables
            # do_bg=False in nnUNetTrainer -> [:, 1:]
            nominator = 2 * tp[:, 1:]
            denominator = 2 * tp[:, 1:] + fp[:, 1:] + fn[:, 1:]
            if self.batch_dice:
                # for DDP we need to gather all nominator and denominator terms from all GPUS to do proper batch dice
                nominator = awesome_allgather_function.apply(nominator)
                denominator = awesome_allgather_function.apply(denominator)
                nominator = nominator.sum(0)
                denominator = denominator.sum(0)
            else:
                pass
            ce_loss = self.ce_loss(output[i], target[i][:, 0].long())
            # we smooth by 1e-5 to penalize false positives if tp is 0
            dice_loss = (- (nominator + 1e-5) / (denominator + 1e-5)).mean()
            if total_loss is None:
                total_loss = self.ds_loss_weights[i] * (ce_loss + dice_loss)
            else:
                total_loss += self.ds_loss_weights[i] * (ce_loss + dice_loss)
        return total_loss

    def run_online_evaluation(self, output, target):
        """Accumulate per-class hard TP/FP/FN (all-gathered across ranks) for
        the running foreground dice displayed during training."""
        with torch.no_grad():
            num_classes = output[0].shape[1]
            output_seg = output[0].argmax(1)
            target = target[0][:, 0]
            axes = tuple(range(1, len(target.shape)))
            tp_hard = torch.zeros((target.shape[0], num_classes - 1)).to(output_seg.device.index)
            fp_hard = torch.zeros((target.shape[0], num_classes - 1)).to(output_seg.device.index)
            fn_hard = torch.zeros((target.shape[0], num_classes - 1)).to(output_seg.device.index)
            for c in range(1, num_classes):
                tp_hard[:, c - 1] = sum_tensor((output_seg == c).float() * (target == c).float(), axes=axes)
                fp_hard[:, c - 1] = sum_tensor((output_seg == c).float() * (target != c).float(), axes=axes)
                fn_hard[:, c - 1] = sum_tensor((output_seg != c).float() * (target == c).float(), axes=axes)
            # tp_hard, fp_hard, fn_hard = get_tp_fp_fn((output_softmax > (1 / num_classes)).float(), target,
            #                                         axes, None)
            # print_if_rank0("before allgather", tp_hard.shape)
            tp_hard = tp_hard.sum(0, keepdim=False)[None]
            fp_hard = fp_hard.sum(0, keepdim=False)[None]
            fn_hard = fn_hard.sum(0, keepdim=False)[None]
            tp_hard = awesome_allgather_function.apply(tp_hard)
            fp_hard = awesome_allgather_function.apply(fp_hard)
            fn_hard = awesome_allgather_function.apply(fn_hard)
            tp_hard = tp_hard.detach().cpu().numpy().sum(0)
            fp_hard = fp_hard.detach().cpu().numpy().sum(0)
            fn_hard = fn_hard.detach().cpu().numpy().sum(0)
            self.online_eval_foreground_dc.append(list((2 * tp_hard) / (2 * tp_hard + fp_hard + fn_hard + 1e-8)))
            self.online_eval_tp.append(list(tp_hard))
            self.online_eval_fp.append(list(fp_hard))
            self.online_eval_fn.append(list(fn_hard))

    def run_training(self):
        """
        if we run with -c then we need to set the correct lr for the first epoch, otherwise it will run the first
        continued epoch with self.initial_lr

        we also need to make sure deep supervision in the network is enabled for training, thus the wrapper
        :return:
        """
        self.maybe_update_lr(self.epoch)  # if we dont overwrite epoch then self.epoch+1 is used which is not what we
        # want at the start of the training
        if isinstance(self.network, DDP):
            net = self.network.module
        else:
            net = self.network
        ds = net.do_ds
        net.do_ds = True
        ret = nnUNetTrainer.run_training(self)
        net.do_ds = ds
        return ret

    def validate(self, do_mirroring: bool = True, use_sliding_window: bool = True,
                 step_size: float = 0.5, save_softmax: bool = True, use_gaussian: bool = True, overwrite: bool = True,
                 validation_folder_name: str = 'validation_raw', debug: bool = False, all_in_gpu: bool = False,
                 segmentation_export_kwargs: dict = None):
        """Predict the validation set (cases are sharded across ranks), then on
        rank 0 aggregate scores, determine postprocessing and collect gt niftis."""
        if isinstance(self.network, DDP):
            net = self.network.module
        else:
            net = self.network
        ds = net.do_ds
        net.do_ds = False

        current_mode = self.network.training
        self.network.eval()

        assert self.was_initialized, "must initialize, ideally with checkpoint (or train first)"
        if self.dataset_val is None:
            self.load_dataset()
            self.do_split()

        if segmentation_export_kwargs is None:
            if 'segmentation_export_params' in self.plans.keys():
                force_separate_z = self.plans['segmentation_export_params']['force_separate_z']
                interpolation_order = self.plans['segmentation_export_params']['interpolation_order']
                interpolation_order_z = self.plans['segmentation_export_params']['interpolation_order_z']
            else:
                force_separate_z = None
                interpolation_order = 1
                interpolation_order_z = 0
        else:
            force_separate_z = segmentation_export_kwargs['force_separate_z']
            interpolation_order = segmentation_export_kwargs['interpolation_order']
            interpolation_order_z = segmentation_export_kwargs['interpolation_order_z']

        # predictions as they come from the network go here
        output_folder = join(self.output_folder, validation_folder_name)
        maybe_mkdir_p(output_folder)
        # this is for debug purposes
        my_input_args = {'do_mirroring': do_mirroring,
                         'use_sliding_window': use_sliding_window,
                         'step_size': step_size,
                         'save_softmax': save_softmax,
                         'use_gaussian': use_gaussian,
                         'overwrite': overwrite,
                         'validation_folder_name': validation_folder_name,
                         'debug': debug,
                         'all_in_gpu': all_in_gpu,
                         'segmentation_export_kwargs': segmentation_export_kwargs,
                         }
        save_json(my_input_args, join(output_folder, "validation_args.json"))

        if do_mirroring:
            if not self.data_aug_params['do_mirror']:
                raise RuntimeError(
                    "We did not train with mirroring so you cannot do inference with mirroring enabled")
            mirror_axes = self.data_aug_params['mirror_axes']
        else:
            mirror_axes = ()

        pred_gt_tuples = []
        export_pool = Pool(default_num_threads)
        results = []
        all_keys = list(self.dataset_val.keys())
        # shard the validation cases across ranks
        my_keys = all_keys[self.local_rank::dist.get_world_size()]
        # we cannot simply iterate over all_keys because we need to know pred_gt_tuples and valid_labels of all cases
        # for evaluation (which is done by local rank 0)
        for k in my_keys:
            properties = load_pickle(self.dataset[k]['properties_file'])
            fname = properties['list_of_data_files'][0].split("/")[-1][:-12]
            pred_gt_tuples.append([join(output_folder, fname + ".nii.gz"),
                                   join(self.gt_niftis_folder, fname + ".nii.gz")])
            if k in my_keys:
                if overwrite or (not isfile(join(output_folder, fname + ".nii.gz"))) or \
                        (save_softmax and not isfile(join(output_folder, fname + ".npz"))):
                    data = np.load(self.dataset[k]['data_file'])['data']

                    print(k, data.shape)
                    # -1 marks padding in the stored seg; map it to background
                    data[-1][data[-1] == -1] = 0

                    softmax_pred = self.predict_preprocessed_data_return_seg_and_softmax(data[:-1],
                                                                                         do_mirroring=do_mirroring,
                                                                                         mirror_axes=mirror_axes,
                                                                                         use_sliding_window=use_sliding_window,
                                                                                         step_size=step_size,
                                                                                         use_gaussian=use_gaussian,
                                                                                         all_in_gpu=all_in_gpu,
                                                                                         mixed_precision=self.fp16)[1]

                    softmax_pred = softmax_pred.transpose([0] + [i + 1 for i in self.transpose_backward])

                    if save_softmax:
                        softmax_fname = join(output_folder, fname + ".npz")
                    else:
                        softmax_fname = None

                    """There is a problem with python process communication that prevents us from communicating obejcts
                    larger than 2 GB between processes (basically when the length of the pickle string that will be sent is
                    communicated by the multiprocessing.Pipe object then the placeholder (\%i I think) does not allow for long
                    enough strings (lol). This could be fixed by changing i to l (for long) but that would require manually
                    patching system python code. We circumvent that problem here by saving softmax_pred to a npy file that will
                    then be read (and finally deleted) by the Process. save_segmentation_nifti_from_softmax can take either
                    filename or np.ndarray and will handle this automatically"""
                    if np.prod(softmax_pred.shape) > (2e9 / 4 * 0.85):  # *0.85 just to be save
                        np.save(join(output_folder, fname + ".npy"), softmax_pred)
                        softmax_pred = join(output_folder, fname + ".npy")
                    results.append(export_pool.starmap_async(save_segmentation_nifti_from_softmax,
                                                             ((softmax_pred, join(output_folder, fname + ".nii.gz"),
                                                               properties, interpolation_order,
                                                               self.regions_class_order,
                                                               None, None,
                                                               softmax_fname, None, force_separate_z,
                                                               interpolation_order_z),
                                                              )
                                                             )
                                   )

        _ = [i.get() for i in results]
        self.print_to_log_file("finished prediction")

        distributed.barrier()

        if self.local_rank == 0:
            # evaluate raw predictions
            self.print_to_log_file("evaluation of raw predictions")
            task = self.dataset_directory.split("/")[-1]
            job_name = self.experiment_name
            _ = aggregate_scores(pred_gt_tuples, labels=list(range(self.num_classes)),
                                 json_output_file=join(output_folder, "summary.json"),
                                 json_name=job_name + " val tiled %s" % (str(use_sliding_window)),
                                 json_author="Fabian",
                                 json_task=task, num_threads=default_num_threads)

            # in the old nnunet we would stop here. Now we add a postprocessing. This postprocessing can remove everything
            # except the largest connected component for each class. To see if this improves results, we do this for all
            # classes and then rerun the evaluation. Those classes for which this resulted in an improved dice score will
            # have this applied during inference as well
            self.print_to_log_file("determining postprocessing")
            determine_postprocessing(self.output_folder, self.gt_niftis_folder, validation_folder_name,
                                     final_subf_name=validation_folder_name + "_postprocessed", debug=debug)
            # after this the final predictions for the vlaidation set can be found in validation_folder_name_base + "_postprocessed"
            # They are always in that folder, even if no postprocessing as applied!

            # detemining postprocesing on a per-fold basis may be OK for this fold but what if another fold finds another
            # postprocesing to be better? In this case we need to consolidate. At the time the consolidation is going to be
            # done we won't know what self.gt_niftis_folder was, so now we copy all the niftis into a separate folder to
            # be used later
            gt_nifti_folder = join(self.output_folder_base, "gt_niftis")
            maybe_mkdir_p(gt_nifti_folder)
            for f in subfiles(self.gt_niftis_folder, suffix=".nii.gz"):
                success = False
                attempts = 0
                e = None
                while not success and attempts < 10:
                    try:
                        shutil.copy(f, gt_nifti_folder)
                        success = True
                    except OSError as copy_error:
                        # BUGFIX: "except ... as e" unbinds e when the except clause
                        # exits (Python 3), so "raise e" below would be a NameError.
                        # Rebind to the outer variable instead.
                        e = copy_error
                        attempts += 1
                        sleep(1)
                if not success:
                    print("Could not copy gt nifti file %s into folder %s" % (f, gt_nifti_folder))
                    if e is not None:
                        raise e

        self.network.train(current_mode)
        net.do_ds = ds

    def predict_preprocessed_data_return_seg_and_softmax(self, data: np.ndarray, do_mirroring: bool = True,
                                                         mirror_axes: Tuple[int] = None,
                                                         use_sliding_window: bool = True, step_size: float = 0.5,
                                                         use_gaussian: bool = True, pad_border_mode: str = 'constant',
                                                         pad_kwargs: dict = None, all_in_gpu: bool = False,
                                                         verbose: bool = True, mixed_precision=True) -> Tuple[
        np.ndarray, np.ndarray]:
        """Run 3D prediction with deep supervision temporarily disabled; returns
        (segmentation, softmax) as produced by SegmentationNetwork.predict_3D."""
        if pad_border_mode == 'constant' and pad_kwargs is None:
            pad_kwargs = {'constant_values': 0}

        if do_mirroring and mirror_axes is None:
            mirror_axes = self.data_aug_params['mirror_axes']

        if do_mirroring:
            assert self.data_aug_params["do_mirror"], "Cannot do mirroring as test time augmentation when training " \
                                                      "was done without mirroring"

        valid = list((SegmentationNetwork, nn.DataParallel, DDP))
        assert isinstance(self.network, tuple(valid))
        if isinstance(self.network, DDP):
            net = self.network.module
        else:
            net = self.network
        ds = net.do_ds
        net.do_ds = False
        ret = net.predict_3D(data, do_mirroring=do_mirroring, mirror_axes=mirror_axes,
                             use_sliding_window=use_sliding_window, step_size=step_size,
                             patch_size=self.patch_size, regions_class_order=self.regions_class_order,
                             use_gaussian=use_gaussian, pad_border_mode=pad_border_mode,
                             pad_kwargs=pad_kwargs, all_in_gpu=all_in_gpu, verbose=verbose,
                             mixed_precision=mixed_precision)
        net.do_ds = ds
        return ret

    def load_checkpoint_ram(self, checkpoint, train=True):
        """
        used for if the checkpoint is already in ram
        :param checkpoint:
        :param train:
        :return:
        """
        if not self.was_initialized:
            self.initialize(train)

        new_state_dict = OrderedDict()
        curr_state_dict_keys = list(self.network.state_dict().keys())
        # if state dict comes form nn.DataParallel but we use non-parallel model here then the state dict keys do not
        # match. Use heuristic to make it match
        for k, value in checkpoint['state_dict'].items():
            key = k
            if key not in curr_state_dict_keys:
                print("duh")
                # strip the "module." prefix added by (Distributed)DataParallel
                key = key[7:]
            new_state_dict[key] = value

        if self.fp16:
            self._maybe_init_amp()
            if 'amp_grad_scaler' in checkpoint.keys():
                self.amp_grad_scaler.load_state_dict(checkpoint['amp_grad_scaler'])

        self.network.load_state_dict(new_state_dict)
        self.epoch = checkpoint['epoch']
        if train:
            optimizer_state_dict = checkpoint['optimizer_state_dict']
            if optimizer_state_dict is not None:
                self.optimizer.load_state_dict(optimizer_state_dict)

            if self.lr_scheduler is not None and hasattr(self.lr_scheduler, 'load_state_dict') and checkpoint[
                'lr_scheduler_state_dict'] is not None:
                self.lr_scheduler.load_state_dict(checkpoint['lr_scheduler_state_dict'])

            if issubclass(self.lr_scheduler.__class__, _LRScheduler):
                self.lr_scheduler.step(self.epoch)

        self.all_tr_losses, self.all_val_losses, self.all_val_losses_tr_mode, self.all_val_eval_metrics = checkpoint[
            'plot_stuff']

        # after the training is done, the epoch is incremented one more time in my old code. This results in
        # self.epoch = 1001 for old trained models when the epoch is actually 1000. This causes issues because
        # len(self.all_tr_losses) = 1000 and the plot function will fail. We can easily detect and correct that here
        if self.epoch != len(self.all_tr_losses):
            self.print_to_log_file("WARNING in loading checkpoint: self.epoch != len(self.all_tr_losses). This is "
                                   "due to an old bug and should only appear when you are loading old models. New "
                                   "models should have this fixed! self.epoch is now set to len(self.all_tr_losses)")
            self.epoch = len(self.all_tr_losses)
            self.all_tr_losses = self.all_tr_losses[:self.epoch]
            self.all_val_losses = self.all_val_losses[:self.epoch]
            self.all_val_losses_tr_mode = self.all_val_losses_tr_mode[:self.epoch]
            self.all_val_eval_metrics = self.all_val_eval_metrics[:self.epoch]
| 30,456
| 50.447635
| 132
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/training/network_training/nnUNetTrainerV2_CascadeFullRes.py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from multiprocessing.pool import Pool
from time import sleep
import matplotlib
from nnunet.configuration import default_num_threads
from nnunet.postprocessing.connected_components import determine_postprocessing
from nnunet.training.data_augmentation.default_data_augmentation import get_default_augmentation, \
get_moreDA_augmentation
from nnunet.training.dataloading.dataset_loading import DataLoader3D, unpack_dataset
from nnunet.evaluation.evaluator import aggregate_scores
from nnunet.network_architecture.neural_network import SegmentationNetwork
from nnunet.paths import network_training_output_dir
from nnunet.inference.segmentation_export import save_segmentation_nifti_from_softmax
from batchgenerators.utilities.file_and_folder_operations import *
import numpy as np
from nnunet.training.loss_functions.deep_supervision import MultipleOutputLoss2
from nnunet.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2
from nnunet.utilities.one_hot_encoding import to_one_hot
import shutil
from torch import nn
matplotlib.use("agg")
class nnUNetTrainerV2CascadeFullRes(nnUNetTrainerV2):
def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None,
unpack_data=True, deterministic=True, previous_trainer="nnUNetTrainerV2", fp16=False):
super().__init__(plans_file, fold, output_folder, dataset_directory,
batch_dice, stage, unpack_data, deterministic, fp16)
self.init_args = (plans_file, fold, output_folder, dataset_directory, batch_dice, stage, unpack_data,
deterministic, previous_trainer, fp16)
if self.output_folder is not None:
task = self.output_folder.split("/")[-3]
plans_identifier = self.output_folder.split("/")[-2].split("__")[-1]
folder_with_segs_prev_stage = join(network_training_output_dir, "3d_lowres",
task, previous_trainer + "__" + plans_identifier, "pred_next_stage")
self.folder_with_segs_from_prev_stage = folder_with_segs_prev_stage
# Do not put segs_prev_stage into self.output_folder as we need to unpack them for performance and we
# don't want to do that in self.output_folder because that one is located on some network drive.
else:
self.folder_with_segs_from_prev_stage = None
def do_split(self):
super().do_split()
for k in self.dataset:
self.dataset[k]['seg_from_prev_stage_file'] = join(self.folder_with_segs_from_prev_stage,
k + "_segFromPrevStage.npz")
assert isfile(self.dataset[k]['seg_from_prev_stage_file']), \
"seg from prev stage missing: %s" % (self.dataset[k]['seg_from_prev_stage_file'])
for k in self.dataset_val:
self.dataset_val[k]['seg_from_prev_stage_file'] = join(self.folder_with_segs_from_prev_stage,
k + "_segFromPrevStage.npz")
for k in self.dataset_tr:
self.dataset_tr[k]['seg_from_prev_stage_file'] = join(self.folder_with_segs_from_prev_stage,
k + "_segFromPrevStage.npz")
def get_basic_generators(self):
self.load_dataset()
self.do_split()
if self.threeD:
dl_tr = DataLoader3D(self.dataset_tr, self.basic_generator_patch_size, self.patch_size, self.batch_size,
True, oversample_foreground_percent=self.oversample_foreground_percent,
pad_mode="constant", pad_sides=self.pad_all_sides)
dl_val = DataLoader3D(self.dataset_val, self.patch_size, self.patch_size, self.batch_size, True,
oversample_foreground_percent=self.oversample_foreground_percent,
pad_mode="constant", pad_sides=self.pad_all_sides)
else:
raise NotImplementedError("2D has no cascade")
return dl_tr, dl_val
def process_plans(self, plans):
super().process_plans(plans)
self.num_input_channels += (self.num_classes - 1) # for seg from prev stage
def setup_DA_params(self):
super().setup_DA_params()
self.data_aug_params["num_cached_per_thread"] = 2
self.data_aug_params['move_last_seg_chanel_to_data'] = True
self.data_aug_params['cascade_do_cascade_augmentations'] = True
self.data_aug_params['cascade_random_binary_transform_p'] = 0.4
self.data_aug_params['cascade_random_binary_transform_p_per_label'] = 1
self.data_aug_params['cascade_random_binary_transform_size'] = (1, 8)
self.data_aug_params['cascade_remove_conn_comp_p'] = 0.2
self.data_aug_params['cascade_remove_conn_comp_max_size_percent_threshold'] = 0.15
self.data_aug_params['cascade_remove_conn_comp_fill_with_other_class_p'] = 0.0
# we have 2 channels now because the segmentation from the previous stage is stored in 'seg' as well until it
# is moved to 'data' at the end
self.data_aug_params['selected_seg_channels'] = [0, 1]
# needed for converting the segmentation from the previous stage to one hot
self.data_aug_params['all_segmentation_labels'] = list(range(1, self.num_classes))
def initialize(self, training=True, force_load_plans=False):
"""
For prediction of test cases just set training=False, this will prevent loading of training data and
training batchgenerator initialization
:param training:
:return:
"""
if not self.was_initialized:
if force_load_plans or (self.plans is None):
self.load_plans_file()
self.process_plans(self.plans)
self.setup_DA_params()
################# Here we wrap the loss for deep supervision ############
# we need to know the number of outputs of the network
net_numpool = len(self.net_num_pool_op_kernel_sizes)
# we give each output a weight which decreases exponentially (division by 2) as the resolution decreases
# this gives higher resolution outputs more weight in the loss
weights = np.array([1 / (2 ** i) for i in range(net_numpool)])
# we don't use the lowest 2 outputs. Normalize weights so that they sum to 1
mask = np.array([True if i < net_numpool - 1 else False for i in range(net_numpool)])
weights[~mask] = 0
weights = weights / weights.sum()
self.ds_loss_weights = weights
# now wrap the loss
self.loss = MultipleOutputLoss2(self.loss, self.ds_loss_weights)
################# END ###################
self.folder_with_preprocessed_data = join(self.dataset_directory, self.plans['data_identifier'] +
"_stage%d" % self.stage)
if training:
if not isdir(self.folder_with_segs_from_prev_stage):
raise RuntimeError(
"Cannot run final stage of cascade. Run corresponding 3d_lowres first and predict the "
"segmentations for the next stage")
self.dl_tr, self.dl_val = self.get_basic_generators()
if self.unpack_data:
print("unpacking dataset")
unpack_dataset(self.folder_with_preprocessed_data)
print("done")
else:
print(
"INFO: Not unpacking data! Training may be slow due to that. Pray you are not using 2d or you "
"will wait all winter for your model to finish!")
self.tr_gen, self.val_gen = get_moreDA_augmentation(self.dl_tr, self.dl_val,
self.data_aug_params[
'patch_size_for_spatialtransform'],
self.data_aug_params,
deep_supervision_scales=self.deep_supervision_scales,
pin_memory=self.pin_memory)
self.print_to_log_file("TRAINING KEYS:\n %s" % (str(self.dataset_tr.keys())),
also_print_to_console=False)
self.print_to_log_file("VALIDATION KEYS:\n %s" % (str(self.dataset_val.keys())),
also_print_to_console=False)
else:
pass
self.initialize_network()
self.initialize_optimizer_and_scheduler()
assert isinstance(self.network, (SegmentationNetwork, nn.DataParallel))
else:
self.print_to_log_file('self.was_initialized is True, not running self.initialize again')
self.was_initialized = True
def validate(self, do_mirroring: bool = True, use_sliding_window: bool = True, step_size: float = 0.5,
save_softmax: bool = True, use_gaussian: bool = True, overwrite: bool = True,
validation_folder_name: str = 'validation_raw', debug: bool = False, all_in_gpu: bool = False,
segmentation_export_kwargs: dict = None):
assert self.was_initialized, "must initialize, ideally with checkpoint (or train first)"
current_mode = self.network.training
self.network.eval()
# save whether network is in deep supervision mode or not
ds = self.network.do_ds
# disable deep supervision
self.network.do_ds = False
if segmentation_export_kwargs is None:
if 'segmentation_export_params' in self.plans.keys():
force_separate_z = self.plans['segmentation_export_params']['force_separate_z']
interpolation_order = self.plans['segmentation_export_params']['interpolation_order']
interpolation_order_z = self.plans['segmentation_export_params']['interpolation_order_z']
else:
force_separate_z = None
interpolation_order = 1
interpolation_order_z = 0
else:
force_separate_z = segmentation_export_kwargs['force_separate_z']
interpolation_order = segmentation_export_kwargs['interpolation_order']
interpolation_order_z = segmentation_export_kwargs['interpolation_order_z']
if self.dataset_val is None:
self.load_dataset()
self.do_split()
output_folder = join(self.output_folder, validation_folder_name)
maybe_mkdir_p(output_folder)
# this is for debug purposes
my_input_args = {'do_mirroring': do_mirroring,
'use_sliding_window': use_sliding_window,
'step': step_size,
'save_softmax': save_softmax,
'use_gaussian': use_gaussian,
'overwrite': overwrite,
'validation_folder_name': validation_folder_name,
'debug': debug,
'all_in_gpu': all_in_gpu,
'segmentation_export_kwargs': segmentation_export_kwargs,
}
save_json(my_input_args, join(output_folder, "validation_args.json"))
if do_mirroring:
if not self.data_aug_params['do_mirror']:
raise RuntimeError("We did not train with mirroring so you cannot do inference with mirroring enabled")
mirror_axes = self.data_aug_params['mirror_axes']
else:
mirror_axes = ()
pred_gt_tuples = []
export_pool = Pool(default_num_threads)
results = []
for k in self.dataset_val.keys():
properties = load_pickle(self.dataset[k]['properties_file'])
fname = properties['list_of_data_files'][0].split("/")[-1][:-12]
if overwrite or (not isfile(join(output_folder, fname + ".nii.gz"))) or \
(save_softmax and not isfile(join(output_folder, fname + ".npz"))):
data = np.load(self.dataset[k]['data_file'])['data']
# concat segmentation of previous step
seg_from_prev_stage = np.load(join(self.folder_with_segs_from_prev_stage,
k + "_segFromPrevStage.npz"))['data'][None]
print(k, data.shape)
data[-1][data[-1] == -1] = 0
data_for_net = np.concatenate((data[:-1], to_one_hot(seg_from_prev_stage[0], range(1, self.num_classes))))
softmax_pred = self.predict_preprocessed_data_return_seg_and_softmax(data_for_net,
do_mirroring=do_mirroring,
mirror_axes=mirror_axes,
use_sliding_window=use_sliding_window,
step_size=step_size,
use_gaussian=use_gaussian,
all_in_gpu=all_in_gpu,
mixed_precision=self.fp16)[1]
softmax_pred = softmax_pred.transpose([0] + [i + 1 for i in self.transpose_backward])
if save_softmax:
softmax_fname = join(output_folder, fname + ".npz")
else:
softmax_fname = None
"""There is a problem with python process communication that prevents us from communicating obejcts
larger than 2 GB between processes (basically when the length of the pickle string that will be sent is
communicated by the multiprocessing.Pipe object then the placeholder (\%i I think) does not allow for long
enough strings (lol). This could be fixed by changing i to l (for long) but that would require manually
patching system python code. We circumvent that problem here by saving softmax_pred to a npy file that will
then be read (and finally deleted) by the Process. save_segmentation_nifti_from_softmax can take either
filename or np.ndarray and will handle this automatically"""
if np.prod(softmax_pred.shape) > (2e9 / 4 * 0.85): # *0.85 just to be save
np.save(join(output_folder, fname + ".npy"), softmax_pred)
softmax_pred = join(output_folder, fname + ".npy")
results.append(export_pool.starmap_async(save_segmentation_nifti_from_softmax,
((softmax_pred, join(output_folder, fname + ".nii.gz"),
properties, interpolation_order, None, None, None,
softmax_fname, None, force_separate_z,
interpolation_order_z),
)
)
)
pred_gt_tuples.append([join(output_folder, fname + ".nii.gz"),
join(self.gt_niftis_folder, fname + ".nii.gz")])
_ = [i.get() for i in results]
self.print_to_log_file("finished prediction")
# evaluate raw predictions
self.print_to_log_file("evaluation of raw predictions")
task = self.dataset_directory.split("/")[-1]
job_name = self.experiment_name
_ = aggregate_scores(pred_gt_tuples, labels=list(range(self.num_classes)),
json_output_file=join(output_folder, "summary.json"),
json_name=job_name + " val tiled %s" % (str(use_sliding_window)),
json_author="Fabian",
json_task=task, num_threads=default_num_threads)
# in the old nnunet we would stop here. Now we add a postprocessing. This postprocessing can remove everything
# except the largest connected component for each class. To see if this improves results, we do this for all
# classes and then rerun the evaluation. Those classes for which this resulted in an improved dice score will
# have this applied during inference as well
self.print_to_log_file("determining postprocessing")
determine_postprocessing(self.output_folder, self.gt_niftis_folder, validation_folder_name,
final_subf_name=validation_folder_name + "_postprocessed", debug=debug)
# after this the final predictions for the vlaidation set can be found in validation_folder_name_base + "_postprocessed"
# They are always in that folder, even if no postprocessing as applied!
# detemining postprocesing on a per-fold basis may be OK for this fold but what if another fold finds another
# postprocesing to be better? In this case we need to consolidate. At the time the consolidation is going to be
# done we won't know what self.gt_niftis_folder was, so now we copy all the niftis into a separate folder to
# be used later
gt_nifti_folder = join(self.output_folder_base, "gt_niftis")
maybe_mkdir_p(gt_nifti_folder)
for f in subfiles(self.gt_niftis_folder, suffix=".nii.gz"):
success = False
attempts = 0
e = None
while not success and attempts < 10:
try:
shutil.copy(f, gt_nifti_folder)
success = True
except OSError as e:
attempts += 1
sleep(1)
if not success:
print("Could not copy gt nifti file %s into folder %s" % (f, gt_nifti_folder))
if e is not None:
raise e
# restore network deep supervision mode
self.network.train(current_mode)
self.network.do_ds = ds
| 19,421
| 54.176136
| 128
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/training/network_training/nnUNetTrainerV2.py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import OrderedDict
from typing import Tuple
import numpy as np
import torch
from nnunet.training.loss_functions.deep_supervision import MultipleOutputLoss2
from nnunet.utilities.to_torch import maybe_to_torch, to_cuda
from nnunet.training.data_augmentation.default_data_augmentation import get_moreDA_augmentation
from nnunet.network_architecture.generic_UNet import Generic_UNet
from nnunet.network_architecture.initialization import InitWeights_He
from nnunet.network_architecture.neural_network import SegmentationNetwork
from nnunet.training.data_augmentation.default_data_augmentation import default_2D_augmentation_params, \
get_patch_size, default_3D_augmentation_params
from nnunet.training.dataloading.dataset_loading import unpack_dataset
from nnunet.training.network_training.nnUNetTrainer import nnUNetTrainer
from nnunet.utilities.nd_softmax import softmax_helper
from sklearn.model_selection import KFold
from torch import nn
from torch.cuda.amp import autocast
from nnunet.training.learning_rate.poly_lr import poly_lr
from batchgenerators.utilities.file_and_folder_operations import *
class nnUNetTrainerV2(nnUNetTrainer):
"""
Info for Fabian: same as internal nnUNetTrainerV2_2
"""
def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None,
unpack_data=True, deterministic=True, fp16=False):
super().__init__(plans_file, fold, output_folder, dataset_directory, batch_dice, stage, unpack_data,
deterministic, fp16)
self.max_num_epochs = 1000
self.initial_lr = 1e-2
self.deep_supervision_scales = None
self.ds_loss_weights = None
self.pin_memory = True
def initialize(self, training=True, force_load_plans=False):
"""
- replaced get_default_augmentation with get_moreDA_augmentation
- enforce to only run this code once
- loss function wrapper for deep supervision
:param training:
:param force_load_plans:
:return:
"""
if not self.was_initialized:
maybe_mkdir_p(self.output_folder)
if force_load_plans or (self.plans is None):
self.load_plans_file()
self.process_plans(self.plans)
self.setup_DA_params()
################# Here we wrap the loss for deep supervision ############
# we need to know the number of outputs of the network
net_numpool = len(self.net_num_pool_op_kernel_sizes)
# we give each output a weight which decreases exponentially (division by 2) as the resolution decreases
# this gives higher resolution outputs more weight in the loss
weights = np.array([1 / (2 ** i) for i in range(net_numpool)])
# we don't use the lowest 2 outputs. Normalize weights so that they sum to 1
mask = np.array([True] + [True if i < net_numpool - 1 else False for i in range(1, net_numpool)])
weights[~mask] = 0
weights = weights / weights.sum()
self.ds_loss_weights = weights
# now wrap the loss
self.loss = MultipleOutputLoss2(self.loss, self.ds_loss_weights)
################# END ###################
self.folder_with_preprocessed_data = join(self.dataset_directory, self.plans['data_identifier'] +
"_stage%d" % self.stage)
if training:
self.dl_tr, self.dl_val = self.get_basic_generators()
if self.unpack_data:
print("unpacking dataset")
unpack_dataset(self.folder_with_preprocessed_data)
print("done")
else:
print(
"INFO: Not unpacking data! Training may be slow due to that. Pray you are not using 2d or you "
"will wait all winter for your model to finish!")
self.tr_gen, self.val_gen = get_moreDA_augmentation(
self.dl_tr, self.dl_val,
self.data_aug_params[
'patch_size_for_spatialtransform'],
self.data_aug_params,
deep_supervision_scales=self.deep_supervision_scales,
pin_memory=self.pin_memory
)
self.print_to_log_file("TRAINING KEYS:\n %s" % (str(self.dataset_tr.keys())),
also_print_to_console=False)
self.print_to_log_file("VALIDATION KEYS:\n %s" % (str(self.dataset_val.keys())),
also_print_to_console=False)
else:
pass
self.initialize_network()
self.initialize_optimizer_and_scheduler()
assert isinstance(self.network, (SegmentationNetwork, nn.DataParallel))
else:
self.print_to_log_file('self.was_initialized is True, not running self.initialize again')
self.was_initialized = True
def initialize_network(self):
"""
- momentum 0.99
- SGD instead of Adam
- self.lr_scheduler = None because we do poly_lr
- deep supervision = True
- i am sure I forgot something here
Known issue: forgot to set neg_slope=0 in InitWeights_He; should not make a difference though
:return:
"""
if self.threeD:
conv_op = nn.Conv3d
dropout_op = nn.Dropout3d
norm_op = nn.InstanceNorm3d
else:
conv_op = nn.Conv2d
dropout_op = nn.Dropout2d
norm_op = nn.InstanceNorm2d
norm_op_kwargs = {'eps': 1e-5, 'affine': True}
dropout_op_kwargs = {'p': 0, 'inplace': True}
net_nonlin = nn.LeakyReLU
net_nonlin_kwargs = {'negative_slope': 1e-2, 'inplace': True}
self.network = Generic_UNet(self.num_input_channels, self.base_num_features, self.num_classes,
len(self.net_num_pool_op_kernel_sizes),
self.conv_per_stage, 2, conv_op, norm_op, norm_op_kwargs, dropout_op,
dropout_op_kwargs,
net_nonlin, net_nonlin_kwargs, True, False, lambda x: x, InitWeights_He(1e-2),
self.net_num_pool_op_kernel_sizes, self.net_conv_kernel_sizes, False, True, True)
if torch.cuda.is_available():
self.network.cuda()
self.network.inference_apply_nonlin = softmax_helper
def initialize_optimizer_and_scheduler(self):
assert self.network is not None, "self.initialize_network must be called first"
self.optimizer = torch.optim.SGD(self.network.parameters(), self.initial_lr, weight_decay=self.weight_decay,
momentum=0.99, nesterov=True)
self.lr_scheduler = None
def run_online_evaluation(self, output, target):
"""
due to deep supervision the return value and the reference are now lists of tensors. We only need the full
resolution output because this is what we are interested in in the end. The others are ignored
:param output:
:param target:
:return:
"""
target = target[0]
output = output[0]
return super().run_online_evaluation(output, target)
def validate(self, do_mirroring: bool = True, use_sliding_window: bool = True,
step_size: float = 0.5, save_softmax: bool = True, use_gaussian: bool = True, overwrite: bool = True,
validation_folder_name: str = 'validation_raw', debug: bool = False, all_in_gpu: bool = False,
segmentation_export_kwargs: dict = None):
"""
We need to wrap this because we need to enforce self.network.do_ds = False for prediction
"""
ds = self.network.do_ds
self.network.do_ds = False
ret = super().validate(do_mirroring=do_mirroring, use_sliding_window=use_sliding_window, step_size=step_size,
save_softmax=save_softmax, use_gaussian=use_gaussian,
overwrite=overwrite, validation_folder_name=validation_folder_name, debug=debug,
all_in_gpu=all_in_gpu, segmentation_export_kwargs=segmentation_export_kwargs)
self.network.do_ds = ds
return ret
def predict_preprocessed_data_return_seg_and_softmax(self, data: np.ndarray, do_mirroring: bool = True,
mirror_axes: Tuple[int] = None,
use_sliding_window: bool = True, step_size: float = 0.5,
use_gaussian: bool = True, pad_border_mode: str = 'constant',
pad_kwargs: dict = None, all_in_gpu: bool = False,
verbose: bool = True, mixed_precision=True) -> Tuple[np.ndarray, np.ndarray]:
"""
We need to wrap this because we need to enforce self.network.do_ds = False for prediction
"""
ds = self.network.do_ds
self.network.do_ds = False
ret = super().predict_preprocessed_data_return_seg_and_softmax(data,
do_mirroring=do_mirroring,
mirror_axes=mirror_axes,
use_sliding_window=use_sliding_window,
step_size=step_size, use_gaussian=use_gaussian,
pad_border_mode=pad_border_mode,
pad_kwargs=pad_kwargs, all_in_gpu=all_in_gpu,
verbose=verbose,
mixed_precision=mixed_precision)
self.network.do_ds = ds
return ret
def run_iteration(self, data_generator, do_backprop=True, run_online_evaluation=False):
"""
gradient clipping improves training stability
:param data_generator:
:param do_backprop:
:param run_online_evaluation:
:return:
"""
data_dict = next(data_generator)
data = data_dict['data']
target = data_dict['target']
data = maybe_to_torch(data)
target = maybe_to_torch(target)
if torch.cuda.is_available():
data = to_cuda(data)
target = to_cuda(target)
self.optimizer.zero_grad()
if self.fp16:
with autocast():
output = self.network(data)
del data
l = self.loss(output, target)
if do_backprop:
self.amp_grad_scaler.scale(l).backward()
self.amp_grad_scaler.unscale_(self.optimizer)
torch.nn.utils.clip_grad_norm_(self.network.parameters(), 12)
self.amp_grad_scaler.step(self.optimizer)
self.amp_grad_scaler.update()
else:
output = self.network(data)
del data
l = self.loss(output, target)
if do_backprop:
l.backward()
torch.nn.utils.clip_grad_norm_(self.network.parameters(), 12)
self.optimizer.step()
if run_online_evaluation:
self.run_online_evaluation(output, target)
del target
return l.detach().cpu().numpy()
def do_split(self):
"""
The default split is a 5 fold CV on all available training cases. nnU-Net will create a split (it is seeded,
so always the same) and save it as splits_final.pkl file in the preprocessed data directory.
Sometimes you may want to create your own split for various reasons. For this you will need to create your own
splits_final.pkl file. If this file is present, nnU-Net is going to use it and whatever splits are defined in
it. You can create as many splits in this file as you want. Note that if you define only 4 splits (fold 0-3)
and then set fold=4 when training (that would be the fifth split), nnU-Net will print a warning and proceed to
use a random 80:20 data split.
:return:
"""
if self.fold == "all":
# if fold==all then we use all images for training and validation
tr_keys = val_keys = list(self.dataset.keys())
else:
splits_file = join(self.dataset_directory, "splits_final.pkl")
# if the split file does not exist we need to create it
if not isfile(splits_file):
self.print_to_log_file("Creating new split...")
splits = []
all_keys_sorted = np.sort(list(self.dataset.keys()))
kfold = KFold(n_splits=5, shuffle=True, random_state=12345)
for i, (train_idx, test_idx) in enumerate(kfold.split(all_keys_sorted)):
train_keys = np.array(all_keys_sorted)[train_idx]
test_keys = np.array(all_keys_sorted)[test_idx]
splits.append(OrderedDict())
splits[-1]['train'] = train_keys
splits[-1]['val'] = test_keys
save_pickle(splits, splits_file)
splits = load_pickle(splits_file)
if self.fold < len(splits):
tr_keys = splits[self.fold]['train']
val_keys = splits[self.fold]['val']
else:
self.print_to_log_file("INFO: Requested fold %d but split file only has %d folds. I am now creating a "
"random 80:20 split!" % (self.fold, len(splits)))
# if we request a fold that is not in the split file, create a random 80:20 split
rnd = np.random.RandomState(seed=12345 + self.fold)
keys = np.sort(list(self.dataset.keys()))
idx_tr = rnd.choice(len(keys), int(len(keys) * 0.8), replace=False)
idx_val = [i for i in range(len(keys)) if i not in idx_tr]
tr_keys = [keys[i] for i in idx_tr]
val_keys = [keys[i] for i in idx_val]
tr_keys.sort()
val_keys.sort()
self.dataset_tr = OrderedDict()
for i in tr_keys:
self.dataset_tr[i] = self.dataset[i]
self.dataset_val = OrderedDict()
for i in val_keys:
self.dataset_val[i] = self.dataset[i]
def setup_DA_params(self):
"""
- we increase roation angle from [-15, 15] to [-30, 30]
- scale range is now (0.7, 1.4), was (0.85, 1.25)
- we don't do elastic deformation anymore
:return:
"""
self.deep_supervision_scales = [[1, 1, 1]] + list(list(i) for i in 1 / np.cumprod(
np.vstack(self.net_num_pool_op_kernel_sizes), axis=0))[:-1]
if self.threeD:
self.data_aug_params = default_3D_augmentation_params
self.data_aug_params['rotation_x'] = (-30. / 360 * 2. * np.pi, 30. / 360 * 2. * np.pi)
self.data_aug_params['rotation_y'] = (-30. / 360 * 2. * np.pi, 30. / 360 * 2. * np.pi)
self.data_aug_params['rotation_z'] = (-30. / 360 * 2. * np.pi, 30. / 360 * 2. * np.pi)
if self.do_dummy_2D_aug:
self.data_aug_params["dummy_2D"] = True
self.print_to_log_file("Using dummy2d data augmentation")
self.data_aug_params["elastic_deform_alpha"] = \
default_2D_augmentation_params["elastic_deform_alpha"]
self.data_aug_params["elastic_deform_sigma"] = \
default_2D_augmentation_params["elastic_deform_sigma"]
self.data_aug_params["rotation_x"] = default_2D_augmentation_params["rotation_x"]
else:
self.do_dummy_2D_aug = False
if max(self.patch_size) / min(self.patch_size) > 1.5:
default_2D_augmentation_params['rotation_x'] = (-15. / 360 * 2. * np.pi, 15. / 360 * 2. * np.pi)
self.data_aug_params = default_2D_augmentation_params
self.data_aug_params["mask_was_used_for_normalization"] = self.use_mask_for_norm
if self.do_dummy_2D_aug:
self.basic_generator_patch_size = get_patch_size(self.patch_size[1:],
self.data_aug_params['rotation_x'],
self.data_aug_params['rotation_y'],
self.data_aug_params['rotation_z'],
self.data_aug_params['scale_range'])
self.basic_generator_patch_size = np.array([self.patch_size[0]] + list(self.basic_generator_patch_size))
patch_size_for_spatialtransform = self.patch_size[1:]
else:
self.basic_generator_patch_size = get_patch_size(self.patch_size, self.data_aug_params['rotation_x'],
self.data_aug_params['rotation_y'],
self.data_aug_params['rotation_z'],
self.data_aug_params['scale_range'])
patch_size_for_spatialtransform = self.patch_size
self.data_aug_params["scale_range"] = (0.7, 1.4)
self.data_aug_params["do_elastic"] = False
self.data_aug_params['selected_seg_channels'] = [0]
self.data_aug_params['patch_size_for_spatialtransform'] = patch_size_for_spatialtransform
self.data_aug_params["num_cached_per_thread"] = 2
def maybe_update_lr(self, epoch=None):
"""
if epoch is not None we overwrite epoch. Else we use epoch = self.epoch + 1
(maybe_update_lr is called in on_epoch_end which is called before epoch is incremented.
herefore we need to do +1 here)
:param epoch:
:return:
"""
if epoch is None:
ep = self.epoch + 1
else:
ep = epoch
self.optimizer.param_groups[0]['lr'] = poly_lr(ep, self.max_num_epochs, self.initial_lr, 0.9)
self.print_to_log_file("lr:", np.round(self.optimizer.param_groups[0]['lr'], decimals=6))
def on_epoch_end(self):
"""
overwrite patient-based early stopping. Always run to 1000 epochs
:return:
"""
super().on_epoch_end()
continue_training = self.epoch < self.max_num_epochs
# it can rarely happen that the momentum of nnUNetTrainerV2 is too high for some dataset. If at epoch 100 the
# estimated validation Dice is still 0 then we reduce the momentum from 0.99 to 0.95
if self.epoch == 100:
if self.all_val_eval_metrics[-1] == 0:
self.optimizer.param_groups[0]["momentum"] = 0.95
self.network.apply(InitWeights_He(1e-2))
self.print_to_log_file("At epoch 100, the mean foreground Dice was 0. This can be caused by a too "
"high momentum. High momentum (0.99) is good for datasets where it works, but "
"sometimes causes issues such as this one. Momentum has now been reduced to "
"0.95 and network weights have been reinitialized")
return continue_training
def run_training(self):
"""
if we run with -c then we need to set the correct lr for the first epoch, otherwise it will run the first
continued epoch with self.initial_lr
we also need to make sure deep supervision in the network is enabled for training, thus the wrapper
:return:
"""
self.maybe_update_lr(self.epoch) # if we dont overwrite epoch then self.epoch+1 is used which is not what we
# want at the start of the training
ds = self.network.do_ds
self.network.do_ds = True
ret = super().run_training()
self.network.do_ds = ds
return ret
| 21,273
| 48.018433
| 134
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/training/network_training/__init__.py
|
from __future__ import absolute_import
from . import *
| 54
| 26.5
| 38
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/training/network_training/nnUNetTrainerCascadeFullRes.py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from multiprocessing.pool import Pool
from time import sleep
import matplotlib
from nnunet.postprocessing.connected_components import determine_postprocessing
from nnunet.training.data_augmentation.default_data_augmentation import get_default_augmentation
from nnunet.training.dataloading.dataset_loading import DataLoader3D, unpack_dataset
from nnunet.evaluation.evaluator import aggregate_scores
from nnunet.training.network_training.nnUNetTrainer import nnUNetTrainer
from nnunet.network_architecture.neural_network import SegmentationNetwork
from nnunet.paths import network_training_output_dir
from nnunet.inference.segmentation_export import save_segmentation_nifti_from_softmax
from batchgenerators.utilities.file_and_folder_operations import *
import numpy as np
from nnunet.utilities.one_hot_encoding import to_one_hot
import shutil
matplotlib.use("agg")
class nnUNetTrainerCascadeFullRes(nnUNetTrainer):
    """Trainer for the full-resolution (second) stage of the nnU-Net cascade.

    On top of the plain nnUNetTrainer this trainer feeds the network, as extra
    input channels, a one-hot encoding of the segmentation predicted by the
    corresponding 3d_lowres model (``previous_trainer``). Those low-res
    predictions must already exist in ``<3d_lowres output>/pred_next_stage``;
    otherwise the constructor raises.
    """

    def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None,
                 unpack_data=True, deterministic=True, previous_trainer="nnUNetTrainer", fp16=False):
        super(nnUNetTrainerCascadeFullRes, self).__init__(plans_file, fold, output_folder, dataset_directory,
                                                          batch_dice, stage, unpack_data, deterministic, fp16)
        # stored so checkpoints can re-instantiate the trainer with identical arguments
        self.init_args = (plans_file, fold, output_folder, dataset_directory, batch_dice, stage, unpack_data,
                          deterministic, previous_trainer, fp16)

        if self.output_folder is not None:
            # derive task name and plans identifier from the output folder layout:
            # .../<task>/<trainer>__<plans_identifier>/fold_X
            task = self.output_folder.split("/")[-3]
            plans_identifier = self.output_folder.split("/")[-2].split("__")[-1]

            folder_with_segs_prev_stage = join(network_training_output_dir, "3d_lowres",
                                               task, previous_trainer + "__" + plans_identifier, "pred_next_stage")
            if not isdir(folder_with_segs_prev_stage):
                raise RuntimeError(
                    "Cannot run final stage of cascade. Run corresponding 3d_lowres first and predict the "
                    "segmentations for the next stage")
            self.folder_with_segs_from_prev_stage = folder_with_segs_prev_stage
            # Do not put segs_prev_stage into self.output_folder as we need to unpack them for performance and we
            # don't want to do that in self.output_folder because that one is located on some network drive.
        else:
            self.folder_with_segs_from_prev_stage = None

    def do_split(self):
        """Perform the usual split, then attach each case's low-res prediction file.

        Training cases must have their previous-stage segmentation on disk
        (asserted for self.dataset); dataset_tr/dataset_val share the same
        entries, so only the path is recorded for them.
        """
        super(nnUNetTrainerCascadeFullRes, self).do_split()
        for k in self.dataset:
            self.dataset[k]['seg_from_prev_stage_file'] = join(self.folder_with_segs_from_prev_stage,
                                                               k + "_segFromPrevStage.npz")
            assert isfile(self.dataset[k]['seg_from_prev_stage_file']), \
                "seg from prev stage missing: %s" % (self.dataset[k]['seg_from_prev_stage_file'])
        for k in self.dataset_val:
            self.dataset_val[k]['seg_from_prev_stage_file'] = join(self.folder_with_segs_from_prev_stage,
                                                                   k + "_segFromPrevStage.npz")
        for k in self.dataset_tr:
            self.dataset_tr[k]['seg_from_prev_stage_file'] = join(self.folder_with_segs_from_prev_stage,
                                                                  k + "_segFromPrevStage.npz")

    def get_basic_generators(self):
        """Create the 3D train/val data loaders. The cascade only supports 3D."""
        self.load_dataset()
        self.do_split()
        if self.threeD:
            dl_tr = DataLoader3D(self.dataset_tr, self.basic_generator_patch_size, self.patch_size, self.batch_size,
                                 True, oversample_foreground_percent=self.oversample_foreground_percent)
            dl_val = DataLoader3D(self.dataset_val, self.patch_size, self.patch_size, self.batch_size, True,
                                  oversample_foreground_percent=self.oversample_foreground_percent)
        else:
            raise NotImplementedError
        return dl_tr, dl_val

    def process_plans(self, plans):
        """Process plans and widen the network input by one channel per foreground class."""
        super(nnUNetTrainerCascadeFullRes, self).process_plans(plans)
        self.num_input_channels += (self.num_classes - 1)  # for seg from prev stage

    def setup_DA_params(self):
        """Extend augmentation params with cascade-specific corruption of the prev-stage seg.

        The previous-stage segmentation is deliberately degraded during training
        (random binary morphology, connected-component removal) so the network
        learns not to blindly trust it.
        """
        super().setup_DA_params()
        self.data_aug_params['move_last_seg_chanel_to_data'] = True
        self.data_aug_params['cascade_do_cascade_augmentations'] = True

        self.data_aug_params['cascade_random_binary_transform_p'] = 0.4
        self.data_aug_params['cascade_random_binary_transform_p_per_label'] = 1
        self.data_aug_params['cascade_random_binary_transform_size'] = (1, 8)

        self.data_aug_params['cascade_remove_conn_comp_p'] = 0.2
        self.data_aug_params['cascade_remove_conn_comp_max_size_percent_threshold'] = 0.15
        self.data_aug_params['cascade_remove_conn_comp_fill_with_other_class_p'] = 0.0

        # we have 2 channels now because the segmentation from the previous stage is stored in 'seg' as well until it
        # is moved to 'data' at the end
        self.data_aug_params['selected_seg_channels'] = [0, 1]

        # needed for converting the segmentation from the previous stage to one hot
        self.data_aug_params['all_segmentation_labels'] = list(range(1, self.num_classes))

    def initialize(self, training=True, force_load_plans=False):
        """
        For prediction of test cases just set training=False, this will prevent loading of training data and
        training batchgenerator initialization
        :param training:
        :return:
        """
        if force_load_plans or (self.plans is None):
            self.load_plans_file()

        self.process_plans(self.plans)

        self.setup_DA_params()

        self.folder_with_preprocessed_data = join(self.dataset_directory, self.plans['data_identifier'] +
                                                  "_stage%d" % self.stage)
        if training:
            # NOTE: a second, redundant self.setup_DA_params() call was removed here;
            # it is already invoked unconditionally above.
            if self.folder_with_preprocessed_data is not None:
                self.dl_tr, self.dl_val = self.get_basic_generators()
                if self.unpack_data:
                    print("unpacking dataset")
                    unpack_dataset(self.folder_with_preprocessed_data)
                    print("done")
                else:
                    print(
                        "INFO: Not unpacking data! Training may be slow due to that. Pray you are not using 2d or you "
                        "will wait all winter for your model to finish!")
                self.tr_gen, self.val_gen = get_default_augmentation(self.dl_tr, self.dl_val,
                                                                     self.data_aug_params[
                                                                         'patch_size_for_spatialtransform'],
                                                                     self.data_aug_params)
                self.print_to_log_file("TRAINING KEYS:\n %s" % (str(self.dataset_tr.keys())))
                self.print_to_log_file("VALIDATION KEYS:\n %s" % (str(self.dataset_val.keys())))
        else:
            pass
        self.initialize_network()
        assert isinstance(self.network, SegmentationNetwork)
        self.was_initialized = True

    def validate(self, do_mirroring: bool = True, use_sliding_window: bool = True,
                 step_size: float = 0.5,
                 save_softmax: bool = True, use_gaussian: bool = True, overwrite: bool = True,
                 validation_folder_name: str = 'validation_raw', debug: bool = False, all_in_gpu: bool = False,
                 segmentation_export_kwargs: dict = None):
        """Predict all validation cases (with the prev-stage seg as input), export
        niftis asynchronously, aggregate dice scores and determine postprocessing.

        Results land in <output_folder>/<validation_folder_name>; ground-truth
        niftis are additionally copied to <output_folder_base>/gt_niftis for
        later cross-fold postprocessing consolidation.
        """
        current_mode = self.network.training
        self.network.eval()

        assert self.was_initialized, "must initialize, ideally with checkpoint (or train first)"
        if self.dataset_val is None:
            self.load_dataset()
            self.do_split()

        if segmentation_export_kwargs is None:
            if 'segmentation_export_params' in self.plans.keys():
                force_separate_z = self.plans['segmentation_export_params']['force_separate_z']
                interpolation_order = self.plans['segmentation_export_params']['interpolation_order']
                interpolation_order_z = self.plans['segmentation_export_params']['interpolation_order_z']
            else:
                force_separate_z = None
                interpolation_order = 1
                interpolation_order_z = 0
        else:
            force_separate_z = segmentation_export_kwargs['force_separate_z']
            interpolation_order = segmentation_export_kwargs['interpolation_order']
            interpolation_order_z = segmentation_export_kwargs['interpolation_order_z']

        output_folder = join(self.output_folder, validation_folder_name)
        maybe_mkdir_p(output_folder)

        if do_mirroring:
            mirror_axes = self.data_aug_params['mirror_axes']
        else:
            mirror_axes = ()

        pred_gt_tuples = []

        export_pool = Pool(2)
        results = []

        transpose_backward = self.plans.get('transpose_backward')

        for k in self.dataset_val.keys():
            properties = load_pickle(self.dataset[k]['properties_file'])
            data = np.load(self.dataset[k]['data_file'])['data']

            # concat segmentation of previous step
            seg_from_prev_stage = np.load(join(self.folder_with_segs_from_prev_stage,
                                               k + "_segFromPrevStage.npz"))['data'][None]

            print(data.shape)
            # last channel of 'data' is the ground truth seg; -1 marks "outside nonzero region"
            data[-1][data[-1] == -1] = 0
            data_for_net = np.concatenate((data[:-1], to_one_hot(seg_from_prev_stage[0], range(1, self.num_classes))))

            softmax_pred = self.predict_preprocessed_data_return_seg_and_softmax(data_for_net,
                                                                                 do_mirroring=do_mirroring,
                                                                                 mirror_axes=mirror_axes,
                                                                                 use_sliding_window=use_sliding_window,
                                                                                 step_size=step_size,
                                                                                 use_gaussian=use_gaussian,
                                                                                 all_in_gpu=all_in_gpu,
                                                                                 mixed_precision=self.fp16)[1]

            if transpose_backward is not None:
                transpose_backward = self.plans.get('transpose_backward')
                softmax_pred = softmax_pred.transpose([0] + [i + 1 for i in transpose_backward])

            fname = properties['list_of_data_files'][0].split("/")[-1][:-12]

            if save_softmax:
                softmax_fname = join(output_folder, fname + ".npz")
            else:
                softmax_fname = None

            """There is a problem with python process communication that prevents us from communicating obejcts
            larger than 2 GB between processes (basically when the length of the pickle string that will be sent is
            communicated by the multiprocessing.Pipe object then the placeholder (\%i I think) does not allow for long
            enough strings (lol). This could be fixed by changing i to l (for long) but that would require manually
            patching system python code. We circumvent that problem here by saving softmax_pred to a npy file that will
            then be read (and finally deleted) by the Process. save_segmentation_nifti_from_softmax can take either
            filename or np.ndarray and will handle this automatically"""
            if np.prod(softmax_pred.shape) > (2e9 / 4 * 0.85):  # *0.85 just to be save
                # BUGFIX: write the temporary .npy next to the other validation outputs
                # instead of into the current working directory. The export worker reads
                # (and deletes) this path, so it must not depend on the process cwd.
                np.save(join(output_folder, fname + ".npy"), softmax_pred)
                softmax_pred = join(output_folder, fname + ".npy")
            results.append(export_pool.starmap_async(save_segmentation_nifti_from_softmax,
                                                     ((softmax_pred, join(output_folder, fname + ".nii.gz"),
                                                       properties, interpolation_order, self.regions_class_order,
                                                       None, None,
                                                       softmax_fname, None, force_separate_z,
                                                       interpolation_order_z),
                                                      )
                                                     )
                           )

            pred_gt_tuples.append([join(output_folder, fname + ".nii.gz"),
                                   join(self.gt_niftis_folder, fname + ".nii.gz")])

        # wait for all async exports to finish before aggregating scores
        _ = [i.get() for i in results]

        task = self.dataset_directory.split("/")[-1]
        job_name = self.experiment_name
        _ = aggregate_scores(pred_gt_tuples, labels=list(range(self.num_classes)),
                             json_output_file=join(output_folder, "summary.json"), json_name=job_name,
                             json_author="Fabian", json_description="",
                             json_task=task)

        # in the old nnunet we would stop here. Now we add a postprocessing. This postprocessing can remove everything
        # except the largest connected component for each class. To see if this improves results, we do this for all
        # classes and then rerun the evaluation. Those classes for which this resulted in an improved dice score will
        # have this applied during inference as well
        self.print_to_log_file("determining postprocessing")
        determine_postprocessing(self.output_folder, self.gt_niftis_folder, validation_folder_name,
                                 final_subf_name=validation_folder_name + "_postprocessed", debug=debug)
        # after this the final predictions for the vlaidation set can be found in validation_folder_name_base + "_postprocessed"
        # They are always in that folder, even if no postprocessing as applied!

        # detemining postprocesing on a per-fold basis may be OK for this fold but what if another fold finds another
        # postprocesing to be better? In this case we need to consolidate. At the time the consolidation is going to be
        # done we won't know what self.gt_niftis_folder was, so now we copy all the niftis into a separate folder to
        # be used later
        gt_nifti_folder = join(self.output_folder_base, "gt_niftis")
        maybe_mkdir_p(gt_nifti_folder)
        for f in subfiles(self.gt_niftis_folder, suffix=".nii.gz"):
            success = False
            attempts = 0
            while not success and attempts < 10:
                try:
                    shutil.copy(f, gt_nifti_folder)
                    success = True
                except OSError:
                    # network drives can fail transiently; retry with a short backoff
                    attempts += 1
                    sleep(1)

        self.network.train(current_mode)
        export_pool.close()
        export_pool.join()
| 15,950
| 54.193772
| 128
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/training/network_training/nnUNetTrainerV2_DP.py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from batchgenerators.utilities.file_and_folder_operations import *
from nnunet.network_architecture.generic_UNet_DP import Generic_UNet_DP
from nnunet.training.data_augmentation.default_data_augmentation import get_moreDA_augmentation
from nnunet.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2
from nnunet.utilities.to_torch import maybe_to_torch, to_cuda
from nnunet.network_architecture.initialization import InitWeights_He
from nnunet.network_architecture.neural_network import SegmentationNetwork
from nnunet.training.dataloading.dataset_loading import unpack_dataset
from nnunet.training.network_training.nnUNetTrainer import nnUNetTrainer
from nnunet.utilities.nd_softmax import softmax_helper
from torch import nn
from torch.cuda.amp import autocast
from torch.nn.parallel.data_parallel import DataParallel
from torch.nn.utils import clip_grad_norm_
class nnUNetTrainerV2_DP(nnUNetTrainerV2):
    """nnUNetTrainerV2 variant for single-node multi-GPU training via nn.DataParallel.

    Uses Generic_UNet_DP, whose forward pass returns per-output CE and dice
    statistics (tp/fp/fn) so the loss can be reduced on the host after the
    DataParallel gather; see compute_loss.
    """

    def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None,
                 unpack_data=True, deterministic=True, num_gpus=1, distribute_batch_size=False, fp16=False):
        """Same as nnUNetTrainerV2 plus ``num_gpus`` and ``distribute_batch_size``.

        distribute_batch_size=False multiplies the planned batch size by
        num_gpus (each GPU gets a full planned batch); True keeps the planned
        batch size and splits it across GPUs.
        """
        super(nnUNetTrainerV2_DP, self).__init__(plans_file, fold, output_folder, dataset_directory, batch_dice, stage,
                                                 unpack_data, deterministic, fp16)
        # stored so checkpoints can re-instantiate the trainer with identical arguments
        self.init_args = (plans_file, fold, output_folder, dataset_directory, batch_dice, stage, unpack_data,
                          deterministic, num_gpus, distribute_batch_size, fp16)
        self.num_gpus = num_gpus
        self.distribute_batch_size = distribute_batch_size
        self.dice_smooth = 1e-5  # smoothing term for the soft dice in compute_loss
        self.dice_do_BG = False  # exclude background channel from the dice loss
        # loss is computed manually in compute_loss from the network outputs
        self.loss = None
        self.loss_weights = None  # deep supervision weights, set in initialize()

    def setup_DA_params(self):
        """Scale the number of augmentation worker threads with the GPU count."""
        super(nnUNetTrainerV2_DP, self).setup_DA_params()
        self.data_aug_params['num_threads'] = 8 * self.num_gpus

    def process_plans(self, plans):
        """Process plans, then adapt the batch size to the multi-GPU setting."""
        super(nnUNetTrainerV2_DP, self).process_plans(plans)
        if not self.distribute_batch_size:
            self.batch_size = self.num_gpus * self.plans['plans_per_stage'][self.stage]['batch_size']
        else:
            # batch is split across GPUs; warn when it cannot be split evenly
            if self.batch_size < self.num_gpus:
                print("WARNING: self.batch_size < self.num_gpus. Will not be able to use the GPUs well")
            elif self.batch_size % self.num_gpus != 0:
                print("WARNING: self.batch_size % self.num_gpus != 0. Will not be able to use the GPUs well")

    def initialize(self, training=True, force_load_plans=False):
        """
        - replaced get_default_augmentation with get_moreDA_augmentation
        - only run this code once
        - loss function wrapper for deep supervision
        :param training:
        :param force_load_plans:
        :return:
        """
        if not self.was_initialized:
            maybe_mkdir_p(self.output_folder)

            if force_load_plans or (self.plans is None):
                self.load_plans_file()

            self.process_plans(self.plans)

            self.setup_DA_params()

            ################# Here configure the loss for deep supervision ############
            # weight decays by a factor of 2 per resolution level; the lowest
            # resolution output is masked out (weight 0), then weights are normalized
            net_numpool = len(self.net_num_pool_op_kernel_sizes)
            weights = np.array([1 / (2 ** i) for i in range(net_numpool)])
            mask = np.array([True if i < net_numpool - 1 else False for i in range(net_numpool)])
            weights[~mask] = 0
            weights = weights / weights.sum()
            self.loss_weights = weights
            ################# END ###################

            self.folder_with_preprocessed_data = join(self.dataset_directory, self.plans['data_identifier'] +
                                                      "_stage%d" % self.stage)
            if training:
                self.dl_tr, self.dl_val = self.get_basic_generators()
                if self.unpack_data:
                    print("unpacking dataset")
                    unpack_dataset(self.folder_with_preprocessed_data)
                    print("done")
                else:
                    print(
                        "INFO: Not unpacking data! Training may be slow due to that. Pray you are not using 2d or you "
                        "will wait all winter for your model to finish!")
                self.tr_gen, self.val_gen = get_moreDA_augmentation(self.dl_tr, self.dl_val,
                                                                    self.data_aug_params[
                                                                        'patch_size_for_spatialtransform'],
                                                                    self.data_aug_params,
                                                                    deep_supervision_scales=self.deep_supervision_scales,
                                                                    pin_memory=self.pin_memory)
                self.print_to_log_file("TRAINING KEYS:\n %s" % (str(self.dataset_tr.keys())),
                                       also_print_to_console=False)
                self.print_to_log_file("VALIDATION KEYS:\n %s" % (str(self.dataset_val.keys())),
                                       also_print_to_console=False)
            else:
                pass

            self.initialize_network()
            self.initialize_optimizer_and_scheduler()

            assert isinstance(self.network, (SegmentationNetwork, DataParallel))
        else:
            self.print_to_log_file('self.was_initialized is True, not running self.initialize again')
        self.was_initialized = True

    def initialize_network(self):
        """
        replace genericUNet with the implementation of above for super speeds
        """
        if self.threeD:
            conv_op = nn.Conv3d
            dropout_op = nn.Dropout3d
            norm_op = nn.InstanceNorm3d
        else:
            conv_op = nn.Conv2d
            dropout_op = nn.Dropout2d
            norm_op = nn.InstanceNorm2d

        norm_op_kwargs = {'eps': 1e-5, 'affine': True}
        dropout_op_kwargs = {'p': 0, 'inplace': True}
        net_nonlin = nn.LeakyReLU
        net_nonlin_kwargs = {'negative_slope': 1e-2, 'inplace': True}
        self.network = Generic_UNet_DP(self.num_input_channels, self.base_num_features, self.num_classes,
                                       len(self.net_num_pool_op_kernel_sizes),
                                       self.conv_per_stage, 2, conv_op, norm_op, norm_op_kwargs, dropout_op, dropout_op_kwargs,
                                       net_nonlin, net_nonlin_kwargs, True, False, InitWeights_He(1e-2),
                                       self.net_num_pool_op_kernel_sizes, self.net_conv_kernel_sizes, False, True, True)
        if torch.cuda.is_available():
            self.network.cuda()
        self.network.inference_apply_nonlin = softmax_helper

    def initialize_optimizer_and_scheduler(self):
        """SGD with nesterov momentum; the poly LR schedule is applied manually elsewhere."""
        assert self.network is not None, "self.initialize_network must be called first"
        self.optimizer = torch.optim.SGD(self.network.parameters(), self.initial_lr, weight_decay=self.weight_decay,
                                         momentum=0.99, nesterov=True)
        self.lr_scheduler = None

    def run_training(self):
        """Wrap the network in DataParallel for the duration of training, then unwrap."""
        self.maybe_update_lr(self.epoch)

        # amp must be initialized before DP

        ds = self.network.do_ds
        self.network.do_ds = True
        self.network = DataParallel(self.network, tuple(range(self.num_gpus)), )
        ret = nnUNetTrainer.run_training(self)
        self.network = self.network.module
        self.network.do_ds = ds
        return ret

    def run_iteration(self, data_generator, do_backprop=True, run_online_evaluation=False):
        """One forward/backward pass; the network itself returns the loss ingredients.

        Returns the scalar loss as a numpy value. Gradients are clipped to
        norm 12 before the optimizer step (both fp16 and fp32 paths).
        """
        data_dict = next(data_generator)
        data = data_dict['data']
        target = data_dict['target']

        data = maybe_to_torch(data)
        target = maybe_to_torch(target)

        if torch.cuda.is_available():
            data = to_cuda(data)
            target = to_cuda(target)

        self.optimizer.zero_grad()

        if self.fp16:
            with autocast():
                ret = self.network(data, target, return_hard_tp_fp_fn=run_online_evaluation)
                if run_online_evaluation:
                    ces, tps, fps, fns, tp_hard, fp_hard, fn_hard = ret
                    self.run_online_evaluation(tp_hard, fp_hard, fn_hard)
                else:
                    ces, tps, fps, fns = ret
                del data, target
                l = self.compute_loss(ces, tps, fps, fns)

            if do_backprop:
                self.amp_grad_scaler.scale(l).backward()
                self.amp_grad_scaler.unscale_(self.optimizer)
                torch.nn.utils.clip_grad_norm_(self.network.parameters(), 12)
                self.amp_grad_scaler.step(self.optimizer)
                self.amp_grad_scaler.update()
        else:
            ret = self.network(data, target, return_hard_tp_fp_fn=run_online_evaluation)
            if run_online_evaluation:
                ces, tps, fps, fns, tp_hard, fp_hard, fn_hard = ret
                self.run_online_evaluation(tp_hard, fp_hard, fn_hard)
            else:
                ces, tps, fps, fns = ret
            del data, target
            l = self.compute_loss(ces, tps, fps, fns)

            if do_backprop:
                l.backward()
                torch.nn.utils.clip_grad_norm_(self.network.parameters(), 12)
                self.optimizer.step()

        return l.detach().cpu().numpy()

    def run_online_evaluation(self, tp_hard, fp_hard, fn_hard):
        """Accumulate per-class hard dice statistics (averaged over the DP replicas)."""
        tp_hard = tp_hard.detach().cpu().numpy().mean(0)
        fp_hard = fp_hard.detach().cpu().numpy().mean(0)
        fn_hard = fn_hard.detach().cpu().numpy().mean(0)
        self.online_eval_foreground_dc.append(list((2 * tp_hard) / (2 * tp_hard + fp_hard + fn_hard + 1e-8)))
        self.online_eval_tp.append(list(tp_hard))
        self.online_eval_fp.append(list(fp_hard))
        self.online_eval_fn.append(list(fn_hard))

    def compute_loss(self, ces, tps, fps, fns):
        """Combine CE and soft dice per deep-supervision output, weighted by loss_weights.

        ces/tps/fps/fns are lists (one entry per supervision level) as produced
        by Generic_UNet_DP's forward pass.
        """
        # we now need to effectively reimplement the loss
        loss = None
        for i in range(len(ces)):
            if not self.dice_do_BG:
                # drop the background channel from the dice statistics
                tp = tps[i][:, 1:]
                fp = fps[i][:, 1:]
                fn = fns[i][:, 1:]
            else:
                tp = tps[i]
                fp = fps[i]
                fn = fns[i]

            if self.batch_dice:
                # aggregate over the batch dimension so dice is computed per-class
                # over the whole batch rather than per-sample
                tp = tp.sum(0)
                fp = fp.sum(0)
                fn = fn.sum(0)
            else:
                pass

            nominator = 2 * tp + self.dice_smooth
            denominator = 2 * tp + fp + fn + self.dice_smooth

            dice_loss = (- nominator / denominator).mean()
            if loss is None:
                loss = self.loss_weights[i] * (ces[i].mean() + dice_loss)
            else:
                loss += self.loss_weights[i] * (ces[i].mean() + dice_loss)
        ###########
        return loss
| 11,682
| 44.459144
| 124
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/training/network_training/nnUNetTrainerV2_fp32.py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nnunet.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2
class nnUNetTrainerV2_fp32(nnUNetTrainerV2):
    """
    Info for Fabian: same as internal nnUNetTrainerV2_2
    """
    def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None,
                 unpack_data=True, deterministic=True, fp16=False):
        # Force full precision regardless of the fp16 argument: the parent is
        # always constructed with fp16=False.
        super(nnUNetTrainerV2_fp32, self).__init__(plans_file, fold, output_folder, dataset_directory, batch_dice,
                                                   stage, unpack_data, deterministic, False)
| 1,225
| 42.785714
| 114
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/training/network_training/network_trainer.py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from _warnings import warn
from typing import Tuple
import matplotlib
from batchgenerators.utilities.file_and_folder_operations import *
from nnunet.network_architecture.neural_network import SegmentationNetwork
from sklearn.model_selection import KFold
from torch import nn
from torch.cuda.amp import GradScaler, autocast
from torch.optim.lr_scheduler import _LRScheduler
matplotlib.use("agg")
from time import time, sleep
import torch
import numpy as np
from torch.optim import lr_scheduler
import matplotlib.pyplot as plt
import sys
from collections import OrderedDict
import torch.backends.cudnn as cudnn
from abc import abstractmethod
from datetime import datetime
from tqdm import trange
from nnunet.utilities.to_torch import maybe_to_torch, to_cuda
class NetworkTrainer(object):
    def __init__(self, deterministic=True, fp16=False):
        """
        A generic class that can train almost any neural network (RNNs excluded). It provides basic functionality such
        as the training loop, tracking of training and validation losses (and the target metric if you implement it)
        Training can be terminated early if the validation loss (or the target metric if implemented) do not improve
        anymore. This is based on a moving average (MA) of the loss/metric instead of the raw values to get more smooth
        results.

        What you need to override:
        - __init__
        - initialize
        - run_online_evaluation (optional)
        - finish_online_evaluation (optional)
        - validate
        - predict_test_case

        :param deterministic: seed numpy/torch and disable cudnn benchmarking for reproducibility
        :param fp16: enable mixed precision training (GradScaler created lazily in _maybe_init_amp)
        """
        self.fp16 = fp16
        self.amp_grad_scaler = None

        if deterministic:
            np.random.seed(12345)
            torch.manual_seed(12345)
            if torch.cuda.is_available():
                torch.cuda.manual_seed_all(12345)
            cudnn.deterministic = True
            torch.backends.cudnn.benchmark = False
        else:
            cudnn.deterministic = False
            torch.backends.cudnn.benchmark = True

        ################# SET THESE IN self.initialize() ###################################
        self.network: Tuple[SegmentationNetwork, nn.DataParallel] = None
        self.optimizer = None
        self.lr_scheduler = None
        self.tr_gen = self.val_gen = None
        self.was_initialized = False

        ################# SET THESE IN INIT ################################################
        self.output_folder = None
        self.fold = None
        self.loss = None
        self.dataset_directory = None

        ################# SET THESE IN LOAD_DATASET OR DO_SPLIT ############################
        self.dataset = None  # these can be None for inference mode
        self.dataset_tr = self.dataset_val = None  # do not need to be used, they just appear if you are using the suggested load_dataset_and_do_split

        ################# THESE DO NOT NECESSARILY NEED TO BE MODIFIED #####################
        self.patience = 50
        self.val_eval_criterion_alpha = 0.9  # alpha * old + (1-alpha) * new
        # if this is too low then the moving average will be too noisy and the training may terminate early. If it is
        # too high the training will take forever
        self.train_loss_MA_alpha = 0.93  # alpha * old + (1-alpha) * new
        self.train_loss_MA_eps = 5e-4  # new MA must be at least this much better (smaller)
        self.max_num_epochs = 1000
        self.num_batches_per_epoch = 250
        self.num_val_batches_per_epoch = 50
        self.also_val_in_tr_mode = False
        self.lr_threshold = 1e-6  # the network will not terminate training if the lr is still above this threshold

        ################# LEAVE THESE ALONE ################################################
        # moving averages / bookkeeping maintained by the training loop
        self.val_eval_criterion_MA = None
        self.train_loss_MA = None
        self.best_val_eval_criterion_MA = None
        self.best_MA_tr_loss_for_patience = None
        self.best_epoch_based_on_MA_tr_loss = None
        self.all_tr_losses = []
        self.all_val_losses = []
        self.all_val_losses_tr_mode = []
        self.all_val_eval_metrics = []  # does not have to be used
        self.epoch = 0
        self.log_file = None
        self.deterministic = deterministic

        # opt-in tqdm progress bar via environment variable
        self.use_progress_bar = False
        if 'nnunet_use_progress_bar' in os.environ.keys():
            self.use_progress_bar = bool(int(os.environ['nnunet_use_progress_bar']))

        ################# Settings for saving checkpoints ##################################
        self.save_every = 50
        self.save_latest_only = True  # if false it will not store/overwrite _latest but separate files each
        # time an intermediate checkpoint is created
        self.save_intermediate_checkpoints = True  # whether or not to save checkpoint_latest
        self.save_best_checkpoint = True  # whether or not to save the best checkpoint according to self.best_val_eval_criterion_MA
        self.save_final_checkpoint = True  # whether or not to save the final checkpoint
    @abstractmethod
    def initialize(self, training=True):
        """
        Abstract: subclasses must set up everything needed for training/inference.

        create self.output_folder
        modify self.output_folder if you are doing cross-validation (one folder per fold)
        set self.tr_gen and self.val_gen
        call self.initialize_network and self.initialize_optimizer_and_scheduler (important!)
        finally set self.was_initialized to True
        :param training:
        :return:
        """
    @abstractmethod
    def load_dataset(self):
        """Abstract: subclasses must populate self.dataset (case id -> file info dict)."""
        pass
    def do_split(self):
        """
        This is a suggestion for if your dataset is a dictionary (my personal standard)

        Creates (or loads) a deterministic 5-fold split of self.dataset keys,
        persisted as splits_final.pkl in the dataset directory, and fills
        self.dataset_tr / self.dataset_val for self.fold. fold == "all" trains
        and validates on all cases.
        :return:
        """
        splits_file = join(self.dataset_directory, "splits_final.pkl")
        if not isfile(splits_file):
            # first run for this dataset: create the split once and persist it so
            # every fold/trainer uses identical partitions (seeded KFold)
            self.print_to_log_file("Creating new split...")
            splits = []
            all_keys_sorted = np.sort(list(self.dataset.keys()))
            kfold = KFold(n_splits=5, shuffle=True, random_state=12345)
            for i, (train_idx, test_idx) in enumerate(kfold.split(all_keys_sorted)):
                train_keys = np.array(all_keys_sorted)[train_idx]
                test_keys = np.array(all_keys_sorted)[test_idx]
                splits.append(OrderedDict())
                splits[-1]['train'] = train_keys
                splits[-1]['val'] = test_keys
            save_pickle(splits, splits_file)

        splits = load_pickle(splits_file)

        if self.fold == "all":
            # no held-out set: train and validate on everything
            tr_keys = val_keys = list(self.dataset.keys())
        else:
            tr_keys = splits[self.fold]['train']
            val_keys = splits[self.fold]['val']

        tr_keys.sort()
        val_keys.sort()

        self.dataset_tr = OrderedDict()
        for i in tr_keys:
            self.dataset_tr[i] = self.dataset[i]

        self.dataset_val = OrderedDict()
        for i in val_keys:
            self.dataset_val[i] = self.dataset[i]
    def plot_progress(self):
        """
        Should probably by improved

        Writes progress.png into the output folder: train/val losses on the left
        axis and (if tracked for every epoch) the evaluation metric on a twin
        right axis. Plot failures are logged, never raised.
        :return:
        """
        try:
            font = {'weight': 'normal',
                    'size': 18}

            matplotlib.rc('font', **font)

            fig = plt.figure(figsize=(30, 24))
            ax = fig.add_subplot(111)
            ax2 = ax.twinx()  # second y-axis for the evaluation metric

            x_values = list(range(self.epoch + 1))

            ax.plot(x_values, self.all_tr_losses, color='b', ls='-', label="loss_tr")

            ax.plot(x_values, self.all_val_losses, color='r', ls='-', label="loss_val, train=False")

            if len(self.all_val_losses_tr_mode) > 0:
                ax.plot(x_values, self.all_val_losses_tr_mode, color='g', ls='-', label="loss_val, train=True")
            if len(self.all_val_eval_metrics) == len(x_values):
                # only plotted when a metric was recorded for every epoch
                ax2.plot(x_values, self.all_val_eval_metrics, color='g', ls='--', label="evaluation metric")

            ax.set_xlabel("epoch")
            ax.set_ylabel("loss")
            ax2.set_ylabel("evaluation metric")
            ax.legend()
            ax2.legend(loc=9)

            fig.savefig(join(self.output_folder, "progress.png"))
            plt.close()
        except IOError:
            self.print_to_log_file("failed to plot: ", sys.exc_info())
def print_to_log_file(self, *args, also_print_to_console=True, add_timestamp=True):
timestamp = time()
dt_object = datetime.fromtimestamp(timestamp)
if add_timestamp:
args = ("%s:" % dt_object, *args)
if self.log_file is None:
maybe_mkdir_p(self.output_folder)
timestamp = datetime.now()
self.log_file = join(self.output_folder, "training_log_%d_%d_%d_%02.0d_%02.0d_%02.0d.txt" %
(timestamp.year, timestamp.month, timestamp.day, timestamp.hour, timestamp.minute,
timestamp.second))
with open(self.log_file, 'w') as f:
f.write("Starting... \n")
successful = False
max_attempts = 5
ctr = 0
while not successful and ctr < max_attempts:
try:
with open(self.log_file, 'a+') as f:
for a in args:
f.write(str(a))
f.write(" ")
f.write("\n")
successful = True
except IOError:
print("%s: failed to log: " % datetime.fromtimestamp(timestamp), sys.exc_info())
sleep(0.5)
ctr += 1
if also_print_to_console:
print(*args)
    def save_checkpoint(self, fname, save_optimizer=True):
        """Serialize the training state (network on CPU, optimizer, scheduler,
        loss curves, best-so-far bookkeeping, AMP scaler) to ``fname`` via torch.save.

        :param fname: destination file path
        :param save_optimizer: include the optimizer state dict (set False to shrink the file)
        """
        start_time = time()
        state_dict = self.network.state_dict()
        # move all tensors to CPU so the checkpoint loads on any device
        for key in state_dict.keys():
            state_dict[key] = state_dict[key].cpu()
        lr_sched_state_dct = None
        if self.lr_scheduler is not None and hasattr(self.lr_scheduler,
                                                     'state_dict'):  # not isinstance(self.lr_scheduler, lr_scheduler.ReduceLROnPlateau):
            lr_sched_state_dct = self.lr_scheduler.state_dict()
            # WTF is this!?
            # for key in lr_sched_state_dct.keys():
            #    lr_sched_state_dct[key] = lr_sched_state_dct[key]
        if save_optimizer:
            optimizer_state_dict = self.optimizer.state_dict()
        else:
            optimizer_state_dict = None

        self.print_to_log_file("saving checkpoint...")
        # epoch + 1 so that resuming continues with the next epoch
        save_this = {
            'epoch': self.epoch + 1,
            'state_dict': state_dict,
            'optimizer_state_dict': optimizer_state_dict,
            'lr_scheduler_state_dict': lr_sched_state_dct,
            'plot_stuff': (self.all_tr_losses, self.all_val_losses, self.all_val_losses_tr_mode,
                           self.all_val_eval_metrics),
            'best_stuff' : (self.best_epoch_based_on_MA_tr_loss, self.best_MA_tr_loss_for_patience, self.best_val_eval_criterion_MA)}
        if self.amp_grad_scaler is not None:
            save_this['amp_grad_scaler'] = self.amp_grad_scaler.state_dict()

        torch.save(save_this, fname)
        self.print_to_log_file("done, saving took %.2f seconds" % (time() - start_time))
def load_best_checkpoint(self, train=True):
if self.fold is None:
raise RuntimeError("Cannot load best checkpoint if self.fold is None")
if isfile(join(self.output_folder, "model_best.model")):
self.load_checkpoint(join(self.output_folder, "model_best.model"), train=train)
else:
self.print_to_log_file("WARNING! model_best.model does not exist! Cannot load best checkpoint. Falling "
"back to load_latest_checkpoint")
self.load_latest_checkpoint(train)
def load_latest_checkpoint(self, train=True):
if isfile(join(self.output_folder, "model_final_checkpoint.model")):
return self.load_checkpoint(join(self.output_folder, "model_final_checkpoint.model"), train=train)
if isfile(join(self.output_folder, "model_latest.model")):
return self.load_checkpoint(join(self.output_folder, "model_latest.model"), train=train)
if isfile(join(self.output_folder, "model_best.model")):
return self.load_best_checkpoint(train)
raise RuntimeError("No checkpoint found")
def load_checkpoint(self, fname, train=True):
self.print_to_log_file("loading checkpoint", fname, "train=", train)
if not self.was_initialized:
self.initialize(train)
# saved_model = torch.load(fname, map_location=torch.device('cuda', torch.cuda.current_device()))
saved_model = torch.load(fname, map_location=torch.device('cpu'))
self.load_checkpoint_ram(saved_model, train)
    @abstractmethod
    def initialize_network(self):
        """
        Abstract: subclasses must build the model.

        initialize self.network here
        :return:
        """
        pass
    @abstractmethod
    def initialize_optimizer_and_scheduler(self):
        """
        Abstract: subclasses must create the optimizer (and optional scheduler).

        initialize self.optimizer and self.lr_scheduler (if applicable) here
        :return:
        """
        pass
    def load_checkpoint_ram(self, checkpoint, train=True):
        """
        used for if the checkpoint is already in ram

        Restores network weights (remapping DataParallel 'module.' keys if
        needed), AMP scaler, optimizer/scheduler state (when train=True), the
        epoch counter and the recorded loss/metric curves.
        :param checkpoint: dict as produced by save_checkpoint
        :param train: also restore optimizer and lr scheduler state
        :return:
        """
        if not self.was_initialized:
            self.initialize(train)

        new_state_dict = OrderedDict()
        curr_state_dict_keys = list(self.network.state_dict().keys())
        # if state dict comes form nn.DataParallel but we use non-parallel model here then the state dict keys do not
        # match. Use heuristic to make it match
        for k, value in checkpoint['state_dict'].items():
            key = k
            if key not in curr_state_dict_keys and key.startswith('module.'):
                key = key[7:]
            new_state_dict[key] = value

        if self.fp16:
            # the scaler must exist before its state can be restored
            self._maybe_init_amp()
            if 'amp_grad_scaler' in checkpoint.keys():
                self.amp_grad_scaler.load_state_dict(checkpoint['amp_grad_scaler'])

        self.network.load_state_dict(new_state_dict)
        self.epoch = checkpoint['epoch']
        if train:
            optimizer_state_dict = checkpoint['optimizer_state_dict']
            if optimizer_state_dict is not None:
                self.optimizer.load_state_dict(optimizer_state_dict)

            if self.lr_scheduler is not None and hasattr(self.lr_scheduler, 'load_state_dict') and checkpoint[
                'lr_scheduler_state_dict'] is not None:
                self.lr_scheduler.load_state_dict(checkpoint['lr_scheduler_state_dict'])

            if issubclass(self.lr_scheduler.__class__, _LRScheduler):
                # fast-forward the scheduler to the restored epoch
                self.lr_scheduler.step(self.epoch)

        self.all_tr_losses, self.all_val_losses, self.all_val_losses_tr_mode, self.all_val_eval_metrics = checkpoint[
            'plot_stuff']

        # load best loss (if present)
        if 'best_stuff' in checkpoint.keys():
            self.best_epoch_based_on_MA_tr_loss, self.best_MA_tr_loss_for_patience, self.best_val_eval_criterion_MA = checkpoint[
                'best_stuff']

        # after the training is done, the epoch is incremented one more time in my old code. This results in
        # self.epoch = 1001 for old trained models when the epoch is actually 1000. This causes issues because
        # len(self.all_tr_losses) = 1000 and the plot function will fail. We can easily detect and correct that here
        if self.epoch != len(self.all_tr_losses):
            self.print_to_log_file("WARNING in loading checkpoint: self.epoch != len(self.all_tr_losses). This is "
                                   "due to an old bug and should only appear when you are loading old models. New "
                                   "models should have this fixed! self.epoch is now set to len(self.all_tr_losses)")
            self.epoch = len(self.all_tr_losses)
            self.all_tr_losses = self.all_tr_losses[:self.epoch]
            self.all_val_losses = self.all_val_losses[:self.epoch]
            self.all_val_losses_tr_mode = self.all_val_losses_tr_mode[:self.epoch]
            self.all_val_eval_metrics = self.all_val_eval_metrics[:self.epoch]

        self._maybe_init_amp()
def _maybe_init_amp(self):
if self.fp16 and self.amp_grad_scaler is None and torch.cuda.is_available():
self.amp_grad_scaler = GradScaler()
def plot_network_architecture(self):
"""
can be implemented (see nnUNetTrainer) but does not have to. Not implemented here because it imposes stronger
assumptions on the presence of class variables
:return:
"""
pass
    def run_training(self):
        """Main training loop: per-epoch train/validation passes, moving-average
        bookkeeping, checkpointing and (optional) early stopping.

        Expects self.tr_gen / self.val_gen to exist; calls self.initialize(True)
        if the trainer was not initialized yet. Checkpoints are written into
        self.output_folder.
        """
        # pull one batch from each generator up front (warms up background workers)
        _ = self.tr_gen.next()
        _ = self.val_gen.next()
        if torch.cuda.is_available():
            torch.cuda.empty_cache()
        self._maybe_init_amp()
        maybe_mkdir_p(self.output_folder)
        self.plot_network_architecture()
        if cudnn.benchmark and cudnn.deterministic:
            warn("torch.backends.cudnn.deterministic is True indicating a deterministic training is desired. "
                 "But torch.backends.cudnn.benchmark is True as well and this will prevent deterministic training! "
                 "If you want deterministic then set benchmark=False")
        if not self.was_initialized:
            self.initialize(True)
        while self.epoch < self.max_num_epochs:
            self.print_to_log_file("\nepoch: ", self.epoch)
            epoch_start_time = time()
            train_losses_epoch = []
            # train one epoch
            self.network.train()
            if self.use_progress_bar:
                with trange(self.num_batches_per_epoch) as tbar:
                    for b in tbar:
                        tbar.set_description("Epoch {}/{}".format(self.epoch+1, self.max_num_epochs))
                        l = self.run_iteration(self.tr_gen, True)
                        tbar.set_postfix(loss=l)
                        train_losses_epoch.append(l)
            else:
                for _ in range(self.num_batches_per_epoch):
                    l = self.run_iteration(self.tr_gen, True)
                    train_losses_epoch.append(l)
            self.all_tr_losses.append(np.mean(train_losses_epoch))
            self.print_to_log_file("train loss : %.4f" % self.all_tr_losses[-1])
            with torch.no_grad():
                # validation with train=False
                self.network.eval()
                val_losses = []
                for b in range(self.num_val_batches_per_epoch):
                    l = self.run_iteration(self.val_gen, False, True)
                    val_losses.append(l)
                self.all_val_losses.append(np.mean(val_losses))
                self.print_to_log_file("validation loss: %.4f" % self.all_val_losses[-1])
                if self.also_val_in_tr_mode:
                    self.network.train()
                    # validation with train=True
                    val_losses = []
                    for b in range(self.num_val_batches_per_epoch):
                        l = self.run_iteration(self.val_gen, False)
                        val_losses.append(l)
                    self.all_val_losses_tr_mode.append(np.mean(val_losses))
                    self.print_to_log_file("validation loss (train=True): %.4f" % self.all_val_losses_tr_mode[-1])
            self.update_train_loss_MA()  # needed for lr scheduler and stopping of training
            continue_training = self.on_epoch_end()
            epoch_end_time = time()
            if not continue_training:
                # allows for early stopping
                break
            self.epoch += 1
            self.print_to_log_file("This epoch took %f s\n" % (epoch_end_time - epoch_start_time))
        self.epoch -= 1  # if we don't do this we can get a problem with loading model_final_checkpoint.
        if self.save_final_checkpoint: self.save_checkpoint(join(self.output_folder, "model_final_checkpoint.model"))
        # now we can delete latest as it will be identical with final
        if isfile(join(self.output_folder, "model_latest.model")):
            os.remove(join(self.output_folder, "model_latest.model"))
        if isfile(join(self.output_folder, "model_latest.model.pkl")):
            os.remove(join(self.output_folder, "model_latest.model.pkl"))
    def maybe_update_lr(self):
        """Step the lr scheduler (if one is configured) and log the current lr."""
        # maybe update learning rate
        if self.lr_scheduler is not None:
            assert isinstance(self.lr_scheduler, (lr_scheduler.ReduceLROnPlateau, lr_scheduler._LRScheduler))
            if isinstance(self.lr_scheduler, lr_scheduler.ReduceLROnPlateau):
                # lr scheduler is stepped with the moving average of the train loss. should be more robust
                self.lr_scheduler.step(self.train_loss_MA)
            else:
                self.lr_scheduler.step(self.epoch + 1)
        self.print_to_log_file("lr is now (scheduler) %s" % str(self.optimizer.param_groups[0]['lr']))
    def maybe_save_checkpoint(self):
        """
        Saves a checkpoint every `save_every` epochs: model_latest.model always,
        plus a numbered per-epoch checkpoint unless save_latest_only is set.
        :return:
        """
        if self.save_intermediate_checkpoints and (self.epoch % self.save_every == (self.save_every - 1)):
            self.print_to_log_file("saving scheduled checkpoint file...")
            if not self.save_latest_only:
                self.save_checkpoint(join(self.output_folder, "model_ep_%03.0d.model" % (self.epoch + 1)))
            self.save_checkpoint(join(self.output_folder, "model_latest.model"))
            self.print_to_log_file("done")
def update_eval_criterion_MA(self):
"""
If self.all_val_eval_metrics is unused (len=0) then we fall back to using -self.all_val_losses for the MA to determine early stopping
(not a minimization, but a maximization of a metric and therefore the - in the latter case)
:return:
"""
if self.val_eval_criterion_MA is None:
if len(self.all_val_eval_metrics) == 0:
self.val_eval_criterion_MA = - self.all_val_losses[-1]
else:
self.val_eval_criterion_MA = self.all_val_eval_metrics[-1]
else:
if len(self.all_val_eval_metrics) == 0:
"""
We here use alpha * old - (1 - alpha) * new because new in this case is the vlaidation loss and lower
is better, so we need to negate it.
"""
self.val_eval_criterion_MA = self.val_eval_criterion_alpha * self.val_eval_criterion_MA - (
1 - self.val_eval_criterion_alpha) * \
self.all_val_losses[-1]
else:
self.val_eval_criterion_MA = self.val_eval_criterion_alpha * self.val_eval_criterion_MA + (
1 - self.val_eval_criterion_alpha) * \
self.all_val_eval_metrics[-1]
    def manage_patience(self):
        """Early-stopping and best-checkpoint bookkeeping.

        Saves model_best.model when the validation-criterion MA improves, tracks
        the best train-loss MA, and stops training once no improvement was seen
        for `self.patience` epochs AND the lr has dropped to self.lr_threshold.
        :return: True if training should continue, False to stop.
        """
        # update patience
        continue_training = True
        if self.patience is not None:
            # if best_MA_tr_loss_for_patience and best_epoch_based_on_MA_tr_loss were not yet initialized,
            # initialize them
            if self.best_MA_tr_loss_for_patience is None:
                self.best_MA_tr_loss_for_patience = self.train_loss_MA
            if self.best_epoch_based_on_MA_tr_loss is None:
                self.best_epoch_based_on_MA_tr_loss = self.epoch
            if self.best_val_eval_criterion_MA is None:
                self.best_val_eval_criterion_MA = self.val_eval_criterion_MA
            # check if the current epoch is the best one according to moving average of validation criterion. If so
            # then save 'best' model
            # Do not use this for validation. This is intended for test set prediction only.
            #self.print_to_log_file("current best_val_eval_criterion_MA is %.4f0" % self.best_val_eval_criterion_MA)
            #self.print_to_log_file("current val_eval_criterion_MA is %.4f" % self.val_eval_criterion_MA)
            if self.val_eval_criterion_MA > self.best_val_eval_criterion_MA:
                self.best_val_eval_criterion_MA = self.val_eval_criterion_MA
                #self.print_to_log_file("saving best epoch checkpoint...")
                if self.save_best_checkpoint: self.save_checkpoint(join(self.output_folder, "model_best.model"))
            # Now see if the moving average of the train loss has improved. If yes then reset patience, else
            # increase patience
            if self.train_loss_MA + self.train_loss_MA_eps < self.best_MA_tr_loss_for_patience:
                self.best_MA_tr_loss_for_patience = self.train_loss_MA
                self.best_epoch_based_on_MA_tr_loss = self.epoch
                #self.print_to_log_file("New best epoch (train loss MA): %03.4f" % self.best_MA_tr_loss_for_patience)
            else:
                pass
                #self.print_to_log_file("No improvement: current train MA %03.4f, best: %03.4f, eps is %03.4f" %
                #                       (self.train_loss_MA, self.best_MA_tr_loss_for_patience, self.train_loss_MA_eps))
            # if patience has reached its maximum then finish training (provided lr is low enough)
            if self.epoch - self.best_epoch_based_on_MA_tr_loss > self.patience:
                if self.optimizer.param_groups[0]['lr'] > self.lr_threshold:
                    # lr is still high: grant half a patience window more instead of stopping
                    #self.print_to_log_file("My patience ended, but I believe I need more time (lr > 1e-6)")
                    self.best_epoch_based_on_MA_tr_loss = self.epoch - self.patience // 2
                else:
                    #self.print_to_log_file("My patience ended")
                    continue_training = False
            else:
                pass
                #self.print_to_log_file(
                #    "Patience: %d/%d" % (self.epoch - self.best_epoch_based_on_MA_tr_loss, self.patience))
        return continue_training
def on_epoch_end(self):
self.finish_online_evaluation() # does not have to do anything, but can be used to update self.all_val_eval_
# metrics
self.plot_progress()
self.maybe_update_lr()
self.maybe_save_checkpoint()
self.update_eval_criterion_MA()
continue_training = self.manage_patience()
return continue_training
def update_train_loss_MA(self):
if self.train_loss_MA is None:
self.train_loss_MA = self.all_tr_losses[-1]
else:
self.train_loss_MA = self.train_loss_MA_alpha * self.train_loss_MA + (1 - self.train_loss_MA_alpha) * \
self.all_tr_losses[-1]
    def run_iteration(self, data_generator, do_backprop=True, run_online_evaluation=False):
        """Fetch one batch, run forward pass + loss and optionally backprop.

        :param data_generator: iterator yielding dicts with 'data' and 'target'
        :param do_backprop: if True, backpropagate and take an optimizer step
        :param run_online_evaluation: if True, pass (output, target) to
            self.run_online_evaluation before targets are released
        :return: detached scalar loss as a numpy array
        """
        data_dict = next(data_generator)
        data = data_dict['data']
        target = data_dict['target']
        data = maybe_to_torch(data)
        target = maybe_to_torch(target)
        if torch.cuda.is_available():
            data = to_cuda(data)
            target = to_cuda(target)
        self.optimizer.zero_grad()
        if self.fp16:
            # mixed precision: forward under autocast, scale the loss before backward
            with autocast():
                output = self.network(data)
                del data
                l = self.loss(output, target)
            if do_backprop:
                self.amp_grad_scaler.scale(l).backward()
                self.amp_grad_scaler.step(self.optimizer)
                self.amp_grad_scaler.update()
        else:
            output = self.network(data)
            del data  # free the input tensor as early as possible
            l = self.loss(output, target)
            if do_backprop:
                l.backward()
                self.optimizer.step()
        if run_online_evaluation:
            self.run_online_evaluation(output, target)
        del target
        return l.detach().cpu().numpy()
def run_online_evaluation(self, *args, **kwargs):
"""
Can be implemented, does not have to
:param output_torch:
:param target_npy:
:return:
"""
pass
def finish_online_evaluation(self):
"""
Can be implemented, does not have to
:return:
"""
pass
    @abstractmethod
    def validate(self, *args, **kwargs):
        """Run validation / test-set inference; must be implemented by subclasses."""
        pass
    def find_lr(self, num_iters=1000, init_value=1e-6, final_value=10., beta=0.98):
        """
        LR range test: train while exponentially increasing the learning rate,
        record the smoothed loss and save a loss-vs-lr plot to lr_finder.png.

        stolen and adapted from here: https://sgugger.github.io/how-do-you-find-a-good-learning-rate.html
        :param num_iters: number of training iterations (= lr steps)
        :param init_value: starting learning rate
        :param final_value: final learning rate
        :param beta: smoothing factor for the loss moving average
        :return: (log10 learning rates, smoothed losses)
        """
        import math
        self._maybe_init_amp()
        # multiplicative lr step so that num_iters steps span init_value..final_value
        mult = (final_value / init_value) ** (1 / num_iters)
        lr = init_value
        self.optimizer.param_groups[0]['lr'] = lr
        avg_loss = 0.
        best_loss = 0.
        losses = []
        log_lrs = []
        for batch_num in range(1, num_iters + 1):
            # +1 because this one here is not designed to have negative loss...
            loss = self.run_iteration(self.tr_gen, do_backprop=True, run_online_evaluation=False).data.item() + 1
            # Compute the smoothed loss (bias-corrected exponential moving average)
            avg_loss = beta * avg_loss + (1 - beta) * loss
            smoothed_loss = avg_loss / (1 - beta ** batch_num)
            # Stop if the loss is exploding
            if batch_num > 1 and smoothed_loss > 4 * best_loss:
                break
            # Record the best loss
            if smoothed_loss < best_loss or batch_num == 1:
                best_loss = smoothed_loss
            # Store the values
            losses.append(smoothed_loss)
            log_lrs.append(math.log10(lr))
            # Update the lr for the next step
            lr *= mult
            self.optimizer.param_groups[0]['lr'] = lr
        import matplotlib.pyplot as plt
        lrs = [10 ** i for i in log_lrs]
        fig = plt.figure()
        plt.xscale('log')
        # drop the noisy first/last samples from the plot
        plt.plot(lrs[10:-5], losses[10:-5])
        plt.savefig(join(self.output_folder, "lr_finder.png"))
        plt.close()
        return log_lrs, losses
| 30,849
| 41.376374
| 150
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/training/network_training/nnUNetTrainer.py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import shutil
from collections import OrderedDict
from multiprocessing import Pool
from time import sleep
from typing import Tuple, List
import matplotlib
import nnunet
import numpy as np
import torch
from batchgenerators.utilities.file_and_folder_operations import *
from nnunet.configuration import default_num_threads
from nnunet.evaluation.evaluator import aggregate_scores
from nnunet.inference.segmentation_export import save_segmentation_nifti_from_softmax
from nnunet.network_architecture.generic_UNet import Generic_UNet
from nnunet.network_architecture.initialization import InitWeights_He
from nnunet.network_architecture.neural_network import SegmentationNetwork
from nnunet.postprocessing.connected_components import determine_postprocessing
from nnunet.training.data_augmentation.default_data_augmentation import default_3D_augmentation_params, \
default_2D_augmentation_params, get_default_augmentation, get_patch_size
from nnunet.training.dataloading.dataset_loading import load_dataset, DataLoader3D, DataLoader2D, unpack_dataset
from nnunet.training.loss_functions.dice_loss import DC_and_CE_loss
from nnunet.training.network_training.network_trainer import NetworkTrainer
from nnunet.utilities.nd_softmax import softmax_helper
from nnunet.utilities.tensor_utilities import sum_tensor
from torch import nn
from torch.optim import lr_scheduler
matplotlib.use("agg")
class nnUNetTrainer(NetworkTrainer):
    def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None,
                 unpack_data=True, deterministic=True, fp16=False):
        """
        :param deterministic: passed through to the base trainer (cudnn determinism)
        :param fold: can be either [0 ... 5) for cross-validation, 'all' to train on all available training data or
        None if you wish to load some checkpoint and do inference only
        :param plans_file: the pkl file generated by preprocessing. This file will determine all design choices
        :param subfolder_with_preprocessed_data: must be a subfolder of dataset_directory (just the name of the folder,
        not the entire path). This is where the preprocessed data lies that will be used for network training. We made
        this explicitly available so that differently preprocessed data can coexist and the user can choose what to use.
        Can be None if you are doing inference only.
        :param output_folder: where to store parameters, plot progress and the validation output
        :param dataset_directory: the parent directory in which the preprocessed Task data is stored. This is required
        because the split information is stored in this directory. For running prediction only this input is not
        required and may be set to None
        :param batch_dice: compute dice loss for each sample and average over all samples in the batch or pretend the
        batch is a pseudo volume?
        :param stage: The plans file may contain several stages (used for lowres / highres / pyramid). Stage must be
        specified for training:
        if stage 1 exists then stage 1 is the high resolution stage, otherwise it's 0
        :param unpack_data: if False, npz preprocessed data will not be unpacked to npy. This consumes less space but
        is considerably slower! Running unpack_data=False with 2d should never be done!
        IMPORTANT: If you inherit from nnUNetTrainer and the init args change then you need to redefine self.init_args
        in your init accordingly. Otherwise checkpoints won't load properly!
        """
        super(nnUNetTrainer, self).__init__(deterministic, fp16)
        self.unpack_data = unpack_data
        # init_args is stored so checkpoints can rebuild the trainer with identical arguments
        self.init_args = (plans_file, fold, output_folder, dataset_directory, batch_dice, stage, unpack_data,
                          deterministic, fp16)
        # set through arguments from init
        self.stage = stage
        self.experiment_name = self.__class__.__name__
        self.plans_file = plans_file
        self.output_folder = output_folder
        self.dataset_directory = dataset_directory
        self.output_folder_base = self.output_folder
        self.fold = fold
        self.plans = None
        # if we are running inference only then the self.dataset_directory is set (due to checkpoint loading) but it
        # irrelevant
        if self.dataset_directory is not None and isdir(self.dataset_directory):
            self.gt_niftis_folder = join(self.dataset_directory, "gt_segmentations")
        else:
            self.gt_niftis_folder = None
        self.folder_with_preprocessed_data = None
        # set in self.initialize()
        self.dl_tr = self.dl_val = None
        self.num_input_channels = self.num_classes = self.net_pool_per_axis = self.patch_size = self.batch_size = \
            self.threeD = self.base_num_features = self.intensity_properties = self.normalization_schemes = \
            self.net_num_pool_op_kernel_sizes = self.net_conv_kernel_sizes = None  # loaded automatically from plans_file
        self.basic_generator_patch_size = self.data_aug_params = self.transpose_forward = self.transpose_backward = None
        self.batch_dice = batch_dice
        self.loss = DC_and_CE_loss({'batch_dice': self.batch_dice, 'smooth': 1e-5, 'do_bg': False}, {})
        # accumulators for per-batch online evaluation (see run_online_evaluation)
        self.online_eval_foreground_dc = []
        self.online_eval_tp = []
        self.online_eval_fp = []
        self.online_eval_fn = []
        self.classes = self.do_dummy_2D_aug = self.use_mask_for_norm = self.only_keep_largest_connected_component = \
            self.min_region_size_per_class = self.min_size_per_class = None
        self.inference_pad_border_mode = "constant"
        self.inference_pad_kwargs = {'constant_values': 0}
        self.update_fold(fold)
        self.pad_all_sides = None
        # optimizer / lr schedule hyperparameters (see initialize_optimizer_and_scheduler)
        self.lr_scheduler_eps = 1e-3
        self.lr_scheduler_patience = 30
        self.initial_lr = 3e-4
        self.weight_decay = 3e-5
        self.oversample_foreground_percent = 0.33
        self.conv_per_stage = None
        self.regions_class_order = None
def update_fold(self, fold):
"""
used to swap between folds for inference (ensemble of models from cross-validation)
DO NOT USE DURING TRAINING AS THIS WILL NOT UPDATE THE DATASET SPLIT AND THE DATA AUGMENTATION GENERATORS
:param fold:
:return:
"""
if fold is not None:
if isinstance(fold, str):
assert fold == "all", "if self.fold is a string then it must be \'all\'"
if self.output_folder.endswith("%s" % str(self.fold)):
self.output_folder = self.output_folder_base
self.output_folder = join(self.output_folder, "%s" % str(fold))
else:
if self.output_folder.endswith("fold_%s" % str(self.fold)):
self.output_folder = self.output_folder_base
self.output_folder = join(self.output_folder, "fold_%s" % str(fold))
self.fold = fold
    def setup_DA_params(self):
        """Derive the data-augmentation parameters from dimensionality (threeD),
        patch size and the dummy-2D-augmentation flag."""
        if self.threeD:
            self.data_aug_params = default_3D_augmentation_params
            if self.do_dummy_2D_aug:
                # anisotropic data: use 2D elastic/rotation settings on the in-plane axes
                self.data_aug_params["dummy_2D"] = True
                self.print_to_log_file("Using dummy2d data augmentation")
                self.data_aug_params["elastic_deform_alpha"] = \
                    default_2D_augmentation_params["elastic_deform_alpha"]
                self.data_aug_params["elastic_deform_sigma"] = \
                    default_2D_augmentation_params["elastic_deform_sigma"]
                self.data_aug_params["rotation_x"] = default_2D_augmentation_params["rotation_x"]
        else:
            self.do_dummy_2D_aug = False
            if max(self.patch_size) / min(self.patch_size) > 1.5:
                # NOTE(review): this mutates the shared module-level default dict, which
                # affects every trainer instance in the process — confirm this is intended
                default_2D_augmentation_params['rotation_x'] = (-15. / 360 * 2. * np.pi, 15. / 360 * 2. * np.pi)
            self.data_aug_params = default_2D_augmentation_params
        self.data_aug_params["mask_was_used_for_normalization"] = self.use_mask_for_norm
        if self.do_dummy_2D_aug:
            # basic_generator_patch_size: the (presumably enlarged) patch the loader must
            # provide so spatial transforms do not need padding — see get_patch_size
            self.basic_generator_patch_size = get_patch_size(self.patch_size[1:],
                                                             self.data_aug_params['rotation_x'],
                                                             self.data_aug_params['rotation_y'],
                                                             self.data_aug_params['rotation_z'],
                                                             self.data_aug_params['scale_range'])
            self.basic_generator_patch_size = np.array([self.patch_size[0]] + list(self.basic_generator_patch_size))
            patch_size_for_spatialtransform = self.patch_size[1:]
        else:
            self.basic_generator_patch_size = get_patch_size(self.patch_size, self.data_aug_params['rotation_x'],
                                                             self.data_aug_params['rotation_y'],
                                                             self.data_aug_params['rotation_z'],
                                                             self.data_aug_params['scale_range'])
            patch_size_for_spatialtransform = self.patch_size
        self.data_aug_params['selected_seg_channels'] = [0]
        self.data_aug_params['patch_size_for_spatialtransform'] = patch_size_for_spatialtransform
    def initialize(self, training=True, force_load_plans=False):
        """
        For prediction of test cases just set training=False, this will prevent loading of training data and
        training batchgenerator initialization
        :param training: if True, also build data loaders and augmentation pipelines
        :param force_load_plans: reload the plans file even if plans were already loaded
        :return:
        """
        maybe_mkdir_p(self.output_folder)
        if force_load_plans or (self.plans is None):
            self.load_plans_file()
        self.process_plans(self.plans)
        self.setup_DA_params()
        if training:
            self.folder_with_preprocessed_data = join(self.dataset_directory, self.plans['data_identifier'] +
                                                      "_stage%d" % self.stage)
            self.dl_tr, self.dl_val = self.get_basic_generators()
            if self.unpack_data:
                # unpack npz archives to npy so training-time reads are fast (see warning below)
                self.print_to_log_file("unpacking dataset")
                unpack_dataset(self.folder_with_preprocessed_data)
                self.print_to_log_file("done")
            else:
                self.print_to_log_file(
                    "INFO: Not unpacking data! Training may be slow due to that. Pray you are not using 2d or you "
                    "will wait all winter for your model to finish!")
            self.tr_gen, self.val_gen = get_default_augmentation(self.dl_tr, self.dl_val,
                                                                 self.data_aug_params[
                                                                     'patch_size_for_spatialtransform'],
                                                                 self.data_aug_params)
            self.print_to_log_file("TRAINING KEYS:\n %s" % (str(self.dataset_tr.keys())),
                                   also_print_to_console=False)
            self.print_to_log_file("VALIDATION KEYS:\n %s" % (str(self.dataset_val.keys())),
                                   also_print_to_console=False)
        else:
            pass
        self.initialize_network()
        self.initialize_optimizer_and_scheduler()
        # assert isinstance(self.network, (SegmentationNetwork, nn.DataParallel))
        self.was_initialized = True
    def initialize_network(self):
        """
        This is specific to the U-Net and must be adapted for other network architectures
        :return:
        """
        # self.print_to_log_file(self.net_num_pool_op_kernel_sizes)
        # self.print_to_log_file(self.net_conv_kernel_sizes)
        net_numpool = len(self.net_num_pool_op_kernel_sizes)
        # pick 2D or 3D building blocks depending on the plans-derived dimensionality
        if self.threeD:
            conv_op = nn.Conv3d
            dropout_op = nn.Dropout3d
            norm_op = nn.InstanceNorm3d
        else:
            conv_op = nn.Conv2d
            dropout_op = nn.Dropout2d
            norm_op = nn.InstanceNorm2d
        norm_op_kwargs = {'eps': 1e-5, 'affine': True}
        dropout_op_kwargs = {'p': 0, 'inplace': True}  # p=0: dropout effectively disabled
        net_nonlin = nn.LeakyReLU
        net_nonlin_kwargs = {'negative_slope': 1e-2, 'inplace': True}
        self.network = Generic_UNet(self.num_input_channels, self.base_num_features, self.num_classes, net_numpool,
                                    self.conv_per_stage, 2, conv_op, norm_op, norm_op_kwargs, dropout_op,
                                    dropout_op_kwargs,
                                    net_nonlin, net_nonlin_kwargs, False, False, lambda x: x, InitWeights_He(1e-2),
                                    self.net_num_pool_op_kernel_sizes, self.net_conv_kernel_sizes, False, True, True)
        # the network applies softmax itself only during inference (inference_apply_nonlin)
        self.network.inference_apply_nonlin = softmax_helper
        if torch.cuda.is_available():
            self.network.cuda()
def initialize_optimizer_and_scheduler(self):
assert self.network is not None, "self.initialize_network must be called first"
self.optimizer = torch.optim.Adam(self.network.parameters(), self.initial_lr, weight_decay=self.weight_decay,
amsgrad=True)
self.lr_scheduler = lr_scheduler.ReduceLROnPlateau(self.optimizer, mode='min', factor=0.2,
patience=self.lr_scheduler_patience,
verbose=True, threshold=self.lr_scheduler_eps,
threshold_mode="abs")
    def plot_network_architecture(self):
        """Try to render the network graph to network_architecture.pdf via the
        optional hiddenlayer package; on any failure, log the printed network
        instead. Always empties the CUDA cache afterwards."""
        try:
            from batchgenerators.utilities.file_and_folder_operations import join
            import hiddenlayer as hl
            if torch.cuda.is_available():
                # dummy forward input must live on the same device as the network
                g = hl.build_graph(self.network, torch.rand((1, self.num_input_channels, *self.patch_size)).cuda(),
                                   transforms=None)
            else:
                g = hl.build_graph(self.network, torch.rand((1, self.num_input_channels, *self.patch_size)),
                                   transforms=None)
            g.save(join(self.output_folder, "network_architecture.pdf"))
            del g
        except Exception as e:
            # plotting is best-effort; never fail training because of it
            self.print_to_log_file("Unable to plot network architecture:")
            self.print_to_log_file(e)
            self.print_to_log_file("\nprinting the network instead:\n")
            self.print_to_log_file(self.network)
            self.print_to_log_file("\n")
        finally:
            if torch.cuda.is_available():
                torch.cuda.empty_cache()
    def run_training(self):
        """Dump a debug snapshot of the trainer's attributes and the plans file
        into the output folder, then delegate to the base-class training loop."""
        dct = OrderedDict()
        for k in self.__dir__():
            if not k.startswith("__"):
                if not callable(getattr(self, k)):
                    dct[k] = str(getattr(self, k))
        # remove these entries before dumping debug.json
        del dct['plans']
        del dct['intensity_properties']
        del dct['dataset']
        del dct['dataset_tr']
        del dct['dataset_val']
        save_json(dct, join(self.output_folder, "debug.json"))
        import shutil
        shutil.copy(self.plans_file, join(self.output_folder_base, "plans.pkl"))
        super(nnUNetTrainer, self).run_training()
def load_plans_file(self):
"""
This is what actually configures the entire experiment. The plans file is generated by experiment planning
:return:
"""
self.plans = load_pickle(self.plans_file)
    def process_plans(self, plans):
        """Unpack the plans dict into instance attributes: patch/batch size,
        pooling and conv kernel sizes, normalization schemes, class info,
        transpose axes and dimensionality (self.threeD).

        :param plans: plans dictionary as loaded from the preprocessing pkl
        :raises RuntimeError: if the patch size is neither 2D nor 3D
        """
        if self.stage is None:
            assert len(list(plans['plans_per_stage'].keys())) == 1, \
                "If self.stage is None then there can be only one stage in the plans file. That seems to not be the " \
                "case. Please specify which stage of the cascade must be trained"
            self.stage = list(plans['plans_per_stage'].keys())[0]
        self.plans = plans
        stage_plans = self.plans['plans_per_stage'][self.stage]
        self.batch_size = stage_plans['batch_size']
        self.net_pool_per_axis = stage_plans['num_pool_per_axis']
        self.patch_size = np.array(stage_plans['patch_size']).astype(int)
        self.do_dummy_2D_aug = stage_plans['do_dummy_2D_data_aug']
        if 'pool_op_kernel_sizes' not in stage_plans.keys():
            assert 'num_pool_per_axis' in stage_plans.keys()
            self.print_to_log_file("WARNING! old plans file with missing pool_op_kernel_sizes. Attempting to fix it...")
            # reconstruct per-level pooling kernels from the per-axis pool counts:
            # an axis pools (kernel 2) at level i only for the last j levels
            self.net_num_pool_op_kernel_sizes = []
            for i in range(max(self.net_pool_per_axis)):
                curr = []
                for j in self.net_pool_per_axis:
                    if (max(self.net_pool_per_axis) - j) <= i:
                        curr.append(2)
                    else:
                        curr.append(1)
                self.net_num_pool_op_kernel_sizes.append(curr)
        else:
            self.net_num_pool_op_kernel_sizes = stage_plans['pool_op_kernel_sizes']
        if 'conv_kernel_sizes' not in stage_plans.keys():
            self.print_to_log_file("WARNING! old plans file with missing conv_kernel_sizes. Attempting to fix it...")
            self.net_conv_kernel_sizes = [[3] * len(self.net_pool_per_axis)] * (max(self.net_pool_per_axis) + 1)
        else:
            self.net_conv_kernel_sizes = stage_plans['conv_kernel_sizes']
        self.pad_all_sides = None  # self.patch_size
        self.intensity_properties = plans['dataset_properties']['intensityproperties']
        self.normalization_schemes = plans['normalization_schemes']
        self.base_num_features = plans['base_num_features']
        self.num_input_channels = plans['num_modalities']
        self.num_classes = plans['num_classes'] + 1  # background is no longer in num_classes
        self.classes = plans['all_classes']
        self.use_mask_for_norm = plans['use_mask_for_norm']
        self.only_keep_largest_connected_component = plans['keep_only_largest_region']
        self.min_region_size_per_class = plans['min_region_size_per_class']
        self.min_size_per_class = None  # DONT USE THIS. plans['min_size_per_class']
        if plans.get('transpose_forward') is None or plans.get('transpose_backward') is None:
            print("WARNING! You seem to have data that was preprocessed with a previous version of nnU-Net. "
                  "You should rerun preprocessing. We will proceed and assume that both transpose_foward "
                  "and transpose_backward are [0, 1, 2]. If that is not correct then weird things will happen!")
            plans['transpose_forward'] = [0, 1, 2]
            plans['transpose_backward'] = [0, 1, 2]
        self.transpose_forward = plans['transpose_forward']
        self.transpose_backward = plans['transpose_backward']
        if len(self.patch_size) == 2:
            self.threeD = False
        elif len(self.patch_size) == 3:
            self.threeD = True
        else:
            raise RuntimeError("invalid patch size in plans file: %s" % str(self.patch_size))
        if "conv_per_stage" in plans.keys():  # this has been added to the plans only recently
            self.conv_per_stage = plans['conv_per_stage']
        else:
            self.conv_per_stage = 2
def load_dataset(self):
self.dataset = load_dataset(self.folder_with_preprocessed_data)
    def get_basic_generators(self):
        """Load the dataset, perform the train/val split and build matching
        2D or 3D data loaders.

        :return: (dl_tr, dl_val) — training and validation data loaders
        """
        self.load_dataset()
        self.do_split()
        # training loaders use the enlarged basic_generator_patch_size (room for spatial
        # augmentation); validation loaders sample at the final patch_size directly
        if self.threeD:
            dl_tr = DataLoader3D(self.dataset_tr, self.basic_generator_patch_size, self.patch_size, self.batch_size,
                                 False, oversample_foreground_percent=self.oversample_foreground_percent,
                                 pad_mode="constant", pad_sides=self.pad_all_sides, memmap_mode='r')
            dl_val = DataLoader3D(self.dataset_val, self.patch_size, self.patch_size, self.batch_size, False,
                                  oversample_foreground_percent=self.oversample_foreground_percent,
                                  pad_mode="constant", pad_sides=self.pad_all_sides, memmap_mode='r')
        else:
            dl_tr = DataLoader2D(self.dataset_tr, self.basic_generator_patch_size, self.patch_size, self.batch_size,
                                 oversample_foreground_percent=self.oversample_foreground_percent,
                                 pad_mode="constant", pad_sides=self.pad_all_sides, memmap_mode='r')
            dl_val = DataLoader2D(self.dataset_val, self.patch_size, self.patch_size, self.batch_size,
                                  oversample_foreground_percent=self.oversample_foreground_percent,
                                  pad_mode="constant", pad_sides=self.pad_all_sides, memmap_mode='r')
        return dl_tr, dl_val
    def preprocess_patient(self, input_files):
        """
        Used to predict new unseen data. Not used for the preprocessing of the training/test data
        :param input_files: list of image files belonging to one patient
        :return: (data, seg, properties) as returned by the preprocessor's preprocess_test_case
        """
        from nnunet.training.model_restore import recursive_find_python_class
        preprocessor_name = self.plans.get('preprocessor_name')
        if preprocessor_name is None:
            # plans may not name a preprocessor; fall back by dimensionality
            if self.threeD:
                preprocessor_name = "GenericPreprocessor"
            else:
                preprocessor_name = "PreprocessorFor2D"
        print("using preprocessor", preprocessor_name)
        # resolve the preprocessor class by name anywhere under nnunet.preprocessing
        preprocessor_class = recursive_find_python_class([join(nnunet.__path__[0], "preprocessing")],
                                                         preprocessor_name,
                                                         current_module="nnunet.preprocessing")
        assert preprocessor_class is not None, "Could not find preprocessor %s in nnunet.preprocessing" % \
                                               preprocessor_name
        preprocessor = preprocessor_class(self.normalization_schemes, self.use_mask_for_norm,
                                          self.transpose_forward, self.intensity_properties)
        d, s, properties = preprocessor.preprocess_test_case(input_files,
                                                             self.plans['plans_per_stage'][self.stage][
                                                                 'current_spacing'])
        return d, s, properties
    def preprocess_predict_nifti(self, input_files: List[str], output_file: str = None,
                                 softmax_ouput_file: str = None, mixed_precision: bool = True) -> None:
        """
        Use this to predict new data: preprocess, run sliding-window inference and
        export the segmentation as nifti.
        :param input_files: list of image files for one patient
        :param output_file: where to write the resulting segmentation nifti
        :param softmax_ouput_file: optional path for the softmax output ("ouput" sic — public parameter name)
        :param mixed_precision: run inference with AMP if True
        :return:
        """
        print("preprocessing...")
        d, s, properties = self.preprocess_patient(input_files)
        print("predicting...")
        # index [1] selects the softmax output of (seg, softmax)
        pred = self.predict_preprocessed_data_return_seg_and_softmax(d, do_mirroring=self.data_aug_params["do_mirror"],
                                                                     mirror_axes=self.data_aug_params['mirror_axes'],
                                                                     use_sliding_window=True, step_size=0.5,
                                                                     use_gaussian=True, pad_border_mode='constant',
                                                                     pad_kwargs={'constant_values': 0},
                                                                     verbose=True, all_in_gpu=False,
                                                                     mixed_precision=mixed_precision)[1]
        # reorder spatial axes with transpose_backward (axis 0 = channels stays first);
        # presumably undoes the preprocessing transpose — verify against transpose_forward
        pred = pred.transpose([0] + [i + 1 for i in self.transpose_backward])
        if 'segmentation_export_params' in self.plans.keys():
            force_separate_z = self.plans['segmentation_export_params']['force_separate_z']
            interpolation_order = self.plans['segmentation_export_params']['interpolation_order']
            interpolation_order_z = self.plans['segmentation_export_params']['interpolation_order_z']
        else:
            force_separate_z = None
            interpolation_order = 1
            interpolation_order_z = 0
        print("resampling to original spacing and nifti export...")
        save_segmentation_nifti_from_softmax(pred, output_file, properties, interpolation_order,
                                             self.regions_class_order, None, None, softmax_ouput_file,
                                             None, force_separate_z=force_separate_z,
                                             interpolation_order_z=interpolation_order_z)
        print("done")
def predict_preprocessed_data_return_seg_and_softmax(self, data: np.ndarray, do_mirroring: bool = True,
mirror_axes: Tuple[int] = None,
use_sliding_window: bool = True, step_size: float = 0.5,
use_gaussian: bool = True, pad_border_mode: str = 'constant',
pad_kwargs: dict = None, all_in_gpu: bool = False,
verbose: bool = True, mixed_precision: bool = True) -> Tuple[np.ndarray, np.ndarray]:
"""
:param data:
:param do_mirroring:
:param mirror_axes:
:param use_sliding_window:
:param step_size:
:param use_gaussian:
:param pad_border_mode:
:param pad_kwargs:
:param all_in_gpu:
:param verbose:
:return:
"""
if pad_border_mode == 'constant' and pad_kwargs is None:
pad_kwargs = {'constant_values': 0}
if do_mirroring and mirror_axes is None:
mirror_axes = self.data_aug_params['mirror_axes']
if do_mirroring:
assert self.data_aug_params["do_mirror"], "Cannot do mirroring as test time augmentation when training " \
"was done without mirroring"
valid = list((SegmentationNetwork, nn.DataParallel))
assert isinstance(self.network, tuple(valid))
current_mode = self.network.training
self.network.eval()
ret = self.network.predict_3D(data, do_mirroring=do_mirroring, mirror_axes=mirror_axes,
use_sliding_window=use_sliding_window, step_size=step_size,
patch_size=self.patch_size, regions_class_order=self.regions_class_order,
use_gaussian=use_gaussian, pad_border_mode=pad_border_mode,
pad_kwargs=pad_kwargs, all_in_gpu=all_in_gpu, verbose=verbose,
mixed_precision=mixed_precision)
self.network.train(current_mode)
return ret
def validate(self, do_mirroring: bool = True, use_sliding_window: bool = True, step_size: float = 0.5,
save_softmax: bool = True, use_gaussian: bool = True, overwrite: bool = True,
validation_folder_name: str = 'validation_raw', debug: bool = False, all_in_gpu: bool = False,
segmentation_export_kwargs: dict = None):
"""
if debug=True then the temporary files generated for postprocessing determination will be kept
"""
current_mode = self.network.training
self.network.eval()
assert self.was_initialized, "must initialize, ideally with checkpoint (or train first)"
if self.dataset_val is None:
self.load_dataset()
self.do_split()
if segmentation_export_kwargs is None:
if 'segmentation_export_params' in self.plans.keys():
force_separate_z = self.plans['segmentation_export_params']['force_separate_z']
interpolation_order = self.plans['segmentation_export_params']['interpolation_order']
interpolation_order_z = self.plans['segmentation_export_params']['interpolation_order_z']
else:
force_separate_z = None
interpolation_order = 1
interpolation_order_z = 0
else:
force_separate_z = segmentation_export_kwargs['force_separate_z']
interpolation_order = segmentation_export_kwargs['interpolation_order']
interpolation_order_z = segmentation_export_kwargs['interpolation_order_z']
# predictions as they come from the network go here
output_folder = join(self.output_folder, validation_folder_name)
maybe_mkdir_p(output_folder)
# this is for debug purposes
my_input_args = {'do_mirroring': do_mirroring,
'use_sliding_window': use_sliding_window,
'step_size': step_size,
'save_softmax': save_softmax,
'use_gaussian': use_gaussian,
'overwrite': overwrite,
'validation_folder_name': validation_folder_name,
'debug': debug,
'all_in_gpu': all_in_gpu,
'segmentation_export_kwargs': segmentation_export_kwargs,
}
save_json(my_input_args, join(output_folder, "validation_args.json"))
if do_mirroring:
if not self.data_aug_params['do_mirror']:
raise RuntimeError("We did not train with mirroring so you cannot do inference with mirroring enabled")
mirror_axes = self.data_aug_params['mirror_axes']
else:
mirror_axes = ()
pred_gt_tuples = []
export_pool = Pool(default_num_threads)
results = []
for k in self.dataset_val.keys():
properties = load_pickle(self.dataset[k]['properties_file'])
fname = properties['list_of_data_files'][0].split("/")[-1][:-12]
if overwrite or (not isfile(join(output_folder, fname + ".nii.gz"))) or \
(save_softmax and not isfile(join(output_folder, fname + ".npz"))):
data = np.load(self.dataset[k]['data_file'])['data']
print(k, data.shape)
data[-1][data[-1] == -1] = 0
softmax_pred = self.predict_preprocessed_data_return_seg_and_softmax(data[:-1],
do_mirroring=do_mirroring,
mirror_axes=mirror_axes,
use_sliding_window=use_sliding_window,
step_size=step_size,
use_gaussian=use_gaussian,
all_in_gpu=all_in_gpu,
mixed_precision=self.fp16)[1]
softmax_pred = softmax_pred.transpose([0] + [i + 1 for i in self.transpose_backward])
if save_softmax:
softmax_fname = join(output_folder, fname + ".npz")
else:
softmax_fname = None
"""There is a problem with python process communication that prevents us from communicating obejcts
larger than 2 GB between processes (basically when the length of the pickle string that will be sent is
communicated by the multiprocessing.Pipe object then the placeholder (\%i I think) does not allow for long
enough strings (lol). This could be fixed by changing i to l (for long) but that would require manually
patching system python code. We circumvent that problem here by saving softmax_pred to a npy file that will
then be read (and finally deleted) by the Process. save_segmentation_nifti_from_softmax can take either
filename or np.ndarray and will handle this automatically"""
if np.prod(softmax_pred.shape) > (2e9 / 4 * 0.85): # *0.85 just to be save
np.save(join(output_folder, fname + ".npy"), softmax_pred)
softmax_pred = join(output_folder, fname + ".npy")
results.append(export_pool.starmap_async(save_segmentation_nifti_from_softmax,
((softmax_pred, join(output_folder, fname + ".nii.gz"),
properties, interpolation_order, self.regions_class_order,
None, None,
softmax_fname, None, force_separate_z,
interpolation_order_z),
)
)
)
pred_gt_tuples.append([join(output_folder, fname + ".nii.gz"),
join(self.gt_niftis_folder, fname + ".nii.gz")])
_ = [i.get() for i in results]
self.print_to_log_file("finished prediction")
# evaluate raw predictions
self.print_to_log_file("evaluation of raw predictions")
task = self.dataset_directory.split("/")[-1]
job_name = self.experiment_name
_ = aggregate_scores(pred_gt_tuples, labels=list(range(self.num_classes)),
json_output_file=join(output_folder, "summary.json"),
json_name=job_name + " val tiled %s" % (str(use_sliding_window)),
json_author="Fabian",
json_task=task, num_threads=default_num_threads)
# in the old nnunet we would stop here. Now we add a postprocessing. This postprocessing can remove everything
# except the largest connected component for each class. To see if this improves results, we do this for all
# classes and then rerun the evaluation. Those classes for which this resulted in an improved dice score will
# have this applied during inference as well
self.print_to_log_file("determining postprocessing")
determine_postprocessing(self.output_folder, self.gt_niftis_folder, validation_folder_name,
final_subf_name=validation_folder_name + "_postprocessed", debug=debug)
# after this the final predictions for the vlaidation set can be found in validation_folder_name_base + "_postprocessed"
# They are always in that folder, even if no postprocessing as applied!
# detemining postprocesing on a per-fold basis may be OK for this fold but what if another fold finds another
# postprocesing to be better? In this case we need to consolidate. At the time the consolidation is going to be
# done we won't know what self.gt_niftis_folder was, so now we copy all the niftis into a separate folder to
# be used later
gt_nifti_folder = join(self.output_folder_base, "gt_niftis")
maybe_mkdir_p(gt_nifti_folder)
for f in subfiles(self.gt_niftis_folder, suffix=".nii.gz"):
success = False
attempts = 0
e = None
while not success and attempts < 10:
try:
shutil.copy(f, gt_nifti_folder)
success = True
except OSError as e:
attempts += 1
sleep(1)
if not success:
print("Could not copy gt nifti file %s into folder %s" % (f, gt_nifti_folder))
if e is not None:
raise e
self.network.train(current_mode)
    def run_online_evaluation(self, output, target):
        """
        Accumulate hard per-class tp/fp/fn statistics (for the online Dice estimate) for
        one batch.

        :param output: network logits, shape (b, c, ...) — softmaxed and argmaxed here
        :param target: ground truth labels, shape (b, 1, ...)
        """
        with torch.no_grad():
            num_classes = output.shape[1]
            output_softmax = softmax_helper(output)
            output_seg = output_softmax.argmax(1)
            # drop the channel dimension of the target
            target = target[:, 0]
            # sum over all spatial axes, keep the batch axis
            axes = tuple(range(1, len(target.shape)))
            # one column per foreground class (class 0 = background is skipped below)
            tp_hard = torch.zeros((target.shape[0], num_classes - 1)).to(output_seg.device.index)
            fp_hard = torch.zeros((target.shape[0], num_classes - 1)).to(output_seg.device.index)
            fn_hard = torch.zeros((target.shape[0], num_classes - 1)).to(output_seg.device.index)
            for c in range(1, num_classes):
                tp_hard[:, c - 1] = sum_tensor((output_seg == c).float() * (target == c).float(), axes=axes)
                fp_hard[:, c - 1] = sum_tensor((output_seg == c).float() * (target != c).float(), axes=axes)
                fn_hard[:, c - 1] = sum_tensor((output_seg != c).float() * (target == c).float(), axes=axes)
            # collapse the batch axis; finish_online_evaluation aggregates across batches
            tp_hard = tp_hard.sum(0, keepdim=False).detach().cpu().numpy()
            fp_hard = fp_hard.sum(0, keepdim=False).detach().cpu().numpy()
            fn_hard = fn_hard.sum(0, keepdim=False).detach().cpu().numpy()
            self.online_eval_foreground_dc.append(list((2 * tp_hard) / (2 * tp_hard + fp_hard + fn_hard + 1e-8)))
            self.online_eval_tp.append(list(tp_hard))
            self.online_eval_fp.append(list(fp_hard))
            self.online_eval_fn.append(list(fn_hard))
def finish_online_evaluation(self):
self.online_eval_tp = np.sum(self.online_eval_tp, 0)
self.online_eval_fp = np.sum(self.online_eval_fp, 0)
self.online_eval_fn = np.sum(self.online_eval_fn, 0)
global_dc_per_class = [i for i in [2 * i / (2 * i + j + k) for i, j, k in
zip(self.online_eval_tp, self.online_eval_fp, self.online_eval_fn)]
if not np.isnan(i)]
self.all_val_eval_metrics.append(np.mean(global_dc_per_class))
self.print_to_log_file("Average global foreground Dice:", str(global_dc_per_class))
self.print_to_log_file("(interpret this as an estimate for the Dice of the different classes. This is not "
"exact.)")
self.online_eval_foreground_dc = []
self.online_eval_tp = []
self.online_eval_fp = []
self.online_eval_fn = []
def save_checkpoint(self, fname, save_optimizer=True):
super(nnUNetTrainer, self).save_checkpoint(fname, save_optimizer)
info = OrderedDict()
info['init'] = self.init_args
info['name'] = self.__class__.__name__
info['class'] = str(self.__class__)
info['plans'] = self.plans
write_pickle(info, fname + ".pkl")
| 39,650
| 53.094134
| 142
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/training/network_training/competitions_with_custom_Trainers/BraTS2020/nnUNetTrainerV2BraTSRegions.py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from time import sleep
import numpy as np
import torch
from batchgenerators.utilities.file_and_folder_operations import *
from torch import nn
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.nn.utils import clip_grad_norm_
from nnunet.evaluation.region_based_evaluation import evaluate_regions, get_brats_regions
from nnunet.network_architecture.generic_UNet import Generic_UNet
from nnunet.network_architecture.initialization import InitWeights_He
from nnunet.network_architecture.neural_network import SegmentationNetwork
from nnunet.training.data_augmentation.default_data_augmentation import get_moreDA_augmentation
from nnunet.training.dataloading.dataset_loading import unpack_dataset
from nnunet.training.loss_functions.deep_supervision import MultipleOutputLoss2
from nnunet.training.loss_functions.dice_loss import DC_and_BCE_loss, get_tp_fp_fn_tn, SoftDiceLoss
from nnunet.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2
from nnunet.training.network_training.nnUNetTrainerV2_DDP import nnUNetTrainerV2_DDP
from nnunet.utilities.distributed import awesome_allgather_function
from nnunet.utilities.to_torch import maybe_to_torch, to_cuda
class nnUNetTrainerV2BraTSRegions_BN(nnUNetTrainerV2):
    """nnUNetTrainerV2 variant whose Generic_UNet uses BatchNorm instead of the default
    InstanceNorm. Inference nonlinearity is softmax."""

    def initialize_network(self):
        """Build self.network: a Generic_UNet with BatchNorm, LeakyReLU and no dropout."""
        if self.threeD:
            conv_op, dropout_op, norm_op = nn.Conv3d, nn.Dropout3d, nn.BatchNorm3d
        else:
            conv_op, dropout_op, norm_op = nn.Conv2d, nn.Dropout2d, nn.BatchNorm2d
        self.network = Generic_UNet(
            self.num_input_channels, self.base_num_features, self.num_classes,
            len(self.net_num_pool_op_kernel_sizes), self.conv_per_stage, 2,
            conv_op, norm_op, {'eps': 1e-5, 'affine': True},
            dropout_op, {'p': 0, 'inplace': True},
            nn.LeakyReLU, {'negative_slope': 1e-2, 'inplace': True},
            True, False, lambda x: x, InitWeights_He(1e-2),
            self.net_num_pool_op_kernel_sizes, self.net_conv_kernel_sizes, False, True, True)
        if torch.cuda.is_available():
            self.network.cuda()
        self.network.inference_apply_nonlin = torch.nn.Softmax(1)
class nnUNetTrainerV2BraTSRegions(nnUNetTrainerV2):
    # Trainer for BraTS-style region-based segmentation: the network predicts one sigmoid
    # channel per (overlapping) region instead of mutually exclusive softmax classes.
    def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None,
                 unpack_data=True, deterministic=True, fp16=False):
        super().__init__(plans_file, fold, output_folder, dataset_directory, batch_dice, stage, unpack_data,
                         deterministic, fp16)
        self.regions = get_brats_regions()
        # Order in which region channels are collapsed back into label values on export.
        self.regions_class_order = (1, 2, 3)
        # BCE instead of CE because regions overlap; do_bg=True since all channels are foreground.
        self.loss = DC_and_BCE_loss({}, {'batch_dice': False, 'do_bg': True, 'smooth': 0})
    def process_plans(self, plans):
        super().process_plans(plans)
        """
        The network has as many outputs as we have regions
        """
        self.num_classes = len(self.regions)
    def initialize_network(self):
        """inference_apply_nonlin to sigmoid"""
        super().initialize_network()
        self.network.inference_apply_nonlin = nn.Sigmoid()
    def initialize(self, training=True, force_load_plans=False):
        """
        this is a copy of nnUNetTrainerV2's initialize. We only add the regions to the data augmentation
        :param training:
        :param force_load_plans:
        :return:
        """
        if not self.was_initialized:
            maybe_mkdir_p(self.output_folder)
            if force_load_plans or (self.plans is None):
                self.load_plans_file()
            self.process_plans(self.plans)
            self.setup_DA_params()
            ################# Here we wrap the loss for deep supervision ############
            # we need to know the number of outputs of the network
            net_numpool = len(self.net_num_pool_op_kernel_sizes)
            # we give each output a weight which decreases exponentially (division by 2) as the resolution decreases
            # this gives higher resolution outputs more weight in the loss
            weights = np.array([1 / (2 ** i) for i in range(net_numpool)])
            # we don't use the lowest resolution output. Normalize weights so that they sum to 1
            mask = np.array([True if i < net_numpool - 1 else False for i in range(net_numpool)])
            weights[~mask] = 0
            weights = weights / weights.sum()
            self.ds_loss_weights = weights
            # now wrap the loss
            self.loss = MultipleOutputLoss2(self.loss, self.ds_loss_weights)
            ################# END ###################
            self.folder_with_preprocessed_data = join(self.dataset_directory, self.plans['data_identifier'] +
                                                      "_stage%d" % self.stage)
            if training:
                self.dl_tr, self.dl_val = self.get_basic_generators()
                if self.unpack_data:
                    print("unpacking dataset")
                    unpack_dataset(self.folder_with_preprocessed_data)
                    print("done")
                else:
                    print(
                        "INFO: Not unpacking data! Training may be slow due to that. Pray you are not using 2d or you "
                        "will wait all winter for your model to finish!")
                # regions=self.regions is the only difference to nnUNetTrainerV2.initialize
                self.tr_gen, self.val_gen = get_moreDA_augmentation(self.dl_tr, self.dl_val,
                                                                    self.data_aug_params[
                                                                        'patch_size_for_spatialtransform'],
                                                                    self.data_aug_params,
                                                                    deep_supervision_scales=self.deep_supervision_scales,
                                                                    regions=self.regions)
                self.print_to_log_file("TRAINING KEYS:\n %s" % (str(self.dataset_tr.keys())),
                                       also_print_to_console=False)
                self.print_to_log_file("VALIDATION KEYS:\n %s" % (str(self.dataset_val.keys())),
                                       also_print_to_console=False)
            else:
                pass
            self.initialize_network()
            self.initialize_optimizer_and_scheduler()
            assert isinstance(self.network, (SegmentationNetwork, nn.DataParallel))
        else:
            self.print_to_log_file('self.was_initialized is True, not running self.initialize again')
        self.was_initialized = True
    def validate(self, do_mirroring: bool = True, use_sliding_window: bool = True,
                 step_size: float = 0.5, save_softmax: bool = True, use_gaussian: bool = True, overwrite: bool = True,
                 validation_folder_name: str = 'validation_raw', debug: bool = False, all_in_gpu: bool = False,
                 segmentation_export_kwargs: dict = None):
        # Standard nnU-Net validation, followed by the BraTS region-wise evaluation.
        super().validate(do_mirroring=do_mirroring, use_sliding_window=use_sliding_window, step_size=step_size,
                         save_softmax=save_softmax, use_gaussian=use_gaussian,
                         overwrite=overwrite, validation_folder_name=validation_folder_name, debug=debug,
                         all_in_gpu=all_in_gpu, segmentation_export_kwargs=segmentation_export_kwargs)
        # run brats specific validation
        output_folder = join(self.output_folder, validation_folder_name)
        evaluate_regions(output_folder, self.gt_niftis_folder, self.regions)
    def run_online_evaluation(self, output, target):
        # Deep supervision: only the highest-resolution output/target pair is evaluated.
        output = output[0]
        target = target[0]
        with torch.no_grad():
            out_sigmoid = torch.sigmoid(output)
            # hard (thresholded) predictions for the online Dice estimate
            out_sigmoid = (out_sigmoid > 0.5).float()
            if self.threeD:
                axes = (0, 2, 3, 4)
            else:
                axes = (0, 2, 3)
            tp, fp, fn, _ = get_tp_fp_fn_tn(out_sigmoid, target, axes=axes)
            tp_hard = tp.detach().cpu().numpy()
            fp_hard = fp.detach().cpu().numpy()
            fn_hard = fn.detach().cpu().numpy()
            self.online_eval_foreground_dc.append(list((2 * tp_hard) / (2 * tp_hard + fp_hard + fn_hard + 1e-8)))
            self.online_eval_tp.append(list(tp_hard))
            self.online_eval_fp.append(list(fp_hard))
            self.online_eval_fn.append(list(fn_hard))
class nnUNetTrainerV2BraTSRegions_Dice(nnUNetTrainerV2BraTSRegions):
    """Region-based BraTS trainer that optimizes pure soft Dice (no BCE term)."""

    def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None,
                 unpack_data=True, deterministic=True, fp16=False):
        super().__init__(plans_file, fold, output_folder, dataset_directory, batch_dice, stage, unpack_data,
                         deterministic, fp16)
        # Same Dice settings as the parent's DC_and_BCE_loss, without the BCE component.
        self.loss = SoftDiceLoss(apply_nonlin=torch.sigmoid, batch_dice=False, do_bg=True, smooth=0)
class nnUNetTrainerV2BraTSRegions_DDP(nnUNetTrainerV2_DDP):
    # Distributed (DDP) trainer for BraTS region-based segmentation: one sigmoid channel
    # per overlapping region, Dice computed manually in run_iteration so it can be
    # allgathered across ranks for proper batch dice.
    def __init__(self, plans_file, fold, local_rank, output_folder=None, dataset_directory=None, batch_dice=True,
                 stage=None,
                 unpack_data=True, deterministic=True, distribute_batch_size=False, fp16=False):
        super().__init__(plans_file, fold, local_rank, output_folder, dataset_directory, batch_dice, stage, unpack_data,
                         deterministic, distribute_batch_size, fp16)
        self.regions = get_brats_regions()
        # Order in which region channels are collapsed back into label values on export.
        self.regions_class_order = (1, 2, 3)
        # No combined loss object: the Dice part is computed inline in run_iteration.
        self.loss = None
        self.ce_loss = nn.BCEWithLogitsLoss()
    def process_plans(self, plans):
        super().process_plans(plans)
        """
        The network has as many outputs as we have regions
        """
        self.num_classes = len(self.regions)
    def initialize_network(self):
        """inference_apply_nonlin to sigmoid"""
        super().initialize_network()
        self.network.inference_apply_nonlin = nn.Sigmoid()
    def initialize(self, training=True, force_load_plans=False):
        """
        this is a copy of nnUNetTrainerV2's initialize. We only add the regions to the data augmentation
        :param training:
        :param force_load_plans:
        :return:
        """
        if not self.was_initialized:
            maybe_mkdir_p(self.output_folder)
            if force_load_plans or (self.plans is None):
                self.load_plans_file()
            self.process_plans(self.plans)
            self.setup_DA_params()
            self.folder_with_preprocessed_data = join(self.dataset_directory, self.plans['data_identifier'] +
                                                      "_stage%d" % self.stage)
            if training:
                self.dl_tr, self.dl_val = self.get_basic_generators()
                if self.unpack_data:
                    # Only rank 0 unpacks; the other ranks poll until all .npy files exist.
                    if self.local_rank == 0:
                        print("unpacking dataset")
                        unpack_dataset(self.folder_with_preprocessed_data)
                        print("done")
                    else:
                        # we need to wait until worker 0 has finished unpacking
                        npz_files = subfiles(self.folder_with_preprocessed_data, suffix=".npz", join=False)
                        case_ids = [i[:-4] for i in npz_files]
                        all_present = all(
                            [isfile(join(self.folder_with_preprocessed_data, i + ".npy")) for i in case_ids])
                        while not all_present:
                            print("worker", self.local_rank, "is waiting for unpacking")
                            sleep(3)
                            all_present = all(
                                [isfile(join(self.folder_with_preprocessed_data, i + ".npy")) for i in case_ids])
                        # there is some slight chance that there may arise some error because dataloader are loading a file
                        # that is still being written by worker 0. We ignore this for now an address it only if it becomes
                        # relevant
                        # (this can occur because while worker 0 writes the file is technically present so the other workers
                        # will proceed and eventually try to read it)
                else:
                    print(
                        "INFO: Not unpacking data! Training may be slow due to that. Pray you are not using 2d or you "
                        "will wait all winter for your model to finish!")
                # setting weights for deep supervision losses
                net_numpool = len(self.net_num_pool_op_kernel_sizes)
                # we give each output a weight which decreases exponentially (division by 2) as the resolution decreases
                # this gives higher resolution outputs more weight in the loss
                weights = np.array([1 / (2 ** i) for i in range(net_numpool)])
                # we don't use the lowest resolution output. Normalize weights so that they sum to 1
                mask = np.array([True if i < net_numpool - 1 else False for i in range(net_numpool)])
                weights[~mask] = 0
                weights = weights / weights.sum()
                self.ds_loss_weights = weights
                # NOTE(review): np.random.random_integers is deprecated (removed in NumPy >= 1.25);
                # switching to np.random.randint(0, 100000, ...) is required when upgrading NumPy.
                seeds_train = np.random.random_integers(0, 99999, self.data_aug_params.get('num_threads'))
                seeds_val = np.random.random_integers(0, 99999, max(self.data_aug_params.get('num_threads') // 2, 1))
                print("seeds train", seeds_train)
                print("seeds_val", seeds_val)
                self.tr_gen, self.val_gen = get_moreDA_augmentation(self.dl_tr, self.dl_val,
                                                                    self.data_aug_params[
                                                                        'patch_size_for_spatialtransform'],
                                                                    self.data_aug_params,
                                                                    deep_supervision_scales=self.deep_supervision_scales,
                                                                    seeds_train=seeds_train,
                                                                    seeds_val=seeds_val,
                                                                    pin_memory=self.pin_memory,
                                                                    regions=self.regions)
                self.print_to_log_file("TRAINING KEYS:\n %s" % (str(self.dataset_tr.keys())),
                                       also_print_to_console=False)
                self.print_to_log_file("VALIDATION KEYS:\n %s" % (str(self.dataset_val.keys())),
                                       also_print_to_console=False)
            else:
                pass
            self.initialize_network()
            self.initialize_optimizer_and_scheduler()
            self._maybe_init_amp()
            # wrap the network for distributed training
            self.network = DDP(self.network, self.local_rank)
        else:
            self.print_to_log_file('self.was_initialized is True, not running self.initialize again')
        self.was_initialized = True
    def validate(self, do_mirroring: bool = True, use_sliding_window: bool = True,
                 step_size: float = 0.5, save_softmax: bool = True, use_gaussian: bool = True, overwrite: bool = True,
                 validation_folder_name: str = 'validation_raw', debug: bool = False, all_in_gpu: bool = False,
                 segmentation_export_kwargs: dict = None):
        # Standard validation, followed by the BraTS region-wise evaluation.
        super().validate(do_mirroring=do_mirroring, use_sliding_window=use_sliding_window, step_size=step_size,
                         save_softmax=save_softmax, use_gaussian=use_gaussian,
                         overwrite=overwrite, validation_folder_name=validation_folder_name, debug=debug,
                         all_in_gpu=all_in_gpu, segmentation_export_kwargs=segmentation_export_kwargs)
        # run brats specific validation
        output_folder = join(self.output_folder, validation_folder_name)
        evaluate_regions(output_folder, self.gt_niftis_folder, self.regions)
    def run_iteration(self, data_generator, do_backprop=True, run_online_evaluation=False):
        # NOTE(review): deliberately disabled. The body below predates torch.cuda.amp and
        # references `amp`, which is not imported in this module; everything after the raise
        # is dead code kept for reference until the class is ported to pytorch amp.
        raise NotImplementedError("this class has not been changed to work with pytorch amp yet!")
        data_dict = next(data_generator)
        data = data_dict['data']
        target = data_dict['target']
        data = maybe_to_torch(data)
        target = maybe_to_torch(target)
        if torch.cuda.is_available():
            data = to_cuda(data, gpu_id=None)
            target = to_cuda(target, gpu_id=None)
        self.optimizer.zero_grad()
        output = self.network(data)
        del data
        total_loss = None
        for i in range(len(output)):
            # Starting here it gets spicy!
            axes = tuple(range(2, len(output[i].size())))
            # network does not do softmax. We need to do softmax for dice
            output_softmax = torch.sigmoid(output[i])
            # get the tp, fp and fn terms we need
            tp, fp, fn, _ = get_tp_fp_fn_tn(output_softmax, target[i], axes, mask=None)
            # for dice, compute nominator and denominator so that we have to accumulate only 2 instead of 3 variables
            # do_bg=False in nnUNetTrainer -> [:, 1:]
            nominator = 2 * tp[:, 1:]
            denominator = 2 * tp[:, 1:] + fp[:, 1:] + fn[:, 1:]
            if self.batch_dice:
                # for DDP we need to gather all nominator and denominator terms from all GPUS to do proper batch dice
                nominator = awesome_allgather_function.apply(nominator)
                denominator = awesome_allgather_function.apply(denominator)
                nominator = nominator.sum(0)
                denominator = denominator.sum(0)
            else:
                pass
            ce_loss = self.ce_loss(output[i], target[i])
            # we smooth by 1e-5 to penalize false positives if tp is 0
            dice_loss = (- (nominator + 1e-5) / (denominator + 1e-5)).mean()
            if total_loss is None:
                total_loss = self.ds_loss_weights[i] * (ce_loss + dice_loss)
            else:
                total_loss += self.ds_loss_weights[i] * (ce_loss + dice_loss)
        if run_online_evaluation:
            with torch.no_grad():
                output = output[0]
                target = target[0]
                out_sigmoid = torch.sigmoid(output)
                out_sigmoid = (out_sigmoid > 0.5).float()
                if self.threeD:
                    axes = (2, 3, 4)
                else:
                    axes = (2, 3)
                tp, fp, fn, _ = get_tp_fp_fn_tn(out_sigmoid, target, axes=axes)
                tp_hard = awesome_allgather_function.apply(tp)
                fp_hard = awesome_allgather_function.apply(fp)
                fn_hard = awesome_allgather_function.apply(fn)
                # print_if_rank0("after allgather", tp_hard.shape)
                # print_if_rank0("after sum", tp_hard.shape)
                self.run_online_evaluation(tp_hard.detach().cpu().numpy().sum(0),
                                           fp_hard.detach().cpu().numpy().sum(0),
                                           fn_hard.detach().cpu().numpy().sum(0))
        del target
        if do_backprop:
            if not self.fp16 or amp is None or not torch.cuda.is_available():
                total_loss.backward()
            else:
                with amp.scale_loss(total_loss, self.optimizer) as scaled_loss:
                    scaled_loss.backward()
            _ = clip_grad_norm_(self.network.parameters(), 12)
            self.optimizer.step()
        return total_loss.detach().cpu().numpy()
    def run_online_evaluation(self, tp, fp, fn):
        # Unlike the non-DDP variant this receives already-reduced tp/fp/fn numpy arrays.
        self.online_eval_foreground_dc.append(list((2 * tp) / (2 * tp + fp + fn + 1e-8)))
        self.online_eval_tp.append(list(tp))
        self.online_eval_fp.append(list(fp))
        self.online_eval_fn.append(list(fn))
| 21,055
| 49.252983
| 124
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/training/network_training/competitions_with_custom_Trainers/BraTS2020/nnUNetTrainerV2BraTSRegions_moreDA.py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from batchgenerators.utilities.file_and_folder_operations import *
from torch import nn
from nnunet.evaluation.region_based_evaluation import evaluate_regions, get_brats_regions
from nnunet.network_architecture.generic_UNet import Generic_UNet
from nnunet.network_architecture.initialization import InitWeights_He
from nnunet.network_architecture.neural_network import SegmentationNetwork
from nnunet.training.data_augmentation.default_data_augmentation import default_3D_augmentation_params, \
default_2D_augmentation_params, get_patch_size
from nnunet.training.dataloading.dataset_loading import unpack_dataset
from nnunet.training.loss_functions.deep_supervision import MultipleOutputLoss2
from nnunet.training.loss_functions.dice_loss import DC_and_BCE_loss, get_tp_fp_fn_tn
from nnunet.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2
from nnunet.training.network_training.nnUNet_variants.data_augmentation.nnUNetTrainerV2_DA3 import \
nnUNetTrainerV2_DA3_BN, get_insaneDA_augmentation2
class nnUNetTrainerV2BraTSRegions_DA3_BN(nnUNetTrainerV2_DA3_BN):
    # Region-based BraTS trainer on top of the DA3 ("insane" data augmentation) BatchNorm
    # trainer: one sigmoid channel per overlapping region, Dice+BCE loss.
    def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None,
                 unpack_data=True, deterministic=True, fp16=False):
        super().__init__(plans_file, fold, output_folder, dataset_directory, batch_dice, stage, unpack_data,
                         deterministic, fp16)
        self.regions = get_brats_regions()
        # Order in which region channels are collapsed back into label values on export.
        self.regions_class_order = (1, 2, 3)
        # BCE instead of CE because regions overlap; do_bg=True since all channels are foreground.
        self.loss = DC_and_BCE_loss({}, {'batch_dice': False, 'do_bg': True, 'smooth': 0})
    def process_plans(self, plans):
        super().process_plans(plans)
        """
        The network has as many outputs as we have regions
        """
        self.num_classes = len(self.regions)
    def initialize_network(self):
        """inference_apply_nonlin to sigmoid"""
        super().initialize_network()
        self.network.inference_apply_nonlin = nn.Sigmoid()
    def initialize(self, training=True, force_load_plans=False):
        # Same as nnUNetTrainerV2.initialize but wires self.regions into the (insaneDA)
        # augmentation pipeline.
        if not self.was_initialized:
            maybe_mkdir_p(self.output_folder)
            if force_load_plans or (self.plans is None):
                self.load_plans_file()
            self.process_plans(self.plans)
            self.setup_DA_params()
            ################# Here we wrap the loss for deep supervision ############
            # we need to know the number of outputs of the network
            net_numpool = len(self.net_num_pool_op_kernel_sizes)
            # we give each output a weight which decreases exponentially (division by 2) as the resolution decreases
            # this gives higher resolution outputs more weight in the loss
            weights = np.array([1 / (2 ** i) for i in range(net_numpool)])
            # we don't use the lowest resolution output. Normalize weights so that they sum to 1
            mask = np.array([True] + [True if i < net_numpool - 1 else False for i in range(1, net_numpool)])
            weights[~mask] = 0
            weights = weights / weights.sum()
            self.ds_loss_weights = weights
            # now wrap the loss
            self.loss = MultipleOutputLoss2(self.loss, self.ds_loss_weights)
            ################# END ###################
            self.folder_with_preprocessed_data = join(self.dataset_directory, self.plans['data_identifier'] +
                                                      "_stage%d" % self.stage)
            if training:
                self.dl_tr, self.dl_val = self.get_basic_generators()
                if self.unpack_data:
                    print("unpacking dataset")
                    unpack_dataset(self.folder_with_preprocessed_data)
                    print("done")
                else:
                    print(
                        "INFO: Not unpacking data! Training may be slow due to that. Pray you are not using 2d or you "
                        "will wait all winter for your model to finish!")
                self.tr_gen, self.val_gen = get_insaneDA_augmentation2(
                    self.dl_tr, self.dl_val,
                    self.data_aug_params[
                        'patch_size_for_spatialtransform'],
                    self.data_aug_params,
                    deep_supervision_scales=self.deep_supervision_scales,
                    pin_memory=self.pin_memory,
                    regions=self.regions
                )
                self.print_to_log_file("TRAINING KEYS:\n %s" % (str(self.dataset_tr.keys())),
                                       also_print_to_console=False)
                self.print_to_log_file("VALIDATION KEYS:\n %s" % (str(self.dataset_val.keys())),
                                       also_print_to_console=False)
            else:
                pass
            self.initialize_network()
            self.initialize_optimizer_and_scheduler()
            assert isinstance(self.network, (SegmentationNetwork, nn.DataParallel))
        else:
            self.print_to_log_file('self.was_initialized is True, not running self.initialize again')
        self.was_initialized = True
    def validate(self, do_mirroring: bool = True, use_sliding_window: bool = True,
                 step_size: float = 0.5, save_softmax: bool = True, use_gaussian: bool = True, overwrite: bool = True,
                 validation_folder_name: str = 'validation_raw', debug: bool = False, all_in_gpu: bool = False,
                 segmentation_export_kwargs: dict = None):
        # Standard validation, followed by the BraTS region-wise evaluation.
        super().validate(do_mirroring=do_mirroring, use_sliding_window=use_sliding_window, step_size=step_size,
                         save_softmax=save_softmax, use_gaussian=use_gaussian,
                         overwrite=overwrite, validation_folder_name=validation_folder_name, debug=debug,
                         all_in_gpu=all_in_gpu, segmentation_export_kwargs=segmentation_export_kwargs)
        # run brats specific validation
        output_folder = join(self.output_folder, validation_folder_name)
        evaluate_regions(output_folder, self.gt_niftis_folder, self.regions)
    def run_online_evaluation(self, output, target):
        # Deep supervision: only the highest-resolution output/target pair is evaluated.
        output = output[0]
        target = target[0]
        with torch.no_grad():
            out_sigmoid = torch.sigmoid(output)
            # hard (thresholded) predictions for the online Dice estimate
            out_sigmoid = (out_sigmoid > 0.5).float()
            if self.threeD:
                axes = (0, 2, 3, 4)
            else:
                axes = (0, 2, 3)
            tp, fp, fn, _ = get_tp_fp_fn_tn(out_sigmoid, target, axes=axes)
            tp_hard = tp.detach().cpu().numpy()
            fp_hard = fp.detach().cpu().numpy()
            fn_hard = fn.detach().cpu().numpy()
            self.online_eval_foreground_dc.append(list((2 * tp_hard) / (2 * tp_hard + fp_hard + fn_hard + 1e-8)))
            self.online_eval_tp.append(list(tp_hard))
            self.online_eval_fp.append(list(fp_hard))
            self.online_eval_fn.append(list(fn_hard))
class nnUNetTrainerV2BraTSRegions_DA3(nnUNetTrainerV2BraTSRegions_DA3_BN):
    """DA3 region trainer using InstanceNorm (instead of the BatchNorm of the parent).
    Inference nonlinearity is sigmoid (region channels may overlap)."""

    def initialize_network(self):
        """Build self.network: a Generic_UNet with InstanceNorm, LeakyReLU and no dropout."""
        if self.threeD:
            conv_op, dropout_op, norm_op = nn.Conv3d, nn.Dropout3d, nn.InstanceNorm3d
        else:
            conv_op, dropout_op, norm_op = nn.Conv2d, nn.Dropout2d, nn.InstanceNorm2d
        self.network = Generic_UNet(
            self.num_input_channels, self.base_num_features, self.num_classes,
            len(self.net_num_pool_op_kernel_sizes), self.conv_per_stage, 2,
            conv_op, norm_op, {'eps': 1e-5, 'affine': True},
            dropout_op, {'p': 0, 'inplace': True},
            nn.LeakyReLU, {'negative_slope': 1e-2, 'inplace': True},
            True, False, lambda x: x, InitWeights_He(1e-2),
            self.net_num_pool_op_kernel_sizes, self.net_conv_kernel_sizes, False, True, True)
        if torch.cuda.is_available():
            self.network.cuda()
        self.network.inference_apply_nonlin = nn.Sigmoid()
class nnUNetTrainerV2BraTSRegions_DA3_BD(nnUNetTrainerV2BraTSRegions_DA3):
    """DA3 region trainer with batch dice (Dice computed over the whole batch)."""

    def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None,
                 unpack_data=True, deterministic=True, fp16=False):
        super().__init__(plans_file, fold, output_folder, dataset_directory, batch_dice, stage, unpack_data,
                         deterministic, fp16)
        # Same loss family as the parent, with batch_dice enabled.
        dice_kwargs = {'batch_dice': True, 'do_bg': True, 'smooth': 0}
        self.loss = DC_and_BCE_loss({}, dice_kwargs)
class nnUNetTrainerV2BraTSRegions_DA3_BN_BD(nnUNetTrainerV2BraTSRegions_DA3_BN):
    """DA3 BatchNorm trainer variant using Dice+BCE with batch dice ("BD")."""

    def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None,
                 unpack_data=True, deterministic=True, fp16=False):
        super().__init__(plans_file, fold, output_folder, dataset_directory, batch_dice, stage, unpack_data,
                         deterministic, fp16)
        # batch dice: the dice term is computed over the whole batch, background included
        self.loss = DC_and_BCE_loss({}, {'batch_dice': True, 'do_bg': True, 'smooth': 0})
class nnUNetTrainerV2BraTSRegions_DA4_BN(nnUNetTrainerV2BraTSRegions_DA3_BN):
    """BraTS-regions trainer with an even more aggressive augmentation preset ("DA4")."""

    def setup_DA_params(self):
        """Configure deep-supervision scales and the DA4 augmentation parameters.

        Deliberately calls nnUNetTrainerV2.setup_DA_params directly (skipping the
        immediate parent) and then layers the DA4 overrides on top: wider rotation
        ranges, independent per-axis scaling, elastic deformation and additive
        brightness, all with their own trigger probabilities.
        """
        nnUNetTrainerV2.setup_DA_params(self)
        # one downsampling factor per pooling stage; the coarsest output is dropped
        self.deep_supervision_scales = [[1, 1, 1]] + [list(i) for i in 1 / np.cumprod(
            np.vstack(self.net_num_pool_op_kernel_sizes), axis=0)][:-1]
        if self.threeD:
            self.data_aug_params = default_3D_augmentation_params
            self.data_aug_params['rotation_x'] = (-90. / 360 * 2. * np.pi, 90. / 360 * 2. * np.pi)
            self.data_aug_params['rotation_y'] = (-90. / 360 * 2. * np.pi, 90. / 360 * 2. * np.pi)
            self.data_aug_params['rotation_z'] = (-90. / 360 * 2. * np.pi, 90. / 360 * 2. * np.pi)
            if self.do_dummy_2D_aug:
                self.data_aug_params["dummy_2D"] = True
                self.print_to_log_file("Using dummy2d data augmentation")
                self.data_aug_params["elastic_deform_alpha"] = \
                    default_2D_augmentation_params["elastic_deform_alpha"]
                self.data_aug_params["elastic_deform_sigma"] = \
                    default_2D_augmentation_params["elastic_deform_sigma"]
                self.data_aug_params["rotation_x"] = default_2D_augmentation_params["rotation_x"]
        else:
            self.do_dummy_2D_aug = False
            # strongly anisotropic 2D patches get the full in-plane rotation range
            if max(self.patch_size) / min(self.patch_size) > 1.5:
                default_2D_augmentation_params['rotation_x'] = (-180. / 360 * 2. * np.pi, 180. / 360 * 2. * np.pi)
            self.data_aug_params = default_2D_augmentation_params
        self.data_aug_params["mask_was_used_for_normalization"] = self.use_mask_for_norm
        if self.do_dummy_2D_aug:
            self.basic_generator_patch_size = get_patch_size(self.patch_size[1:],
                                                             self.data_aug_params['rotation_x'],
                                                             self.data_aug_params['rotation_y'],
                                                             self.data_aug_params['rotation_z'],
                                                             self.data_aug_params['scale_range'])
            self.basic_generator_patch_size = np.array([self.patch_size[0]] + list(self.basic_generator_patch_size))
            patch_size_for_spatialtransform = self.patch_size[1:]
        else:
            self.basic_generator_patch_size = get_patch_size(self.patch_size, self.data_aug_params['rotation_x'],
                                                             self.data_aug_params['rotation_y'],
                                                             self.data_aug_params['rotation_z'],
                                                             self.data_aug_params['scale_range'])
            patch_size_for_spatialtransform = self.patch_size
        self.data_aug_params['selected_seg_channels'] = [0]
        self.data_aug_params['patch_size_for_spatialtransform'] = patch_size_for_spatialtransform
        # DA4 overrides: more frequent / stronger spatial and intensity transforms than DA3
        self.data_aug_params["p_rot"] = 0.3
        self.data_aug_params["scale_range"] = (0.65, 1.6)
        self.data_aug_params["p_scale"] = 0.3
        self.data_aug_params["independent_scale_factor_for_each_axis"] = True
        self.data_aug_params["p_independent_scale_per_axis"] = 0.3
        self.data_aug_params["do_elastic"] = True
        self.data_aug_params["p_eldef"] = 0.2
        self.data_aug_params["eldef_deformation_scale"] = (0, 0.25)
        self.data_aug_params["do_additive_brightness"] = True
        self.data_aug_params["additive_brightness_mu"] = 0
        self.data_aug_params["additive_brightness_sigma"] = 0.2
        self.data_aug_params["additive_brightness_p_per_sample"] = 0.3
        self.data_aug_params["additive_brightness_p_per_channel"] = 0.5
        self.data_aug_params['gamma_range'] = (0.5, 1.6)
        self.data_aug_params['num_cached_per_thread'] = 4
class nnUNetTrainerV2BraTSRegions_DA4_BN_BD(nnUNetTrainerV2BraTSRegions_DA4_BN):
    """DA4 BatchNorm trainer variant using Dice+BCE with batch dice ("BD")."""

    def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None,
                 unpack_data=True, deterministic=True, fp16=False):
        super().__init__(plans_file, fold, output_folder, dataset_directory, batch_dice, stage, unpack_data,
                         deterministic, fp16)
        # batch dice: the dice term is computed over the whole batch, background included
        self.loss = DC_and_BCE_loss({}, {'batch_dice': True, 'do_bg': True, 'smooth': 0})
| 14,362
| 51.805147
| 119
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/training/network_training/competitions_with_custom_Trainers/MMS/nnUNetTrainerV2_MMS.py
|
import torch
from nnunet.network_architecture.generic_UNet import Generic_UNet
from nnunet.network_architecture.initialization import InitWeights_He
from nnunet.training.network_training.nnUNet_variants.data_augmentation.nnUNetTrainerV2_insaneDA import \
nnUNetTrainerV2_insaneDA
from nnunet.utilities.nd_softmax import softmax_helper
from torch import nn
class nnUNetTrainerV2_MMS(nnUNetTrainerV2_insaneDA):
    """M&Ms challenge trainer: tweaked 'insane' augmentation + BatchNorm U-Net."""

    def setup_DA_params(self):
        """Start from the insaneDA preset and raise trigger probabilities / ranges."""
        super().setup_DA_params()
        self.data_aug_params["p_rot"] = 0.7
        self.data_aug_params["p_eldef"] = 0.1
        self.data_aug_params["p_scale"] = 0.3
        self.data_aug_params["independent_scale_factor_for_each_axis"] = True
        self.data_aug_params["p_independent_scale_per_axis"] = 0.3
        self.data_aug_params["do_additive_brightness"] = True
        self.data_aug_params["additive_brightness_mu"] = 0
        self.data_aug_params["additive_brightness_sigma"] = 0.2
        self.data_aug_params["additive_brightness_p_per_sample"] = 0.3
        self.data_aug_params["additive_brightness_p_per_channel"] = 1
        self.data_aug_params["elastic_deform_alpha"] = (0., 300.)
        self.data_aug_params["elastic_deform_sigma"] = (9., 15.)
        self.data_aug_params['gamma_range'] = (0.5, 1.6)

    def initialize_network(self):
        """Build Generic_UNet with BatchNorm (instead of the default InstanceNorm)
        and softmax inference nonlinearity (mutually exclusive classes)."""
        if self.threeD:
            conv_op, dropout_op, norm_op = nn.Conv3d, nn.Dropout3d, nn.BatchNorm3d
        else:
            conv_op, dropout_op, norm_op = nn.Conv2d, nn.Dropout2d, nn.BatchNorm2d
        norm_op_kwargs = {'eps': 1e-5, 'affine': True}
        dropout_op_kwargs = {'p': 0, 'inplace': True}
        net_nonlin = nn.LeakyReLU
        net_nonlin_kwargs = {'negative_slope': 1e-2, 'inplace': True}
        self.network = Generic_UNet(self.num_input_channels, self.base_num_features, self.num_classes,
                                    len(self.net_num_pool_op_kernel_sizes),
                                    self.conv_per_stage, 2, conv_op, norm_op, norm_op_kwargs, dropout_op,
                                    dropout_op_kwargs, net_nonlin, net_nonlin_kwargs, True, False,
                                    lambda x: x, InitWeights_He(1e-2),
                                    self.net_num_pool_op_kernel_sizes, self.net_conv_kernel_sizes,
                                    False, True, True)
        if torch.cuda.is_available():
            self.network.cuda()
        self.network.inference_apply_nonlin = softmax_helper

    # Disabled upstream debugging helper, kept verbatim for reference:
    """def run_training(self):
    from batchviewer import view_batch
    a = next(self.tr_gen)
    view_batch(a['data'])
    import IPython;IPython.embed()"""
| 2,662
| 42.655738
| 117
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/training/network_training/nnUNet_variants/nnUNetTrainerCE.py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nnunet.training.loss_functions.crossentropy import RobustCrossEntropyLoss
from nnunet.training.network_training.nnUNetTrainer import nnUNetTrainer
class nnUNetTrainerCE(nnUNetTrainer):
    """nnUNetTrainer that replaces the default loss with pure cross-entropy."""

    def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None,
                 unpack_data=True, deterministic=True, fp16=False):
        super().__init__(plans_file, fold, output_folder, dataset_directory, batch_dice, stage,
                         unpack_data, deterministic, fp16)
        self.loss = RobustCrossEntropyLoss()
| 1,304
| 53.375
| 116
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/training/network_training/nnUNet_variants/__init__.py
|
from __future__ import absolute_import
from . import *
| 54
| 26.5
| 38
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/training/network_training/nnUNet_variants/nnUNetTrainerNoDA.py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import matplotlib
from batchgenerators.utilities.file_and_folder_operations import maybe_mkdir_p, join
from nnunet.network_architecture.neural_network import SegmentationNetwork
from nnunet.training.data_augmentation.default_data_augmentation import get_no_augmentation
from nnunet.training.dataloading.dataset_loading import unpack_dataset, DataLoader3D, DataLoader2D
from nnunet.training.network_training.nnUNetTrainer import nnUNetTrainer
from torch import nn
matplotlib.use("agg")
class nnUNetTrainerNoDA(nnUNetTrainer):
    """nnUNetTrainer variant that trains without any data augmentation."""

    def get_basic_generators(self):
        """Build train/val dataloaders emitting patches of exactly ``patch_size``.

        With no spatial augmentation there is no need for oversized patches, so
        the generator patch size equals the network patch size.
        """
        self.load_dataset()
        self.do_split()
        if self.threeD:
            dl_tr = DataLoader3D(self.dataset_tr, self.patch_size, self.patch_size, self.batch_size,
                                 False, oversample_foreground_percent=self.oversample_foreground_percent,
                                 pad_mode="constant", pad_sides=self.pad_all_sides)
            dl_val = DataLoader3D(self.dataset_val, self.patch_size, self.patch_size, self.batch_size, False,
                                  oversample_foreground_percent=self.oversample_foreground_percent,
                                  pad_mode="constant", pad_sides=self.pad_all_sides)
        else:
            dl_tr = DataLoader2D(self.dataset_tr, self.patch_size, self.patch_size, self.batch_size,
                                 transpose=self.plans.get('transpose_forward'),
                                 oversample_foreground_percent=self.oversample_foreground_percent,
                                 pad_mode="constant", pad_sides=self.pad_all_sides)
            dl_val = DataLoader2D(self.dataset_val, self.patch_size, self.patch_size, self.batch_size,
                                  transpose=self.plans.get('transpose_forward'),
                                  oversample_foreground_percent=self.oversample_foreground_percent,
                                  pad_mode="constant", pad_sides=self.pad_all_sides)
        return dl_tr, dl_val

    def initialize(self, training=True, force_load_plans=False):
        """
        For prediction of test cases just set training=False, this will prevent loading of training data and
        training batchgenerator initialization
        :param training:
        :return:
        """
        maybe_mkdir_p(self.output_folder)
        if force_load_plans or (self.plans is None):
            self.load_plans_file()
        self.process_plans(self.plans)
        self.setup_DA_params()
        self.folder_with_preprocessed_data = join(self.dataset_directory, self.plans['data_identifier'] +
                                                  "_stage%d" % self.stage)
        if training:
            self.dl_tr, self.dl_val = self.get_basic_generators()
            if self.unpack_data:
                print("unpacking dataset")
                unpack_dataset(self.folder_with_preprocessed_data)
                print("done")
            else:
                print("INFO: Not unpacking data! Training may be slow due to that. Pray you are not using 2d or you "
                      "will wait all winter for your model to finish!")
            # wraps the loaders without adding any transforms
            self.tr_gen, self.val_gen = get_no_augmentation(self.dl_tr, self.dl_val,
                                                            self.data_aug_params[
                                                                'patch_size_for_spatialtransform'],
                                                            self.data_aug_params)
            self.print_to_log_file("TRAINING KEYS:\n %s" % (str(self.dataset_tr.keys())),
                                   also_print_to_console=False)
            self.print_to_log_file("VALIDATION KEYS:\n %s" % (str(self.dataset_val.keys())),
                                   also_print_to_console=False)
        else:
            pass
        self.initialize_network()
        assert isinstance(self.network, (SegmentationNetwork, nn.DataParallel))
        self.was_initialized = True
        # no augmentation also means no test-time mirroring
        self.data_aug_params['mirror_axes'] = ()
| 4,742
| 50.554348
| 117
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/training/network_training/nnUNet_variants/resampling/nnUNetTrainerV2_resample33.py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nnunet.inference.segmentation_export import save_segmentation_nifti_from_softmax
from nnunet.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2
class nnUNetTrainerV2_resample33(nnUNetTrainerV2):
    """Trainer whose nifti export resamples segmentations with order-3 interpolation."""

    def validate(self, do_mirroring: bool = True, use_sliding_window: bool = True,
                 step_size: float = 0.5, save_softmax: bool = True, use_gaussian: bool = True, overwrite: bool = True,
                 validation_folder_name: str = 'validation_raw', debug: bool = False, all_in_gpu: bool = False,
                 segmentation_export_kwargs: dict = None):
        # no behavior change here; delegated verbatim to the parent
        return super().validate(do_mirroring, use_sliding_window, step_size, save_softmax, use_gaussian,
                                overwrite, validation_folder_name, debug, all_in_gpu, segmentation_export_kwargs)

    def preprocess_predict_nifti(self, input_files, output_file=None, softmax_ouput_file=None,
                                 mixed_precision: bool = True):
        """
        Use this to predict new data
        :param input_files:
        :param output_file:
        :param softmax_ouput_file:
        :param mixed_precision:
        :return:
        """
        print("preprocessing...")
        d, s, properties = self.preprocess_patient(input_files)
        print("predicting...")
        pred = self.predict_preprocessed_data_return_seg_and_softmax(d, do_mirroring=self.data_aug_params["do_mirror"],
                                                                     mirror_axes=self.data_aug_params['mirror_axes'],
                                                                     use_sliding_window=True, step_size=0.5,
                                                                     use_gaussian=True, pad_border_mode='constant',
                                                                     pad_kwargs={'constant_values': 0},
                                                                     all_in_gpu=True,
                                                                     mixed_precision=mixed_precision)[1]
        # back to the original axis ordering before export
        pred = pred.transpose([0] + [i + 1 for i in self.transpose_backward])
        print("resampling to original spacing and nifti export...")
        # interpolation order 3 in-plane and along z (force_separate_z=False)
        save_segmentation_nifti_from_softmax(pred, output_file, properties, 3, None, None, None, softmax_ouput_file,
                                             None, force_separate_z=False, interpolation_order_z=3)
        print("done")
| 3,131
| 57
| 119
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/training/network_training/nnUNet_variants/copies/nnUNetTrainerV2_copies.py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nnunet.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2
# This stuff is just so that we can check stability of results. Training is nondeterministic and by renaming the trainer
# class we can have several trained models coexist although the trainer is effectively the same
class nnUNetTrainerV2_copy1(nnUNetTrainerV2):
    """Behaviorally identical to nnUNetTrainerV2; renamed so several independently
    trained models can coexist for stability checks."""

    def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None,
                 unpack_data=True, deterministic=True, fp16=False):
        super().__init__(plans_file, fold, output_folder, dataset_directory, batch_dice, stage, unpack_data,
                         deterministic, fp16)
class nnUNetTrainerV2_copy2(nnUNetTrainerV2):
    """Behaviorally identical to nnUNetTrainerV2; renamed for stability checks."""

    def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None,
                 unpack_data=True, deterministic=True, fp16=False):
        super().__init__(plans_file, fold, output_folder, dataset_directory, batch_dice, stage, unpack_data,
                         deterministic, fp16)
class nnUNetTrainerV2_copy3(nnUNetTrainerV2):
    """Behaviorally identical to nnUNetTrainerV2; renamed for stability checks."""

    def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None,
                 unpack_data=True, deterministic=True, fp16=False):
        super().__init__(plans_file, fold, output_folder, dataset_directory, batch_dice, stage, unpack_data,
                         deterministic, fp16)
class nnUNetTrainerV2_copy4(nnUNetTrainerV2):
    """Behaviorally identical to nnUNetTrainerV2; renamed for stability checks."""

    def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None,
                 unpack_data=True, deterministic=True, fp16=False):
        super().__init__(plans_file, fold, output_folder, dataset_directory, batch_dice, stage, unpack_data,
                         deterministic, fp16)
| 2,527
| 49.56
| 120
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/training/network_training/nnUNet_variants/profiling/nnUNetTrainerV2_dummyLoad.py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Tuple
import torch
from nnunet.training.loss_functions.crossentropy import RobustCrossEntropyLoss
from nnunet.training.network_training.nnUNet_variants.architectural_variants.nnUNetTrainerV2_noDeepSupervision import \
nnUNetTrainerV2_noDeepSupervision
from nnunet.training.network_training.nnUNet_variants.profiling.nnUNetTrainerV2_2epochs import nnUNetTrainerV2_5epochs
from torch.cuda.amp import autocast
from torch.nn.utils import clip_grad_norm_
import numpy as np
from torch import nn
class nnUNetTrainerV2_5epochs_dummyLoad(nnUNetTrainerV2_5epochs):
    """5-epoch profiling trainer that reuses one random GPU-resident batch,
    removing data loading from the measured pipeline."""

    def initialize(self, training=True, force_load_plans=False):
        super().initialize(training, force_load_plans)
        # one fixed random batch plus matching deep-supervision targets, kept on GPU
        self.some_batch = torch.rand((self.batch_size, self.num_input_channels, *self.patch_size)).float().cuda()
        self.some_gt = [torch.round(torch.rand((self.batch_size, 1, *[int(i * j) for i, j in zip(self.patch_size, k)]))
                                    * (self.num_classes - 1)).float().cuda()
                        for k in self.deep_supervision_scales]

    def run_iteration(self, data_generator, do_backprop=True, run_online_evaluation=False):
        """One train step on the cached batch; ``data_generator`` is ignored."""
        data = self.some_batch
        target = self.some_gt
        self.optimizer.zero_grad()
        if self.fp16:
            with autocast():
                output = self.network(data)
                del data
                l = self.loss(output, target)
            if do_backprop:
                self.amp_grad_scaler.scale(l).backward()
                self.amp_grad_scaler.unscale_(self.optimizer)
                torch.nn.utils.clip_grad_norm_(self.network.parameters(), 12)
                self.amp_grad_scaler.step(self.optimizer)
                self.amp_grad_scaler.update()
        else:
            output = self.network(data)
            del data
            l = self.loss(output, target)
            if do_backprop:
                l.backward()
                torch.nn.utils.clip_grad_norm_(self.network.parameters(), 12)
                self.optimizer.step()
        if run_online_evaluation:
            self.run_online_evaluation(output, target)
        del target
        return l.detach().cpu().numpy()
class nnUNetTrainerV2_5epochs_dummyLoadCEnoDS(nnUNetTrainerV2_noDeepSupervision):
    """5-epoch profiling trainer: cached random batch, plain CE loss, no deep supervision.

    Validation/prediction/checkpointing are stubbed out because only the training
    loop is being profiled.
    """

    def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None,
                 unpack_data=True, deterministic=True, fp16=False):
        super().__init__(plans_file, fold, output_folder, dataset_directory, batch_dice, stage, unpack_data,
                         deterministic, fp16)
        self.max_num_epochs = 5
        self.loss = RobustCrossEntropyLoss()

    def validate(self, do_mirroring: bool = True, use_sliding_window: bool = True, step_size: float = 0.5,
                 save_softmax: bool = True, use_gaussian: bool = True, overwrite: bool = True,
                 validation_folder_name: str = 'validation_raw', debug: bool = False, all_in_gpu: bool = False,
                 segmentation_export_kwargs=None):
        pass  # profiling only

    def predict_preprocessed_data_return_seg_and_softmax(self, data: np.ndarray, do_mirroring: bool = True,
                                                         mirror_axes: Tuple[int] = None,
                                                         use_sliding_window: bool = True, step_size: float = 0.5,
                                                         use_gaussian: bool = True, pad_border_mode: str = 'constant',
                                                         pad_kwargs: dict = None, all_in_gpu: bool = False,
                                                         verbose: bool = True, mixed_precision=True) -> Tuple[np.ndarray, np.ndarray]:
        pass  # profiling only

    def save_checkpoint(self, fname, save_optimizer=True):
        pass  # profiling only

    def initialize(self, training=True, force_load_plans=False):
        super().initialize(training, force_load_plans)
        # one fixed random batch + CE-style target (long, no channel axis), kept on GPU
        self.some_batch = torch.rand((self.batch_size, self.num_input_channels, *self.patch_size)).float().cuda()
        self.some_gt = torch.round(torch.rand((self.batch_size, *self.patch_size)) * (self.num_classes - 1)).long().cuda()

    def run_iteration(self, data_generator, do_backprop=True, run_online_evaluation=False):
        """One train step on the cached batch; ``data_generator`` is ignored.

        BUGFIX: the original fp16 branch referenced the undefined name ``amp``
        (leftover apex code path), raising NameError whenever fp16 was enabled.
        Rewritten with the torch.cuda.amp autocast/GradScaler pattern used by
        every other trainer in this file; the non-fp16 path is unchanged.
        """
        data = self.some_batch
        target = self.some_gt
        self.optimizer.zero_grad()
        if self.fp16:
            with autocast():
                output = self.network(data)
                del data
                loss = self.loss(output, target)
            if run_online_evaluation:
                self.run_online_evaluation(output, target)
            if do_backprop:
                self.amp_grad_scaler.scale(loss).backward()
                self.amp_grad_scaler.unscale_(self.optimizer)
                clip_grad_norm_(self.network.parameters(), 12)
                self.amp_grad_scaler.step(self.optimizer)
                self.amp_grad_scaler.update()
        else:
            output = self.network(data)
            del data
            loss = self.loss(output, target)
            if run_online_evaluation:
                self.run_online_evaluation(output, target)
            if do_backprop:
                loss.backward()
                _ = clip_grad_norm_(self.network.parameters(), 12)
                self.optimizer.step()
        del target
        return loss.detach().cpu().numpy()

    def run_online_evaluation(self, output, target):
        pass  # profiling only

    def finish_online_evaluation(self):
        pass  # profiling only
| 5,758
| 42.300752
| 199
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/training/network_training/nnUNet_variants/profiling/nnUNetTrainerV2_2epochs.py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Tuple
import numpy as np
import torch
from nnunet.training.loss_functions.crossentropy import RobustCrossEntropyLoss
from nnunet.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2
from nnunet.training.network_training.nnUNetTrainerV2_DDP import nnUNetTrainerV2_DDP
from nnunet.training.network_training.nnUNet_variants.architectural_variants.nnUNetTrainerV2_noDeepSupervision import \
nnUNetTrainerV2_noDeepSupervision
from nnunet.utilities.to_torch import maybe_to_torch, to_cuda
from torch.cuda.amp import autocast
class nnUNetTrainerV2_2epochs(nnUNetTrainerV2):
    """Profiling trainer capped at 2 epochs; validation/prediction/checkpointing stubbed."""

    def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None,
                 unpack_data=True, deterministic=True, fp16=False):
        super().__init__(plans_file, fold, output_folder, dataset_directory, batch_dice, stage, unpack_data,
                         deterministic, fp16)
        self.max_num_epochs = 2

    def validate(self, do_mirroring: bool = True, use_sliding_window: bool = True, step_size: float = 0.5,
                 save_softmax: bool = True, use_gaussian: bool = True, overwrite: bool = True,
                 validation_folder_name: str = 'validation_raw', debug: bool = False, all_in_gpu: bool = False,
                 segmentation_export_kwargs=None):
        pass  # profiling only

    def predict_preprocessed_data_return_seg_and_softmax(self, data: np.ndarray, do_mirroring: bool = True,
                                                         mirror_axes: Tuple[int] = None,
                                                         use_sliding_window: bool = True, step_size: float = 0.5,
                                                         use_gaussian: bool = True, pad_border_mode: str = 'constant',
                                                         pad_kwargs: dict = None, all_in_gpu: bool = False,
                                                         verbose: bool = True, mixed_precision=True) -> Tuple[np.ndarray, np.ndarray]:
        pass  # profiling only

    def save_checkpoint(self, fname, save_optimizer=True):
        pass  # profiling only
class nnUNetTrainerV2_5epochs(nnUNetTrainerV2):
    """Profiling trainer capped at 5 epochs; validation/prediction/checkpointing stubbed."""

    def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None,
                 unpack_data=True, deterministic=True, fp16=False):
        super().__init__(plans_file, fold, output_folder, dataset_directory, batch_dice, stage, unpack_data,
                         deterministic, fp16)
        self.max_num_epochs = 5

    def validate(self, do_mirroring: bool = True, use_sliding_window: bool = True, step_size: float = 0.5,
                 save_softmax: bool = True, use_gaussian: bool = True, overwrite: bool = True,
                 validation_folder_name: str = 'validation_raw', debug: bool = False, all_in_gpu: bool = False,
                 segmentation_export_kwargs=None):
        pass  # profiling only

    def predict_preprocessed_data_return_seg_and_softmax(self, data: np.ndarray, do_mirroring: bool = True,
                                                         mirror_axes: Tuple[int] = None,
                                                         use_sliding_window: bool = True, step_size: float = 0.5,
                                                         use_gaussian: bool = True, pad_border_mode: str = 'constant',
                                                         pad_kwargs: dict = None, all_in_gpu: bool = False,
                                                         verbose: bool = True, mixed_precision=True) -> Tuple[np.ndarray, np.ndarray]:
        pass  # profiling only

    def save_checkpoint(self, fname, save_optimizer=True):
        pass  # profiling only
class nnUNetTrainerV2_5epochs_CEnoDS(nnUNetTrainerV2_noDeepSupervision):
    """5-epoch profiling trainer: plain cross-entropy loss, no deep supervision."""

    def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None,
                 unpack_data=True, deterministic=True, fp16=False):
        super().__init__(plans_file, fold, output_folder, dataset_directory, batch_dice, stage, unpack_data,
                         deterministic, fp16)
        self.max_num_epochs = 5
        self.loss = RobustCrossEntropyLoss()

    def validate(self, do_mirroring: bool = True, use_sliding_window: bool = True, step_size: float = 0.5,
                 save_softmax: bool = True, use_gaussian: bool = True, overwrite: bool = True,
                 validation_folder_name: str = 'validation_raw', debug: bool = False, all_in_gpu: bool = False,
                 segmentation_export_kwargs=None):
        pass  # profiling only

    def predict_preprocessed_data_return_seg_and_softmax(self, data: np.ndarray, do_mirroring: bool = True,
                                                         mirror_axes: Tuple[int] = None,
                                                         use_sliding_window: bool = True, step_size: float = 0.5,
                                                         use_gaussian: bool = True, pad_border_mode: str = 'constant',
                                                         pad_kwargs: dict = None, all_in_gpu: bool = False,
                                                         verbose: bool = True, mixed_precision=True) -> Tuple[np.ndarray, np.ndarray]:
        pass  # profiling only

    def save_checkpoint(self, fname, save_optimizer=True):
        pass  # profiling only

    def run_iteration(self, data_generator, do_backprop=True, run_online_evaluation=False):
        """One training step; CE targets must be long dtype with the channel axis removed."""
        data_dict = next(data_generator)
        data = maybe_to_torch(data_dict['data'])
        target = maybe_to_torch(data_dict['target']).long()[:, 0]
        if torch.cuda.is_available():
            data = to_cuda(data)
            target = to_cuda(target)
        self.optimizer.zero_grad()
        if self.fp16:
            with autocast():
                output = self.network(data)
                del data
                loss = self.loss(output, target)
            if do_backprop:
                self.amp_grad_scaler.scale(loss).backward()
                self.amp_grad_scaler.unscale_(self.optimizer)
                torch.nn.utils.clip_grad_norm_(self.network.parameters(), 12)
                self.amp_grad_scaler.step(self.optimizer)
                self.amp_grad_scaler.update()
        else:
            output = self.network(data)
            del data
            loss = self.loss(output, target)
            if do_backprop:
                loss.backward()
                torch.nn.utils.clip_grad_norm_(self.network.parameters(), 12)
                self.optimizer.step()
        if run_online_evaluation:
            self.run_online_evaluation(output, target)
        del target
        return loss.detach().cpu().numpy()

    def run_online_evaluation(self, output, target):
        pass  # profiling only

    def finish_online_evaluation(self):
        pass  # profiling only
class nnUNetTrainerV2_5epochs_noDS(nnUNetTrainerV2_noDeepSupervision):
    """5-epoch profiling trainer without deep supervision (default loss kept)."""

    def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None,
                 unpack_data=True, deterministic=True, fp16=False):
        super().__init__(plans_file, fold, output_folder, dataset_directory, batch_dice, stage, unpack_data,
                         deterministic, fp16)
        self.max_num_epochs = 5

    def validate(self, do_mirroring: bool = True, use_sliding_window: bool = True, step_size: float = 0.5,
                 save_softmax: bool = True, use_gaussian: bool = True, overwrite: bool = True,
                 validation_folder_name: str = 'validation_raw', debug: bool = False, all_in_gpu: bool = False,
                 segmentation_export_kwargs=None):
        pass  # profiling only

    def predict_preprocessed_data_return_seg_and_softmax(self, data: np.ndarray, do_mirroring: bool = True,
                                                         mirror_axes: Tuple[int] = None,
                                                         use_sliding_window: bool = True, step_size: float = 0.5,
                                                         use_gaussian: bool = True, pad_border_mode: str = 'constant',
                                                         pad_kwargs: dict = None, all_in_gpu: bool = False,
                                                         verbose: bool = True, mixed_precision=True) -> Tuple[np.ndarray, np.ndarray]:
        pass  # profiling only

    def save_checkpoint(self, fname, save_optimizer=True):
        pass  # profiling only

    def run_iteration(self, data_generator, do_backprop=True, run_online_evaluation=False):
        """One training step (single-output target, no deep supervision)."""
        data_dict = next(data_generator)
        data = maybe_to_torch(data_dict['data'])
        target = maybe_to_torch(data_dict['target'])
        if torch.cuda.is_available():
            data = to_cuda(data)
            target = to_cuda(target)
        self.optimizer.zero_grad()
        if self.fp16:
            with autocast():
                output = self.network(data)
                del data
                loss = self.loss(output, target)
            if do_backprop:
                self.amp_grad_scaler.scale(loss).backward()
                self.amp_grad_scaler.unscale_(self.optimizer)
                torch.nn.utils.clip_grad_norm_(self.network.parameters(), 12)
                self.amp_grad_scaler.step(self.optimizer)
                self.amp_grad_scaler.update()
        else:
            output = self.network(data)
            del data
            loss = self.loss(output, target)
            if do_backprop:
                loss.backward()
                torch.nn.utils.clip_grad_norm_(self.network.parameters(), 12)
                self.optimizer.step()
        if run_online_evaluation:
            self.run_online_evaluation(output, target)
        del target
        return loss.detach().cpu().numpy()

    def run_online_evaluation(self, output, target):
        pass  # profiling only

    def finish_online_evaluation(self):
        pass  # profiling only
class nnUNetTrainerV2_DDP_5epochs(nnUNetTrainerV2_DDP):
    """Distributed (DDP) profiling trainer capped at 5 epochs; stubs as in the others."""

    def __init__(self, plans_file, fold, local_rank, output_folder=None, dataset_directory=None, batch_dice=True,
                 stage=None,
                 unpack_data=True, deterministic=True, distribute_batch_size=False, fp16=False):
        super().__init__(plans_file, fold, local_rank, output_folder, dataset_directory, batch_dice, stage,
                         unpack_data, deterministic, distribute_batch_size, fp16)
        self.max_num_epochs = 5

    def validate(self, do_mirroring: bool = True, use_sliding_window: bool = True, step_size: float = 0.5,
                 save_softmax: bool = True, use_gaussian: bool = True, overwrite: bool = True,
                 validation_folder_name: str = 'validation_raw', debug: bool = False, all_in_gpu: bool = False,
                 segmentation_export_kwargs=None):
        pass  # profiling only

    def predict_preprocessed_data_return_seg_and_softmax(self, data: np.ndarray, do_mirroring: bool = True,
                                                         mirror_axes: Tuple[int] = None,
                                                         use_sliding_window: bool = True, step_size: float = 0.5,
                                                         use_gaussian: bool = True, pad_border_mode: str = 'constant',
                                                         pad_kwargs: dict = None, all_in_gpu: bool = False,
                                                         verbose: bool = True, mixed_precision=True) -> Tuple[np.ndarray, np.ndarray]:
        pass  # profiling only

    def save_checkpoint(self, fname, save_optimizer=True):
        pass  # profiling only
class nnUNetTrainerV2_DDP_5epochs_dummyLoad(nnUNetTrainerV2_DDP_5epochs):
    """DDP profiling trainer that reuses one cached random GPU batch per rank."""

    def initialize(self, training=True, force_load_plans=False):
        super().initialize(training, force_load_plans)
        # one fixed random batch plus matching deep-supervision targets, kept on GPU
        self.some_batch = torch.rand((self.batch_size, self.num_input_channels, *self.patch_size)).float().cuda()
        self.some_gt = [torch.round(torch.rand((self.batch_size, 1, *[int(i * j) for i, j in zip(self.patch_size, k)]))
                                    * (self.num_classes - 1)).float().cuda()
                        for k in self.deep_supervision_scales]

    def run_iteration(self, data_generator, do_backprop=True, run_online_evaluation=False):
        """One train step on the cached batch; ``data_generator`` is ignored.
        Uses the DDP trainer's compute_loss instead of self.loss."""
        data = self.some_batch
        target = self.some_gt
        self.optimizer.zero_grad()
        if self.fp16:
            with autocast():
                output = self.network(data)
                del data
                l = self.compute_loss(output, target)
            if do_backprop:
                self.amp_grad_scaler.scale(l).backward()
                self.amp_grad_scaler.unscale_(self.optimizer)
                torch.nn.utils.clip_grad_norm_(self.network.parameters(), 12)
                self.amp_grad_scaler.step(self.optimizer)
                self.amp_grad_scaler.update()
        else:
            output = self.network(data)
            del data
            l = self.compute_loss(output, target)
            if do_backprop:
                l.backward()
                torch.nn.utils.clip_grad_norm_(self.network.parameters(), 12)
                self.optimizer.step()
        if run_online_evaluation:
            self.run_online_evaluation(output, target)
        del target
        return l.detach().cpu().numpy()
| 13,888
| 46.40273
| 134
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/training/network_training/nnUNet_variants/optimizer_and_lr/nnUNetTrainerV2_SGD_fixedSchedule2.py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nnunet.training.learning_rate.poly_lr import poly_lr
from nnunet.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2
class nnUNetTrainerV2_SGD_fixedSchedule2(nnUNetTrainerV2):
    """Stepped LR schedule: initial LR until epoch 500, x0.1 until 675, then a
    polyLR decay (exponent 0.9) of the x0.1 rate for the remaining epochs."""

    def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None,
                 unpack_data=True, deterministic=True, fp16=False):
        super().__init__(plans_file, fold, output_folder, dataset_directory, batch_dice, stage, unpack_data,
                         deterministic, fp16)

    def maybe_update_lr(self, epoch=None):
        """Set the optimizer LR from the fixed schedule.

        :param epoch: epoch index to use; defaults to self.epoch + 1.
        """
        ep = self.epoch + 1 if epoch is None else epoch
        if ep < 0:
            raise RuntimeError("Really unexpected things happened, ep=%d" % ep)
        if ep < 500:
            new_lr = self.initial_lr
        elif ep < 675:
            new_lr = self.initial_lr * 0.1
        else:
            # polyLR restarts at epoch 675, decaying the tenth of the initial LR.
            new_lr = poly_lr(ep - 675, self.max_num_epochs - 675, self.initial_lr * 0.1, 0.9)
        self.optimizer.param_groups[0]['lr'] = new_lr
        self.print_to_log_file("lr:", self.optimizer.param_groups[0]['lr'])
| 1,950
| 39.645833
| 114
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/training/network_training/nnUNet_variants/optimizer_and_lr/nnUNetTrainerV2_momentum09.py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from nnunet.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2
class nnUNetTrainerV2_momentum09(nnUNetTrainerV2):
    """nnUNetTrainerV2 variant that uses SGD momentum 0.9 instead of the default."""

    def initialize_optimizer_and_scheduler(self):
        """Build a nesterov-SGD optimizer (momentum 0.9); lr_scheduler stays None."""
        assert self.network is not None, "self.initialize_network must be called first"
        params = self.network.parameters()
        self.optimizer = torch.optim.SGD(params, self.initial_lr, momentum=0.9,
                                         nesterov=True, weight_decay=self.weight_decay)
        self.lr_scheduler = None
| 1,192
| 43.185185
| 116
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/training/network_training/nnUNet_variants/optimizer_and_lr/nnUNetTrainerV2_Ranger_lr3en4.py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nnunet.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2
from nnunet.training.optimizer.ranger import Ranger
class nnUNetTrainerV2_Ranger_lr3en4(nnUNetTrainerV2):
    """nnUNetTrainerV2 trained with the Ranger optimizer, initial LR 3e-4."""

    def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None,
                 unpack_data=True, deterministic=True, fp16=False):
        super().__init__(plans_file, fold, output_folder, dataset_directory,
                         batch_dice, stage, unpack_data, deterministic, fp16)
        self.initial_lr = 3e-4

    def initialize_optimizer_and_scheduler(self):
        """Create the Ranger optimizer (k=6, N_sma_threshhold=5); no scheduler object."""
        self.optimizer = Ranger(self.network.parameters(), self.initial_lr,
                                k=6, N_sma_threshhold=5, weight_decay=self.weight_decay)
        self.lr_scheduler = None
| 1,493
| 45.6875
| 114
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/training/network_training/nnUNet_variants/optimizer_and_lr/nnUNetTrainerV2_fp16.py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nnunet.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2
class nnUNetTrainerV2_fp16(nnUNetTrainerV2):
    """nnUNetTrainerV2 that refuses to be constructed without fp16=True."""

    def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None,
                 unpack_data=True, deterministic=True, fp16=False):
        # This variant exists solely to enforce mixed-precision training.
        assert fp16, "This one only accepts fp16=True"
        super().__init__(plans_file, fold, output_folder, dataset_directory,
                         batch_dice, stage, unpack_data, deterministic, fp16)
| 1,206
| 47.28
| 114
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/training/network_training/nnUNet_variants/optimizer_and_lr/nnUNetTrainerV2_cycleAtEnd.py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nnunet.training.learning_rate.poly_lr import poly_lr
from nnunet.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2
import matplotlib.pyplot as plt
def cycle_lr(current_epoch, cycle_length=100, min_lr=1e-6, max_lr=1e-3):
    """Triangular cyclic learning-rate schedule.

    The LR rises linearly from ``min_lr`` to ``max_lr`` over the first half of
    each cycle and falls back linearly over the second half.

    :param current_epoch: epoch counter (wrapped modulo cycle_length)
    :param cycle_length: epochs per full up/down cycle
    :param min_lr: LR at the start/end of a cycle
    :param max_lr: LR at the midpoint of a cycle
    :return: learning rate for this epoch
    """
    half = cycle_length // 2
    pos = current_epoch % cycle_length
    slope = (max_lr - min_lr) / half
    if pos < half:
        return min_lr + slope * pos
    return max_lr - slope * (pos - half)
def plot_cycle_lr():
    """Debug helper: plot 1000 epochs of the cyclic LR schedule.

    Shows the figure and also writes it to a hard-coded developer path
    ("/home/fabian/temp.png") -- clearly a local debugging leftover, not
    meant for general use.
    """
    xvals = list(range(1000))
    yvals = [cycle_lr(i, 100, 1e-6, 1e-3) for i in xvals]
    plt.plot(xvals, yvals)
    plt.show()
    plt.savefig("/home/fabian/temp.png")
    plt.close()
class nnUNetTrainerV2_cycleAtEnd(nnUNetTrainerV2):
    """
    after 1000 epoch, run one iteration through the cycle lr schedule. I want to see if the train loss starts
    increasing again
    """

    def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None,
                 unpack_data=True, deterministic=True, fp16=False):
        super().__init__(plans_file, fold, output_folder, dataset_directory,
                         batch_dice, stage, unpack_data, deterministic, fp16)
        # 1000 regular epochs plus one 100-epoch LR cycle at the end.
        self.max_num_epochs = 1100

    def maybe_update_lr(self, epoch=None):
        """polyLR for the first 1000 epochs, then a triangular LR cycle."""
        ep = self.epoch + 1 if epoch is None else epoch
        if ep < 1000:
            new_lr = poly_lr(ep, 1000, self.initial_lr, 0.9)
        else:
            # we don't go all the way back up to initial lr
            new_lr = cycle_lr(ep, 100, min_lr=1e-6, max_lr=1e-3)
        self.optimizer.param_groups[0]['lr'] = new_lr
        self.print_to_log_file("lr:", new_lr)
class nnUNetTrainerV2_cycleAtEnd2(nnUNetTrainerV2):
    """
    after 1000 epoch, run one iteration through the cycle lr schedule. I want to see if the train loss starts
    increasing again
    """

    def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None,
                 unpack_data=True, deterministic=True, fp16=False):
        super().__init__(plans_file, fold, output_folder, dataset_directory,
                         batch_dice, stage, unpack_data, deterministic, fp16)
        # 1000 regular epochs plus one 200-epoch LR cycle at the end.
        self.max_num_epochs = 1200

    def maybe_update_lr(self, epoch=None):
        """polyLR for the first 1000 epochs, then a wider/higher triangular LR cycle."""
        ep = self.epoch + 1 if epoch is None else epoch
        if ep < 1000:
            new_lr = poly_lr(ep, 1000, self.initial_lr, 0.9)
        else:
            # we don't go all the way back up to initial lr
            new_lr = cycle_lr(ep, 200, min_lr=1e-6, max_lr=1e-2)
        self.optimizer.param_groups[0]['lr'] = new_lr
        self.print_to_log_file("lr:", new_lr)
| 3,693
| 40.044444
| 114
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/training/network_training/nnUNet_variants/optimizer_and_lr/nnUNetTrainerV2_reduceMomentumDuringTraining.py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from nnunet.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2
class nnUNetTrainerV2_reduceMomentumDuringTraining(nnUNetTrainerV2):
    """
    This implementation will not work with LR scheduler!!!!!!!!!!
    After epoch 800, linearly decrease momentum from 0.99 to 0.9
    """

    def initialize_optimizer_and_scheduler(self):
        """Create the SGD optimizer, or update its momentum in place.

        Called at setup and again from on_epoch_end, so the momentum tracks
        the current epoch: 0.99 until epoch 800, then linearly down to 0.9
        over the next 200 epochs.
        """
        current_momentum = 0.99
        min_momentum = 0.9
        if self.epoch > 800:
            current_momentum = current_momentum - (current_momentum - min_momentum) / 200 * (self.epoch - 800)
        self.print_to_log_file("current momentum", current_momentum)
        assert self.network is not None, "self.initialize_network must be called first"
        if self.optimizer is None:
            # Fix: use the momentum computed for the current epoch rather than a
            # hard-coded 0.99. The two only coincide when the optimizer is first
            # built at epoch <= 800; with a late (re)build the hard-coded value
            # would silently undo the schedule.
            self.optimizer = torch.optim.SGD(self.network.parameters(), self.initial_lr,
                                             weight_decay=self.weight_decay,
                                             momentum=current_momentum, nesterov=True)
        else:
            # can't reinstantiate because that would break NVIDIA AMP
            self.optimizer.param_groups[0]["momentum"] = current_momentum
        self.lr_scheduler = None

    def on_epoch_end(self):
        """Refresh the optimizer momentum before the usual end-of-epoch bookkeeping."""
        self.initialize_optimizer_and_scheduler()
        return super().on_epoch_end()
| 1,947
| 40.446809
| 120
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/training/network_training/nnUNet_variants/optimizer_and_lr/nnUNetTrainerV2_SGD_ReduceOnPlateau.py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from nnunet.training.network_training.nnUNetTrainer import nnUNetTrainer
from nnunet.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2
from torch.optim import lr_scheduler
class nnUNetTrainerV2_SGD_ReduceOnPlateau(nnUNetTrainerV2):
    """SGD (momentum 0.99, nesterov) combined with a ReduceLROnPlateau scheduler
    driven by the training-loss moving average."""

    def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None,
                 unpack_data=True, deterministic=True, fp16=False):
        super().__init__(plans_file, fold, output_folder, dataset_directory, batch_dice, stage, unpack_data,
                         deterministic, fp16)

    def initialize_optimizer_and_scheduler(self):
        """Build the SGD optimizer and a ReduceLROnPlateau scheduler (factor 0.2)."""
        self.optimizer = torch.optim.SGD(self.network.parameters(), self.initial_lr, weight_decay=self.weight_decay,
                                         momentum=0.99, nesterov=True)
        self.lr_scheduler = lr_scheduler.ReduceLROnPlateau(self.optimizer, mode='min', factor=0.2,
                                                           patience=self.lr_scheduler_patience,
                                                           verbose=True, threshold=self.lr_scheduler_eps,
                                                           threshold_mode="abs")

    def maybe_update_lr(self, epoch=None):
        """Step the scheduler: plateau schedulers get the loss moving average,
        epoch-based schedulers get the epoch index."""
        if self.lr_scheduler is not None:
            assert isinstance(self.lr_scheduler, (lr_scheduler.ReduceLROnPlateau, lr_scheduler._LRScheduler))
            if isinstance(self.lr_scheduler, lr_scheduler.ReduceLROnPlateau):
                # lr scheduler is updated with moving average val loss. should be more robust
                # Fix: also guard against train_loss_MA being None (matches the
                # sibling nnUNetTrainerV2_Adam_ReduceOnPlateau implementation);
                # stepping the plateau scheduler with None would fail.
                if self.epoch > 0 and self.train_loss_MA is not None:
                    self.lr_scheduler.step(self.train_loss_MA)
            else:
                self.lr_scheduler.step(self.epoch + 1)
            self.print_to_log_file("lr is now (scheduler) %s" % str(self.optimizer.param_groups[0]['lr']))

    def on_epoch_end(self):
        """Use the plain nnUNetTrainer epoch-end hook (skip nnUNetTrainerV2's override)."""
        return nnUNetTrainer.on_epoch_end(self)
| 2,707
| 52.098039
| 116
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/training/network_training/nnUNet_variants/optimizer_and_lr/nnUNetTrainerV2_Ranger_lr1en2.py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nnunet.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2
from nnunet.training.optimizer.ranger import Ranger
class nnUNetTrainerV2_Ranger_lr1en2(nnUNetTrainerV2):
    """nnUNetTrainerV2 trained with the Ranger optimizer, initial LR 1e-2."""

    def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None,
                 unpack_data=True, deterministic=True, fp16=False):
        super().__init__(plans_file, fold, output_folder, dataset_directory,
                         batch_dice, stage, unpack_data, deterministic, fp16)
        self.initial_lr = 1e-2

    def initialize_optimizer_and_scheduler(self):
        """Create the Ranger optimizer (k=6, N_sma_threshhold=5); no scheduler object."""
        self.optimizer = Ranger(self.network.parameters(), self.initial_lr,
                                k=6, N_sma_threshhold=5, weight_decay=self.weight_decay)
        self.lr_scheduler = None
| 1,493
| 45.6875
| 114
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/training/network_training/nnUNet_variants/optimizer_and_lr/nnUNetTrainerV2_momentum09in2D.py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from nnunet.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2
class nnUNetTrainerV2_momentum09in2D(nnUNetTrainerV2):
    """SGD momentum 0.9 for 2D networks; keeps 0.99 for 3D networks."""

    def initialize_optimizer_and_scheduler(self):
        """Pick momentum by dimensionality, then build nesterov-SGD; no scheduler."""
        assert self.network is not None, "self.initialize_network must be called first"
        momentum = 0.99 if self.threeD else 0.9
        self.optimizer = torch.optim.SGD(self.network.parameters(), self.initial_lr,
                                         momentum=momentum, nesterov=True,
                                         weight_decay=self.weight_decay)
        self.lr_scheduler = None
| 1,293
| 42.133333
| 116
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/training/network_training/nnUNet_variants/optimizer_and_lr/nnUNetTrainerV2_Adam_lr_3en4.py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nnunet.training.network_training.nnUNet_variants.optimizer_and_lr.nnUNetTrainerV2_Adam import nnUNetTrainerV2_Adam
class nnUNetTrainerV2_Adam_nnUNetTrainerlr(nnUNetTrainerV2_Adam):
    """Adam trainer with the initial learning rate lowered to 3e-4.

    NOTE(review): the class name does not match the module name
    (nnUNetTrainerV2_Adam_lr_3en4); kept as-is since callers reference it.
    """

    def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None,
                 unpack_data=True, deterministic=True, fp16=False):
        super().__init__(plans_file, fold, output_folder, dataset_directory,
                         batch_dice, stage, unpack_data, deterministic, fp16)
        self.initial_lr = 3e-4
| 1,246
| 48.88
| 119
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/training/network_training/nnUNet_variants/optimizer_and_lr/nnUNetTrainerV2_momentum095.py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from nnunet.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2
class nnUNetTrainerV2_momentum095(nnUNetTrainerV2):
    """nnUNetTrainerV2 variant that uses SGD momentum 0.95."""

    def initialize_optimizer_and_scheduler(self):
        """Build a nesterov-SGD optimizer (momentum 0.95); lr_scheduler stays None."""
        assert self.network is not None, "self.initialize_network must be called first"
        params = self.network.parameters()
        self.optimizer = torch.optim.SGD(params, self.initial_lr, momentum=0.95,
                                         nesterov=True, weight_decay=self.weight_decay)
        self.lr_scheduler = None
| 1,194
| 43.259259
| 116
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/training/network_training/nnUNet_variants/optimizer_and_lr/nnUNetTrainerV2_warmup.py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nnunet.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2
class nnUNetTrainerV2_warmup(nnUNetTrainerV2):
    """nnUNetTrainerV2 with a 50-epoch linear LR warmup prepended to the schedule."""

    def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None,
                 unpack_data=True, deterministic=True, fp16=False):
        super().__init__(plans_file, fold, output_folder, dataset_directory, batch_dice, stage, unpack_data,
                         deterministic, fp16)
        # 50 warmup epochs on top of the usual training length.
        self.max_num_epochs = 1050

    def maybe_update_lr(self, epoch=None):
        """Warmup linearly to initial_lr over the first 50 epochs, then defer
        to the parent schedule with the epoch counter shifted by 49.

        NOTE(review): the warmup branch keys off ``self.epoch`` even when an
        explicit ``epoch`` argument is supplied -- confirm callers never pass
        ``epoch`` during the warmup window.
        """
        if self.epoch < 50:
            # epoch 49 is max
            # we increase lr linearly from 0 to initial_lr
            lr = (self.epoch + 1) / 50 * self.initial_lr
            self.optimizer.param_groups[0]['lr'] = lr
            self.print_to_log_file("epoch:", self.epoch, "lr:", lr)
        else:
            # Shift so the parent schedule starts at epoch 1 right after warmup.
            if epoch is not None:
                ep = epoch - 49
            else:
                ep = self.epoch - 49
            assert ep > 0, "epoch must be >0"
            return super().maybe_update_lr(ep)
| 1,756
| 42.925
| 114
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/training/network_training/nnUNet_variants/optimizer_and_lr/nnUNetTrainerV2_Adam_ReduceOnPlateau.py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from nnunet.training.network_training.nnUNetTrainer import nnUNetTrainer
from nnunet.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2
from torch.optim import lr_scheduler
class nnUNetTrainerV2_Adam_ReduceOnPlateau(nnUNetTrainerV2):
    """
    Same schedule as nnUNetTrainer
    """

    def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None,
                 unpack_data=True, deterministic=True, fp16=False):
        super().__init__(plans_file, fold, output_folder, dataset_directory, batch_dice, stage, unpack_data,
                         deterministic, fp16)
        self.initial_lr = 3e-4

    def initialize_optimizer_and_scheduler(self):
        """Build amsgrad-Adam plus a ReduceLROnPlateau scheduler (factor 0.2)."""
        assert self.network is not None, "self.initialize_network must be called first"
        self.optimizer = torch.optim.Adam(self.network.parameters(), self.initial_lr,
                                          weight_decay=self.weight_decay, amsgrad=True)
        self.lr_scheduler = lr_scheduler.ReduceLROnPlateau(self.optimizer, mode='min', factor=0.2,
                                                           patience=self.lr_scheduler_patience,
                                                           verbose=True, threshold=self.lr_scheduler_eps,
                                                           threshold_mode="abs")

    def maybe_update_lr(self, epoch=None):
        """Step the scheduler: plateau schedulers get the loss moving average,
        epoch-based schedulers get the epoch index."""
        sched = self.lr_scheduler
        if sched is None:
            return
        assert isinstance(sched, (lr_scheduler.ReduceLROnPlateau, lr_scheduler._LRScheduler))
        if isinstance(sched, lr_scheduler.ReduceLROnPlateau):
            # lr scheduler is updated with moving average val loss. should be more robust
            if self.epoch > 0 and self.train_loss_MA is not None:  # otherwise self.train_loss_MA is None
                sched.step(self.train_loss_MA)
        else:
            sched.step(self.epoch + 1)
        self.print_to_log_file("lr is now (scheduler) %s" % str(self.optimizer.param_groups[0]['lr']))

    def on_epoch_end(self):
        """Use the plain nnUNetTrainer epoch-end hook (skip nnUNetTrainerV2's override)."""
        return nnUNetTrainer.on_epoch_end(self)
| 2,899
| 50.785714
| 117
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/training/network_training/nnUNet_variants/optimizer_and_lr/nnUNetTrainerV2_Ranger_lr3en3.py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nnunet.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2
from nnunet.training.optimizer.ranger import Ranger
class nnUNetTrainerV2_Ranger_lr3en3(nnUNetTrainerV2):
    """nnUNetTrainerV2 trained with the Ranger optimizer, initial LR 3e-3."""

    def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None,
                 unpack_data=True, deterministic=True, fp16=False):
        super().__init__(plans_file, fold, output_folder, dataset_directory,
                         batch_dice, stage, unpack_data, deterministic, fp16)
        self.initial_lr = 3e-3

    def initialize_optimizer_and_scheduler(self):
        """Create the Ranger optimizer (k=6, N_sma_threshhold=5); no scheduler object."""
        self.optimizer = Ranger(self.network.parameters(), self.initial_lr,
                                k=6, N_sma_threshhold=5, weight_decay=self.weight_decay)
        self.lr_scheduler = None
| 1,493
| 45.6875
| 114
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/training/network_training/nnUNet_variants/optimizer_and_lr/nnUNetTrainerV2_SGD_lrs.py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nnunet.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2
class nnUNetTrainerV2_SGD_lr1en1(nnUNetTrainerV2):
    """nnUNetTrainerV2 with the initial SGD learning rate raised to 1e-1."""

    def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None,
                 unpack_data=True, deterministic=True, fp16=False):
        super().__init__(plans_file, fold, output_folder, dataset_directory,
                         batch_dice, stage, unpack_data, deterministic, fp16)
        self.initial_lr = 1e-1
class nnUNetTrainerV2_SGD_lr1en3(nnUNetTrainerV2):
    """nnUNetTrainerV2 with the initial SGD learning rate lowered to 1e-3."""

    def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None,
                 unpack_data=True, deterministic=True, fp16=False):
        super().__init__(plans_file, fold, output_folder, dataset_directory,
                         batch_dice, stage, unpack_data, deterministic, fp16)
        self.initial_lr = 1e-3
| 1,610
| 46.382353
| 114
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/training/network_training/nnUNet_variants/optimizer_and_lr/nnUNetTrainerV2_momentum098.py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from nnunet.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2
class nnUNetTrainerV2_momentum098(nnUNetTrainerV2):
    """nnUNetTrainerV2 variant that uses SGD momentum 0.98."""

    def initialize_optimizer_and_scheduler(self):
        """Build a nesterov-SGD optimizer (momentum 0.98); lr_scheduler stays None."""
        assert self.network is not None, "self.initialize_network must be called first"
        params = self.network.parameters()
        self.optimizer = torch.optim.SGD(params, self.initial_lr, momentum=0.98,
                                         nesterov=True, weight_decay=self.weight_decay)
        self.lr_scheduler = None
| 1,194
| 43.259259
| 116
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/training/network_training/nnUNet_variants/optimizer_and_lr/nnUNetTrainerV2_Adam.py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from nnunet.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2
class nnUNetTrainerV2_Adam(nnUNetTrainerV2):
    """nnUNetTrainerV2 variant that optimizes with amsgrad-Adam instead of SGD."""

    def initialize_optimizer_and_scheduler(self):
        """Create the amsgrad-Adam optimizer; lr_scheduler stays None."""
        params = self.network.parameters()
        self.optimizer = torch.optim.Adam(params, self.initial_lr,
                                          weight_decay=self.weight_decay, amsgrad=True)
        self.lr_scheduler = None
# Aliases that register the same Adam trainer under four extra names
# (presumably so several independent runs can be launched by trainer name --
# TODO confirm against the experiment launcher).
nnUNetTrainerV2_Adam_copy1 = nnUNetTrainerV2_Adam
nnUNetTrainerV2_Adam_copy2 = nnUNetTrainerV2_Adam
nnUNetTrainerV2_Adam_copy3 = nnUNetTrainerV2_Adam
nnUNetTrainerV2_Adam_copy4 = nnUNetTrainerV2_Adam
| 1,245
| 39.193548
| 131
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/training/network_training/nnUNet_variants/optimizer_and_lr/nnUNetTrainerV2_SGD_fixedSchedule.py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nnunet.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2
class nnUNetTrainerV2_SGD_fixedSchedule(nnUNetTrainerV2):
    """Fixed step LR schedule: full LR until epoch 500, x0.1 until 675,
    x0.01 until 850, x0.001 thereafter."""

    def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None,
                 unpack_data=True, deterministic=True, fp16=False):
        super().__init__(plans_file, fold, output_folder, dataset_directory, batch_dice, stage, unpack_data,
                         deterministic, fp16)

    def maybe_update_lr(self, epoch=None):
        """Set the optimizer LR from the fixed schedule.

        :param epoch: epoch index to use; defaults to self.epoch + 1.
        """
        ep = self.epoch + 1 if epoch is None else epoch
        if ep < 0:
            raise RuntimeError("Really unexpected things happened, ep=%d" % ep)
        if ep < 500:
            new_lr = self.initial_lr
        elif ep < 675:
            new_lr = self.initial_lr * 0.1
        elif ep < 850:
            new_lr = self.initial_lr * 0.01
        else:
            new_lr = self.initial_lr * 0.001
        self.optimizer.param_groups[0]['lr'] = new_lr
        self.print_to_log_file("lr:", self.optimizer.param_groups[0]['lr'])
| 1,808
| 40.113636
| 114
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/training/network_training/nnUNet_variants/data_augmentation/nnUNetTrainerV2_insaneDA.py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from batchgenerators.utilities.file_and_folder_operations import join, maybe_mkdir_p
from nnunet.network_architecture.neural_network import SegmentationNetwork
from nnunet.training.data_augmentation.default_data_augmentation import default_3D_augmentation_params, \
default_2D_augmentation_params, get_patch_size, get_insaneDA_augmentation
from nnunet.training.dataloading.dataset_loading import unpack_dataset
from nnunet.training.loss_functions.deep_supervision import MultipleOutputLoss2
from nnunet.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2
from torch import nn
class nnUNetTrainerV2_insaneDA(nnUNetTrainerV2):
    """nnUNetTrainerV2 variant with more aggressive data augmentation:
    compared to the parent it widens the scale range to (0.65, 1.6),
    always enables elastic deformation (alpha up to 1300) and widens the
    gamma range to (0.6, 2)."""

    def setup_DA_params(self):
        """Populate self.data_aug_params for the 'insane' augmentation pipeline.

        Largely mirrors nnUNetTrainerV2.setup_DA_params, then overrides the
        scale/elastic/gamma settings at the end.
        """
        # One scale per deep-supervision output; derived from the cumulative
        # product of the pooling kernel sizes (lowest resolution dropped).
        self.deep_supervision_scales = [[1, 1, 1]] + list(list(i) for i in 1 / np.cumprod(
            np.vstack(self.net_num_pool_op_kernel_sizes), axis=0))[:-1]
        if self.threeD:
            self.data_aug_params = default_3D_augmentation_params
            # +/- 30 degrees (expressed in radians) around each axis
            self.data_aug_params['rotation_x'] = (-30. / 360 * 2. * np.pi, 30. / 360 * 2. * np.pi)
            self.data_aug_params['rotation_y'] = (-30. / 360 * 2. * np.pi, 30. / 360 * 2. * np.pi)
            self.data_aug_params['rotation_z'] = (-30. / 360 * 2. * np.pi, 30. / 360 * 2. * np.pi)
            if self.do_dummy_2D_aug:
                # anisotropic data: augment slice-wise with 2D parameters
                self.data_aug_params["dummy_2D"] = True
                self.print_to_log_file("Using dummy2d data augmentation")
                self.data_aug_params["elastic_deform_alpha"] = \
                    default_2D_augmentation_params["elastic_deform_alpha"]
                self.data_aug_params["elastic_deform_sigma"] = \
                    default_2D_augmentation_params["elastic_deform_sigma"]
                self.data_aug_params["rotation_x"] = default_2D_augmentation_params["rotation_x"]
        else:
            self.do_dummy_2D_aug = False
            # NOTE(review): this mutates the module-level default dict for
            # strongly anisotropic 2D patches — shared across trainers.
            if max(self.patch_size) / min(self.patch_size) > 1.5:
                default_2D_augmentation_params['rotation_x'] = (-180. / 360 * 2. * np.pi, 180. / 360 * 2. * np.pi)
            self.data_aug_params = default_2D_augmentation_params
        self.data_aug_params["mask_was_used_for_normalization"] = self.use_mask_for_norm
        if self.do_dummy_2D_aug:
            # Sample larger in-plane patches so rotations/scalings don't
            # introduce border artifacts; first (through-plane) axis untouched.
            self.basic_generator_patch_size = get_patch_size(self.patch_size[1:],
                                                             self.data_aug_params['rotation_x'],
                                                             self.data_aug_params['rotation_y'],
                                                             self.data_aug_params['rotation_z'],
                                                             self.data_aug_params['scale_range'])
            self.basic_generator_patch_size = np.array([self.patch_size[0]] + list(self.basic_generator_patch_size))
            patch_size_for_spatialtransform = self.patch_size[1:]
        else:
            self.basic_generator_patch_size = get_patch_size(self.patch_size, self.data_aug_params['rotation_x'],
                                                             self.data_aug_params['rotation_y'],
                                                             self.data_aug_params['rotation_z'],
                                                             self.data_aug_params['scale_range'])
            patch_size_for_spatialtransform = self.patch_size
        # "insane" overrides: wider scaling, strong always-on elastic
        # deformation, wider gamma range.
        self.data_aug_params["scale_range"] = (0.65, 1.6)
        self.data_aug_params["do_elastic"] = True
        self.data_aug_params["elastic_deform_alpha"] = (0., 1300.)
        self.data_aug_params["elastic_deform_sigma"] = (9., 15.)
        self.data_aug_params["p_eldef"] = 0.2
        self.data_aug_params['selected_seg_channels'] = [0]
        self.data_aug_params['gamma_range'] = (0.6, 2)
        self.data_aug_params['patch_size_for_spatialtransform'] = patch_size_for_spatialtransform

    def initialize(self, training=True, force_load_plans=False):
        """Set up plans, deep-supervision loss, data loaders and network.

        Identical to nnUNetTrainerV2.initialize except that the augmentation
        pipeline is created with get_insaneDA_augmentation.  Safe to call
        twice: a second call only logs a message.
        """
        if not self.was_initialized:
            maybe_mkdir_p(self.output_folder)
            if force_load_plans or (self.plans is None):
                self.load_plans_file()
            self.process_plans(self.plans)
            self.setup_DA_params()
            ################# Here we wrap the loss for deep supervision ############
            # we need to know the number of outputs of the network
            net_numpool = len(self.net_num_pool_op_kernel_sizes)
            # we give each output a weight which decreases exponentially (division by 2) as the resolution decreases
            # this gives higher resolution outputs more weight in the loss
            weights = np.array([1 / (2 ** i) for i in range(net_numpool)])
            # we don't use the lowest 2 outputs. Normalize weights so that they sum to 1
            mask = np.array([True if i < net_numpool - 1 else False for i in range(net_numpool)])
            weights[~mask] = 0
            weights = weights / weights.sum()
            # now wrap the loss
            self.loss = MultipleOutputLoss2(self.loss, weights)
            ################# END ###################
            self.folder_with_preprocessed_data = join(self.dataset_directory, self.plans['data_identifier'] +
                                                      "_stage%d" % self.stage)
            if training:
                self.dl_tr, self.dl_val = self.get_basic_generators()
                if self.unpack_data:
                    print("unpacking dataset")
                    unpack_dataset(self.folder_with_preprocessed_data)
                    print("done")
                else:
                    print(
                        "INFO: Not unpacking data! Training may be slow due to that. Pray you are not using 2d or you "
                        "will wait all winter for your model to finish!")
                self.tr_gen, self.val_gen = get_insaneDA_augmentation(self.dl_tr, self.dl_val,
                                                                      self.data_aug_params[
                                                                          'patch_size_for_spatialtransform'],
                                                                      self.data_aug_params,
                                                                      deep_supervision_scales=self.deep_supervision_scales,
                                                                      pin_memory=self.pin_memory)
                self.print_to_log_file("TRAINING KEYS:\n %s" % (str(self.dataset_tr.keys())),
                                       also_print_to_console=False)
                self.print_to_log_file("VALIDATION KEYS:\n %s" % (str(self.dataset_val.keys())),
                                       also_print_to_console=False)
            else:
                pass
            self.initialize_network()
            self.initialize_optimizer_and_scheduler()
            assert isinstance(self.network, (SegmentationNetwork, nn.DataParallel))
        else:
            self.print_to_log_file('self.was_initialized is True, not running self.initialize again')
        self.was_initialized = True
| 7,799
| 54.319149
| 123
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/training/network_training/nnUNet_variants/data_augmentation/nnUNetTrainerV2_noDA.py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Tuple
import numpy as np
from batchgenerators.utilities.file_and_folder_operations import join, maybe_mkdir_p
from nnunet.network_architecture.neural_network import SegmentationNetwork
from nnunet.training.data_augmentation.default_data_augmentation import get_no_augmentation
from nnunet.training.dataloading.dataset_loading import unpack_dataset, DataLoader3D, DataLoader2D
from nnunet.training.loss_functions.deep_supervision import MultipleOutputLoss2
from nnunet.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2
from torch import nn
class nnUNetTrainerV2_noDataAugmentation(nnUNetTrainerV2):
    """nnUNetTrainerV2 variant that trains without any data augmentation.

    Mirroring is switched off (and recorded in data_aug_params so that
    validation/inference know not to mirror), and the data loaders sample
    patches directly at the final patch size since no spatial transform
    needs a margin.
    """

    def setup_DA_params(self):
        super().setup_DA_params()
        # important because we need to know in validation and inference that we did not mirror in training
        self.data_aug_params["do_mirror"] = False
        self.data_aug_params["mirror_axes"] = tuple()

    def get_basic_generators(self):
        """Create the train/val data loaders.

        Unlike the parent, both the sampled and the final patch size are
        self.patch_size (no augmentation -> no oversized sampling needed).
        The extra ``False`` positional argument presumably disables random
        cropping — confirm against the DataLoader3D/2D signature.
        """
        self.load_dataset()
        self.do_split()
        if self.threeD:
            dl_tr = DataLoader3D(self.dataset_tr, self.patch_size, self.patch_size, self.batch_size,
                                 False, oversample_foreground_percent=self.oversample_foreground_percent
                                 , pad_mode="constant", pad_sides=self.pad_all_sides)
            dl_val = DataLoader3D(self.dataset_val, self.patch_size, self.patch_size, self.batch_size, False,
                                  oversample_foreground_percent=self.oversample_foreground_percent,
                                  pad_mode="constant", pad_sides=self.pad_all_sides)
        else:
            dl_tr = DataLoader2D(self.dataset_tr, self.patch_size, self.patch_size, self.batch_size,
                                 transpose=self.plans.get('transpose_forward'),
                                 oversample_foreground_percent=self.oversample_foreground_percent
                                 , pad_mode="constant", pad_sides=self.pad_all_sides)
            dl_val = DataLoader2D(self.dataset_val, self.patch_size, self.patch_size, self.batch_size,
                                  transpose=self.plans.get('transpose_forward'),
                                  oversample_foreground_percent=self.oversample_foreground_percent,
                                  pad_mode="constant", pad_sides=self.pad_all_sides)
        return dl_tr, dl_val

    def initialize(self, training=True, force_load_plans=False):
        """Set up plans, deep-supervision loss, data loaders and network.

        Identical to nnUNetTrainerV2.initialize except that the (trivial)
        pipeline is created with get_no_augmentation.  Safe to call twice:
        a second call only logs a message.
        """
        if not self.was_initialized:
            maybe_mkdir_p(self.output_folder)
            if force_load_plans or (self.plans is None):
                self.load_plans_file()
            self.process_plans(self.plans)
            self.setup_DA_params()
            ################# Here we wrap the loss for deep supervision ############
            # we need to know the number of outputs of the network
            net_numpool = len(self.net_num_pool_op_kernel_sizes)
            # we give each output a weight which decreases exponentially (division by 2) as the resolution decreases
            # this gives higher resolution outputs more weight in the loss
            weights = np.array([1 / (2 ** i) for i in range(net_numpool)])
            # we don't use the lowest 2 outputs. Normalize weights so that they sum to 1
            mask = np.array([True if i < net_numpool - 1 else False for i in range(net_numpool)])
            weights[~mask] = 0
            weights = weights / weights.sum()
            # now wrap the loss
            self.loss = MultipleOutputLoss2(self.loss, weights)
            ################# END ###################
            self.folder_with_preprocessed_data = join(self.dataset_directory, self.plans['data_identifier'] +
                                                      "_stage%d" % self.stage)
            if training:
                self.dl_tr, self.dl_val = self.get_basic_generators()
                if self.unpack_data:
                    print("unpacking dataset")
                    unpack_dataset(self.folder_with_preprocessed_data)
                    print("done")
                else:
                    print(
                        "INFO: Not unpacking data! Training may be slow due to that. Pray you are not using 2d or you "
                        "will wait all winter for your model to finish!")
                self.tr_gen, self.val_gen = get_no_augmentation(self.dl_tr, self.dl_val,
                                                                self.data_aug_params[
                                                                    'patch_size_for_spatialtransform'],
                                                                self.data_aug_params,
                                                                deep_supervision_scales=self.deep_supervision_scales,
                                                                pin_memory=self.pin_memory)
                self.print_to_log_file("TRAINING KEYS:\n %s" % (str(self.dataset_tr.keys())),
                                       also_print_to_console=False)
                self.print_to_log_file("VALIDATION KEYS:\n %s" % (str(self.dataset_val.keys())),
                                       also_print_to_console=False)
            else:
                pass
            self.initialize_network()
            self.initialize_optimizer_and_scheduler()
            assert isinstance(self.network, (SegmentationNetwork, nn.DataParallel))
        else:
            self.print_to_log_file('self.was_initialized is True, not running self.initialize again')
        self.was_initialized = True

    def validate(self, do_mirroring: bool = True, use_sliding_window: bool = True,
                 step_size: float = 0.5, save_softmax: bool = True, use_gaussian: bool = True, overwrite: bool = True,
                 validation_folder_name: str = 'validation_raw', debug: bool = False, all_in_gpu: bool = False,
                 segmentation_export_kwargs: dict = None):
        """
        We need to wrap this because we need to enforce self.network.do_ds = False for prediction
        """
        # Also force do_mirroring off: the model never saw mirrored data.
        ds = self.network.do_ds
        if do_mirroring:
            print("WARNING! do_mirroring was True but we cannot do that because we trained without mirroring. "
                  "do_mirroring was set to False")
        do_mirroring = False
        self.network.do_ds = False
        ret = super().validate(do_mirroring=do_mirroring, use_sliding_window=use_sliding_window, step_size=step_size,
                               save_softmax=save_softmax, use_gaussian=use_gaussian,
                               overwrite=overwrite, validation_folder_name=validation_folder_name, debug=debug,
                               all_in_gpu=all_in_gpu, segmentation_export_kwargs=segmentation_export_kwargs)
        # restore deep-supervision flag for continued training
        self.network.do_ds = ds
        return ret
# Aliases for the same trainer class — presumably so that several identically
# configured runs can be launched under distinct trainer names (e.g. for
# repeated experiments); verify against the nnUNet run scripts.
nnUNetTrainerV2_noDataAugmentation_copy1 = nnUNetTrainerV2_noDataAugmentation
nnUNetTrainerV2_noDataAugmentation_copy2 = nnUNetTrainerV2_noDataAugmentation
nnUNetTrainerV2_noDataAugmentation_copy3 = nnUNetTrainerV2_noDataAugmentation
nnUNetTrainerV2_noDataAugmentation_copy4 = nnUNetTrainerV2_noDataAugmentation
| 7,886
| 53.770833
| 121
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/training/network_training/nnUNet_variants/data_augmentation/nnUNetTrainerV2_DA2.py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nnunet.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2
class nnUNetTrainerV2_DA2(nnUNetTrainerV2):
    """nnUNetTrainerV2 variant with slightly modified augmentation: per-axis
    scale factors, additive brightness, and (in 3D) a reduced per-axis
    rotation probability."""

    def setup_DA_params(self):
        """Extend the parent augmentation parameters with the DA2 settings."""
        super().setup_DA_params()
        params = self.data_aug_params
        params["independent_scale_factor_for_each_axis"] = True
        # In 3D each axis is rotated only half of the time; in 2D always.
        params["rotation_p_per_axis"] = 0.5 if self.threeD else 1
        params["do_additive_brightness"] = True
| 1,182
| 35.96875
| 114
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/training/network_training/nnUNet_variants/data_augmentation/nnUNetTrainerV2_DA3.py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from batchgenerators.dataloading import MultiThreadedAugmenter, SingleThreadedAugmenter
from batchgenerators.transforms import Compose, MirrorTransform, GammaTransform, BrightnessTransform, \
SimulateLowResolutionTransform, ContrastAugmentationTransform, BrightnessMultiplicativeTransform, \
GaussianBlurTransform, GaussianNoiseTransform, SegChannelSelectionTransform, \
DataChannelSelectionTransform
from batchgenerators.transforms.spatial_transforms import SpatialTransform_2
from batchgenerators.transforms.utility_transforms import RemoveLabelTransform, NumpyToTensor, RenameTransform
from batchgenerators.utilities.file_and_folder_operations import join
from nnunet.network_architecture.generic_UNet import Generic_UNet
from nnunet.network_architecture.initialization import InitWeights_He
from nnunet.network_architecture.neural_network import SegmentationNetwork
from nnunet.training.data_augmentation.custom_transforms import ConvertSegmentationToRegionsTransform, MaskTransform, \
Convert2DTo3DTransform, Convert3DTo2DTransform
from nnunet.training.data_augmentation.default_data_augmentation import default_3D_augmentation_params, \
default_2D_augmentation_params, get_patch_size
from nnunet.training.data_augmentation.downsampling import DownsampleSegForDSTransform3, DownsampleSegForDSTransform2
from nnunet.training.data_augmentation.pyramid_augmentations import \
RemoveRandomConnectedComponentFromOneHotEncodingTransform, ApplyRandomBinaryOperatorTransform, MoveSegAsOneHotToData
from nnunet.training.dataloading.dataset_loading import unpack_dataset
from nnunet.training.loss_functions.deep_supervision import MultipleOutputLoss2
from nnunet.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2, maybe_mkdir_p
from nnunet.utilities.nd_softmax import softmax_helper
from torch import nn
import numpy as np
def get_insaneDA_augmentation2(dataloader_train, dataloader_val, patch_size, params=default_3D_augmentation_params,
                               border_val_seg=-1,
                               seeds_train=None, seeds_val=None, order_seg=1, order_data=3,
                               deep_supervision_scales=None, soft_ds=False,
                               classes=None, pin_memory=True, regions=None):
    """Build the training and validation augmentation pipelines.

    Wraps *dataloader_train* / *dataloader_val* in MultiThreadedAugmenters
    whose transform chains are configured from *params* (spatial transform
    with optional dummy-2D handling, noise/blur/brightness/contrast/gamma,
    mirroring, cascade-specific label augmentations, deep-supervision
    downsampling). Validation only gets the label-handling transforms.

    Returns:
        (batchgenerator_train, batchgenerator_val)
    """
    assert params.get('mirror') is None, "old version of params, use new keyword do_mirror"
    tr_transforms = []
    if params.get("selected_data_channels") is not None:
        tr_transforms.append(DataChannelSelectionTransform(params.get("selected_data_channels")))
    if params.get("selected_seg_channels") is not None:
        tr_transforms.append(SegChannelSelectionTransform(params.get("selected_seg_channels")))
    # don't do color augmentations while in 2d mode with 3d data because the color channel is overloaded!!
    if params.get("dummy_2D") is not None and params.get("dummy_2D"):
        ignore_axes = (0,)
        tr_transforms.append(Convert3DTo2DTransform())
    else:
        ignore_axes = None
    tr_transforms.append(SpatialTransform_2(
        patch_size, patch_center_dist_from_border=None, do_elastic_deform=params.get("do_elastic"),
        deformation_scale=params.get("eldef_deformation_scale"),
        do_rotation=params.get("do_rotation"), angle_x=params.get("rotation_x"), angle_y=params.get("rotation_y"),
        angle_z=params.get("rotation_z"), do_scale=params.get("do_scaling"), scale=params.get("scale_range"),
        border_mode_data=params.get("border_mode_data"), border_cval_data=0, order_data=order_data,
        border_mode_seg="constant", border_cval_seg=border_val_seg,
        order_seg=order_seg, random_crop=params.get("random_crop"), p_el_per_sample=params.get("p_eldef"),
        p_scale_per_sample=params.get("p_scale"), p_rot_per_sample=params.get("p_rot"),
        independent_scale_for_each_axis=params.get("independent_scale_factor_for_each_axis"),
        p_independent_scale_per_axis=params.get("p_independent_scale_per_axis")
    ))
    if params.get("dummy_2D"):
        tr_transforms.append(Convert2DTo3DTransform())
    # we need to put the color augmentations after the dummy 2d part (if applicable). Otherwise the overloaded color
    # channel gets in the way
    tr_transforms.append(GaussianNoiseTransform(p_per_sample=0.15))
    tr_transforms.append(GaussianBlurTransform((0.5, 1.5), different_sigma_per_channel=True, p_per_sample=0.2,
                                               p_per_channel=0.5))
    tr_transforms.append(BrightnessMultiplicativeTransform(multiplier_range=(0.70, 1.3), p_per_sample=0.15))
    tr_transforms.append(ContrastAugmentationTransform(contrast_range=(0.65, 1.5), p_per_sample=0.15))
    tr_transforms.append(SimulateLowResolutionTransform(zoom_range=(0.5, 1), per_channel=True,
                                                        p_per_channel=0.5,
                                                        order_downsample=0, order_upsample=3, p_per_sample=0.25,
                                                        ignore_axes=ignore_axes))
    tr_transforms.append(
        GammaTransform(params.get("gamma_range"), True, True, retain_stats=params.get("gamma_retain_stats"),
                       p_per_sample=0.15))  # inverted gamma
    if params.get("do_additive_brightness"):
        tr_transforms.append(BrightnessTransform(params.get("additive_brightness_mu"),
                                                 params.get("additive_brightness_sigma"),
                                                 True, p_per_sample=params.get("additive_brightness_p_per_sample"),
                                                 p_per_channel=params.get("additive_brightness_p_per_channel")))
    if params.get("do_gamma"):
        tr_transforms.append(
            GammaTransform(params.get("gamma_range"), False, True, retain_stats=params.get("gamma_retain_stats"),
                           p_per_sample=params["p_gamma"]))
    if params.get("do_mirror") or params.get("mirror"):
        tr_transforms.append(MirrorTransform(params.get("mirror_axes")))
    if params.get("mask_was_used_for_normalization") is not None:
        mask_was_used_for_normalization = params.get("mask_was_used_for_normalization")
        tr_transforms.append(MaskTransform(mask_was_used_for_normalization, mask_idx_in_seg=0, set_outside_to=0))
    tr_transforms.append(RemoveLabelTransform(-1, 0))
    if params.get("move_last_seg_chanel_to_data") is not None and params.get("move_last_seg_chanel_to_data"):
        tr_transforms.append(MoveSegAsOneHotToData(1, params.get("all_segmentation_labels"), 'seg', 'data'))
    # fixed typo: this previously read "... and not None and ...", which was a
    # no-op (`not None` is always True); the intended check is `is not None`.
    if params.get("cascade_do_cascade_augmentations") is not None and params.get(
            "cascade_do_cascade_augmentations"):
        if params.get("cascade_random_binary_transform_p") > 0:
            tr_transforms.append(ApplyRandomBinaryOperatorTransform(
                channel_idx=list(range(-len(params.get("all_segmentation_labels")), 0)),
                p_per_sample=params.get("cascade_random_binary_transform_p"),
                key="data",
                strel_size=params.get("cascade_random_binary_transform_size")))
        if params.get("cascade_remove_conn_comp_p") > 0:
            tr_transforms.append(
                RemoveRandomConnectedComponentFromOneHotEncodingTransform(
                    channel_idx=list(range(-len(params.get("all_segmentation_labels")), 0)),
                    key="data",
                    p_per_sample=params.get("cascade_remove_conn_comp_p"),
                    fill_with_other_class_p=params.get("cascade_remove_conn_comp_max_size_percent_threshold"),
                    dont_do_if_covers_more_than_X_percent=params.get(
                        "cascade_remove_conn_comp_fill_with_other_class_p")))
    tr_transforms.append(RenameTransform('seg', 'target', True))
    if regions is not None:
        tr_transforms.append(ConvertSegmentationToRegionsTransform(regions, 'target', 'target'))
    if deep_supervision_scales is not None:
        if soft_ds:
            assert classes is not None
            tr_transforms.append(DownsampleSegForDSTransform3(deep_supervision_scales, 'target', 'target', classes))
        else:
            tr_transforms.append(DownsampleSegForDSTransform2(deep_supervision_scales, 0, 0, input_key='target',
                                                              output_key='target'))
    tr_transforms.append(NumpyToTensor(['data', 'target'], 'float'))
    tr_transforms = Compose(tr_transforms)
    batchgenerator_train = MultiThreadedAugmenter(dataloader_train, tr_transforms, params.get('num_threads'),
                                                  params.get("num_cached_per_thread"),
                                                  seeds=seeds_train, pin_memory=pin_memory)
    #batchgenerator_train = SingleThreadedAugmenter(dataloader_train, tr_transforms)
    val_transforms = []
    val_transforms.append(RemoveLabelTransform(-1, 0))
    if params.get("selected_data_channels") is not None:
        val_transforms.append(DataChannelSelectionTransform(params.get("selected_data_channels")))
    if params.get("selected_seg_channels") is not None:
        val_transforms.append(SegChannelSelectionTransform(params.get("selected_seg_channels")))
    if params.get("move_last_seg_chanel_to_data") is not None and params.get("move_last_seg_chanel_to_data"):
        val_transforms.append(MoveSegAsOneHotToData(1, params.get("all_segmentation_labels"), 'seg', 'data'))
    val_transforms.append(RenameTransform('seg', 'target', True))
    if regions is not None:
        val_transforms.append(ConvertSegmentationToRegionsTransform(regions, 'target', 'target'))
    if deep_supervision_scales is not None:
        if soft_ds:
            assert classes is not None
            val_transforms.append(DownsampleSegForDSTransform3(deep_supervision_scales, 'target', 'target', classes))
        else:
            val_transforms.append(DownsampleSegForDSTransform2(deep_supervision_scales, 0, 0, input_key='target',
                                                               output_key='target'))
    val_transforms.append(NumpyToTensor(['data', 'target'], 'float'))
    val_transforms = Compose(val_transforms)
    # validation uses fewer worker threads than training
    batchgenerator_val = MultiThreadedAugmenter(dataloader_val, val_transforms, max(params.get('num_threads') // 2, 1),
                                                params.get("num_cached_per_thread"),
                                                seeds=seeds_val, pin_memory=pin_memory)
    return batchgenerator_train, batchgenerator_val
class nnUNetTrainerV2_DA3(nnUNetTrainerV2):
    """nnUNetTrainerV2 variant with the 'DA3' augmentation scheme: per-axis
    independent scaling, scale-based elastic deformation, additive
    brightness and adjusted probabilities, using
    get_insaneDA_augmentation2 for the pipeline."""

    def setup_DA_params(self):
        """Populate self.data_aug_params for the DA3 pipeline.

        Calls the parent first, then rebuilds most settings and applies the
        DA3-specific probabilities/ranges at the end.
        """
        super().setup_DA_params()
        # One scale per deep-supervision output; derived from the cumulative
        # product of the pooling kernel sizes (lowest resolution dropped).
        self.deep_supervision_scales = [[1, 1, 1]] + list(list(i) for i in 1 / np.cumprod(
            np.vstack(self.net_num_pool_op_kernel_sizes), axis=0))[:-1]
        if self.threeD:
            self.data_aug_params = default_3D_augmentation_params
            # +/- 30 degrees (expressed in radians) around each axis
            self.data_aug_params['rotation_x'] = (-30. / 360 * 2. * np.pi, 30. / 360 * 2. * np.pi)
            self.data_aug_params['rotation_y'] = (-30. / 360 * 2. * np.pi, 30. / 360 * 2. * np.pi)
            self.data_aug_params['rotation_z'] = (-30. / 360 * 2. * np.pi, 30. / 360 * 2. * np.pi)
            if self.do_dummy_2D_aug:
                # anisotropic data: augment slice-wise with 2D parameters
                self.data_aug_params["dummy_2D"] = True
                self.print_to_log_file("Using dummy2d data augmentation")
                self.data_aug_params["elastic_deform_alpha"] = \
                    default_2D_augmentation_params["elastic_deform_alpha"]
                self.data_aug_params["elastic_deform_sigma"] = \
                    default_2D_augmentation_params["elastic_deform_sigma"]
                self.data_aug_params["rotation_x"] = default_2D_augmentation_params["rotation_x"]
        else:
            self.do_dummy_2D_aug = False
            # NOTE(review): mutates the module-level default dict for strongly
            # anisotropic 2D patches — shared across trainers.
            if max(self.patch_size) / min(self.patch_size) > 1.5:
                default_2D_augmentation_params['rotation_x'] = (-180. / 360 * 2. * np.pi, 180. / 360 * 2. * np.pi)
            self.data_aug_params = default_2D_augmentation_params
        self.data_aug_params["mask_was_used_for_normalization"] = self.use_mask_for_norm
        if self.do_dummy_2D_aug:
            # Sample larger in-plane patches so rotations/scalings don't
            # introduce border artifacts; first (through-plane) axis untouched.
            self.basic_generator_patch_size = get_patch_size(self.patch_size[1:],
                                                             self.data_aug_params['rotation_x'],
                                                             self.data_aug_params['rotation_y'],
                                                             self.data_aug_params['rotation_z'],
                                                             self.data_aug_params['scale_range'])
            self.basic_generator_patch_size = np.array([self.patch_size[0]] + list(self.basic_generator_patch_size))
            patch_size_for_spatialtransform = self.patch_size[1:]
        else:
            self.basic_generator_patch_size = get_patch_size(self.patch_size, self.data_aug_params['rotation_x'],
                                                             self.data_aug_params['rotation_y'],
                                                             self.data_aug_params['rotation_z'],
                                                             self.data_aug_params['scale_range'])
            patch_size_for_spatialtransform = self.patch_size
        self.data_aug_params['selected_seg_channels'] = [0]
        self.data_aug_params['patch_size_for_spatialtransform'] = patch_size_for_spatialtransform
        # DA3-specific settings: probabilities, per-axis scaling, scale-based
        # elastic deformation and additive brightness.
        self.data_aug_params["p_rot"] = 0.3
        self.data_aug_params["scale_range"] = (0.65, 1.6)
        self.data_aug_params["p_scale"] = 0.3
        self.data_aug_params["independent_scale_factor_for_each_axis"] = True
        self.data_aug_params["p_independent_scale_per_axis"] = 0.3
        self.data_aug_params["do_elastic"] = True
        self.data_aug_params["p_eldef"] = 0.3
        self.data_aug_params["eldef_deformation_scale"] = (0, 0.25)
        self.data_aug_params["do_additive_brightness"] = True
        self.data_aug_params["additive_brightness_mu"] = 0
        self.data_aug_params["additive_brightness_sigma"] = 0.2
        self.data_aug_params["additive_brightness_p_per_sample"] = 0.3
        self.data_aug_params["additive_brightness_p_per_channel"] = 1
        self.data_aug_params['gamma_range'] = (0.5, 1.6)
        self.data_aug_params['num_cached_per_thread'] = 4

    def initialize(self, training=True, force_load_plans=False):
        """Set up plans, deep-supervision loss, data loaders and network.

        Like nnUNetTrainerV2.initialize but uses get_insaneDA_augmentation2
        and stores the loss weights in self.ds_loss_weights.  Safe to call
        twice: a second call only logs a message.
        """
        if not self.was_initialized:
            maybe_mkdir_p(self.output_folder)
            if force_load_plans or (self.plans is None):
                self.load_plans_file()
            self.process_plans(self.plans)
            self.setup_DA_params()
            ################# Here we wrap the loss for deep supervision ############
            # we need to know the number of outputs of the network
            net_numpool = len(self.net_num_pool_op_kernel_sizes)
            # we give each output a weight which decreases exponentially (division by 2) as the resolution decreases
            # this gives higher resolution outputs more weight in the loss
            weights = np.array([1 / (2 ** i) for i in range(net_numpool)])
            # we don't use the lowest 2 outputs. Normalize weights so that they sum to 1
            mask = np.array([True] + [True if i < net_numpool - 1 else False for i in range(1, net_numpool)])
            weights[~mask] = 0
            weights = weights / weights.sum()
            self.ds_loss_weights = weights
            # now wrap the loss
            self.loss = MultipleOutputLoss2(self.loss, self.ds_loss_weights)
            ################# END ###################
            self.folder_with_preprocessed_data = join(self.dataset_directory, self.plans['data_identifier'] +
                                                      "_stage%d" % self.stage)
            if training:
                self.dl_tr, self.dl_val = self.get_basic_generators()
                if self.unpack_data:
                    print("unpacking dataset")
                    unpack_dataset(self.folder_with_preprocessed_data)
                    print("done")
                else:
                    print(
                        "INFO: Not unpacking data! Training may be slow due to that. Pray you are not using 2d or you "
                        "will wait all winter for your model to finish!")
                self.tr_gen, self.val_gen = get_insaneDA_augmentation2(
                    self.dl_tr, self.dl_val,
                    self.data_aug_params[
                        'patch_size_for_spatialtransform'],
                    self.data_aug_params,
                    deep_supervision_scales=self.deep_supervision_scales,
                    pin_memory=self.pin_memory
                )
                self.print_to_log_file("TRAINING KEYS:\n %s" % (str(self.dataset_tr.keys())),
                                       also_print_to_console=False)
                self.print_to_log_file("VALIDATION KEYS:\n %s" % (str(self.dataset_val.keys())),
                                       also_print_to_console=False)
            else:
                pass
            self.initialize_network()
            self.initialize_optimizer_and_scheduler()
            assert isinstance(self.network, (SegmentationNetwork, nn.DataParallel))
        else:
            self.print_to_log_file('self.was_initialized is True, not running self.initialize again')
        self.was_initialized = True

    # commented-out debugging helper kept as-is (string literal, not executed)
    """def run_training(self):
        from batchviewer import view_batch
        a = next(self.tr_gen)
        view_batch(a['data'][:, 0], width=512, height=512)
        import IPython;IPython.embed()"""
class nnUNetTrainerV2_DA3_BN(nnUNetTrainerV2_DA3):
    """nnUNetTrainerV2_DA3 variant that builds the network with BatchNorm
    layers instead of the default normalization."""

    def initialize_network(self):
        """Construct the Generic_UNet (with BatchNorm and LeakyReLU), move it
        to the GPU when one is available, and set the inference nonlinearity
        to softmax."""
        # Choose the 3D or 2D flavour of the conv/dropout/norm operators.
        if self.threeD:
            conv_op, dropout_op, norm_op = nn.Conv3d, nn.Dropout3d, nn.BatchNorm3d
        else:
            conv_op, dropout_op, norm_op = nn.Conv2d, nn.Dropout2d, nn.BatchNorm2d
        self.network = Generic_UNet(
            self.num_input_channels, self.base_num_features, self.num_classes,
            len(self.net_num_pool_op_kernel_sizes),
            self.conv_per_stage, 2, conv_op, norm_op, {'eps': 1e-5, 'affine': True},
            dropout_op, {'p': 0, 'inplace': True},
            nn.LeakyReLU, {'negative_slope': 1e-2, 'inplace': True},
            True, False, lambda x: x, InitWeights_He(1e-2),
            self.net_num_pool_op_kernel_sizes, self.net_conv_kernel_sizes,
            False, True, True)
        if torch.cuda.is_available():
            self.network.cuda()
        self.network.inference_apply_nonlin = softmax_helper
| 19,740
| 55.402857
| 120
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/training/network_training/nnUNet_variants/data_augmentation/nnUNetTrainerV2_independentScalePerAxis.py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nnunet.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2
class nnUNetTrainerV2_independentScalePerAxis(nnUNetTrainerV2):
    """nnUNetTrainerV2 variant that draws an independent scale factor for
    each spatial axis during augmentation."""

    def setup_DA_params(self):
        """Inherit the parent augmentation setup, then enable per-axis scaling."""
        super().setup_DA_params()
        self.data_aug_params.update(independent_scale_factor_for_each_axis=True)
| 976
| 41.478261
| 114
|
py
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.